aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig12
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/ac.c1
-rw-r--r--drivers/acpi/acpi_pad.c514
-rw-r--r--drivers/acpi/acpica/acconfig.h4
-rw-r--r--drivers/acpi/acpica/exregion.c35
-rw-r--r--drivers/acpi/bus.c49
-rw-r--r--drivers/acpi/button.c3
-rw-r--r--drivers/acpi/dock.c16
-rw-r--r--drivers/acpi/ec.c56
-rw-r--r--drivers/acpi/osl.c8
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/power_meter.c6
-rw-r--r--drivers/acpi/proc.c4
-rw-r--r--drivers/acpi/processor_core.c9
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/processor_throttling.c6
-rw-r--r--drivers/acpi/scan.c704
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/acpi/video.c21
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/ata/ahci.c209
-rw-r--r--drivers/ata/libata-acpi.c40
-rw-r--r--drivers/ata/libata-core.c24
-rw-r--r--drivers/ata/libata-eh.c56
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_atp867x.c101
-rw-r--r--drivers/ata/pata_sc1200.c3
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c29
-rw-r--r--drivers/ata/sata_nv.c18
-rw-r--r--drivers/ata/sata_via.c1
-rw-r--r--drivers/base/bus.c17
-rw-r--r--drivers/base/driver.c2
-rw-r--r--drivers/base/platform.c6
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/block/DAC960.c156
-rw-r--r--drivers/block/cciss.c834
-rw-r--r--drivers/block/cciss.h12
-rw-r--r--drivers/block/cpqarray.c63
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/virtio_blk.c39
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.c11
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/bfin-otp.c2
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/char/dtlk.c1
-rw-r--r--drivers/char/epca.c1
-rw-r--r--drivers/char/generic_serial.c1
-rw-r--r--drivers/char/genrtc.c1
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_iucv.c4
-rw-r--r--drivers/char/hvc_xen.c25
-rw-r--r--drivers/char/hw_random/omap-rng.c4
-rw-r--r--drivers/char/hw_random/virtio-rng.c3
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1
-rw-r--r--drivers/char/istallion.c1
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/nozomi.c1
-rw-r--r--drivers/char/pty.c48
-rw-r--r--drivers/char/rio/riocmd.c1
-rw-r--r--drivers/char/rio/rioctrl.c1
-rw-r--r--drivers/char/rio/riotty.c1
-rw-r--r--drivers/char/rtc.c1
-rw-r--r--drivers/char/ser_a2232.c1
-rw-r--r--drivers/char/serial167.c7
-rw-r--r--drivers/char/sonypi.c1
-rw-r--r--drivers/char/stallion.c1
-rw-r--r--drivers/char/tlclk.c1
-rw-r--r--drivers/char/tpm/tpm.c3
-rw-r--r--drivers/char/tty_buffer.c31
-rw-r--r--drivers/char/tty_io.c15
-rw-r--r--drivers/char/tty_ldisc.c7
-rw-r--r--drivers/char/tty_port.c3
-rw-r--r--drivers/char/virtio_console.c1
-rw-r--r--drivers/char/vt_ioctl.c8
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/cpufreq/cpufreq.c48
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c4
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c4
-rw-r--r--drivers/cpuidle/cpuidle.c5
-rw-r--r--drivers/crypto/padlock-sha.c14
-rw-r--r--drivers/edac/Kconfig14
-rw-r--r--drivers/edac/Makefile5
-rw-r--r--drivers/edac/amd64_edac.c106
-rw-r--r--drivers/edac/amd64_edac.h23
-rw-r--r--drivers/edac/amd64_edac_inj.c49
-rw-r--r--drivers/edac/edac_mce_amd.c32
-rw-r--r--drivers/edac/i5000_edac.c7
-rw-r--r--drivers/edac/i5400_edac.c89
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firewire/core-cdev.c1
-rw-r--r--drivers/firewire/sbp2.c39
-rw-r--r--drivers/firmware/iscsi_ibft.c2
-rw-r--r--drivers/firmware/iscsi_ibft_find.c4
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpio/twl4030-gpio.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c89
-rw-r--r--drivers/gpu/drm/drm_edid.c61
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c391
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h43
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c10
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h30
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c305
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h49
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c14
-rw-r--r--drivers/gpu/drm/i915/intel_display.c193
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c15
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c35
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c3
-rw-r--r--drivers/gpu/drm/radeon/.gitignore3
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c358
-rw-r--r--drivers/gpu/drm/radeon/avivod.h9
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c12
-rw-r--r--drivers/gpu/drm/radeon/r100.c669
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h69
-rw-r--r--drivers/gpu/drm/radeon/r100d.h145
-rw-r--r--drivers/gpu/drm/radeon/r200.c82
-rw-r--r--drivers/gpu/drm/radeon/r300.c447
-rw-r--r--drivers/gpu/drm/radeon/r300d.h205
-rw-r--r--drivers/gpu/drm/radeon/r420.c8
-rw-r--r--drivers/gpu/drm/radeon/r420d.h24
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h12
-rw-r--r--drivers/gpu/drm/radeon/r520.c277
-rw-r--r--drivers/gpu/drm/radeon/r520d.h187
-rw-r--r--drivers/gpu/drm/radeon/r600.c431
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c56
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c204
-rw-r--r--drivers/gpu/drm/radeon/r600d.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon.h145
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h304
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c291
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c287
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c278
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c103
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c270
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c131
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c15
-rw-r--r--drivers/gpu/drm/radeon/rs100d.h40
-rw-r--r--drivers/gpu/drm/radeon/rs400.c277
-rw-r--r--drivers/gpu/drm/radeon/rs400d.h160
-rw-r--r--drivers/gpu/drm/radeon/rs600.c517
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h470
-rw-r--r--drivers/gpu/drm/radeon/rs690.c358
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h307
-rw-r--r--drivers/gpu/drm/radeon/rs690r.h99
-rw-r--r--drivers/gpu/drm/radeon/rv200d.h36
-rw-r--r--drivers/gpu/drm/radeon/rv250d.h123
-rw-r--r--drivers/gpu/drm/radeon/rv350d.h52
-rw-r--r--drivers/gpu/drm/radeon/rv515.c367
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h385
-rw-r--r--drivers/gpu/drm/radeon/rv770.c269
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_global.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-twinhan.c4
-rw-r--r--drivers/hid/hidraw.c6
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adt7475.c17
-rw-r--r--drivers/hwmon/asus_atk0110.c339
-rw-r--r--drivers/hwmon/dme1737.c29
-rw-r--r--drivers/hwmon/fschmd.c4
-rw-r--r--drivers/hwmon/hp_accel.c5
-rw-r--r--drivers/hwmon/it87.c5
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c3
-rw-r--r--drivers/hwmon/ltc4215.c47
-rw-r--r--drivers/hwmon/ltc4245.c131
-rw-r--r--drivers/hwmon/s3c-hwmon.c11
-rw-r--r--drivers/hwmon/sht15.c8
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c86
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c10
-rw-r--r--drivers/i2c/busses/i2c-piix4.c12
-rw-r--r--drivers/i2c/busses/i2c-scmi.c5
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/ide/atiixp.c2
-rw-r--r--drivers/ide/cmd64x.c6
-rw-r--r--drivers/ide/ide-probe.c9
-rw-r--r--drivers/ide/ide-proc.c8
-rw-r--r--drivers/ide/sis5513.c10
-rw-r--r--drivers/ieee1394/dma.c2
-rw-r--r--drivers/ieee1394/raw1394.c1
-rw-r--r--drivers/ieee1394/video1394.c1
-rw-r--r--drivers/ieee802154/fakehard.c5
-rw-r--r--drivers/infiniband/core/addr.c2
-rw-r--r--drivers/infiniband/core/iwcm.c3
-rw-r--r--drivers/infiniband/core/mad_rmpp.c17
-rw-r--r--drivers/infiniband/core/ucm.c1
-rw-r--r--drivers/infiniband/core/ucma.c1
-rw-r--r--drivers/infiniband/core/user_mad.c1
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_sdma.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/ff-core.c20
-rw-r--r--drivers/input/ff-memless.c26
-rw-r--r--drivers/input/input.c97
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/atkbd.c145
-rw-r--r--drivers/input/keyboard/gpio_keys.c1
-rw-r--r--drivers/input/keyboard/hilkbd.c1
-rw-r--r--drivers/input/keyboard/sunkbd.c1
-rw-r--r--drivers/input/misc/Kconfig1
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/input/misc/rotary_encoder.c4
-rw-r--r--drivers/input/misc/sparcspkr.c4
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/misc/wistron_btns.c9
-rw-r--r--drivers/input/mouse/lifebook.c3
-rw-r--r--drivers/input/mouse/logips2pp.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mousedev.c1
-rw-r--r--drivers/input/serio/Kconfig13
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h13
-rw-r--r--drivers/input/serio/i8042.c38
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/serio/serio_raw.c1
-rw-r--r--drivers/input/serio/serport.c1
-rw-r--r--drivers/input/touchscreen/ad7879.c4
-rw-r--r--drivers/isdn/capi/kcapi.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c1
-rw-r--r--drivers/isdn/hisax/arcofi.c1
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c1
-rw-r--r--drivers/isdn/hisax/hfc_pci.c1
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c1
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c1
-rw-r--r--drivers/isdn/pcbit/drv.c1
-rw-r--r--drivers/isdn/pcbit/layer2.c1
-rw-r--r--drivers/isdn/sc/init.c1
-rw-r--r--drivers/leds/Kconfig13
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cobalt-qube.c2
-rw-r--r--drivers/leds/leds-cobalt-raq.c4
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds-pca9532.c15
-rw-r--r--drivers/leds/leds-wm831x-status.c341
-rw-r--r--drivers/leds/ledtrig-gpio.c32
-rw-r--r--drivers/lguest/interrupts_and_traps.c1
-rw-r--r--drivers/lguest/lguest_user.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c4
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/macintosh/via-pmu-led.c2
-rw-r--r--drivers/macintosh/via-pmu.c40
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/md/Makefile22
-rw-r--r--drivers/md/bitmap.c9
-rw-r--r--drivers/md/dm-exception-store.c38
-rw-r--r--drivers/md/dm-exception-store.h8
-rw-r--r--drivers/md/dm-log-userspace-base.c2
-rw-r--r--drivers/md/dm-snap-persistent.c16
-rw-r--r--drivers/md/dm-snap.c25
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/md.c48
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c269
-rw-r--r--drivers/md/raid5.h14
-rw-r--r--drivers/md/raid6altivec.uc2
-rw-r--r--drivers/md/raid6int.uc2
-rw-r--r--drivers/md/raid6test/Makefile42
-rw-r--r--drivers/md/unroll.awk20
-rw-r--r--drivers/md/unroll.pl24
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c8
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/dvb/dvb-usb/ce6230.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c15
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c38
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-fe.c8
-rw-r--r--drivers/media/dvb/frontends/dib0070.h7
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/pt1/pt1.c1
-rw-r--r--drivers/media/dvb/siano/smsusb.c6
-rw-r--r--drivers/media/radio/radio-cadet.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c33
-rw-r--r--drivers/media/video/cafe_ccic.c2
-rw-r--r--drivers/media/video/cpia.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c20
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c3
-rw-r--r--drivers/media/video/meye.c3
-rw-r--r--drivers/media/video/pxa_camera.c5
-rw-r--r--drivers/media/video/s2255drv.c5
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-ts.c6
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/saa7164/saa7164-cmd.c2
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c8
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/video/soc_camera.c16
-rw-r--r--drivers/media/video/stk-webcam.c2
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/video/uvc/uvc_video.c3
-rw-r--r--drivers/media/video/videobuf-core.c1
-rw-r--r--drivers/media/video/videobuf-dma-contig.c2
-rw-r--r--drivers/media/video/videobuf-dma-sg.c3
-rw-r--r--drivers/media/video/videobuf-vmalloc.c2
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/message/fusion/mptlan.c1
-rw-r--r--drivers/mfd/ab3100-core.c4
-rw-r--r--drivers/mfd/twl4030-core.c91
-rw-r--r--drivers/mfd/ucb1400_core.c1
-rw-r--r--drivers/mfd/ucb1x00-core.c1
-rw-r--r--drivers/mfd/wm831x-irq.c3
-rw-r--r--drivers/misc/eeprom/max6875.c29
-rw-r--r--drivers/misc/hpilo.c1
-rw-r--r--drivers/misc/ibmasm/command.c1
-rw-r--r--drivers/misc/ibmasm/event.c1
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c1
-rw-r--r--drivers/misc/phantom.c3
-rw-r--r--drivers/misc/sgi-gru/grufile.c5
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c13
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c72
-rw-r--r--drivers/mmc/host/Kconfig41
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/mmci.c111
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/omap.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/pxamci.c104
-rw-r--r--drivers/mmc/host/s3cmci.c608
-rw-r--r--drivers/mmc/host/s3cmci.h14
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/sst25l.c1
-rw-r--r--drivers/mtd/maps/Kconfig1
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c5
-rw-r--r--drivers/mtd/maps/sa1100-flash.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c19
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/ubi/build.c3
-rw-r--r--drivers/mtd/ubi/scan.c7
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/au1000_eth.c15
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/cris/eth_v10.c20
-rw-r--r--drivers/net/davinci_emac.c26
-rw-r--r--drivers/net/depca.c1
-rw-r--r--drivers/net/e100.c1
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/ethtool.c12
-rw-r--r--drivers/net/e1000e/ich8lan.c4
-rw-r--r--drivers/net/e1000e/netdev.c50
-rw-r--r--drivers/net/e1000e/phy.c56
-rw-r--r--drivers/net/eql.c1
-rw-r--r--drivers/net/ethoc.c1
-rw-r--r--drivers/net/ewrk3.c1
-rw-r--r--drivers/net/forcedeth.c1
-rw-r--r--drivers/net/hamachi.c1
-rw-r--r--drivers/net/hamradio/baycom_epp.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c1
-rw-r--r--drivers/net/hp100.c1
-rw-r--r--drivers/net/igb/igb_ethtool.c1
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c47
-rw-r--r--drivers/net/irda/toim3232-sir.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ks8851_mll.c142
-rw-r--r--drivers/net/mlx4/fw.c5
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h2
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c55
-rw-r--r--drivers/net/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ns83820.c1
-rw-r--r--drivers/net/pcnet32.c1
-rw-r--r--drivers/net/sb1000.c1
-rw-r--r--drivers/net/sfc/sfe4001.c4
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/skfp/skfddi.c1
-rw-r--r--drivers/net/skge.c1
-rw-r--r--drivers/net/slip.c1
-rw-r--r--drivers/net/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/stmmac/stmmac_timer.c4
-rw-r--r--drivers/net/stmmac/stmmac_timer.h1
-rw-r--r--drivers/net/sungem.c1
-rw-r--r--drivers/net/tokenring/ibmtr.c1
-rw-r--r--drivers/net/typhoon.c1
-rw-r--r--drivers/net/usb/hso.c8
-rw-r--r--drivers/net/usb/rndis_host.c6
-rw-r--r--drivers/net/veth.c35
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/cycx_x25.c1
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/n2.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wireless/b43/pio.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1
-rw-r--r--drivers/net/wireless/libertas/cmd.c1
-rw-r--r--drivers/net/wireless/libertas/tx.c1
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c1
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c1
-rw-r--r--drivers/of/of_mdio.c13
-rw-r--r--drivers/oprofile/event_buffer.c35
-rw-r--r--drivers/pci/dmar.c62
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c1
-rw-r--r--drivers/pci/hotplug/cpqphp.h1
-rw-r--r--drivers/pci/intel-iommu.c121
-rw-r--r--drivers/pci/pci.c27
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c3
-rw-r--r--drivers/pci/pcie/aspm.c6
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/pci/quirks.c38
-rw-r--r--drivers/pci/setup-res.c37
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/au1000_generic.c2
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c536
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.h60
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cistpl.c20
-rw-r--r--drivers/pcmcia/cs.c69
-rw-r--r--drivers/pcmcia/cs_internal.h3
-rw-r--r--drivers/pcmcia/ds.c8
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82365.c6
-rw-r--r--drivers/pcmcia/m32r_cfc.c12
-rw-r--r--drivers/pcmcia/m32r_pcc.c12
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c17
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c2
-rw-r--r--drivers/pcmcia/pd6729.c8
-rw-r--r--drivers/pcmcia/pxa2xx_base.c18
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c230
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c3
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1100_neponset.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c2
-rw-r--r--drivers/pcmcia/soc_common.c5
-rw-r--r--drivers/pcmcia/socket_sysfs.c2
-rw-r--r--drivers/pcmcia/tcic.c6
-rw-r--r--drivers/pcmcia/vrc4171_card.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c98
-rw-r--r--drivers/platform/x86/eeepc-laptop.c9
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c127
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c634
-rw-r--r--drivers/pnp/pnpacpi/core.c21
-rw-r--r--drivers/pps/kapi.c20
-rw-r--r--drivers/pps/pps.c11
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/fixed.c5
-rw-r--r--drivers/regulator/wm831x-ldo.c6
-rw-r--r--drivers/rtc/interface.c1
-rw-r--r--drivers/rtc/rtc-coh901331.c11
-rw-r--r--drivers/rtc/rtc-dev.c1
-rw-r--r--drivers/rtc/rtc-pcf50633.c7
-rw-r--r--drivers/rtc/rtc-pxa.c27
-rw-r--r--drivers/rtc/rtc-sa1100.c23
-rw-r--r--drivers/rtc/rtc-v3020.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c9
-rw-r--r--drivers/s390/block/dasd.c18
-rw-r--r--drivers/s390/block/dasd_eckd.c13
-rw-r--r--drivers/s390/char/monreader.c1
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_async.c51
-rw-r--r--drivers/s390/char/sclp_quiesce.c48
-rw-r--r--drivers/s390/char/sclp_vt220.c30
-rw-r--r--drivers/s390/char/tape_block.c3
-rw-r--r--drivers/s390/cio/blacklist.c13
-rw-r--r--drivers/s390/cio/chp.c2
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c35
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c13
-rw-r--r--drivers/s390/net/smsgiucv.c7
-rw-r--r--drivers/s390/scsi/zfcp_aux.c44
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c40
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c17
-rw-r--r--drivers/s390/scsi/zfcp_erp.c22
-rw-r--r--drivers/s390/scsi/zfcp_ext.h3
-rw-r--r--drivers/s390/scsi/zfcp_fc.c11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c35
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c1
-rw-r--r--drivers/scsi/Kconfig11
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/be2iscsi/Kconfig8
-rw-r--r--drivers/scsi/be2iscsi/Makefile8
-rw-r--r--drivers/scsi/be2iscsi/be.h183
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c523
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h877
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c638
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h75
-rw-r--r--drivers/scsi/be2iscsi/be_main.c3390
-rw-r--r--drivers/scsi/be2iscsi/be_main.h837
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c321
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h249
-rw-r--r--drivers/scsi/bfa/Makefile15
-rw-r--r--drivers/scsi/bfa/bfa_callback_priv.h57
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h205
-rw-r--r--drivers/scsi/bfa/bfa_cee.c492
-rw-r--r--drivers/scsi/bfa/bfa_core.c402
-rw-r--r--drivers/scsi/bfa/bfa_csdebug.c58
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c175
-rw-r--r--drivers/scsi/bfa/bfa_fcpim_priv.h188
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c1671
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c182
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c940
-rw-r--r--drivers/scsi/bfa/bfa_fcs_port.c68
-rw-r--r--drivers/scsi/bfa/bfa_fcs_uf.c105
-rw-r--r--drivers/scsi/bfa/bfa_fcxp.c782
-rw-r--r--drivers/scsi/bfa/bfa_fcxp_priv.h138
-rw-r--r--drivers/scsi/bfa/bfa_fwimg_priv.h31
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c142
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c162
-rw-r--r--drivers/scsi/bfa/bfa_intr.c218
-rw-r--r--drivers/scsi/bfa/bfa_intr_priv.h115
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2382
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h259
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.c872
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.h168
-rw-r--r--drivers/scsi/bfa/bfa_iocfc_q.c44
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c1311
-rw-r--r--drivers/scsi/bfa/bfa_itnim.c1088
-rw-r--r--drivers/scsi/bfa/bfa_log.c346
-rw-r--r--drivers/scsi/bfa/bfa_log_module.c451
-rw-r--r--drivers/scsi/bfa/bfa_lps.c782
-rw-r--r--drivers/scsi/bfa/bfa_lps_priv.h38
-rw-r--r--drivers/scsi/bfa/bfa_module.c90
-rw-r--r--drivers/scsi/bfa/bfa_modules_priv.h43
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h222
-rw-r--r--drivers/scsi/bfa/bfa_port.c460
-rw-r--r--drivers/scsi/bfa/bfa_port_priv.h90
-rw-r--r--drivers/scsi/bfa/bfa_priv.h113
-rw-r--r--drivers/scsi/bfa/bfa_rport.c911
-rw-r--r--drivers/scsi/bfa/bfa_rport_priv.h45
-rw-r--r--drivers/scsi/bfa/bfa_sgpg.c231
-rw-r--r--drivers/scsi/bfa/bfa_sgpg_priv.h79
-rw-r--r--drivers/scsi/bfa/bfa_sm.c38
-rw-r--r--drivers/scsi/bfa/bfa_timer.c90
-rw-r--r--drivers/scsi/bfa/bfa_trcmod_priv.h66
-rw-r--r--drivers/scsi/bfa/bfa_tskim.c689
-rw-r--r--drivers/scsi/bfa/bfa_uf.c345
-rw-r--r--drivers/scsi/bfa/bfa_uf_priv.h47
-rw-r--r--drivers/scsi/bfa/bfad.c1182
-rw-r--r--drivers/scsi/bfa/bfad_attr.c649
-rw-r--r--drivers/scsi/bfa/bfad_attr.h65
-rw-r--r--drivers/scsi/bfa/bfad_drv.h295
-rw-r--r--drivers/scsi/bfa/bfad_fwimg.c97
-rw-r--r--drivers/scsi/bfa/bfad_im.c1230
-rw-r--r--drivers/scsi/bfa/bfad_im.h150
-rw-r--r--drivers/scsi/bfa/bfad_im_compat.h46
-rw-r--r--drivers/scsi/bfa/bfad_intr.c214
-rw-r--r--drivers/scsi/bfa/bfad_ipfc.h42
-rw-r--r--drivers/scsi/bfa/bfad_os.c50
-rw-r--r--drivers/scsi/bfa/bfad_tm.h59
-rw-r--r--drivers/scsi/bfa/bfad_trcmod.h52
-rw-r--r--drivers/scsi/bfa/fab.c62
-rw-r--r--drivers/scsi/bfa/fabric.c1278
-rw-r--r--drivers/scsi/bfa/fcbuild.c1449
-rw-r--r--drivers/scsi/bfa/fcbuild.h273
-rw-r--r--drivers/scsi/bfa/fcpim.c844
-rw-r--r--drivers/scsi/bfa/fcptm.c68
-rw-r--r--drivers/scsi/bfa/fcs.h30
-rw-r--r--drivers/scsi/bfa/fcs_auth.h37
-rw-r--r--drivers/scsi/bfa/fcs_fabric.h61
-rw-r--r--drivers/scsi/bfa/fcs_fcpim.h44
-rw-r--r--drivers/scsi/bfa/fcs_fcptm.h45
-rw-r--r--drivers/scsi/bfa/fcs_fcxp.h29
-rw-r--r--drivers/scsi/bfa/fcs_lport.h117
-rw-r--r--drivers/scsi/bfa/fcs_ms.h35
-rw-r--r--drivers/scsi/bfa/fcs_port.h32
-rw-r--r--drivers/scsi/bfa/fcs_rport.h61
-rw-r--r--drivers/scsi/bfa/fcs_trcmod.h56
-rw-r--r--drivers/scsi/bfa/fcs_uf.h32
-rw-r--r--drivers/scsi/bfa/fcs_vport.h39
-rw-r--r--drivers/scsi/bfa/fdmi.c1223
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen.h92
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_adapter.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_audit.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ethport.h35
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ioc.h37
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_itnim.h33
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_lport.h51
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_port.h57
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_rport.h37
-rw-r--r--drivers/scsi/bfa/include/bfa.h177
-rw-r--r--drivers/scsi/bfa/include/bfa_fcpim.h159
-rw-r--r--drivers/scsi/bfa/include/bfa_fcptm.h47
-rw-r--r--drivers/scsi/bfa/include/bfa_svc.h324
-rw-r--r--drivers/scsi/bfa/include/bfa_timer.h53
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi.h174
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_boot.h34
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_cbreg.h305
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_cee.h119
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ctreg.h611
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fabric.h92
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcpim.h301
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcxp.h71
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ioc.h202
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_iocfc.h177
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lport.h89
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lps.h96
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_port.h115
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pport.h184
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_rport.h104
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_uf.h52
-rw-r--r--drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h36
-rw-r--r--drivers/scsi/bfa/include/cna/cee/bfa_cee.h77
-rw-r--r--drivers/scsi/bfa/include/cna/port/bfa_port.h69
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/ethport_defs.h36
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/phyport_defs.h218
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_checksum.h60
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_debug.h44
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_log.h184
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_perf.h34
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_plog.h162
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_q.h81
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_sm.h69
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_trc.h176
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_wc.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_adapter.h82
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_aen.h73
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_audit.h38
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_auth.h112
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_boot.h71
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h159
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_driver.h40
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ethport.h98
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h45
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_im_common.h32
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_im_team.h72
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ioc.h152
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h310
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h70
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_itnim.h126
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_led.h35
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_lport.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_mfg.h58
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pci.h41
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pm.h33
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pom.h56
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_port.h245
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pport.h383
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_qos.h99
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_rport.h199
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h255
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tin.h118
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h43
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_types.h30
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_version.h22
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vf.h74
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vport.h91
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb.h33
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h76
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_port.h113
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h80
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h47
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h47
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs.h73
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h82
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h112
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h131
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h63
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h226
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h104
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h63
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_fcs.h28
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_hal.h30
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_linux.h44
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_wdrv.h36
-rw-r--r--drivers/scsi/bfa/include/protocol/ct.h492
-rw-r--r--drivers/scsi/bfa/include/protocol/fc.h1105
-rw-r--r--drivers/scsi/bfa/include/protocol/fc_sp.h224
-rw-r--r--drivers/scsi/bfa/include/protocol/fcp.h186
-rw-r--r--drivers/scsi/bfa/include/protocol/fdmi.h163
-rw-r--r--drivers/scsi/bfa/include/protocol/pcifw.h75
-rw-r--r--drivers/scsi/bfa/include/protocol/scsi.h1648
-rw-r--r--drivers/scsi/bfa/include/protocol/types.h42
-rw-r--r--drivers/scsi/bfa/loop.c422
-rw-r--r--drivers/scsi/bfa/lport_api.c291
-rw-r--r--drivers/scsi/bfa/lport_priv.h82
-rw-r--r--drivers/scsi/bfa/ms.c759
-rw-r--r--drivers/scsi/bfa/n2n.c105
-rw-r--r--drivers/scsi/bfa/ns.c1243
-rw-r--r--drivers/scsi/bfa/plog.c184
-rw-r--r--drivers/scsi/bfa/rport.c2618
-rw-r--r--drivers/scsi/bfa/rport_api.c180
-rw-r--r--drivers/scsi/bfa/rport_ftrs.c375
-rw-r--r--drivers/scsi/bfa/scn.c482
-rw-r--r--drivers/scsi/bfa/vfapi.c292
-rw-r--r--drivers/scsi/bfa/vport.c891
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hptiop.c37
-rw-r--r--drivers/scsi/hptiop.h3
-rw-r--r--drivers/scsi/ipr.c42
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c15
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h103
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h200
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt334
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h18
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h65
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h134
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c446
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h106
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c22
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c61
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c568
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c26
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/mvsas/mv_init.c4
-rw-r--r--drivers/scsi/pmcraid.c68
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_debug.c139
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/sd.c140
-rw-r--r--drivers/scsi/sd.h9
-rw-r--r--drivers/scsi/sd_dif.c65
-rw-r--r--drivers/scsi/sg.c55
-rw-r--r--drivers/scsi/sr.c22
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/serial/8250.c9
-rw-r--r--drivers/serial/8250_pci.c86
-rw-r--r--drivers/serial/Kconfig21
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/atmel_serial.c2
-rw-r--r--drivers/serial/bcm63xx_uart.c890
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c1
-rw-r--r--drivers/serial/icom.c54
-rw-r--r--drivers/serial/mpc52xx_uart.c2
-rw-r--r--drivers/serial/pxa.c20
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_core.c7
-rw-r--r--drivers/serial/serial_txx9.c39
-rw-r--r--drivers/serial/sh-sci.c2
-rw-r--r--drivers/sfi/sfi_core.c17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/amba-pl022.c10
-rw-r--r--drivers/spi/pxa2xx_spi.c30
-rw-r--r--drivers/spi/spi_imx.c (renamed from drivers/spi/mxc_spi.c)383
-rw-r--r--drivers/spi/spi_stmp.c2
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/agnx/Kconfig5
-rw-r--r--drivers/staging/agnx/Makefile8
-rw-r--r--drivers/staging/agnx/TODO22
-rw-r--r--drivers/staging/agnx/agnx.h156
-rw-r--r--drivers/staging/agnx/debug.h416
-rw-r--r--drivers/staging/agnx/pci.c635
-rw-r--r--drivers/staging/agnx/phy.c960
-rw-r--r--drivers/staging/agnx/phy.h409
-rw-r--r--drivers/staging/agnx/rf.c893
-rw-r--r--drivers/staging/agnx/sta.c218
-rw-r--r--drivers/staging/agnx/sta.h222
-rw-r--r--drivers/staging/agnx/table.c168
-rw-r--r--drivers/staging/agnx/table.h10
-rw-r--r--drivers/staging/agnx/xmit.c836
-rw-r--r--drivers/staging/agnx/xmit.h250
-rw-r--r--drivers/staging/android/Kconfig1
-rw-r--r--drivers/staging/b3dfg/b3dfg.c1
-rw-r--r--drivers/staging/comedi/Kconfig2
-rw-r--r--drivers/staging/comedi/comedi_fops.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c8
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c18
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c12
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c22
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/s526.c109
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c1
-rw-r--r--drivers/staging/cowloop/Kconfig16
-rw-r--r--drivers/staging/cowloop/Makefile1
-rw-r--r--drivers/staging/cowloop/TODO11
-rw-r--r--drivers/staging/cowloop/cowloop.c2842
-rw-r--r--drivers/staging/cowloop/cowloop.h66
-rw-r--r--drivers/staging/dst/dcore.c2
-rw-r--r--drivers/staging/et131x/et1310_address_map.h2
-rw-r--r--drivers/staging/et131x/et1310_rx.c20
-rw-r--r--drivers/staging/hv/ChannelMgmt.h3
-rw-r--r--drivers/staging/hv/NetVsc.c10
-rw-r--r--drivers/staging/hv/TODO6
-rw-r--r--drivers/staging/hv/osd.c1
-rw-r--r--drivers/staging/hv/osd.h1
-rw-r--r--drivers/staging/hv/vmbus_drv.c28
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/industrialio-core.c2
-rw-r--r--drivers/staging/iio/light/tsl2561.c4
-rw-r--r--drivers/staging/otus/Kconfig2
-rw-r--r--drivers/staging/p9auth/p9auth.c14
-rw-r--r--drivers/staging/panel/panel.c13
-rw-r--r--drivers/staging/poch/poch.c1
-rw-r--r--drivers/staging/rt2860/Kconfig2
-rw-r--r--drivers/staging/rt2860/common/cmm_data_2860.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_info.c1
-rw-r--r--drivers/staging/rt2860/rt_linux.c1
-rw-r--r--drivers/staging/rt2870/Kconfig2
-rw-r--r--drivers/staging/rt3090/Kconfig2
-rw-r--r--drivers/staging/rt3090/common/cmm_info.c1
-rw-r--r--drivers/staging/rt3090/rt_linux.c1
-rw-r--r--drivers/staging/rtl8187se/Kconfig2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c12
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c17
-rw-r--r--drivers/staging/rtl8192e/Kconfig2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c8
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c8
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c19
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c21
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c1
-rw-r--r--drivers/staging/rtl8192su/Kconfig2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c15
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c17
-rw-r--r--drivers/staging/sep/sep_driver.c1
-rw-r--r--drivers/staging/stlc45xx/Kconfig8
-rw-r--r--drivers/staging/stlc45xx/Makefile1
-rw-r--r--drivers/staging/stlc45xx/stlc45xx.c2594
-rw-r--r--drivers/staging/stlc45xx/stlc45xx.h283
-rw-r--r--drivers/staging/stlc45xx/stlc45xx_lmac.h434
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c1
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c1
-rw-r--r--drivers/staging/vt6656/main_usb.c1
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/winbond/wbusb.c44
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/thermal/thermal_sys.c10
-rw-r--r--drivers/uio/uio.c3
-rw-r--r--drivers/uio/uio_pdrv_genirq.c1
-rw-r--r--drivers/usb/class/cdc-acm.c18
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/ether.c9
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/imx_udc.c8
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h105
-rw-r--r--drivers/usb/host/ehci-sched.c13
-rw-r--r--drivers/usb/host/isp1362-hcd.c18
-rw-r--r--drivers/usb/host/isp1362.h12
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-pci.c20
-rw-r--r--drivers/usb/host/ohci-pxa27x.c30
-rw-r--r--drivers/usb/host/ohci-q.c18
-rw-r--r--drivers/usb/host/ohci.h9
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c23
-rw-r--r--drivers/usb/host/whci/asl.c23
-rw-r--r--drivers/usb/host/whci/debug.c6
-rw-r--r--drivers/usb/host/whci/pzl.c24
-rw-r--r--drivers/usb/host/xhci-hcd.c29
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c7
-rw-r--r--drivers/usb/misc/rio500.c3
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c1
-rw-r--r--drivers/usb/misc/usblcd.c3
-rw-r--r--drivers/usb/mon/mon_bin.c13
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.h7
-rw-r--r--drivers/usb/musb/musb_regs.h9
-rw-r--r--drivers/usb/serial/aircable.c10
-rw-r--r--drivers/usb/serial/cp210x.c22
-rw-r--r--drivers/usb/serial/cypress_m8.c12
-rw-r--r--drivers/usb/serial/digi_acceleport.c8
-rw-r--r--drivers/usb/serial/empeg.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c422
-rw-r--r--drivers/usb/serial/garmin_gps.c12
-rw-r--r--drivers/usb/serial/generic.c2
-rw-r--r--drivers/usb/serial/ipaq.c9
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c2
-rw-r--r--drivers/usb/serial/mct_u232.c14
-rw-r--r--drivers/usb/serial/opticon.c13
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/serial/oti6858.c6
-rw-r--r--drivers/usb/serial/pl2303.c8
-rw-r--r--drivers/usb/serial/sierra.c40
-rw-r--r--drivers/usb/serial/symbolserial.c22
-rw-r--r--drivers/usb/serial/usb-serial.c24
-rw-r--r--drivers/usb/serial/visor.c28
-rw-r--r--drivers/usb/serial/whiteheat.c10
-rw-r--r--drivers/usb/storage/transport.c46
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/wusbcore/security.c41
-rw-r--r--drivers/uwb/uwb-debug.c6
-rw-r--r--drivers/uwb/whc-rc.c1
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/atafb.c6
-rw-r--r--drivers/video/atmel_lcdfb.c11
-rw-r--r--drivers/video/backlight/Kconfig33
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp5520_bl.c377
-rw-r--r--drivers/video/backlight/adx_bl.c178
-rw-r--r--drivers/video/backlight/backlight.c42
-rw-r--r--drivers/video/backlight/corgi_lcd.c5
-rw-r--r--drivers/video/backlight/da903x_bl.c20
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c242
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c36
-rw-r--r--drivers/video/backlight/wm831x_bl.c250
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/da8xx-fb.c3
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/video/msm/mddi.c3
-rw-r--r--drivers/video/msm/mddi_client_nt35399.c1
-rw-r--r--drivers/video/msm/mddi_client_toshiba.c1
-rw-r--r--drivers/video/msm/mdp.c18
-rw-r--r--drivers/video/msm/mdp_ppp.c20
-rw-r--r--drivers/video/omap/blizzard.c10
-rw-r--r--drivers/video/omap/dispc.c2
-rw-r--r--drivers/video/omap/omapfb_main.c22
-rw-r--r--drivers/video/pxafb.c32
-rw-r--r--drivers/video/savage/savagefb_driver.c20
-rw-r--r--drivers/video/uvesafb.c28
-rw-r--r--drivers/virtio/virtio_balloon.c3
-rw-r--r--drivers/virtio/virtio_pci.c27
-rw-r--r--drivers/virtio/virtio_ring.c3
-rw-r--r--drivers/w1/masters/ds2482.c35
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c354
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/xen/xenfs/xenbus.c1
1060 files changed, 77034 insertions, 19911 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index dd8729d674e5..93d2c7971df6 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU
211 select ACPI_CONTAINER 211 select ACPI_CONTAINER
212 default y 212 default y
213 213
214config ACPI_PROCESSOR_AGGREGATOR
215 tristate "Processor Aggregator"
216 depends on ACPI_PROCESSOR
217 depends on EXPERIMENTAL
218 depends on X86
219 help
220 ACPI 4.0 defines processor Aggregator, which enables OS to perform
221 specific processor configuration and control that applies to all
222 processors in the platform. Currently only logical processor idling
223 is defined, which is to reduce power consumption. This driver
224 supports the new device.
225
214config ACPI_THERMAL 226config ACPI_THERMAL
215 tristate "Thermal Zone" 227 tristate "Thermal Zone"
216 depends on ACPI_PROCESSOR 228 depends on ACPI_PROCESSOR
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82cd49dc603b..7702118509a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
62processor-y := processor_core.o processor_throttling.o 62processor-y := processor_core.o processor_throttling.o
63processor-y += processor_idle.o processor_thermal.o 63processor-y += processor_idle.o processor_thermal.o
64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
65
66obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 98b9690b0159..b6ed60b57b0d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -245,6 +245,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
245 acpi_bus_generate_netlink_event(device->pnp.device_class, 245 acpi_bus_generate_netlink_event(device->pnp.device_class,
246 dev_name(&device->dev), event, 246 dev_name(&device->dev), event,
247 (u32) ac->state); 247 (u32) ac->state);
248 acpi_notifier_call_chain(device, event, (u32) ac->state);
248#ifdef CONFIG_ACPI_SYSFS_POWER 249#ifdef CONFIG_ACPI_SYSFS_POWER
249 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 250 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
250#endif 251#endif
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644
index 000000000000..0d2cdb86158b
--- /dev/null
+++ b/drivers/acpi/acpi_pad.c
@@ -0,0 +1,514 @@
1/*
2 * acpi_pad.c ACPI Processor Aggregator Driver
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/cpumask.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/kthread.h>
27#include <linux/freezer.h>
28#include <linux/cpu.h>
29#include <linux/clockchips.h>
30#include <acpi/acpi_bus.h>
31#include <acpi/acpi_drivers.h>
32
33#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
34#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
35#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
36static DEFINE_MUTEX(isolated_cpus_lock);
37
38#define MWAIT_SUBSTATE_MASK (0xf)
39#define MWAIT_CSTATE_MASK (0xf)
40#define MWAIT_SUBSTATE_SIZE (4)
41#define CPUID_MWAIT_LEAF (5)
42#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
43#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
44static unsigned long power_saving_mwait_eax;
45static void power_saving_mwait_init(void)
46{
47 unsigned int eax, ebx, ecx, edx;
48 unsigned int highest_cstate = 0;
49 unsigned int highest_subcstate = 0;
50 int i;
51
52 if (!boot_cpu_has(X86_FEATURE_MWAIT))
53 return;
54 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
55 return;
56
57 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
58
59 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
60 !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
61 return;
62
63 edx >>= MWAIT_SUBSTATE_SIZE;
64 for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
65 if (edx & MWAIT_SUBSTATE_MASK) {
66 highest_cstate = i;
67 highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
68 }
69 }
70 power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
71 (highest_subcstate - 1);
72
73 for_each_online_cpu(i)
74 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
75
76#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
77 switch (boot_cpu_data.x86_vendor) {
78 case X86_VENDOR_AMD:
79 case X86_VENDOR_INTEL:
80 /*
81 * AMD Fam10h TSC will tick in all
82 * C/P/S0/S1 states when this bit is set.
83 */
84 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
85 return;
86
87 /*FALL THROUGH*/
88 default:
89 /* TSC could halt in idle, so notify users */
90 mark_tsc_unstable("TSC halts in idle");
91 }
92#endif
93}
94
95static unsigned long cpu_weight[NR_CPUS];
96static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
97static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
98static void round_robin_cpu(unsigned int tsk_index)
99{
100 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
101 cpumask_var_t tmp;
102 int cpu;
103 unsigned long min_weight = -1, preferred_cpu;
104
105 if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
106 return;
107
108 mutex_lock(&isolated_cpus_lock);
109 cpumask_clear(tmp);
110 for_each_cpu(cpu, pad_busy_cpus)
111 cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
112 cpumask_andnot(tmp, cpu_online_mask, tmp);
113 /* avoid HT sibilings if possible */
114 if (cpumask_empty(tmp))
115 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
116 if (cpumask_empty(tmp)) {
117 mutex_unlock(&isolated_cpus_lock);
118 return;
119 }
120 for_each_cpu(cpu, tmp) {
121 if (cpu_weight[cpu] < min_weight) {
122 min_weight = cpu_weight[cpu];
123 preferred_cpu = cpu;
124 }
125 }
126
127 if (tsk_in_cpu[tsk_index] != -1)
128 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
129 tsk_in_cpu[tsk_index] = preferred_cpu;
130 cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
131 cpu_weight[preferred_cpu]++;
132 mutex_unlock(&isolated_cpus_lock);
133
134 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
135}
136
137static void exit_round_robin(unsigned int tsk_index)
138{
139 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
140 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
141 tsk_in_cpu[tsk_index] = -1;
142}
143
144static unsigned int idle_pct = 5; /* percentage */
145static unsigned int round_robin_time = 10; /* second */
146static int power_saving_thread(void *data)
147{
148 struct sched_param param = {.sched_priority = 1};
149 int do_sleep;
150 unsigned int tsk_index = (unsigned long)data;
151 u64 last_jiffies = 0;
152
153 sched_setscheduler(current, SCHED_RR, &param);
154
155 while (!kthread_should_stop()) {
156 int cpu;
157 u64 expire_time;
158
159 try_to_freeze();
160
161 /* round robin to cpus */
162 if (last_jiffies + round_robin_time * HZ < jiffies) {
163 last_jiffies = jiffies;
164 round_robin_cpu(tsk_index);
165 }
166
167 do_sleep = 0;
168
169 current_thread_info()->status &= ~TS_POLLING;
170 /*
171 * TS_POLLING-cleared state must be visible before we test
172 * NEED_RESCHED:
173 */
174 smp_mb();
175
176 expire_time = jiffies + HZ * (100 - idle_pct) / 100;
177
178 while (!need_resched()) {
179 local_irq_disable();
180 cpu = smp_processor_id();
181 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
182 &cpu);
183 stop_critical_timings();
184
185 __monitor((void *)&current_thread_info()->flags, 0, 0);
186 smp_mb();
187 if (!need_resched())
188 __mwait(power_saving_mwait_eax, 1);
189
190 start_critical_timings();
191 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
192 &cpu);
193 local_irq_enable();
194
195 if (jiffies > expire_time) {
196 do_sleep = 1;
197 break;
198 }
199 }
200
201 current_thread_info()->status |= TS_POLLING;
202
203 /*
204 * current sched_rt has threshold for rt task running time.
205 * When a rt task uses 95% CPU time, the rt thread will be
206 * scheduled out for 5% CPU time to not starve other tasks. But
207 * the mechanism only works when all CPUs have RT task running,
208 * as if one CPU hasn't RT task, RT task from other CPUs will
209 * borrow CPU time from this CPU and cause RT task use > 95%
210 * CPU time. To make 'avoid staration' work, takes a nap here.
211 */
212 if (do_sleep)
213 schedule_timeout_killable(HZ * idle_pct / 100);
214 }
215
216 exit_round_robin(tsk_index);
217 return 0;
218}
219
220static struct task_struct *ps_tsks[NR_CPUS];
221static unsigned int ps_tsk_num;
222static int create_power_saving_task(void)
223{
224 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
225 (void *)(unsigned long)ps_tsk_num,
226 "power_saving/%d", ps_tsk_num);
227 if (ps_tsks[ps_tsk_num]) {
228 ps_tsk_num++;
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static void destroy_power_saving_task(void)
235{
236 if (ps_tsk_num > 0) {
237 ps_tsk_num--;
238 kthread_stop(ps_tsks[ps_tsk_num]);
239 }
240}
241
242static void set_power_saving_task_num(unsigned int num)
243{
244 if (num > ps_tsk_num) {
245 while (ps_tsk_num < num) {
246 if (create_power_saving_task())
247 return;
248 }
249 } else if (num < ps_tsk_num) {
250 while (ps_tsk_num > num)
251 destroy_power_saving_task();
252 }
253}
254
255static int acpi_pad_idle_cpus(unsigned int num_cpus)
256{
257 get_online_cpus();
258
259 num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
260 set_power_saving_task_num(num_cpus);
261
262 put_online_cpus();
263 return 0;
264}
265
266static uint32_t acpi_pad_idle_cpus_num(void)
267{
268 return ps_tsk_num;
269}
270
271static ssize_t acpi_pad_rrtime_store(struct device *dev,
272 struct device_attribute *attr, const char *buf, size_t count)
273{
274 unsigned long num;
275 if (strict_strtoul(buf, 0, &num))
276 return -EINVAL;
277 if (num < 1 || num >= 100)
278 return -EINVAL;
279 mutex_lock(&isolated_cpus_lock);
280 round_robin_time = num;
281 mutex_unlock(&isolated_cpus_lock);
282 return count;
283}
284
285static ssize_t acpi_pad_rrtime_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
289}
290static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
291 acpi_pad_rrtime_show,
292 acpi_pad_rrtime_store);
293
294static ssize_t acpi_pad_idlepct_store(struct device *dev,
295 struct device_attribute *attr, const char *buf, size_t count)
296{
297 unsigned long num;
298 if (strict_strtoul(buf, 0, &num))
299 return -EINVAL;
300 if (num < 1 || num >= 100)
301 return -EINVAL;
302 mutex_lock(&isolated_cpus_lock);
303 idle_pct = num;
304 mutex_unlock(&isolated_cpus_lock);
305 return count;
306}
307
308static ssize_t acpi_pad_idlepct_show(struct device *dev,
309 struct device_attribute *attr, char *buf)
310{
311 return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
312}
313static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
314 acpi_pad_idlepct_show,
315 acpi_pad_idlepct_store);
316
317static ssize_t acpi_pad_idlecpus_store(struct device *dev,
318 struct device_attribute *attr, const char *buf, size_t count)
319{
320 unsigned long num;
321 if (strict_strtoul(buf, 0, &num))
322 return -EINVAL;
323 mutex_lock(&isolated_cpus_lock);
324 acpi_pad_idle_cpus(num);
325 mutex_unlock(&isolated_cpus_lock);
326 return count;
327}
328
329static ssize_t acpi_pad_idlecpus_show(struct device *dev,
330 struct device_attribute *attr, char *buf)
331{
332 return cpumask_scnprintf(buf, PAGE_SIZE,
333 to_cpumask(pad_busy_cpus_bits));
334}
335static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
336 acpi_pad_idlecpus_show,
337 acpi_pad_idlecpus_store);
338
339static int acpi_pad_add_sysfs(struct acpi_device *device)
340{
341 int result;
342
343 result = device_create_file(&device->dev, &dev_attr_idlecpus);
344 if (result)
345 return -ENODEV;
346 result = device_create_file(&device->dev, &dev_attr_idlepct);
347 if (result) {
348 device_remove_file(&device->dev, &dev_attr_idlecpus);
349 return -ENODEV;
350 }
351 result = device_create_file(&device->dev, &dev_attr_rrtime);
352 if (result) {
353 device_remove_file(&device->dev, &dev_attr_idlecpus);
354 device_remove_file(&device->dev, &dev_attr_idlepct);
355 return -ENODEV;
356 }
357 return 0;
358}
359
360static void acpi_pad_remove_sysfs(struct acpi_device *device)
361{
362 device_remove_file(&device->dev, &dev_attr_idlecpus);
363 device_remove_file(&device->dev, &dev_attr_idlepct);
364 device_remove_file(&device->dev, &dev_attr_rrtime);
365}
366
367/* Query firmware how many CPUs should be idle */
368static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
369{
370 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
371 acpi_status status;
372 union acpi_object *package;
373 int rev, num, ret = -EINVAL;
374
375 status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
376 if (ACPI_FAILURE(status))
377 return -EINVAL;
378 package = buffer.pointer;
379 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
380 goto out;
381 rev = package->package.elements[0].integer.value;
382 num = package->package.elements[1].integer.value;
383 if (rev != 1)
384 goto out;
385 *num_cpus = num;
386 ret = 0;
387out:
388 kfree(buffer.pointer);
389 return ret;
390}
391
392/* Notify firmware how many CPUs are idle */
393static void acpi_pad_ost(acpi_handle handle, int stat,
394 uint32_t idle_cpus)
395{
396 union acpi_object params[3] = {
397 {.type = ACPI_TYPE_INTEGER,},
398 {.type = ACPI_TYPE_INTEGER,},
399 {.type = ACPI_TYPE_BUFFER,},
400 };
401 struct acpi_object_list arg_list = {3, params};
402
403 params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
404 params[1].integer.value = stat;
405 params[2].buffer.length = 4;
406 params[2].buffer.pointer = (void *)&idle_cpus;
407 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
408}
409
410static void acpi_pad_handle_notify(acpi_handle handle)
411{
412 int num_cpus, ret;
413 uint32_t idle_cpus;
414
415 mutex_lock(&isolated_cpus_lock);
416 if (acpi_pad_pur(handle, &num_cpus)) {
417 mutex_unlock(&isolated_cpus_lock);
418 return;
419 }
420 ret = acpi_pad_idle_cpus(num_cpus);
421 idle_cpus = acpi_pad_idle_cpus_num();
422 if (!ret)
423 acpi_pad_ost(handle, 0, idle_cpus);
424 else
425 acpi_pad_ost(handle, 1, 0);
426 mutex_unlock(&isolated_cpus_lock);
427}
428
429static void acpi_pad_notify(acpi_handle handle, u32 event,
430 void *data)
431{
432 struct acpi_device *device = data;
433
434 switch (event) {
435 case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
436 acpi_pad_handle_notify(handle);
437 acpi_bus_generate_proc_event(device, event, 0);
438 acpi_bus_generate_netlink_event(device->pnp.device_class,
439 dev_name(&device->dev), event, 0);
440 break;
441 default:
442 printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
443 break;
444 }
445}
446
447static int acpi_pad_add(struct acpi_device *device)
448{
449 acpi_status status;
450
451 strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
452 strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
453
454 if (acpi_pad_add_sysfs(device))
455 return -ENODEV;
456
457 status = acpi_install_notify_handler(device->handle,
458 ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
459 if (ACPI_FAILURE(status)) {
460 acpi_pad_remove_sysfs(device);
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int acpi_pad_remove(struct acpi_device *device,
468 int type)
469{
470 mutex_lock(&isolated_cpus_lock);
471 acpi_pad_idle_cpus(0);
472 mutex_unlock(&isolated_cpus_lock);
473
474 acpi_remove_notify_handler(device->handle,
475 ACPI_DEVICE_NOTIFY, acpi_pad_notify);
476 acpi_pad_remove_sysfs(device);
477 return 0;
478}
479
480static const struct acpi_device_id pad_device_ids[] = {
481 {"ACPI000C", 0},
482 {"", 0},
483};
484MODULE_DEVICE_TABLE(acpi, pad_device_ids);
485
486static struct acpi_driver acpi_pad_driver = {
487 .name = "processor_aggregator",
488 .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
489 .ids = pad_device_ids,
490 .ops = {
491 .add = acpi_pad_add,
492 .remove = acpi_pad_remove,
493 },
494};
495
496static int __init acpi_pad_init(void)
497{
498 power_saving_mwait_init();
499 if (power_saving_mwait_eax == 0)
500 return -EINVAL;
501
502 return acpi_bus_register_driver(&acpi_pad_driver);
503}
504
505static void __exit acpi_pad_exit(void)
506{
507 acpi_bus_unregister_driver(&acpi_pad_driver);
508}
509
510module_init(acpi_pad_init);
511module_exit(acpi_pad_exit);
512MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
513MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
514MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index 8e679ef5b231..a4471e3d3853 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -103,9 +103,9 @@
103 103
104#define ACPI_MAX_REFERENCE_COUNT 0x1000 104#define ACPI_MAX_REFERENCE_COUNT 0x1000
105 105
106/* Size of cached memory mapping for system memory operation region */ 106/* Default page size for use in mapping memory for operation regions */
107 107
108#define ACPI_SYSMEM_REGION_WINDOW_SIZE 4096 108#define ACPI_DEFAULT_PAGE_SIZE 4096 /* Must be power of 2 */
109 109
110/* owner_id tracking. 8 entries allows for 255 owner_ids */ 110/* owner_id tracking. 8 entries allows for 255 owner_ids */
111 111
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 3a54b737d2da..2bd83ac57c3a 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -77,7 +77,8 @@ acpi_ex_system_memory_space_handler(u32 function,
77 void *logical_addr_ptr = NULL; 77 void *logical_addr_ptr = NULL;
78 struct acpi_mem_space_context *mem_info = region_context; 78 struct acpi_mem_space_context *mem_info = region_context;
79 u32 length; 79 u32 length;
80 acpi_size window_size; 80 acpi_size map_length;
81 acpi_size page_boundary_map_length;
81#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED 82#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
82 u32 remainder; 83 u32 remainder;
83#endif 84#endif
@@ -144,25 +145,39 @@ acpi_ex_system_memory_space_handler(u32 function,
144 } 145 }
145 146
146 /* 147 /*
147 * Don't attempt to map memory beyond the end of the region, and 148 * Attempt to map from the requested address to the end of the region.
148 * constrain the maximum mapping size to something reasonable. 149 * However, we will never map more than one page, nor will we cross
150 * a page boundary.
149 */ 151 */
150 window_size = (acpi_size) 152 map_length = (acpi_size)
151 ((mem_info->address + mem_info->length) - address); 153 ((mem_info->address + mem_info->length) - address);
152 154
153 if (window_size > ACPI_SYSMEM_REGION_WINDOW_SIZE) { 155 /*
154 window_size = ACPI_SYSMEM_REGION_WINDOW_SIZE; 156 * If mapping the entire remaining portion of the region will cross
157 * a page boundary, just map up to the page boundary, do not cross.
158 * On some systems, crossing a page boundary while mapping regions
159 * can cause warnings if the pages have different attributes
160 * due to resource management
161 */
162 page_boundary_map_length =
163 ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
164
165 if (!page_boundary_map_length) {
166 page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
167 }
168
169 if (map_length > page_boundary_map_length) {
170 map_length = page_boundary_map_length;
155 } 171 }
156 172
157 /* Create a new mapping starting at the address given */ 173 /* Create a new mapping starting at the address given */
158 174
159 mem_info->mapped_logical_address = 175 mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
160 acpi_os_map_memory((acpi_physical_address) address, window_size);
161 if (!mem_info->mapped_logical_address) { 176 if (!mem_info->mapped_logical_address) {
162 ACPI_ERROR((AE_INFO, 177 ACPI_ERROR((AE_INFO,
163 "Could not map memory at %8.8X%8.8X, size %X", 178 "Could not map memory at %8.8X%8.8X, size %X",
164 ACPI_FORMAT_NATIVE_UINT(address), 179 ACPI_FORMAT_NATIVE_UINT(address),
165 (u32) window_size)); 180 (u32) map_length));
166 mem_info->mapped_length = 0; 181 mem_info->mapped_length = 0;
167 return_ACPI_STATUS(AE_NO_MEMORY); 182 return_ACPI_STATUS(AE_NO_MEMORY);
168 } 183 }
@@ -170,7 +185,7 @@ acpi_ex_system_memory_space_handler(u32 function,
170 /* Save the physical address and mapping size */ 185 /* Save the physical address and mapping size */
171 186
172 mem_info->mapped_physical_address = address; 187 mem_info->mapped_physical_address = address;
173 mem_info->mapped_length = window_size; 188 mem_info->mapped_length = map_length;
174 } 189 }
175 190
176 /* 191 /*
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 135fbfe1825c..741191524353 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -94,36 +94,33 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
94 94
95EXPORT_SYMBOL(acpi_bus_get_device); 95EXPORT_SYMBOL(acpi_bus_get_device);
96 96
97int acpi_bus_get_status(struct acpi_device *device) 97acpi_status acpi_bus_get_status_handle(acpi_handle handle,
98 unsigned long long *sta)
98{ 99{
99 acpi_status status = AE_OK; 100 acpi_status status;
100 unsigned long long sta = 0;
101
102 101
103 if (!device) 102 status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
104 return -EINVAL; 103 if (ACPI_SUCCESS(status))
104 return AE_OK;
105 105
106 /* 106 if (status == AE_NOT_FOUND) {
107 * Evaluate _STA if present. 107 *sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
108 */ 108 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
109 if (device->flags.dynamic_status) { 109 return AE_OK;
110 status =
111 acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
112 if (ACPI_FAILURE(status))
113 return -ENODEV;
114 STRUCT_TO_INT(device->status) = (int)sta;
115 } 110 }
111 return status;
112}
116 113
117 /* 114int acpi_bus_get_status(struct acpi_device *device)
118 * According to ACPI spec some device can be present and functional 115{
119 * even if the parent is not present but functional. 116 acpi_status status;
120 * In such conditions the child device should not inherit the status 117 unsigned long long sta;
121 * from the parent. 118
122 */ 119 status = acpi_bus_get_status_handle(device->handle, &sta);
123 else 120 if (ACPI_FAILURE(status))
124 STRUCT_TO_INT(device->status) = 121 return -ENODEV;
125 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | 122
126 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING; 123 STRUCT_TO_INT(device->status) = (int) sta;
127 124
128 if (device->status.functional && !device->status.present) { 125 if (device->status.functional && !device->status.present) {
129 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: " 126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
@@ -135,10 +132,8 @@ int acpi_bus_get_status(struct acpi_device *device)
135 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", 132 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
136 device->pnp.bus_id, 133 device->pnp.bus_id,
137 (u32) STRUCT_TO_INT(device->status))); 134 (u32) STRUCT_TO_INT(device->status)));
138
139 return 0; 135 return 0;
140} 136}
141
142EXPORT_SYMBOL(acpi_bus_get_status); 137EXPORT_SYMBOL(acpi_bus_get_status);
143 138
144void acpi_bus_private_data_handler(acpi_handle handle, 139void acpi_bus_private_data_handler(acpi_handle handle,
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 9335b87c5174..0c9c6a9a002c 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -251,6 +251,9 @@ int acpi_lid_open(void)
251 acpi_status status; 251 acpi_status status;
252 unsigned long long state; 252 unsigned long long state;
253 253
254 if (!lid_device)
255 return -ENODEV;
256
254 status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, 257 status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
255 &state); 258 &state);
256 if (ACPI_FAILURE(status)) 259 if (ACPI_FAILURE(status))
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3a2cfefc71ab..7338b6a3e049 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -67,7 +67,7 @@ struct dock_station {
67 struct list_head dependent_devices; 67 struct list_head dependent_devices;
68 struct list_head hotplug_devices; 68 struct list_head hotplug_devices;
69 69
70 struct list_head sibiling; 70 struct list_head sibling;
71 struct platform_device *dock_device; 71 struct platform_device *dock_device;
72}; 72};
73static LIST_HEAD(dock_stations); 73static LIST_HEAD(dock_stations);
@@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle)
275 275
276 if (is_dock(handle)) 276 if (is_dock(handle))
277 return 1; 277 return 1;
278 list_for_each_entry(dock_station, &dock_stations, sibiling) { 278 list_for_each_entry(dock_station, &dock_stations, sibling) {
279 if (find_dock_dependent_device(dock_station, handle)) 279 if (find_dock_dependent_device(dock_station, handle))
280 return 1; 280 return 1;
281 } 281 }
@@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
619 * make sure this handle is for a device dependent on the dock, 619 * make sure this handle is for a device dependent on the dock,
620 * this would include the dock station itself 620 * this would include the dock station itself
621 */ 621 */
622 list_for_each_entry(dock_station, &dock_stations, sibiling) { 622 list_for_each_entry(dock_station, &dock_stations, sibling) {
623 /* 623 /*
624 * An ATA bay can be in a dock and itself can be ejected 624 * An ATA bay can be in a dock and itself can be ejected
625 * seperately, so there are two 'dock stations' which need the 625 * seperately, so there are two 'dock stations' which need the
@@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
651 if (!dock_station_count) 651 if (!dock_station_count)
652 return; 652 return;
653 653
654 list_for_each_entry(dock_station, &dock_stations, sibiling) { 654 list_for_each_entry(dock_station, &dock_stations, sibling) {
655 dd = find_dock_dependent_device(dock_station, handle); 655 dd = find_dock_dependent_device(dock_station, handle);
656 if (dd) 656 if (dd)
657 dock_del_hotplug_device(dock_station, dd); 657 dock_del_hotplug_device(dock_station, dd);
@@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK 787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
788 && event != ACPI_NOTIFY_EJECT_REQUEST) 788 && event != ACPI_NOTIFY_EJECT_REQUEST)
789 return 0; 789 return 0;
790 list_for_each_entry(dock_station, &dock_stations, sibiling) { 790 list_for_each_entry(dock_station, &dock_stations, sibling) {
791 if (dock_station->handle == handle) { 791 if (dock_station->handle == handle) {
792 struct dock_data *dock_data; 792 struct dock_data *dock_data;
793 793
@@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle)
958 dock_station->last_dock_time = jiffies - HZ; 958 dock_station->last_dock_time = jiffies - HZ;
959 INIT_LIST_HEAD(&dock_station->dependent_devices); 959 INIT_LIST_HEAD(&dock_station->dependent_devices);
960 INIT_LIST_HEAD(&dock_station->hotplug_devices); 960 INIT_LIST_HEAD(&dock_station->hotplug_devices);
961 INIT_LIST_HEAD(&dock_station->sibiling); 961 INIT_LIST_HEAD(&dock_station->sibling);
962 spin_lock_init(&dock_station->dd_lock); 962 spin_lock_init(&dock_station->dd_lock);
963 mutex_init(&dock_station->hp_lock); 963 mutex_init(&dock_station->hp_lock);
964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
@@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle)
1044 add_dock_dependent_device(dock_station, dd); 1044 add_dock_dependent_device(dock_station, dd);
1045 1045
1046 dock_station_count++; 1046 dock_station_count++;
1047 list_add(&dock_station->sibiling, &dock_stations); 1047 list_add(&dock_station->sibling, &dock_stations);
1048 return 0; 1048 return 0;
1049 1049
1050dock_add_err_unregister: 1050dock_add_err_unregister:
@@ -1149,7 +1149,7 @@ static void __exit dock_exit(void)
1149 struct dock_station *tmp; 1149 struct dock_station *tmp;
1150 1150
1151 unregister_acpi_bus_notifier(&dock_acpi_notifier); 1151 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) 1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
1153 dock_remove(dock_station); 1153 dock_remove(dock_station);
1154} 1154}
1155 1155
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f70796081c4c..baef28c1e630 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -119,6 +119,8 @@ static struct acpi_ec {
119} *boot_ec, *first_ec; 119} *boot_ec, *first_ec;
120 120
121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ 121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
122static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
123static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
122 124
123/* -------------------------------------------------------------------------- 125/* --------------------------------------------------------------------------
124 Transaction Management 126 Transaction Management
@@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
232 } 234 }
233 advance_transaction(ec, acpi_ec_read_status(ec)); 235 advance_transaction(ec, acpi_ec_read_status(ec));
234 } while (time_before(jiffies, delay)); 236 } while (time_before(jiffies, delay));
235 if (!ec->curr->irq_count || 237 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
236 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
237 break; 238 break;
238 /* try restart command if we get any false interrupts */
239 pr_debug(PREFIX "controller reset, restart transaction\n"); 239 pr_debug(PREFIX "controller reset, restart transaction\n");
240 spin_lock_irqsave(&ec->curr_lock, flags); 240 spin_lock_irqsave(&ec->curr_lock, flags);
241 start_transaction(ec); 241 start_transaction(ec);
@@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = {
899 {"", 0}, 899 {"", 0},
900}; 900};
901 901
902/* Some BIOS do not survive early DSDT scan, skip it */
903static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
904{
905 EC_FLAGS_SKIP_DSDT_SCAN = 1;
906 return 0;
907}
908
909/* ASUStek often supplies us with broken ECDT, validate it */
910static int ec_validate_ecdt(const struct dmi_system_id *id)
911{
912 EC_FLAGS_VALIDATE_ECDT = 1;
913 return 0;
914}
915
916/* MSI EC needs special treatment, enable it */
917static int ec_flag_msi(const struct dmi_system_id *id)
918{
919 EC_FLAGS_MSI = 1;
920 EC_FLAGS_VALIDATE_ECDT = 1;
921 return 0;
922}
923
924static struct dmi_system_id __initdata ec_dmi_table[] = {
925 {
926 ec_skip_dsdt_scan, "Compal JFL92", {
927 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
928 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
929 {
930 ec_flag_msi, "MSI hardware", {
931 DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
932 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
933 {
934 ec_validate_ecdt, "ASUS hardware", {
935 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
936 {},
937};
938
939
902int __init acpi_ec_ecdt_probe(void) 940int __init acpi_ec_ecdt_probe(void)
903{ 941{
904 acpi_status status; 942 acpi_status status;
@@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void)
911 /* 949 /*
912 * Generate a boot ec context 950 * Generate a boot ec context
913 */ 951 */
914 if (dmi_name_in_vendors("Micro-Star") || 952 dmi_check_system(ec_dmi_table);
915 dmi_name_in_vendors("Notebook")) {
916 pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
917 EC_FLAGS_MSI = 1;
918 }
919 status = acpi_get_table(ACPI_SIG_ECDT, 1, 953 status = acpi_get_table(ACPI_SIG_ECDT, 1,
920 (struct acpi_table_header **)&ecdt_ptr); 954 (struct acpi_table_header **)&ecdt_ptr);
921 if (ACPI_SUCCESS(status)) { 955 if (ACPI_SUCCESS(status)) {
@@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void)
926 boot_ec->handle = ACPI_ROOT_OBJECT; 960 boot_ec->handle = ACPI_ROOT_OBJECT;
927 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 961 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
928 /* Don't trust ECDT, which comes from ASUSTek */ 962 /* Don't trust ECDT, which comes from ASUSTek */
929 if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) 963 if (!EC_FLAGS_VALIDATE_ECDT)
930 goto install; 964 goto install;
931 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 965 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
932 if (!saved_ec) 966 if (!saved_ec)
@@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void)
934 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); 968 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
935 /* fall through */ 969 /* fall through */
936 } 970 }
971
972 if (EC_FLAGS_SKIP_DSDT_SCAN)
973 return -ENODEV;
974
937 /* This workaround is needed only on some broken machines, 975 /* This workaround is needed only on some broken machines,
938 * which require early EC, but fail to provide ECDT */ 976 * which require early EC, but fail to provide ECDT */
939 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); 977 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5633b86e3ed1..7c1c59ea9ec6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1161,7 +1161,13 @@ int acpi_check_resource_conflict(struct resource *res)
1161 res_list_elem->name, 1161 res_list_elem->name,
1162 (long long) res_list_elem->start, 1162 (long long) res_list_elem->start,
1163 (long long) res_list_elem->end); 1163 (long long) res_list_elem->end);
1164 printk(KERN_INFO "ACPI: Device needs an ACPI driver\n"); 1164 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1165 printk(KERN_NOTICE "ACPI: This conflict may"
1166 " cause random problems and system"
1167 " instability\n");
1168 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1169 " for this device, you should use it instead of"
1170 " the native driver\n");
1165 } 1171 }
1166 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1172 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1167 return -EBUSY; 1173 return -EBUSY;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 31122214e0ec..1af808171d46 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -389,6 +389,17 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
389 389
390 pbus = pdev->subordinate; 390 pbus = pdev->subordinate;
391 pci_dev_put(pdev); 391 pci_dev_put(pdev);
392
393 /*
394 * This function may be called for a non-PCI device that has a
395 * PCI parent (eg. a disk under a PCI SATA controller). In that
396 * case pdev->subordinate will be NULL for the parent.
397 */
398 if (!pbus) {
399 dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
400 pdev = NULL;
401 break;
402 }
392 } 403 }
393out: 404out:
394 list_for_each_entry_safe(node, tmp, &device_list, node) 405 list_for_each_entry_safe(node, tmp, &device_list, node)
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index e6bfd77986b8..2ef7030a0c28 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -294,7 +294,11 @@ static int set_acpi_trip(struct acpi_power_meter_resource *resource)
294 return -EINVAL; 294 return -EINVAL;
295 } 295 }
296 296
297 return data; 297 /* _PTP returns 0 on success, nonzero otherwise */
298 if (data)
299 return -EINVAL;
300
301 return 0;
298} 302}
299 303
300static ssize_t set_trip(struct device *dev, struct device_attribute *devattr, 304static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index d0d550d22a6d..d0d25e2e1ced 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -393,11 +393,13 @@ acpi_system_write_wakeup_device(struct file *file,
393 struct list_head *node, *next; 393 struct list_head *node, *next;
394 char strbuf[5]; 394 char strbuf[5];
395 char str[5] = ""; 395 char str[5] = "";
396 int len = count; 396 unsigned int len = count;
397 struct acpi_device *found_dev = NULL; 397 struct acpi_device *found_dev = NULL;
398 398
399 if (len > 4) 399 if (len > 4)
400 len = 4; 400 len = 4;
401 if (len < 0)
402 return -EFAULT;
401 403
402 if (copy_from_user(strbuf, buffer, len)) 404 if (copy_from_user(strbuf, buffer, len))
403 return -EFAULT; 405 return -EFAULT;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c2d4d6e09364..ec742a4e5635 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -770,7 +770,7 @@ static struct notifier_block acpi_cpu_notifier =
770 .notifier_call = acpi_cpu_soft_notify, 770 .notifier_call = acpi_cpu_soft_notify,
771}; 771};
772 772
773static int acpi_processor_add(struct acpi_device *device) 773static int __cpuinit acpi_processor_add(struct acpi_device *device)
774{ 774{
775 struct acpi_processor *pr = NULL; 775 struct acpi_processor *pr = NULL;
776 int result = 0; 776 int result = 0;
@@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device)
863 goto err_remove_sysfs; 863 goto err_remove_sysfs;
864 } 864 }
865 865
866 if (pr->flags.throttling) {
867 printk(KERN_INFO PREFIX "%s [%s] (supports",
868 acpi_device_name(device), acpi_device_bid(device));
869 printk(" %d throttling states", pr->throttling.state_count);
870 printk(")\n");
871 }
872
873 return 0; 866 return 0;
874 867
875err_remove_sysfs: 868err_remove_sysfs:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cc61a6220102..bbd066e7f854 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1166,7 +1166,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1166#ifdef CONFIG_ACPI_PROCFS 1166#ifdef CONFIG_ACPI_PROCFS
1167 struct proc_dir_entry *entry = NULL; 1167 struct proc_dir_entry *entry = NULL;
1168#endif 1168#endif
1169 unsigned int i;
1170 1169
1171 if (boot_option_idle_override) 1170 if (boot_option_idle_override)
1172 return 0; 1171 return 0;
@@ -1214,13 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1214 acpi_processor_setup_cpuidle(pr); 1213 acpi_processor_setup_cpuidle(pr);
1215 if (cpuidle_register_device(&pr->power.dev)) 1214 if (cpuidle_register_device(&pr->power.dev))
1216 return -EIO; 1215 return -EIO;
1217
1218 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1219 for (i = 1; i <= pr->power.count; i++)
1220 if (pr->power.states[i].valid)
1221 printk(" C%d[C%d]", i,
1222 pr->power.states[i].type);
1223 printk(")\n");
1224 } 1216 }
1225#ifdef CONFIG_ACPI_PROCFS 1217#ifdef CONFIG_ACPI_PROCFS
1226 /* 'power' [R] */ 1218 /* 'power' [R] */
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 4c6c14c1e307..1c5d7a8b2fdf 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1133,15 +1133,15 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1133 int result = 0; 1133 int result = 0;
1134 struct acpi_processor_throttling *pthrottling; 1134 struct acpi_processor_throttling *pthrottling;
1135 1135
1136 if (!pr)
1137 return -EINVAL;
1138
1136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1139 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1137 "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", 1140 "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1138 pr->throttling.address, 1141 pr->throttling.address,
1139 pr->throttling.duty_offset, 1142 pr->throttling.duty_offset,
1140 pr->throttling.duty_width)); 1143 pr->throttling.duty_width));
1141 1144
1142 if (!pr)
1143 return -EINVAL;
1144
1145 /* 1145 /*
1146 * Evaluate _PTC, _TSS and _TPC 1146 * Evaluate _PTC, _TSS and _TPC
1147 * They must all be present or none of them can be used. 1147 * They must all be present or none of them can be used.
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 408ebde18986..14a7481c97d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -22,6 +22,8 @@ extern struct acpi_device *acpi_root;
22#define ACPI_BUS_HID "LNXSYBUS" 22#define ACPI_BUS_HID "LNXSYBUS"
23#define ACPI_BUS_DEVICE_NAME "System Bus" 23#define ACPI_BUS_DEVICE_NAME "System Bus"
24 24
25#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
26
25static LIST_HEAD(acpi_device_list); 27static LIST_HEAD(acpi_device_list);
26static LIST_HEAD(acpi_bus_id_list); 28static LIST_HEAD(acpi_bus_id_list);
27DEFINE_MUTEX(acpi_device_lock); 29DEFINE_MUTEX(acpi_device_lock);
@@ -43,40 +45,19 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
43{ 45{
44 int len; 46 int len;
45 int count; 47 int count;
46 48 struct acpi_hardware_id *id;
47 if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
48 return -ENODEV;
49 49
50 len = snprintf(modalias, size, "acpi:"); 50 len = snprintf(modalias, size, "acpi:");
51 size -= len; 51 size -= len;
52 52
53 if (acpi_dev->flags.hardware_id) { 53 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
54 count = snprintf(&modalias[len], size, "%s:", 54 count = snprintf(&modalias[len], size, "%s:", id->id);
55 acpi_dev->pnp.hardware_id);
56 if (count < 0 || count >= size) 55 if (count < 0 || count >= size)
57 return -EINVAL; 56 return -EINVAL;
58 len += count; 57 len += count;
59 size -= count; 58 size -= count;
60 } 59 }
61 60
62 if (acpi_dev->flags.compatible_ids) {
63 struct acpica_device_id_list *cid_list;
64 int i;
65
66 cid_list = acpi_dev->pnp.cid_list;
67 for (i = 0; i < cid_list->count; i++) {
68 count = snprintf(&modalias[len], size, "%s:",
69 cid_list->ids[i].string);
70 if (count < 0 || count >= size) {
71 printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
72 acpi_dev->pnp.device_name, i);
73 break;
74 }
75 len += count;
76 size -= count;
77 }
78 }
79
80 modalias[len] = '\0'; 61 modalias[len] = '\0';
81 return len; 62 return len;
82} 63}
@@ -183,7 +164,7 @@ static ssize_t
183acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { 164acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
184 struct acpi_device *acpi_dev = to_acpi_device(dev); 165 struct acpi_device *acpi_dev = to_acpi_device(dev);
185 166
186 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id); 167 return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
187} 168}
188static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); 169static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
189 170
@@ -219,17 +200,13 @@ static int acpi_device_setup_files(struct acpi_device *dev)
219 goto end; 200 goto end;
220 } 201 }
221 202
222 if (dev->flags.hardware_id) { 203 result = device_create_file(&dev->dev, &dev_attr_hid);
223 result = device_create_file(&dev->dev, &dev_attr_hid); 204 if (result)
224 if (result) 205 goto end;
225 goto end;
226 }
227 206
228 if (dev->flags.hardware_id || dev->flags.compatible_ids) { 207 result = device_create_file(&dev->dev, &dev_attr_modalias);
229 result = device_create_file(&dev->dev, &dev_attr_modalias); 208 if (result)
230 if (result) 209 goto end;
231 goto end;
232 }
233 210
234 /* 211 /*
235 * If device has _EJ0, 'eject' file is created that is used to trigger 212 * If device has _EJ0, 'eject' file is created that is used to trigger
@@ -255,11 +232,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
255 if (ACPI_SUCCESS(status)) 232 if (ACPI_SUCCESS(status))
256 device_remove_file(&dev->dev, &dev_attr_eject); 233 device_remove_file(&dev->dev, &dev_attr_eject);
257 234
258 if (dev->flags.hardware_id || dev->flags.compatible_ids) 235 device_remove_file(&dev->dev, &dev_attr_modalias);
259 device_remove_file(&dev->dev, &dev_attr_modalias); 236 device_remove_file(&dev->dev, &dev_attr_hid);
260
261 if (dev->flags.hardware_id)
262 device_remove_file(&dev->dev, &dev_attr_hid);
263 if (dev->handle) 237 if (dev->handle)
264 device_remove_file(&dev->dev, &dev_attr_path); 238 device_remove_file(&dev->dev, &dev_attr_path);
265} 239}
@@ -271,6 +245,7 @@ int acpi_match_device_ids(struct acpi_device *device,
271 const struct acpi_device_id *ids) 245 const struct acpi_device_id *ids)
272{ 246{
273 const struct acpi_device_id *id; 247 const struct acpi_device_id *id;
248 struct acpi_hardware_id *hwid;
274 249
275 /* 250 /*
276 * If the device is not present, it is unnecessary to load device 251 * If the device is not present, it is unnecessary to load device
@@ -279,40 +254,30 @@ int acpi_match_device_ids(struct acpi_device *device,
279 if (!device->status.present) 254 if (!device->status.present)
280 return -ENODEV; 255 return -ENODEV;
281 256
282 if (device->flags.hardware_id) { 257 for (id = ids; id->id[0]; id++)
283 for (id = ids; id->id[0]; id++) { 258 list_for_each_entry(hwid, &device->pnp.ids, list)
284 if (!strcmp((char*)id->id, device->pnp.hardware_id)) 259 if (!strcmp((char *) id->id, hwid->id))
285 return 0; 260 return 0;
286 }
287 }
288
289 if (device->flags.compatible_ids) {
290 struct acpica_device_id_list *cid_list = device->pnp.cid_list;
291 int i;
292
293 for (id = ids; id->id[0]; id++) {
294 /* compare multiple _CID entries against driver ids */
295 for (i = 0; i < cid_list->count; i++) {
296 if (!strcmp((char*)id->id,
297 cid_list->ids[i].string))
298 return 0;
299 }
300 }
301 }
302 261
303 return -ENOENT; 262 return -ENOENT;
304} 263}
305EXPORT_SYMBOL(acpi_match_device_ids); 264EXPORT_SYMBOL(acpi_match_device_ids);
306 265
266static void acpi_free_ids(struct acpi_device *device)
267{
268 struct acpi_hardware_id *id, *tmp;
269
270 list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
271 kfree(id->id);
272 kfree(id);
273 }
274}
275
307static void acpi_device_release(struct device *dev) 276static void acpi_device_release(struct device *dev)
308{ 277{
309 struct acpi_device *acpi_dev = to_acpi_device(dev); 278 struct acpi_device *acpi_dev = to_acpi_device(dev);
310 279
311 kfree(acpi_dev->pnp.cid_list); 280 acpi_free_ids(acpi_dev);
312 if (acpi_dev->flags.hardware_id)
313 kfree(acpi_dev->pnp.hardware_id);
314 if (acpi_dev->flags.unique_id)
315 kfree(acpi_dev->pnp.unique_id);
316 kfree(acpi_dev); 281 kfree(acpi_dev);
317} 282}
318 283
@@ -378,15 +343,13 @@ static acpi_status acpi_device_notify_fixed(void *data)
378static int acpi_device_install_notify_handler(struct acpi_device *device) 343static int acpi_device_install_notify_handler(struct acpi_device *device)
379{ 344{
380 acpi_status status; 345 acpi_status status;
381 char *hid;
382 346
383 hid = acpi_device_hid(device); 347 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
384 if (!strcmp(hid, ACPI_BUTTON_HID_POWERF))
385 status = 348 status =
386 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 349 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
387 acpi_device_notify_fixed, 350 acpi_device_notify_fixed,
388 device); 351 device);
389 else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) 352 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
390 status = 353 status =
391 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 354 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
392 acpi_device_notify_fixed, 355 acpi_device_notify_fixed,
@@ -404,10 +367,10 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
404 367
405static void acpi_device_remove_notify_handler(struct acpi_device *device) 368static void acpi_device_remove_notify_handler(struct acpi_device *device)
406{ 369{
407 if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF)) 370 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
408 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 371 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
409 acpi_device_notify_fixed); 372 acpi_device_notify_fixed);
410 else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF)) 373 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
411 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 374 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
412 acpi_device_notify_fixed); 375 acpi_device_notify_fixed);
413 else 376 else
@@ -474,12 +437,12 @@ struct bus_type acpi_bus_type = {
474 .uevent = acpi_device_uevent, 437 .uevent = acpi_device_uevent,
475}; 438};
476 439
477static int acpi_device_register(struct acpi_device *device, 440static int acpi_device_register(struct acpi_device *device)
478 struct acpi_device *parent)
479{ 441{
480 int result; 442 int result;
481 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; 443 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
482 int found = 0; 444 int found = 0;
445
483 /* 446 /*
484 * Linkage 447 * Linkage
485 * ------- 448 * -------
@@ -501,8 +464,9 @@ static int acpi_device_register(struct acpi_device *device,
501 * If failed, create one and link it into acpi_bus_id_list 464 * If failed, create one and link it into acpi_bus_id_list
502 */ 465 */
503 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { 466 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
504 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) { 467 if (!strcmp(acpi_device_bus_id->bus_id,
505 acpi_device_bus_id->instance_no ++; 468 acpi_device_hid(device))) {
469 acpi_device_bus_id->instance_no++;
506 found = 1; 470 found = 1;
507 kfree(new_bus_id); 471 kfree(new_bus_id);
508 break; 472 break;
@@ -510,7 +474,7 @@ static int acpi_device_register(struct acpi_device *device,
510 } 474 }
511 if (!found) { 475 if (!found) {
512 acpi_device_bus_id = new_bus_id; 476 acpi_device_bus_id = new_bus_id;
513 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); 477 strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
514 acpi_device_bus_id->instance_no = 0; 478 acpi_device_bus_id->instance_no = 0;
515 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 479 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
516 } 480 }
@@ -524,7 +488,7 @@ static int acpi_device_register(struct acpi_device *device,
524 mutex_unlock(&acpi_device_lock); 488 mutex_unlock(&acpi_device_lock);
525 489
526 if (device->parent) 490 if (device->parent)
527 device->dev.parent = &parent->dev; 491 device->dev.parent = &device->parent->dev;
528 device->dev.bus = &acpi_bus_type; 492 device->dev.bus = &acpi_bus_type;
529 device->dev.release = &acpi_device_release; 493 device->dev.release = &acpi_device_release;
530 result = device_register(&device->dev); 494 result = device_register(&device->dev);
@@ -664,6 +628,33 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
664/* -------------------------------------------------------------------------- 628/* --------------------------------------------------------------------------
665 Device Enumeration 629 Device Enumeration
666 -------------------------------------------------------------------------- */ 630 -------------------------------------------------------------------------- */
631static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
632{
633 acpi_status status;
634 int ret;
635 struct acpi_device *device;
636
637 /*
638 * Fixed hardware devices do not appear in the namespace and do not
639 * have handles, but we fabricate acpi_devices for them, so we have
640 * to deal with them specially.
641 */
642 if (handle == NULL)
643 return acpi_root;
644
645 do {
646 status = acpi_get_parent(handle, &handle);
647 if (status == AE_NULL_ENTRY)
648 return NULL;
649 if (ACPI_FAILURE(status))
650 return acpi_root;
651
652 ret = acpi_bus_get_device(handle, &device);
653 if (ret == 0)
654 return device;
655 } while (1);
656}
657
667acpi_status 658acpi_status
668acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) 659acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
669{ 660{
@@ -876,11 +867,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
876 if (ACPI_SUCCESS(status)) 867 if (ACPI_SUCCESS(status))
877 device->flags.dynamic_status = 1; 868 device->flags.dynamic_status = 1;
878 869
879 /* Presence of _CID indicates 'compatible_ids' */
880 status = acpi_get_handle(device->handle, "_CID", &temp);
881 if (ACPI_SUCCESS(status))
882 device->flags.compatible_ids = 1;
883
884 /* Presence of _RMV indicates 'removable' */ 870 /* Presence of _RMV indicates 'removable' */
885 status = acpi_get_handle(device->handle, "_RMV", &temp); 871 status = acpi_get_handle(device->handle, "_RMV", &temp);
886 if (ACPI_SUCCESS(status)) 872 if (ACPI_SUCCESS(status))
@@ -918,8 +904,7 @@ static int acpi_bus_get_flags(struct acpi_device *device)
918 return 0; 904 return 0;
919} 905}
920 906
921static void acpi_device_get_busid(struct acpi_device *device, 907static void acpi_device_get_busid(struct acpi_device *device)
922 acpi_handle handle, int type)
923{ 908{
924 char bus_id[5] = { '?', 0 }; 909 char bus_id[5] = { '?', 0 };
925 struct acpi_buffer buffer = { sizeof(bus_id), bus_id }; 910 struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
@@ -931,10 +916,12 @@ static void acpi_device_get_busid(struct acpi_device *device,
931 * The device's Bus ID is simply the object name. 916 * The device's Bus ID is simply the object name.
932 * TBD: Shouldn't this value be unique (within the ACPI namespace)? 917 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
933 */ 918 */
934 switch (type) { 919 if (ACPI_IS_ROOT_DEVICE(device)) {
935 case ACPI_BUS_TYPE_SYSTEM:
936 strcpy(device->pnp.bus_id, "ACPI"); 920 strcpy(device->pnp.bus_id, "ACPI");
937 break; 921 return;
922 }
923
924 switch (device->device_type) {
938 case ACPI_BUS_TYPE_POWER_BUTTON: 925 case ACPI_BUS_TYPE_POWER_BUTTON:
939 strcpy(device->pnp.bus_id, "PWRF"); 926 strcpy(device->pnp.bus_id, "PWRF");
940 break; 927 break;
@@ -942,7 +929,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
942 strcpy(device->pnp.bus_id, "SLPF"); 929 strcpy(device->pnp.bus_id, "SLPF");
943 break; 930 break;
944 default: 931 default:
945 acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); 932 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
946 /* Clean up trailing underscores (if any) */ 933 /* Clean up trailing underscores (if any) */
947 for (i = 3; i > 1; i--) { 934 for (i = 3; i > 1; i--) {
948 if (bus_id[i] == '_') 935 if (bus_id[i] == '_')
@@ -1000,204 +987,134 @@ static int acpi_dock_match(struct acpi_device *device)
1000 return acpi_get_handle(device->handle, "_DCK", &tmp); 987 return acpi_get_handle(device->handle, "_DCK", &tmp);
1001} 988}
1002 989
1003static struct acpica_device_id_list* 990char *acpi_device_hid(struct acpi_device *device)
1004acpi_add_cid(
1005 struct acpi_device_info *info,
1006 struct acpica_device_id *new_cid)
1007{ 991{
1008 struct acpica_device_id_list *cid; 992 struct acpi_hardware_id *hid;
1009 char *next_id_string;
1010 acpi_size cid_length;
1011 acpi_size new_cid_length;
1012 u32 i;
1013
1014
1015 /* Allocate new CID list with room for the new CID */
1016
1017 if (!new_cid)
1018 new_cid_length = info->compatible_id_list.list_size;
1019 else if (info->compatible_id_list.list_size)
1020 new_cid_length = info->compatible_id_list.list_size +
1021 new_cid->length + sizeof(struct acpica_device_id);
1022 else
1023 new_cid_length = sizeof(struct acpica_device_id_list) + new_cid->length;
1024
1025 cid = ACPI_ALLOCATE_ZEROED(new_cid_length);
1026 if (!cid) {
1027 return NULL;
1028 }
1029
1030 cid->list_size = new_cid_length;
1031 cid->count = info->compatible_id_list.count;
1032 if (new_cid)
1033 cid->count++;
1034 next_id_string = (char *) cid->ids + (cid->count * sizeof(struct acpica_device_id));
1035
1036 /* Copy all existing CIDs */
1037
1038 for (i = 0; i < info->compatible_id_list.count; i++) {
1039 cid_length = info->compatible_id_list.ids[i].length;
1040 cid->ids[i].string = next_id_string;
1041 cid->ids[i].length = cid_length;
1042 993
1043 ACPI_MEMCPY(next_id_string, info->compatible_id_list.ids[i].string, 994 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
1044 cid_length); 995 return hid->id;
1045 996}
1046 next_id_string += cid_length; 997EXPORT_SYMBOL(acpi_device_hid);
1047 }
1048 998
1049 /* Append the new CID */ 999static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1000{
1001 struct acpi_hardware_id *id;
1050 1002
1051 if (new_cid) { 1003 id = kmalloc(sizeof(*id), GFP_KERNEL);
1052 cid->ids[i].string = next_id_string; 1004 if (!id)
1053 cid->ids[i].length = new_cid->length; 1005 return;
1054 1006
1055 ACPI_MEMCPY(next_id_string, new_cid->string, new_cid->length); 1007 id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
1008 if (!id->id) {
1009 kfree(id);
1010 return;
1056 } 1011 }
1057 1012
1058 return cid; 1013 strcpy(id->id, dev_id);
1014 list_add_tail(&id->list, &device->pnp.ids);
1059} 1015}
1060 1016
1061static void acpi_device_set_id(struct acpi_device *device, 1017static void acpi_device_set_id(struct acpi_device *device)
1062 struct acpi_device *parent, acpi_handle handle,
1063 int type)
1064{ 1018{
1065 struct acpi_device_info *info = NULL;
1066 char *hid = NULL;
1067 char *uid = NULL;
1068 struct acpica_device_id_list *cid_list = NULL;
1069 char *cid_add = NULL;
1070 acpi_status status; 1019 acpi_status status;
1020 struct acpi_device_info *info;
1021 struct acpica_device_id_list *cid_list;
1022 int i;
1071 1023
1072 switch (type) { 1024 switch (device->device_type) {
1073 case ACPI_BUS_TYPE_DEVICE: 1025 case ACPI_BUS_TYPE_DEVICE:
1074 status = acpi_get_object_info(handle, &info); 1026 if (ACPI_IS_ROOT_DEVICE(device)) {
1027 acpi_add_id(device, ACPI_SYSTEM_HID);
1028 break;
1029 } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
1030 /* \_SB_, the only root-level namespace device */
1031 acpi_add_id(device, ACPI_BUS_HID);
1032 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1033 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1034 break;
1035 }
1036
1037 status = acpi_get_object_info(device->handle, &info);
1075 if (ACPI_FAILURE(status)) { 1038 if (ACPI_FAILURE(status)) {
1076 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); 1039 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
1077 return; 1040 return;
1078 } 1041 }
1079 1042
1080 if (info->valid & ACPI_VALID_HID) 1043 if (info->valid & ACPI_VALID_HID)
1081 hid = info->hardware_id.string; 1044 acpi_add_id(device, info->hardware_id.string);
1082 if (info->valid & ACPI_VALID_UID) 1045 if (info->valid & ACPI_VALID_CID) {
1083 uid = info->unique_id.string;
1084 if (info->valid & ACPI_VALID_CID)
1085 cid_list = &info->compatible_id_list; 1046 cid_list = &info->compatible_id_list;
1047 for (i = 0; i < cid_list->count; i++)
1048 acpi_add_id(device, cid_list->ids[i].string);
1049 }
1086 if (info->valid & ACPI_VALID_ADR) { 1050 if (info->valid & ACPI_VALID_ADR) {
1087 device->pnp.bus_address = info->address; 1051 device->pnp.bus_address = info->address;
1088 device->flags.bus_address = 1; 1052 device->flags.bus_address = 1;
1089 } 1053 }
1090 1054
1091 /* If we have a video/bay/dock device, add our selfdefined 1055 kfree(info);
1092 HID to the CID list. Like that the video/bay/dock drivers 1056
1093 will get autoloaded and the device might still match 1057 /*
1094 against another driver. 1058 * Some devices don't reliably have _HIDs & _CIDs, so add
1095 */ 1059 * synthetic HIDs to make sure drivers can find them.
1060 */
1096 if (acpi_is_video_device(device)) 1061 if (acpi_is_video_device(device))
1097 cid_add = ACPI_VIDEO_HID; 1062 acpi_add_id(device, ACPI_VIDEO_HID);
1098 else if (ACPI_SUCCESS(acpi_bay_match(device))) 1063 else if (ACPI_SUCCESS(acpi_bay_match(device)))
1099 cid_add = ACPI_BAY_HID; 1064 acpi_add_id(device, ACPI_BAY_HID);
1100 else if (ACPI_SUCCESS(acpi_dock_match(device))) 1065 else if (ACPI_SUCCESS(acpi_dock_match(device)))
1101 cid_add = ACPI_DOCK_HID; 1066 acpi_add_id(device, ACPI_DOCK_HID);
1102 1067
1103 break; 1068 break;
1104 case ACPI_BUS_TYPE_POWER: 1069 case ACPI_BUS_TYPE_POWER:
1105 hid = ACPI_POWER_HID; 1070 acpi_add_id(device, ACPI_POWER_HID);
1106 break; 1071 break;
1107 case ACPI_BUS_TYPE_PROCESSOR: 1072 case ACPI_BUS_TYPE_PROCESSOR:
1108 hid = ACPI_PROCESSOR_OBJECT_HID; 1073 acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
1109 break;
1110 case ACPI_BUS_TYPE_SYSTEM:
1111 hid = ACPI_SYSTEM_HID;
1112 break; 1074 break;
1113 case ACPI_BUS_TYPE_THERMAL: 1075 case ACPI_BUS_TYPE_THERMAL:
1114 hid = ACPI_THERMAL_HID; 1076 acpi_add_id(device, ACPI_THERMAL_HID);
1115 break; 1077 break;
1116 case ACPI_BUS_TYPE_POWER_BUTTON: 1078 case ACPI_BUS_TYPE_POWER_BUTTON:
1117 hid = ACPI_BUTTON_HID_POWERF; 1079 acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
1118 break; 1080 break;
1119 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1081 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1120 hid = ACPI_BUTTON_HID_SLEEPF; 1082 acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
1121 break; 1083 break;
1122 } 1084 }
1123 1085
1124 /* 1086 /*
1125 * \_SB 1087 * We build acpi_devices for some objects that don't have _HID or _CID,
1126 * ---- 1088 * e.g., PCI bridges and slots. Drivers can't bind to these objects,
1127 * Fix for the system root bus device -- the only root-level device. 1089 * but we do use them indirectly by traversing the acpi_device tree.
1090 * This generic ID isn't useful for driver binding, but it provides
1091 * the useful property that "every acpi_device has an ID."
1128 */ 1092 */
1129 if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) { 1093 if (list_empty(&device->pnp.ids))
1130 hid = ACPI_BUS_HID; 1094 acpi_add_id(device, "device");
1131 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1132 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1133 }
1134
1135 if (hid) {
1136 device->pnp.hardware_id = ACPI_ALLOCATE_ZEROED(strlen (hid) + 1);
1137 if (device->pnp.hardware_id) {
1138 strcpy(device->pnp.hardware_id, hid);
1139 device->flags.hardware_id = 1;
1140 }
1141 }
1142 if (!device->flags.hardware_id)
1143 device->pnp.hardware_id = "";
1144
1145 if (uid) {
1146 device->pnp.unique_id = ACPI_ALLOCATE_ZEROED(strlen (uid) + 1);
1147 if (device->pnp.unique_id) {
1148 strcpy(device->pnp.unique_id, uid);
1149 device->flags.unique_id = 1;
1150 }
1151 }
1152 if (!device->flags.unique_id)
1153 device->pnp.unique_id = "";
1154
1155 if (cid_list || cid_add) {
1156 struct acpica_device_id_list *list;
1157
1158 if (cid_add) {
1159 struct acpica_device_id cid;
1160 cid.length = strlen (cid_add) + 1;
1161 cid.string = cid_add;
1162
1163 list = acpi_add_cid(info, &cid);
1164 } else {
1165 list = acpi_add_cid(info, NULL);
1166 }
1167
1168 if (list) {
1169 device->pnp.cid_list = list;
1170 if (cid_add)
1171 device->flags.compatible_ids = 1;
1172 }
1173 }
1174
1175 kfree(info);
1176} 1095}
1177 1096
1178static int acpi_device_set_context(struct acpi_device *device, int type) 1097static int acpi_device_set_context(struct acpi_device *device)
1179{ 1098{
1180 acpi_status status = AE_OK; 1099 acpi_status status;
1181 int result = 0; 1100
1182 /* 1101 /*
1183 * Context 1102 * Context
1184 * ------- 1103 * -------
1185 * Attach this 'struct acpi_device' to the ACPI object. This makes 1104 * Attach this 'struct acpi_device' to the ACPI object. This makes
1186 * resolutions from handle->device very efficient. Note that we need 1105 * resolutions from handle->device very efficient. Fixed hardware
1187 * to be careful with fixed-feature devices as they all attach to the 1106 * devices have no handles, so we skip them.
1188 * root object.
1189 */ 1107 */
1190 if (type != ACPI_BUS_TYPE_POWER_BUTTON && 1108 if (!device->handle)
1191 type != ACPI_BUS_TYPE_SLEEP_BUTTON) { 1109 return 0;
1192 status = acpi_attach_data(device->handle,
1193 acpi_bus_data_handler, device);
1194 1110
1195 if (ACPI_FAILURE(status)) { 1111 status = acpi_attach_data(device->handle,
1196 printk(KERN_ERR PREFIX "Error attaching device data\n"); 1112 acpi_bus_data_handler, device);
1197 result = -ENODEV; 1113 if (ACPI_SUCCESS(status))
1198 } 1114 return 0;
1199 } 1115
1200 return result; 1116 printk(KERN_ERR PREFIX "Error attaching device data\n");
1117 return -ENODEV;
1201} 1118}
1202 1119
1203static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 1120static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
@@ -1223,17 +1140,14 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
1223 return 0; 1140 return 0;
1224} 1141}
1225 1142
1226static int 1143static int acpi_add_single_object(struct acpi_device **child,
1227acpi_add_single_object(struct acpi_device **child, 1144 acpi_handle handle, int type,
1228 struct acpi_device *parent, acpi_handle handle, int type, 1145 unsigned long long sta,
1229 struct acpi_bus_ops *ops) 1146 struct acpi_bus_ops *ops)
1230{ 1147{
1231 int result = 0; 1148 int result;
1232 struct acpi_device *device = NULL; 1149 struct acpi_device *device;
1233 1150 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1234
1235 if (!child)
1236 return -EINVAL;
1237 1151
1238 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); 1152 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
1239 if (!device) { 1153 if (!device) {
@@ -1241,75 +1155,31 @@ acpi_add_single_object(struct acpi_device **child,
1241 return -ENOMEM; 1155 return -ENOMEM;
1242 } 1156 }
1243 1157
1158 INIT_LIST_HEAD(&device->pnp.ids);
1159 device->device_type = type;
1244 device->handle = handle; 1160 device->handle = handle;
1245 device->parent = parent; 1161 device->parent = acpi_bus_get_parent(handle);
1246 device->bus_ops = *ops; /* workround for not call .start */ 1162 device->bus_ops = *ops; /* workround for not call .start */
1163 STRUCT_TO_INT(device->status) = sta;
1247 1164
1248 1165 acpi_device_get_busid(device);
1249 acpi_device_get_busid(device, handle, type);
1250 1166
1251 /* 1167 /*
1252 * Flags 1168 * Flags
1253 * ----- 1169 * -----
1254 * Get prior to calling acpi_bus_get_status() so we know whether 1170 * Note that we only look for object handles -- cannot evaluate objects
1255 * or not _STA is present. Note that we only look for object 1171 * until we know the device is present and properly initialized.
1256 * handles -- cannot evaluate objects until we know the device is
1257 * present and properly initialized.
1258 */ 1172 */
1259 result = acpi_bus_get_flags(device); 1173 result = acpi_bus_get_flags(device);
1260 if (result) 1174 if (result)
1261 goto end; 1175 goto end;
1262 1176
1263 /* 1177 /*
1264 * Status
1265 * ------
1266 * See if the device is present. We always assume that non-Device
1267 * and non-Processor objects (e.g. thermal zones, power resources,
1268 * etc.) are present, functioning, etc. (at least when parent object
1269 * is present). Note that _STA has a different meaning for some
1270 * objects (e.g. power resources) so we need to be careful how we use
1271 * it.
1272 */
1273 switch (type) {
1274 case ACPI_BUS_TYPE_PROCESSOR:
1275 case ACPI_BUS_TYPE_DEVICE:
1276 result = acpi_bus_get_status(device);
1277 if (ACPI_FAILURE(result)) {
1278 result = -ENODEV;
1279 goto end;
1280 }
1281 /*
1282 * When the device is neither present nor functional, the
1283 * device should not be added to Linux ACPI device tree.
1284 * When the status of the device is not present but functinal,
1285 * it should be added to Linux ACPI tree. For example : bay
1286 * device , dock device.
1287 * In such conditions it is unncessary to check whether it is
1288 * bay device or dock device.
1289 */
1290 if (!device->status.present && !device->status.functional) {
1291 result = -ENODEV;
1292 goto end;
1293 }
1294 break;
1295 default:
1296 STRUCT_TO_INT(device->status) =
1297 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
1298 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
1299 break;
1300 }
1301
1302 /*
1303 * Initialize Device 1178 * Initialize Device
1304 * ----------------- 1179 * -----------------
1305 * TBD: Synch with Core's enumeration/initialization process. 1180 * TBD: Synch with Core's enumeration/initialization process.
1306 */ 1181 */
1307 1182 acpi_device_set_id(device);
1308 /*
1309 * Hardware ID, Unique ID, & Bus Address
1310 * -------------------------------------
1311 */
1312 acpi_device_set_id(device, parent, handle, type);
1313 1183
1314 /* 1184 /*
1315 * Power Management 1185 * Power Management
@@ -1341,10 +1211,10 @@ acpi_add_single_object(struct acpi_device **child,
1341 goto end; 1211 goto end;
1342 } 1212 }
1343 1213
1344 if ((result = acpi_device_set_context(device, type))) 1214 if ((result = acpi_device_set_context(device)))
1345 goto end; 1215 goto end;
1346 1216
1347 result = acpi_device_register(device, parent); 1217 result = acpi_device_register(device);
1348 1218
1349 /* 1219 /*
1350 * Bind _ADR-Based Devices when hot add 1220 * Bind _ADR-Based Devices when hot add
@@ -1355,128 +1225,117 @@ acpi_add_single_object(struct acpi_device **child,
1355 } 1225 }
1356 1226
1357end: 1227end:
1358 if (!result) 1228 if (!result) {
1229 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1230 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1231 "Adding %s [%s] parent %s\n", dev_name(&device->dev),
1232 (char *) buffer.pointer,
1233 device->parent ? dev_name(&device->parent->dev) :
1234 "(null)"));
1235 kfree(buffer.pointer);
1359 *child = device; 1236 *child = device;
1360 else 1237 } else
1361 acpi_device_release(&device->dev); 1238 acpi_device_release(&device->dev);
1362 1239
1363 return result; 1240 return result;
1364} 1241}
1365 1242
1366static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops) 1243#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
1367{ 1244 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
1368 acpi_status status = AE_OK;
1369 struct acpi_device *parent = NULL;
1370 struct acpi_device *child = NULL;
1371 acpi_handle phandle = NULL;
1372 acpi_handle chandle = NULL;
1373 acpi_object_type type = 0;
1374 u32 level = 1;
1375
1376 1245
1377 if (!start) 1246static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1378 return -EINVAL; 1247 unsigned long long *sta)
1248{
1249 acpi_status status;
1250 acpi_object_type acpi_type;
1379 1251
1380 parent = start; 1252 status = acpi_get_type(handle, &acpi_type);
1381 phandle = start->handle; 1253 if (ACPI_FAILURE(status))
1254 return -ENODEV;
1382 1255
1383 /* 1256 switch (acpi_type) {
1384 * Parse through the ACPI namespace, identify all 'devices', and 1257 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1385 * create a new 'struct acpi_device' for each. 1258 case ACPI_TYPE_DEVICE:
1386 */ 1259 *type = ACPI_BUS_TYPE_DEVICE;
1387 while ((level > 0) && parent) { 1260 status = acpi_bus_get_status_handle(handle, sta);
1261 if (ACPI_FAILURE(status))
1262 return -ENODEV;
1263 break;
1264 case ACPI_TYPE_PROCESSOR:
1265 *type = ACPI_BUS_TYPE_PROCESSOR;
1266 status = acpi_bus_get_status_handle(handle, sta);
1267 if (ACPI_FAILURE(status))
1268 return -ENODEV;
1269 break;
1270 case ACPI_TYPE_THERMAL:
1271 *type = ACPI_BUS_TYPE_THERMAL;
1272 *sta = ACPI_STA_DEFAULT;
1273 break;
1274 case ACPI_TYPE_POWER:
1275 *type = ACPI_BUS_TYPE_POWER;
1276 *sta = ACPI_STA_DEFAULT;
1277 break;
1278 default:
1279 return -ENODEV;
1280 }
1388 1281
1389 status = acpi_get_next_object(ACPI_TYPE_ANY, phandle, 1282 return 0;
1390 chandle, &chandle); 1283}
1391 1284
1392 /* 1285static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
1393 * If this scope is exhausted then move our way back up. 1286 void *context, void **return_value)
1394 */ 1287{
1395 if (ACPI_FAILURE(status)) { 1288 struct acpi_bus_ops *ops = context;
1396 level--; 1289 int type;
1397 chandle = phandle; 1290 unsigned long long sta;
1398 acpi_get_parent(phandle, &phandle); 1291 struct acpi_device *device;
1399 if (parent->parent) 1292 acpi_status status;
1400 parent = parent->parent; 1293 int result;
1401 continue;
1402 }
1403 1294
1404 status = acpi_get_type(chandle, &type); 1295 result = acpi_bus_type_and_status(handle, &type, &sta);
1405 if (ACPI_FAILURE(status)) 1296 if (result)
1406 continue; 1297 return AE_OK;
1407 1298
1408 /* 1299 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1409 * If this is a scope object then parse it (depth-first). 1300 !(sta & ACPI_STA_DEVICE_FUNCTIONING))
1410 */ 1301 return AE_CTRL_DEPTH;
1411 if (type == ACPI_TYPE_LOCAL_SCOPE) {
1412 level++;
1413 phandle = chandle;
1414 chandle = NULL;
1415 continue;
1416 }
1417 1302
1418 /* 1303 /*
1419 * We're only interested in objects that we consider 'devices'. 1304 * We may already have an acpi_device from a previous enumeration. If
1420 */ 1305 * so, we needn't add it again, but we may still have to start it.
1421 switch (type) { 1306 */
1422 case ACPI_TYPE_DEVICE: 1307 device = NULL;
1423 type = ACPI_BUS_TYPE_DEVICE; 1308 acpi_bus_get_device(handle, &device);
1424 break; 1309 if (ops->acpi_op_add && !device)
1425 case ACPI_TYPE_PROCESSOR: 1310 acpi_add_single_object(&device, handle, type, sta, ops);
1426 type = ACPI_BUS_TYPE_PROCESSOR;
1427 break;
1428 case ACPI_TYPE_THERMAL:
1429 type = ACPI_BUS_TYPE_THERMAL;
1430 break;
1431 case ACPI_TYPE_POWER:
1432 type = ACPI_BUS_TYPE_POWER;
1433 break;
1434 default:
1435 continue;
1436 }
1437 1311
1438 if (ops->acpi_op_add) 1312 if (!device)
1439 status = acpi_add_single_object(&child, parent, 1313 return AE_CTRL_DEPTH;
1440 chandle, type, ops);
1441 else
1442 status = acpi_bus_get_device(chandle, &child);
1443 1314
1315 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1316 status = acpi_start_single_object(device);
1444 if (ACPI_FAILURE(status)) 1317 if (ACPI_FAILURE(status))
1445 continue; 1318 return AE_CTRL_DEPTH;
1319 }
1446 1320
1447 if (ops->acpi_op_start && !(ops->acpi_op_add)) { 1321 if (!*return_value)
1448 status = acpi_start_single_object(child); 1322 *return_value = device;
1449 if (ACPI_FAILURE(status)) 1323 return AE_OK;
1450 continue; 1324}
1451 }
1452 1325
1453 /* 1326static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1454 * If the device is present, enabled, and functioning then 1327 struct acpi_device **child)
1455 * parse its scope (depth-first). Note that we need to 1328{
1456 * represent absent devices to facilitate PnP notifications 1329 acpi_status status;
1457 * -- but only the subtree head (not all of its children, 1330 void *device = NULL;
1458 * which will be enumerated when the parent is inserted). 1331
1459 * 1332 status = acpi_bus_check_add(handle, 0, ops, &device);
1460 * TBD: Need notifications and other detection mechanisms 1333 if (ACPI_SUCCESS(status))
1461 * in place before we can fully implement this. 1334 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1462 */ 1335 acpi_bus_check_add, ops, &device);
1463 /*
1464 * When the device is not present but functional, it is also
1465 * necessary to scan the children of this device.
1466 */
1467 if (child->status.present || (!child->status.present &&
1468 child->status.functional)) {
1469 status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
1470 NULL, NULL);
1471 if (ACPI_SUCCESS(status)) {
1472 level++;
1473 phandle = chandle;
1474 chandle = NULL;
1475 parent = child;
1476 }
1477 }
1478 }
1479 1336
1337 if (child)
1338 *child = device;
1480 return 0; 1339 return 0;
1481} 1340}
1482 1341
@@ -1484,36 +1343,25 @@ int
1484acpi_bus_add(struct acpi_device **child, 1343acpi_bus_add(struct acpi_device **child,
1485 struct acpi_device *parent, acpi_handle handle, int type) 1344 struct acpi_device *parent, acpi_handle handle, int type)
1486{ 1345{
1487 int result;
1488 struct acpi_bus_ops ops; 1346 struct acpi_bus_ops ops;
1489 1347
1490 memset(&ops, 0, sizeof(ops)); 1348 memset(&ops, 0, sizeof(ops));
1491 ops.acpi_op_add = 1; 1349 ops.acpi_op_add = 1;
1492 1350
1493 result = acpi_add_single_object(child, parent, handle, type, &ops); 1351 acpi_bus_scan(handle, &ops, child);
1494 if (!result) 1352 return 0;
1495 result = acpi_bus_scan(*child, &ops);
1496
1497 return result;
1498} 1353}
1499EXPORT_SYMBOL(acpi_bus_add); 1354EXPORT_SYMBOL(acpi_bus_add);
1500 1355
1501int acpi_bus_start(struct acpi_device *device) 1356int acpi_bus_start(struct acpi_device *device)
1502{ 1357{
1503 int result;
1504 struct acpi_bus_ops ops; 1358 struct acpi_bus_ops ops;
1505 1359
1360 memset(&ops, 0, sizeof(ops));
1361 ops.acpi_op_start = 1;
1506 1362
1507 if (!device) 1363 acpi_bus_scan(device->handle, &ops, NULL);
1508 return -EINVAL; 1364 return 0;
1509
1510 result = acpi_start_single_object(device);
1511 if (!result) {
1512 memset(&ops, 0, sizeof(ops));
1513 ops.acpi_op_start = 1;
1514 result = acpi_bus_scan(device, &ops);
1515 }
1516 return result;
1517} 1365}
1518EXPORT_SYMBOL(acpi_bus_start); 1366EXPORT_SYMBOL(acpi_bus_start);
1519 1367
@@ -1572,15 +1420,12 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
1572} 1420}
1573EXPORT_SYMBOL_GPL(acpi_bus_trim); 1421EXPORT_SYMBOL_GPL(acpi_bus_trim);
1574 1422
1575static int acpi_bus_scan_fixed(struct acpi_device *root) 1423static int acpi_bus_scan_fixed(void)
1576{ 1424{
1577 int result = 0; 1425 int result = 0;
1578 struct acpi_device *device = NULL; 1426 struct acpi_device *device = NULL;
1579 struct acpi_bus_ops ops; 1427 struct acpi_bus_ops ops;
1580 1428
1581 if (!root)
1582 return -ENODEV;
1583
1584 memset(&ops, 0, sizeof(ops)); 1429 memset(&ops, 0, sizeof(ops));
1585 ops.acpi_op_add = 1; 1430 ops.acpi_op_add = 1;
1586 ops.acpi_op_start = 1; 1431 ops.acpi_op_start = 1;
@@ -1589,16 +1434,16 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1589 * Enumerate all fixed-feature devices. 1434 * Enumerate all fixed-feature devices.
1590 */ 1435 */
1591 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) { 1436 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1592 result = acpi_add_single_object(&device, acpi_root, 1437 result = acpi_add_single_object(&device, NULL,
1593 NULL,
1594 ACPI_BUS_TYPE_POWER_BUTTON, 1438 ACPI_BUS_TYPE_POWER_BUTTON,
1439 ACPI_STA_DEFAULT,
1595 &ops); 1440 &ops);
1596 } 1441 }
1597 1442
1598 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1443 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1599 result = acpi_add_single_object(&device, acpi_root, 1444 result = acpi_add_single_object(&device, NULL,
1600 NULL,
1601 ACPI_BUS_TYPE_SLEEP_BUTTON, 1445 ACPI_BUS_TYPE_SLEEP_BUTTON,
1446 ACPI_STA_DEFAULT,
1602 &ops); 1447 &ops);
1603 } 1448 }
1604 1449
@@ -1621,24 +1466,15 @@ int __init acpi_scan_init(void)
1621 } 1466 }
1622 1467
1623 /* 1468 /*
1624 * Create the root device in the bus's device tree
1625 */
1626 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1627 ACPI_BUS_TYPE_SYSTEM, &ops);
1628 if (result)
1629 goto Done;
1630
1631 /*
1632 * Enumerate devices in the ACPI namespace. 1469 * Enumerate devices in the ACPI namespace.
1633 */ 1470 */
1634 result = acpi_bus_scan_fixed(acpi_root); 1471 result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
1635 1472
1636 if (!result) 1473 if (!result)
1637 result = acpi_bus_scan(acpi_root, &ops); 1474 result = acpi_bus_scan_fixed();
1638 1475
1639 if (result) 1476 if (result)
1640 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1477 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
1641 1478
1642Done:
1643 return result; 1479 return result;
1644} 1480}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a90afcc723ab..4cc1b8116e76 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -413,6 +413,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
413 }, 413 },
414 }, 414 },
415 { 415 {
416 .callback = init_set_sci_en_on_resume,
417 .ident = "Hewlett-Packard Pavilion dv4",
418 .matches = {
419 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
420 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
421 },
422 },
423 {
424 .callback = init_set_sci_en_on_resume,
425 .ident = "Hewlett-Packard Pavilion dv7",
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
428 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
429 },
430 },
431 {
432 .callback = init_set_sci_en_on_resume,
433 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
434 .matches = {
435 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
436 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
437 },
438 },
439 {
416 .callback = init_old_suspend_ordering, 440 .callback = init_old_suspend_ordering,
417 .ident = "Panasonic CF51-2L", 441 .ident = "Panasonic CF51-2L",
418 .matches = { 442 .matches = {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 94b1a4c5abab..05dff631591c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
285 struct file *file); 285 struct file *file);
286static ssize_t acpi_video_device_write_brightness(struct file *file, 286static ssize_t acpi_video_device_write_brightness(struct file *file,
287 const char __user *buffer, size_t count, loff_t *data); 287 const char __user *buffer, size_t count, loff_t *data);
288static struct file_operations acpi_video_device_brightness_fops = { 288static const struct file_operations acpi_video_device_brightness_fops = {
289 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
290 .open = acpi_video_device_brightness_open_fs, 290 .open = acpi_video_device_brightness_open_fs,
291 .read = seq_read, 291 .read = seq_read,
@@ -1109,7 +1109,12 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
1109 */ 1109 */
1110 1110
1111 /* Does this device support video switching? */ 1111 /* Does this device support video switching? */
1112 if (video->cap._DOS) { 1112 if (video->cap._DOS || video->cap._DOD) {
1113 if (!video->cap._DOS) {
1114 printk(KERN_WARNING FW_BUG
1115 "ACPI(%s) defines _DOD but not _DOS\n",
1116 acpi_device_bid(video->device));
1117 }
1113 video->flags.multihead = 1; 1118 video->flags.multihead = 1;
1114 status = 0; 1119 status = 0;
1115 } 1120 }
@@ -1218,7 +1223,7 @@ acpi_video_device_write_state(struct file *file,
1218 u32 state = 0; 1223 u32 state = 0;
1219 1224
1220 1225
1221 if (!dev || count + 1 > sizeof str) 1226 if (!dev || count >= sizeof(str))
1222 return -EINVAL; 1227 return -EINVAL;
1223 1228
1224 if (copy_from_user(str, buffer, count)) 1229 if (copy_from_user(str, buffer, count))
@@ -1275,7 +1280,7 @@ acpi_video_device_write_brightness(struct file *file,
1275 int i; 1280 int i;
1276 1281
1277 1282
1278 if (!dev || !dev->brightness || count + 1 > sizeof str) 1283 if (!dev || !dev->brightness || count >= sizeof(str))
1279 return -EINVAL; 1284 return -EINVAL;
1280 1285
1281 if (copy_from_user(str, buffer, count)) 1286 if (copy_from_user(str, buffer, count))
@@ -1557,7 +1562,7 @@ acpi_video_bus_write_POST(struct file *file,
1557 unsigned long long opt, options; 1562 unsigned long long opt, options;
1558 1563
1559 1564
1560 if (!video || count + 1 > sizeof str) 1565 if (!video || count >= sizeof(str))
1561 return -EINVAL; 1566 return -EINVAL;
1562 1567
1563 status = acpi_video_bus_POST_options(video, &options); 1568 status = acpi_video_bus_POST_options(video, &options);
@@ -1597,7 +1602,7 @@ acpi_video_bus_write_DOS(struct file *file,
1597 unsigned long opt; 1602 unsigned long opt;
1598 1603
1599 1604
1600 if (!video || count + 1 > sizeof str) 1605 if (!video || count >= sizeof(str))
1601 return -EINVAL; 1606 return -EINVAL;
1602 1607
1603 if (copy_from_user(str, buffer, count)) 1608 if (copy_from_user(str, buffer, count))
@@ -1986,6 +1991,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1986 1991
1987 result = acpi_video_device_lcd_set_level(device, level_next); 1992 result = acpi_video_device_lcd_set_level(device, level_next);
1988 1993
1994 if (!result)
1995 backlight_force_update(device->backlight,
1996 BACKLIGHT_UPDATE_HOTKEY);
1997
1989out: 1998out:
1990 if (result) 1999 if (result)
1991 printk(KERN_ERR PREFIX "Failed to switch the brightness\n"); 2000 printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 7032f25da9b5..575593a8b4e6 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -84,7 +84,7 @@ long acpi_is_video_device(struct acpi_device *device)
84 return 0; 84 return 0;
85 85
86 /* Does this device able to support video switching ? */ 86 /* Does this device able to support video switching ? */
87 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) && 87 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
88 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) 88 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
89 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 89 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
90 90
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index acd1162712b1..a3241a1a710b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -122,6 +122,7 @@ enum {
122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */ 122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */ 123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */ 124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
125 HOST_CAP2 = 0x24, /* host capabilities, extended */
125 126
126 /* HOST_CTL bits */ 127 /* HOST_CTL bits */
127 HOST_RESET = (1 << 0), /* reset controller; self-clear */ 128 HOST_RESET = (1 << 0), /* reset controller; self-clear */
@@ -129,16 +130,29 @@ enum {
129 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ 130 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
130 131
131 /* HOST_CAP bits */ 132 /* HOST_CAP bits */
133 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
132 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ 134 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
133 HOST_CAP_SSC = (1 << 14), /* Slumber capable */ 135 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
136 HOST_CAP_PART = (1 << 13), /* Partial state capable */
137 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
138 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
139 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
134 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ 140 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
141 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
135 HOST_CAP_CLO = (1 << 24), /* Command List Override support */ 142 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
143 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
136 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ 144 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
137 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ 145 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
146 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
138 HOST_CAP_SNTF = (1 << 29), /* SNotification register */ 147 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
139 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ 148 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
140 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 149 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
141 150
151 /* HOST_CAP2 bits */
152 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
153 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
154 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
155
142 /* registers for each SATA port */ 156 /* registers for each SATA port */
143 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 157 PORT_LST_ADDR = 0x00, /* command list DMA addr */
144 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ 158 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
@@ -267,8 +281,10 @@ struct ahci_em_priv {
267struct ahci_host_priv { 281struct ahci_host_priv {
268 unsigned int flags; /* AHCI_HFLAG_* */ 282 unsigned int flags; /* AHCI_HFLAG_* */
269 u32 cap; /* cap to use */ 283 u32 cap; /* cap to use */
284 u32 cap2; /* cap2 to use */
270 u32 port_map; /* port map to use */ 285 u32 port_map; /* port map to use */
271 u32 saved_cap; /* saved initial cap */ 286 u32 saved_cap; /* saved initial cap */
287 u32 saved_cap2; /* saved initial cap2 */
272 u32 saved_port_map; /* saved initial port_map */ 288 u32 saved_port_map; /* saved initial port_map */
273 u32 em_loc; /* enclosure management location */ 289 u32 em_loc; /* enclosure management location */
274}; 290};
@@ -331,12 +347,15 @@ static void ahci_init_sw_activity(struct ata_link *link);
331 347
332static ssize_t ahci_show_host_caps(struct device *dev, 348static ssize_t ahci_show_host_caps(struct device *dev,
333 struct device_attribute *attr, char *buf); 349 struct device_attribute *attr, char *buf);
350static ssize_t ahci_show_host_cap2(struct device *dev,
351 struct device_attribute *attr, char *buf);
334static ssize_t ahci_show_host_version(struct device *dev, 352static ssize_t ahci_show_host_version(struct device *dev,
335 struct device_attribute *attr, char *buf); 353 struct device_attribute *attr, char *buf);
336static ssize_t ahci_show_port_cmd(struct device *dev, 354static ssize_t ahci_show_port_cmd(struct device *dev,
337 struct device_attribute *attr, char *buf); 355 struct device_attribute *attr, char *buf);
338 356
339DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 357DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
358DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
340DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); 359DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
341DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); 360DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
342 361
@@ -345,6 +364,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
345 &dev_attr_em_message_type, 364 &dev_attr_em_message_type,
346 &dev_attr_em_message, 365 &dev_attr_em_message,
347 &dev_attr_ahci_host_caps, 366 &dev_attr_ahci_host_caps,
367 &dev_attr_ahci_host_cap2,
348 &dev_attr_ahci_host_version, 368 &dev_attr_ahci_host_version,
349 &dev_attr_ahci_port_cmd, 369 &dev_attr_ahci_port_cmd,
350 NULL 370 NULL
@@ -447,7 +467,8 @@ static const struct ata_port_info ahci_port_info[] = {
447 [board_ahci_sb600] = 467 [board_ahci_sb600] =
448 { 468 {
449 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 469 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
450 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255), 470 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
471 AHCI_HFLAG_32BIT_ONLY),
451 .flags = AHCI_FLAG_COMMON, 472 .flags = AHCI_FLAG_COMMON,
452 .pio_mask = ATA_PIO4, 473 .pio_mask = ATA_PIO4,
453 .udma_mask = ATA_UDMA6, 474 .udma_mask = ATA_UDMA6,
@@ -554,7 +575,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
554 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ 575 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
555 576
556 /* AMD */ 577 /* AMD */
557 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */ 578 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
558 /* AMD is using RAID class only for ahci controllers */ 579 /* AMD is using RAID class only for ahci controllers */
559 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 580 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
560 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, 581 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@@ -584,6 +605,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
584 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ 605 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
585 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ 606 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
586 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ 607 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
608 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
587 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ 609 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
588 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ 610 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
589 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ 611 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
@@ -732,6 +754,16 @@ static ssize_t ahci_show_host_caps(struct device *dev,
732 return sprintf(buf, "%x\n", hpriv->cap); 754 return sprintf(buf, "%x\n", hpriv->cap);
733} 755}
734 756
757static ssize_t ahci_show_host_cap2(struct device *dev,
758 struct device_attribute *attr, char *buf)
759{
760 struct Scsi_Host *shost = class_to_shost(dev);
761 struct ata_port *ap = ata_shost_to_port(shost);
762 struct ahci_host_priv *hpriv = ap->host->private_data;
763
764 return sprintf(buf, "%x\n", hpriv->cap2);
765}
766
735static ssize_t ahci_show_host_version(struct device *dev, 767static ssize_t ahci_show_host_version(struct device *dev,
736 struct device_attribute *attr, char *buf) 768 struct device_attribute *attr, char *buf)
737{ 769{
@@ -771,7 +803,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
771 struct ahci_host_priv *hpriv) 803 struct ahci_host_priv *hpriv)
772{ 804{
773 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 805 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
774 u32 cap, port_map; 806 u32 cap, cap2, vers, port_map;
775 int i; 807 int i;
776 int mv; 808 int mv;
777 809
@@ -784,6 +816,14 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
784 hpriv->saved_cap = cap = readl(mmio + HOST_CAP); 816 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
785 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); 817 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
786 818
819 /* CAP2 register is only defined for AHCI 1.2 and later */
820 vers = readl(mmio + HOST_VERSION);
821 if ((vers >> 16) > 1 ||
822 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
823 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
824 else
825 hpriv->saved_cap2 = cap2 = 0;
826
787 /* some chips have errata preventing 64bit use */ 827 /* some chips have errata preventing 64bit use */
788 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { 828 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
789 dev_printk(KERN_INFO, &pdev->dev, 829 dev_printk(KERN_INFO, &pdev->dev,
@@ -869,6 +909,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
869 909
870 /* record values to use during operation */ 910 /* record values to use during operation */
871 hpriv->cap = cap; 911 hpriv->cap = cap;
912 hpriv->cap2 = cap2;
872 hpriv->port_map = port_map; 913 hpriv->port_map = port_map;
873} 914}
874 915
@@ -887,6 +928,8 @@ static void ahci_restore_initial_config(struct ata_host *host)
887 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 928 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
888 929
889 writel(hpriv->saved_cap, mmio + HOST_CAP); 930 writel(hpriv->saved_cap, mmio + HOST_CAP);
931 if (hpriv->saved_cap2)
932 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
890 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); 933 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
891 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ 934 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
892} 935}
@@ -2534,13 +2577,14 @@ static void ahci_print_info(struct ata_host *host)
2534 struct ahci_host_priv *hpriv = host->private_data; 2577 struct ahci_host_priv *hpriv = host->private_data;
2535 struct pci_dev *pdev = to_pci_dev(host->dev); 2578 struct pci_dev *pdev = to_pci_dev(host->dev);
2536 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 2579 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2537 u32 vers, cap, impl, speed; 2580 u32 vers, cap, cap2, impl, speed;
2538 const char *speed_s; 2581 const char *speed_s;
2539 u16 cc; 2582 u16 cc;
2540 const char *scc_s; 2583 const char *scc_s;
2541 2584
2542 vers = readl(mmio + HOST_VERSION); 2585 vers = readl(mmio + HOST_VERSION);
2543 cap = hpriv->cap; 2586 cap = hpriv->cap;
2587 cap2 = hpriv->cap2;
2544 impl = hpriv->port_map; 2588 impl = hpriv->port_map;
2545 2589
2546 speed = (cap >> 20) & 0xf; 2590 speed = (cap >> 20) & 0xf;
@@ -2583,25 +2627,29 @@ static void ahci_print_info(struct ata_host *host)
2583 "flags: " 2627 "flags: "
2584 "%s%s%s%s%s%s%s" 2628 "%s%s%s%s%s%s%s"
2585 "%s%s%s%s%s%s%s" 2629 "%s%s%s%s%s%s%s"
2586 "%s\n" 2630 "%s%s%s%s%s%s\n"
2587 , 2631 ,
2588 2632
2589 cap & (1 << 31) ? "64bit " : "", 2633 cap & HOST_CAP_64 ? "64bit " : "",
2590 cap & (1 << 30) ? "ncq " : "", 2634 cap & HOST_CAP_NCQ ? "ncq " : "",
2591 cap & (1 << 29) ? "sntf " : "", 2635 cap & HOST_CAP_SNTF ? "sntf " : "",
2592 cap & (1 << 28) ? "ilck " : "", 2636 cap & HOST_CAP_MPS ? "ilck " : "",
2593 cap & (1 << 27) ? "stag " : "", 2637 cap & HOST_CAP_SSS ? "stag " : "",
2594 cap & (1 << 26) ? "pm " : "", 2638 cap & HOST_CAP_ALPM ? "pm " : "",
2595 cap & (1 << 25) ? "led " : "", 2639 cap & HOST_CAP_LED ? "led " : "",
2596 2640 cap & HOST_CAP_CLO ? "clo " : "",
2597 cap & (1 << 24) ? "clo " : "", 2641 cap & HOST_CAP_ONLY ? "only " : "",
2598 cap & (1 << 19) ? "nz " : "", 2642 cap & HOST_CAP_PMP ? "pmp " : "",
2599 cap & (1 << 18) ? "only " : "", 2643 cap & HOST_CAP_FBS ? "fbs " : "",
2600 cap & (1 << 17) ? "pmp " : "", 2644 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2601 cap & (1 << 15) ? "pio " : "", 2645 cap & HOST_CAP_SSC ? "slum " : "",
2602 cap & (1 << 14) ? "slum " : "", 2646 cap & HOST_CAP_PART ? "part " : "",
2603 cap & (1 << 13) ? "part " : "", 2647 cap & HOST_CAP_CCC ? "ccc " : "",
2604 cap & (1 << 6) ? "ems ": "" 2648 cap & HOST_CAP_EMS ? "ems " : "",
2649 cap & HOST_CAP_SXS ? "sxs " : "",
2650 cap2 & HOST_CAP2_APST ? "apst " : "",
2651 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2652 cap2 & HOST_CAP2_BOH ? "boh " : ""
2605 ); 2653 );
2606} 2654}
2607 2655
@@ -2650,17 +2698,15 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
2650 } 2698 }
2651} 2699}
2652 2700
2653/* 2701/* only some SB600 ahci controllers can do 64bit DMA */
2654 * SB600 ahci controller on certain boards can't do 64bit DMA with 2702static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2655 * older BIOS.
2656 */
2657static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
2658{ 2703{
2659 static const struct dmi_system_id sysids[] = { 2704 static const struct dmi_system_id sysids[] = {
2660 /* 2705 /*
2661 * The oldest version known to be broken is 0901 and 2706 * The oldest version known to be broken is 0901 and
2662 * working is 1501 which was released on 2007-10-26. 2707 * working is 1501 which was released on 2007-10-26.
2663 * Force 32bit DMA on anything older than 1501. 2708 * Enable 64bit DMA on 1501 and anything newer.
2709 *
2664 * Please read bko#9412 for more info. 2710 * Please read bko#9412 for more info.
2665 */ 2711 */
2666 { 2712 {
@@ -2673,46 +2719,57 @@ static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
2673 .driver_data = "20071026", /* yyyymmdd */ 2719 .driver_data = "20071026", /* yyyymmdd */
2674 }, 2720 },
2675 /* 2721 /*
2676 * It's yet unknown whether more recent BIOS fixes the 2722 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2677 * problem. Blacklist the whole board for the time 2723 * support 64bit DMA.
2678 * being. Please read the following thread for more
2679 * info.
2680 * 2724 *
2681 * http://thread.gmane.org/gmane.linux.ide/42326 2725 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2726 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2727 * This spelling mistake was fixed in BIOS version 1.5, so
2728 * 1.5 and later have the Manufacturer as
2729 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2730 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2731 *
2732 * BIOS versions earlier than 1.9 had a Board Product Name
2733 * DMI field of "MS-7376". This was changed to be
2734 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2735 * match on DMI_BOARD_NAME of "MS-7376".
2682 */ 2736 */
2683 { 2737 {
2684 .ident = "Gigabyte GA-MA69VM-S2", 2738 .ident = "MSI K9A2 Platinum",
2685 .matches = { 2739 .matches = {
2686 DMI_MATCH(DMI_BOARD_VENDOR, 2740 DMI_MATCH(DMI_BOARD_VENDOR,
2687 "Gigabyte Technology Co., Ltd."), 2741 "MICRO-STAR INTER"),
2688 DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"), 2742 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2689 }, 2743 },
2690 }, 2744 },
2691 { } 2745 { }
2692 }; 2746 };
2693 const struct dmi_system_id *match; 2747 const struct dmi_system_id *match;
2748 int year, month, date;
2749 char buf[9];
2694 2750
2695 match = dmi_first_match(sysids); 2751 match = dmi_first_match(sysids);
2696 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || 2752 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2697 !match) 2753 !match)
2698 return false; 2754 return false;
2699 2755
2700 if (match->driver_data) { 2756 if (!match->driver_data)
2701 int year, month, date; 2757 goto enable_64bit;
2702 char buf[9];
2703
2704 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2705 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2706 2758
2707 if (strcmp(buf, match->driver_data) >= 0) 2759 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2708 return false; 2760 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2709 2761
2762 if (strcmp(buf, match->driver_data) >= 0)
2763 goto enable_64bit;
2764 else {
2710 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, " 2765 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2711 "forcing 32bit DMA, update BIOS\n", match->ident); 2766 "forcing 32bit DMA, update BIOS\n", match->ident);
2712 } else 2767 return false;
2713 dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't " 2768 }
2714 "do 64bit DMA, forcing 32bit\n", match->ident);
2715 2769
2770enable_64bit:
2771 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2772 match->ident);
2716 return true; 2773 return true;
2717} 2774}
2718 2775
@@ -2858,6 +2915,55 @@ static bool ahci_broken_online(struct pci_dev *pdev)
2858 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); 2915 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2859} 2916}
2860 2917
2918#ifdef CONFIG_ATA_ACPI
2919static void ahci_gtf_filter_workaround(struct ata_host *host)
2920{
2921 static const struct dmi_system_id sysids[] = {
2922 /*
2923 * Aspire 3810T issues a bunch of SATA enable commands
2924 * via _GTF including an invalid one and one which is
2925 * rejected by the device. Among the successful ones
2926 * is FPDMA non-zero offset enable which when enabled
2927 * only on the drive side leads to NCQ command
2928 * failures. Filter it out.
2929 */
2930 {
2931 .ident = "Aspire 3810T",
2932 .matches = {
2933 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2934 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
2935 },
2936 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
2937 },
2938 { }
2939 };
2940 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2941 unsigned int filter;
2942 int i;
2943
2944 if (!dmi)
2945 return;
2946
2947 filter = (unsigned long)dmi->driver_data;
2948 dev_printk(KERN_INFO, host->dev,
2949 "applying extra ACPI _GTF filter 0x%x for %s\n",
2950 filter, dmi->ident);
2951
2952 for (i = 0; i < host->n_ports; i++) {
2953 struct ata_port *ap = host->ports[i];
2954 struct ata_link *link;
2955 struct ata_device *dev;
2956
2957 ata_for_each_link(link, ap, EDGE)
2958 ata_for_each_dev(dev, link, ALL)
2959 dev->gtf_filter |= filter;
2960 }
2961}
2962#else
2963static inline void ahci_gtf_filter_workaround(struct ata_host *host)
2964{}
2965#endif
2966
2861static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2967static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2862{ 2968{
2863 static int printed_version; 2969 static int printed_version;
@@ -2926,9 +3032,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2926 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) 3032 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2927 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; 3033 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2928 3034
2929 /* apply sb600 32bit only quirk */ 3035 /* only some SB600s can do 64bit DMA */
2930 if (ahci_sb600_32bit_only(pdev)) 3036 if (ahci_sb600_enable_64bit(pdev))
2931 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY; 3037 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
2932 3038
2933 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 3039 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2934 pci_intx(pdev, 1); 3040 pci_intx(pdev, 1);
@@ -3023,6 +3129,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3023 /* apply workaround for ASUS P5W DH Deluxe mainboard */ 3129 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3024 ahci_p5wdh_workaround(host); 3130 ahci_p5wdh_workaround(host);
3025 3131
3132 /* apply gtf filter quirk */
3133 ahci_gtf_filter_workaround(host);
3134
3026 /* initialize adapter */ 3135 /* initialize adapter */
3027 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); 3136 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3028 if (rc) 3137 if (rc)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 01964b6e6f6b..b0882cddfd4c 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -20,19 +20,9 @@
20 20
21#include <acpi/acpi_bus.h> 21#include <acpi/acpi_bus.h>
22 22
23enum { 23unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
24 ATA_ACPI_FILTER_SETXFER = 1 << 0,
25 ATA_ACPI_FILTER_LOCK = 1 << 1,
26 ATA_ACPI_FILTER_DIPM = 1 << 2,
27
28 ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
29 ATA_ACPI_FILTER_LOCK |
30 ATA_ACPI_FILTER_DIPM,
31};
32
33static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
34module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644); 24module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
35MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM)"); 25MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)");
36 26
37#define NO_PORT_MULT 0xffff 27#define NO_PORT_MULT 0xffff
38#define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) 28#define SATA_ADR(root, pmp) (((root) << 16) | (pmp))
@@ -613,10 +603,11 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev,
613 tf->command = gtf->tf[6]; /* 0x1f7 */ 603 tf->command = gtf->tf[6]; /* 0x1f7 */
614} 604}
615 605
616static int ata_acpi_filter_tf(const struct ata_taskfile *tf, 606static int ata_acpi_filter_tf(struct ata_device *dev,
607 const struct ata_taskfile *tf,
617 const struct ata_taskfile *ptf) 608 const struct ata_taskfile *ptf)
618{ 609{
619 if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_SETXFER) { 610 if (dev->gtf_filter & ATA_ACPI_FILTER_SETXFER) {
620 /* libata doesn't use ACPI to configure transfer mode. 611 /* libata doesn't use ACPI to configure transfer mode.
621 * It will only confuse device configuration. Skip. 612 * It will only confuse device configuration. Skip.
622 */ 613 */
@@ -625,7 +616,7 @@ static int ata_acpi_filter_tf(const struct ata_taskfile *tf,
625 return 1; 616 return 1;
626 } 617 }
627 618
628 if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_LOCK) { 619 if (dev->gtf_filter & ATA_ACPI_FILTER_LOCK) {
629 /* BIOS writers, sorry but we don't wanna lock 620 /* BIOS writers, sorry but we don't wanna lock
630 * features unless the user explicitly said so. 621 * features unless the user explicitly said so.
631 */ 622 */
@@ -647,12 +638,23 @@ static int ata_acpi_filter_tf(const struct ata_taskfile *tf,
647 return 1; 638 return 1;
648 } 639 }
649 640
650 if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_DIPM) { 641 if (tf->command == ATA_CMD_SET_FEATURES &&
642 tf->feature == SETFEATURES_SATA_ENABLE) {
651 /* inhibit enabling DIPM */ 643 /* inhibit enabling DIPM */
652 if (tf->command == ATA_CMD_SET_FEATURES && 644 if (dev->gtf_filter & ATA_ACPI_FILTER_DIPM &&
653 tf->feature == SETFEATURES_SATA_ENABLE &&
654 tf->nsect == SATA_DIPM) 645 tf->nsect == SATA_DIPM)
655 return 1; 646 return 1;
647
648 /* inhibit FPDMA non-zero offset */
649 if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_OFFSET &&
650 (tf->nsect == SATA_FPDMA_OFFSET ||
651 tf->nsect == SATA_FPDMA_IN_ORDER))
652 return 1;
653
654 /* inhibit FPDMA auto activation */
655 if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_AA &&
656 tf->nsect == SATA_FPDMA_AA)
657 return 1;
656 } 658 }
657 659
658 return 0; 660 return 0;
@@ -704,7 +706,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
704 pptf = &ptf; 706 pptf = &ptf;
705 } 707 }
706 708
707 if (!ata_acpi_filter_tf(&tf, pptf)) { 709 if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
708 rtf = tf; 710 rtf = tf;
709 err_mask = ata_exec_internal(dev, &rtf, NULL, 711 err_mask = ata_exec_internal(dev, &rtf, NULL,
710 DMA_NONE, NULL, 0, 0); 712 DMA_NONE, NULL, 0, 0);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0ddaf43d68c6..dc72690ed5db 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4919,10 +4919,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4919 */ 4919 */
4920void ata_qc_free(struct ata_queued_cmd *qc) 4920void ata_qc_free(struct ata_queued_cmd *qc)
4921{ 4921{
4922 struct ata_port *ap = qc->ap; 4922 struct ata_port *ap;
4923 unsigned int tag; 4923 unsigned int tag;
4924 4924
4925 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4925 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4926 ap = qc->ap;
4926 4927
4927 qc->flags = 0; 4928 qc->flags = 0;
4928 tag = qc->tag; 4929 tag = qc->tag;
@@ -4934,11 +4935,13 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4934 4935
4935void __ata_qc_complete(struct ata_queued_cmd *qc) 4936void __ata_qc_complete(struct ata_queued_cmd *qc)
4936{ 4937{
4937 struct ata_port *ap = qc->ap; 4938 struct ata_port *ap;
4938 struct ata_link *link = qc->dev->link; 4939 struct ata_link *link;
4939 4940
4940 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4941 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4941 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4942 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4943 ap = qc->ap;
4944 link = qc->dev->link;
4942 4945
4943 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4946 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4944 ata_sg_clean(qc); 4947 ata_sg_clean(qc);
@@ -5028,12 +5031,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
5028 qc->flags |= ATA_QCFLAG_FAILED; 5031 qc->flags |= ATA_QCFLAG_FAILED;
5029 5032
5030 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5033 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5031 if (!ata_tag_internal(qc->tag)) { 5034 /* always fill result TF for failed qc */
5032 /* always fill result TF for failed qc */ 5035 fill_result_tf(qc);
5033 fill_result_tf(qc); 5036
5037 if (!ata_tag_internal(qc->tag))
5034 ata_qc_schedule_eh(qc); 5038 ata_qc_schedule_eh(qc);
5035 return; 5039 else
5036 } 5040 __ata_qc_complete(qc);
5041 return;
5037 } 5042 }
5038 5043
5039 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 5044 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
@@ -5591,6 +5596,9 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5591 5596
5592 dev->link = link; 5597 dev->link = link;
5593 dev->devno = dev - link->device; 5598 dev->devno = dev - link->device;
5599#ifdef CONFIG_ATA_ACPI
5600 dev->gtf_filter = ata_acpi_gtf_filter;
5601#endif
5594 ata_dev_init(dev); 5602 ata_dev_init(dev);
5595 } 5603 }
5596} 5604}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a04488f0de88..bba2ae5df1c2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2667,14 +2667,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
2667 dev->pio_mode = XFER_PIO_0; 2667 dev->pio_mode = XFER_PIO_0;
2668 dev->flags &= ~ATA_DFLAG_SLEEPING; 2668 dev->flags &= ~ATA_DFLAG_SLEEPING;
2669 2669
2670 if (!ata_phys_link_offline(ata_dev_phys_link(dev))) { 2670 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2671 /* apply class override */ 2671 continue;
2672 if (lflags & ATA_LFLAG_ASSUME_ATA) 2672
2673 classes[dev->devno] = ATA_DEV_ATA; 2673 /* apply class override */
2674 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2674 if (lflags & ATA_LFLAG_ASSUME_ATA)
2675 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2675 classes[dev->devno] = ATA_DEV_ATA;
2676 } else 2676 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2677 classes[dev->devno] = ATA_DEV_NONE; 2677 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2678 } 2678 }
2679 2679
2680 /* record current link speed */ 2680 /* record current link speed */
@@ -2713,34 +2713,48 @@ int ata_eh_reset(struct ata_link *link, int classify,
2713 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2713 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2714 spin_unlock_irqrestore(link->ap->lock, flags); 2714 spin_unlock_irqrestore(link->ap->lock, flags);
2715 2715
2716 /* Make sure onlineness and classification result correspond. 2716 /*
2717 * Make sure onlineness and classification result correspond.
2717 * Hotplug could have happened during reset and some 2718 * Hotplug could have happened during reset and some
2718 * controllers fail to wait while a drive is spinning up after 2719 * controllers fail to wait while a drive is spinning up after
2719 * being hotplugged causing misdetection. By cross checking 2720 * being hotplugged causing misdetection. By cross checking
2720 * link onlineness and classification result, those conditions 2721 * link on/offlineness and classification result, those
2721 * can be reliably detected and retried. 2722 * conditions can be reliably detected and retried.
2722 */ 2723 */
2723 nr_unknown = 0; 2724 nr_unknown = 0;
2724 ata_for_each_dev(dev, link, ALL) { 2725 ata_for_each_dev(dev, link, ALL) {
2725 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2726 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2726 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2727 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2727 classes[dev->devno] = ATA_DEV_NONE; 2728 ata_dev_printk(dev, KERN_DEBUG, "link online "
2728 if (ata_phys_link_online(ata_dev_phys_link(dev))) 2729 "but device misclassifed\n");
2730 classes[dev->devno] = ATA_DEV_NONE;
2729 nr_unknown++; 2731 nr_unknown++;
2732 }
2733 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2734 if (ata_class_enabled(classes[dev->devno]))
2735 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2736 "clearing class %d to NONE\n",
2737 classes[dev->devno]);
2738 classes[dev->devno] = ATA_DEV_NONE;
2739 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2740 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2741 "clearing UNKNOWN to NONE\n");
2742 classes[dev->devno] = ATA_DEV_NONE;
2730 } 2743 }
2731 } 2744 }
2732 2745
2733 if (classify && nr_unknown) { 2746 if (classify && nr_unknown) {
2734 if (try < max_tries) { 2747 if (try < max_tries) {
2735 ata_link_printk(link, KERN_WARNING, "link online but " 2748 ata_link_printk(link, KERN_WARNING, "link online but "
2736 "device misclassified, retrying\n"); 2749 "%d devices misclassified, retrying\n",
2750 nr_unknown);
2737 failed_link = link; 2751 failed_link = link;
2738 rc = -EAGAIN; 2752 rc = -EAGAIN;
2739 goto fail; 2753 goto fail;
2740 } 2754 }
2741 ata_link_printk(link, KERN_WARNING, 2755 ata_link_printk(link, KERN_WARNING,
2742 "link online but device misclassified, " 2756 "link online but %d devices misclassified, "
2743 "device detection might fail\n"); 2757 "device detection might fail\n", nr_unknown);
2744 } 2758 }
2745 2759
2746 /* reset successful, schedule revalidation */ 2760 /* reset successful, schedule revalidation */
@@ -2967,12 +2981,14 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2967 * device detection messages backwards. 2981 * device detection messages backwards.
2968 */ 2982 */
2969 ata_for_each_dev(dev, link, ALL) { 2983 ata_for_each_dev(dev, link, ALL) {
2970 if (!(new_mask & (1 << dev->devno)) || 2984 if (!(new_mask & (1 << dev->devno)))
2971 dev->class == ATA_DEV_PMP)
2972 continue; 2985 continue;
2973 2986
2974 dev->class = ehc->classes[dev->devno]; 2987 dev->class = ehc->classes[dev->devno];
2975 2988
2989 if (dev->class == ATA_DEV_PMP)
2990 continue;
2991
2976 ehc->i.flags |= ATA_EHI_PRINTINFO; 2992 ehc->i.flags |= ATA_EHI_PRINTINFO;
2977 rc = ata_dev_configure(dev); 2993 rc = ata_dev_configure(dev);
2978 ehc->i.flags &= ~ATA_EHI_PRINTINFO; 2994 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index be8e2628f82c..823e63096362 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -118,6 +118,8 @@ extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
118 118
119/* libata-acpi.c */ 119/* libata-acpi.c */
120#ifdef CONFIG_ATA_ACPI 120#ifdef CONFIG_ATA_ACPI
121extern unsigned int ata_acpi_gtf_filter;
122
121extern void ata_acpi_associate_sata_port(struct ata_port *ap); 123extern void ata_acpi_associate_sata_port(struct ata_port *ap);
122extern void ata_acpi_associate(struct ata_host *host); 124extern void ata_acpi_associate(struct ata_host *host);
123extern void ata_acpi_dissociate(struct ata_host *host); 125extern void ata_acpi_dissociate(struct ata_host *host);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index fc9c5d6d7d80..1432dc9d0ab8 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -290,7 +290,7 @@ static void ali_warn_atapi_dma(struct ata_device *adev)
290 290
291 if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) { 291 if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
292 ata_dev_printk(adev, KERN_WARNING, 292 ata_dev_printk(adev, KERN_WARNING,
293 "WARNING: ATAPI DMA disabled for reliablity issues. It can be enabled\n"); 293 "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
294 ata_dev_printk(adev, KERN_WARNING, 294 ata_dev_printk(adev, KERN_WARNING,
295 "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n"); 295 "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
296 } 296 }
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index aa4b3f6ae771..ae4454d4e955 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,7 +246,7 @@ static const struct pci_device_id atiixp[] = {
246 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, 246 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
247 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, 247 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
248 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, 248 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
249 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), }, 249 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
250 250
251 { }, 251 { },
252}; 252};
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 7990de925d2e..6fe7ded40c6a 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -118,20 +118,13 @@ struct atp867x_priv {
118 int pci66mhz; 118 int pci66mhz;
119}; 119};
120 120
121static inline u8 atp867x_speed_to_mode(u8 speed)
122{
123 return speed - XFER_UDMA_0 + 1;
124}
125
126static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev) 121static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
127{ 122{
128 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 123 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
129 struct atp867x_priv *dp = ap->private_data; 124 struct atp867x_priv *dp = ap->private_data;
130 u8 speed = adev->dma_mode; 125 u8 speed = adev->dma_mode;
131 u8 b; 126 u8 b;
132 u8 mode; 127 u8 mode = speed - XFER_UDMA_0 + 1;
133
134 mode = atp867x_speed_to_mode(speed);
135 128
136 /* 129 /*
137 * Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed 130 * Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed
@@ -156,25 +149,38 @@ static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
156 iowrite8(b, dp->dma_mode); 149 iowrite8(b, dp->dma_mode);
157} 150}
158 151
159static int atp867x_get_active_clocks_shifted(unsigned int clk) 152static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
153 unsigned int clk)
160{ 154{
155 struct atp867x_priv *dp = ap->private_data;
161 unsigned char clocks = clk; 156 unsigned char clocks = clk;
162 157
158 /*
159 * Doc 6.6.9: increase the clock value by 1 for safer PIO speed
160 * on 66MHz bus
161 */
162 if (dp->pci66mhz)
163 clocks++;
164
163 switch (clocks) { 165 switch (clocks) {
164 case 0: 166 case 0:
165 clocks = 1; 167 clocks = 1;
166 break; 168 break;
167 case 1 ... 7: 169 case 1 ... 6:
168 break;
169 case 8 ... 12:
170 clocks = 7;
171 break; 170 break;
172 default: 171 default:
173 printk(KERN_WARNING "ATP867X: active %dclk is invalid. " 172 printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
174 "Using default 8clk.\n", clk); 173 "Using 12clk.\n", clk);
175 clocks = 0; /* 8 clk */ 174 case 9 ... 12:
175 clocks = 7; /* 12 clk */
176 break; 176 break;
177 case 7:
178 case 8: /* default 8 clk */
179 clocks = 0;
180 goto active_clock_shift_done;
177 } 181 }
182
183active_clock_shift_done:
178 return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT; 184 return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
179} 185}
180 186
@@ -188,20 +194,20 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
188 break; 194 break;
189 case 1 ... 11: 195 case 1 ... 11:
190 break; 196 break;
191 case 12: 197 case 13:
192 clocks = 0; 198 case 14:
193 break; 199 --clocks; /* by the spec */
194 case 13: case 14:
195 --clocks;
196 break; 200 break;
197 case 15: 201 case 15:
198 break; 202 break;
199 default: 203 default:
200 printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " 204 printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
201 "Using default 15clk.\n", clk); 205 "Using default 12clk.\n", clk);
202 clocks = 0; /* 12 clk */ 206 case 12: /* default 12 clk */
207 clocks = 0;
203 break; 208 break;
204 } 209 }
210
205 return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT; 211 return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT;
206} 212}
207 213
@@ -230,25 +236,38 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
230 b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK); 236 b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK);
231 iowrite8(b, dp->dma_mode); 237 iowrite8(b, dp->dma_mode);
232 238
233 b = atp867x_get_active_clocks_shifted(t.active) | 239 b = atp867x_get_active_clocks_shifted(ap, t.active) |
234 atp867x_get_recover_clocks_shifted(t.recover); 240 atp867x_get_recover_clocks_shifted(t.recover);
235 if (dp->pci66mhz)
236 b += 0x10;
237 241
238 if (adev->devno & 1) 242 if (adev->devno & 1)
239 iowrite8(b, dp->slave_piospd); 243 iowrite8(b, dp->slave_piospd);
240 else 244 else
241 iowrite8(b, dp->mstr_piospd); 245 iowrite8(b, dp->mstr_piospd);
242 246
243 /* 247 b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
244 * use the same value for comand timing as for PIO timimg 248 atp867x_get_recover_clocks_shifted(t.rec8b);
245 */ 249
246 iowrite8(b, dp->eightb_piospd); 250 iowrite8(b, dp->eightb_piospd);
247} 251}
248 252
253static int atp867x_cable_override(struct pci_dev *pdev)
254{
255 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ARTOP &&
256 (pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867A ||
257 pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867B)) {
258 return 1;
259 }
260 return 0;
261}
262
249static int atp867x_cable_detect(struct ata_port *ap) 263static int atp867x_cable_detect(struct ata_port *ap)
250{ 264{
251 return ATA_CBL_PATA40_SHORT; 265 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
266
267 if (atp867x_cable_override(pdev))
268 return ATA_CBL_PATA40_SHORT;
269
270 return ATA_CBL_PATA_UNK;
252} 271}
253 272
254static struct scsi_host_template atp867x_sht = { 273static struct scsi_host_template atp867x_sht = {
@@ -471,7 +490,6 @@ static int atp867x_init_one(struct pci_dev *pdev,
471 static const struct ata_port_info info_867x = { 490 static const struct ata_port_info info_867x = {
472 .flags = ATA_FLAG_SLAVE_POSS, 491 .flags = ATA_FLAG_SLAVE_POSS,
473 .pio_mask = ATA_PIO4, 492 .pio_mask = ATA_PIO4,
474 .mwdma_mask = ATA_MWDMA2,
475 .udma_mask = ATA_UDMA6, 493 .udma_mask = ATA_UDMA6,
476 .port_ops = &atp867x_ops, 494 .port_ops = &atp867x_ops,
477 }; 495 };
@@ -515,6 +533,23 @@ err_out:
515 return rc; 533 return rc;
516} 534}
517 535
536#ifdef CONFIG_PM
537static int atp867x_reinit_one(struct pci_dev *pdev)
538{
539 struct ata_host *host = dev_get_drvdata(&pdev->dev);
540 int rc;
541
542 rc = ata_pci_device_do_resume(pdev);
543 if (rc)
544 return rc;
545
546 atp867x_fixup(host);
547
548 ata_host_resume(host);
549 return 0;
550}
551#endif
552
518static struct pci_device_id atp867x_pci_tbl[] = { 553static struct pci_device_id atp867x_pci_tbl[] = {
519 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 }, 554 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
520 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 }, 555 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
@@ -526,6 +561,10 @@ static struct pci_driver atp867x_driver = {
526 .id_table = atp867x_pci_tbl, 561 .id_table = atp867x_pci_tbl,
527 .probe = atp867x_init_one, 562 .probe = atp867x_init_one,
528 .remove = ata_pci_remove_one, 563 .remove = ata_pci_remove_one,
564#ifdef CONFIG_PM
565 .suspend = ata_pci_device_suspend,
566 .resume = atp867x_reinit_one,
567#endif
529}; 568};
530 569
531static int __init atp867x_init(void) 570static int __init atp867x_init(void)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index f49814d6fd2e..3bbed8322ecf 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -235,8 +235,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
235 .udma_mask = ATA_UDMA2, 235 .udma_mask = ATA_UDMA2,
236 .port_ops = &sc1200_port_ops 236 .port_ops = &sc1200_port_ops
237 }; 237 };
238 /* Can't enable port 2 yet, see top comments */ 238 const struct ata_port_info *ppi[] = { &info, NULL };
239 const struct ata_port_info *ppi[] = { &info, };
240 239
241 return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL); 240 return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL);
242} 241}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 45657cacec43..88984b803d6d 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -111,7 +111,7 @@ static const struct via_isa_bridge {
111 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 111 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
112 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA }, 112 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
113 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES }, 113 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
114 { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES }, 114 { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
115 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 115 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
116 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 116 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
117 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 117 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 17f9ff9067a2..6f5093b7c8c5 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1382,6 +1382,25 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
1382 */ 1382 */
1383 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1383 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1384 return ATA_DEFER_PORT; 1384 return ATA_DEFER_PORT;
1385
1386 /* PIO commands need exclusive link: no other commands [DMA or PIO]
1387 * can run concurrently.
1388 * set excl_link when we want to send a PIO command in DMA mode
1389 * or a non-NCQ command in NCQ mode.
1390 * When we receive a command from that link, and there are no
1391 * outstanding commands, mark a flag to clear excl_link and let
1392 * the command go through.
1393 */
1394 if (unlikely(ap->excl_link)) {
1395 if (link == ap->excl_link) {
1396 if (ap->nr_active_links)
1397 return ATA_DEFER_PORT;
1398 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1399 return 0;
1400 } else
1401 return ATA_DEFER_PORT;
1402 }
1403
1385 /* 1404 /*
1386 * If the port is completely idle, then allow the new qc. 1405 * If the port is completely idle, then allow the new qc.
1387 */ 1406 */
@@ -1395,8 +1414,14 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
1395 * doesn't allow it. 1414 * doesn't allow it.
1396 */ 1415 */
1397 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1416 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1398 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol)) 1417 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1399 return 0; 1418 if (ata_is_ncq(qc->tf.protocol))
1419 return 0;
1420 else {
1421 ap->excl_link = link;
1422 return ATA_DEFER_PORT;
1423 }
1424 }
1400 1425
1401 return ATA_DEFER_PORT; 1426 return ATA_DEFER_PORT;
1402} 1427}
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 86a40582999c..1eb4e020eb5c 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1594,9 +1594,21 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
1594 !ata_dev_enabled(link->device)) 1594 !ata_dev_enabled(link->device))
1595 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline, 1595 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1596 NULL, NULL); 1596 NULL, NULL);
1597 else if (!(ehc->i.flags & ATA_EHI_QUIET)) 1597 else {
1598 ata_link_printk(link, KERN_INFO, 1598 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1599 "nv: skipping hardreset on occupied port\n"); 1599 int rc;
1600
1601 if (!(ehc->i.flags & ATA_EHI_QUIET))
1602 ata_link_printk(link, KERN_INFO, "nv: skipping "
1603 "hardreset on occupied port\n");
1604
1605 /* make sure the link is online */
1606 rc = sata_link_resume(link, timing, deadline);
1607 /* whine about phy resume failure but proceed */
1608 if (rc && rc != -EOPNOTSUPP)
1609 ata_link_printk(link, KERN_WARNING, "failed to resume "
1610 "link (errno=%d)\n", rc);
1611 }
1600 1612
1601 /* device signature acquisition is unreliable */ 1613 /* device signature acquisition is unreliable */
1602 return -EAGAIN; 1614 return -EAGAIN;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index bdd43c7f432e..02efd9a83d26 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -93,7 +93,6 @@ static const struct pci_device_id svia_pci_tbl[] = {
93 { PCI_VDEVICE(VIA, 0x7372), vt6420 }, 93 { PCI_VDEVICE(VIA, 0x7372), vt6420 },
94 { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */ 94 { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
95 { PCI_VDEVICE(VIA, 0x9000), vt8251 }, 95 { PCI_VDEVICE(VIA, 0x9000), vt8251 },
96 { PCI_VDEVICE(VIA, 0x9040), vt8251 },
97 96
98 { } /* terminate list */ 97 { } /* terminate list */
99}; 98};
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 973bf2ad4e0d..63c143e54a57 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -689,15 +689,19 @@ int bus_add_driver(struct device_driver *drv)
689 printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n", 689 printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
690 __func__, drv->name); 690 __func__, drv->name);
691 } 691 }
692 error = add_bind_files(drv); 692
693 if (error) { 693 if (!drv->suppress_bind_attrs) {
694 /* Ditto */ 694 error = add_bind_files(drv);
695 printk(KERN_ERR "%s: add_bind_files(%s) failed\n", 695 if (error) {
696 __func__, drv->name); 696 /* Ditto */
697 printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
698 __func__, drv->name);
699 }
697 } 700 }
698 701
699 kobject_uevent(&priv->kobj, KOBJ_ADD); 702 kobject_uevent(&priv->kobj, KOBJ_ADD);
700 return 0; 703 return 0;
704
701out_unregister: 705out_unregister:
702 kfree(drv->p); 706 kfree(drv->p);
703 drv->p = NULL; 707 drv->p = NULL;
@@ -720,7 +724,8 @@ void bus_remove_driver(struct device_driver *drv)
720 if (!drv->bus) 724 if (!drv->bus)
721 return; 725 return;
722 726
723 remove_bind_files(drv); 727 if (!drv->suppress_bind_attrs)
728 remove_bind_files(drv);
724 driver_remove_attrs(drv->bus, drv); 729 driver_remove_attrs(drv->bus, drv);
725 driver_remove_file(drv, &driver_attr_uevent); 730 driver_remove_file(drv, &driver_attr_uevent);
726 klist_remove(&drv->p->knode_bus); 731 klist_remove(&drv->p->knode_bus);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index ed2ebd3c287d..f367885a7646 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -236,7 +236,7 @@ int driver_register(struct device_driver *drv)
236 put_driver(other); 236 put_driver(other);
237 printk(KERN_ERR "Error: Driver '%s' is already registered, " 237 printk(KERN_ERR "Error: Driver '%s' is already registered, "
238 "aborting...\n", drv->name); 238 "aborting...\n", drv->name);
239 return -EEXIST; 239 return -EBUSY;
240 } 240 }
241 241
242 ret = bus_add_driver(drv); 242 ret = bus_add_driver(drv);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ed156a13aa40..4fa954b07ac4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -521,11 +521,15 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
521{ 521{
522 int retval, code; 522 int retval, code;
523 523
524 /* make sure driver won't have bind/unbind attributes */
525 drv->driver.suppress_bind_attrs = true;
526
524 /* temporary section violation during probe() */ 527 /* temporary section violation during probe() */
525 drv->probe = probe; 528 drv->probe = probe;
526 retval = code = platform_driver_register(drv); 529 retval = code = platform_driver_register(drv);
527 530
528 /* Fixup that section violation, being paranoid about code scanning 531 /*
532 * Fixup that section violation, being paranoid about code scanning
529 * the list of drivers in order to probe new devices. Check to see 533 * the list of drivers in order to probe new devices. Check to see
530 * if the probe was successful, and make sure any forced probes of 534 * if the probe was successful, and make sure any forced probes of
531 * new devices fail. 535 * new devices fail.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0dc4071e088..8aa2443182d5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -511,6 +511,7 @@ static void dpm_complete(pm_message_t state)
511 511
512 INIT_LIST_HEAD(&list); 512 INIT_LIST_HEAD(&list);
513 mutex_lock(&dpm_list_mtx); 513 mutex_lock(&dpm_list_mtx);
514 transition_started = false;
514 while (!list_empty(&dpm_list)) { 515 while (!list_empty(&dpm_list)) {
515 struct device *dev = to_device(dpm_list.prev); 516 struct device *dev = to_device(dpm_list.prev);
516 517
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 38556f6cc22d..a770498a74ec 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -51,8 +51,6 @@ static int __pm_runtime_idle(struct device *dev)
51{ 51{
52 int retval = 0; 52 int retval = 0;
53 53
54 dev_dbg(dev, "__pm_runtime_idle()!\n");
55
56 if (dev->power.runtime_error) 54 if (dev->power.runtime_error)
57 retval = -EINVAL; 55 retval = -EINVAL;
58 else if (dev->power.idle_notification) 56 else if (dev->power.idle_notification)
@@ -93,8 +91,6 @@ static int __pm_runtime_idle(struct device *dev)
93 wake_up_all(&dev->power.wait_queue); 91 wake_up_all(&dev->power.wait_queue);
94 92
95 out: 93 out:
96 dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);
97
98 return retval; 94 return retval;
99} 95}
100 96
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 6fa7b0fdbdfd..eb4fa1943944 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
41#include <linux/reboot.h> 42#include <linux/reboot.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/timer.h> 44#include <linux/timer.h>
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
6422 return true; 6423 return true;
6423} 6424}
6424 6425
6425 6426static int dac960_proc_show(struct seq_file *m, void *v)
6426/*
6427 DAC960_ProcReadStatus implements reading /proc/rd/status.
6428*/
6429
6430static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6431 int Count, int *EOF, void *Data)
6432{ 6427{
6433 unsigned char *StatusMessage = "OK\n"; 6428 unsigned char *StatusMessage = "OK\n";
6434 int ControllerNumber, BytesAvailable; 6429 int ControllerNumber;
6435 for (ControllerNumber = 0; 6430 for (ControllerNumber = 0;
6436 ControllerNumber < DAC960_ControllerCount; 6431 ControllerNumber < DAC960_ControllerCount;
6437 ControllerNumber++) 6432 ControllerNumber++)
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6444 break; 6439 break;
6445 } 6440 }
6446 } 6441 }
6447 BytesAvailable = strlen(StatusMessage) - Offset; 6442 seq_puts(m, StatusMessage);
6448 if (Count >= BytesAvailable) 6443 return 0;
6449 {
6450 Count = BytesAvailable;
6451 *EOF = true;
6452 }
6453 if (Count <= 0) return 0;
6454 *Start = Page;
6455 memcpy(Page, &StatusMessage[Offset], Count);
6456 return Count;
6457} 6444}
6458 6445
6446static int dac960_proc_open(struct inode *inode, struct file *file)
6447{
6448 return single_open(file, dac960_proc_show, NULL);
6449}
6459 6450
6460/* 6451static const struct file_operations dac960_proc_fops = {
6461 DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 6452 .owner = THIS_MODULE,
6462*/ 6453 .open = dac960_proc_open,
6454 .read = seq_read,
6455 .llseek = seq_lseek,
6456 .release = single_release,
6457};
6463 6458
6464static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, 6459static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
6465 int Count, int *EOF, void *Data)
6466{ 6460{
6467 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6461 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6468 int BytesAvailable = Controller->InitialStatusLength - Offset; 6462 seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
6469 if (Count >= BytesAvailable) 6463 return 0;
6470 {
6471 Count = BytesAvailable;
6472 *EOF = true;
6473 }
6474 if (Count <= 0) return 0;
6475 *Start = Page;
6476 memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
6477 return Count;
6478} 6464}
6479 6465
6466static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
6467{
6468 return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
6469}
6480 6470
6481/* 6471static const struct file_operations dac960_initial_status_proc_fops = {
6482 DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. 6472 .owner = THIS_MODULE,
6483*/ 6473 .open = dac960_initial_status_proc_open,
6474 .read = seq_read,
6475 .llseek = seq_lseek,
6476 .release = single_release,
6477};
6484 6478
6485static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, 6479static int dac960_current_status_proc_show(struct seq_file *m, void *v)
6486 int Count, int *EOF, void *Data)
6487{ 6480{
6488 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6481 DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
6489 unsigned char *StatusMessage = 6482 unsigned char *StatusMessage =
6490 "No Rebuild or Consistency Check in Progress\n"; 6483 "No Rebuild or Consistency Check in Progress\n";
6491 int ProgressMessageLength = strlen(StatusMessage); 6484 int ProgressMessageLength = strlen(StatusMessage);
6492 int BytesAvailable;
6493 if (jiffies != Controller->LastCurrentStatusTime) 6485 if (jiffies != Controller->LastCurrentStatusTime)
6494 { 6486 {
6495 Controller->CurrentStatusLength = 0; 6487 Controller->CurrentStatusLength = 0;
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
6513 } 6505 }
6514 Controller->LastCurrentStatusTime = jiffies; 6506 Controller->LastCurrentStatusTime = jiffies;
6515 } 6507 }
6516 BytesAvailable = Controller->CurrentStatusLength - Offset; 6508 seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
6517 if (Count >= BytesAvailable) 6509 return 0;
6518 {
6519 Count = BytesAvailable;
6520 *EOF = true;
6521 }
6522 if (Count <= 0) return 0;
6523 *Start = Page;
6524 memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
6525 return Count;
6526} 6510}
6527 6511
6512static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
6513{
6514 return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
6515}
6528 6516
6529/* 6517static const struct file_operations dac960_current_status_proc_fops = {
6530 DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 6518 .owner = THIS_MODULE,
6531*/ 6519 .open = dac960_current_status_proc_open,
6520 .read = seq_read,
6521 .llseek = seq_lseek,
6522 .release = single_release,
6523};
6532 6524
6533static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, 6525static int dac960_user_command_proc_show(struct seq_file *m, void *v)
6534 int Count, int *EOF, void *Data)
6535{ 6526{
6536 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6527 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6537 int BytesAvailable = Controller->UserStatusLength - Offset;
6538 if (Count >= BytesAvailable)
6539 {
6540 Count = BytesAvailable;
6541 *EOF = true;
6542 }
6543 if (Count <= 0) return 0;
6544 *Start = Page;
6545 memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
6546 return Count;
6547}
6548 6528
6529 seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
6530 return 0;
6531}
6549 6532
6550/* 6533static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
6551 DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. 6534{
6552*/ 6535 return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
6536}
6553 6537
6554static int DAC960_ProcWriteUserCommand(struct file *file, 6538static ssize_t dac960_user_command_proc_write(struct file *file,
6555 const char __user *Buffer, 6539 const char __user *Buffer,
6556 unsigned long Count, void *Data) 6540 size_t Count, loff_t *pos)
6557{ 6541{
6558 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6542 DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
6559 unsigned char CommandBuffer[80]; 6543 unsigned char CommandBuffer[80];
6560 int Length; 6544 int Length;
6561 if (Count > sizeof(CommandBuffer)-1) return -EINVAL; 6545 if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file,
6572 ? Count : -EBUSY); 6556 ? Count : -EBUSY);
6573} 6557}
6574 6558
6559static const struct file_operations dac960_user_command_proc_fops = {
6560 .owner = THIS_MODULE,
6561 .open = dac960_user_command_proc_open,
6562 .read = seq_read,
6563 .llseek = seq_lseek,
6564 .release = single_release,
6565 .write = dac960_user_command_proc_write,
6566};
6575 6567
6576/* 6568/*
6577 DAC960_CreateProcEntries creates the /proc/rd/... entries for the 6569 DAC960_CreateProcEntries creates the /proc/rd/... entries for the
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
6586 6578
6587 if (DAC960_ProcDirectoryEntry == NULL) { 6579 if (DAC960_ProcDirectoryEntry == NULL) {
6588 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); 6580 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
6589 StatusProcEntry = create_proc_read_entry("status", 0, 6581 StatusProcEntry = proc_create("status", 0,
6590 DAC960_ProcDirectoryEntry, 6582 DAC960_ProcDirectoryEntry,
6591 DAC960_ProcReadStatus, NULL); 6583 &dac960_proc_fops);
6592 } 6584 }
6593 6585
6594 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6586 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
6595 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6587 ControllerProcEntry = proc_mkdir(Controller->ControllerName,
6596 DAC960_ProcDirectoryEntry); 6588 DAC960_ProcDirectoryEntry);
6597 create_proc_read_entry("initial_status", 0, ControllerProcEntry, 6589 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
6598 DAC960_ProcReadInitialStatus, Controller); 6590 proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
6599 create_proc_read_entry("current_status", 0, ControllerProcEntry, 6591 UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
6600 DAC960_ProcReadCurrentStatus, Controller);
6601 UserCommandProcEntry =
6602 create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
6603 ControllerProcEntry, DAC960_ProcReadUserCommand,
6604 Controller);
6605 UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
6606 Controller->ControllerProcEntry = ControllerProcEntry; 6592 Controller->ControllerProcEntry = ControllerProcEntry;
6607} 6593}
6608 6594
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 24c3e21ab263..6399e5090df4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -36,9 +36,11 @@
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/jiffies.h>
39#include <linux/hdreg.h> 40#include <linux/hdreg.h>
40#include <linux/spinlock.h> 41#include <linux/spinlock.h>
41#include <linux/compat.h> 42#include <linux/compat.h>
43#include <linux/mutex.h>
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/io.h> 45#include <asm/io.h>
44 46
@@ -66,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
66MODULE_VERSION("3.6.20"); 68MODULE_VERSION("3.6.20");
67MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
68 70
71static int cciss_allow_hpsa;
72module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
73MODULE_PARM_DESC(cciss_allow_hpsa,
74 "Prevent cciss driver from accessing hardware known to be "
75 " supported by the hpsa driver");
76
69#include "cciss_cmd.h" 77#include "cciss_cmd.h"
70#include "cciss.h" 78#include "cciss.h"
71#include <linux/cciss_ioctl.h> 79#include <linux/cciss_ioctl.h>
@@ -99,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
102 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
103 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
104 {0,} 110 {0,}
105}; 111};
106 112
@@ -121,8 +127,6 @@ static struct board_type products[] = {
121 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 127 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 {0x40910E11, "Smart Array 6i", &SA5_access}, 128 {0x40910E11, "Smart Array 6i", &SA5_access},
123 {0x3225103C, "Smart Array P600", &SA5_access}, 129 {0x3225103C, "Smart Array P600", &SA5_access},
124 {0x3223103C, "Smart Array P800", &SA5_access},
125 {0x3234103C, "Smart Array P400", &SA5_access},
126 {0x3235103C, "Smart Array P400i", &SA5_access}, 130 {0x3235103C, "Smart Array P400i", &SA5_access},
127 {0x3211103C, "Smart Array E200i", &SA5_access}, 131 {0x3211103C, "Smart Array E200i", &SA5_access},
128 {0x3212103C, "Smart Array E200", &SA5_access}, 132 {0x3212103C, "Smart Array E200", &SA5_access},
@@ -130,6 +134,10 @@ static struct board_type products[] = {
130 {0x3214103C, "Smart Array E200i", &SA5_access}, 134 {0x3214103C, "Smart Array E200i", &SA5_access},
131 {0x3215103C, "Smart Array E200i", &SA5_access}, 135 {0x3215103C, "Smart Array E200i", &SA5_access},
132 {0x3237103C, "Smart Array E500", &SA5_access}, 136 {0x3237103C, "Smart Array E500", &SA5_access},
137/* controllers below this line are also supported by the hpsa driver. */
138#define HPSA_BOUNDARY 0x3223103C
139 {0x3223103C, "Smart Array P800", &SA5_access},
140 {0x3234103C, "Smart Array P400", &SA5_access},
133 {0x323D103C, "Smart Array P700m", &SA5_access}, 141 {0x323D103C, "Smart Array P700m", &SA5_access},
134 {0x3241103C, "Smart Array P212", &SA5_access}, 142 {0x3241103C, "Smart Array P212", &SA5_access},
135 {0x3243103C, "Smart Array P410", &SA5_access}, 143 {0x3243103C, "Smart Array P410", &SA5_access},
@@ -138,7 +146,6 @@ static struct board_type products[] = {
138 {0x3249103C, "Smart Array P812", &SA5_access}, 146 {0x3249103C, "Smart Array P812", &SA5_access},
139 {0x324A103C, "Smart Array P712m", &SA5_access}, 147 {0x324A103C, "Smart Array P712m", &SA5_access},
140 {0x324B103C, "Smart Array P711m", &SA5_access}, 148 {0x324B103C, "Smart Array P711m", &SA5_access},
141 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
142}; 149};
143 150
144/* How long to wait (in milliseconds) for board to go into simple mode */ 151/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -155,6 +162,10 @@ static struct board_type products[] = {
155 162
156static ctlr_info_t *hba[MAX_CTLR]; 163static ctlr_info_t *hba[MAX_CTLR];
157 164
165static struct task_struct *cciss_scan_thread;
166static DEFINE_MUTEX(scan_mutex);
167static LIST_HEAD(scan_q);
168
158static void do_cciss_request(struct request_queue *q); 169static void do_cciss_request(struct request_queue *q);
159static irqreturn_t do_cciss_intr(int irq, void *dev_id); 170static irqreturn_t do_cciss_intr(int irq, void *dev_id);
160static int cciss_open(struct block_device *bdev, fmode_t mode); 171static int cciss_open(struct block_device *bdev, fmode_t mode);
@@ -164,9 +175,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
164static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 175static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
165 176
166static int cciss_revalidate(struct gendisk *disk); 177static int cciss_revalidate(struct gendisk *disk);
167static int rebuild_lun_table(ctlr_info_t *h, int first_time); 178static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
168static int deregister_disk(ctlr_info_t *h, int drv_index, 179static int deregister_disk(ctlr_info_t *h, int drv_index,
169 int clear_all); 180 int clear_all, int via_ioctl);
170 181
171static void cciss_read_capacity(int ctlr, int logvol, int withirq, 182static void cciss_read_capacity(int ctlr, int logvol, int withirq,
172 sector_t *total_size, unsigned int *block_size); 183 sector_t *total_size, unsigned int *block_size);
@@ -189,8 +200,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
189static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 200static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
190 201
191static void fail_all_cmds(unsigned long ctlr); 202static void fail_all_cmds(unsigned long ctlr);
203static int add_to_scan_list(struct ctlr_info *h);
192static int scan_thread(void *data); 204static int scan_thread(void *data);
193static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); 205static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
206static void cciss_hba_release(struct device *dev);
207static void cciss_device_release(struct device *dev);
208static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
209static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
194 210
195#ifdef CONFIG_PROC_FS 211#ifdef CONFIG_PROC_FS
196static void cciss_procinit(int i); 212static void cciss_procinit(int i);
@@ -245,7 +261,10 @@ static inline void removeQ(CommandList_struct *c)
245 261
246#include "cciss_scsi.c" /* For SCSI tape support */ 262#include "cciss_scsi.c" /* For SCSI tape support */
247 263
248#define RAID_UNKNOWN 6 264static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
265 "UNKNOWN"
266};
267#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
249 268
250#ifdef CONFIG_PROC_FS 269#ifdef CONFIG_PROC_FS
251 270
@@ -255,9 +274,6 @@ static inline void removeQ(CommandList_struct *c)
255#define ENG_GIG 1000000000 274#define ENG_GIG 1000000000
256#define ENG_GIG_FACTOR (ENG_GIG/512) 275#define ENG_GIG_FACTOR (ENG_GIG/512)
257#define ENGAGE_SCSI "engage scsi" 276#define ENGAGE_SCSI "engage scsi"
258static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
259 "UNKNOWN"
260};
261 277
262static struct proc_dir_entry *proc_cciss; 278static struct proc_dir_entry *proc_cciss;
263 279
@@ -318,7 +334,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
318 ctlr_info_t *h = seq->private; 334 ctlr_info_t *h = seq->private;
319 unsigned ctlr = h->ctlr; 335 unsigned ctlr = h->ctlr;
320 loff_t *pos = v; 336 loff_t *pos = v;
321 drive_info_struct *drv = &h->drv[*pos]; 337 drive_info_struct *drv = h->drv[*pos];
322 338
323 if (*pos > h->highest_lun) 339 if (*pos > h->highest_lun)
324 return 0; 340 return 0;
@@ -331,7 +347,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
331 vol_sz_frac *= 100; 347 vol_sz_frac *= 100;
332 sector_div(vol_sz_frac, ENG_GIG_FACTOR); 348 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
333 349
334 if (drv->raid_level > 5) 350 if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
335 drv->raid_level = RAID_UNKNOWN; 351 drv->raid_level = RAID_UNKNOWN;
336 seq_printf(seq, "cciss/c%dd%d:" 352 seq_printf(seq, "cciss/c%dd%d:"
337 "\t%4u.%02uGB\tRAID %s\n", 353 "\t%4u.%02uGB\tRAID %s\n",
@@ -426,7 +442,7 @@ out:
426 return err; 442 return err;
427} 443}
428 444
429static struct file_operations cciss_proc_fops = { 445static const struct file_operations cciss_proc_fops = {
430 .owner = THIS_MODULE, 446 .owner = THIS_MODULE,
431 .open = cciss_seq_open, 447 .open = cciss_seq_open,
432 .read = seq_read, 448 .read = seq_read,
@@ -454,9 +470,19 @@ static void __devinit cciss_procinit(int i)
454#define to_hba(n) container_of(n, struct ctlr_info, dev) 470#define to_hba(n) container_of(n, struct ctlr_info, dev)
455#define to_drv(n) container_of(n, drive_info_struct, dev) 471#define to_drv(n) container_of(n, drive_info_struct, dev)
456 472
457static struct device_type cciss_host_type = { 473static ssize_t host_store_rescan(struct device *dev,
458 .name = "cciss_host", 474 struct device_attribute *attr,
459}; 475 const char *buf, size_t count)
476{
477 struct ctlr_info *h = to_hba(dev);
478
479 add_to_scan_list(h);
480 wake_up_process(cciss_scan_thread);
481 wait_for_completion_interruptible(&h->scan_wait);
482
483 return count;
484}
485DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
460 486
461static ssize_t dev_show_unique_id(struct device *dev, 487static ssize_t dev_show_unique_id(struct device *dev,
462 struct device_attribute *attr, 488 struct device_attribute *attr,
@@ -560,11 +586,101 @@ static ssize_t dev_show_rev(struct device *dev,
560} 586}
561DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 587DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
562 588
589static ssize_t cciss_show_lunid(struct device *dev,
590 struct device_attribute *attr, char *buf)
591{
592 drive_info_struct *drv = to_drv(dev);
593 struct ctlr_info *h = to_hba(drv->dev.parent);
594 unsigned long flags;
595 unsigned char lunid[8];
596
597 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
598 if (h->busy_configuring) {
599 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
600 return -EBUSY;
601 }
602 if (!drv->heads) {
603 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
604 return -ENOTTY;
605 }
606 memcpy(lunid, drv->LunID, sizeof(lunid));
607 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
608 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
609 lunid[0], lunid[1], lunid[2], lunid[3],
610 lunid[4], lunid[5], lunid[6], lunid[7]);
611}
612DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
613
614static ssize_t cciss_show_raid_level(struct device *dev,
615 struct device_attribute *attr, char *buf)
616{
617 drive_info_struct *drv = to_drv(dev);
618 struct ctlr_info *h = to_hba(drv->dev.parent);
619 int raid;
620 unsigned long flags;
621
622 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
623 if (h->busy_configuring) {
624 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
625 return -EBUSY;
626 }
627 raid = drv->raid_level;
628 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
629 if (raid < 0 || raid > RAID_UNKNOWN)
630 raid = RAID_UNKNOWN;
631
632 return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
633 raid_label[raid]);
634}
635DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
636
637static ssize_t cciss_show_usage_count(struct device *dev,
638 struct device_attribute *attr, char *buf)
639{
640 drive_info_struct *drv = to_drv(dev);
641 struct ctlr_info *h = to_hba(drv->dev.parent);
642 unsigned long flags;
643 int count;
644
645 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
646 if (h->busy_configuring) {
647 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
648 return -EBUSY;
649 }
650 count = drv->usage_count;
651 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
652 return snprintf(buf, 20, "%d\n", count);
653}
654DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
655
656static struct attribute *cciss_host_attrs[] = {
657 &dev_attr_rescan.attr,
658 NULL
659};
660
661static struct attribute_group cciss_host_attr_group = {
662 .attrs = cciss_host_attrs,
663};
664
665static const struct attribute_group *cciss_host_attr_groups[] = {
666 &cciss_host_attr_group,
667 NULL
668};
669
670static struct device_type cciss_host_type = {
671 .name = "cciss_host",
672 .groups = cciss_host_attr_groups,
673 .release = cciss_hba_release,
674};
675
563static struct attribute *cciss_dev_attrs[] = { 676static struct attribute *cciss_dev_attrs[] = {
564 &dev_attr_unique_id.attr, 677 &dev_attr_unique_id.attr,
565 &dev_attr_model.attr, 678 &dev_attr_model.attr,
566 &dev_attr_vendor.attr, 679 &dev_attr_vendor.attr,
567 &dev_attr_rev.attr, 680 &dev_attr_rev.attr,
681 &dev_attr_lunid.attr,
682 &dev_attr_raid_level.attr,
683 &dev_attr_usage_count.attr,
568 NULL 684 NULL
569}; 685};
570 686
@@ -580,12 +696,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = {
580static struct device_type cciss_dev_type = { 696static struct device_type cciss_dev_type = {
581 .name = "cciss_device", 697 .name = "cciss_device",
582 .groups = cciss_dev_attr_groups, 698 .groups = cciss_dev_attr_groups,
699 .release = cciss_device_release,
583}; 700};
584 701
585static struct bus_type cciss_bus_type = { 702static struct bus_type cciss_bus_type = {
586 .name = "cciss", 703 .name = "cciss",
587}; 704};
588 705
706/*
707 * cciss_hba_release is called when the reference count
708 * of h->dev goes to zero.
709 */
710static void cciss_hba_release(struct device *dev)
711{
712 /*
713 * nothing to do, but need this to avoid a warning
714 * about not having a release handler from lib/kref.c.
715 */
716}
589 717
590/* 718/*
591 * Initialize sysfs entry for each controller. This sets up and registers 719 * Initialize sysfs entry for each controller. This sets up and registers
@@ -609,6 +737,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
609static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) 737static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
610{ 738{
611 device_del(&h->dev); 739 device_del(&h->dev);
740 put_device(&h->dev); /* final put. */
741}
742
743/* cciss_device_release is called when the reference count
744 * of h->drv[x]dev goes to zero.
745 */
746static void cciss_device_release(struct device *dev)
747{
748 drive_info_struct *drv = to_drv(dev);
749 kfree(drv);
612} 750}
613 751
614/* 752/*
@@ -617,24 +755,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
617 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from 755 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
618 * /sys/block/cciss!c#d# to this entry. 756 * /sys/block/cciss!c#d# to this entry.
619 */ 757 */
620static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, 758static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
621 drive_info_struct *drv,
622 int drv_index) 759 int drv_index)
623{ 760{
624 device_initialize(&drv->dev); 761 struct device *dev;
625 drv->dev.type = &cciss_dev_type; 762
626 drv->dev.bus = &cciss_bus_type; 763 if (h->drv[drv_index]->device_initialized)
627 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); 764 return 0;
628 drv->dev.parent = &h->dev; 765
629 return device_add(&drv->dev); 766 dev = &h->drv[drv_index]->dev;
767 device_initialize(dev);
768 dev->type = &cciss_dev_type;
769 dev->bus = &cciss_bus_type;
770 dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
771 dev->parent = &h->dev;
772 h->drv[drv_index]->device_initialized = 1;
773 return device_add(dev);
630} 774}
631 775
632/* 776/*
633 * Remove sysfs entries for a logical drive. 777 * Remove sysfs entries for a logical drive.
634 */ 778 */
635static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) 779static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
780 int ctlr_exiting)
636{ 781{
637 device_del(&drv->dev); 782 struct device *dev = &h->drv[drv_index]->dev;
783
784 /* special case for c*d0, we only destroy it on controller exit */
785 if (drv_index == 0 && !ctlr_exiting)
786 return;
787
788 device_del(dev);
789 put_device(dev); /* the "final" put. */
790 h->drv[drv_index] = NULL;
638} 791}
639 792
640/* 793/*
@@ -751,7 +904,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
751 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); 904 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
752#endif /* CCISS_DEBUG */ 905#endif /* CCISS_DEBUG */
753 906
754 if (host->busy_initializing || drv->busy_configuring) 907 if (drv->busy_configuring)
755 return -EBUSY; 908 return -EBUSY;
756 /* 909 /*
757 * Root is allowed to open raw volume zero even if it's not configured 910 * Root is allowed to open raw volume zero even if it's not configured
@@ -767,7 +920,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
767 if (MINOR(bdev->bd_dev) & 0x0f) { 920 if (MINOR(bdev->bd_dev) & 0x0f) {
768 return -ENXIO; 921 return -ENXIO;
769 /* if it is, make sure we have a LUN ID */ 922 /* if it is, make sure we have a LUN ID */
770 } else if (drv->LunID == 0) { 923 } else if (memcmp(drv->LunID, CTLR_LUNID,
924 sizeof(drv->LunID))) {
771 return -ENXIO; 925 return -ENXIO;
772 } 926 }
773 } 927 }
@@ -1132,12 +1286,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1132 case CCISS_DEREGDISK: 1286 case CCISS_DEREGDISK:
1133 case CCISS_REGNEWD: 1287 case CCISS_REGNEWD:
1134 case CCISS_REVALIDVOLS: 1288 case CCISS_REVALIDVOLS:
1135 return rebuild_lun_table(host, 0); 1289 return rebuild_lun_table(host, 0, 1);
1136 1290
1137 case CCISS_GETLUNINFO:{ 1291 case CCISS_GETLUNINFO:{
1138 LogvolInfo_struct luninfo; 1292 LogvolInfo_struct luninfo;
1139 1293
1140 luninfo.LunID = drv->LunID; 1294 memcpy(&luninfo.LunID, drv->LunID,
1295 sizeof(luninfo.LunID));
1141 luninfo.num_opens = drv->usage_count; 1296 luninfo.num_opens = drv->usage_count;
1142 luninfo.num_parts = 0; 1297 luninfo.num_parts = 0;
1143 if (copy_to_user(argp, &luninfo, 1298 if (copy_to_user(argp, &luninfo,
@@ -1475,7 +1630,10 @@ static void cciss_check_queues(ctlr_info_t *h)
1475 /* make sure the disk has been added and the drive is real 1630 /* make sure the disk has been added and the drive is real
1476 * because this can be called from the middle of init_one. 1631 * because this can be called from the middle of init_one.
1477 */ 1632 */
1478 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) 1633 if (!h->drv[curr_queue])
1634 continue;
1635 if (!(h->drv[curr_queue]->queue) ||
1636 !(h->drv[curr_queue]->heads))
1479 continue; 1637 continue;
1480 blk_start_queue(h->gendisk[curr_queue]->queue); 1638 blk_start_queue(h->gendisk[curr_queue]->queue);
1481 1639
@@ -1532,13 +1690,11 @@ static void cciss_softirq_done(struct request *rq)
1532 spin_unlock_irqrestore(&h->lock, flags); 1690 spin_unlock_irqrestore(&h->lock, flags);
1533} 1691}
1534 1692
1535static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], 1693static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
1536 uint32_t log_unit) 1694 unsigned char scsi3addr[], uint32_t log_unit)
1537{ 1695{
1538 log_unit = h->drv[log_unit].LunID & 0x03fff; 1696 memcpy(scsi3addr, h->drv[log_unit]->LunID,
1539 memset(&scsi3addr[4], 0, 4); 1697 sizeof(h->drv[log_unit]->LunID));
1540 memcpy(&scsi3addr[0], &log_unit, 4);
1541 scsi3addr[3] |= 0x40;
1542} 1698}
1543 1699
1544/* This function gets the SCSI vendor, model, and revision of a logical drive 1700/* This function gets the SCSI vendor, model, and revision of a logical drive
@@ -1615,16 +1771,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1615 return; 1771 return;
1616} 1772}
1617 1773
1618static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1774/*
1775 * cciss_add_disk sets up the block device queue for a logical drive
1776 */
1777static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1619 int drv_index) 1778 int drv_index)
1620{ 1779{
1621 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1780 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1781 if (!disk->queue)
1782 goto init_queue_failure;
1622 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1783 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1623 disk->major = h->major; 1784 disk->major = h->major;
1624 disk->first_minor = drv_index << NWD_SHIFT; 1785 disk->first_minor = drv_index << NWD_SHIFT;
1625 disk->fops = &cciss_fops; 1786 disk->fops = &cciss_fops;
1626 disk->private_data = &h->drv[drv_index]; 1787 if (cciss_create_ld_sysfs_entry(h, drv_index))
1627 disk->driverfs_dev = &h->drv[drv_index].dev; 1788 goto cleanup_queue;
1789 disk->private_data = h->drv[drv_index];
1790 disk->driverfs_dev = &h->drv[drv_index]->dev;
1628 1791
1629 /* Set up queue information */ 1792 /* Set up queue information */
1630 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1793 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1642,14 +1805,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1642 disk->queue->queuedata = h; 1805 disk->queue->queuedata = h;
1643 1806
1644 blk_queue_logical_block_size(disk->queue, 1807 blk_queue_logical_block_size(disk->queue,
1645 h->drv[drv_index].block_size); 1808 h->drv[drv_index]->block_size);
1646 1809
1647 /* Make sure all queue data is written out before */ 1810 /* Make sure all queue data is written out before */
1648 /* setting h->drv[drv_index].queue, as setting this */ 1811 /* setting h->drv[drv_index]->queue, as setting this */
1649 /* allows the interrupt handler to start the queue */ 1812 /* allows the interrupt handler to start the queue */
1650 wmb(); 1813 wmb();
1651 h->drv[drv_index].queue = disk->queue; 1814 h->drv[drv_index]->queue = disk->queue;
1652 add_disk(disk); 1815 add_disk(disk);
1816 return 0;
1817
1818cleanup_queue:
1819 blk_cleanup_queue(disk->queue);
1820 disk->queue = NULL;
1821init_queue_failure:
1822 return -1;
1653} 1823}
1654 1824
1655/* This function will check the usage_count of the drive to be updated/added. 1825/* This function will check the usage_count of the drive to be updated/added.
@@ -1662,7 +1832,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1662 * is also the controller node. Any changes to disk 0 will show up on 1832 * is also the controller node. Any changes to disk 0 will show up on
1663 * the next reboot. 1833 * the next reboot.
1664 */ 1834 */
1665static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) 1835static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
1836 int via_ioctl)
1666{ 1837{
1667 ctlr_info_t *h = hba[ctlr]; 1838 ctlr_info_t *h = hba[ctlr];
1668 struct gendisk *disk; 1839 struct gendisk *disk;
@@ -1672,21 +1843,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1672 unsigned long flags = 0; 1843 unsigned long flags = 0;
1673 int ret = 0; 1844 int ret = 0;
1674 drive_info_struct *drvinfo; 1845 drive_info_struct *drvinfo;
1675 int was_only_controller_node;
1676 1846
1677 /* Get information about the disk and modify the driver structure */ 1847 /* Get information about the disk and modify the driver structure */
1678 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1848 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1679 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); 1849 drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
1680 if (inq_buff == NULL || drvinfo == NULL) 1850 if (inq_buff == NULL || drvinfo == NULL)
1681 goto mem_msg; 1851 goto mem_msg;
1682 1852
1683 /* See if we're trying to update the "controller node"
1684 * this will happen the when the first logical drive gets
1685 * created by ACU.
1686 */
1687 was_only_controller_node = (drv_index == 0 &&
1688 h->drv[0].raid_level == -1);
1689
1690 /* testing to see if 16-byte CDBs are already being used */ 1853 /* testing to see if 16-byte CDBs are already being used */
1691 if (h->cciss_read == CCISS_READ_16) { 1854 if (h->cciss_read == CCISS_READ_16) {
1692 cciss_read_capacity_16(h->ctlr, drv_index, 1, 1855 cciss_read_capacity_16(h->ctlr, drv_index, 1,
@@ -1719,16 +1882,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1719 drvinfo->model, drvinfo->rev); 1882 drvinfo->model, drvinfo->rev);
1720 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1883 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1721 sizeof(drvinfo->serial_no)); 1884 sizeof(drvinfo->serial_no));
1885 /* Save the lunid in case we deregister the disk, below. */
1886 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
1887 sizeof(drvinfo->LunID));
1722 1888
1723 /* Is it the same disk we already know, and nothing's changed? */ 1889 /* Is it the same disk we already know, and nothing's changed? */
1724 if (h->drv[drv_index].raid_level != -1 && 1890 if (h->drv[drv_index]->raid_level != -1 &&
1725 ((memcmp(drvinfo->serial_no, 1891 ((memcmp(drvinfo->serial_no,
1726 h->drv[drv_index].serial_no, 16) == 0) && 1892 h->drv[drv_index]->serial_no, 16) == 0) &&
1727 drvinfo->block_size == h->drv[drv_index].block_size && 1893 drvinfo->block_size == h->drv[drv_index]->block_size &&
1728 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && 1894 drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
1729 drvinfo->heads == h->drv[drv_index].heads && 1895 drvinfo->heads == h->drv[drv_index]->heads &&
1730 drvinfo->sectors == h->drv[drv_index].sectors && 1896 drvinfo->sectors == h->drv[drv_index]->sectors &&
1731 drvinfo->cylinders == h->drv[drv_index].cylinders)) 1897 drvinfo->cylinders == h->drv[drv_index]->cylinders))
1732 /* The disk is unchanged, nothing to update */ 1898 /* The disk is unchanged, nothing to update */
1733 goto freeret; 1899 goto freeret;
1734 1900
@@ -1738,18 +1904,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1738 * If the disk already exists then deregister it before proceeding 1904 * If the disk already exists then deregister it before proceeding
1739 * (unless it's the first disk (for the controller node). 1905 * (unless it's the first disk (for the controller node).
1740 */ 1906 */
1741 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { 1907 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
1742 printk(KERN_WARNING "disk %d has changed.\n", drv_index); 1908 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1743 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1909 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1744 h->drv[drv_index].busy_configuring = 1; 1910 h->drv[drv_index]->busy_configuring = 1;
1745 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1911 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1746 1912
1747 /* deregister_disk sets h->drv[drv_index].queue = NULL 1913 /* deregister_disk sets h->drv[drv_index]->queue = NULL
1748 * which keeps the interrupt handler from starting 1914 * which keeps the interrupt handler from starting
1749 * the queue. 1915 * the queue.
1750 */ 1916 */
1751 ret = deregister_disk(h, drv_index, 0); 1917 ret = deregister_disk(h, drv_index, 0, via_ioctl);
1752 h->drv[drv_index].busy_configuring = 0;
1753 } 1918 }
1754 1919
1755 /* If the disk is in use return */ 1920 /* If the disk is in use return */
@@ -1757,22 +1922,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1757 goto freeret; 1922 goto freeret;
1758 1923
1759 /* Save the new information from cciss_geometry_inquiry 1924 /* Save the new information from cciss_geometry_inquiry
1760 * and serial number inquiry. 1925 * and serial number inquiry. If the disk was deregistered
1926 * above, then h->drv[drv_index] will be NULL.
1761 */ 1927 */
1762 h->drv[drv_index].block_size = drvinfo->block_size; 1928 if (h->drv[drv_index] == NULL) {
1763 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; 1929 drvinfo->device_initialized = 0;
1764 h->drv[drv_index].heads = drvinfo->heads; 1930 h->drv[drv_index] = drvinfo;
1765 h->drv[drv_index].sectors = drvinfo->sectors; 1931 drvinfo = NULL; /* so it won't be freed below. */
1766 h->drv[drv_index].cylinders = drvinfo->cylinders; 1932 } else {
1767 h->drv[drv_index].raid_level = drvinfo->raid_level; 1933 /* special case for cxd0 */
1768 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1934 h->drv[drv_index]->block_size = drvinfo->block_size;
1769 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); 1935 h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
1770 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); 1936 h->drv[drv_index]->heads = drvinfo->heads;
1771 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); 1937 h->drv[drv_index]->sectors = drvinfo->sectors;
1938 h->drv[drv_index]->cylinders = drvinfo->cylinders;
1939 h->drv[drv_index]->raid_level = drvinfo->raid_level;
1940 memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
1941 memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
1942 VENDOR_LEN + 1);
1943 memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
1944 memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
1945 }
1772 1946
1773 ++h->num_luns; 1947 ++h->num_luns;
1774 disk = h->gendisk[drv_index]; 1948 disk = h->gendisk[drv_index];
1775 set_capacity(disk, h->drv[drv_index].nr_blocks); 1949 set_capacity(disk, h->drv[drv_index]->nr_blocks);
1776 1950
1777 /* If it's not disk 0 (drv_index != 0) 1951 /* If it's not disk 0 (drv_index != 0)
1778 * or if it was disk 0, but there was previously 1952 * or if it was disk 0, but there was previously
@@ -1780,8 +1954,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1780 * (raid_leve == -1) then we want to update the 1954 * (raid_leve == -1) then we want to update the
1781 * logical drive's information. 1955 * logical drive's information.
1782 */ 1956 */
1783 if (drv_index || first_time) 1957 if (drv_index || first_time) {
1784 cciss_add_disk(h, disk, drv_index); 1958 if (cciss_add_disk(h, disk, drv_index) != 0) {
1959 cciss_free_gendisk(h, drv_index);
1960 cciss_free_drive_info(h, drv_index);
1961 printk(KERN_WARNING "cciss:%d could not update "
1962 "disk %d\n", h->ctlr, drv_index);
1963 --h->num_luns;
1964 }
1965 }
1785 1966
1786freeret: 1967freeret:
1787 kfree(inq_buff); 1968 kfree(inq_buff);
@@ -1793,28 +1974,70 @@ mem_msg:
1793} 1974}
1794 1975
1795/* This function will find the first index of the controllers drive array 1976/* This function will find the first index of the controllers drive array
1796 * that has a -1 for the raid_level and will return that index. This is 1977 * that has a null drv pointer and allocate the drive info struct and
1797 * where new drives will be added. If the index to be returned is greater 1978 * will return that index This is where new drives will be added.
1798 * than the highest_lun index for the controller then highest_lun is set 1979 * If the index to be returned is greater than the highest_lun index for
1799 * to this new index. If there are no available indexes then -1 is returned. 1980 * the controller then highest_lun is set * to this new index.
1800 * "controller_node" is used to know if this is a real logical drive, or just 1981 * If there are no available indexes or if tha allocation fails, then -1
1801 * the controller node, which determines if this counts towards highest_lun. 1982 * is returned. * "controller_node" is used to know if this is a real
1983 * logical drive, or just the controller node, which determines if this
1984 * counts towards highest_lun.
1802 */ 1985 */
1803static int cciss_find_free_drive_index(int ctlr, int controller_node) 1986static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
1804{ 1987{
1805 int i; 1988 int i;
1989 drive_info_struct *drv;
1806 1990
1991 /* Search for an empty slot for our drive info */
1807 for (i = 0; i < CISS_MAX_LUN; i++) { 1992 for (i = 0; i < CISS_MAX_LUN; i++) {
1808 if (hba[ctlr]->drv[i].raid_level == -1) { 1993
1809 if (i > hba[ctlr]->highest_lun) 1994 /* if not cxd0 case, and it's occupied, skip it. */
1810 if (!controller_node) 1995 if (h->drv[i] && i != 0)
1811 hba[ctlr]->highest_lun = i; 1996 continue;
1997 /*
1998 * If it's cxd0 case, and drv is alloc'ed already, and a
1999 * disk is configured there, skip it.
2000 */
2001 if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
2002 continue;
2003
2004 /*
2005 * We've found an empty slot. Update highest_lun
2006 * provided this isn't just the fake cxd0 controller node.
2007 */
2008 if (i > h->highest_lun && !controller_node)
2009 h->highest_lun = i;
2010
2011 /* If adding a real disk at cxd0, and it's already alloc'ed */
2012 if (i == 0 && h->drv[i] != NULL)
1812 return i; 2013 return i;
1813 } 2014
2015 /*
2016 * Found an empty slot, not already alloc'ed. Allocate it.
2017 * Mark it with raid_level == -1, so we know it's new later on.
2018 */
2019 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
2020 if (!drv)
2021 return -1;
2022 drv->raid_level = -1; /* so we know it's new */
2023 h->drv[i] = drv;
2024 return i;
1814 } 2025 }
1815 return -1; 2026 return -1;
1816} 2027}
1817 2028
2029static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
2030{
2031 kfree(h->drv[drv_index]);
2032 h->drv[drv_index] = NULL;
2033}
2034
2035static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
2036{
2037 put_disk(h->gendisk[drv_index]);
2038 h->gendisk[drv_index] = NULL;
2039}
2040
1818/* cciss_add_gendisk finds a free hba[]->drv structure 2041/* cciss_add_gendisk finds a free hba[]->drv structure
1819 * and allocates a gendisk if needed, and sets the lunid 2042 * and allocates a gendisk if needed, and sets the lunid
1820 * in the drvinfo structure. It returns the index into 2043 * in the drvinfo structure. It returns the index into
@@ -1824,13 +2047,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node)
1824 * a means to talk to the controller in case no logical 2047 * a means to talk to the controller in case no logical
1825 * drives have yet been configured. 2048 * drives have yet been configured.
1826 */ 2049 */
1827static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) 2050static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
2051 int controller_node)
1828{ 2052{
1829 int drv_index; 2053 int drv_index;
1830 2054
1831 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); 2055 drv_index = cciss_alloc_drive_info(h, controller_node);
1832 if (drv_index == -1) 2056 if (drv_index == -1)
1833 return -1; 2057 return -1;
2058
1834 /*Check if the gendisk needs to be allocated */ 2059 /*Check if the gendisk needs to be allocated */
1835 if (!h->gendisk[drv_index]) { 2060 if (!h->gendisk[drv_index]) {
1836 h->gendisk[drv_index] = 2061 h->gendisk[drv_index] =
@@ -1839,23 +2064,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1839 printk(KERN_ERR "cciss%d: could not " 2064 printk(KERN_ERR "cciss%d: could not "
1840 "allocate a new disk %d\n", 2065 "allocate a new disk %d\n",
1841 h->ctlr, drv_index); 2066 h->ctlr, drv_index);
1842 return -1; 2067 goto err_free_drive_info;
1843 } 2068 }
1844 } 2069 }
1845 h->drv[drv_index].LunID = lunid; 2070 memcpy(h->drv[drv_index]->LunID, lunid,
1846 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) 2071 sizeof(h->drv[drv_index]->LunID));
2072 if (cciss_create_ld_sysfs_entry(h, drv_index))
1847 goto err_free_disk; 2073 goto err_free_disk;
1848
1849 /* Don't need to mark this busy because nobody */ 2074 /* Don't need to mark this busy because nobody */
1850 /* else knows about this disk yet to contend */ 2075 /* else knows about this disk yet to contend */
1851 /* for access to it. */ 2076 /* for access to it. */
1852 h->drv[drv_index].busy_configuring = 0; 2077 h->drv[drv_index]->busy_configuring = 0;
1853 wmb(); 2078 wmb();
1854 return drv_index; 2079 return drv_index;
1855 2080
1856err_free_disk: 2081err_free_disk:
1857 put_disk(h->gendisk[drv_index]); 2082 cciss_free_gendisk(h, drv_index);
1858 h->gendisk[drv_index] = NULL; 2083err_free_drive_info:
2084 cciss_free_drive_info(h, drv_index);
1859 return -1; 2085 return -1;
1860} 2086}
1861 2087
@@ -1872,21 +2098,25 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1872 if (h->gendisk[0] != NULL) /* already did this? Then bail. */ 2098 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1873 return; 2099 return;
1874 2100
1875 drv_index = cciss_add_gendisk(h, 0, 1); 2101 drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
1876 if (drv_index == -1) { 2102 if (drv_index == -1)
1877 printk(KERN_WARNING "cciss%d: could not " 2103 goto error;
1878 "add disk 0.\n", h->ctlr); 2104 h->drv[drv_index]->block_size = 512;
1879 return; 2105 h->drv[drv_index]->nr_blocks = 0;
1880 } 2106 h->drv[drv_index]->heads = 0;
1881 h->drv[drv_index].block_size = 512; 2107 h->drv[drv_index]->sectors = 0;
1882 h->drv[drv_index].nr_blocks = 0; 2108 h->drv[drv_index]->cylinders = 0;
1883 h->drv[drv_index].heads = 0; 2109 h->drv[drv_index]->raid_level = -1;
1884 h->drv[drv_index].sectors = 0; 2110 memset(h->drv[drv_index]->serial_no, 0, 16);
1885 h->drv[drv_index].cylinders = 0;
1886 h->drv[drv_index].raid_level = -1;
1887 memset(h->drv[drv_index].serial_no, 0, 16);
1888 disk = h->gendisk[drv_index]; 2111 disk = h->gendisk[drv_index];
1889 cciss_add_disk(h, disk, drv_index); 2112 if (cciss_add_disk(h, disk, drv_index) == 0)
2113 return;
2114 cciss_free_gendisk(h, drv_index);
2115 cciss_free_drive_info(h, drv_index);
2116error:
2117 printk(KERN_WARNING "cciss%d: could not "
2118 "add disk 0.\n", h->ctlr);
2119 return;
1890} 2120}
1891 2121
1892/* This function will add and remove logical drives from the Logical 2122/* This function will add and remove logical drives from the Logical
@@ -1897,7 +2127,8 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1897 * INPUT 2127 * INPUT
1898 * h = The controller to perform the operations on 2128 * h = The controller to perform the operations on
1899 */ 2129 */
1900static int rebuild_lun_table(ctlr_info_t *h, int first_time) 2130static int rebuild_lun_table(ctlr_info_t *h, int first_time,
2131 int via_ioctl)
1901{ 2132{
1902 int ctlr = h->ctlr; 2133 int ctlr = h->ctlr;
1903 int num_luns; 2134 int num_luns;
@@ -1907,7 +2138,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1907 int i; 2138 int i;
1908 int drv_found; 2139 int drv_found;
1909 int drv_index = 0; 2140 int drv_index = 0;
1910 __u32 lunid = 0; 2141 unsigned char lunid[8] = CTLR_LUNID;
1911 unsigned long flags; 2142 unsigned long flags;
1912 2143
1913 if (!capable(CAP_SYS_RAWIO)) 2144 if (!capable(CAP_SYS_RAWIO))
@@ -1960,13 +2191,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1960 drv_found = 0; 2191 drv_found = 0;
1961 2192
1962 /* skip holes in the array from already deleted drives */ 2193 /* skip holes in the array from already deleted drives */
1963 if (h->drv[i].raid_level == -1) 2194 if (h->drv[i] == NULL)
1964 continue; 2195 continue;
1965 2196
1966 for (j = 0; j < num_luns; j++) { 2197 for (j = 0; j < num_luns; j++) {
1967 memcpy(&lunid, &ld_buff->LUN[j][0], 4); 2198 memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
1968 lunid = le32_to_cpu(lunid); 2199 if (memcmp(h->drv[i]->LunID, lunid,
1969 if (h->drv[i].LunID == lunid) { 2200 sizeof(lunid)) == 0) {
1970 drv_found = 1; 2201 drv_found = 1;
1971 break; 2202 break;
1972 } 2203 }
@@ -1974,11 +2205,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1974 if (!drv_found) { 2205 if (!drv_found) {
1975 /* Deregister it from the OS, it's gone. */ 2206 /* Deregister it from the OS, it's gone. */
1976 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2207 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1977 h->drv[i].busy_configuring = 1; 2208 h->drv[i]->busy_configuring = 1;
1978 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2209 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1979 return_code = deregister_disk(h, i, 1); 2210 return_code = deregister_disk(h, i, 1, via_ioctl);
1980 cciss_destroy_ld_sysfs_entry(&h->drv[i]); 2211 if (h->drv[i] != NULL)
1981 h->drv[i].busy_configuring = 0; 2212 h->drv[i]->busy_configuring = 0;
1982 } 2213 }
1983 } 2214 }
1984 2215
@@ -1992,17 +2223,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1992 2223
1993 drv_found = 0; 2224 drv_found = 0;
1994 2225
1995 memcpy(&lunid, &ld_buff->LUN[i][0], 4); 2226 memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
1996 lunid = le32_to_cpu(lunid);
1997
1998 /* Find if the LUN is already in the drive array 2227 /* Find if the LUN is already in the drive array
1999 * of the driver. If so then update its info 2228 * of the driver. If so then update its info
2000 * if not in use. If it does not exist then find 2229 * if not in use. If it does not exist then find
2001 * the first free index and add it. 2230 * the first free index and add it.
2002 */ 2231 */
2003 for (j = 0; j <= h->highest_lun; j++) { 2232 for (j = 0; j <= h->highest_lun; j++) {
2004 if (h->drv[j].raid_level != -1 && 2233 if (h->drv[j] != NULL &&
2005 h->drv[j].LunID == lunid) { 2234 memcmp(h->drv[j]->LunID, lunid,
2235 sizeof(h->drv[j]->LunID)) == 0) {
2006 drv_index = j; 2236 drv_index = j;
2007 drv_found = 1; 2237 drv_found = 1;
2008 break; 2238 break;
@@ -2015,7 +2245,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
2015 if (drv_index == -1) 2245 if (drv_index == -1)
2016 goto freeret; 2246 goto freeret;
2017 } 2247 }
2018 cciss_update_drive_info(ctlr, drv_index, first_time); 2248 cciss_update_drive_info(ctlr, drv_index, first_time,
2249 via_ioctl);
2019 } /* end for */ 2250 } /* end for */
2020 2251
2021freeret: 2252freeret:
@@ -2032,6 +2263,25 @@ mem_msg:
2032 goto freeret; 2263 goto freeret;
2033} 2264}
2034 2265
2266static void cciss_clear_drive_info(drive_info_struct *drive_info)
2267{
2268 /* zero out the disk size info */
2269 drive_info->nr_blocks = 0;
2270 drive_info->block_size = 0;
2271 drive_info->heads = 0;
2272 drive_info->sectors = 0;
2273 drive_info->cylinders = 0;
2274 drive_info->raid_level = -1;
2275 memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
2276 memset(drive_info->model, 0, sizeof(drive_info->model));
2277 memset(drive_info->rev, 0, sizeof(drive_info->rev));
2278 memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
2279 /*
2280 * don't clear the LUNID though, we need to remember which
2281 * one this one is.
2282 */
2283}
2284
2035/* This function will deregister the disk and it's queue from the 2285/* This function will deregister the disk and it's queue from the
2036 * kernel. It must be called with the controller lock held and the 2286 * kernel. It must be called with the controller lock held and the
2037 * drv structures busy_configuring flag set. It's parameters are: 2287 * drv structures busy_configuring flag set. It's parameters are:
@@ -2046,43 +2296,48 @@ mem_msg:
2046 * the disk in preparation for re-adding it. In this case 2296 * the disk in preparation for re-adding it. In this case
2047 * the highest_lun should be left unchanged and the LunID 2297 * the highest_lun should be left unchanged and the LunID
2048 * should not be cleared. 2298 * should not be cleared.
2299 * via_ioctl
2300 * This indicates whether we've reached this path via ioctl.
2301 * This affects the maximum usage count allowed for c0d0 to be messed with.
2302 * If this path is reached via ioctl(), then the max_usage_count will
2303 * be 1, as the process calling ioctl() has got to have the device open.
2304 * If we get here via sysfs, then the max usage count will be zero.
2049*/ 2305*/
2050static int deregister_disk(ctlr_info_t *h, int drv_index, 2306static int deregister_disk(ctlr_info_t *h, int drv_index,
2051 int clear_all) 2307 int clear_all, int via_ioctl)
2052{ 2308{
2053 int i; 2309 int i;
2054 struct gendisk *disk; 2310 struct gendisk *disk;
2055 drive_info_struct *drv; 2311 drive_info_struct *drv;
2312 int recalculate_highest_lun;
2056 2313
2057 if (!capable(CAP_SYS_RAWIO)) 2314 if (!capable(CAP_SYS_RAWIO))
2058 return -EPERM; 2315 return -EPERM;
2059 2316
2060 drv = &h->drv[drv_index]; 2317 drv = h->drv[drv_index];
2061 disk = h->gendisk[drv_index]; 2318 disk = h->gendisk[drv_index];
2062 2319
2063 /* make sure logical volume is NOT is use */ 2320 /* make sure logical volume is NOT is use */
2064 if (clear_all || (h->gendisk[0] == disk)) { 2321 if (clear_all || (h->gendisk[0] == disk)) {
2065 if (drv->usage_count > 1) 2322 if (drv->usage_count > via_ioctl)
2066 return -EBUSY; 2323 return -EBUSY;
2067 } else if (drv->usage_count > 0) 2324 } else if (drv->usage_count > 0)
2068 return -EBUSY; 2325 return -EBUSY;
2069 2326
2327 recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
2328
2070 /* invalidate the devices and deregister the disk. If it is disk 2329 /* invalidate the devices and deregister the disk. If it is disk
2071 * zero do not deregister it but just zero out it's values. This 2330 * zero do not deregister it but just zero out it's values. This
2072 * allows us to delete disk zero but keep the controller registered. 2331 * allows us to delete disk zero but keep the controller registered.
2073 */ 2332 */
2074 if (h->gendisk[0] != disk) { 2333 if (h->gendisk[0] != disk) {
2075 struct request_queue *q = disk->queue; 2334 struct request_queue *q = disk->queue;
2076 if (disk->flags & GENHD_FL_UP) 2335 if (disk->flags & GENHD_FL_UP) {
2336 cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
2077 del_gendisk(disk); 2337 del_gendisk(disk);
2078 if (q) {
2079 blk_cleanup_queue(q);
2080 /* Set drv->queue to NULL so that we do not try
2081 * to call blk_start_queue on this queue in the
2082 * interrupt handler
2083 */
2084 drv->queue = NULL;
2085 } 2338 }
2339 if (q)
2340 blk_cleanup_queue(q);
2086 /* If clear_all is set then we are deleting the logical 2341 /* If clear_all is set then we are deleting the logical
2087 * drive, not just refreshing its info. For drives 2342 * drive, not just refreshing its info. For drives
2088 * other than disk 0 we will call put_disk. We do not 2343 * other than disk 0 we will call put_disk. We do not
@@ -2105,34 +2360,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
2105 } 2360 }
2106 } else { 2361 } else {
2107 set_capacity(disk, 0); 2362 set_capacity(disk, 0);
2363 cciss_clear_drive_info(drv);
2108 } 2364 }
2109 2365
2110 --h->num_luns; 2366 --h->num_luns;
2111 /* zero out the disk size info */
2112 drv->nr_blocks = 0;
2113 drv->block_size = 0;
2114 drv->heads = 0;
2115 drv->sectors = 0;
2116 drv->cylinders = 0;
2117 drv->raid_level = -1; /* This can be used as a flag variable to
2118 * indicate that this element of the drive
2119 * array is free.
2120 */
2121
2122 if (clear_all) {
2123 /* check to see if it was the last disk */
2124 if (drv == h->drv + h->highest_lun) {
2125 /* if so, find the new hightest lun */
2126 int i, newhighest = -1;
2127 for (i = 0; i <= h->highest_lun; i++) {
2128 /* if the disk has size > 0, it is available */
2129 if (h->drv[i].heads)
2130 newhighest = i;
2131 }
2132 h->highest_lun = newhighest;
2133 }
2134 2367
2135 drv->LunID = 0; 2368 /* if it was the last disk, find the new hightest lun */
2369 if (clear_all && recalculate_highest_lun) {
2370 int i, newhighest = -1;
2371 for (i = 0; i <= h->highest_lun; i++) {
2372 /* if the disk has size > 0, it is available */
2373 if (h->drv[i] && h->drv[i]->heads)
2374 newhighest = i;
2375 }
2376 h->highest_lun = newhighest;
2136 } 2377 }
2137 return 0; 2378 return 0;
2138} 2379}
@@ -2479,8 +2720,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
2479 } else { /* Get geometry failed */ 2720 } else { /* Get geometry failed */
2480 printk(KERN_WARNING "cciss: reading geometry failed\n"); 2721 printk(KERN_WARNING "cciss: reading geometry failed\n");
2481 } 2722 }
2482 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2483 drv->heads, drv->sectors, drv->cylinders);
2484} 2723}
2485 2724
2486static void 2725static void
@@ -2514,9 +2753,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2514 *total_size = 0; 2753 *total_size = 0;
2515 *block_size = BLOCK_SIZE; 2754 *block_size = BLOCK_SIZE;
2516 } 2755 }
2517 if (*total_size != 0)
2518 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2519 (unsigned long long)*total_size+1, *block_size);
2520 kfree(buf); 2756 kfree(buf);
2521} 2757}
2522 2758
@@ -2568,7 +2804,8 @@ static int cciss_revalidate(struct gendisk *disk)
2568 InquiryData_struct *inq_buff = NULL; 2804 InquiryData_struct *inq_buff = NULL;
2569 2805
2570 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2806 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2571 if (h->drv[logvol].LunID == drv->LunID) { 2807 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
2808 sizeof(drv->LunID)) == 0) {
2572 FOUND = 1; 2809 FOUND = 1;
2573 break; 2810 break;
2574 } 2811 }
@@ -3053,8 +3290,7 @@ static void do_cciss_request(struct request_queue *q)
3053 /* The first 2 bits are reserved for controller error reporting. */ 3290 /* The first 2 bits are reserved for controller error reporting. */
3054 c->Header.Tag.lower = (c->cmdindex << 3); 3291 c->Header.Tag.lower = (c->cmdindex << 3);
3055 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 3292 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
3056 c->Header.LUN.LogDev.VolId = drv->LunID; 3293 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
3057 c->Header.LUN.LogDev.Mode = 1;
3058 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 3294 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
3059 c->Request.Type.Type = TYPE_CMD; // It is a command. 3295 c->Request.Type.Type = TYPE_CMD; // It is a command.
3060 c->Request.Type.Attribute = ATTR_SIMPLE; 3296 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3232,20 +3468,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
3232 return IRQ_HANDLED; 3468 return IRQ_HANDLED;
3233} 3469}
3234 3470
3471/**
3472 * add_to_scan_list() - add controller to rescan queue
3473 * @h: Pointer to the controller.
3474 *
3475 * Adds the controller to the rescan queue if not already on the queue.
3476 *
3477 * returns 1 if added to the queue, 0 if skipped (could be on the
3478 * queue already, or the controller could be initializing or shutting
3479 * down).
3480 **/
3481static int add_to_scan_list(struct ctlr_info *h)
3482{
3483 struct ctlr_info *test_h;
3484 int found = 0;
3485 int ret = 0;
3486
3487 if (h->busy_initializing)
3488 return 0;
3489
3490 if (!mutex_trylock(&h->busy_shutting_down))
3491 return 0;
3492
3493 mutex_lock(&scan_mutex);
3494 list_for_each_entry(test_h, &scan_q, scan_list) {
3495 if (test_h == h) {
3496 found = 1;
3497 break;
3498 }
3499 }
3500 if (!found && !h->busy_scanning) {
3501 INIT_COMPLETION(h->scan_wait);
3502 list_add_tail(&h->scan_list, &scan_q);
3503 ret = 1;
3504 }
3505 mutex_unlock(&scan_mutex);
3506 mutex_unlock(&h->busy_shutting_down);
3507
3508 return ret;
3509}
3510
3511/**
3512 * remove_from_scan_list() - remove controller from rescan queue
3513 * @h: Pointer to the controller.
3514 *
3515 * Removes the controller from the rescan queue if present. Blocks if
3516 * the controller is currently conducting a rescan.
3517 **/
3518static void remove_from_scan_list(struct ctlr_info *h)
3519{
3520 struct ctlr_info *test_h, *tmp_h;
3521 int scanning = 0;
3522
3523 mutex_lock(&scan_mutex);
3524 list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
3525 if (test_h == h) {
3526 list_del(&h->scan_list);
3527 complete_all(&h->scan_wait);
3528 mutex_unlock(&scan_mutex);
3529 return;
3530 }
3531 }
3532 if (&h->busy_scanning)
3533 scanning = 0;
3534 mutex_unlock(&scan_mutex);
3535
3536 if (scanning)
3537 wait_for_completion(&h->scan_wait);
3538}
3539
3540/**
3541 * scan_thread() - kernel thread used to rescan controllers
3542 * @data: Ignored.
3543 *
3544 * A kernel thread used scan for drive topology changes on
3545 * controllers. The thread processes only one controller at a time
3546 * using a queue. Controllers are added to the queue using
3547 * add_to_scan_list() and removed from the queue either after done
3548 * processing or using remove_from_scan_list().
3549 *
3550 * returns 0.
3551 **/
3235static int scan_thread(void *data) 3552static int scan_thread(void *data)
3236{ 3553{
3237 ctlr_info_t *h = data; 3554 struct ctlr_info *h;
3238 int rc;
3239 DECLARE_COMPLETION_ONSTACK(wait);
3240 h->rescan_wait = &wait;
3241 3555
3242 for (;;) { 3556 while (1) {
3243 rc = wait_for_completion_interruptible(&wait); 3557 set_current_state(TASK_INTERRUPTIBLE);
3558 schedule();
3244 if (kthread_should_stop()) 3559 if (kthread_should_stop())
3245 break; 3560 break;
3246 if (!rc) 3561
3247 rebuild_lun_table(h, 0); 3562 while (1) {
3563 mutex_lock(&scan_mutex);
3564 if (list_empty(&scan_q)) {
3565 mutex_unlock(&scan_mutex);
3566 break;
3567 }
3568
3569 h = list_entry(scan_q.next,
3570 struct ctlr_info,
3571 scan_list);
3572 list_del(&h->scan_list);
3573 h->busy_scanning = 1;
3574 mutex_unlock(&scan_mutex);
3575
3576 if (h) {
3577 rebuild_lun_table(h, 0, 0);
3578 complete_all(&h->scan_wait);
3579 mutex_lock(&scan_mutex);
3580 h->busy_scanning = 0;
3581 mutex_unlock(&scan_mutex);
3582 }
3583 }
3248 } 3584 }
3585
3249 return 0; 3586 return 0;
3250} 3587}
3251 3588
@@ -3268,8 +3605,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
3268 case REPORT_LUNS_CHANGED: 3605 case REPORT_LUNS_CHANGED:
3269 printk(KERN_WARNING "cciss%d: report LUN data " 3606 printk(KERN_WARNING "cciss%d: report LUN data "
3270 "changed\n", h->ctlr); 3607 "changed\n", h->ctlr);
3271 if (h->rescan_wait) 3608 add_to_scan_list(h);
3272 complete(h->rescan_wait); 3609 wake_up_process(cciss_scan_thread);
3273 return 1; 3610 return 1;
3274 break; 3611 break;
3275 case POWER_OR_RESET: 3612 case POWER_OR_RESET:
@@ -3422,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3422 __u64 cfg_offset; 3759 __u64 cfg_offset;
3423 __u32 cfg_base_addr; 3760 __u32 cfg_base_addr;
3424 __u64 cfg_base_addr_index; 3761 __u64 cfg_base_addr_index;
3425 int i, err; 3762 int i, prod_index, err;
3763
3764 subsystem_vendor_id = pdev->subsystem_vendor;
3765 subsystem_device_id = pdev->subsystem_device;
3766 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3767 subsystem_vendor_id);
3768
3769 for (i = 0; i < ARRAY_SIZE(products); i++) {
3770 /* Stand aside for hpsa driver on request */
3771 if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
3772 return -ENODEV;
3773 if (board_id == products[i].board_id)
3774 break;
3775 }
3776 prod_index = i;
3777 if (prod_index == ARRAY_SIZE(products)) {
3778 dev_warn(&pdev->dev,
3779 "unrecognized board ID: 0x%08lx, ignoring.\n",
3780 (unsigned long) board_id);
3781 return -ENODEV;
3782 }
3426 3783
3427 /* check to see if controller has been disabled */ 3784 /* check to see if controller has been disabled */
3428 /* BEFORE trying to enable it */ 3785 /* BEFORE trying to enable it */
@@ -3446,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3446 return err; 3803 return err;
3447 } 3804 }
3448 3805
3449 subsystem_vendor_id = pdev->subsystem_vendor;
3450 subsystem_device_id = pdev->subsystem_device;
3451 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3452 subsystem_vendor_id);
3453
3454#ifdef CCISS_DEBUG 3806#ifdef CCISS_DEBUG
3455 printk("command = %x\n", command); 3807 printk("command = %x\n", command);
3456 printk("irq = %x\n", pdev->irq); 3808 printk("irq = %x\n", pdev->irq);
@@ -3489,7 +3841,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3489 if (scratchpad == CCISS_FIRMWARE_READY) 3841 if (scratchpad == CCISS_FIRMWARE_READY)
3490 break; 3842 break;
3491 set_current_state(TASK_INTERRUPTIBLE); 3843 set_current_state(TASK_INTERRUPTIBLE);
3492 schedule_timeout(HZ / 10); /* wait 100ms */ 3844 schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */
3493 } 3845 }
3494 if (scratchpad != CCISS_FIRMWARE_READY) { 3846 if (scratchpad != CCISS_FIRMWARE_READY) {
3495 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); 3847 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
@@ -3536,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3536 * leave a little room for ioctl calls. 3888 * leave a little room for ioctl calls.
3537 */ 3889 */
3538 c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 3890 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3539 for (i = 0; i < ARRAY_SIZE(products); i++) { 3891 c->product_name = products[prod_index].product_name;
3540 if (board_id == products[i].board_id) { 3892 c->access = *(products[prod_index].access);
3541 c->product_name = products[i].product_name; 3893 c->nr_cmds = c->max_commands - 4;
3542 c->access = *(products[i].access);
3543 c->nr_cmds = c->max_commands - 4;
3544 break;
3545 }
3546 }
3547 if ((readb(&c->cfgtable->Signature[0]) != 'C') || 3894 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3548 (readb(&c->cfgtable->Signature[1]) != 'I') || 3895 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3549 (readb(&c->cfgtable->Signature[2]) != 'S') || 3896 (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -3552,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3552 err = -ENODEV; 3899 err = -ENODEV;
3553 goto err_out_free_res; 3900 goto err_out_free_res;
3554 } 3901 }
3555 /* We didn't find the controller in our list. We know the
3556 * signature is valid. If it's an HP device let's try to
3557 * bind to the device and fire it up. Otherwise we bail.
3558 */
3559 if (i == ARRAY_SIZE(products)) {
3560 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3561 c->product_name = products[i-1].product_name;
3562 c->access = *(products[i-1].access);
3563 c->nr_cmds = c->max_commands - 4;
3564 printk(KERN_WARNING "cciss: This is an unknown "
3565 "Smart Array controller.\n"
3566 "cciss: Please update to the latest driver "
3567 "available from www.hp.com.\n");
3568 } else {
3569 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3570 " to access the Smart Array controller %08lx\n"
3571 , (unsigned long)board_id);
3572 err = -ENODEV;
3573 goto err_out_free_res;
3574 }
3575 }
3576#ifdef CONFIG_X86 3902#ifdef CONFIG_X86
3577 { 3903 {
3578 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3904 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -3615,7 +3941,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3615 break; 3941 break;
3616 /* delay and try again */ 3942 /* delay and try again */
3617 set_current_state(TASK_INTERRUPTIBLE); 3943 set_current_state(TASK_INTERRUPTIBLE);
3618 schedule_timeout(10); 3944 schedule_timeout(msecs_to_jiffies(1));
3619 } 3945 }
3620 3946
3621#ifdef CCISS_DEBUG 3947#ifdef CCISS_DEBUG
@@ -3669,15 +3995,16 @@ Enomem:
3669 return -1; 3995 return -1;
3670} 3996}
3671 3997
3672static void free_hba(int i) 3998static void free_hba(int n)
3673{ 3999{
3674 ctlr_info_t *p = hba[i]; 4000 ctlr_info_t *h = hba[n];
3675 int n; 4001 int i;
3676 4002
3677 hba[i] = NULL; 4003 hba[n] = NULL;
3678 for (n = 0; n < CISS_MAX_LUN; n++) 4004 for (i = 0; i < h->highest_lun + 1; i++)
3679 put_disk(p->gendisk[n]); 4005 if (h->gendisk[i] != NULL)
3680 kfree(p); 4006 put_disk(h->gendisk[i]);
4007 kfree(h);
3681} 4008}
3682 4009
3683/* Send a message CDB to the firmware. */ 4010/* Send a message CDB to the firmware. */
@@ -3918,14 +4245,17 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3918 hba[i]->busy_initializing = 1; 4245 hba[i]->busy_initializing = 1;
3919 INIT_HLIST_HEAD(&hba[i]->cmpQ); 4246 INIT_HLIST_HEAD(&hba[i]->cmpQ);
3920 INIT_HLIST_HEAD(&hba[i]->reqQ); 4247 INIT_HLIST_HEAD(&hba[i]->reqQ);
4248 mutex_init(&hba[i]->busy_shutting_down);
3921 4249
3922 if (cciss_pci_init(hba[i], pdev) != 0) 4250 if (cciss_pci_init(hba[i], pdev) != 0)
3923 goto clean0; 4251 goto clean_no_release_regions;
3924 4252
3925 sprintf(hba[i]->devname, "cciss%d", i); 4253 sprintf(hba[i]->devname, "cciss%d", i);
3926 hba[i]->ctlr = i; 4254 hba[i]->ctlr = i;
3927 hba[i]->pdev = pdev; 4255 hba[i]->pdev = pdev;
3928 4256
4257 init_completion(&hba[i]->scan_wait);
4258
3929 if (cciss_create_hba_sysfs_entry(hba[i])) 4259 if (cciss_create_hba_sysfs_entry(hba[i]))
3930 goto clean0; 4260 goto clean0;
3931 4261
@@ -4001,8 +4331,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4001 hba[i]->num_luns = 0; 4331 hba[i]->num_luns = 0;
4002 hba[i]->highest_lun = -1; 4332 hba[i]->highest_lun = -1;
4003 for (j = 0; j < CISS_MAX_LUN; j++) { 4333 for (j = 0; j < CISS_MAX_LUN; j++) {
4004 hba[i]->drv[j].raid_level = -1; 4334 hba[i]->drv[j] = NULL;
4005 hba[i]->drv[j].queue = NULL;
4006 hba[i]->gendisk[j] = NULL; 4335 hba[i]->gendisk[j] = NULL;
4007 } 4336 }
4008 4337
@@ -4035,14 +4364,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4035 4364
4036 hba[i]->cciss_max_sectors = 2048; 4365 hba[i]->cciss_max_sectors = 2048;
4037 4366
4367 rebuild_lun_table(hba[i], 1, 0);
4038 hba[i]->busy_initializing = 0; 4368 hba[i]->busy_initializing = 0;
4039
4040 rebuild_lun_table(hba[i], 1);
4041 hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
4042 "cciss_scan%02d", i);
4043 if (IS_ERR(hba[i]->cciss_scan_thread))
4044 return PTR_ERR(hba[i]->cciss_scan_thread);
4045
4046 return 1; 4369 return 1;
4047 4370
4048clean4: 4371clean4:
@@ -4062,18 +4385,14 @@ clean2:
4062clean1: 4385clean1:
4063 cciss_destroy_hba_sysfs_entry(hba[i]); 4386 cciss_destroy_hba_sysfs_entry(hba[i]);
4064clean0: 4387clean0:
4388 pci_release_regions(pdev);
4389clean_no_release_regions:
4065 hba[i]->busy_initializing = 0; 4390 hba[i]->busy_initializing = 0;
4066 /* cleanup any queues that may have been initialized */ 4391
4067 for (j=0; j <= hba[i]->highest_lun; j++){
4068 drive_info_struct *drv = &(hba[i]->drv[j]);
4069 if (drv->queue)
4070 blk_cleanup_queue(drv->queue);
4071 }
4072 /* 4392 /*
4073 * Deliberately omit pci_disable_device(): it does something nasty to 4393 * Deliberately omit pci_disable_device(): it does something nasty to
4074 * Smart Array controllers that pci_enable_device does not undo 4394 * Smart Array controllers that pci_enable_device does not undo
4075 */ 4395 */
4076 pci_release_regions(pdev);
4077 pci_set_drvdata(pdev, NULL); 4396 pci_set_drvdata(pdev, NULL);
4078 free_hba(i); 4397 free_hba(i);
4079 return -1; 4398 return -1;
@@ -4125,8 +4444,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4125 return; 4444 return;
4126 } 4445 }
4127 4446
4128 kthread_stop(hba[i]->cciss_scan_thread); 4447 mutex_lock(&hba[i]->busy_shutting_down);
4129 4448
4449 remove_from_scan_list(hba[i]);
4130 remove_proc_entry(hba[i]->devname, proc_cciss); 4450 remove_proc_entry(hba[i]->devname, proc_cciss);
4131 unregister_blkdev(hba[i]->major, hba[i]->devname); 4451 unregister_blkdev(hba[i]->major, hba[i]->devname);
4132 4452
@@ -4136,8 +4456,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4136 if (disk) { 4456 if (disk) {
4137 struct request_queue *q = disk->queue; 4457 struct request_queue *q = disk->queue;
4138 4458
4139 if (disk->flags & GENHD_FL_UP) 4459 if (disk->flags & GENHD_FL_UP) {
4460 cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
4140 del_gendisk(disk); 4461 del_gendisk(disk);
4462 }
4141 if (q) 4463 if (q)
4142 blk_cleanup_queue(q); 4464 blk_cleanup_queue(q);
4143 } 4465 }
@@ -4170,6 +4492,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4170 pci_release_regions(pdev); 4492 pci_release_regions(pdev);
4171 pci_set_drvdata(pdev, NULL); 4493 pci_set_drvdata(pdev, NULL);
4172 cciss_destroy_hba_sysfs_entry(hba[i]); 4494 cciss_destroy_hba_sysfs_entry(hba[i]);
4495 mutex_unlock(&hba[i]->busy_shutting_down);
4173 free_hba(i); 4496 free_hba(i);
4174} 4497}
4175 4498
@@ -4202,15 +4525,25 @@ static int __init cciss_init(void)
4202 if (err) 4525 if (err)
4203 return err; 4526 return err;
4204 4527
4528 /* Start the scan thread */
4529 cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
4530 if (IS_ERR(cciss_scan_thread)) {
4531 err = PTR_ERR(cciss_scan_thread);
4532 goto err_bus_unregister;
4533 }
4534
4205 /* Register for our PCI devices */ 4535 /* Register for our PCI devices */
4206 err = pci_register_driver(&cciss_pci_driver); 4536 err = pci_register_driver(&cciss_pci_driver);
4207 if (err) 4537 if (err)
4208 goto err_bus_register; 4538 goto err_thread_stop;
4209 4539
4210 return 0; 4540 return err;
4211 4541
4212err_bus_register: 4542err_thread_stop:
4543 kthread_stop(cciss_scan_thread);
4544err_bus_unregister:
4213 bus_unregister(&cciss_bus_type); 4545 bus_unregister(&cciss_bus_type);
4546
4214 return err; 4547 return err;
4215} 4548}
4216 4549
@@ -4227,6 +4560,7 @@ static void __exit cciss_cleanup(void)
4227 cciss_remove_one(hba[i]->pdev); 4560 cciss_remove_one(hba[i]->pdev);
4228 } 4561 }
4229 } 4562 }
4563 kthread_stop(cciss_scan_thread);
4230 remove_proc_entry("driver/cciss", NULL); 4564 remove_proc_entry("driver/cciss", NULL);
4231 bus_unregister(&cciss_bus_type); 4565 bus_unregister(&cciss_bus_type);
4232} 4566}
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 06a5db25b298..31524cf42c77 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -2,6 +2,7 @@
2#define CCISS_H 2#define CCISS_H
3 3
4#include <linux/genhd.h> 4#include <linux/genhd.h>
5#include <linux/mutex.h>
5 6
6#include "cciss_cmd.h" 7#include "cciss_cmd.h"
7 8
@@ -29,7 +30,7 @@ struct access_method {
29}; 30};
30typedef struct _drive_info_struct 31typedef struct _drive_info_struct
31{ 32{
32 __u32 LunID; 33 unsigned char LunID[8];
33 int usage_count; 34 int usage_count;
34 struct request_queue *queue; 35 struct request_queue *queue;
35 sector_t nr_blocks; 36 sector_t nr_blocks;
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct
51 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ 52 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
52 char model[MODEL_LEN + 1]; /* SCSI model string */ 53 char model[MODEL_LEN + 1]; /* SCSI model string */
53 char rev[REV_LEN + 1]; /* SCSI revision string */ 54 char rev[REV_LEN + 1]; /* SCSI revision string */
55 char device_initialized; /* indicates whether dev is initialized */
54} drive_info_struct; 56} drive_info_struct;
55 57
56struct ctlr_info 58struct ctlr_info
@@ -86,7 +88,7 @@ struct ctlr_info
86 BYTE cciss_read_capacity; 88 BYTE cciss_read_capacity;
87 89
88 // information about each logical volume 90 // information about each logical volume
89 drive_info_struct drv[CISS_MAX_LUN]; 91 drive_info_struct *drv[CISS_MAX_LUN];
90 92
91 struct access_method access; 93 struct access_method access;
92 94
@@ -108,6 +110,8 @@ struct ctlr_info
108 int nr_frees; 110 int nr_frees;
109 int busy_configuring; 111 int busy_configuring;
110 int busy_initializing; 112 int busy_initializing;
113 int busy_scanning;
114 struct mutex busy_shutting_down;
111 115
112 /* This element holds the zero based queue number of the last 116 /* This element holds the zero based queue number of the last
113 * queue to be started. It is used for fairness. 117 * queue to be started. It is used for fairness.
@@ -122,8 +126,8 @@ struct ctlr_info
122 /* and saved for later processing */ 126 /* and saved for later processing */
123#endif 127#endif
124 unsigned char alive; 128 unsigned char alive;
125 struct completion *rescan_wait; 129 struct list_head scan_list;
126 struct task_struct *cciss_scan_thread; 130 struct completion scan_wait;
127 struct device dev; 131 struct device dev;
128}; 132};
129 133
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b82d438e2607..6422651ec364 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -32,6 +32,7 @@
32#include <linux/blkpg.h> 32#include <linux/blkpg.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <linux/hdreg.h> 37#include <linux/hdreg.h>
37#include <linux/spinlock.h> 38#include <linux/spinlock.h>
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
177 178
178#ifdef CONFIG_PROC_FS 179#ifdef CONFIG_PROC_FS
179static void ida_procinit(int i); 180static void ida_procinit(int i);
180static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
181#else 181#else
182static void ida_procinit(int i) {} 182static void ida_procinit(int i) {}
183#endif 183#endif
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = {
206#ifdef CONFIG_PROC_FS 206#ifdef CONFIG_PROC_FS
207 207
208static struct proc_dir_entry *proc_array; 208static struct proc_dir_entry *proc_array;
209static const struct file_operations ida_proc_fops;
209 210
210/* 211/*
211 * Get us a file in /proc/array that says something about each controller. 212 * Get us a file in /proc/array that says something about each controller.
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i)
218 if (!proc_array) return; 219 if (!proc_array) return;
219 } 220 }
220 221
221 create_proc_read_entry(hba[i]->devname, 0, proc_array, 222 proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
222 ida_proc_get_info, hba[i]);
223} 223}
224 224
225/* 225/*
226 * Report information about this controller. 226 * Report information about this controller.
227 */ 227 */
228static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 228static int ida_proc_show(struct seq_file *m, void *v)
229{ 229{
230 off_t pos = 0; 230 int i, ctlr;
231 off_t len = 0; 231 ctlr_info_t *h = (ctlr_info_t*)m->private;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv; 232 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES 233#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c; 234 cmdlist_t *c;
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
238#endif 236#endif
239 237
240 ctlr = h->ctlr; 238 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n" 239 seq_printf(m, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n" 240 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n" 241 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n" 242 " Controller Sig: 0x%08lx\n"
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
258 h->log_drives, h->phys_drives, 256 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit); 257 h->Qdepth, h->maxQsinceinit);
260 258
261 pos += size; len += size; 259 seq_puts(m, "Logical Drive Info:\n");
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265 260
266 for(i=0; i<h->log_drives; i++) { 261 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i]; 262 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", 263 seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks); 264 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 } 265 }
272 266
273#ifdef CPQ_PROC_PRINT_QUEUES 267#ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 268 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n"); 269 seq_puts(m, "\nCurrent Queues:\n");
276 pos += size; len += size;
277 270
278 c = h->reqQ; 271 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; 272 seq_printf(m, "reqQ = %p", c);
280 if (c) c=c->next; 273 if (c) c=c->next;
281 while(c && c != h->reqQ) { 274 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c); 275 seq_printf(m, "->%p", c);
283 pos += size; len += size;
284 c=c->next; 276 c=c->next;
285 } 277 }
286 278
287 c = h->cmpQ; 279 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; 280 seq_printf(m, "\ncmpQ = %p", c);
289 if (c) c=c->next; 281 if (c) c=c->next;
290 while(c && c != h->cmpQ) { 282 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c); 283 seq_printf(m, "->%p", c);
292 pos += size; len += size;
293 c=c->next; 284 c=c->next;
294 } 285 }
295 286
296 size = sprintf(buffer+len, "\n"); pos += size; len += size; 287 seq_putc(m, '\n');
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 288 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298#endif 289#endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", 290 seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees); 291 h->nr_allocs, h->nr_frees);
301 pos += size; len += size; 292 return 0;
302 293}
303 *eof = 1; 294
304 *start = buffer+offset; 295static int ida_proc_open(struct inode *inode, struct file *file)
305 len -= offset; 296{
306 if (len>length) 297 return single_open(file, ida_proc_show, PDE(inode)->data);
307 len = length;
308 return len;
309} 298}
299
300static const struct file_operations ida_proc_fops = {
301 .owner = THIS_MODULE,
302 .open = ida_proc_open,
303 .read = seq_read,
304 .llseek = seq_lseek,
305 .release = single_release,
306};
310#endif /* CONFIG_PROC_FS */ 307#endif /* CONFIG_PROC_FS */
311 308
312module_param_array(eisa, int, NULL, 0); 309module_param_array(eisa, int, NULL, 0);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index edda9ea7c626..bd112c8c7bcd 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -949,7 +949,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
949 lo->lo_state = Lo_unbound; 949 lo->lo_state = Lo_unbound;
950 /* This is safe: open() is still holding a reference. */ 950 /* This is safe: open() is still holding a reference. */
951 module_put(THIS_MODULE); 951 module_put(THIS_MODULE);
952 if (max_part > 0) 952 if (max_part > 0 && bdev)
953 ioctl_by_bdev(bdev, BLKRRPART, 0); 953 ioctl_by_bdev(bdev, BLKRRPART, 0);
954 mutex_unlock(&lo->lo_ctl_mutex); 954 mutex_unlock(&lo->lo_ctl_mutex);
955 /* 955 /*
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 43f19389647a..51042f0ba7e1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -3,7 +3,6 @@
3#include <linux/blkdev.h> 3#include <linux/blkdev.h>
4#include <linux/hdreg.h> 4#include <linux/hdreg.h>
5#include <linux/virtio.h> 5#include <linux/virtio.h>
6#include <linux/virtio_ids.h>
7#include <linux/virtio_blk.h> 6#include <linux/virtio_blk.h>
8#include <linux/scatterlist.h> 7#include <linux/scatterlist.h>
9 8
@@ -183,34 +182,6 @@ static void do_virtblk_request(struct request_queue *q)
183 vblk->vq->vq_ops->kick(vblk->vq); 182 vblk->vq->vq_ops->kick(vblk->vq);
184} 183}
185 184
186/* return ATA identify data
187 */
188static int virtblk_identify(struct gendisk *disk, void *argp)
189{
190 struct virtio_blk *vblk = disk->private_data;
191 void *opaque;
192 int err = -ENOMEM;
193
194 opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
195 if (!opaque)
196 goto out;
197
198 err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
199 offsetof(struct virtio_blk_config, identify), opaque,
200 VIRTIO_BLK_ID_BYTES);
201
202 if (err)
203 goto out_kfree;
204
205 if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
206 err = -EFAULT;
207
208out_kfree:
209 kfree(opaque);
210out:
211 return err;
212}
213
214static void virtblk_prepare_flush(struct request_queue *q, struct request *req) 185static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
215{ 186{
216 req->cmd_type = REQ_TYPE_LINUX_BLOCK; 187 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
@@ -222,10 +193,6 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
222{ 193{
223 struct gendisk *disk = bdev->bd_disk; 194 struct gendisk *disk = bdev->bd_disk;
224 struct virtio_blk *vblk = disk->private_data; 195 struct virtio_blk *vblk = disk->private_data;
225 void __user *argp = (void __user *)data;
226
227 if (cmd == HDIO_GET_IDENTITY)
228 return virtblk_identify(disk, argp);
229 196
230 /* 197 /*
231 * Only allow the generic SCSI ioctls if the host can support it. 198 * Only allow the generic SCSI ioctls if the host can support it.
@@ -233,7 +200,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
233 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) 200 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
234 return -ENOTTY; 201 return -ENOTTY;
235 202
236 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); 203 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
204 (void __user *)data);
237} 205}
238 206
239/* We provide getgeo only to please some old bootloader/partitioning tools */ 207/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -332,7 +300,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
332 } 300 }
333 301
334 vblk->disk->queue->queuedata = vblk; 302 vblk->disk->queue->queuedata = vblk;
335 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
336 303
337 if (index < 26) { 304 if (index < 26) {
338 sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); 305 sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -445,7 +412,7 @@ static struct virtio_device_id id_table[] = {
445static unsigned int features[] = { 412static unsigned int features[] = {
446 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 413 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
447 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 414 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
448 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH 415 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
449}; 416};
450 417
451/* 418/*
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7ba91aa3fe8b..44bc8bbabf54 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -591,6 +591,7 @@ static int btusb_close(struct hci_dev *hdev)
591 return 0; 591 return 0;
592 592
593 cancel_work_sync(&data->work); 593 cancel_work_sync(&data->work);
594 cancel_work_sync(&data->waker);
594 595
595 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 596 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
596 clear_bit(BTUSB_BULK_RUNNING, &data->flags); 597 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
@@ -599,11 +600,13 @@ static int btusb_close(struct hci_dev *hdev)
599 btusb_stop_traffic(data); 600 btusb_stop_traffic(data);
600 err = usb_autopm_get_interface(data->intf); 601 err = usb_autopm_get_interface(data->intf);
601 if (err < 0) 602 if (err < 0)
602 return 0; 603 goto failed;
603 604
604 data->intf->needs_remote_wakeup = 0; 605 data->intf->needs_remote_wakeup = 0;
605 usb_autopm_put_interface(data->intf); 606 usb_autopm_put_interface(data->intf);
606 607
608failed:
609 usb_scuttle_anchored_urbs(&data->deferred);
607 return 0; 610 return 0;
608} 611}
609 612
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 08a6f50ae791..6aad99ec4e0f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -323,7 +323,7 @@ config SPECIALIX
323 323
324config SX 324config SX
325 tristate "Specialix SX (and SI) card support" 325 tristate "Specialix SX (and SI) card support"
326 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) 326 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) && BROKEN
327 help 327 help
328 This is a driver for the SX and SI multiport serial cards. 328 This is a driver for the SX and SI multiport serial cards.
329 Please read the file <file:Documentation/serial/sx.txt> for details. 329 Please read the file <file:Documentation/serial/sx.txt> for details.
@@ -334,7 +334,7 @@ config SX
334 334
335config RIO 335config RIO
336 tristate "Specialix RIO system support" 336 tristate "Specialix RIO system support"
337 depends on SERIAL_NONSTANDARD 337 depends on SERIAL_NONSTANDARD && BROKEN
338 help 338 help
339 This is a driver for the Specialix RIO, a smart serial card which 339 This is a driver for the Specialix RIO, a smart serial card which
340 drives an outboard box that can support up to 128 ports. Product 340 drives an outboard box that can support up to 128 ports. Product
@@ -395,7 +395,7 @@ config NOZOMI
395 395
396config A2232 396config A2232
397 tristate "Commodore A2232 serial support (EXPERIMENTAL)" 397 tristate "Commodore A2232 serial support (EXPERIMENTAL)"
398 depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP 398 depends on EXPERIMENTAL && ZORRO && BROKEN
399 ---help--- 399 ---help---
400 This option supports the 2232 7-port serial card shipped with the 400 This option supports the 2232 7-port serial card shipped with the
401 Amiga 2000 and other Zorro-bus machines, dating from 1989. At 401 Amiga 2000 and other Zorro-bus machines, dating from 1989. At
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index d6f36c004d9b..870f12cfed93 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -131,7 +131,7 @@ struct agp_bridge_driver {
131struct agp_bridge_data { 131struct agp_bridge_data {
132 const struct agp_version *version; 132 const struct agp_version *version;
133 const struct agp_bridge_driver *driver; 133 const struct agp_bridge_driver *driver;
134 struct vm_operations_struct *vm_ops; 134 const struct vm_operations_struct *vm_ops;
135 void *previous_size; 135 void *previous_size;
136 void *current_size; 136 void *current_size;
137 void *dev_private_data; 137 void *dev_private_data;
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 5ea4da8e9954..dd84af4d4f7e 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -40,7 +40,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] =
40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ 40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
41}; 41};
42 42
43struct vm_operations_struct alpha_core_agp_vm_ops = { 43static const struct vm_operations_struct alpha_core_agp_vm_ops = {
44 .fault = alpha_core_agp_vm_fault, 44 .fault = alpha_core_agp_vm_fault,
45}; 45};
46 46
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4068467ce7b9..10e1f0390bbb 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1161,12 +1161,6 @@ static int intel_i915_configure(void)
1161 1161
1162 intel_i9xx_setup_flush(); 1162 intel_i9xx_setup_flush();
1163 1163
1164#ifdef USE_PCI_DMA_API
1165 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
1166 dev_err(&intel_private.pcidev->dev,
1167 "set gfx device dma mask 36bit failed!\n");
1168#endif
1169
1170 return 0; 1164 return 0;
1171} 1165}
1172 1166
@@ -2456,6 +2450,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2456 &bridge->mode); 2450 &bridge->mode);
2457 } 2451 }
2458 2452
2453 if (bridge->driver->mask_memory == intel_i965_mask_memory)
2454 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
2455 dev_err(&intel_private.pcidev->dev,
2456 "set gfx device dma mask 36bit failed!\n");
2457
2459 pci_set_drvdata(pdev, bridge); 2458 pci_set_drvdata(pdev, bridge);
2460 return agp_add_bridge(bridge); 2459 return agp_add_bridge(bridge);
2461} 2460}
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 60ab75104da9..1c129211302d 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
217 .configure = parisc_agp_configure, 217 .configure = parisc_agp_configure,
218 .fetch_size = parisc_agp_fetch_size, 218 .fetch_size = parisc_agp_fetch_size,
219 .tlb_flush = parisc_agp_tlbflush, 219 .tlb_flush = parisc_agp_tlbflush,
220 .mask_memory = parisc_agp_page_mask_memory, 220 .mask_memory = parisc_agp_mask_memory,
221 .masks = parisc_agp_masks, 221 .masks = parisc_agp_masks,
222 .agp_enable = parisc_agp_enable, 222 .agp_enable = parisc_agp_enable,
223 .cache_flush = global_cache_flush, 223 .cache_flush = global_cache_flush,
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index aaca40283be9..4f568cb9af3f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
393 return as ? 0 : -ENOMEM; 393 return as ? 0 : -ENOMEM;
394} 394}
395 395
396static struct file_operations apm_bios_fops = { 396static const struct file_operations apm_bios_fops = {
397 .owner = THIS_MODULE, 397 .owner = THIS_MODULE,
398 .read = apm_read, 398 .read = apm_read,
399 .poll = apm_poll, 399 .poll = apm_poll,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 73a0765344b6..fe2cb2f5db17 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/sched.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/errno.h> 28#include <linux/errno.h>
28#include <linux/miscdevice.h> 29#include <linux/miscdevice.h>
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index e3dd24bff514..836d4f0a876f 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
217# define bfin_otp_ioctl NULL 217# define bfin_otp_ioctl NULL
218#endif 218#endif
219 219
220static struct file_operations bfin_otp_fops = { 220static const struct file_operations bfin_otp_fops = {
221 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
222 .unlocked_ioctl = bfin_otp_ioctl, 222 .unlocked_ioctl = bfin_otp_ioctl,
223 .read = bfin_otp_read, 223 .read = bfin_otp_read,
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index df5038bbcbc2..4254457d3911 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
3354 continue; 3354 continue;
3355 } 3355 }
3356#ifdef MODULE 3356#ifdef MODULE
3357 if (isparam && irq[i]) 3357 if (isparam && i < NR_CARDS && irq[i])
3358 cy_isa_irq = irq[i]; 3358 cy_isa_irq = irq[i];
3359 else 3359 else
3360#endif 3360#endif
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 52e06589821d..045c930e6320 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -56,6 +56,7 @@
56#include <linux/errno.h> /* for -EBUSY */ 56#include <linux/errno.h> /* for -EBUSY */
57#include <linux/ioport.h> /* for request_region */ 57#include <linux/ioport.h> /* for request_region */
58#include <linux/delay.h> /* for loops_per_jiffy */ 58#include <linux/delay.h> /* for loops_per_jiffy */
59#include <linux/sched.h>
59#include <linux/smp_lock.h> /* cycle_kernel_lock() */ 60#include <linux/smp_lock.h> /* cycle_kernel_lock() */
60#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ 61#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
61#include <asm/uaccess.h> /* for get_user, etc. */ 62#include <asm/uaccess.h> /* for get_user, etc. */
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 9d589e3144de..dde5134713e2 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -30,6 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h>
33#include <linux/serial.h> 34#include <linux/serial.h>
34#include <linux/delay.h> 35#include <linux/delay.h>
35#include <linux/ctype.h> 36#include <linux/ctype.h>
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index 9e4e569dc00d..d400cbd280f2 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/tty.h> 24#include <linux/tty.h>
25#include <linux/sched.h>
25#include <linux/serial.h> 26#include <linux/serial.h>
26#include <linux/mm.h> 27#include <linux/mm.h>
27#include <linux/generic_serial.h> 28#include <linux/generic_serial.h>
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index aac0985a572b..31e7c91c2d9d 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -43,6 +43,7 @@
43#define RTC_VERSION "1.07" 43#define RTC_VERSION "1.07"
44 44
45#include <linux/module.h> 45#include <linux/module.h>
46#include <linux/sched.h>
46#include <linux/errno.h> 47#include <linux/errno.h>
47#include <linux/miscdevice.h> 48#include <linux/miscdevice.h>
48#include <linux/fcntl.h> 49#include <linux/fcntl.h>
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 25ce15bb1c08..a632f25f144a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -678,7 +678,7 @@ int hvc_poll(struct hvc_struct *hp)
678EXPORT_SYMBOL_GPL(hvc_poll); 678EXPORT_SYMBOL_GPL(hvc_poll);
679 679
680/** 680/**
681 * hvc_resize() - Update terminal window size information. 681 * __hvc_resize() - Update terminal window size information.
682 * @hp: HVC console pointer 682 * @hp: HVC console pointer
683 * @ws: Terminal window size structure 683 * @ws: Terminal window size structure
684 * 684 *
@@ -687,12 +687,12 @@ EXPORT_SYMBOL_GPL(hvc_poll);
687 * 687 *
688 * Locking: Locking free; the function MUST be called holding hp->lock 688 * Locking: Locking free; the function MUST be called holding hp->lock
689 */ 689 */
690void hvc_resize(struct hvc_struct *hp, struct winsize ws) 690void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
691{ 691{
692 hp->ws = ws; 692 hp->ws = ws;
693 schedule_work(&hp->tty_resize); 693 schedule_work(&hp->tty_resize);
694} 694}
695EXPORT_SYMBOL_GPL(hvc_resize); 695EXPORT_SYMBOL_GPL(__hvc_resize);
696 696
697/* 697/*
698 * This kthread is either polling or interrupt driven. This is determined by 698 * This kthread is either polling or interrupt driven. This is determined by
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 3c85d78c975c..10950ca706d8 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -28,6 +28,7 @@
28#define HVC_CONSOLE_H 28#define HVC_CONSOLE_H
29#include <linux/kref.h> 29#include <linux/kref.h>
30#include <linux/tty.h> 30#include <linux/tty.h>
31#include <linux/spinlock.h>
31 32
32/* 33/*
33 * This is the max number of console adapters that can/will be found as 34 * This is the max number of console adapters that can/will be found as
@@ -88,7 +89,16 @@ int hvc_poll(struct hvc_struct *hp);
88void hvc_kick(void); 89void hvc_kick(void);
89 90
90/* Resize hvc tty terminal window */ 91/* Resize hvc tty terminal window */
91extern void hvc_resize(struct hvc_struct *hp, struct winsize ws); 92extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
93
94static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
95{
96 unsigned long flags;
97
98 spin_lock_irqsave(&hp->lock, flags);
99 __hvc_resize(hp, ws);
100 spin_unlock_irqrestore(&hp->lock, flags);
101}
92 102
93/* default notifier for irq based notification */ 103/* default notifier for irq based notification */
94extern int notifier_add_irq(struct hvc_struct *hp, int data); 104extern int notifier_add_irq(struct hvc_struct *hp, int data);
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 0ecac7e532f6..b8a5d654d3d0 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -273,7 +273,9 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
273 case MSG_TYPE_WINSIZE: 273 case MSG_TYPE_WINSIZE:
274 if (rb->mbuf->datalen != sizeof(struct winsize)) 274 if (rb->mbuf->datalen != sizeof(struct winsize))
275 break; 275 break;
276 hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); 276 /* The caller must ensure that the hvc is locked, which
277 * is the case when called from hvc_iucv_get_chars() */
278 __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
277 break; 279 break;
278 280
279 case MSG_TYPE_ERROR: /* ignored ... */ 281 case MSG_TYPE_ERROR: /* ignored ... */
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index eba999f8598d..a6ee32b599a8 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -55,7 +55,7 @@ static inline void notify_daemon(void)
55 notify_remote_via_evtchn(xen_start_info->console.domU.evtchn); 55 notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
56} 56}
57 57
58static int write_console(uint32_t vtermno, const char *data, int len) 58static int __write_console(const char *data, int len)
59{ 59{
60 struct xencons_interface *intf = xencons_interface(); 60 struct xencons_interface *intf = xencons_interface();
61 XENCONS_RING_IDX cons, prod; 61 XENCONS_RING_IDX cons, prod;
@@ -76,6 +76,29 @@ static int write_console(uint32_t vtermno, const char *data, int len)
76 return sent; 76 return sent;
77} 77}
78 78
79static int write_console(uint32_t vtermno, const char *data, int len)
80{
81 int ret = len;
82
83 /*
84 * Make sure the whole buffer is emitted, polling if
85 * necessary. We don't ever want to rely on the hvc daemon
86 * because the most interesting console output is when the
87 * kernel is crippled.
88 */
89 while (len) {
90 int sent = __write_console(data, len);
91
92 data += sent;
93 len -= sent;
94
95 if (unlikely(len))
96 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
97 }
98
99 return ret;
100}
101
79static int read_console(uint32_t vtermno, char *buf, int len) 102static int read_console(uint32_t vtermno, char *buf, int len)
80{ 103{
81 struct xencons_interface *intf = xencons_interface(); 104 struct xencons_interface *intf = xencons_interface();
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 00dd3de1be51..06aad0831c73 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -116,7 +116,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
116 if (!res) 116 if (!res)
117 return -ENOENT; 117 return -ENOENT;
118 118
119 mem = request_mem_region(res->start, res->end - res->start + 1, 119 mem = request_mem_region(res->start, resource_size(res),
120 pdev->name); 120 pdev->name);
121 if (mem == NULL) { 121 if (mem == NULL) {
122 ret = -EBUSY; 122 ret = -EBUSY;
@@ -124,7 +124,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
124 } 124 }
125 125
126 dev_set_drvdata(&pdev->dev, mem); 126 dev_set_drvdata(&pdev->dev, mem);
127 rng_base = ioremap(res->start, res->end - res->start + 1); 127 rng_base = ioremap(res->start, resource_size(res));
128 if (!rng_base) { 128 if (!rng_base) {
129 ret = -ENOMEM; 129 ret = -ENOMEM;
130 goto err_ioremap; 130 goto err_ioremap;
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 962968f05b94..915157fcff98 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -21,7 +21,6 @@
21#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/virtio.h> 23#include <linux/virtio.h>
24#include <linux/virtio_ids.h>
25#include <linux/virtio_rng.h> 24#include <linux/virtio_rng.h>
26 25
27/* The host will fill any buffer we give it with sweet, sweet randomness. We 26/* The host will fill any buffer we give it with sweet, sweet randomness. We
@@ -117,7 +116,7 @@ static int virtrng_probe(struct virtio_device *vdev)
117 return 0; 116 return 0;
118} 117}
119 118
120static void virtrng_remove(struct virtio_device *vdev) 119static void __devexit virtrng_remove(struct virtio_device *vdev)
121{ 120{
122 vdev->config->reset(vdev); 121 vdev->config->reset(vdev);
123 hwrng_unregister(&virtio_hwrng); 122 hwrng_unregister(&virtio_hwrng);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 41fc11dc921c..65545de3dbf4 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <asm/system.h> 37#include <asm/system.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/sched.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/ipmi.h> 42#include <linux/ipmi.h>
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 09050797c76a..ec5e3f8df648 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -35,6 +35,7 @@
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <asm/system.h> 36#include <asm/system.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/sched.h>
38#include <linux/spinlock.h> 39#include <linux/spinlock.h>
39#include <linux/mutex.h> 40#include <linux/mutex.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index ab2f3349c5c4..402838f4083e 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -19,6 +19,7 @@
19/*****************************************************************************/ 19/*****************************************************************************/
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/sched.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6c8b65d069e5..a074fceb67d3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -301,7 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
301} 301}
302#endif 302#endif
303 303
304static struct vm_operations_struct mmap_mem_ops = { 304static const struct vm_operations_struct mmap_mem_ops = {
305#ifdef CONFIG_HAVE_IOREMAP_PROT 305#ifdef CONFIG_HAVE_IOREMAP_PROT
306 .access = generic_access_phys 306 .access = generic_access_phys
307#endif 307#endif
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 30f095a8c2d4..1997270bb6f4 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -239,7 +239,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
239 return VM_FAULT_NOPAGE; 239 return VM_FAULT_NOPAGE;
240} 240}
241 241
242static struct vm_operations_struct mspec_vm_ops = { 242static const struct vm_operations_struct mspec_vm_ops = {
243 .open = mspec_open, 243 .open = mspec_open,
244 .close = mspec_close, 244 .close = mspec_close,
245 .fault = mspec_fault, 245 .fault = mspec_fault,
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index ec58d8c387ff..d3400b20444f 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -48,6 +48,7 @@
48#include <linux/tty.h> 48#include <linux/tty.h>
49#include <linux/tty_driver.h> 49#include <linux/tty_driver.h>
50#include <linux/tty_flip.h> 50#include <linux/tty_flip.h>
51#include <linux/sched.h>
51#include <linux/serial.h> 52#include <linux/serial.h>
52#include <linux/interrupt.h> 53#include <linux/interrupt.h>
53#include <linux/kmod.h> 54#include <linux/kmod.h>
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 53761cefa915..62f282e67638 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -18,6 +18,7 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/tty_flip.h> 19#include <linux/tty_flip.h>
20#include <linux/fcntl.h> 20#include <linux/fcntl.h>
21#include <linux/sched.h>
21#include <linux/string.h> 22#include <linux/string.h>
22#include <linux/major.h> 23#include <linux/major.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
@@ -261,6 +262,9 @@ done:
261 return 0; 262 return 0;
262} 263}
263 264
265/* Traditional BSD devices */
266#ifdef CONFIG_LEGACY_PTYS
267
264static int pty_install(struct tty_driver *driver, struct tty_struct *tty) 268static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
265{ 269{
266 struct tty_struct *o_tty; 270 struct tty_struct *o_tty;
@@ -310,24 +314,6 @@ free_mem_out:
310 return -ENOMEM; 314 return -ENOMEM;
311} 315}
312 316
313
314static const struct tty_operations pty_ops = {
315 .install = pty_install,
316 .open = pty_open,
317 .close = pty_close,
318 .write = pty_write,
319 .write_room = pty_write_room,
320 .flush_buffer = pty_flush_buffer,
321 .chars_in_buffer = pty_chars_in_buffer,
322 .unthrottle = pty_unthrottle,
323 .set_termios = pty_set_termios,
324 .resize = pty_resize
325};
326
327/* Traditional BSD devices */
328#ifdef CONFIG_LEGACY_PTYS
329static struct tty_driver *pty_driver, *pty_slave_driver;
330
331static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file, 317static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
332 unsigned int cmd, unsigned long arg) 318 unsigned int cmd, unsigned long arg)
333{ 319{
@@ -341,7 +327,12 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
341static int legacy_count = CONFIG_LEGACY_PTY_COUNT; 327static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
342module_param(legacy_count, int, 0); 328module_param(legacy_count, int, 0);
343 329
344static const struct tty_operations pty_ops_bsd = { 330/*
331 * The master side of a pty can do TIOCSPTLCK and thus
332 * has pty_bsd_ioctl.
333 */
334static const struct tty_operations master_pty_ops_bsd = {
335 .install = pty_install,
345 .open = pty_open, 336 .open = pty_open,
346 .close = pty_close, 337 .close = pty_close,
347 .write = pty_write, 338 .write = pty_write,
@@ -354,8 +345,23 @@ static const struct tty_operations pty_ops_bsd = {
354 .resize = pty_resize 345 .resize = pty_resize
355}; 346};
356 347
348static const struct tty_operations slave_pty_ops_bsd = {
349 .install = pty_install,
350 .open = pty_open,
351 .close = pty_close,
352 .write = pty_write,
353 .write_room = pty_write_room,
354 .flush_buffer = pty_flush_buffer,
355 .chars_in_buffer = pty_chars_in_buffer,
356 .unthrottle = pty_unthrottle,
357 .set_termios = pty_set_termios,
358 .resize = pty_resize
359};
360
357static void __init legacy_pty_init(void) 361static void __init legacy_pty_init(void)
358{ 362{
363 struct tty_driver *pty_driver, *pty_slave_driver;
364
359 if (legacy_count <= 0) 365 if (legacy_count <= 0)
360 return; 366 return;
361 367
@@ -383,7 +389,7 @@ static void __init legacy_pty_init(void)
383 pty_driver->init_termios.c_ospeed = 38400; 389 pty_driver->init_termios.c_ospeed = 38400;
384 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; 390 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
385 pty_driver->other = pty_slave_driver; 391 pty_driver->other = pty_slave_driver;
386 tty_set_operations(pty_driver, &pty_ops); 392 tty_set_operations(pty_driver, &master_pty_ops_bsd);
387 393
388 pty_slave_driver->owner = THIS_MODULE; 394 pty_slave_driver->owner = THIS_MODULE;
389 pty_slave_driver->driver_name = "pty_slave"; 395 pty_slave_driver->driver_name = "pty_slave";
@@ -399,7 +405,7 @@ static void __init legacy_pty_init(void)
399 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS | 405 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
400 TTY_DRIVER_REAL_RAW; 406 TTY_DRIVER_REAL_RAW;
401 pty_slave_driver->other = pty_driver; 407 pty_slave_driver->other = pty_driver;
402 tty_set_operations(pty_slave_driver, &pty_ops); 408 tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
403 409
404 if (tty_register_driver(pty_driver)) 410 if (tty_register_driver(pty_driver))
405 panic("Couldn't register pty driver"); 411 panic("Couldn't register pty driver");
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 01f2654d5a2e..f121357e5af0 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -32,6 +32,7 @@
32*/ 32*/
33 33
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/sched.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/errno.h> 37#include <linux/errno.h>
37#include <linux/tty.h> 38#include <linux/tty.h>
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index 74339559f0b9..780506326a73 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -31,6 +31,7 @@
31*/ 31*/
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/sched.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/errno.h> 36#include <linux/errno.h>
36#include <asm/io.h> 37#include <asm/io.h>
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c
index 2fb49e89b324..47fab7c33073 100644
--- a/drivers/char/rio/riotty.c
+++ b/drivers/char/rio/riotty.c
@@ -33,6 +33,7 @@
33#define __EXPLICIT_DEF_H__ 33#define __EXPLICIT_DEF_H__
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/sched.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
37#include <linux/errno.h> 38#include <linux/errno.h>
38#include <linux/tty.h> 39#include <linux/tty.h>
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index e0d0f8b2696b..bc4ab3e54550 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -74,6 +74,7 @@
74#include <linux/proc_fs.h> 74#include <linux/proc_fs.h>
75#include <linux/seq_file.h> 75#include <linux/seq_file.h>
76#include <linux/spinlock.h> 76#include <linux/spinlock.h>
77#include <linux/sched.h>
77#include <linux/sysctl.h> 78#include <linux/sysctl.h>
78#include <linux/wait.h> 79#include <linux/wait.h>
79#include <linux/bcd.h> 80#include <linux/bcd.h>
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 33a2b531802e..9610861d1f5f 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -89,6 +89,7 @@
89#include <linux/interrupt.h> 89#include <linux/interrupt.h>
90#include <linux/kernel.h> 90#include <linux/kernel.h>
91#include <linux/errno.h> 91#include <linux/errno.h>
92#include <linux/sched.h>
92#include <linux/tty.h> 93#include <linux/tty.h>
93 94
94#include <asm/setup.h> 95#include <asm/setup.h>
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 5942a9d674c0..452370af95de 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
220 return 1; 220 return 1;
221 } 221 }
222 222
223 if ((long)info < (long)(&cy_port[0]) 223 if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
224 || (long)(&cy_port[NR_PORTS]) < (long)info) {
225 printk("Warning: cyclades_port out of range for (%s) in %s\n", 224 printk("Warning: cyclades_port out of range for (%s) in %s\n",
226 name, routine); 225 name, routine);
227 return 1; 226 return 1;
@@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
520 panic("TxInt on debug port!!!"); 519 panic("TxInt on debug port!!!");
521 } 520 }
522#endif 521#endif
523
524 info = &cy_port[channel];
525
526 /* validate the port number (as configured and open) */ 522 /* validate the port number (as configured and open) */
527 if ((channel < 0) || (NR_PORTS <= channel)) { 523 if ((channel < 0) || (NR_PORTS <= channel)) {
528 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 524 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
529 base_addr[CyTEOIR] = CyNOTRANS; 525 base_addr[CyTEOIR] = CyNOTRANS;
530 return IRQ_HANDLED; 526 return IRQ_HANDLED;
531 } 527 }
528 info = &cy_port[channel];
532 info->last_active = jiffies; 529 info->last_active = jiffies;
533 if (info->tty == 0) { 530 if (info->tty == 0) {
534 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 531 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index fd3dced97776..8c262aaf7c26 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -36,6 +36,7 @@
36 */ 36 */
37 37
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/sched.h>
39#include <linux/input.h> 40#include <linux/input.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/init.h> 42#include <linux/init.h>
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 53e504f41b20..db6dcfa35ba0 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -27,6 +27,7 @@
27/*****************************************************************************/ 27/*****************************************************************************/
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/sched.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/interrupt.h> 32#include <linux/interrupt.h>
32#include <linux/tty.h> 33#include <linux/tty.h>
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 8f2284be68e1..80ea6bcfffdc 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -32,6 +32,7 @@
32#include <linux/kernel.h> /* printk() */ 32#include <linux/kernel.h> /* printk() */
33#include <linux/fs.h> /* everything... */ 33#include <linux/fs.h> /* everything... */
34#include <linux/errno.h> /* error codes */ 34#include <linux/errno.h> /* error codes */
35#include <linux/sched.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/ioport.h> 37#include <linux/ioport.h>
37#include <linux/interrupt.h> 38#include <linux/interrupt.h>
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 45d58002b06c..47c2d2763456 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -696,8 +696,7 @@ int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
696 696
697 cmd.header.in = pcrread_header; 697 cmd.header.in = pcrread_header;
698 cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); 698 cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
699 BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE); 699 rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
700 rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
701 "attempting to read a pcr value"); 700 "attempting to read a pcr value");
702 701
703 if (rc == 0) 702 if (rc == 0)
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 3108991c5c8b..66fa4e10d76b 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
402 container_of(work, struct tty_struct, buf.work.work); 402 container_of(work, struct tty_struct, buf.work.work);
403 unsigned long flags; 403 unsigned long flags;
404 struct tty_ldisc *disc; 404 struct tty_ldisc *disc;
405 struct tty_buffer *tbuf, *head;
406 char *char_buf;
407 unsigned char *flag_buf;
408 405
409 disc = tty_ldisc_ref(tty); 406 disc = tty_ldisc_ref(tty);
410 if (disc == NULL) /* !TTY_LDISC */ 407 if (disc == NULL) /* !TTY_LDISC */
411 return; 408 return;
412 409
413 spin_lock_irqsave(&tty->buf.lock, flags); 410 spin_lock_irqsave(&tty->buf.lock, flags);
414 /* So we know a flush is running */ 411
415 set_bit(TTY_FLUSHING, &tty->flags); 412 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 head = tty->buf.head; 413 struct tty_buffer *head;
417 if (head != NULL) { 414 while ((head = tty->buf.head) != NULL) {
418 tty->buf.head = NULL; 415 int count;
419 for (;;) { 416 char *char_buf;
420 int count = head->commit - head->read; 417 unsigned char *flag_buf;
418
419 count = head->commit - head->read;
421 if (!count) { 420 if (!count) {
422 if (head->next == NULL) 421 if (head->next == NULL)
423 break; 422 break;
424 tbuf = head; 423 tty->buf.head = head->next;
425 head = head->next; 424 tty_buffer_free(tty, head);
426 tty_buffer_free(tty, tbuf);
427 continue; 425 continue;
428 } 426 }
429 /* Ldisc or user is trying to flush the buffers 427 /* Ldisc or user is trying to flush the buffers
@@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
445 flag_buf, count); 443 flag_buf, count);
446 spin_lock_irqsave(&tty->buf.lock, flags); 444 spin_lock_irqsave(&tty->buf.lock, flags);
447 } 445 }
448 /* Restore the queue head */ 446 clear_bit(TTY_FLUSHING, &tty->flags);
449 tty->buf.head = head;
450 } 447 }
448
451 /* We may have a deferred request to flush the input buffer, 449 /* We may have a deferred request to flush the input buffer,
452 if so pull the chain under the lock and empty the queue */ 450 if so pull the chain under the lock and empty the queue */
453 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) { 451 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
455 clear_bit(TTY_FLUSHPENDING, &tty->flags); 453 clear_bit(TTY_FLUSHPENDING, &tty->flags);
456 wake_up(&tty->read_wait); 454 wake_up(&tty->read_wait);
457 } 455 }
458 clear_bit(TTY_FLUSHING, &tty->flags);
459 spin_unlock_irqrestore(&tty->buf.lock, flags); 456 spin_unlock_irqrestore(&tty->buf.lock, flags);
460 457
461 tty_ldisc_deref(disc); 458 tty_ldisc_deref(disc);
@@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
471 */ 468 */
472void tty_flush_to_ldisc(struct tty_struct *tty) 469void tty_flush_to_ldisc(struct tty_struct *tty)
473{ 470{
474 flush_to_ldisc(&tty->buf.work.work); 471 flush_delayed_work(&tty->buf.work);
475} 472}
476 473
477/** 474/**
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index ea18a129b0b5..59499ee0fe6a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1389,7 +1389,7 @@ EXPORT_SYMBOL(tty_shutdown);
1389 * of ttys that the driver keeps. 1389 * of ttys that the driver keeps.
1390 * 1390 *
1391 * This method gets called from a work queue so that the driver private 1391 * This method gets called from a work queue so that the driver private
1392 * shutdown ops can sleep (needed for USB at least) 1392 * cleanup ops can sleep (needed for USB at least)
1393 */ 1393 */
1394static void release_one_tty(struct work_struct *work) 1394static void release_one_tty(struct work_struct *work)
1395{ 1395{
@@ -1397,10 +1397,9 @@ static void release_one_tty(struct work_struct *work)
1397 container_of(work, struct tty_struct, hangup_work); 1397 container_of(work, struct tty_struct, hangup_work);
1398 struct tty_driver *driver = tty->driver; 1398 struct tty_driver *driver = tty->driver;
1399 1399
1400 if (tty->ops->shutdown) 1400 if (tty->ops->cleanup)
1401 tty->ops->shutdown(tty); 1401 tty->ops->cleanup(tty);
1402 else 1402
1403 tty_shutdown(tty);
1404 tty->magic = 0; 1403 tty->magic = 0;
1405 tty_driver_kref_put(driver); 1404 tty_driver_kref_put(driver);
1406 module_put(driver->owner); 1405 module_put(driver->owner);
@@ -1415,6 +1414,12 @@ static void release_one_tty(struct work_struct *work)
1415static void queue_release_one_tty(struct kref *kref) 1414static void queue_release_one_tty(struct kref *kref)
1416{ 1415{
1417 struct tty_struct *tty = container_of(kref, struct tty_struct, kref); 1416 struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
1417
1418 if (tty->ops->shutdown)
1419 tty->ops->shutdown(tty);
1420 else
1421 tty_shutdown(tty);
1422
1418 /* The hangup queue is now free so we can reuse it rather than 1423 /* The hangup queue is now free so we can reuse it rather than
1419 waste a chunk of memory for each port */ 1424 waste a chunk of memory for each port */
1420 INIT_WORK(&tty->hangup_work, release_one_tty); 1425 INIT_WORK(&tty->hangup_work, release_one_tty);
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index aafdbaebc16a..feb55075819b 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -518,7 +518,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
518static int tty_ldisc_halt(struct tty_struct *tty) 518static int tty_ldisc_halt(struct tty_struct *tty)
519{ 519{
520 clear_bit(TTY_LDISC, &tty->flags); 520 clear_bit(TTY_LDISC, &tty->flags);
521 return cancel_delayed_work(&tty->buf.work); 521 return cancel_delayed_work_sync(&tty->buf.work);
522} 522}
523 523
524/** 524/**
@@ -756,12 +756,9 @@ void tty_ldisc_hangup(struct tty_struct *tty)
756 * N_TTY. 756 * N_TTY.
757 */ 757 */
758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
759 /* Make sure the old ldisc is quiescent */
760 tty_ldisc_halt(tty);
761 flush_scheduled_work();
762
763 /* Avoid racing set_ldisc or tty_ldisc_release */ 759 /* Avoid racing set_ldisc or tty_ldisc_release */
764 mutex_lock(&tty->ldisc_mutex); 760 mutex_lock(&tty->ldisc_mutex);
761 tty_ldisc_halt(tty);
765 if (tty->ldisc) { /* Not yet closed */ 762 if (tty->ldisc) { /* Not yet closed */
766 /* Switch back to N_TTY */ 763 /* Switch back to N_TTY */
767 tty_ldisc_reinit(tty); 764 tty_ldisc_reinit(tty);
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index a4bbb28f10be..2e8552dc5eda 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -221,6 +221,9 @@ int tty_port_block_til_ready(struct tty_port *port,
221 the port has just hung up or is in another error state */ 221 the port has just hung up or is in another error state */
222 if ((filp->f_flags & O_NONBLOCK) || 222 if ((filp->f_flags & O_NONBLOCK) ||
223 (tty->flags & (1 << TTY_IO_ERROR))) { 223 (tty->flags & (1 << TTY_IO_ERROR))) {
224 /* Indicate we are open */
225 if (tty->termios->c_cflag & CBAUD)
226 tty_port_raise_dtr_rts(port);
224 port->flags |= ASYNC_NORMAL_ACTIVE; 227 port->flags |= ASYNC_NORMAL_ACTIVE;
225 return 0; 228 return 0;
226 } 229 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 0d328b59568d..a035ae39a359 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -31,7 +31,6 @@
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/virtio.h> 33#include <linux/virtio.h>
34#include <linux/virtio_ids.h>
35#include <linux/virtio_console.h> 34#include <linux/virtio_console.h>
36#include "hvc_console.h" 35#include "hvc_console.h"
37 36
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 29c651ab0d78..ed86d3bf249a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
981 goto eperm; 981 goto eperm;
982 982
983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, 983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
984 sizeof(struct vt_setactivate))) 984 sizeof(struct vt_setactivate))) {
985 return -EFAULT; 985 ret = -EFAULT;
986 goto out;
987 }
986 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) 988 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
987 ret = -ENXIO; 989 ret = -ENXIO;
988 else { 990 else {
@@ -1530,7 +1532,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
1530 1532
1531 case PIO_UNIMAP: 1533 case PIO_UNIMAP:
1532 case GIO_UNIMAP: 1534 case GIO_UNIMAP:
1533 ret = do_unimap_ioctl(cmd, up, perm, vc); 1535 ret = compat_unimap_ioctl(cmd, up, perm, vc);
1534 break; 1536 break;
1535 1537
1536 /* 1538 /*
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f40ab699860f..4846d50199f3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
559 return status; 559 return status;
560} 560}
561 561
562static struct file_operations hwicap_fops = { 562static const struct file_operations hwicap_fops = {
563 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
564 .write = hwicap_write, 564 .write = hwicap_write,
565 .read = hwicap_read, 565 .read = hwicap_read,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3938c7817095..ff57c40e9b8b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42#ifdef CONFIG_HOTPLUG_CPU 42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */ 43/* This one keeps track of the previously set governor of a removed CPU */
44static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor); 44static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
45#endif 45#endif
46static DEFINE_SPINLOCK(cpufreq_driver_lock); 46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47 47
@@ -774,10 +774,12 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
774#ifdef CONFIG_SMP 774#ifdef CONFIG_SMP
775 unsigned long flags; 775 unsigned long flags;
776 unsigned int j; 776 unsigned int j;
777
778#ifdef CONFIG_HOTPLUG_CPU 777#ifdef CONFIG_HOTPLUG_CPU
779 if (per_cpu(cpufreq_cpu_governor, cpu)) { 778 struct cpufreq_governor *gov;
780 policy->governor = per_cpu(cpufreq_cpu_governor, cpu); 779
780 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
781 if (gov) {
782 policy->governor = gov;
781 dprintk("Restoring governor %s for cpu %d\n", 783 dprintk("Restoring governor %s for cpu %d\n",
782 policy->governor->name, cpu); 784 policy->governor->name, cpu);
783 } 785 }
@@ -949,10 +951,13 @@ err_out_kobj_put:
949static int cpufreq_add_dev(struct sys_device *sys_dev) 951static int cpufreq_add_dev(struct sys_device *sys_dev)
950{ 952{
951 unsigned int cpu = sys_dev->id; 953 unsigned int cpu = sys_dev->id;
952 int ret = 0; 954 int ret = 0, found = 0;
953 struct cpufreq_policy *policy; 955 struct cpufreq_policy *policy;
954 unsigned long flags; 956 unsigned long flags;
955 unsigned int j; 957 unsigned int j;
958#ifdef CONFIG_HOTPLUG_CPU
959 int sibling;
960#endif
956 961
957 if (cpu_is_offline(cpu)) 962 if (cpu_is_offline(cpu))
958 return 0; 963 return 0;
@@ -999,7 +1004,19 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
999 INIT_WORK(&policy->update, handle_update); 1004 INIT_WORK(&policy->update, handle_update);
1000 1005
1001 /* Set governor before ->init, so that driver could check it */ 1006 /* Set governor before ->init, so that driver could check it */
1002 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 1007#ifdef CONFIG_HOTPLUG_CPU
1008 for_each_online_cpu(sibling) {
1009 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1010 if (cp && cp->governor &&
1011 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1012 policy->governor = cp->governor;
1013 found = 1;
1014 break;
1015 }
1016 }
1017#endif
1018 if (!found)
1019 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1003 /* call driver. From then on the cpufreq must be able 1020 /* call driver. From then on the cpufreq must be able
1004 * to accept all calls to ->verify and ->setpolicy for this CPU 1021 * to accept all calls to ->verify and ->setpolicy for this CPU
1005 */ 1022 */
@@ -1111,7 +1128,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1111#ifdef CONFIG_SMP 1128#ifdef CONFIG_SMP
1112 1129
1113#ifdef CONFIG_HOTPLUG_CPU 1130#ifdef CONFIG_HOTPLUG_CPU
1114 per_cpu(cpufreq_cpu_governor, cpu) = data->governor; 1131 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1132 CPUFREQ_NAME_LEN);
1115#endif 1133#endif
1116 1134
1117 /* if we have other CPUs still registered, we need to unlink them, 1135 /* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1153,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1135 continue; 1153 continue;
1136 dprintk("removing link for cpu %u\n", j); 1154 dprintk("removing link for cpu %u\n", j);
1137#ifdef CONFIG_HOTPLUG_CPU 1155#ifdef CONFIG_HOTPLUG_CPU
1138 per_cpu(cpufreq_cpu_governor, j) = data->governor; 1156 strncpy(per_cpu(cpufreq_cpu_governor, j),
1157 data->governor->name, CPUFREQ_NAME_LEN);
1139#endif 1158#endif
1140 cpu_sys_dev = get_cpu_sysdev(j); 1159 cpu_sys_dev = get_cpu_sysdev(j);
1141 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); 1160 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1625,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1606 1625
1607void cpufreq_unregister_governor(struct cpufreq_governor *governor) 1626void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1608{ 1627{
1628#ifdef CONFIG_HOTPLUG_CPU
1629 int cpu;
1630#endif
1631
1609 if (!governor) 1632 if (!governor)
1610 return; 1633 return;
1611 1634
1635#ifdef CONFIG_HOTPLUG_CPU
1636 for_each_present_cpu(cpu) {
1637 if (cpu_online(cpu))
1638 continue;
1639 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1640 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1641 }
1642#endif
1643
1612 mutex_lock(&cpufreq_governor_mutex); 1644 mutex_lock(&cpufreq_governor_mutex);
1613 list_del(&governor->governor_list); 1645 list_del(&governor->governor_list);
1614 mutex_unlock(&cpufreq_governor_mutex); 1646 mutex_unlock(&cpufreq_governor_mutex);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bc33ddc9c97c..c7b081b839ff 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
116 116
117 idle_time = cputime64_sub(cur_wall_time, busy_time); 117 idle_time = cputime64_sub(cur_wall_time, busy_time);
118 if (wall) 118 if (wall)
119 *wall = cur_wall_time; 119 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
120 120
121 return idle_time; 121 return (cputime64_t)jiffies_to_usecs(idle_time);;
122} 122}
123 123
124static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) 124static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 071699de50ee..4b34ade2332b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
133 133
134 idle_time = cputime64_sub(cur_wall_time, busy_time); 134 idle_time = cputime64_sub(cur_wall_time, busy_time);
135 if (wall) 135 if (wall)
136 *wall = cur_wall_time; 136 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
137 137
138 return idle_time; 138 return (cputime64_t)jiffies_to_usecs(idle_time);
139} 139}
140 140
141static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) 141static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index ad41f19b8e3f..12fdd3987a36 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -76,8 +76,11 @@ static void cpuidle_idle_call(void)
76#endif 76#endif
77 /* ask the governor for the next state */ 77 /* ask the governor for the next state */
78 next_state = cpuidle_curr_governor->select(dev); 78 next_state = cpuidle_curr_governor->select(dev);
79 if (need_resched()) 79 if (need_resched()) {
80 local_irq_enable();
80 return; 81 return;
82 }
83
81 target_state = &dev->states[next_state]; 84 target_state = &dev->states[next_state];
82 85
83 /* enter the state and update stats */ 86 /* enter the state and update stats */
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 76cb6b345e7b..0af80577dc7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,6 +24,12 @@
24#include <asm/i387.h> 24#include <asm/i387.h>
25#include "padlock.h" 25#include "padlock.h"
26 26
27#ifdef CONFIG_64BIT
28#define STACK_ALIGN 16
29#else
30#define STACK_ALIGN 4
31#endif
32
27struct padlock_sha_desc { 33struct padlock_sha_desc {
28 struct shash_desc fallback; 34 struct shash_desc fallback;
29}; 35};
@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
64 /* We can't store directly to *out as it may be unaligned. */ 70 /* We can't store directly to *out as it may be unaligned. */
65 /* BTW Don't reduce the buffer size below 128 Bytes! 71 /* BTW Don't reduce the buffer size below 128 Bytes!
66 * PadLock microcode needs it that big. */ 72 * PadLock microcode needs it that big. */
67 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); 73 char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
74 ((aligned(STACK_ALIGN)));
75 char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
68 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); 76 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
69 struct sha1_state state; 77 struct sha1_state state;
70 unsigned int space; 78 unsigned int space;
@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
128 /* We can't store directly to *out as it may be unaligned. */ 136 /* We can't store directly to *out as it may be unaligned. */
129 /* BTW Don't reduce the buffer size below 128 Bytes! 137 /* BTW Don't reduce the buffer size below 128 Bytes!
130 * PadLock microcode needs it that big. */ 138 * PadLock microcode needs it that big. */
131 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); 139 char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
140 ((aligned(STACK_ALIGN)));
141 char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
132 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); 142 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
133 struct sha256_state state; 143 struct sha256_state state;
134 unsigned int space; 144 unsigned int space;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 02127e59fe8e..55c9c59b3f71 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -47,6 +47,18 @@ config EDAC_DEBUG_VERBOSE
47 Source file name and line number where debugging message 47 Source file name and line number where debugging message
48 printed will be added to debugging message. 48 printed will be added to debugging message.
49 49
50 config EDAC_DECODE_MCE
51 tristate "Decode MCEs in human-readable form (only on AMD for now)"
52 depends on CPU_SUP_AMD && X86_MCE
53 default y
54 ---help---
55 Enable this option if you want to decode Machine Check Exceptions
56 occuring on your machine in human-readable form.
57
58 You should definitely say Y here in case you want to decode MCEs
59 which occur really early upon boot, before the module infrastructure
60 has been initialized.
61
50config EDAC_MM_EDAC 62config EDAC_MM_EDAC
51 tristate "Main Memory EDAC (Error Detection And Correction) reporting" 63 tristate "Main Memory EDAC (Error Detection And Correction) reporting"
52 help 64 help
@@ -59,7 +71,7 @@ config EDAC_MM_EDAC
59 71
60config EDAC_AMD64 72config EDAC_AMD64
61 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" 73 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
62 depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && CPU_SUP_AMD 74 depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
63 help 75 help
64 Support for error detection and correction on the AMD 64 76 Support for error detection and correction on the AMD 64
65 Families of Memory Controllers (K8, F10h and F11h) 77 Families of Memory Controllers (K8, F10h and F11h)
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 7a473bbe8abd..bc5dc232a0fb 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -6,7 +6,6 @@
6# GNU General Public License. 6# GNU General Public License.
7# 7#
8 8
9
10obj-$(CONFIG_EDAC) := edac_stub.o 9obj-$(CONFIG_EDAC) := edac_stub.o
11obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o 10obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
12 11
@@ -17,9 +16,7 @@ ifdef CONFIG_PCI
17edac_core-objs += edac_pci.o edac_pci_sysfs.o 16edac_core-objs += edac_pci.o edac_pci_sysfs.o
18endif 17endif
19 18
20ifdef CONFIG_CPU_SUP_AMD 19obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
21edac_core-objs += edac_mce_amd.o
22endif
23 20
24obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o 21obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
25obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o 22obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e551e63b6dc..a38831c82649 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
15 15
16/* Lookup table for all possible MC control instances */ 16/* Lookup table for all possible MC control instances */
17struct amd64_pvt; 17struct amd64_pvt;
18static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; 18static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
19static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; 19static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
20 20
21/* 21/*
22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only 22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
189/* Map from a CSROW entry to the mask entry that operates on it */ 189/* Map from a CSROW entry to the mask entry that operates on it */
190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) 190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
191{ 191{
192 return csrow >> (pvt->num_dcsm >> 3); 192 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
193 return csrow;
194 else
195 return csrow >> 1;
193} 196}
194 197
195/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ 198/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
279 intlv_en = pvt->dram_IntlvEn[0]; 282 intlv_en = pvt->dram_IntlvEn[0];
280 283
281 if (intlv_en == 0) { 284 if (intlv_en == 0) {
282 for (node_id = 0; ; ) { 285 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
283 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 286 if (amd64_base_limit_match(pvt, sys_addr, node_id))
284 break; 287 goto found;
285
286 if (++node_id >= DRAM_REG_COUNT)
287 goto err_no_match;
288 } 288 }
289 goto found; 289 goto err_no_match;
290 } 290 }
291 291
292 if (unlikely((intlv_en != (0x01 << 8)) && 292 if (unlikely((intlv_en != 0x01) &&
293 (intlv_en != (0x03 << 8)) && 293 (intlv_en != 0x03) &&
294 (intlv_en != (0x07 << 8)))) { 294 (intlv_en != 0x07))) {
295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " 295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
296 "IntlvEn field of DRAM Base Register for node 0: " 296 "IntlvEn field of DRAM Base Register for node 0: "
297 "This probably indicates a BIOS bug.\n", intlv_en); 297 "this probably indicates a BIOS bug.\n", intlv_en);
298 return NULL; 298 return NULL;
299 } 299 }
300 300
301 bits = (((u32) sys_addr) >> 12) & intlv_en; 301 bits = (((u32) sys_addr) >> 12) & intlv_en;
302 302
303 for (node_id = 0; ; ) { 303 for (node_id = 0; ; ) {
304 if ((pvt->dram_limit[node_id] & intlv_en) == bits) 304 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
305 break; /* intlv_sel field matches */ 305 break; /* intlv_sel field matches */
306 306
307 if (++node_id >= DRAM_REG_COUNT) 307 if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
311 /* sanity test for sys_addr */ 311 /* sanity test for sys_addr */
312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
313 amd64_printk(KERN_WARNING, 313 amd64_printk(KERN_WARNING,
314 "%s(): sys_addr 0x%lx falls outside base/limit " 314 "%s(): sys_addr 0x%llx falls outside base/limit "
315 "address range for node %d with node interleaving " 315 "address range for node %d with node interleaving "
316 "enabled.\n", __func__, (unsigned long)sys_addr, 316 "enabled.\n",
317 node_id); 317 __func__, sys_addr, node_id);
318 return NULL; 318 return NULL;
319 } 319 }
320 320
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
377 * base/mask register pair, test the condition shown near the start of 377 * base/mask register pair, test the condition shown near the start of
378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
379 */ 379 */
380 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 380 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
381 381
382 /* This DRAM chip select is disabled on this node */ 382 /* This DRAM chip select is disabled on this node */
383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) 383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
734 u64 base, mask; 734 u64 base, mask;
735 735
736 pvt = mci->pvt_info; 736 pvt = mci->pvt_info;
737 BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); 737 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
738 738
739 base = base_from_dct_base(pvt, csrow); 739 base = base_from_dct_base(pvt, csrow);
740 mask = mask_from_dct_mask(pvt, csrow); 740 mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
962 */ 962 */
963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
964{ 964{
965 if (pvt->ext_model >= OPTERON_CPU_REV_F) { 965
966 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
967 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
968 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
969 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
970 pvt->dcs_shift = REV_E_DCS_SHIFT;
971 pvt->cs_count = 8;
972 pvt->num_dcsm = 8;
973 } else {
966 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 974 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
967 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 975 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
968 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; 976 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
969 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; 977 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
970 978
971 switch (boot_cpu_data.x86) { 979 if (boot_cpu_data.x86 == 0x11) {
972 case 0xf: 980 pvt->cs_count = 4;
973 pvt->num_dcsm = REV_F_DCSM_COUNT; 981 pvt->num_dcsm = 2;
974 break; 982 } else {
975 983 pvt->cs_count = 8;
976 case 0x10: 984 pvt->num_dcsm = 4;
977 pvt->num_dcsm = F10_DCSM_COUNT;
978 break;
979
980 case 0x11:
981 pvt->num_dcsm = F11_DCSM_COUNT;
982 break;
983
984 default:
985 amd64_printk(KERN_ERR, "Unsupported family!\n");
986 break;
987 } 985 }
988 } else {
989 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
990 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
991 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
992 pvt->dcs_shift = REV_E_DCS_SHIFT;
993 pvt->num_dcsm = REV_E_DCSM_COUNT;
994 } 986 }
995} 987}
996 988
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
1003 995
1004 amd64_set_dct_base_and_mask(pvt); 996 amd64_set_dct_base_and_mask(pvt);
1005 997
1006 for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { 998 for (cs = 0; cs < pvt->cs_count; cs++) {
1007 reg = K8_DCSB0 + (cs * 4); 999 reg = K8_DCSB0 + (cs * 4);
1008 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, 1000 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1009 &pvt->dcsb0[cs]); 1001 &pvt->dcsb0[cs]);
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1193 * different from the node that detected the error. 1185 * different from the node that detected the error.
1194 */ 1186 */
1195 src_mci = find_mc_by_sys_addr(mci, SystemAddress); 1187 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1196 if (src_mci) { 1188 if (!src_mci) {
1197 amd64_mc_printk(mci, KERN_ERR, 1189 amd64_mc_printk(mci, KERN_ERR,
1198 "failed to map error address 0x%lx to a node\n", 1190 "failed to map error address 0x%lx to a node\n",
1199 (unsigned long)SystemAddress); 1191 (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1376 1368
1377 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; 1369 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1378 1370
1379 pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | 1371 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1380 ((u64) low_base & 0xFFFF0000))) << 8; 1372 (((u64)low_base & 0xFFFF0000) << 8);
1381 1373
1382 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); 1374 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1383 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 1375 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1398 * Extract address values and form a LIMIT address. Limit is the HIGHEST 1390 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1399 * memory location of the region, so low 24 bits need to be all ones. 1391 * memory location of the region, so low 24 bits need to be all ones.
1400 */ 1392 */
1401 low_limit |= 0x0000FFFF; 1393 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1402 pvt->dram_limit[dram] = 1394 (((u64) low_limit & 0xFFFF0000) << 8) |
1403 ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); 1395 0x00FFFFFF;
1404} 1396}
1405 1397
1406static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1398static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1566 1558
1567 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 1559 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1568 1560
1569 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 1561 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1570 1562
1571 cs_base = amd64_get_dct_base(pvt, cs, csrow); 1563 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1572 if (!(cs_base & K8_DCSB_CS_ENABLE)) 1564 if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2262,7 +2254,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2262{ 2254{
2263 u32 ec = ERROR_CODE(info->nbsl); 2255 u32 ec = ERROR_CODE(info->nbsl);
2264 u32 xec = EXT_ERROR_CODE(info->nbsl); 2256 u32 xec = EXT_ERROR_CODE(info->nbsl);
2265 int ecc_type = info->nbsh & (0x3 << 13); 2257 int ecc_type = (info->nbsh >> 13) & 0x3;
2266 2258
2267 /* Bail early out if this was an 'observed' error */ 2259 /* Bail early out if this was an 'observed' error */
2268 if (PP(ec) == K8_NBSL_PP_OBS) 2260 if (PP(ec) == K8_NBSL_PP_OBS)
@@ -2497,7 +2489,7 @@ err_reg:
2497 * NOTE: CPU Revision Dependent code 2489 * NOTE: CPU Revision Dependent code
2498 * 2490 *
2499 * Input: 2491 * Input:
2500 * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) 2492 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2501 * k8 private pointer to --> 2493 * k8 private pointer to -->
2502 * DRAM Bank Address mapping register 2494 * DRAM Bank Address mapping register
2503 * node_id 2495 * node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2577 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" 2569 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2578 ); 2570 );
2579 2571
2580 for (i = 0; i < CHIPSELECT_COUNT; i++) { 2572 for (i = 0; i < pvt->cs_count; i++) {
2581 csrow = &mci->csrows[i]; 2573 csrow = &mci->csrows[i];
2582 2574
2583 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 2575 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2988 goto err_exit; 2980 goto err_exit;
2989 2981
2990 ret = -ENOMEM; 2982 ret = -ENOMEM;
2991 mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); 2983 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
2992 if (!mci) 2984 if (!mci)
2993 goto err_exit; 2985 goto err_exit;
2994 2986
@@ -3171,7 +3163,7 @@ static int __init amd64_edac_init(void)
3171 opstate_init(); 3163 opstate_init();
3172 3164
3173 if (cache_k8_northbridges() < 0) 3165 if (cache_k8_northbridges() < 0)
3174 goto err_exit; 3166 return err;
3175 3167
3176 err = pci_register_driver(&amd64_pci_driver); 3168 err = pci_register_driver(&amd64_pci_driver);
3177 if (err) 3169 if (err)
@@ -3197,8 +3189,6 @@ static int __init amd64_edac_init(void)
3197 3189
3198err_2nd_stage: 3190err_2nd_stage:
3199 debugf0("2nd stage failed\n"); 3191 debugf0("2nd stage failed\n");
3200
3201err_exit:
3202 pci_unregister_driver(&amd64_pci_driver); 3192 pci_unregister_driver(&amd64_pci_driver);
3203 3193
3204 return err; 3194 return err;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8ea07e2715dc..c6f359a85207 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -132,6 +132,8 @@
132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ 132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
133#define EDAC_MOD_STR "amd64_edac" 133#define EDAC_MOD_STR "amd64_edac"
134 134
135#define EDAC_MAX_NUMNODES 8
136
135/* Extended Model from CPUID, for CPU Revision numbers */ 137/* Extended Model from CPUID, for CPU Revision numbers */
136#define OPTERON_CPU_LE_REV_C 0 138#define OPTERON_CPU_LE_REV_C 0
137#define OPTERON_CPU_REV_D 1 139#define OPTERON_CPU_REV_D 1
@@ -142,7 +144,7 @@
142#define OPTERON_CPU_REV_FA 5 144#define OPTERON_CPU_REV_FA 5
143 145
144/* Hardware limit on ChipSelect rows per MC and processors per system */ 146/* Hardware limit on ChipSelect rows per MC and processors per system */
145#define CHIPSELECT_COUNT 8 147#define MAX_CS_COUNT 8
146#define DRAM_REG_COUNT 8 148#define DRAM_REG_COUNT 8
147 149
148 150
@@ -193,7 +195,6 @@
193 */ 195 */
194#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) 196#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
195#define REV_E_DCS_SHIFT 4 197#define REV_E_DCS_SHIFT 4
196#define REV_E_DCSM_COUNT 8
197 198
198#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) 199#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
199#define REV_F_F1Xh_DCS_SHIFT 8 200#define REV_F_F1Xh_DCS_SHIFT 8
@@ -204,9 +205,6 @@
204 */ 205 */
205#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) 206#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
206#define REV_F_DCS_SHIFT 8 207#define REV_F_DCS_SHIFT 8
207#define REV_F_DCSM_COUNT 4
208#define F10_DCSM_COUNT 4
209#define F11_DCSM_COUNT 2
210 208
211/* DRAM CS Mask Registers */ 209/* DRAM CS Mask Registers */
212#define K8_DCSM0 0x60 210#define K8_DCSM0 0x60
@@ -374,13 +372,11 @@ enum {
374 372
375#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ 373#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
376 (BIT(((word) & 0xF) + 20) | \ 374 (BIT(((word) & 0xF) + 20) | \
377 BIT(17) | \ 375 BIT(17) | bits)
378 ((bits) & 0xF))
379 376
380#define SET_NB_DRAM_INJECTION_READ(word, bits) \ 377#define SET_NB_DRAM_INJECTION_READ(word, bits) \
381 (BIT(((word) & 0xF) + 20) | \ 378 (BIT(((word) & 0xF) + 20) | \
382 BIT(16) | \ 379 BIT(16) | bits)
383 ((bits) & 0xF))
384 380
385#define K8_NBCAP 0xE8 381#define K8_NBCAP 0xE8
386#define K8_NBCAP_CORES (BIT(12)|BIT(13)) 382#define K8_NBCAP_CORES (BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
445 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ 441 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
446 442
447 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ 443 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
448 u32 dcsb0[CHIPSELECT_COUNT]; 444 u32 dcsb0[MAX_CS_COUNT];
449 u32 dcsb1[CHIPSELECT_COUNT]; 445 u32 dcsb1[MAX_CS_COUNT];
450 446
451 /* DRAM CS Mask Registers F2x[1,0][6C:60] */ 447 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
452 u32 dcsm0[CHIPSELECT_COUNT]; 448 u32 dcsm0[MAX_CS_COUNT];
453 u32 dcsm1[CHIPSELECT_COUNT]; 449 u32 dcsm1[MAX_CS_COUNT];
454 450
455 /* 451 /*
456 * Decoded parts of DRAM BASE and LIMIT Registers 452 * Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
470 */ 466 */
471 u32 dcsb_base; /* DCSB base bits */ 467 u32 dcsb_base; /* DCSB base bits */
472 u32 dcsm_mask; /* DCSM mask bits */ 468 u32 dcsm_mask; /* DCSM mask bits */
469 u32 cs_count; /* num chip selects (== num DCSB registers) */
473 u32 num_dcsm; /* Number of DCSM registers */ 470 u32 num_dcsm; /* Number of DCSM registers */
474 u32 dcs_mask_notused; /* DCSM notused mask bits */ 471 u32 dcs_mask_notused; /* DCSM notused mask bits */
475 u32 dcs_shift; /* DCSB and DCSM shift value */ 472 u32 dcs_shift; /* DCSB and DCSM shift value */
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index d3675b76b3a7..29f1f7a612d9 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,5 +1,11 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
4{
5 struct amd64_pvt *pvt = mci->pvt_info;
6 return sprintf(buf, "0x%x\n", pvt->injection.section);
7}
8
3/* 9/*
4 * store error injection section value which refers to one of 4 16-byte sections 10 * store error injection section value which refers to one of 4 16-byte sections
5 * within a 64-byte cacheline 11 * within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
15 21
16 ret = strict_strtoul(data, 10, &value); 22 ret = strict_strtoul(data, 10, &value);
17 if (ret != -EINVAL) { 23 if (ret != -EINVAL) {
24
25 if (value > 3) {
26 amd64_printk(KERN_WARNING,
27 "%s: invalid section 0x%lx\n",
28 __func__, value);
29 return -EINVAL;
30 }
31
18 pvt->injection.section = (u32) value; 32 pvt->injection.section = (u32) value;
19 return count; 33 return count;
20 } 34 }
21 return ret; 35 return ret;
22} 36}
23 37
38static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
39{
40 struct amd64_pvt *pvt = mci->pvt_info;
41 return sprintf(buf, "0x%x\n", pvt->injection.word);
42}
43
24/* 44/*
25 * store error injection word value which refers to one of 9 16-bit word of the 45 * store error injection word value which refers to one of 9 16-bit word of the
26 * 16-byte (128-bit + ECC bits) section 46 * 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
37 ret = strict_strtoul(data, 10, &value); 57 ret = strict_strtoul(data, 10, &value);
38 if (ret != -EINVAL) { 58 if (ret != -EINVAL) {
39 59
40 value = (value <= 8) ? value : 0; 60 if (value > 8) {
41 pvt->injection.word = (u32) value; 61 amd64_printk(KERN_WARNING,
62 "%s: invalid word 0x%lx\n",
63 __func__, value);
64 return -EINVAL;
65 }
42 66
67 pvt->injection.word = (u32) value;
43 return count; 68 return count;
44 } 69 }
45 return ret; 70 return ret;
46} 71}
47 72
73static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
74{
75 struct amd64_pvt *pvt = mci->pvt_info;
76 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
77}
78
48/* 79/*
49 * store 16 bit error injection vector which enables injecting errors to the 80 * store 16 bit error injection vector which enables injecting errors to the
50 * corresponding bit within the error injection word above. When used during a 81 * corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
60 ret = strict_strtoul(data, 16, &value); 91 ret = strict_strtoul(data, 16, &value);
61 if (ret != -EINVAL) { 92 if (ret != -EINVAL) {
62 93
63 pvt->injection.bit_map = (u32) value & 0xFFFF; 94 if (value & 0xFFFF0000) {
95 amd64_printk(KERN_WARNING,
96 "%s: invalid EccVector: 0x%lx\n",
97 __func__, value);
98 return -EINVAL;
99 }
64 100
101 pvt->injection.bit_map = (u32) value;
65 return count; 102 return count;
66 } 103 }
67 return ret; 104 return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
147 .name = "inject_section", 184 .name = "inject_section",
148 .mode = (S_IRUGO | S_IWUSR) 185 .mode = (S_IRUGO | S_IWUSR)
149 }, 186 },
150 .show = NULL, 187 .show = amd64_inject_section_show,
151 .store = amd64_inject_section_store, 188 .store = amd64_inject_section_store,
152 }, 189 },
153 { 190 {
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
155 .name = "inject_word", 192 .name = "inject_word",
156 .mode = (S_IRUGO | S_IWUSR) 193 .mode = (S_IRUGO | S_IWUSR)
157 }, 194 },
158 .show = NULL, 195 .show = amd64_inject_word_show,
159 .store = amd64_inject_word_store, 196 .store = amd64_inject_word_store,
160 }, 197 },
161 { 198 {
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
163 .name = "inject_ecc_vector", 200 .name = "inject_ecc_vector",
164 .mode = (S_IRUGO | S_IWUSR) 201 .mode = (S_IRUGO | S_IWUSR)
165 }, 202 },
166 .show = NULL, 203 .show = amd64_inject_ecc_vector_show,
167 .store = amd64_inject_ecc_vector_store, 204 .store = amd64_inject_ecc_vector_store,
168 }, 205 },
169 { 206 {
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 0c21c370c9dd..713ed7d37247 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -3,6 +3,7 @@
3 3
4static bool report_gart_errors; 4static bool report_gart_errors;
5static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); 5static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
6static void (*orig_mce_callback)(struct mce *m);
6 7
7void amd_report_gart_errors(bool v) 8void amd_report_gart_errors(bool v)
8{ 9{
@@ -362,7 +363,7 @@ static inline void amd_decode_err_code(unsigned int ec)
362 pr_warning("Huh? Unknown MCE error 0x%x\n", ec); 363 pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
363} 364}
364 365
365void decode_mce(struct mce *m) 366static void amd_decode_mce(struct mce *m)
366{ 367{
367 struct err_regs regs; 368 struct err_regs regs;
368 int node, ecc; 369 int node, ecc;
@@ -420,3 +421,32 @@ void decode_mce(struct mce *m)
420 421
421 amd_decode_err_code(m->status & 0xffff); 422 amd_decode_err_code(m->status & 0xffff);
422} 423}
424
425static int __init mce_amd_init(void)
426{
427 /*
428 * We can decode MCEs for Opteron and later CPUs:
429 */
430 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
431 (boot_cpu_data.x86 >= 0xf)) {
432 /* safe the default decode mce callback */
433 orig_mce_callback = x86_mce_decode_callback;
434
435 x86_mce_decode_callback = amd_decode_mce;
436 }
437
438 return 0;
439}
440early_initcall(mce_amd_init);
441
442#ifdef MODULE
443static void __exit mce_amd_exit(void)
444{
445 x86_mce_decode_callback = orig_mce_callback;
446}
447
448MODULE_DESCRIPTION("AMD MCE decoder");
449MODULE_ALIAS("edac-mce-amd");
450MODULE_LICENSE("GPL");
451module_exit(mce_amd_exit);
452#endif
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index d335086f4a26..77a9579d7167 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1173,7 +1173,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1173 pci_read_config_word(pvt->branch_1, where, 1173 pci_read_config_word(pvt->branch_1, where,
1174 &pvt->b1_mtr[slot_row]); 1174 &pvt->b1_mtr[slot_row]);
1175 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, 1175 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
1176 where, pvt->b0_mtr[slot_row]); 1176 where, pvt->b1_mtr[slot_row]);
1177 } else { 1177 } else {
1178 pvt->b1_mtr[slot_row] = 0; 1178 pvt->b1_mtr[slot_row] = 0;
1179 } 1179 }
@@ -1232,7 +1232,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1232 struct csrow_info *p_csrow; 1232 struct csrow_info *p_csrow;
1233 int empty, channel_count; 1233 int empty, channel_count;
1234 int max_csrows; 1234 int max_csrows;
1235 int mtr; 1235 int mtr, mtr1;
1236 int csrow_megs; 1236 int csrow_megs;
1237 int channel; 1237 int channel;
1238 int csrow; 1238 int csrow;
@@ -1251,9 +1251,10 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1251 1251
1252 /* use branch 0 for the basis */ 1252 /* use branch 0 for the basis */
1253 mtr = pvt->b0_mtr[csrow >> 1]; 1253 mtr = pvt->b0_mtr[csrow >> 1];
1254 mtr1 = pvt->b1_mtr[csrow >> 1];
1254 1255
1255 /* if no DIMMS on this row, continue */ 1256 /* if no DIMMS on this row, continue */
1256 if (!MTR_DIMMS_PRESENT(mtr)) 1257 if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
1257 continue; 1258 continue;
1258 1259
1259 /* FAKE OUT VALUES, FIXME */ 1260 /* FAKE OUT VALUES, FIXME */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index b08b6d8e2dc7..f99d10655ed4 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -46,9 +46,10 @@
46/* Limits for i5400 */ 46/* Limits for i5400 */
47#define NUM_MTRS_PER_BRANCH 4 47#define NUM_MTRS_PER_BRANCH 4
48#define CHANNELS_PER_BRANCH 2 48#define CHANNELS_PER_BRANCH 2
49#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
49#define MAX_CHANNELS 4 50#define MAX_CHANNELS 4
50#define MAX_DIMMS (MAX_CHANNELS * 4) /* Up to 4 DIMM's per channel */ 51/* max possible csrows per channel */
51#define MAX_CSROWS (MAX_DIMMS * 2) /* max possible csrows per channel */ 52#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
52 53
53/* Device 16, 54/* Device 16,
54 * Function 0: System Address 55 * Function 0: System Address
@@ -331,7 +332,6 @@ static const struct i5400_dev_info i5400_devs[] = {
331 332
332struct i5400_dimm_info { 333struct i5400_dimm_info {
333 int megabytes; /* size, 0 means not present */ 334 int megabytes; /* size, 0 means not present */
334 int dual_rank;
335}; 335};
336 336
337/* driver private data structure */ 337/* driver private data structure */
@@ -849,11 +849,9 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
849 int n; 849 int n;
850 850
851 /* There is one MTR for each slot pair of FB-DIMMs, 851 /* There is one MTR for each slot pair of FB-DIMMs,
852 Each slot may have one or two ranks (2 csrows),
853 Each slot pair may be at branch 0 or branch 1. 852 Each slot pair may be at branch 0 or branch 1.
854 So, csrow should be divided by eight
855 */ 853 */
856 n = csrow >> 3; 854 n = csrow;
857 855
858 if (n >= NUM_MTRS_PER_BRANCH) { 856 if (n >= NUM_MTRS_PER_BRANCH) {
859 debugf0("ERROR: trying to access an invalid csrow: %d\n", 857 debugf0("ERROR: trying to access an invalid csrow: %d\n",
@@ -905,25 +903,22 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
905 amb_present_reg = determine_amb_present_reg(pvt, channel); 903 amb_present_reg = determine_amb_present_reg(pvt, channel);
906 904
907 /* Determine if there is a DIMM present in this DIMM slot */ 905 /* Determine if there is a DIMM present in this DIMM slot */
908 if (amb_present_reg & (1 << (csrow >> 1))) { 906 if (amb_present_reg & (1 << csrow)) {
909 dinfo->dual_rank = MTR_DIMM_RANK(mtr); 907 /* Start with the number of bits for a Bank
910 908 * on the DRAM */
911 if (!((dinfo->dual_rank == 0) && 909 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
912 ((csrow & 0x1) == 0x1))) { 910 /* Add thenumber of ROW bits */
913 /* Start with the number of bits for a Bank 911 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
914 * on the DRAM */ 912 /* add the number of COLUMN bits */
915 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 913 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
916 /* Add thenumber of ROW bits */ 914 /* add the number of RANK bits */
917 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 915 addrBits += MTR_DIMM_RANK(mtr);
918 /* add the number of COLUMN bits */ 916
919 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 917 addrBits += 6; /* add 64 bits per DIMM */
920 918 addrBits -= 20; /* divide by 2^^20 */
921 addrBits += 6; /* add 64 bits per DIMM */ 919 addrBits -= 3; /* 8 bits per bytes */
922 addrBits -= 20; /* divide by 2^^20 */ 920
923 addrBits -= 3; /* 8 bits per bytes */ 921 dinfo->megabytes = 1 << addrBits;
924
925 dinfo->megabytes = 1 << addrBits;
926 }
927 } 922 }
928 } 923 }
929} 924}
@@ -951,12 +946,12 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
951 return; 946 return;
952 } 947 }
953 948
954 /* Scan all the actual CSROWS (which is # of DIMMS * 2) 949 /* Scan all the actual CSROWS
955 * and calculate the information for each DIMM 950 * and calculate the information for each DIMM
956 * Start with the highest csrow first, to display it first 951 * Start with the highest csrow first, to display it first
957 * and work toward the 0th csrow 952 * and work toward the 0th csrow
958 */ 953 */
959 max_csrows = pvt->maxdimmperch * 2; 954 max_csrows = pvt->maxdimmperch;
960 for (csrow = max_csrows - 1; csrow >= 0; csrow--) { 955 for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
961 956
962 /* on an odd csrow, first output a 'boundary' marker, 957 /* on an odd csrow, first output a 'boundary' marker,
@@ -1064,7 +1059,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1064 1059
1065 /* Get the set of MTR[0-3] regs by each branch */ 1060 /* Get the set of MTR[0-3] regs by each branch */
1066 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { 1061 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
1067 int where = MTR0 + (slot_row * sizeof(u32)); 1062 int where = MTR0 + (slot_row * sizeof(u16));
1068 1063
1069 /* Branch 0 set of MTR registers */ 1064 /* Branch 0 set of MTR registers */
1070 pci_read_config_word(pvt->branch_0, where, 1065 pci_read_config_word(pvt->branch_0, where,
@@ -1146,7 +1141,7 @@ static int i5400_init_csrows(struct mem_ctl_info *mci)
1146 pvt = mci->pvt_info; 1141 pvt = mci->pvt_info;
1147 1142
1148 channel_count = pvt->maxch; 1143 channel_count = pvt->maxch;
1149 max_csrows = pvt->maxdimmperch * 2; 1144 max_csrows = pvt->maxdimmperch;
1150 1145
1151 empty = 1; /* Assume NO memory */ 1146 empty = 1; /* Assume NO memory */
1152 1147
@@ -1215,28 +1210,6 @@ static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
1215} 1210}
1216 1211
1217/* 1212/*
1218 * i5400_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
1219 *
1220 * ask the device how many channels are present and how many CSROWS
1221 * as well
1222 */
1223static void i5400_get_dimm_and_channel_counts(struct pci_dev *pdev,
1224 int *num_dimms_per_channel,
1225 int *num_channels)
1226{
1227 u8 value;
1228
1229 /* Need to retrieve just how many channels and dimms per channel are
1230 * supported on this memory controller
1231 */
1232 pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
1233 *num_dimms_per_channel = (int)value * 2;
1234
1235 pci_read_config_byte(pdev, MAXCH, &value);
1236 *num_channels = (int)value;
1237}
1238
1239/*
1240 * i5400_probe1 Probe for ONE instance of device to see if it is 1213 * i5400_probe1 Probe for ONE instance of device to see if it is
1241 * present. 1214 * present.
1242 * return: 1215 * return:
@@ -1263,22 +1236,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1263 if (PCI_FUNC(pdev->devfn) != 0) 1236 if (PCI_FUNC(pdev->devfn) != 0)
1264 return -ENODEV; 1237 return -ENODEV;
1265 1238
1266 /* Ask the devices for the number of CSROWS and CHANNELS so 1239 /* As we don't have a motherboard identification routine to determine
1267 * that we can calculate the memory resources, etc
1268 *
1269 * The Chipset will report what it can handle which will be greater
1270 * or equal to what the motherboard manufacturer will implement.
1271 *
1272 * As we don't have a motherboard identification routine to determine
1273 * actual number of slots/dimms per channel, we thus utilize the 1240 * actual number of slots/dimms per channel, we thus utilize the
1274 * resource as specified by the chipset. Thus, we might have 1241 * resource as specified by the chipset. Thus, we might have
1275 * have more DIMMs per channel than actually on the mobo, but this 1242 * have more DIMMs per channel than actually on the mobo, but this
1276 * allows the driver to support upto the chipset max, without 1243 * allows the driver to support upto the chipset max, without
1277 * some fancy mobo determination. 1244 * some fancy mobo determination.
1278 */ 1245 */
1279 i5400_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, 1246 num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
1280 &num_channels); 1247 num_channels = MAX_CHANNELS;
1281 num_csrows = num_dimms_per_channel * 2; 1248 num_csrows = num_dimms_per_channel;
1282 1249
1283 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 1250 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
1284 __func__, num_channels, num_dimms_per_channel, num_csrows); 1251 __func__, num_channels, num_dimms_per_channel, num_csrows);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 157f6504f25e..cf27402af97b 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -26,7 +26,9 @@
26#include "mpc85xx_edac.h" 26#include "mpc85xx_edac.h"
27 27
28static int edac_dev_idx; 28static int edac_dev_idx;
29#ifdef CONFIG_PCI
29static int edac_pci_idx; 30static int edac_pci_idx;
31#endif
30static int edac_mc_idx; 32static int edac_mc_idx;
31 33
32static u32 orig_ddr_err_disable; 34static u32 orig_ddr_err_disable;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index ced186d7e9a9..5089331544ed 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -33,6 +33,7 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/sched.h>
36#include <linux/spinlock.h> 37#include <linux/spinlock.h>
37#include <linux/time.h> 38#include <linux/time.h>
38#include <linux/uaccess.h> 39#include <linux/uaccess.h>
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 50f0176de615..98dbbda3ad41 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
188/* Impossible login_id, to detect logout attempt before successful login */ 188/* Impossible login_id, to detect logout attempt before successful login */
189#define INVALID_LOGIN_ID 0x10000 189#define INVALID_LOGIN_ID 0x10000
190 190
191/* 191#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
192 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
193 * provided in the config rom. Most devices do provide a value, which
194 * we'll use for login management orbs, but with some sane limits.
195 */
196#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */
197#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
198#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
199#define SBP2_ORB_NULL 0x80000000 192#define SBP2_ORB_NULL 0x80000000
200#define SBP2_RETRY_LIMIT 0xf /* 15 retries */ 193#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
201#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ 194#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
@@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
1034{ 1027{
1035 struct fw_csr_iterator ci; 1028 struct fw_csr_iterator ci;
1036 int key, value; 1029 int key, value;
1037 unsigned int timeout;
1038 1030
1039 fw_csr_iterator_init(&ci, directory); 1031 fw_csr_iterator_init(&ci, directory);
1040 while (fw_csr_iterator_next(&ci, &key, &value)) { 1032 while (fw_csr_iterator_next(&ci, &key, &value)) {
@@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
1059 1051
1060 case SBP2_CSR_UNIT_CHARACTERISTICS: 1052 case SBP2_CSR_UNIT_CHARACTERISTICS:
1061 /* the timeout value is stored in 500ms units */ 1053 /* the timeout value is stored in 500ms units */
1062 timeout = ((unsigned int) value >> 8 & 0xff) * 500; 1054 tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
1063 timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
1064 tgt->mgt_orb_timeout =
1065 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
1066
1067 if (timeout > tgt->mgt_orb_timeout)
1068 fw_notify("%s: config rom contains %ds "
1069 "management ORB timeout, limiting "
1070 "to %ds\n", tgt->bus_id,
1071 timeout / 1000,
1072 tgt->mgt_orb_timeout / 1000);
1073 break; 1055 break;
1074 1056
1075 case SBP2_CSR_LOGICAL_UNIT_NUMBER: 1057 case SBP2_CSR_LOGICAL_UNIT_NUMBER:
@@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
1087 return 0; 1069 return 0;
1088} 1070}
1089 1071
1072/*
1073 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
1074 * provided in the config rom. Most devices do provide a value, which
1075 * we'll use for login management orbs, but with some sane limits.
1076 */
1077static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
1078{
1079 unsigned int timeout = tgt->mgt_orb_timeout;
1080
1081 if (timeout > 40000)
1082 fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
1083 tgt->bus_id, timeout / 1000);
1084
1085 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
1086}
1087
1090static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, 1088static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
1091 u32 firmware_revision) 1089 u32 firmware_revision)
1092{ 1090{
@@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev)
1171 &firmware_revision) < 0) 1169 &firmware_revision) < 0)
1172 goto fail_tgt_put; 1170 goto fail_tgt_put;
1173 1171
1172 sbp2_clamp_management_orb_timeout(tgt);
1174 sbp2_init_workarounds(tgt, model, firmware_revision); 1173 sbp2_init_workarounds(tgt, model, firmware_revision);
1175 1174
1176 /* 1175 /*
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 420a96e7f2db..051d1ebbd287 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -939,7 +939,7 @@ static int __init ibft_init(void)
939 939
940 if (ibft_addr) { 940 if (ibft_addr) {
941 printk(KERN_INFO "iBFT detected at 0x%llx.\n", 941 printk(KERN_INFO "iBFT detected at 0x%llx.\n",
942 (u64)virt_to_phys((void *)ibft_addr)); 942 (u64)isa_virt_to_bus(ibft_addr));
943 943
944 rc = ibft_check_device(); 944 rc = ibft_check_device();
945 if (rc) 945 if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d53fbbfefa3e..dfb15c06c88f 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -65,10 +65,10 @@ void __init reserve_ibft_region(void)
65 * so skip that area */ 65 * so skip that area */
66 if (pos == VGA_MEM) 66 if (pos == VGA_MEM)
67 pos += VGA_SIZE; 67 pos += VGA_SIZE;
68 virt = phys_to_virt(pos); 68 virt = isa_bus_to_virt(pos);
69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { 69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) {
70 unsigned long *addr = 70 unsigned long *addr =
71 (unsigned long *)phys_to_virt(pos + 4); 71 (unsigned long *)isa_bus_to_virt(pos + 4);
72 len = *addr; 72 len = *addr;
73 /* if the length of the table extends past 1M, 73 /* if the length of the table extends past 1M,
74 * the table cannot be valid. */ 74 * the table cannot be valid. */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bb11a429394a..50de0f5750d8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -661,7 +661,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
661 661
662 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 662 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
663 desc, ioname ? ioname : "gpio%d", gpio); 663 desc, ioname ? ioname : "gpio%d", gpio);
664 if (dev) { 664 if (!IS_ERR(dev)) {
665 if (direction_may_change) 665 if (direction_may_change)
666 status = sysfs_create_group(&dev->kobj, 666 status = sysfs_create_group(&dev->kobj,
667 &gpio_attr_group); 667 &gpio_attr_group);
@@ -679,7 +679,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
679 if (status != 0) 679 if (status != 0)
680 device_unregister(dev); 680 device_unregister(dev);
681 } else 681 } else
682 status = -ENODEV; 682 status = PTR_ERR(dev);
683 if (status == 0) 683 if (status == 0)
684 set_bit(FLAG_EXPORT, &desc->flags); 684 set_bit(FLAG_EXPORT, &desc->flags);
685 } 685 }
@@ -800,11 +800,11 @@ static int gpiochip_export(struct gpio_chip *chip)
800 mutex_lock(&sysfs_lock); 800 mutex_lock(&sysfs_lock);
801 dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, 801 dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
802 "gpiochip%d", chip->base); 802 "gpiochip%d", chip->base);
803 if (dev) { 803 if (!IS_ERR(dev)) {
804 status = sysfs_create_group(&dev->kobj, 804 status = sysfs_create_group(&dev->kobj,
805 &gpiochip_attr_group); 805 &gpiochip_attr_group);
806 } else 806 } else
807 status = -ENODEV; 807 status = PTR_ERR(dev);
808 chip->exported = (status == 0); 808 chip->exported = (status == 0);
809 mutex_unlock(&sysfs_lock); 809 mutex_unlock(&sysfs_lock);
810 810
@@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
1487 return single_open(file, gpiolib_show, NULL); 1487 return single_open(file, gpiolib_show, NULL);
1488} 1488}
1489 1489
1490static struct file_operations gpiolib_operations = { 1490static const struct file_operations gpiolib_operations = {
1491 .open = gpiolib_open, 1491 .open = gpiolib_open,
1492 .read = seq_read, 1492 .read = seq_read,
1493 .llseek = seq_lseek, 1493 .llseek = seq_lseek,
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
index afad14792141..49384a7c5492 100644
--- a/drivers/gpio/twl4030-gpio.c
+++ b/drivers/gpio/twl4030-gpio.c
@@ -460,7 +460,8 @@ no_irqs:
460 return ret; 460 return ret;
461} 461}
462 462
463static int __devexit gpio_twl4030_remove(struct platform_device *pdev) 463/* Cannot use __devexit as gpio_twl4030_probe() calls us */
464static int gpio_twl4030_remove(struct platform_device *pdev)
464{ 465{
465 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data; 466 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
466 int status; 467 int status;
@@ -493,7 +494,7 @@ static struct platform_driver gpio_twl4030_driver = {
493 .driver.name = "twl4030_gpio", 494 .driver.name = "twl4030_gpio",
494 .driver.owner = THIS_MODULE, 495 .driver.owner = THIS_MODULE,
495 .probe = gpio_twl4030_probe, 496 .probe = gpio_twl4030_probe,
496 .remove = __devexit_p(gpio_twl4030_remove), 497 .remove = gpio_twl4030_remove,
497}; 498};
498 499
499static int __init gpio_twl4030_init(void) 500static int __init gpio_twl4030_init(void)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ba728ad77f2a..5cae0b3eee9b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
482 list_for_each_entry_safe(mode, t, &connector->user_modes, head) 482 list_for_each_entry_safe(mode, t, &connector->user_modes, head)
483 drm_mode_remove(connector, mode); 483 drm_mode_remove(connector, mode);
484 484
485 kfree(connector->fb_helper_private);
485 mutex_lock(&dev->mode_config.mutex); 486 mutex_lock(&dev->mode_config.mutex);
486 drm_mode_object_put(dev, &connector->base); 487 drm_mode_object_put(dev, &connector->base);
487 list_del(&connector->head); 488 list_del(&connector->head);
@@ -1555,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1555 struct drm_crtc *crtc; 1556 struct drm_crtc *crtc;
1556 int ret = 0; 1557 int ret = 0;
1557 1558
1558 DRM_DEBUG_KMS("\n");
1559
1560 if (!req->flags) { 1559 if (!req->flags) {
1561 DRM_ERROR("no operation set\n"); 1560 DRM_ERROR("no operation set\n");
1562 return -EINVAL; 1561 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index fe8697447f32..bbfd110a7168 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h"
35 36
36static void drm_mode_validate_flag(struct drm_connector *connector, 37static void drm_mode_validate_flag(struct drm_connector *connector,
37 int flags) 38 int flags)
@@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
90 list_for_each_entry_safe(mode, t, &connector->modes, head) 91 list_for_each_entry_safe(mode, t, &connector->modes, head)
91 mode->status = MODE_UNVERIFIED; 92 mode->status = MODE_UNVERIFIED;
92 93
93 connector->status = connector->funcs->detect(connector); 94 if (connector->force) {
95 if (connector->force == DRM_FORCE_ON)
96 connector->status = connector_status_connected;
97 else
98 connector->status = connector_status_disconnected;
99 if (connector->funcs->force)
100 connector->funcs->force(connector);
101 } else
102 connector->status = connector->funcs->detect(connector);
94 103
95 if (connector->status == connector_status_disconnected) { 104 if (connector->status == connector_status_disconnected) {
96 DRM_DEBUG_KMS("%s is disconnected\n", 105 DRM_DEBUG_KMS("%s is disconnected\n",
@@ -267,6 +276,66 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
267 return NULL; 276 return NULL;
268} 277}
269 278
279static bool drm_has_cmdline_mode(struct drm_connector *connector)
280{
281 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
282 struct drm_fb_helper_cmdline_mode *cmdline_mode;
283
284 if (!fb_help_conn)
285 return false;
286
287 cmdline_mode = &fb_help_conn->cmdline_mode;
288 return cmdline_mode->specified;
289}
290
291static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
292{
293 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
294 struct drm_fb_helper_cmdline_mode *cmdline_mode;
295 struct drm_display_mode *mode = NULL;
296
297 if (!fb_help_conn)
298 return mode;
299
300 cmdline_mode = &fb_help_conn->cmdline_mode;
301 if (cmdline_mode->specified == false)
302 return mode;
303
304 /* attempt to find a matching mode in the list of modes
305 * we have gotten so far, if not add a CVT mode that conforms
306 */
307 if (cmdline_mode->rb || cmdline_mode->margins)
308 goto create_mode;
309
310 list_for_each_entry(mode, &connector->modes, head) {
311 /* check width/height */
312 if (mode->hdisplay != cmdline_mode->xres ||
313 mode->vdisplay != cmdline_mode->yres)
314 continue;
315
316 if (cmdline_mode->refresh_specified) {
317 if (mode->vrefresh != cmdline_mode->refresh)
318 continue;
319 }
320
321 if (cmdline_mode->interlace) {
322 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
323 continue;
324 }
325 return mode;
326 }
327
328create_mode:
329 mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
330 cmdline_mode->yres,
331 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
332 cmdline_mode->rb, cmdline_mode->interlace,
333 cmdline_mode->margins);
334 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
335 list_add(&mode->head, &connector->modes);
336 return mode;
337}
338
270static bool drm_connector_enabled(struct drm_connector *connector, bool strict) 339static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
271{ 340{
272 bool enable; 341 bool enable;
@@ -317,10 +386,16 @@ static bool drm_target_preferred(struct drm_device *dev,
317 continue; 386 continue;
318 } 387 }
319 388
320 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 389 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
321 connector->base.id); 390 connector->base.id);
322 391
323 modes[i] = drm_has_preferred_mode(connector, width, height); 392 /* got for command line mode first */
393 modes[i] = drm_pick_cmdline_mode(connector, width, height);
394 if (!modes[i]) {
395 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
396 connector->base.id);
397 modes[i] = drm_has_preferred_mode(connector, width, height);
398 }
324 /* No preferred modes, pick one off the list */ 399 /* No preferred modes, pick one off the list */
325 if (!modes[i] && !list_empty(&connector->modes)) { 400 if (!modes[i] && !list_empty(&connector->modes)) {
326 list_for_each_entry(modes[i], &connector->modes, head) 401 list_for_each_entry(modes[i], &connector->modes, head)
@@ -369,6 +444,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
369 my_score = 1; 444 my_score = 1;
370 if (connector->status == connector_status_connected) 445 if (connector->status == connector_status_connected)
371 my_score++; 446 my_score++;
447 if (drm_has_cmdline_mode(connector))
448 my_score++;
372 if (drm_has_preferred_mode(connector, width, height)) 449 if (drm_has_preferred_mode(connector, width, height))
373 my_score++; 450 my_score++;
374 451
@@ -943,6 +1020,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
943{ 1020{
944 int count = 0; 1021 int count = 0;
945 1022
1023 drm_fb_helper_parse_command_line(dev);
1024
946 count = drm_helper_probe_connector_modes(dev, 1025 count = drm_helper_probe_connector_modes(dev,
947 dev->mode_config.max_width, 1026 dev->mode_config.max_width,
948 dev->mode_config.max_height); 1027 dev->mode_config.max_height);
@@ -950,7 +1029,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
950 /* 1029 /*
951 * we shouldn't end up with no modes here. 1030 * we shouldn't end up with no modes here.
952 */ 1031 */
953 WARN(!count, "Connected connector with 0 modes\n"); 1032 WARN(!count, "No connectors reported connected with modes\n");
954 1033
955 drm_setup_crtcs(dev); 1034 drm_setup_crtcs(dev);
956 1035
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 90d76bacff17..cea665d86dd3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -109,7 +109,9 @@ static struct edid_quirk {
109 109
110 110
111/* Valid EDID header has these bytes */ 111/* Valid EDID header has these bytes */
112static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 112static const u8 edid_header[] = {
113 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
114};
113 115
114/** 116/**
115 * edid_is_valid - sanity check EDID data 117 * edid_is_valid - sanity check EDID data
@@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
500 } 502 }
501 return mode; 503 return mode;
502} 504}
505
506/*
507 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
508 * monitors fill with ascii space (0x20) instead.
509 */
510static int
511bad_std_timing(u8 a, u8 b)
512{
513 return (a == 0x00 && b == 0x00) ||
514 (a == 0x01 && b == 0x01) ||
515 (a == 0x20 && b == 0x20);
516}
517
503/** 518/**
504 * drm_mode_std - convert standard mode info (width, height, refresh) into mode 519 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
505 * @t: standard timing params 520 * @t: standard timing params
@@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
513 */ 528 */
514struct drm_display_mode *drm_mode_std(struct drm_device *dev, 529struct drm_display_mode *drm_mode_std(struct drm_device *dev,
515 struct std_timing *t, 530 struct std_timing *t,
531 int revision,
516 int timing_level) 532 int timing_level)
517{ 533{
518 struct drm_display_mode *mode; 534 struct drm_display_mode *mode;
@@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
523 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) 539 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
524 >> EDID_TIMING_VFREQ_SHIFT; 540 >> EDID_TIMING_VFREQ_SHIFT;
525 541
542 if (bad_std_timing(t->hsize, t->vfreq_aspect))
543 return NULL;
544
526 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ 545 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
527 hsize = t->hsize * 8 + 248; 546 hsize = t->hsize * 8 + 248;
528 /* vrefresh_rate = vfreq + 60 */ 547 /* vrefresh_rate = vfreq + 60 */
529 vrefresh_rate = vfreq + 60; 548 vrefresh_rate = vfreq + 60;
530 /* the vdisplay is calculated based on the aspect ratio */ 549 /* the vdisplay is calculated based on the aspect ratio */
531 if (aspect_ratio == 0) 550 if (aspect_ratio == 0) {
532 vsize = (hsize * 10) / 16; 551 if (revision < 3)
533 else if (aspect_ratio == 1) 552 vsize = hsize;
553 else
554 vsize = (hsize * 10) / 16;
555 } else if (aspect_ratio == 1)
534 vsize = (hsize * 3) / 4; 556 vsize = (hsize * 3) / 4;
535 else if (aspect_ratio == 2) 557 else if (aspect_ratio == 2)
536 vsize = (hsize * 4) / 5; 558 vsize = (hsize * 4) / 5;
@@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
538 vsize = (hsize * 9) / 16; 560 vsize = (hsize * 9) / 16;
539 /* HDTV hack */ 561 /* HDTV hack */
540 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { 562 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
541 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 563 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
564 false);
542 mode->hdisplay = 1366; 565 mode->hdisplay = 1366;
543 mode->vsync_start = mode->vsync_start - 1; 566 mode->vsync_start = mode->vsync_start - 1;
544 mode->vsync_end = mode->vsync_end - 1; 567 mode->vsync_end = mode->vsync_end - 1;
@@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
557 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 580 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
558 break; 581 break;
559 case LEVEL_CVT: 582 case LEVEL_CVT:
560 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 583 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
584 false);
561 break; 585 break;
562 } 586 }
563 return mode; 587 return mode;
@@ -602,6 +626,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
602 return NULL; 626 return NULL;
603 } 627 }
604 628
629 /* it is incorrect if hsync/vsync width is zero */
630 if (!hsync_pulse_width || !vsync_pulse_width) {
631 DRM_DEBUG_KMS("Incorrect Detailed timing. "
632 "Wrong Hsync/Vsync pulse width\n");
633 return NULL;
634 }
605 mode = drm_mode_create(dev); 635 mode = drm_mode_create(dev);
606 if (!mode) 636 if (!mode)
607 return NULL; 637 return NULL;
@@ -623,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
623 mode->vsync_end = mode->vsync_start + vsync_pulse_width; 653 mode->vsync_end = mode->vsync_start + vsync_pulse_width;
624 mode->vtotal = mode->vdisplay + vblank; 654 mode->vtotal = mode->vdisplay + vblank;
625 655
656 /* perform the basic check for the detailed timing */
657 if (mode->hsync_end > mode->htotal ||
658 mode->vsync_end > mode->vtotal) {
659 drm_mode_destroy(dev, mode);
660 DRM_DEBUG_KMS("Incorrect detailed timing. "
661 "Sync is beyond the blank.\n");
662 return NULL;
663 }
664
626 drm_mode_set_name(mode); 665 drm_mode_set_name(mode);
627 666
628 if (pt->misc & DRM_EDID_PT_INTERLACED) 667 if (pt->misc & DRM_EDID_PT_INTERLACED)
@@ -779,7 +818,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
779 continue; 818 continue;
780 819
781 newmode = drm_mode_std(dev, &edid->standard_timings[i], 820 newmode = drm_mode_std(dev, &edid->standard_timings[i],
782 timing_level); 821 edid->revision, timing_level);
783 if (newmode) { 822 if (newmode) {
784 drm_mode_probed_add(connector, newmode); 823 drm_mode_probed_add(connector, newmode);
785 modes++; 824 modes++;
@@ -829,13 +868,13 @@ static int add_detailed_info(struct drm_connector *connector,
829 case EDID_DETAIL_MONITOR_CPDATA: 868 case EDID_DETAIL_MONITOR_CPDATA:
830 break; 869 break;
831 case EDID_DETAIL_STD_MODES: 870 case EDID_DETAIL_STD_MODES:
832 /* Five modes per detailed section */ 871 for (j = 0; j < 6; i++) {
833 for (j = 0; j < 5; i++) {
834 struct std_timing *std; 872 struct std_timing *std;
835 struct drm_display_mode *newmode; 873 struct drm_display_mode *newmode;
836 874
837 std = &data->data.timings[j]; 875 std = &data->data.timings[j];
838 newmode = drm_mode_std(dev, std, 876 newmode = drm_mode_std(dev, std,
877 edid->revision,
839 timing_level); 878 timing_level);
840 if (newmode) { 879 if (newmode) {
841 drm_mode_probed_add(connector, newmode); 880 drm_mode_probed_add(connector, newmode);
@@ -964,7 +1003,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
964 struct drm_display_mode *newmode; 1003 struct drm_display_mode *newmode;
965 1004
966 std = &data->data.timings[j]; 1005 std = &data->data.timings[j];
967 newmode = drm_mode_std(dev, std, timing_level); 1006 newmode = drm_mode_std(dev, std,
1007 edid->revision,
1008 timing_level);
968 if (newmode) { 1009 if (newmode) {
969 drm_mode_probed_add(connector, newmode); 1010 drm_mode_probed_add(connector, newmode);
970 modes++; 1011 modes++;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2c4671314884..dc8e374a0b55 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
40 40
41static LIST_HEAD(kernel_fb_helper_list); 41static LIST_HEAD(kernel_fb_helper_list);
42 42
43int drm_fb_helper_add_connector(struct drm_connector *connector)
44{
45 connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
46 if (!connector->fb_helper_private)
47 return -ENOMEM;
48
49 return 0;
50}
51EXPORT_SYMBOL(drm_fb_helper_add_connector);
52
53static int my_atoi(const char *name)
54{
55 int val = 0;
56
57 for (;; name++) {
58 switch (*name) {
59 case '0' ... '9':
60 val = 10*val+(*name-'0');
61 break;
62 default:
63 return val;
64 }
65 }
66}
67
68/**
69 * drm_fb_helper_connector_parse_command_line - parse command line for connector
70 * @connector - connector to parse line for
71 * @mode_option - per connector mode option
72 *
73 * This parses the connector specific then generic command lines for
74 * modes and options to configure the connector.
75 *
76 * This uses the same parameters as the fb modedb.c, except for extra
77 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
78 *
79 * enable/enable Digital/disable bit at the end
80 */
81static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
82 const char *mode_option)
83{
84 const char *name;
85 unsigned int namelen;
86 int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
87 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
88 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
89 int i;
90 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
91 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
92 struct drm_fb_helper_cmdline_mode *cmdline_mode;
93
94 if (!fb_help_conn)
95 return false;
96
97 cmdline_mode = &fb_help_conn->cmdline_mode;
98 if (!mode_option)
99 mode_option = fb_mode_option;
100
101 if (!mode_option) {
102 cmdline_mode->specified = false;
103 return false;
104 }
105
106 name = mode_option;
107 namelen = strlen(name);
108 for (i = namelen-1; i >= 0; i--) {
109 switch (name[i]) {
110 case '@':
111 namelen = i;
112 if (!refresh_specified && !bpp_specified &&
113 !yres_specified) {
114 refresh = my_atoi(&name[i+1]);
115 refresh_specified = 1;
116 if (cvt || rb)
117 cvt = 0;
118 } else
119 goto done;
120 break;
121 case '-':
122 namelen = i;
123 if (!bpp_specified && !yres_specified) {
124 bpp = my_atoi(&name[i+1]);
125 bpp_specified = 1;
126 if (cvt || rb)
127 cvt = 0;
128 } else
129 goto done;
130 break;
131 case 'x':
132 if (!yres_specified) {
133 yres = my_atoi(&name[i+1]);
134 yres_specified = 1;
135 } else
136 goto done;
137 case '0' ... '9':
138 break;
139 case 'M':
140 if (!yres_specified)
141 cvt = 1;
142 break;
143 case 'R':
144 if (!cvt)
145 rb = 1;
146 break;
147 case 'm':
148 if (!cvt)
149 margins = 1;
150 break;
151 case 'i':
152 if (!cvt)
153 interlace = 1;
154 break;
155 case 'e':
156 force = DRM_FORCE_ON;
157 break;
158 case 'D':
159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
161 force = DRM_FORCE_ON;
162 else
163 force = DRM_FORCE_ON_DIGITAL;
164 break;
165 case 'd':
166 force = DRM_FORCE_OFF;
167 break;
168 default:
169 goto done;
170 }
171 }
172 if (i < 0 && yres_specified) {
173 xres = my_atoi(name);
174 res_specified = 1;
175 }
176done:
177
178 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
179 drm_get_connector_name(connector), xres, yres,
180 (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
181 "", (margins) ? " with margins" : "", (interlace) ?
182 " interlaced" : "");
183
184 if (force) {
185 const char *s;
186 switch (force) {
187 case DRM_FORCE_OFF: s = "OFF"; break;
188 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
189 default:
190 case DRM_FORCE_ON: s = "ON"; break;
191 }
192
193 DRM_INFO("forcing %s connector %s\n",
194 drm_get_connector_name(connector), s);
195 connector->force = force;
196 }
197
198 if (res_specified) {
199 cmdline_mode->specified = true;
200 cmdline_mode->xres = xres;
201 cmdline_mode->yres = yres;
202 }
203
204 if (refresh_specified) {
205 cmdline_mode->refresh_specified = true;
206 cmdline_mode->refresh = refresh;
207 }
208
209 if (bpp_specified) {
210 cmdline_mode->bpp_specified = true;
211 cmdline_mode->bpp = bpp;
212 }
213 cmdline_mode->rb = rb ? true : false;
214 cmdline_mode->cvt = cvt ? true : false;
215 cmdline_mode->interlace = interlace ? true : false;
216
217 return true;
218}
219
220int drm_fb_helper_parse_command_line(struct drm_device *dev)
221{
222 struct drm_connector *connector;
223
224 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
225 char *option = NULL;
226
227 /* do something on return - turn off connector maybe */
228 if (fb_get_options(drm_get_connector_name(connector), &option))
229 continue;
230
231 drm_fb_helper_connector_parse_command_line(connector, option);
232 }
233 return 0;
234}
235
43bool drm_fb_helper_force_kernel_mode(void) 236bool drm_fb_helper_force_kernel_mode(void)
44{ 237{
45 int i = 0; 238 int i = 0;
@@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
87} 280}
88EXPORT_SYMBOL(drm_fb_helper_restore); 281EXPORT_SYMBOL(drm_fb_helper_restore);
89 282
283#ifdef CONFIG_MAGIC_SYSRQ
90static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 284static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
91{ 285{
92 drm_fb_helper_restore(); 286 drm_fb_helper_restore();
@@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
103 .help_msg = "force-fb(V)", 297 .help_msg = "force-fb(V)",
104 .action_msg = "Restore framebuffer console", 298 .action_msg = "Restore framebuffer console",
105}; 299};
300#endif
106 301
107static void drm_fb_helper_on(struct fb_info *info) 302static void drm_fb_helper_on(struct fb_info *info)
108{ 303{
@@ -259,6 +454,109 @@ out_free:
259} 454}
260EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); 455EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
261 456
457static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
458 u16 blue, u16 regno, struct fb_info *info)
459{
460 struct drm_fb_helper *fb_helper = info->par;
461 struct drm_framebuffer *fb = fb_helper->fb;
462 int pindex;
463
464 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
465 u32 *palette;
466 u32 value;
467 /* place color in psuedopalette */
468 if (regno > 16)
469 return -EINVAL;
470 palette = (u32 *)info->pseudo_palette;
471 red >>= (16 - info->var.red.length);
472 green >>= (16 - info->var.green.length);
473 blue >>= (16 - info->var.blue.length);
474 value = (red << info->var.red.offset) |
475 (green << info->var.green.offset) |
476 (blue << info->var.blue.offset);
477 palette[regno] = value;
478 return 0;
479 }
480
481 pindex = regno;
482
483 if (fb->bits_per_pixel == 16) {
484 pindex = regno << 3;
485
486 if (fb->depth == 16 && regno > 63)
487 return -EINVAL;
488 if (fb->depth == 15 && regno > 31)
489 return -EINVAL;
490
491 if (fb->depth == 16) {
492 u16 r, g, b;
493 int i;
494 if (regno < 32) {
495 for (i = 0; i < 8; i++)
496 fb_helper->funcs->gamma_set(crtc, red,
497 green, blue, pindex + i);
498 }
499
500 fb_helper->funcs->gamma_get(crtc, &r,
501 &g, &b,
502 pindex >> 1);
503
504 for (i = 0; i < 4; i++)
505 fb_helper->funcs->gamma_set(crtc, r,
506 green, b,
507 (pindex >> 1) + i);
508 }
509 }
510
511 if (fb->depth != 16)
512 fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
513 return 0;
514}
515
516int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
517{
518 struct drm_fb_helper *fb_helper = info->par;
519 struct drm_device *dev = fb_helper->dev;
520 u16 *red, *green, *blue, *transp;
521 struct drm_crtc *crtc;
522 int i, rc = 0;
523 int start;
524
525 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
526 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
527 for (i = 0; i < fb_helper->crtc_count; i++) {
528 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
529 break;
530 }
531 if (i == fb_helper->crtc_count)
532 continue;
533
534 red = cmap->red;
535 green = cmap->green;
536 blue = cmap->blue;
537 transp = cmap->transp;
538 start = cmap->start;
539
540 for (i = 0; i < cmap->len; i++) {
541 u16 hred, hgreen, hblue, htransp = 0xffff;
542
543 hred = *red++;
544 hgreen = *green++;
545 hblue = *blue++;
546
547 if (transp)
548 htransp = *transp++;
549
550 rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
551 if (rc)
552 return rc;
553 }
554 crtc_funcs->load_lut(crtc);
555 }
556 return rc;
557}
558EXPORT_SYMBOL(drm_fb_helper_setcmap);
559
262int drm_fb_helper_setcolreg(unsigned regno, 560int drm_fb_helper_setcolreg(unsigned regno,
263 unsigned red, 561 unsigned red,
264 unsigned green, 562 unsigned green,
@@ -270,10 +568,13 @@ int drm_fb_helper_setcolreg(unsigned regno,
270 struct drm_device *dev = fb_helper->dev; 568 struct drm_device *dev = fb_helper->dev;
271 struct drm_crtc *crtc; 569 struct drm_crtc *crtc;
272 int i; 570 int i;
571 int ret;
273 572
274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 573 if (regno > 255)
275 struct drm_framebuffer *fb = fb_helper->fb; 574 return 1;
276 575
576 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
577 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
277 for (i = 0; i < fb_helper->crtc_count; i++) { 578 for (i = 0; i < fb_helper->crtc_count; i++) {
278 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 579 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
279 break; 580 break;
@@ -281,35 +582,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
281 if (i == fb_helper->crtc_count) 582 if (i == fb_helper->crtc_count)
282 continue; 583 continue;
283 584
284 if (regno > 255) 585 ret = setcolreg(crtc, red, green, blue, regno, info);
285 return 1; 586 if (ret)
286 587 return ret;
287 if (fb->depth == 8) {
288 fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
289 return 0;
290 }
291 588
292 if (regno < 16) { 589 crtc_funcs->load_lut(crtc);
293 switch (fb->depth) {
294 case 15:
295 fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
296 ((green & 0xf800) >> 6) |
297 ((blue & 0xf800) >> 11);
298 break;
299 case 16:
300 fb->pseudo_palette[regno] = (red & 0xf800) |
301 ((green & 0xfc00) >> 5) |
302 ((blue & 0xf800) >> 11);
303 break;
304 case 24:
305 case 32:
306 fb->pseudo_palette[regno] =
307 (((red >> 8) & 0xff) << info->var.red.offset) |
308 (((green >> 8) & 0xff) << info->var.green.offset) |
309 (((blue >> 8) & 0xff) << info->var.blue.offset);
310 break;
311 }
312 }
313 } 590 }
314 return 0; 591 return 0;
315} 592}
@@ -430,7 +707,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
430 707
431 if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { 708 if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
432 mutex_lock(&dev->mode_config.mutex); 709 mutex_lock(&dev->mode_config.mutex);
433 ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set); 710 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
434 mutex_unlock(&dev->mode_config.mutex); 711 mutex_unlock(&dev->mode_config.mutex);
435 if (ret) 712 if (ret)
436 return ret; 713 return ret;
@@ -479,11 +756,14 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
479EXPORT_SYMBOL(drm_fb_helper_pan_display); 756EXPORT_SYMBOL(drm_fb_helper_pan_display);
480 757
481int drm_fb_helper_single_fb_probe(struct drm_device *dev, 758int drm_fb_helper_single_fb_probe(struct drm_device *dev,
759 int preferred_bpp,
482 int (*fb_create)(struct drm_device *dev, 760 int (*fb_create)(struct drm_device *dev,
483 uint32_t fb_width, 761 uint32_t fb_width,
484 uint32_t fb_height, 762 uint32_t fb_height,
485 uint32_t surface_width, 763 uint32_t surface_width,
486 uint32_t surface_height, 764 uint32_t surface_height,
765 uint32_t surface_depth,
766 uint32_t surface_bpp,
487 struct drm_framebuffer **fb_ptr)) 767 struct drm_framebuffer **fb_ptr))
488{ 768{
489 struct drm_crtc *crtc; 769 struct drm_crtc *crtc;
@@ -497,8 +777,48 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
497 struct drm_framebuffer *fb; 777 struct drm_framebuffer *fb;
498 struct drm_mode_set *modeset = NULL; 778 struct drm_mode_set *modeset = NULL;
499 struct drm_fb_helper *fb_helper; 779 struct drm_fb_helper *fb_helper;
780 uint32_t surface_depth = 24, surface_bpp = 32;
500 781
782 /* if driver picks 8 or 16 by default use that
783 for both depth/bpp */
784 if (preferred_bpp != surface_bpp) {
785 surface_depth = surface_bpp = preferred_bpp;
786 }
501 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 787 /* first up get a count of crtcs now in use and new min/maxes width/heights */
788 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
789 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
790
791 struct drm_fb_helper_cmdline_mode *cmdline_mode;
792
793 if (!fb_help_conn)
794 continue;
795
796 cmdline_mode = &fb_help_conn->cmdline_mode;
797
798 if (cmdline_mode->bpp_specified) {
799 switch (cmdline_mode->bpp) {
800 case 8:
801 surface_depth = surface_bpp = 8;
802 break;
803 case 15:
804 surface_depth = 15;
805 surface_bpp = 16;
806 break;
807 case 16:
808 surface_depth = surface_bpp = 16;
809 break;
810 case 24:
811 surface_depth = surface_bpp = 24;
812 break;
813 case 32:
814 surface_depth = 24;
815 surface_bpp = 32;
816 break;
817 }
818 break;
819 }
820 }
821
502 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 822 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
503 if (drm_helper_crtc_in_use(crtc)) { 823 if (drm_helper_crtc_in_use(crtc)) {
504 if (crtc->desired_mode) { 824 if (crtc->desired_mode) {
@@ -527,7 +847,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
527 /* do we have an fb already? */ 847 /* do we have an fb already? */
528 if (list_empty(&dev->mode_config.fb_kernel_list)) { 848 if (list_empty(&dev->mode_config.fb_kernel_list)) {
529 ret = (*fb_create)(dev, fb_width, fb_height, surface_width, 849 ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
530 surface_height, &fb); 850 surface_height, surface_depth, surface_bpp,
851 &fb);
531 if (ret) 852 if (ret)
532 return -EINVAL; 853 return -EINVAL;
533 new_fb = 1; 854 new_fb = 1;
@@ -618,10 +939,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
618} 939}
619EXPORT_SYMBOL(drm_fb_helper_free); 940EXPORT_SYMBOL(drm_fb_helper_free);
620 941
621void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) 942void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
943 uint32_t depth)
622{ 944{
623 info->fix.type = FB_TYPE_PACKED_PIXELS; 945 info->fix.type = FB_TYPE_PACKED_PIXELS;
624 info->fix.visual = FB_VISUAL_TRUECOLOR; 946 info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
947 FB_VISUAL_TRUECOLOR;
625 info->fix.type_aux = 0; 948 info->fix.type_aux = 0;
626 info->fix.xpanstep = 1; /* doing it in hw */ 949 info->fix.xpanstep = 1; /* doing it in hw */
627 info->fix.ypanstep = 1; /* doing it in hw */ 950 info->fix.ypanstep = 1; /* doing it in hw */
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 49404ce1666e..51f677215f1d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
88#define HV_FACTOR 1000 88#define HV_FACTOR 1000
89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, 89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
90 int vdisplay, int vrefresh, 90 int vdisplay, int vrefresh,
91 bool reduced, bool interlaced) 91 bool reduced, bool interlaced, bool margins)
92{ 92{
93 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 93 /* 1) top/bottom margin size (% of height) - default: 1.8, */
94#define CVT_MARGIN_PERCENTAGE 18 94#define CVT_MARGIN_PERCENTAGE 18
@@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
101 /* Pixel Clock step (kHz) */ 101 /* Pixel Clock step (kHz) */
102#define CVT_CLOCK_STEP 250 102#define CVT_CLOCK_STEP 250
103 struct drm_display_mode *drm_mode; 103 struct drm_display_mode *drm_mode;
104 bool margins = false;
105 unsigned int vfieldrate, hperiod; 104 unsigned int vfieldrate, hperiod;
106 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; 105 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
107 int interlace; 106 int interlace;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 7e1fbe5d4779..4ac900f4647f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
369} 369}
370 370
371/** AGP virtual memory operations */ 371/** AGP virtual memory operations */
372static struct vm_operations_struct drm_vm_ops = { 372static const struct vm_operations_struct drm_vm_ops = {
373 .fault = drm_vm_fault, 373 .fault = drm_vm_fault,
374 .open = drm_vm_open, 374 .open = drm_vm_open,
375 .close = drm_vm_close, 375 .close = drm_vm_close,
376}; 376};
377 377
378/** Shared virtual memory operations */ 378/** Shared virtual memory operations */
379static struct vm_operations_struct drm_vm_shm_ops = { 379static const struct vm_operations_struct drm_vm_shm_ops = {
380 .fault = drm_vm_shm_fault, 380 .fault = drm_vm_shm_fault,
381 .open = drm_vm_open, 381 .open = drm_vm_open,
382 .close = drm_vm_shm_close, 382 .close = drm_vm_shm_close,
383}; 383};
384 384
385/** DMA virtual memory operations */ 385/** DMA virtual memory operations */
386static struct vm_operations_struct drm_vm_dma_ops = { 386static const struct vm_operations_struct drm_vm_dma_ops = {
387 .fault = drm_vm_dma_fault, 387 .fault = drm_vm_dma_fault,
388 .open = drm_vm_open, 388 .open = drm_vm_open,
389 .close = drm_vm_close, 389 .close = drm_vm_close,
390}; 390};
391 391
392/** Scatter-gather virtual memory operations */ 392/** Scatter-gather virtual memory operations */
393static struct vm_operations_struct drm_vm_sg_ops = { 393static const struct vm_operations_struct drm_vm_sg_ops = {
394 .fault = drm_vm_sg_fault, 394 .fault = drm_vm_sg_fault,
395 .open = drm_vm_open, 395 .open = drm_vm_open,
396 .close = drm_vm_close, 396 .close = drm_vm_close,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 45d507ebd3ff..e5b138be45fa 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1227,8 +1227,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1227 goto out; 1227 goto out;
1228 1228
1229 /* Try to set up FBC with a reasonable compressed buffer size */ 1229 /* Try to set up FBC with a reasonable compressed buffer size */
1230 if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) && 1230 if (I915_HAS_FBC(dev) && i915_powersave) {
1231 i915_powersave) {
1232 int cfb_size; 1231 int cfb_size;
1233 1232
1234 /* Try to get an 8M buffer... */ 1233 /* Try to get an 8M buffer... */
@@ -1468,6 +1467,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1468 spin_lock_init(&dev_priv->user_irq_lock); 1467 spin_lock_init(&dev_priv->user_irq_lock);
1469 spin_lock_init(&dev_priv->error_lock); 1468 spin_lock_init(&dev_priv->error_lock);
1470 dev_priv->user_irq_refcount = 0; 1469 dev_priv->user_irq_refcount = 0;
1470 dev_priv->trace_irq_seqno = 0;
1471 1471
1472 ret = drm_vblank_init(dev, I915_NUM_PIPE); 1472 ret = drm_vblank_init(dev, I915_NUM_PIPE);
1473 1473
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b93814c0d3e2..7f436ec075f6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,7 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
89 pci_set_power_state(dev->pdev, PCI_D3hot); 89 pci_set_power_state(dev->pdev, PCI_D3hot);
90 } 90 }
91 91
92 dev_priv->suspended = 1; 92 /* Modeset on resume, not lid events */
93 dev_priv->modeset_on_lid = 0;
93 94
94 return 0; 95 return 0;
95} 96}
@@ -124,7 +125,7 @@ static int i915_resume(struct drm_device *dev)
124 drm_helper_resume_force_mode(dev); 125 drm_helper_resume_force_mode(dev);
125 } 126 }
126 127
127 dev_priv->suspended = 0; 128 dev_priv->modeset_on_lid = 0;
128 129
129 return ret; 130 return ret;
130} 131}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b24b2d145b75..57204e298975 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -202,6 +202,7 @@ typedef struct drm_i915_private {
202 spinlock_t user_irq_lock; 202 spinlock_t user_irq_lock;
203 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ 203 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
204 int user_irq_refcount; 204 int user_irq_refcount;
205 u32 trace_irq_seqno;
205 /** Cached value of IMR to avoid reads in updating the bitfield */ 206 /** Cached value of IMR to avoid reads in updating the bitfield */
206 u32 irq_mask_reg; 207 u32 irq_mask_reg;
207 u32 pipestat[2]; 208 u32 pipestat[2];
@@ -273,7 +274,7 @@ typedef struct drm_i915_private {
273 struct drm_i915_display_funcs display; 274 struct drm_i915_display_funcs display;
274 275
275 /* Register state */ 276 /* Register state */
276 bool suspended; 277 bool modeset_on_lid;
277 u8 saveLBB; 278 u8 saveLBB;
278 u32 saveDSPACNTR; 279 u32 saveDSPACNTR;
279 u32 saveDSPBCNTR; 280 u32 saveDSPBCNTR;
@@ -295,6 +296,12 @@ typedef struct drm_i915_private {
295 u32 saveVBLANK_A; 296 u32 saveVBLANK_A;
296 u32 saveVSYNC_A; 297 u32 saveVSYNC_A;
297 u32 saveBCLRPAT_A; 298 u32 saveBCLRPAT_A;
299 u32 saveTRANS_HTOTAL_A;
300 u32 saveTRANS_HBLANK_A;
301 u32 saveTRANS_HSYNC_A;
302 u32 saveTRANS_VTOTAL_A;
303 u32 saveTRANS_VBLANK_A;
304 u32 saveTRANS_VSYNC_A;
298 u32 savePIPEASTAT; 305 u32 savePIPEASTAT;
299 u32 saveDSPASTRIDE; 306 u32 saveDSPASTRIDE;
300 u32 saveDSPASIZE; 307 u32 saveDSPASIZE;
@@ -303,8 +310,11 @@ typedef struct drm_i915_private {
303 u32 saveDSPASURF; 310 u32 saveDSPASURF;
304 u32 saveDSPATILEOFF; 311 u32 saveDSPATILEOFF;
305 u32 savePFIT_PGM_RATIOS; 312 u32 savePFIT_PGM_RATIOS;
313 u32 saveBLC_HIST_CTL;
306 u32 saveBLC_PWM_CTL; 314 u32 saveBLC_PWM_CTL;
307 u32 saveBLC_PWM_CTL2; 315 u32 saveBLC_PWM_CTL2;
316 u32 saveBLC_CPU_PWM_CTL;
317 u32 saveBLC_CPU_PWM_CTL2;
308 u32 saveFPB0; 318 u32 saveFPB0;
309 u32 saveFPB1; 319 u32 saveFPB1;
310 u32 saveDPLL_B; 320 u32 saveDPLL_B;
@@ -316,6 +326,12 @@ typedef struct drm_i915_private {
316 u32 saveVBLANK_B; 326 u32 saveVBLANK_B;
317 u32 saveVSYNC_B; 327 u32 saveVSYNC_B;
318 u32 saveBCLRPAT_B; 328 u32 saveBCLRPAT_B;
329 u32 saveTRANS_HTOTAL_B;
330 u32 saveTRANS_HBLANK_B;
331 u32 saveTRANS_HSYNC_B;
332 u32 saveTRANS_VTOTAL_B;
333 u32 saveTRANS_VBLANK_B;
334 u32 saveTRANS_VSYNC_B;
319 u32 savePIPEBSTAT; 335 u32 savePIPEBSTAT;
320 u32 saveDSPBSTRIDE; 336 u32 saveDSPBSTRIDE;
321 u32 saveDSPBSIZE; 337 u32 saveDSPBSIZE;
@@ -341,6 +357,7 @@ typedef struct drm_i915_private {
341 u32 savePFIT_CONTROL; 357 u32 savePFIT_CONTROL;
342 u32 save_palette_a[256]; 358 u32 save_palette_a[256];
343 u32 save_palette_b[256]; 359 u32 save_palette_b[256];
360 u32 saveDPFC_CB_BASE;
344 u32 saveFBC_CFB_BASE; 361 u32 saveFBC_CFB_BASE;
345 u32 saveFBC_LL_BASE; 362 u32 saveFBC_LL_BASE;
346 u32 saveFBC_CONTROL; 363 u32 saveFBC_CONTROL;
@@ -348,6 +365,12 @@ typedef struct drm_i915_private {
348 u32 saveIER; 365 u32 saveIER;
349 u32 saveIIR; 366 u32 saveIIR;
350 u32 saveIMR; 367 u32 saveIMR;
368 u32 saveDEIER;
369 u32 saveDEIMR;
370 u32 saveGTIER;
371 u32 saveGTIMR;
372 u32 saveFDI_RXA_IMR;
373 u32 saveFDI_RXB_IMR;
351 u32 saveCACHE_MODE_0; 374 u32 saveCACHE_MODE_0;
352 u32 saveD_STATE; 375 u32 saveD_STATE;
353 u32 saveDSPCLK_GATE_D; 376 u32 saveDSPCLK_GATE_D;
@@ -381,6 +404,16 @@ typedef struct drm_i915_private {
381 u32 savePIPEB_DP_LINK_M; 404 u32 savePIPEB_DP_LINK_M;
382 u32 savePIPEA_DP_LINK_N; 405 u32 savePIPEA_DP_LINK_N;
383 u32 savePIPEB_DP_LINK_N; 406 u32 savePIPEB_DP_LINK_N;
407 u32 saveFDI_RXA_CTL;
408 u32 saveFDI_TXA_CTL;
409 u32 saveFDI_RXB_CTL;
410 u32 saveFDI_TXB_CTL;
411 u32 savePFA_CTL_1;
412 u32 savePFB_CTL_1;
413 u32 savePFA_WIN_SZ;
414 u32 savePFB_WIN_SZ;
415 u32 savePFA_WIN_POS;
416 u32 savePFB_WIN_POS;
384 417
385 struct { 418 struct {
386 struct drm_mm gtt_space; 419 struct drm_mm gtt_space;
@@ -491,6 +524,8 @@ typedef struct drm_i915_private {
491 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 524 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
492 } mm; 525 } mm;
493 struct sdvo_device_mapping sdvo_mappings[2]; 526 struct sdvo_device_mapping sdvo_mappings[2];
527 /* indicate whether the LVDS_BORDER should be enabled or not */
528 unsigned int lvds_border_bits;
494 529
495 /* Reclocking support */ 530 /* Reclocking support */
496 bool render_reclock_avail; 531 bool render_reclock_avail;
@@ -665,6 +700,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
665extern int i915_irq_wait(struct drm_device *dev, void *data, 700extern int i915_irq_wait(struct drm_device *dev, void *data,
666 struct drm_file *file_priv); 701 struct drm_file *file_priv);
667void i915_user_irq_get(struct drm_device *dev); 702void i915_user_irq_get(struct drm_device *dev);
703void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
668void i915_user_irq_put(struct drm_device *dev); 704void i915_user_irq_put(struct drm_device *dev);
669extern void i915_enable_interrupt (struct drm_device *dev); 705extern void i915_enable_interrupt (struct drm_device *dev);
670 706
@@ -979,7 +1015,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
979 1015
980#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) 1016#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
981#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 1017#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
982#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) 1018#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
1019 (IS_I9XX(dev) || IS_GM45(dev)) && \
1020 !IS_IGD(dev) && \
1021 !IS_IGDNG(dev))
983 1022
984#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1023#define PRIMARY_RINGBUFFER_SIZE (128*1024)
985 1024
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40727d4c2919..abfc27b0c2ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1770 drm_i915_private_t *dev_priv = dev->dev_private; 1770 drm_i915_private_t *dev_priv = dev->dev_private;
1771 uint32_t seqno; 1771 uint32_t seqno;
1772 1772
1773 if (!dev_priv->hw_status_page) 1773 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
1774 return; 1774 return;
1775 1775
1776 seqno = i915_get_gem_seqno(dev); 1776 seqno = i915_get_gem_seqno(dev);
@@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
1794 } else 1794 } else
1795 break; 1795 break;
1796 } 1796 }
1797
1798 if (unlikely (dev_priv->trace_irq_seqno &&
1799 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1800 i915_user_irq_put(dev);
1801 dev_priv->trace_irq_seqno = 0;
1802 }
1797} 1803}
1798 1804
1799void 1805void
@@ -3352,7 +3358,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3352 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3358 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3353 exec_len = (uint32_t) exec->batch_len; 3359 exec_len = (uint32_t) exec->batch_len;
3354 3360
3355 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); 3361 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
3356 3362
3357 count = nbox ? nbox : 1; 3363 count = nbox ? nbox : 1;
3358 3364
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dfeec7cdd42..c3ceffa46ea0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
725 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 725 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
726} 726}
727 727
728void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
729{
730 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
731
732 if (dev_priv->trace_irq_seqno == 0)
733 i915_user_irq_get(dev);
734
735 dev_priv->trace_irq_seqno = seqno;
736}
737
728static int i915_wait_irq(struct drm_device * dev, int irq_nr) 738static int i915_wait_irq(struct drm_device * dev, int irq_nr)
729{ 739{
730 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 740 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0466ddbeba32..1687edf68795 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -968,6 +968,8 @@
968#define LVDS_PORT_EN (1 << 31) 968#define LVDS_PORT_EN (1 << 31)
969/* Selects pipe B for LVDS data. Must be set on pre-965. */ 969/* Selects pipe B for LVDS data. Must be set on pre-965. */
970#define LVDS_PIPEB_SELECT (1 << 30) 970#define LVDS_PIPEB_SELECT (1 << 30)
971/* Enable border for unscaled (or aspect-scaled) display */
972#define LVDS_BORDER_ENABLE (1 << 15)
971/* 973/*
972 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 974 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
973 * pixel. 975 * pixel.
@@ -1078,6 +1080,8 @@
1078#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 1080#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
1079#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 1081#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
1080 1082
1083#define BLC_HIST_CTL 0x61260
1084
1081/* TV port control */ 1085/* TV port control */
1082#define TV_CTL 0x68000 1086#define TV_CTL 0x68000
1083/** Enables the TV encoder */ 1087/** Enables the TV encoder */
@@ -1780,6 +1784,11 @@
1780#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 1784#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
1781#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 1785#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
1782#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 1786#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
1787#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
1788#define PIPE_8BPC (0 << 5)
1789#define PIPE_10BPC (1 << 5)
1790#define PIPE_6BPC (2 << 5)
1791#define PIPE_12BPC (3 << 5)
1783 1792
1784#define DSPARB 0x70030 1793#define DSPARB 0x70030
1785#define DSPARB_CSTART_MASK (0x7f << 7) 1794#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -1790,17 +1799,29 @@
1790#define DSPARB_AEND_SHIFT 0 1799#define DSPARB_AEND_SHIFT 0
1791 1800
1792#define DSPFW1 0x70034 1801#define DSPFW1 0x70034
1802#define DSPFW_SR_SHIFT 23
1803#define DSPFW_CURSORB_SHIFT 16
1804#define DSPFW_PLANEB_SHIFT 8
1793#define DSPFW2 0x70038 1805#define DSPFW2 0x70038
1806#define DSPFW_CURSORA_MASK 0x00003f00
1807#define DSPFW_CURSORA_SHIFT 16
1794#define DSPFW3 0x7003c 1808#define DSPFW3 0x7003c
1809#define DSPFW_HPLL_SR_EN (1<<31)
1810#define DSPFW_CURSOR_SR_SHIFT 24
1795#define IGD_SELF_REFRESH_EN (1<<30) 1811#define IGD_SELF_REFRESH_EN (1<<30)
1796 1812
1797/* FIFO watermark sizes etc */ 1813/* FIFO watermark sizes etc */
1814#define G4X_FIFO_LINE_SIZE 64
1798#define I915_FIFO_LINE_SIZE 64 1815#define I915_FIFO_LINE_SIZE 64
1799#define I830_FIFO_LINE_SIZE 32 1816#define I830_FIFO_LINE_SIZE 32
1817
1818#define G4X_FIFO_SIZE 127
1800#define I945_FIFO_SIZE 127 /* 945 & 965 */ 1819#define I945_FIFO_SIZE 127 /* 945 & 965 */
1801#define I915_FIFO_SIZE 95 1820#define I915_FIFO_SIZE 95
1802#define I855GM_FIFO_SIZE 127 /* In cachelines */ 1821#define I855GM_FIFO_SIZE 127 /* In cachelines */
1803#define I830_FIFO_SIZE 95 1822#define I830_FIFO_SIZE 95
1823
1824#define G4X_MAX_WM 0x3f
1804#define I915_MAX_WM 0x3f 1825#define I915_MAX_WM 0x3f
1805 1826
1806#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ 1827#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
@@ -2030,6 +2051,11 @@
2030#define PFA_CTL_1 0x68080 2051#define PFA_CTL_1 0x68080
2031#define PFB_CTL_1 0x68880 2052#define PFB_CTL_1 0x68880
2032#define PF_ENABLE (1<<31) 2053#define PF_ENABLE (1<<31)
2054#define PF_FILTER_MASK (3<<23)
2055#define PF_FILTER_PROGRAMMED (0<<23)
2056#define PF_FILTER_MED_3x3 (1<<23)
2057#define PF_FILTER_EDGE_ENHANCE (2<<23)
2058#define PF_FILTER_EDGE_SOFTEN (3<<23)
2033#define PFA_WIN_SZ 0x68074 2059#define PFA_WIN_SZ 0x68074
2034#define PFB_WIN_SZ 0x68874 2060#define PFB_WIN_SZ 0x68874
2035#define PFA_WIN_POS 0x68070 2061#define PFA_WIN_POS 0x68070
@@ -2149,11 +2175,11 @@
2149#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) 2175#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
2150#define DREF_SSC_SOURCE_DISABLE (0<<11) 2176#define DREF_SSC_SOURCE_DISABLE (0<<11)
2151#define DREF_SSC_SOURCE_ENABLE (2<<11) 2177#define DREF_SSC_SOURCE_ENABLE (2<<11)
2152#define DREF_SSC_SOURCE_MASK (2<<11) 2178#define DREF_SSC_SOURCE_MASK (3<<11)
2153#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) 2179#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
2154#define DREF_NONSPREAD_CK505_ENABLE (1<<9) 2180#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
2155#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) 2181#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
2156#define DREF_NONSPREAD_SOURCE_MASK (2<<9) 2182#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
2157#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) 2183#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
2158#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) 2184#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
2159#define DREF_SSC4_DOWNSPREAD (0<<6) 2185#define DREF_SSC4_DOWNSPREAD (0<<6)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index bd6d8d91ca9f..992d5617e798 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -32,11 +32,15 @@
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg;
35 36
36 if (pipe == PIPE_A) 37 if (IS_IGDNG(dev)) {
37 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); 38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
38 else 39 } else {
39 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); 40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
41 }
42
43 return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
40} 44}
41 45
42static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 46static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
@@ -49,6 +53,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
49 if (!i915_pipe_enabled(dev, pipe)) 53 if (!i915_pipe_enabled(dev, pipe))
50 return; 54 return;
51 55
56 if (IS_IGDNG(dev))
57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
58
52 if (pipe == PIPE_A) 59 if (pipe == PIPE_A)
53 array = dev_priv->save_palette_a; 60 array = dev_priv->save_palette_a;
54 else 61 else
@@ -68,6 +75,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
68 if (!i915_pipe_enabled(dev, pipe)) 75 if (!i915_pipe_enabled(dev, pipe))
69 return; 76 return;
70 77
78 if (IS_IGDNG(dev))
79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
80
71 if (pipe == PIPE_A) 81 if (pipe == PIPE_A)
72 array = dev_priv->save_palette_a; 82 array = dev_priv->save_palette_a;
73 else 83 else
@@ -232,10 +242,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
232 /* Pipe & plane A info */ 242 /* Pipe & plane A info */
233 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 243 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
234 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 244 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
235 dev_priv->saveFPA0 = I915_READ(FPA0); 245 if (IS_IGDNG(dev)) {
236 dev_priv->saveFPA1 = I915_READ(FPA1); 246 dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
237 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 247 dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
238 if (IS_I965G(dev)) 248 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
249 } else {
250 dev_priv->saveFPA0 = I915_READ(FPA0);
251 dev_priv->saveFPA1 = I915_READ(FPA1);
252 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
253 }
254 if (IS_I965G(dev) && !IS_IGDNG(dev))
239 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 255 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
240 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 256 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
241 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 257 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -243,7 +259,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
243 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 259 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
244 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 260 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
245 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 261 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
246 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 262 if (!IS_IGDNG(dev))
263 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
264
265 if (IS_IGDNG(dev)) {
266 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
267 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
268
269 dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
270 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
271 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
272
273 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
274 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
275 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
276 dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
277 dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
278 dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
279 }
247 280
248 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); 281 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
249 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); 282 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
@@ -260,10 +293,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
260 /* Pipe & plane B info */ 293 /* Pipe & plane B info */
261 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 294 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
262 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 295 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
263 dev_priv->saveFPB0 = I915_READ(FPB0); 296 if (IS_IGDNG(dev)) {
264 dev_priv->saveFPB1 = I915_READ(FPB1); 297 dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
265 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 298 dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
266 if (IS_I965G(dev)) 299 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
300 } else {
301 dev_priv->saveFPB0 = I915_READ(FPB0);
302 dev_priv->saveFPB1 = I915_READ(FPB1);
303 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
304 }
305 if (IS_I965G(dev) && !IS_IGDNG(dev))
267 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 306 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
268 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 307 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
269 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 308 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -271,7 +310,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
271 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 310 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
272 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 311 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
273 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 312 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
274 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 313 if (!IS_IGDNG(dev))
314 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
315
316 if (IS_IGDNG(dev)) {
317 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
318 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
319
320 dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
321 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
322 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
323
324 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
325 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
326 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
327 dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
328 dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
329 dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
330 }
275 331
276 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); 332 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
277 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); 333 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
@@ -290,23 +346,41 @@ static void i915_save_modeset_reg(struct drm_device *dev)
290static void i915_restore_modeset_reg(struct drm_device *dev) 346static void i915_restore_modeset_reg(struct drm_device *dev)
291{ 347{
292 struct drm_i915_private *dev_priv = dev->dev_private; 348 struct drm_i915_private *dev_priv = dev->dev_private;
349 int dpll_a_reg, fpa0_reg, fpa1_reg;
350 int dpll_b_reg, fpb0_reg, fpb1_reg;
293 351
294 if (drm_core_check_feature(dev, DRIVER_MODESET)) 352 if (drm_core_check_feature(dev, DRIVER_MODESET))
295 return; 353 return;
296 354
355 if (IS_IGDNG(dev)) {
356 dpll_a_reg = PCH_DPLL_A;
357 dpll_b_reg = PCH_DPLL_B;
358 fpa0_reg = PCH_FPA0;
359 fpb0_reg = PCH_FPB0;
360 fpa1_reg = PCH_FPA1;
361 fpb1_reg = PCH_FPB1;
362 } else {
363 dpll_a_reg = DPLL_A;
364 dpll_b_reg = DPLL_B;
365 fpa0_reg = FPA0;
366 fpb0_reg = FPB0;
367 fpa1_reg = FPA1;
368 fpb1_reg = FPB1;
369 }
370
297 /* Pipe & plane A info */ 371 /* Pipe & plane A info */
298 /* Prime the clock */ 372 /* Prime the clock */
299 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 373 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
300 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & 374 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
301 ~DPLL_VCO_ENABLE); 375 ~DPLL_VCO_ENABLE);
302 DRM_UDELAY(150); 376 DRM_UDELAY(150);
303 } 377 }
304 I915_WRITE(FPA0, dev_priv->saveFPA0); 378 I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
305 I915_WRITE(FPA1, dev_priv->saveFPA1); 379 I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
306 /* Actually enable it */ 380 /* Actually enable it */
307 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); 381 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
308 DRM_UDELAY(150); 382 DRM_UDELAY(150);
309 if (IS_I965G(dev)) 383 if (IS_I965G(dev) && !IS_IGDNG(dev))
310 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 384 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
311 DRM_UDELAY(150); 385 DRM_UDELAY(150);
312 386
@@ -317,7 +391,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
317 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 391 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
318 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 392 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
319 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 393 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
320 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 394 if (!IS_IGDNG(dev))
395 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
396
397 if (IS_IGDNG(dev)) {
398 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
399 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
400
401 I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
402 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
403 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
404
405 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
406 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
407 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
408 I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
409 I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
410 I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
411 }
321 412
322 /* Restore plane info */ 413 /* Restore plane info */
323 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); 414 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
@@ -339,14 +430,14 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
339 430
340 /* Pipe & plane B info */ 431 /* Pipe & plane B info */
341 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 432 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
342 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & 433 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
343 ~DPLL_VCO_ENABLE); 434 ~DPLL_VCO_ENABLE);
344 DRM_UDELAY(150); 435 DRM_UDELAY(150);
345 } 436 }
346 I915_WRITE(FPB0, dev_priv->saveFPB0); 437 I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
347 I915_WRITE(FPB1, dev_priv->saveFPB1); 438 I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
348 /* Actually enable it */ 439 /* Actually enable it */
349 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); 440 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
350 DRM_UDELAY(150); 441 DRM_UDELAY(150);
351 if (IS_I965G(dev)) 442 if (IS_I965G(dev))
352 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 443 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
@@ -359,7 +450,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
359 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 450 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
360 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 451 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
361 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 452 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
362 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 453 if (!IS_IGDNG(dev))
454 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
455
456 if (IS_IGDNG(dev)) {
457 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
458 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
459
460 I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
461 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
462 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
463
464 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
465 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
466 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
467 I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
468 I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
469 I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
470 }
363 471
364 /* Restore plane info */ 472 /* Restore plane info */
365 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); 473 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
@@ -404,21 +512,43 @@ void i915_save_display(struct drm_device *dev)
404 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 512 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
405 513
406 /* CRT state */ 514 /* CRT state */
407 dev_priv->saveADPA = I915_READ(ADPA); 515 if (IS_IGDNG(dev)) {
516 dev_priv->saveADPA = I915_READ(PCH_ADPA);
517 } else {
518 dev_priv->saveADPA = I915_READ(ADPA);
519 }
408 520
409 /* LVDS state */ 521 /* LVDS state */
410 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 522 if (IS_IGDNG(dev)) {
411 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 523 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
412 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 524 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
413 if (IS_I965G(dev)) 525 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
414 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 526 dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
415 if (IS_MOBILE(dev) && !IS_I830(dev)) 527 dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
416 dev_priv->saveLVDS = I915_READ(LVDS); 528 dev_priv->saveLVDS = I915_READ(PCH_LVDS);
417 if (!IS_I830(dev) && !IS_845G(dev)) 529 } else {
530 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
531 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
532 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
533 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
534 if (IS_I965G(dev))
535 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
536 if (IS_MOBILE(dev) && !IS_I830(dev))
537 dev_priv->saveLVDS = I915_READ(LVDS);
538 }
539
540 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
418 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 541 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
419 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 542
420 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 543 if (IS_IGDNG(dev)) {
421 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 544 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
545 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
546 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
547 } else {
548 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
549 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
550 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
551 }
422 552
423 /* Display Port state */ 553 /* Display Port state */
424 if (SUPPORTS_INTEGRATED_DP(dev)) { 554 if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -437,16 +567,23 @@ void i915_save_display(struct drm_device *dev)
437 /* FIXME: save TV & SDVO state */ 567 /* FIXME: save TV & SDVO state */
438 568
439 /* FBC state */ 569 /* FBC state */
440 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 570 if (IS_GM45(dev)) {
441 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 571 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
442 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 572 } else {
443 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 573 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
574 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
575 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
576 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
577 }
444 578
445 /* VGA state */ 579 /* VGA state */
446 dev_priv->saveVGA0 = I915_READ(VGA0); 580 dev_priv->saveVGA0 = I915_READ(VGA0);
447 dev_priv->saveVGA1 = I915_READ(VGA1); 581 dev_priv->saveVGA1 = I915_READ(VGA1);
448 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 582 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
449 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 583 if (IS_IGDNG(dev))
584 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
585 else
586 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
450 587
451 i915_save_vga(dev); 588 i915_save_vga(dev);
452} 589}
@@ -485,22 +622,41 @@ void i915_restore_display(struct drm_device *dev)
485 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 622 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
486 623
487 /* CRT state */ 624 /* CRT state */
488 I915_WRITE(ADPA, dev_priv->saveADPA); 625 if (IS_IGDNG(dev))
626 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
627 else
628 I915_WRITE(ADPA, dev_priv->saveADPA);
489 629
490 /* LVDS state */ 630 /* LVDS state */
491 if (IS_I965G(dev)) 631 if (IS_I965G(dev) && !IS_IGDNG(dev))
492 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 632 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
493 if (IS_MOBILE(dev) && !IS_I830(dev)) 633
634 if (IS_IGDNG(dev)) {
635 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
636 } else if (IS_MOBILE(dev) && !IS_I830(dev))
494 I915_WRITE(LVDS, dev_priv->saveLVDS); 637 I915_WRITE(LVDS, dev_priv->saveLVDS);
495 if (!IS_I830(dev) && !IS_845G(dev)) 638
639 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
496 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 640 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
497 641
498 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 642 if (IS_IGDNG(dev)) {
499 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 643 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
500 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 644 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
501 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 645 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
502 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 646 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
503 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 647 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
648 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
649 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
650 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
651 } else {
652 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
653 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
654 I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
655 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
656 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
657 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
658 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
659 }
504 660
505 /* Display Port state */ 661 /* Display Port state */
506 if (SUPPORTS_INTEGRATED_DP(dev)) { 662 if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -511,13 +667,22 @@ void i915_restore_display(struct drm_device *dev)
511 /* FIXME: restore TV & SDVO state */ 667 /* FIXME: restore TV & SDVO state */
512 668
513 /* FBC info */ 669 /* FBC info */
514 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 670 if (IS_GM45(dev)) {
515 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 671 g4x_disable_fbc(dev);
516 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 672 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
517 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 673 } else {
674 i8xx_disable_fbc(dev);
675 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
676 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
677 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
678 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
679 }
518 680
519 /* VGA state */ 681 /* VGA state */
520 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 682 if (IS_IGDNG(dev))
683 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
684 else
685 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
521 I915_WRITE(VGA0, dev_priv->saveVGA0); 686 I915_WRITE(VGA0, dev_priv->saveVGA0);
522 I915_WRITE(VGA1, dev_priv->saveVGA1); 687 I915_WRITE(VGA1, dev_priv->saveVGA1);
523 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 688 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -543,8 +708,17 @@ int i915_save_state(struct drm_device *dev)
543 i915_save_display(dev); 708 i915_save_display(dev);
544 709
545 /* Interrupt state */ 710 /* Interrupt state */
546 dev_priv->saveIER = I915_READ(IER); 711 if (IS_IGDNG(dev)) {
547 dev_priv->saveIMR = I915_READ(IMR); 712 dev_priv->saveDEIER = I915_READ(DEIER);
713 dev_priv->saveDEIMR = I915_READ(DEIMR);
714 dev_priv->saveGTIER = I915_READ(GTIER);
715 dev_priv->saveGTIMR = I915_READ(GTIMR);
716 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
717 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
718 } else {
719 dev_priv->saveIER = I915_READ(IER);
720 dev_priv->saveIMR = I915_READ(IMR);
721 }
548 722
549 /* Clock gating state */ 723 /* Clock gating state */
550 dev_priv->saveD_STATE = I915_READ(D_STATE); 724 dev_priv->saveD_STATE = I915_READ(D_STATE);
@@ -609,8 +783,17 @@ int i915_restore_state(struct drm_device *dev)
609 i915_restore_display(dev); 783 i915_restore_display(dev);
610 784
611 /* Interrupt state */ 785 /* Interrupt state */
612 I915_WRITE (IER, dev_priv->saveIER); 786 if (IS_IGDNG(dev)) {
613 I915_WRITE (IMR, dev_priv->saveIMR); 787 I915_WRITE(DEIER, dev_priv->saveDEIER);
788 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
789 I915_WRITE(GTIER, dev_priv->saveGTIER);
790 I915_WRITE(GTIMR, dev_priv->saveGTIMR);
791 I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
792 I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
793 } else {
794 I915_WRITE (IER, dev_priv->saveIER);
795 I915_WRITE (IMR, dev_priv->saveIMR);
796 }
614 797
615 /* Clock gating state */ 798 /* Clock gating state */
616 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 799 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5567a40816f3..01840d9bc38f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -158,16 +158,17 @@ TRACE_EVENT(i915_gem_request_submit,
158 TP_ARGS(dev, seqno), 158 TP_ARGS(dev, seqno),
159 159
160 TP_STRUCT__entry( 160 TP_STRUCT__entry(
161 __field(struct drm_device *, dev) 161 __field(u32, dev)
162 __field(u32, seqno) 162 __field(u32, seqno)
163 ), 163 ),
164 164
165 TP_fast_assign( 165 TP_fast_assign(
166 __entry->dev = dev; 166 __entry->dev = dev->primary->index;
167 __entry->seqno = seqno; 167 __entry->seqno = seqno;
168 i915_trace_irq_get(dev, seqno);
168 ), 169 ),
169 170
170 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 171 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
171); 172);
172 173
173TRACE_EVENT(i915_gem_request_flush, 174TRACE_EVENT(i915_gem_request_flush,
@@ -178,20 +179,20 @@ TRACE_EVENT(i915_gem_request_flush,
178 TP_ARGS(dev, seqno, flush_domains, invalidate_domains), 179 TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
179 180
180 TP_STRUCT__entry( 181 TP_STRUCT__entry(
181 __field(struct drm_device *, dev) 182 __field(u32, dev)
182 __field(u32, seqno) 183 __field(u32, seqno)
183 __field(u32, flush_domains) 184 __field(u32, flush_domains)
184 __field(u32, invalidate_domains) 185 __field(u32, invalidate_domains)
185 ), 186 ),
186 187
187 TP_fast_assign( 188 TP_fast_assign(
188 __entry->dev = dev; 189 __entry->dev = dev->primary->index;
189 __entry->seqno = seqno; 190 __entry->seqno = seqno;
190 __entry->flush_domains = flush_domains; 191 __entry->flush_domains = flush_domains;
191 __entry->invalidate_domains = invalidate_domains; 192 __entry->invalidate_domains = invalidate_domains;
192 ), 193 ),
193 194
194 TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", 195 TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
195 __entry->dev, __entry->seqno, 196 __entry->dev, __entry->seqno,
196 __entry->flush_domains, __entry->invalidate_domains) 197 __entry->flush_domains, __entry->invalidate_domains)
197); 198);
@@ -204,16 +205,16 @@ TRACE_EVENT(i915_gem_request_complete,
204 TP_ARGS(dev, seqno), 205 TP_ARGS(dev, seqno),
205 206
206 TP_STRUCT__entry( 207 TP_STRUCT__entry(
207 __field(struct drm_device *, dev) 208 __field(u32, dev)
208 __field(u32, seqno) 209 __field(u32, seqno)
209 ), 210 ),
210 211
211 TP_fast_assign( 212 TP_fast_assign(
212 __entry->dev = dev; 213 __entry->dev = dev->primary->index;
213 __entry->seqno = seqno; 214 __entry->seqno = seqno;
214 ), 215 ),
215 216
216 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 217 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
217); 218);
218 219
219TRACE_EVENT(i915_gem_request_retire, 220TRACE_EVENT(i915_gem_request_retire,
@@ -223,16 +224,16 @@ TRACE_EVENT(i915_gem_request_retire,
223 TP_ARGS(dev, seqno), 224 TP_ARGS(dev, seqno),
224 225
225 TP_STRUCT__entry( 226 TP_STRUCT__entry(
226 __field(struct drm_device *, dev) 227 __field(u32, dev)
227 __field(u32, seqno) 228 __field(u32, seqno)
228 ), 229 ),
229 230
230 TP_fast_assign( 231 TP_fast_assign(
231 __entry->dev = dev; 232 __entry->dev = dev->primary->index;
232 __entry->seqno = seqno; 233 __entry->seqno = seqno;
233 ), 234 ),
234 235
235 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 236 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
236); 237);
237 238
238TRACE_EVENT(i915_gem_request_wait_begin, 239TRACE_EVENT(i915_gem_request_wait_begin,
@@ -242,16 +243,16 @@ TRACE_EVENT(i915_gem_request_wait_begin,
242 TP_ARGS(dev, seqno), 243 TP_ARGS(dev, seqno),
243 244
244 TP_STRUCT__entry( 245 TP_STRUCT__entry(
245 __field(struct drm_device *, dev) 246 __field(u32, dev)
246 __field(u32, seqno) 247 __field(u32, seqno)
247 ), 248 ),
248 249
249 TP_fast_assign( 250 TP_fast_assign(
250 __entry->dev = dev; 251 __entry->dev = dev->primary->index;
251 __entry->seqno = seqno; 252 __entry->seqno = seqno;
252 ), 253 ),
253 254
254 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 255 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
255); 256);
256 257
257TRACE_EVENT(i915_gem_request_wait_end, 258TRACE_EVENT(i915_gem_request_wait_end,
@@ -261,16 +262,16 @@ TRACE_EVENT(i915_gem_request_wait_end,
261 TP_ARGS(dev, seqno), 262 TP_ARGS(dev, seqno),
262 263
263 TP_STRUCT__entry( 264 TP_STRUCT__entry(
264 __field(struct drm_device *, dev) 265 __field(u32, dev)
265 __field(u32, seqno) 266 __field(u32, seqno)
266 ), 267 ),
267 268
268 TP_fast_assign( 269 TP_fast_assign(
269 __entry->dev = dev; 270 __entry->dev = dev->primary->index;
270 __entry->seqno = seqno; 271 __entry->seqno = seqno;
271 ), 272 ),
272 273
273 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 274 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
274); 275);
275 276
276TRACE_EVENT(i915_ring_wait_begin, 277TRACE_EVENT(i915_ring_wait_begin,
@@ -280,14 +281,14 @@ TRACE_EVENT(i915_ring_wait_begin,
280 TP_ARGS(dev), 281 TP_ARGS(dev),
281 282
282 TP_STRUCT__entry( 283 TP_STRUCT__entry(
283 __field(struct drm_device *, dev) 284 __field(u32, dev)
284 ), 285 ),
285 286
286 TP_fast_assign( 287 TP_fast_assign(
287 __entry->dev = dev; 288 __entry->dev = dev->primary->index;
288 ), 289 ),
289 290
290 TP_printk("dev=%p", __entry->dev) 291 TP_printk("dev=%u", __entry->dev)
291); 292);
292 293
293TRACE_EVENT(i915_ring_wait_end, 294TRACE_EVENT(i915_ring_wait_end,
@@ -297,14 +298,14 @@ TRACE_EVENT(i915_ring_wait_end,
297 TP_ARGS(dev), 298 TP_ARGS(dev),
298 299
299 TP_STRUCT__entry( 300 TP_STRUCT__entry(
300 __field(struct drm_device *, dev) 301 __field(u32, dev)
301 ), 302 ),
302 303
303 TP_fast_assign( 304 TP_fast_assign(
304 __entry->dev = dev; 305 __entry->dev = dev->primary->index;
305 ), 306 ),
306 307
307 TP_printk("dev=%p", __entry->dev) 308 TP_printk("dev=%u", __entry->dev)
308); 309);
309 310
310#endif /* _I915_TRACE_H_ */ 311#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4337414846b6..96cd256e60e6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,20 +351,18 @@ parse_driver_features(struct drm_i915_private *dev_priv,
351 struct drm_device *dev = dev_priv->dev; 351 struct drm_device *dev = dev_priv->dev;
352 struct bdb_driver_features *driver; 352 struct bdb_driver_features *driver;
353 353
354 /* set default for chips without eDP */
355 if (!SUPPORTS_EDP(dev)) {
356 dev_priv->edp_support = 0;
357 return;
358 }
359
360 driver = find_section(bdb, BDB_DRIVER_FEATURES); 354 driver = find_section(bdb, BDB_DRIVER_FEATURES);
361 if (!driver) 355 if (!driver)
362 return; 356 return;
363 357
364 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) 358 if (driver && SUPPORTS_EDP(dev) &&
359 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
365 dev_priv->edp_support = 1; 360 dev_priv->edp_support = 1;
361 } else {
362 dev_priv->edp_support = 0;
363 }
366 364
367 if (driver->dual_frequency) 365 if (driver && driver->dual_frequency)
368 dev_priv->render_reclock_avail = true; 366 dev_priv->render_reclock_avail = true;
369} 367}
370 368
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 93ff6c03733e..3ba6546b7c7f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -943,6 +943,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
943 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 943 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
944 clock.p = (clock.p1 * clock.p2); 944 clock.p = (clock.p1 * clock.p2);
945 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 945 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
946 clock.vco = 0;
946 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 947 memcpy(best_clock, &clock, sizeof(intel_clock_t));
947 return true; 948 return true;
948} 949}
@@ -1260,9 +1261,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1260 return ret; 1261 return ret;
1261 } 1262 }
1262 1263
1263 /* Pre-i965 needs to install a fence for tiled scan-out */ 1264 /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
1264 if (!IS_I965G(dev) && 1265 * whereas 965+ only requires a fence if using framebuffer compression.
1265 obj_priv->fence_reg == I915_FENCE_REG_NONE && 1266 * For simplicity, we always install a fence as the cost is not that onerous.
1267 */
1268 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1266 obj_priv->tiling_mode != I915_TILING_NONE) { 1269 obj_priv->tiling_mode != I915_TILING_NONE) {
1267 ret = i915_gem_object_get_fence_reg(obj); 1270 ret = i915_gem_object_get_fence_reg(obj);
1268 if (ret != 0) { 1271 if (ret != 0) {
@@ -1513,7 +1516,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1513 /* Enable panel fitting for LVDS */ 1516 /* Enable panel fitting for LVDS */
1514 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 1517 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1515 temp = I915_READ(pf_ctl_reg); 1518 temp = I915_READ(pf_ctl_reg);
1516 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); 1519 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
1517 1520
1518 /* currently full aspect */ 1521 /* currently full aspect */
1519 I915_WRITE(pf_win_pos, 0); 1522 I915_WRITE(pf_win_pos, 0);
@@ -1801,6 +1804,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1801 case DRM_MODE_DPMS_ON: 1804 case DRM_MODE_DPMS_ON:
1802 case DRM_MODE_DPMS_STANDBY: 1805 case DRM_MODE_DPMS_STANDBY:
1803 case DRM_MODE_DPMS_SUSPEND: 1806 case DRM_MODE_DPMS_SUSPEND:
1807 intel_update_watermarks(dev);
1808
1804 /* Enable the DPLL */ 1809 /* Enable the DPLL */
1805 temp = I915_READ(dpll_reg); 1810 temp = I915_READ(dpll_reg);
1806 if ((temp & DPLL_VCO_ENABLE) == 0) { 1811 if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -1838,7 +1843,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1838 1843
1839 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1844 /* Give the overlay scaler a chance to enable if it's on this pipe */
1840 //intel_crtc_dpms_video(crtc, true); TODO 1845 //intel_crtc_dpms_video(crtc, true); TODO
1841 intel_update_watermarks(dev);
1842 break; 1846 break;
1843 case DRM_MODE_DPMS_OFF: 1847 case DRM_MODE_DPMS_OFF:
1844 intel_update_watermarks(dev); 1848 intel_update_watermarks(dev);
@@ -2082,7 +2086,7 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2082#define LINK_N 0x80000 2086#define LINK_N 0x80000
2083 2087
2084static void 2088static void
2085igdng_compute_m_n(int bytes_per_pixel, int nlanes, 2089igdng_compute_m_n(int bits_per_pixel, int nlanes,
2086 int pixel_clock, int link_clock, 2090 int pixel_clock, int link_clock,
2087 struct fdi_m_n *m_n) 2091 struct fdi_m_n *m_n)
2088{ 2092{
@@ -2092,7 +2096,8 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
2092 2096
2093 temp = (u64) DATA_N * pixel_clock; 2097 temp = (u64) DATA_N * pixel_clock;
2094 temp = div_u64(temp, link_clock); 2098 temp = div_u64(temp, link_clock);
2095 m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); 2099 m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
2100 m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
2096 m_n->gmch_n = DATA_N; 2101 m_n->gmch_n = DATA_N;
2097 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 2102 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
2098 2103
@@ -2140,6 +2145,13 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = {
2140 IGD_CURSOR_GUARD_WM, 2145 IGD_CURSOR_GUARD_WM,
2141 IGD_FIFO_LINE_SIZE 2146 IGD_FIFO_LINE_SIZE
2142}; 2147};
2148static struct intel_watermark_params g4x_wm_info = {
2149 G4X_FIFO_SIZE,
2150 G4X_MAX_WM,
2151 G4X_MAX_WM,
2152 2,
2153 G4X_FIFO_LINE_SIZE,
2154};
2143static struct intel_watermark_params i945_wm_info = { 2155static struct intel_watermark_params i945_wm_info = {
2144 I945_FIFO_SIZE, 2156 I945_FIFO_SIZE,
2145 I915_MAX_WM, 2157 I915_MAX_WM,
@@ -2430,17 +2442,74 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2430 return size; 2442 return size;
2431} 2443}
2432 2444
2433static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, 2445static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2434 int unused3, int unused4) 2446 int planeb_clock, int sr_hdisplay, int pixel_size)
2435{ 2447{
2436 struct drm_i915_private *dev_priv = dev->dev_private; 2448 struct drm_i915_private *dev_priv = dev->dev_private;
2437 u32 fw_blc_self = I915_READ(FW_BLC_SELF); 2449 int total_size, cacheline_size;
2450 int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
2451 struct intel_watermark_params planea_params, planeb_params;
2452 unsigned long line_time_us;
2453 int sr_clock, sr_entries = 0, entries_required;
2438 2454
2439 if (i915_powersave) 2455 /* Create copies of the base settings for each pipe */
2440 fw_blc_self |= FW_BLC_SELF_EN; 2456 planea_params = planeb_params = g4x_wm_info;
2441 else 2457
2442 fw_blc_self &= ~FW_BLC_SELF_EN; 2458 /* Grab a couple of global values before we overwrite them */
2443 I915_WRITE(FW_BLC_SELF, fw_blc_self); 2459 total_size = planea_params.fifo_size;
2460 cacheline_size = planea_params.cacheline_size;
2461
2462 /*
2463 * Note: we need to make sure we don't overflow for various clock &
2464 * latency values.
2465 * clocks go from a few thousand to several hundred thousand.
2466 * latency is usually a few thousand
2467 */
2468 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
2469 1000;
2470 entries_required /= G4X_FIFO_LINE_SIZE;
2471 planea_wm = entries_required + planea_params.guard_size;
2472
2473 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
2474 1000;
2475 entries_required /= G4X_FIFO_LINE_SIZE;
2476 planeb_wm = entries_required + planeb_params.guard_size;
2477
2478 cursora_wm = cursorb_wm = 16;
2479 cursor_sr = 32;
2480
2481 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2482
2483 /* Calc sr entries for one plane configs */
2484 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2485 /* self-refresh has much higher latency */
2486 const static int sr_latency_ns = 12000;
2487
2488 sr_clock = planea_clock ? planea_clock : planeb_clock;
2489 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2490
2491 /* Use ns/us then divide to preserve precision */
2492 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2493 pixel_size * sr_hdisplay) / 1000;
2494 sr_entries = roundup(sr_entries / cacheline_size, 1);
2495 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2496 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2497 }
2498
2499 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
2500 planea_wm, planeb_wm, sr_entries);
2501
2502 planea_wm &= 0x3f;
2503 planeb_wm &= 0x3f;
2504
2505 I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
2506 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
2507 (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
2508 I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
2509 (cursora_wm << DSPFW_CURSORA_SHIFT));
2510 /* HPLL off in SR has some issues on G4x... disable it */
2511 I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
2512 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2444} 2513}
2445 2514
2446static void i965_update_wm(struct drm_device *dev, int unused, int unused2, 2515static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
@@ -2586,6 +2655,9 @@ static void intel_update_watermarks(struct drm_device *dev)
2586 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; 2655 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
2587 int enabled = 0, pixel_size = 0; 2656 int enabled = 0, pixel_size = 0;
2588 2657
2658 if (!dev_priv->display.update_wm)
2659 return;
2660
2589 /* Get the clock config from both planes */ 2661 /* Get the clock config from both planes */
2590 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2662 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2591 intel_crtc = to_intel_crtc(crtc); 2663 intel_crtc = to_intel_crtc(crtc);
@@ -2763,7 +2835,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2763 2835
2764 /* FDI link */ 2836 /* FDI link */
2765 if (IS_IGDNG(dev)) { 2837 if (IS_IGDNG(dev)) {
2766 int lane, link_bw; 2838 int lane, link_bw, bpp;
2767 /* eDP doesn't require FDI link, so just set DP M/N 2839 /* eDP doesn't require FDI link, so just set DP M/N
2768 according to current link config */ 2840 according to current link config */
2769 if (is_edp) { 2841 if (is_edp) {
@@ -2782,10 +2854,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2782 lane = 4; 2854 lane = 4;
2783 link_bw = 270000; 2855 link_bw = 270000;
2784 } 2856 }
2785 igdng_compute_m_n(3, lane, target_clock, 2857
2858 /* determine panel color depth */
2859 temp = I915_READ(pipeconf_reg);
2860
2861 switch (temp & PIPE_BPC_MASK) {
2862 case PIPE_8BPC:
2863 bpp = 24;
2864 break;
2865 case PIPE_10BPC:
2866 bpp = 30;
2867 break;
2868 case PIPE_6BPC:
2869 bpp = 18;
2870 break;
2871 case PIPE_12BPC:
2872 bpp = 36;
2873 break;
2874 default:
2875 DRM_ERROR("unknown pipe bpc value\n");
2876 bpp = 24;
2877 }
2878
2879 igdng_compute_m_n(bpp, lane, target_clock,
2786 link_bw, &m_n); 2880 link_bw, &m_n);
2787 } 2881 }
2788 2882
2883 /* Ironlake: try to setup display ref clock before DPLL
2884 * enabling. This is only under driver's control after
2885 * PCH B stepping, previous chipset stepping should be
2886 * ignoring this setting.
2887 */
2888 if (IS_IGDNG(dev)) {
2889 temp = I915_READ(PCH_DREF_CONTROL);
2890 /* Always enable nonspread source */
2891 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
2892 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
2893 I915_WRITE(PCH_DREF_CONTROL, temp);
2894 POSTING_READ(PCH_DREF_CONTROL);
2895
2896 temp &= ~DREF_SSC_SOURCE_MASK;
2897 temp |= DREF_SSC_SOURCE_ENABLE;
2898 I915_WRITE(PCH_DREF_CONTROL, temp);
2899 POSTING_READ(PCH_DREF_CONTROL);
2900
2901 udelay(200);
2902
2903 if (is_edp) {
2904 if (dev_priv->lvds_use_ssc) {
2905 temp |= DREF_SSC1_ENABLE;
2906 I915_WRITE(PCH_DREF_CONTROL, temp);
2907 POSTING_READ(PCH_DREF_CONTROL);
2908
2909 udelay(200);
2910
2911 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2912 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
2913 I915_WRITE(PCH_DREF_CONTROL, temp);
2914 POSTING_READ(PCH_DREF_CONTROL);
2915 } else {
2916 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2917 I915_WRITE(PCH_DREF_CONTROL, temp);
2918 POSTING_READ(PCH_DREF_CONTROL);
2919 }
2920 }
2921 }
2922
2789 if (IS_IGD(dev)) { 2923 if (IS_IGD(dev)) {
2790 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 2924 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
2791 if (has_reduced_clock) 2925 if (has_reduced_clock)
@@ -2936,6 +3070,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2936 3070
2937 lvds = I915_READ(lvds_reg); 3071 lvds = I915_READ(lvds_reg);
2938 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 3072 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
3073 /* set the corresponsding LVDS_BORDER bit */
3074 lvds |= dev_priv->lvds_border_bits;
2939 /* Set the B0-B3 data pairs corresponding to whether we're going to 3075 /* Set the B0-B3 data pairs corresponding to whether we're going to
2940 * set the DPLLs for dual-channel mode or not. 3076 * set the DPLLs for dual-channel mode or not.
2941 */ 3077 */
@@ -3095,7 +3231,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3095 struct drm_gem_object *bo; 3231 struct drm_gem_object *bo;
3096 struct drm_i915_gem_object *obj_priv; 3232 struct drm_i915_gem_object *obj_priv;
3097 int pipe = intel_crtc->pipe; 3233 int pipe = intel_crtc->pipe;
3098 int plane = intel_crtc->plane;
3099 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 3234 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
3100 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 3235 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
3101 uint32_t temp = I915_READ(control); 3236 uint32_t temp = I915_READ(control);
@@ -3182,9 +3317,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3182 drm_gem_object_unreference(intel_crtc->cursor_bo); 3317 drm_gem_object_unreference(intel_crtc->cursor_bo);
3183 } 3318 }
3184 3319
3185 if ((IS_I965G(dev) || plane == 0))
3186 intel_update_fbc(crtc, &crtc->mode);
3187
3188 mutex_unlock(&dev->struct_mutex); 3320 mutex_unlock(&dev->struct_mutex);
3189 3321
3190 intel_crtc->cursor_addr = addr; 3322 intel_crtc->cursor_addr = addr;
@@ -3244,6 +3376,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
3244 intel_crtc->lut_b[regno] = blue >> 8; 3376 intel_crtc->lut_b[regno] = blue >> 8;
3245} 3377}
3246 3378
3379void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
3380 u16 *blue, int regno)
3381{
3382 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3383
3384 *red = intel_crtc->lut_r[regno] << 8;
3385 *green = intel_crtc->lut_g[regno] << 8;
3386 *blue = intel_crtc->lut_b[regno] << 8;
3387}
3388
3247static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 3389static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
3248 u16 *blue, uint32_t size) 3390 u16 *blue, uint32_t size)
3249{ 3391{
@@ -3835,6 +3977,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
3835 .mode_set_base = intel_pipe_set_base, 3977 .mode_set_base = intel_pipe_set_base,
3836 .prepare = intel_crtc_prepare, 3978 .prepare = intel_crtc_prepare,
3837 .commit = intel_crtc_commit, 3979 .commit = intel_crtc_commit,
3980 .load_lut = intel_crtc_load_lut,
3838}; 3981};
3839 3982
3840static const struct drm_crtc_funcs intel_crtc_funcs = { 3983static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -4117,7 +4260,9 @@ void intel_init_clock_gating(struct drm_device *dev)
4117 * Disable clock gating reported to work incorrectly according to the 4260 * Disable clock gating reported to work incorrectly according to the
4118 * specs, but enable as much else as we can. 4261 * specs, but enable as much else as we can.
4119 */ 4262 */
4120 if (IS_G4X(dev)) { 4263 if (IS_IGDNG(dev)) {
4264 return;
4265 } else if (IS_G4X(dev)) {
4121 uint32_t dspclk_gate; 4266 uint32_t dspclk_gate;
4122 I915_WRITE(RENCLK_GATE_D1, 0); 4267 I915_WRITE(RENCLK_GATE_D1, 0);
4123 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 4268 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -4205,7 +4350,9 @@ static void intel_init_display(struct drm_device *dev)
4205 i830_get_display_clock_speed; 4350 i830_get_display_clock_speed;
4206 4351
4207 /* For FIFO watermark updates */ 4352 /* For FIFO watermark updates */
4208 if (IS_G4X(dev)) 4353 if (IS_IGDNG(dev))
4354 dev_priv->display.update_wm = NULL;
4355 else if (IS_G4X(dev))
4209 dev_priv->display.update_wm = g4x_update_wm; 4356 dev_priv->display.update_wm = g4x_update_wm;
4210 else if (IS_I965G(dev)) 4357 else if (IS_I965G(dev))
4211 dev_priv->display.update_wm = i965_update_wm; 4358 dev_priv->display.update_wm = i965_update_wm;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f4856a510476..d83447557f9b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -400,7 +400,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
400{ 400{
401 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 401 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
402 402
403 DRM_ERROR("i2c_init %s\n", name); 403 DRM_DEBUG_KMS("i2c_init %s\n", name);
404 dp_priv->algo.running = false; 404 dp_priv->algo.running = false;
405 dp_priv->algo.address = 0; 405 dp_priv->algo.address = 0;
406 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; 406 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8aa4b7f30daa..ef61fe9507e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
175extern void intelfb_restore(void); 175extern void intelfb_restore(void);
176extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 176extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
177 u16 blue, int regno); 177 u16 blue, int regno);
178extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
179 u16 *blue, int regno);
178 180
179extern int intel_framebuffer_create(struct drm_device *dev, 181extern int intel_framebuffer_create(struct drm_device *dev,
180 struct drm_mode_fb_cmd *mode_cmd, 182 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7ba4a232a97f..2b0fe54cd92c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = {
60 .fb_imageblit = cfb_imageblit, 60 .fb_imageblit = cfb_imageblit,
61 .fb_pan_display = drm_fb_helper_pan_display, 61 .fb_pan_display = drm_fb_helper_pan_display,
62 .fb_blank = drm_fb_helper_blank, 62 .fb_blank = drm_fb_helper_blank,
63 .fb_setcmap = drm_fb_helper_setcmap,
63}; 64};
64 65
65static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 66static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
66 .gamma_set = intel_crtc_fb_gamma_set, 67 .gamma_set = intel_crtc_fb_gamma_set,
68 .gamma_get = intel_crtc_fb_gamma_get,
67}; 69};
68 70
69 71
@@ -110,6 +112,7 @@ EXPORT_SYMBOL(intelfb_resize);
110static int intelfb_create(struct drm_device *dev, uint32_t fb_width, 112static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
111 uint32_t fb_height, uint32_t surface_width, 113 uint32_t fb_height, uint32_t surface_width,
112 uint32_t surface_height, 114 uint32_t surface_height,
115 uint32_t surface_depth, uint32_t surface_bpp,
113 struct drm_framebuffer **fb_p) 116 struct drm_framebuffer **fb_p)
114{ 117{
115 struct fb_info *info; 118 struct fb_info *info;
@@ -122,12 +125,16 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
122 struct device *device = &dev->pdev->dev; 125 struct device *device = &dev->pdev->dev;
123 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 126 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
124 127
128 /* we don't do packed 24bpp */
129 if (surface_bpp == 24)
130 surface_bpp = 32;
131
125 mode_cmd.width = surface_width; 132 mode_cmd.width = surface_width;
126 mode_cmd.height = surface_height; 133 mode_cmd.height = surface_height;
127 134
128 mode_cmd.bpp = 32; 135 mode_cmd.bpp = surface_bpp;
129 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 136 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
130 mode_cmd.depth = 24; 137 mode_cmd.depth = surface_depth;
131 138
132 size = mode_cmd.pitch * mode_cmd.height; 139 size = mode_cmd.pitch * mode_cmd.height;
133 size = ALIGN(size, PAGE_SIZE); 140 size = ALIGN(size, PAGE_SIZE);
@@ -205,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
205 212
206// memset(info->screen_base, 0, size); 213// memset(info->screen_base, 0, size);
207 214
208 drm_fb_helper_fill_fix(info, fb->pitch); 215 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
209 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 216 drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
210 217
211 /* FIXME: we really shouldn't expose mmio space at all */ 218 /* FIXME: we really shouldn't expose mmio space at all */
@@ -243,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
243 int ret; 250 int ret;
244 251
245 DRM_DEBUG("\n"); 252 DRM_DEBUG("\n");
246 ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); 253 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
247 return ret; 254 return ret;
248} 255}
249EXPORT_SYMBOL(intelfb_probe); 256EXPORT_SYMBOL(intelfb_probe);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa304e136010..663ab6de0b58 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -223,7 +223,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
223 223
224 connector = &intel_output->base; 224 connector = &intel_output->base;
225 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 225 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
226 DRM_MODE_CONNECTOR_DVID); 226 DRM_MODE_CONNECTOR_HDMIA);
227 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 227 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
228 228
229 intel_output->type = INTEL_OUTPUT_HDMI; 229 intel_output->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 98ae3d73577e..05598ae10c4b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,7 +380,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
380 adjusted_mode->crtc_vblank_start + vsync_pos; 380 adjusted_mode->crtc_vblank_start + vsync_pos;
381 /* keep the vsync width constant */ 381 /* keep the vsync width constant */
382 adjusted_mode->crtc_vsync_end = 382 adjusted_mode->crtc_vsync_end =
383 adjusted_mode->crtc_vblank_start + vsync_width; 383 adjusted_mode->crtc_vsync_start + vsync_width;
384 border = 1; 384 border = 1;
385 break; 385 break;
386 case DRM_MODE_SCALE_ASPECT: 386 case DRM_MODE_SCALE_ASPECT:
@@ -526,6 +526,14 @@ out:
526 lvds_priv->pfit_control = pfit_control; 526 lvds_priv->pfit_control = pfit_control;
527 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; 527 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
528 /* 528 /*
529 * When there exists the border, it means that the LVDS_BORDR
530 * should be enabled.
531 */
532 if (border)
533 dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
534 else
535 dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
536 /*
529 * XXX: It would be nice to support lower refresh rates on the 537 * XXX: It would be nice to support lower refresh rates on the
530 * panels to reduce power consumption, and perhaps match the 538 * panels to reduce power consumption, and perhaps match the
531 * user's requested refresh rate. 539 * user's requested refresh rate.
@@ -656,6 +664,15 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
656 return 0; 664 return 0;
657} 665}
658 666
667/*
668 * Lid events. Note the use of 'modeset_on_lid':
669 * - we set it on lid close, and reset it on open
670 * - we use it as a "only once" bit (ie we ignore
671 * duplicate events where it was already properly
672 * set/reset)
673 * - the suspend/resume paths will also set it to
674 * zero, since they restore the mode ("lid open").
675 */
659static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 676static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
660 void *unused) 677 void *unused)
661{ 678{
@@ -663,13 +680,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
663 container_of(nb, struct drm_i915_private, lid_notifier); 680 container_of(nb, struct drm_i915_private, lid_notifier);
664 struct drm_device *dev = dev_priv->dev; 681 struct drm_device *dev = dev_priv->dev;
665 682
666 if (acpi_lid_open() && !dev_priv->suspended) { 683 if (!acpi_lid_open()) {
667 mutex_lock(&dev->mode_config.mutex); 684 dev_priv->modeset_on_lid = 1;
668 drm_helper_resume_force_mode(dev); 685 return NOTIFY_OK;
669 mutex_unlock(&dev->mode_config.mutex);
670 } 686 }
671 687
672 drm_sysfs_hotplug_event(dev_priv->dev); 688 if (!dev_priv->modeset_on_lid)
689 return NOTIFY_OK;
690
691 dev_priv->modeset_on_lid = 0;
692
693 mutex_lock(&dev->mode_config.mutex);
694 drm_helper_resume_force_mode(dev);
695 mutex_unlock(&dev->mode_config.mutex);
673 696
674 return NOTIFY_OK; 697 return NOTIFY_OK;
675} 698}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c64eab493fb0..9ca917931afb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1082,7 +1082,8 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
1086 < 1000)
1086 return MODE_OK; 1087 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1088 return MODE_CLOCK_RANGE;
1088} 1089}
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 000000000000..403eb3a5891f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
1mkregtable
2*_reg_safe.h
3
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 09a28923f46e..b5713eedd6e1 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
52 r600_blit_kms.o 52 r600_blit_kms.o radeon_pm.o
53 53
54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
55 55
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5d402086bc47..c11ddddfb3b6 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2314,7 +2314,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
2314 UCHAR ucSS_Step; 2314 UCHAR ucSS_Step;
2315 UCHAR ucSS_Delay; 2315 UCHAR ucSS_Delay;
2316 UCHAR ucSS_Id; 2316 UCHAR ucSS_Id;
2317 UCHAR ucRecommandedRef_Div; 2317 UCHAR ucRecommendedRef_Div;
2318 UCHAR ucSS_Range; /* it was reserved for V11 */ 2318 UCHAR ucSS_Range; /* it was reserved for V11 */
2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; 2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320 2320
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 6a015929deee..c15287a590ff 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -31,10 +31,6 @@
31#include "atom.h" 31#include "atom.h"
32#include "atom-bits.h" 32#include "atom-bits.h"
33 33
34/* evil but including atombios.h is much worse */
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
37 int32_t *pixel_clock);
38static void atombios_overscan_setup(struct drm_crtc *crtc, 34static void atombios_overscan_setup(struct drm_crtc *crtc,
39 struct drm_display_mode *mode, 35 struct drm_display_mode *mode,
40 struct drm_display_mode *adjusted_mode) 36 struct drm_display_mode *adjusted_mode)
@@ -248,18 +244,18 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
248 244
249 switch (mode) { 245 switch (mode) {
250 case DRM_MODE_DPMS_ON: 246 case DRM_MODE_DPMS_ON:
247 atombios_enable_crtc(crtc, 1);
251 if (ASIC_IS_DCE3(rdev)) 248 if (ASIC_IS_DCE3(rdev))
252 atombios_enable_crtc_memreq(crtc, 1); 249 atombios_enable_crtc_memreq(crtc, 1);
253 atombios_enable_crtc(crtc, 1);
254 atombios_blank_crtc(crtc, 0); 250 atombios_blank_crtc(crtc, 0);
255 break; 251 break;
256 case DRM_MODE_DPMS_STANDBY: 252 case DRM_MODE_DPMS_STANDBY:
257 case DRM_MODE_DPMS_SUSPEND: 253 case DRM_MODE_DPMS_SUSPEND:
258 case DRM_MODE_DPMS_OFF: 254 case DRM_MODE_DPMS_OFF:
259 atombios_blank_crtc(crtc, 1); 255 atombios_blank_crtc(crtc, 1);
260 atombios_enable_crtc(crtc, 0);
261 if (ASIC_IS_DCE3(rdev)) 256 if (ASIC_IS_DCE3(rdev))
262 atombios_enable_crtc_memreq(crtc, 0); 257 atombios_enable_crtc_memreq(crtc, 0);
258 atombios_enable_crtc(crtc, 0);
263 break; 259 break;
264 } 260 }
265 261
@@ -270,59 +266,147 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
270 266
271static void 267static void
272atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, 268atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
273 SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param) 269 struct drm_display_mode *mode)
274{ 270{
271 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
275 struct drm_device *dev = crtc->dev; 272 struct drm_device *dev = crtc->dev;
276 struct radeon_device *rdev = dev->dev_private; 273 struct radeon_device *rdev = dev->dev_private;
277 SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param; 274 SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
278 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); 275 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
276 u16 misc = 0;
279 277
280 conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size); 278 memset(&args, 0, sizeof(args));
281 conv_param.usH_Blanking_Time = 279 args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
282 cpu_to_le16(crtc_param->usH_Blanking_Time); 280 args.usH_Blanking_Time =
283 conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size); 281 cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
284 conv_param.usV_Blanking_Time = 282 args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
285 cpu_to_le16(crtc_param->usV_Blanking_Time); 283 args.usV_Blanking_Time =
286 conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset); 284 cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
287 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); 285 args.usH_SyncOffset =
288 conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset); 286 cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
289 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth); 287 args.usH_SyncWidth =
290 conv_param.susModeMiscInfo.usAccess = 288 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
291 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); 289 args.usV_SyncOffset =
292 conv_param.ucCRTC = crtc_param->ucCRTC; 290 cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
291 args.usV_SyncWidth =
292 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
293 /*args.ucH_Border = mode->hborder;*/
294 /*args.ucV_Border = mode->vborder;*/
295
296 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
297 misc |= ATOM_VSYNC_POLARITY;
298 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
299 misc |= ATOM_HSYNC_POLARITY;
300 if (mode->flags & DRM_MODE_FLAG_CSYNC)
301 misc |= ATOM_COMPOSITESYNC;
302 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
303 misc |= ATOM_INTERLACE;
304 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
305 misc |= ATOM_DOUBLE_CLOCK_MODE;
306
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id;
293 309
294 printk("executing set crtc dtd timing\n"); 310 printk("executing set crtc dtd timing\n");
295 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param); 311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
296} 312}
297 313
298void atombios_crtc_set_timing(struct drm_crtc *crtc, 314static void atombios_crtc_set_timing(struct drm_crtc *crtc,
299 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION * 315 struct drm_display_mode *mode)
300 crtc_param)
301{ 316{
317 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
302 struct drm_device *dev = crtc->dev; 318 struct drm_device *dev = crtc->dev;
303 struct radeon_device *rdev = dev->dev_private; 319 struct radeon_device *rdev = dev->dev_private;
304 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param; 320 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
305 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); 321 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
322 u16 misc = 0;
306 323
307 conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total); 324 memset(&args, 0, sizeof(args));
308 conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp); 325 args.usH_Total = cpu_to_le16(mode->crtc_htotal);
309 conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart); 326 args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
310 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); 327 args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
311 conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total); 328 args.usH_SyncWidth =
312 conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp); 329 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
313 conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart); 330 args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
314 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth); 331 args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
315 conv_param.susModeMiscInfo.usAccess = 332 args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
316 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); 333 args.usV_SyncWidth =
317 conv_param.ucCRTC = crtc_param->ucCRTC; 334 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
318 conv_param.ucOverscanRight = crtc_param->ucOverscanRight; 335
319 conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft; 336 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
320 conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom; 337 misc |= ATOM_VSYNC_POLARITY;
321 conv_param.ucOverscanTop = crtc_param->ucOverscanTop; 338 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
322 conv_param.ucReserved = crtc_param->ucReserved; 339 misc |= ATOM_HSYNC_POLARITY;
340 if (mode->flags & DRM_MODE_FLAG_CSYNC)
341 misc |= ATOM_COMPOSITESYNC;
342 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
343 misc |= ATOM_INTERLACE;
344 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
345 misc |= ATOM_DOUBLE_CLOCK_MODE;
346
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id;
323 349
324 printk("executing set crtc timing\n"); 350 printk("executing set crtc timing\n");
325 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param); 351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352}
353
354static void atombios_set_ss(struct drm_crtc *crtc, int enable)
355{
356 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
357 struct drm_device *dev = crtc->dev;
358 struct radeon_device *rdev = dev->dev_private;
359 struct drm_encoder *encoder = NULL;
360 struct radeon_encoder *radeon_encoder = NULL;
361 struct radeon_encoder_atom_dig *dig = NULL;
362 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
363 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
364 ENABLE_LVDS_SS_PARAMETERS legacy_args;
365 uint16_t percentage = 0;
366 uint8_t type = 0, step = 0, delay = 0, range = 0;
367
368 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
369 if (encoder->crtc == crtc) {
370 radeon_encoder = to_radeon_encoder(encoder);
371 /* only enable spread spectrum on LVDS */
372 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
373 dig = radeon_encoder->enc_priv;
374 if (dig && dig->ss) {
375 percentage = dig->ss->percentage;
376 type = dig->ss->type;
377 step = dig->ss->step;
378 delay = dig->ss->delay;
379 range = dig->ss->range;
380 } else if (enable)
381 return;
382 } else if (enable)
383 return;
384 break;
385 }
386 }
387
388 if (!radeon_encoder)
389 return;
390
391 if (ASIC_IS_AVIVO(rdev)) {
392 memset(&args, 0, sizeof(args));
393 args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
394 args.ucSpreadSpectrumType = type;
395 args.ucSpreadSpectrumStep = step;
396 args.ucSpreadSpectrumDelay = delay;
397 args.ucSpreadSpectrumRange = range;
398 args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
399 args.ucEnable = enable;
400 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
401 } else {
402 memset(&legacy_args, 0, sizeof(legacy_args));
403 legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
404 legacy_args.ucSpreadSpectrumType = type;
405 legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
406 legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
407 legacy_args.ucEnable = enable;
408 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
409 }
326} 410}
327 411
328void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
@@ -333,12 +417,13 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
333 struct drm_encoder *encoder = NULL; 417 struct drm_encoder *encoder = NULL;
334 struct radeon_encoder *radeon_encoder = NULL; 418 struct radeon_encoder *radeon_encoder = NULL;
335 uint8_t frev, crev; 419 uint8_t frev, crev;
336 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 420 int index;
337 SET_PIXEL_CLOCK_PS_ALLOCATION args; 421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
338 PIXEL_CLOCK_PARAMETERS *spc1_ptr; 422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
339 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; 423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
340 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; 424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
341 uint32_t sclock = mode->clock; 425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
342 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; 427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
343 struct radeon_pll *pll; 428 struct radeon_pll *pll;
344 int pll_flags = 0; 429 int pll_flags = 0;
@@ -346,8 +431,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
346 memset(&args, 0, sizeof(args)); 431 memset(&args, 0, sizeof(args));
347 432
348 if (ASIC_IS_AVIVO(rdev)) { 433 if (ASIC_IS_AVIVO(rdev)) {
349 uint32_t ss_cntl;
350
351 if ((rdev->family == CHIP_RS600) || 434 if ((rdev->family == CHIP_RS600) ||
352 (rdev->family == CHIP_RS690) || 435 (rdev->family == CHIP_RS690) ||
353 (rdev->family == CHIP_RS740)) 436 (rdev->family == CHIP_RS740))
@@ -358,15 +441,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
358 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
359 else 442 else
360 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
361
362 /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
363 if (radeon_crtc->crtc_id == 0) {
364 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
365 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
366 } else {
367 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
368 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
369 }
370 } else { 444 } else {
371 pll_flags |= RADEON_PLL_LEGACY; 445 pll_flags |= RADEON_PLL_LEGACY;
372 446
@@ -393,14 +467,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
393 } 467 }
394 } 468 }
395 469
470 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
471 * accordingly based on the encoder/transmitter to work around
472 * special hw requirements.
473 */
474 if (ASIC_IS_DCE3(rdev)) {
475 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
476
477 if (!encoder)
478 return;
479
480 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
481 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
482 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
483 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
484
485 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
486 atom_execute_table(rdev->mode_info.atom_context,
487 index, (uint32_t *)&adjust_pll_args);
488 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
489 } else {
490 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
491 if (ASIC_IS_AVIVO(rdev) &&
492 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
493 adjusted_clock = mode->clock * 2;
494 else
495 adjusted_clock = mode->clock;
496 }
497
396 if (radeon_crtc->crtc_id == 0) 498 if (radeon_crtc->crtc_id == 0)
397 pll = &rdev->clock.p1pll; 499 pll = &rdev->clock.p1pll;
398 else 500 else
399 pll = &rdev->clock.p2pll; 501 pll = &rdev->clock.p2pll;
400 502
401 radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div, 503 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
402 &ref_div, &post_div, pll_flags); 504 &ref_div, &post_div, pll_flags);
403 505
506 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
404 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 507 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
405 &crev); 508 &crev);
406 509
@@ -409,7 +512,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
409 switch (crev) { 512 switch (crev) {
410 case 1: 513 case 1:
411 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 514 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
412 spc1_ptr->usPixelClock = cpu_to_le16(sclock); 515 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
413 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 516 spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
414 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 517 spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
415 spc1_ptr->ucFracFbDiv = frac_fb_div; 518 spc1_ptr->ucFracFbDiv = frac_fb_div;
@@ -422,7 +525,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
422 case 2: 525 case 2:
423 spc2_ptr = 526 spc2_ptr =
424 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 527 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
425 spc2_ptr->usPixelClock = cpu_to_le16(sclock); 528 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
426 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 529 spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
427 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 530 spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
428 spc2_ptr->ucFracFbDiv = frac_fb_div; 531 spc2_ptr->ucFracFbDiv = frac_fb_div;
@@ -437,7 +540,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
437 return; 540 return;
438 spc3_ptr = 541 spc3_ptr =
439 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 542 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
440 spc3_ptr->usPixelClock = cpu_to_le16(sclock); 543 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
441 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 544 spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
442 spc3_ptr->usFbDiv = cpu_to_le16(fb_div); 545 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
443 spc3_ptr->ucFracFbDiv = frac_fb_div; 546 spc3_ptr->ucFracFbDiv = frac_fb_div;
@@ -527,6 +630,16 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
527 WREG32(AVIVO_D1VGA_CONTROL, 0); 630 WREG32(AVIVO_D1VGA_CONTROL, 0);
528 else 631 else
529 WREG32(AVIVO_D2VGA_CONTROL, 0); 632 WREG32(AVIVO_D2VGA_CONTROL, 0);
633
634 if (rdev->family >= CHIP_RV770) {
635 if (radeon_crtc->crtc_id) {
636 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
637 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
638 } else {
639 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
640 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
641 }
642 }
530 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 643 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
531 (u32) fb_location); 644 (u32) fb_location);
532 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + 645 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
@@ -563,6 +676,10 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
563 radeon_fb = to_radeon_framebuffer(old_fb); 676 radeon_fb = to_radeon_framebuffer(old_fb);
564 radeon_gem_object_unpin(radeon_fb->obj); 677 radeon_gem_object_unpin(radeon_fb->obj);
565 } 678 }
679
680 /* Bytes per pixel may have changed */
681 radeon_bandwidth_update(rdev);
682
566 return 0; 683 return 0;
567} 684}
568 685
@@ -574,134 +691,24 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
574 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 691 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
575 struct drm_device *dev = crtc->dev; 692 struct drm_device *dev = crtc->dev;
576 struct radeon_device *rdev = dev->dev_private; 693 struct radeon_device *rdev = dev->dev_private;
577 struct drm_encoder *encoder;
578 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
579 int need_tv_timings = 0;
580 bool ret;
581 694
582 /* TODO color tiling */ 695 /* TODO color tiling */
583 memset(&crtc_timing, 0, sizeof(crtc_timing));
584
585 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
586 /* find tv std */
587 if (encoder->crtc == crtc) {
588 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
589
590 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
591 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
592 if (tv_dac) {
593 if (tv_dac->tv_std == TV_STD_NTSC ||
594 tv_dac->tv_std == TV_STD_NTSC_J ||
595 tv_dac->tv_std == TV_STD_PAL_M)
596 need_tv_timings = 1;
597 else
598 need_tv_timings = 2;
599 break;
600 }
601 }
602 }
603 }
604
605 crtc_timing.ucCRTC = radeon_crtc->crtc_id;
606 if (need_tv_timings) {
607 ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1,
608 &crtc_timing, &adjusted_mode->clock);
609 if (ret == false)
610 need_tv_timings = 0;
611 }
612
613 if (!need_tv_timings) {
614 crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
615 crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
616 crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
617 crtc_timing.usH_SyncWidth =
618 adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
619
620 crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
621 crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
622 crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
623 crtc_timing.usV_SyncWidth =
624 adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
625
626 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
627 crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
628
629 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
630 crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
631
632 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
633 crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
634
635 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
636 crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
637
638 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
639 crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
640 }
641 696
697 atombios_set_ss(crtc, 0);
642 atombios_crtc_set_pll(crtc, adjusted_mode); 698 atombios_crtc_set_pll(crtc, adjusted_mode);
643 atombios_crtc_set_timing(crtc, &crtc_timing); 699 atombios_set_ss(crtc, 1);
700 atombios_crtc_set_timing(crtc, adjusted_mode);
644 701
645 if (ASIC_IS_AVIVO(rdev)) 702 if (ASIC_IS_AVIVO(rdev))
646 atombios_crtc_set_base(crtc, x, y, old_fb); 703 atombios_crtc_set_base(crtc, x, y, old_fb);
647 else { 704 else {
648 if (radeon_crtc->crtc_id == 0) { 705 if (radeon_crtc->crtc_id == 0)
649 SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing; 706 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
650 memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
651
652 /* setup FP shadow regs on R4xx */
653 crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
654 crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
655 crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
656 crtc_dtd_timing.usH_Blanking_Time =
657 adjusted_mode->crtc_hblank_end -
658 adjusted_mode->crtc_hdisplay;
659 crtc_dtd_timing.usV_Blanking_Time =
660 adjusted_mode->crtc_vblank_end -
661 adjusted_mode->crtc_vdisplay;
662 crtc_dtd_timing.usH_SyncOffset =
663 adjusted_mode->crtc_hsync_start -
664 adjusted_mode->crtc_hdisplay;
665 crtc_dtd_timing.usV_SyncOffset =
666 adjusted_mode->crtc_vsync_start -
667 adjusted_mode->crtc_vdisplay;
668 crtc_dtd_timing.usH_SyncWidth =
669 adjusted_mode->crtc_hsync_end -
670 adjusted_mode->crtc_hsync_start;
671 crtc_dtd_timing.usV_SyncWidth =
672 adjusted_mode->crtc_vsync_end -
673 adjusted_mode->crtc_vsync_start;
674 /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
675 /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
676
677 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
678 crtc_dtd_timing.susModeMiscInfo.usAccess |=
679 ATOM_VSYNC_POLARITY;
680
681 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
682 crtc_dtd_timing.susModeMiscInfo.usAccess |=
683 ATOM_HSYNC_POLARITY;
684
685 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
686 crtc_dtd_timing.susModeMiscInfo.usAccess |=
687 ATOM_COMPOSITESYNC;
688
689 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
690 crtc_dtd_timing.susModeMiscInfo.usAccess |=
691 ATOM_INTERLACE;
692
693 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
694 crtc_dtd_timing.susModeMiscInfo.usAccess |=
695 ATOM_DOUBLE_CLOCK_MODE;
696
697 atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
698 }
699 radeon_crtc_set_base(crtc, x, y, old_fb); 707 radeon_crtc_set_base(crtc, x, y, old_fb);
700 radeon_legacy_atom_set_surface(crtc); 708 radeon_legacy_atom_set_surface(crtc);
701 } 709 }
702 atombios_overscan_setup(crtc, mode, adjusted_mode); 710 atombios_overscan_setup(crtc, mode, adjusted_mode);
703 atombios_scaler_setup(crtc); 711 atombios_scaler_setup(crtc);
704 radeon_bandwidth_update(rdev);
705 return 0; 712 return 0;
706} 713}
707 714
@@ -733,6 +740,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
733 .mode_set_base = atombios_crtc_set_base, 740 .mode_set_base = atombios_crtc_set_base,
734 .prepare = atombios_crtc_prepare, 741 .prepare = atombios_crtc_prepare,
735 .commit = atombios_crtc_commit, 742 .commit = atombios_crtc_commit,
743 .load_lut = radeon_crtc_load_lut,
736}; 744};
737 745
738void radeon_atombios_init_crtc(struct drm_device *dev, 746void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index e2b92c445bab..d4e6e6e4a938 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -57,13 +57,4 @@
57#define VGA_RENDER_CONTROL 0x0300 57#define VGA_RENDER_CONTROL 0x0300
58#define VGA_VSTATUS_CNTL_MASK 0x00030000 58#define VGA_VSTATUS_CNTL_MASK 0x00030000
59 59
60/* AVIVO disable VGA rendering */
61static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev)
62{
63 u32 vga_render;
64 vga_render = RREG32(VGA_RENDER_CONTROL);
65 vga_render &= ~VGA_VSTATUS_CNTL_MASK;
66 WREG32(VGA_RENDER_CONTROL, vga_render);
67}
68
69#endif 60#endif
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index fb211e585dea..0d79577c1576 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -561,7 +561,7 @@ struct table {
561 char *gpu_prefix; 561 char *gpu_prefix;
562}; 562};
563 563
564struct offset *offset_new(unsigned o) 564static struct offset *offset_new(unsigned o)
565{ 565{
566 struct offset *offset; 566 struct offset *offset;
567 567
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o)
573 return offset; 573 return offset;
574} 574}
575 575
576void table_offset_add(struct table *t, struct offset *offset) 576static void table_offset_add(struct table *t, struct offset *offset)
577{ 577{
578 list_add_tail(&offset->list, &t->offsets); 578 list_add_tail(&offset->list, &t->offsets);
579} 579}
580 580
581void table_init(struct table *t) 581static void table_init(struct table *t)
582{ 582{
583 INIT_LIST_HEAD(&t->offsets); 583 INIT_LIST_HEAD(&t->offsets);
584 t->offset_max = 0; 584 t->offset_max = 0;
@@ -586,7 +586,7 @@ void table_init(struct table *t)
586 t->table = NULL; 586 t->table = NULL;
587} 587}
588 588
589void table_print(struct table *t) 589static void table_print(struct table *t)
590{ 590{
591 unsigned nlloop, i, j, n, c, id; 591 unsigned nlloop, i, j, n, c, id;
592 592
@@ -611,7 +611,7 @@ void table_print(struct table *t)
611 printf("};\n"); 611 printf("};\n");
612} 612}
613 613
614int table_build(struct table *t) 614static int table_build(struct table *t)
615{ 615{
616 struct offset *offset; 616 struct offset *offset;
617 unsigned i, m; 617 unsigned i, m;
@@ -631,7 +631,7 @@ int table_build(struct table *t)
631} 631}
632 632
633static char gpu_name[10]; 633static char gpu_name[10];
634int parser_auth(struct table *t, const char *filename) 634static int parser_auth(struct table *t, const char *filename)
635{ 635{
636 FILE *file; 636 FILE *file;
637 regex_t mask_rex; 637 regex_t mask_rex;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index be51c5f7d0f6..c9e93eabcf16 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -32,6 +32,9 @@
32#include "radeon_reg.h" 32#include "radeon_reg.h"
33#include "radeon.h" 33#include "radeon.h"
34#include "r100d.h" 34#include "r100d.h"
35#include "rs100d.h"
36#include "rv200d.h"
37#include "rv250d.h"
35 38
36#include <linux/firmware.h> 39#include <linux/firmware.h>
37#include <linux/platform_device.h> 40#include <linux/platform_device.h>
@@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520);
60 63
61/* This files gather functions specifics to: 64/* This files gather functions specifics to:
62 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
63 *
64 * Some of these functions might be used by newer ASICs.
65 */ 66 */
66int r200_init(struct radeon_device *rdev);
67void r100_hdp_reset(struct radeon_device *rdev);
68void r100_gpu_init(struct radeon_device *rdev);
69int r100_gui_wait_for_idle(struct radeon_device *rdev);
70int r100_mc_wait_for_idle(struct radeon_device *rdev);
71void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
72void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
73int r100_debugfs_mc_info_init(struct radeon_device *rdev);
74
75 67
76/* 68/*
77 * PCI GART 69 * PCI GART
@@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev)
152 radeon_gart_fini(rdev); 144 radeon_gart_fini(rdev);
153} 145}
154 146
155
156/*
157 * MC
158 */
159void r100_mc_disable_clients(struct radeon_device *rdev)
160{
161 uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
162
163 /* FIXME: is this function correct for rs100,rs200,rs300 ? */
164 if (r100_gui_wait_for_idle(rdev)) {
165 printk(KERN_WARNING "Failed to wait GUI idle while "
166 "programming pipes. Bad things might happen.\n");
167 }
168
169 /* stop display and memory access */
170 ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
171 WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
172 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
173 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
174 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
175
176 r100_gpu_wait_for_vsync(rdev);
177
178 WREG32(RADEON_CRTC_GEN_CNTL,
179 (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
180 RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
181
182 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
183 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
184
185 r100_gpu_wait_for_vsync2(rdev);
186 WREG32(RADEON_CRTC2_GEN_CNTL,
187 (crtc2_gen_cntl &
188 ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
189 RADEON_CRTC2_DISP_REQ_EN_B);
190 }
191
192 udelay(500);
193}
194
195void r100_mc_setup(struct radeon_device *rdev)
196{
197 uint32_t tmp;
198 int r;
199
200 r = r100_debugfs_mc_info_init(rdev);
201 if (r) {
202 DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
203 }
204 /* Write VRAM size in case we are limiting it */
205 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
206 /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
207 * if the aperture is 64MB but we have 32MB VRAM
208 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
209 * to 64MB, otherwise the gpu accidentially dies */
210 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
211 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
212 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
213 WREG32(RADEON_MC_FB_LOCATION, tmp);
214
215 /* Enable bus mastering */
216 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
217 WREG32(RADEON_BUS_CNTL, tmp);
218
219 if (rdev->flags & RADEON_IS_AGP) {
220 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
221 tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
222 tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
223 WREG32(RADEON_MC_AGP_LOCATION, tmp);
224 WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
225 } else {
226 WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
227 WREG32(RADEON_AGP_BASE, 0);
228 }
229
230 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
231 tmp |= (7 << 28);
232 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
233 (void)RREG32(RADEON_HOST_PATH_CNTL);
234 WREG32(RADEON_HOST_PATH_CNTL, tmp);
235 (void)RREG32(RADEON_HOST_PATH_CNTL);
236}
237
238int r100_mc_init(struct radeon_device *rdev)
239{
240 int r;
241
242 if (r100_debugfs_rbbm_init(rdev)) {
243 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
244 }
245
246 r100_gpu_init(rdev);
247 /* Disable gart which also disable out of gart access */
248 r100_pci_gart_disable(rdev);
249
250 /* Setup GPU memory space */
251 rdev->mc.gtt_location = 0xFFFFFFFFUL;
252 if (rdev->flags & RADEON_IS_AGP) {
253 r = radeon_agp_init(rdev);
254 if (r) {
255 printk(KERN_WARNING "[drm] Disabling AGP\n");
256 rdev->flags &= ~RADEON_IS_AGP;
257 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
258 } else {
259 rdev->mc.gtt_location = rdev->mc.agp_base;
260 }
261 }
262 r = radeon_mc_setup(rdev);
263 if (r) {
264 return r;
265 }
266
267 r100_mc_disable_clients(rdev);
268 if (r100_mc_wait_for_idle(rdev)) {
269 printk(KERN_WARNING "Failed to wait MC idle while "
270 "programming pipes. Bad things might happen.\n");
271 }
272
273 r100_mc_setup(rdev);
274 return 0;
275}
276
277void r100_mc_fini(struct radeon_device *rdev)
278{
279}
280
281
282/*
283 * Interrupts
284 */
285int r100_irq_set(struct radeon_device *rdev) 147int r100_irq_set(struct radeon_device *rdev)
286{ 148{
287 uint32_t tmp = 0; 149 uint32_t tmp = 0;
@@ -324,7 +186,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
324 186
325int r100_irq_process(struct radeon_device *rdev) 187int r100_irq_process(struct radeon_device *rdev)
326{ 188{
327 uint32_t status; 189 uint32_t status, msi_rearm;
328 190
329 status = r100_irq_ack(rdev); 191 status = r100_irq_ack(rdev);
330 if (!status) { 192 if (!status) {
@@ -347,6 +209,21 @@ int r100_irq_process(struct radeon_device *rdev)
347 } 209 }
348 status = r100_irq_ack(rdev); 210 status = r100_irq_ack(rdev);
349 } 211 }
212 if (rdev->msi_enabled) {
213 switch (rdev->family) {
214 case CHIP_RS400:
215 case CHIP_RS480:
216 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
217 WREG32(RADEON_AIC_CNTL, msi_rearm);
218 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
219 break;
220 default:
221 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
222 WREG32(RADEON_MSI_REARM_EN, msi_rearm);
223 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
224 break;
225 }
226 }
350 return IRQ_HANDLED; 227 return IRQ_HANDLED;
351} 228}
352 229
@@ -358,10 +235,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
358 return RREG32(RADEON_CRTC2_CRNT_FRAME); 235 return RREG32(RADEON_CRTC2_CRNT_FRAME);
359} 236}
360 237
361
362/*
363 * Fence emission
364 */
365void r100_fence_ring_emit(struct radeon_device *rdev, 238void r100_fence_ring_emit(struct radeon_device *rdev,
366 struct radeon_fence *fence) 239 struct radeon_fence *fence)
367{ 240{
@@ -377,16 +250,12 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
377 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 250 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
378} 251}
379 252
380
381/*
382 * Writeback
383 */
384int r100_wb_init(struct radeon_device *rdev) 253int r100_wb_init(struct radeon_device *rdev)
385{ 254{
386 int r; 255 int r;
387 256
388 if (rdev->wb.wb_obj == NULL) { 257 if (rdev->wb.wb_obj == NULL) {
389 r = radeon_object_create(rdev, NULL, 4096, 258 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
390 true, 259 true,
391 RADEON_GEM_DOMAIN_GTT, 260 RADEON_GEM_DOMAIN_GTT,
392 false, &rdev->wb.wb_obj); 261 false, &rdev->wb.wb_obj);
@@ -504,10 +373,6 @@ int r100_copy_blit(struct radeon_device *rdev,
504 return r; 373 return r;
505} 374}
506 375
507
508/*
509 * CP
510 */
511static int r100_cp_wait_for_idle(struct radeon_device *rdev) 376static int r100_cp_wait_for_idle(struct radeon_device *rdev)
512{ 377{
513 unsigned i; 378 unsigned i;
@@ -612,6 +477,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
612 } 477 }
613 return err; 478 return err;
614} 479}
480
615static void r100_cp_load_microcode(struct radeon_device *rdev) 481static void r100_cp_load_microcode(struct radeon_device *rdev)
616{ 482{
617 const __be32 *fw_data; 483 const __be32 *fw_data;
@@ -712,19 +578,19 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
712 indirect1_start = 16; 578 indirect1_start = 16;
713 /* cp setup */ 579 /* cp setup */
714 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 580 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
715 WREG32(RADEON_CP_RB_CNTL, 581 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
716#ifdef __BIG_ENDIAN
717 RADEON_BUF_SWAP_32BIT |
718#endif
719 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
720 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 582 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
721 REG_SET(RADEON_MAX_FETCH, max_fetch) | 583 REG_SET(RADEON_MAX_FETCH, max_fetch) |
722 RADEON_RB_NO_UPDATE); 584 RADEON_RB_NO_UPDATE);
585#ifdef __BIG_ENDIAN
586 tmp |= RADEON_BUF_SWAP_32BIT;
587#endif
588 WREG32(RADEON_CP_RB_CNTL, tmp);
589
723 /* Set ring address */ 590 /* Set ring address */
724 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); 591 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
725 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); 592 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
726 /* Force read & write ptr to 0 */ 593 /* Force read & write ptr to 0 */
727 tmp = RREG32(RADEON_CP_RB_CNTL);
728 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 594 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
729 WREG32(RADEON_CP_RB_RPTR_WR, 0); 595 WREG32(RADEON_CP_RB_RPTR_WR, 0);
730 WREG32(RADEON_CP_RB_WPTR, 0); 596 WREG32(RADEON_CP_RB_WPTR, 0);
@@ -863,13 +729,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
863void r100_cs_dump_packet(struct radeon_cs_parser *p, 729void r100_cs_dump_packet(struct radeon_cs_parser *p,
864 struct radeon_cs_packet *pkt) 730 struct radeon_cs_packet *pkt)
865{ 731{
866 struct radeon_cs_chunk *ib_chunk;
867 volatile uint32_t *ib; 732 volatile uint32_t *ib;
868 unsigned i; 733 unsigned i;
869 unsigned idx; 734 unsigned idx;
870 735
871 ib = p->ib->ptr; 736 ib = p->ib->ptr;
872 ib_chunk = &p->chunks[p->chunk_ib_idx];
873 idx = pkt->idx; 737 idx = pkt->idx;
874 for (i = 0; i <= (pkt->count + 1); i++, idx++) { 738 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
875 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 739 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +760,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
896 idx, ib_chunk->length_dw); 760 idx, ib_chunk->length_dw);
897 return -EINVAL; 761 return -EINVAL;
898 } 762 }
899 header = ib_chunk->kdata[idx]; 763 header = radeon_get_ib_value(p, idx);
900 pkt->idx = idx; 764 pkt->idx = idx;
901 pkt->type = CP_PACKET_GET_TYPE(header); 765 pkt->type = CP_PACKET_GET_TYPE(header);
902 pkt->count = CP_PACKET_GET_COUNT(header); 766 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +803,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
939 */ 803 */
940int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 804int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
941{ 805{
942 struct radeon_cs_chunk *ib_chunk;
943 struct drm_mode_object *obj; 806 struct drm_mode_object *obj;
944 struct drm_crtc *crtc; 807 struct drm_crtc *crtc;
945 struct radeon_crtc *radeon_crtc; 808 struct radeon_crtc *radeon_crtc;
@@ -947,8 +810,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
947 int crtc_id; 810 int crtc_id;
948 int r; 811 int r;
949 uint32_t header, h_idx, reg; 812 uint32_t header, h_idx, reg;
813 volatile uint32_t *ib;
950 814
951 ib_chunk = &p->chunks[p->chunk_ib_idx]; 815 ib = p->ib->ptr;
952 816
953 /* parse the wait until */ 817 /* parse the wait until */
954 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 818 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,24 +827,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
963 return r; 827 return r;
964 } 828 }
965 829
966 if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { 830 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
967 DRM_ERROR("vline wait had illegal wait until\n"); 831 DRM_ERROR("vline wait had illegal wait until\n");
968 r = -EINVAL; 832 r = -EINVAL;
969 return r; 833 return r;
970 } 834 }
971 835
972 /* jump over the NOP */ 836 /* jump over the NOP */
973 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 837 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
974 if (r) 838 if (r)
975 return r; 839 return r;
976 840
977 h_idx = p->idx - 2; 841 h_idx = p->idx - 2;
978 p->idx += waitreloc.count; 842 p->idx += waitreloc.count + 2;
979 p->idx += p3reloc.count; 843 p->idx += p3reloc.count + 2;
980 844
981 header = ib_chunk->kdata[h_idx]; 845 header = radeon_get_ib_value(p, h_idx);
982 crtc_id = ib_chunk->kdata[h_idx + 5]; 846 crtc_id = radeon_get_ib_value(p, h_idx + 5);
983 reg = ib_chunk->kdata[h_idx] >> 2; 847 reg = CP_PACKET0_GET_REG(header);
984 mutex_lock(&p->rdev->ddev->mode_config.mutex); 848 mutex_lock(&p->rdev->ddev->mode_config.mutex);
985 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 849 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
986 if (!obj) { 850 if (!obj) {
@@ -994,16 +858,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
994 858
995 if (!crtc->enabled) { 859 if (!crtc->enabled) {
996 /* if the CRTC isn't enabled - we need to nop out the wait until */ 860 /* if the CRTC isn't enabled - we need to nop out the wait until */
997 ib_chunk->kdata[h_idx + 2] = PACKET2(0); 861 ib[h_idx + 2] = PACKET2(0);
998 ib_chunk->kdata[h_idx + 3] = PACKET2(0); 862 ib[h_idx + 3] = PACKET2(0);
999 } else if (crtc_id == 1) { 863 } else if (crtc_id == 1) {
1000 switch (reg) { 864 switch (reg) {
1001 case AVIVO_D1MODE_VLINE_START_END: 865 case AVIVO_D1MODE_VLINE_START_END:
1002 header &= R300_CP_PACKET0_REG_MASK; 866 header &= ~R300_CP_PACKET0_REG_MASK;
1003 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 867 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1004 break; 868 break;
1005 case RADEON_CRTC_GUI_TRIG_VLINE: 869 case RADEON_CRTC_GUI_TRIG_VLINE:
1006 header &= R300_CP_PACKET0_REG_MASK; 870 header &= ~R300_CP_PACKET0_REG_MASK;
1007 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 871 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1008 break; 872 break;
1009 default: 873 default:
@@ -1011,8 +875,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1011 r = -EINVAL; 875 r = -EINVAL;
1012 goto out; 876 goto out;
1013 } 877 }
1014 ib_chunk->kdata[h_idx] = header; 878 ib[h_idx] = header;
1015 ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 879 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1016 } 880 }
1017out: 881out:
1018 mutex_unlock(&p->rdev->ddev->mode_config.mutex); 882 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +897,6 @@ out:
1033int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, 897int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1034 struct radeon_cs_reloc **cs_reloc) 898 struct radeon_cs_reloc **cs_reloc)
1035{ 899{
1036 struct radeon_cs_chunk *ib_chunk;
1037 struct radeon_cs_chunk *relocs_chunk; 900 struct radeon_cs_chunk *relocs_chunk;
1038 struct radeon_cs_packet p3reloc; 901 struct radeon_cs_packet p3reloc;
1039 unsigned idx; 902 unsigned idx;
@@ -1044,7 +907,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1044 return -EINVAL; 907 return -EINVAL;
1045 } 908 }
1046 *cs_reloc = NULL; 909 *cs_reloc = NULL;
1047 ib_chunk = &p->chunks[p->chunk_ib_idx];
1048 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 910 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1049 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 911 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1050 if (r) { 912 if (r) {
@@ -1057,7 +919,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1057 r100_cs_dump_packet(p, &p3reloc); 919 r100_cs_dump_packet(p, &p3reloc);
1058 return -EINVAL; 920 return -EINVAL;
1059 } 921 }
1060 idx = ib_chunk->kdata[p3reloc.idx + 1]; 922 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1061 if (idx >= relocs_chunk->length_dw) { 923 if (idx >= relocs_chunk->length_dw) {
1062 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 924 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1063 idx, relocs_chunk->length_dw); 925 idx, relocs_chunk->length_dw);
@@ -1126,7 +988,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1126 struct radeon_cs_packet *pkt, 988 struct radeon_cs_packet *pkt,
1127 unsigned idx, unsigned reg) 989 unsigned idx, unsigned reg)
1128{ 990{
1129 struct radeon_cs_chunk *ib_chunk;
1130 struct radeon_cs_reloc *reloc; 991 struct radeon_cs_reloc *reloc;
1131 struct r100_cs_track *track; 992 struct r100_cs_track *track;
1132 volatile uint32_t *ib; 993 volatile uint32_t *ib;
@@ -1134,11 +995,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1134 int r; 995 int r;
1135 int i, face; 996 int i, face;
1136 u32 tile_flags = 0; 997 u32 tile_flags = 0;
998 u32 idx_value;
1137 999
1138 ib = p->ib->ptr; 1000 ib = p->ib->ptr;
1139 ib_chunk = &p->chunks[p->chunk_ib_idx];
1140 track = (struct r100_cs_track *)p->track; 1001 track = (struct r100_cs_track *)p->track;
1141 1002
1003 idx_value = radeon_get_ib_value(p, idx);
1004
1142 switch (reg) { 1005 switch (reg) {
1143 case RADEON_CRTC_GUI_TRIG_VLINE: 1006 case RADEON_CRTC_GUI_TRIG_VLINE:
1144 r = r100_cs_packet_parse_vline(p); 1007 r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1029,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1166 return r; 1029 return r;
1167 } 1030 }
1168 track->zb.robj = reloc->robj; 1031 track->zb.robj = reloc->robj;
1169 track->zb.offset = ib_chunk->kdata[idx]; 1032 track->zb.offset = idx_value;
1170 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1033 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1171 break; 1034 break;
1172 case RADEON_RB3D_COLOROFFSET: 1035 case RADEON_RB3D_COLOROFFSET:
1173 r = r100_cs_packet_next_reloc(p, &reloc); 1036 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1041,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1178 return r; 1041 return r;
1179 } 1042 }
1180 track->cb[0].robj = reloc->robj; 1043 track->cb[0].robj = reloc->robj;
1181 track->cb[0].offset = ib_chunk->kdata[idx]; 1044 track->cb[0].offset = idx_value;
1182 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1045 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1183 break; 1046 break;
1184 case RADEON_PP_TXOFFSET_0: 1047 case RADEON_PP_TXOFFSET_0:
1185 case RADEON_PP_TXOFFSET_1: 1048 case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1055,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1192 r100_cs_dump_packet(p, pkt); 1055 r100_cs_dump_packet(p, pkt);
1193 return r; 1056 return r;
1194 } 1057 }
1195 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1058 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1196 track->textures[i].robj = reloc->robj; 1059 track->textures[i].robj = reloc->robj;
1197 break; 1060 break;
1198 case RADEON_PP_CUBIC_OFFSET_T0_0: 1061 case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1071,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1208 r100_cs_dump_packet(p, pkt); 1071 r100_cs_dump_packet(p, pkt);
1209 return r; 1072 return r;
1210 } 1073 }
1211 track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; 1074 track->textures[0].cube_info[i].offset = idx_value;
1212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1075 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1213 track->textures[0].cube_info[i].robj = reloc->robj; 1076 track->textures[0].cube_info[i].robj = reloc->robj;
1214 break; 1077 break;
1215 case RADEON_PP_CUBIC_OFFSET_T1_0: 1078 case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1088,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1225 r100_cs_dump_packet(p, pkt); 1088 r100_cs_dump_packet(p, pkt);
1226 return r; 1089 return r;
1227 } 1090 }
1228 track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; 1091 track->textures[1].cube_info[i].offset = idx_value;
1229 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1092 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1230 track->textures[1].cube_info[i].robj = reloc->robj; 1093 track->textures[1].cube_info[i].robj = reloc->robj;
1231 break; 1094 break;
1232 case RADEON_PP_CUBIC_OFFSET_T2_0: 1095 case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1105,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1242 r100_cs_dump_packet(p, pkt); 1105 r100_cs_dump_packet(p, pkt);
1243 return r; 1106 return r;
1244 } 1107 }
1245 track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; 1108 track->textures[2].cube_info[i].offset = idx_value;
1246 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1109 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1247 track->textures[2].cube_info[i].robj = reloc->robj; 1110 track->textures[2].cube_info[i].robj = reloc->robj;
1248 break; 1111 break;
1249 case RADEON_RE_WIDTH_HEIGHT: 1112 case RADEON_RE_WIDTH_HEIGHT:
1250 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 1113 track->maxy = ((idx_value >> 16) & 0x7FF);
1251 break; 1114 break;
1252 case RADEON_RB3D_COLORPITCH: 1115 case RADEON_RB3D_COLORPITCH:
1253 r = r100_cs_packet_next_reloc(p, &reloc); 1116 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1126,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1263 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1126 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1264 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1127 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1265 1128
1266 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 1129 tmp = idx_value & ~(0x7 << 16);
1267 tmp |= tile_flags; 1130 tmp |= tile_flags;
1268 ib[idx] = tmp; 1131 ib[idx] = tmp;
1269 1132
1270 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 1133 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1271 break; 1134 break;
1272 case RADEON_RB3D_DEPTHPITCH: 1135 case RADEON_RB3D_DEPTHPITCH:
1273 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 1136 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1274 break; 1137 break;
1275 case RADEON_RB3D_CNTL: 1138 case RADEON_RB3D_CNTL:
1276 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1139 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1277 case 7: 1140 case 7:
1278 case 8: 1141 case 8:
1279 case 9: 1142 case 9:
@@ -1291,13 +1154,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1291 break; 1154 break;
1292 default: 1155 default:
1293 DRM_ERROR("Invalid color buffer format (%d) !\n", 1156 DRM_ERROR("Invalid color buffer format (%d) !\n",
1294 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1157 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1295 return -EINVAL; 1158 return -EINVAL;
1296 } 1159 }
1297 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 1160 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1298 break; 1161 break;
1299 case RADEON_RB3D_ZSTENCILCNTL: 1162 case RADEON_RB3D_ZSTENCILCNTL:
1300 switch (ib_chunk->kdata[idx] & 0xf) { 1163 switch (idx_value & 0xf) {
1301 case 0: 1164 case 0:
1302 track->zb.cpp = 2; 1165 track->zb.cpp = 2;
1303 break; 1166 break;
@@ -1321,44 +1184,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1321 r100_cs_dump_packet(p, pkt); 1184 r100_cs_dump_packet(p, pkt);
1322 return r; 1185 return r;
1323 } 1186 }
1324 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1187 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1325 break; 1188 break;
1326 case RADEON_PP_CNTL: 1189 case RADEON_PP_CNTL:
1327 { 1190 {
1328 uint32_t temp = ib_chunk->kdata[idx] >> 4; 1191 uint32_t temp = idx_value >> 4;
1329 for (i = 0; i < track->num_texture; i++) 1192 for (i = 0; i < track->num_texture; i++)
1330 track->textures[i].enabled = !!(temp & (1 << i)); 1193 track->textures[i].enabled = !!(temp & (1 << i));
1331 } 1194 }
1332 break; 1195 break;
1333 case RADEON_SE_VF_CNTL: 1196 case RADEON_SE_VF_CNTL:
1334 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1197 track->vap_vf_cntl = idx_value;
1335 break; 1198 break;
1336 case RADEON_SE_VTX_FMT: 1199 case RADEON_SE_VTX_FMT:
1337 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); 1200 track->vtx_size = r100_get_vtx_size(idx_value);
1338 break; 1201 break;
1339 case RADEON_PP_TEX_SIZE_0: 1202 case RADEON_PP_TEX_SIZE_0:
1340 case RADEON_PP_TEX_SIZE_1: 1203 case RADEON_PP_TEX_SIZE_1:
1341 case RADEON_PP_TEX_SIZE_2: 1204 case RADEON_PP_TEX_SIZE_2:
1342 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1205 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1343 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 1206 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1344 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1207 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1345 break; 1208 break;
1346 case RADEON_PP_TEX_PITCH_0: 1209 case RADEON_PP_TEX_PITCH_0:
1347 case RADEON_PP_TEX_PITCH_1: 1210 case RADEON_PP_TEX_PITCH_1:
1348 case RADEON_PP_TEX_PITCH_2: 1211 case RADEON_PP_TEX_PITCH_2:
1349 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1212 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1350 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 1213 track->textures[i].pitch = idx_value + 32;
1351 break; 1214 break;
1352 case RADEON_PP_TXFILTER_0: 1215 case RADEON_PP_TXFILTER_0:
1353 case RADEON_PP_TXFILTER_1: 1216 case RADEON_PP_TXFILTER_1:
1354 case RADEON_PP_TXFILTER_2: 1217 case RADEON_PP_TXFILTER_2:
1355 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1218 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1356 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) 1219 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1357 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1220 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1358 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 1221 tmp = (idx_value >> 23) & 0x7;
1359 if (tmp == 2 || tmp == 6) 1222 if (tmp == 2 || tmp == 6)
1360 track->textures[i].roundup_w = false; 1223 track->textures[i].roundup_w = false;
1361 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 1224 tmp = (idx_value >> 27) & 0x7;
1362 if (tmp == 2 || tmp == 6) 1225 if (tmp == 2 || tmp == 6)
1363 track->textures[i].roundup_h = false; 1226 track->textures[i].roundup_h = false;
1364 break; 1227 break;
@@ -1366,16 +1229,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1366 case RADEON_PP_TXFORMAT_1: 1229 case RADEON_PP_TXFORMAT_1:
1367 case RADEON_PP_TXFORMAT_2: 1230 case RADEON_PP_TXFORMAT_2:
1368 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1231 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1369 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { 1232 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1370 track->textures[i].use_pitch = 1; 1233 track->textures[i].use_pitch = 1;
1371 } else { 1234 } else {
1372 track->textures[i].use_pitch = 0; 1235 track->textures[i].use_pitch = 0;
1373 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1236 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1374 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1237 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1375 } 1238 }
1376 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1239 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1377 track->textures[i].tex_coord_type = 2; 1240 track->textures[i].tex_coord_type = 2;
1378 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 1241 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1379 case RADEON_TXFORMAT_I8: 1242 case RADEON_TXFORMAT_I8:
1380 case RADEON_TXFORMAT_RGB332: 1243 case RADEON_TXFORMAT_RGB332:
1381 case RADEON_TXFORMAT_Y8: 1244 case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1265,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1402 track->textures[i].cpp = 4; 1265 track->textures[i].cpp = 4;
1403 break; 1266 break;
1404 } 1267 }
1405 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 1268 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1406 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 1269 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1407 break; 1270 break;
1408 case RADEON_PP_CUBIC_FACES_0: 1271 case RADEON_PP_CUBIC_FACES_0:
1409 case RADEON_PP_CUBIC_FACES_1: 1272 case RADEON_PP_CUBIC_FACES_1:
1410 case RADEON_PP_CUBIC_FACES_2: 1273 case RADEON_PP_CUBIC_FACES_2:
1411 tmp = ib_chunk->kdata[idx]; 1274 tmp = idx_value;
1412 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1275 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1413 for (face = 0; face < 4; face++) { 1276 for (face = 0; face < 4; face++) {
1414 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1277 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1290,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1427 struct radeon_cs_packet *pkt, 1290 struct radeon_cs_packet *pkt,
1428 struct radeon_object *robj) 1291 struct radeon_object *robj)
1429{ 1292{
1430 struct radeon_cs_chunk *ib_chunk;
1431 unsigned idx; 1293 unsigned idx;
1432 1294 u32 value;
1433 ib_chunk = &p->chunks[p->chunk_ib_idx];
1434 idx = pkt->idx + 1; 1295 idx = pkt->idx + 1;
1435 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { 1296 value = radeon_get_ib_value(p, idx + 2);
1297 if ((value + 1) > radeon_object_size(robj)) {
1436 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1298 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1437 "(need %u have %lu) !\n", 1299 "(need %u have %lu) !\n",
1438 ib_chunk->kdata[idx+2] + 1, 1300 value + 1,
1439 radeon_object_size(robj)); 1301 radeon_object_size(robj));
1440 return -EINVAL; 1302 return -EINVAL;
1441 } 1303 }
@@ -1445,59 +1307,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1445static int r100_packet3_check(struct radeon_cs_parser *p, 1307static int r100_packet3_check(struct radeon_cs_parser *p,
1446 struct radeon_cs_packet *pkt) 1308 struct radeon_cs_packet *pkt)
1447{ 1309{
1448 struct radeon_cs_chunk *ib_chunk;
1449 struct radeon_cs_reloc *reloc; 1310 struct radeon_cs_reloc *reloc;
1450 struct r100_cs_track *track; 1311 struct r100_cs_track *track;
1451 unsigned idx; 1312 unsigned idx;
1452 unsigned i, c;
1453 volatile uint32_t *ib; 1313 volatile uint32_t *ib;
1454 int r; 1314 int r;
1455 1315
1456 ib = p->ib->ptr; 1316 ib = p->ib->ptr;
1457 ib_chunk = &p->chunks[p->chunk_ib_idx];
1458 idx = pkt->idx + 1; 1317 idx = pkt->idx + 1;
1459 track = (struct r100_cs_track *)p->track; 1318 track = (struct r100_cs_track *)p->track;
1460 switch (pkt->opcode) { 1319 switch (pkt->opcode) {
1461 case PACKET3_3D_LOAD_VBPNTR: 1320 case PACKET3_3D_LOAD_VBPNTR:
1462 c = ib_chunk->kdata[idx++]; 1321 r = r100_packet3_load_vbpntr(p, pkt, idx);
1463 track->num_arrays = c; 1322 if (r)
1464 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1323 return r;
1465 r = r100_cs_packet_next_reloc(p, &reloc);
1466 if (r) {
1467 DRM_ERROR("No reloc for packet3 %d\n",
1468 pkt->opcode);
1469 r100_cs_dump_packet(p, pkt);
1470 return r;
1471 }
1472 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1473 track->arrays[i + 0].robj = reloc->robj;
1474 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1475 track->arrays[i + 0].esize &= 0x7F;
1476 r = r100_cs_packet_next_reloc(p, &reloc);
1477 if (r) {
1478 DRM_ERROR("No reloc for packet3 %d\n",
1479 pkt->opcode);
1480 r100_cs_dump_packet(p, pkt);
1481 return r;
1482 }
1483 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1484 track->arrays[i + 1].robj = reloc->robj;
1485 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1486 track->arrays[i + 1].esize &= 0x7F;
1487 }
1488 if (c & 1) {
1489 r = r100_cs_packet_next_reloc(p, &reloc);
1490 if (r) {
1491 DRM_ERROR("No reloc for packet3 %d\n",
1492 pkt->opcode);
1493 r100_cs_dump_packet(p, pkt);
1494 return r;
1495 }
1496 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1497 track->arrays[i + 0].robj = reloc->robj;
1498 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1499 track->arrays[i + 0].esize &= 0x7F;
1500 }
1501 break; 1324 break;
1502 case PACKET3_INDX_BUFFER: 1325 case PACKET3_INDX_BUFFER:
1503 r = r100_cs_packet_next_reloc(p, &reloc); 1326 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1329,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1506 r100_cs_dump_packet(p, pkt); 1329 r100_cs_dump_packet(p, pkt);
1507 return r; 1330 return r;
1508 } 1331 }
1509 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1332 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1510 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1333 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1511 if (r) { 1334 if (r) {
1512 return r; 1335 return r;
@@ -1520,27 +1343,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1520 r100_cs_dump_packet(p, pkt); 1343 r100_cs_dump_packet(p, pkt);
1521 return r; 1344 return r;
1522 } 1345 }
1523 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1346 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1524 track->num_arrays = 1; 1347 track->num_arrays = 1;
1525 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); 1348 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1526 1349
1527 track->arrays[0].robj = reloc->robj; 1350 track->arrays[0].robj = reloc->robj;
1528 track->arrays[0].esize = track->vtx_size; 1351 track->arrays[0].esize = track->vtx_size;
1529 1352
1530 track->max_indx = ib_chunk->kdata[idx+1]; 1353 track->max_indx = radeon_get_ib_value(p, idx+1);
1531 1354
1532 track->vap_vf_cntl = ib_chunk->kdata[idx+3]; 1355 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1533 track->immd_dwords = pkt->count - 1; 1356 track->immd_dwords = pkt->count - 1;
1534 r = r100_cs_track_check(p->rdev, track); 1357 r = r100_cs_track_check(p->rdev, track);
1535 if (r) 1358 if (r)
1536 return r; 1359 return r;
1537 break; 1360 break;
1538 case PACKET3_3D_DRAW_IMMD: 1361 case PACKET3_3D_DRAW_IMMD:
1539 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1362 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1540 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1363 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1541 return -EINVAL; 1364 return -EINVAL;
1542 } 1365 }
1543 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1366 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1544 track->immd_dwords = pkt->count - 1; 1367 track->immd_dwords = pkt->count - 1;
1545 r = r100_cs_track_check(p->rdev, track); 1368 r = r100_cs_track_check(p->rdev, track);
1546 if (r) 1369 if (r)
@@ -1548,11 +1371,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1548 break; 1371 break;
1549 /* triggers drawing using in-packet vertex data */ 1372 /* triggers drawing using in-packet vertex data */
1550 case PACKET3_3D_DRAW_IMMD_2: 1373 case PACKET3_3D_DRAW_IMMD_2:
1551 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1374 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1552 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1375 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1553 return -EINVAL; 1376 return -EINVAL;
1554 } 1377 }
1555 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1378 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1556 track->immd_dwords = pkt->count; 1379 track->immd_dwords = pkt->count;
1557 r = r100_cs_track_check(p->rdev, track); 1380 r = r100_cs_track_check(p->rdev, track);
1558 if (r) 1381 if (r)
@@ -1560,28 +1383,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1560 break; 1383 break;
1561 /* triggers drawing using in-packet vertex data */ 1384 /* triggers drawing using in-packet vertex data */
1562 case PACKET3_3D_DRAW_VBUF_2: 1385 case PACKET3_3D_DRAW_VBUF_2:
1563 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1386 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1564 r = r100_cs_track_check(p->rdev, track); 1387 r = r100_cs_track_check(p->rdev, track);
1565 if (r) 1388 if (r)
1566 return r; 1389 return r;
1567 break; 1390 break;
1568 /* triggers drawing of vertex buffers setup elsewhere */ 1391 /* triggers drawing of vertex buffers setup elsewhere */
1569 case PACKET3_3D_DRAW_INDX_2: 1392 case PACKET3_3D_DRAW_INDX_2:
1570 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1393 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1571 r = r100_cs_track_check(p->rdev, track); 1394 r = r100_cs_track_check(p->rdev, track);
1572 if (r) 1395 if (r)
1573 return r; 1396 return r;
1574 break; 1397 break;
1575 /* triggers drawing using indices to vertex buffer */ 1398 /* triggers drawing using indices to vertex buffer */
1576 case PACKET3_3D_DRAW_VBUF: 1399 case PACKET3_3D_DRAW_VBUF:
1577 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1400 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1578 r = r100_cs_track_check(p->rdev, track); 1401 r = r100_cs_track_check(p->rdev, track);
1579 if (r) 1402 if (r)
1580 return r; 1403 return r;
1581 break; 1404 break;
1582 /* triggers drawing of vertex buffers setup elsewhere */ 1405 /* triggers drawing of vertex buffers setup elsewhere */
1583 case PACKET3_3D_DRAW_INDX: 1406 case PACKET3_3D_DRAW_INDX:
1584 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1407 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1585 r = r100_cs_track_check(p->rdev, track); 1408 r = r100_cs_track_check(p->rdev, track);
1586 if (r) 1409 if (r)
1587 return r; 1410 return r;
@@ -2033,7 +1856,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2033 r100_pll_errata_after_data(rdev); 1856 r100_pll_errata_after_data(rdev);
2034} 1857}
2035 1858
2036int r100_init(struct radeon_device *rdev) 1859void r100_set_safe_registers(struct radeon_device *rdev)
2037{ 1860{
2038 if (ASIC_IS_RN50(rdev)) { 1861 if (ASIC_IS_RN50(rdev)) {
2039 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 1862 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -2042,9 +1865,8 @@ int r100_init(struct radeon_device *rdev)
2042 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 1865 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2043 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 1866 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2044 } else { 1867 } else {
2045 return r200_init(rdev); 1868 r200_set_safe_registers(rdev);
2046 } 1869 }
2047 return 0;
2048} 1870}
2049 1871
2050/* 1872/*
@@ -2342,9 +2164,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2342 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 2164 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2343 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 2165 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2344 } 2166 }
2345 if (rdev->mode_info.crtcs[1]->base.enabled) { 2167 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2346 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 2168 if (rdev->mode_info.crtcs[1]->base.enabled) {
2347 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; 2169 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2170 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2171 }
2348 } 2172 }
2349 2173
2350 min_mem_eff.full = rfixed_const_8(0); 2174 min_mem_eff.full = rfixed_const_8(0);
@@ -2555,7 +2379,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2555 /* 2379 /*
2556 Find the total latency for the display data. 2380 Find the total latency for the display data.
2557 */ 2381 */
2558 disp_latency_overhead.full = rfixed_const(80); 2382 disp_latency_overhead.full = rfixed_const(8);
2559 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); 2383 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2560 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 2384 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2561 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 2385 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2753,8 +2577,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2753static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2577static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2754{ 2578{
2755 DRM_ERROR("pitch %d\n", t->pitch); 2579 DRM_ERROR("pitch %d\n", t->pitch);
2580 DRM_ERROR("use_pitch %d\n", t->use_pitch);
2756 DRM_ERROR("width %d\n", t->width); 2581 DRM_ERROR("width %d\n", t->width);
2582 DRM_ERROR("width_11 %d\n", t->width_11);
2757 DRM_ERROR("height %d\n", t->height); 2583 DRM_ERROR("height %d\n", t->height);
2584 DRM_ERROR("height_11 %d\n", t->height_11);
2758 DRM_ERROR("num levels %d\n", t->num_levels); 2585 DRM_ERROR("num levels %d\n", t->num_levels);
2759 DRM_ERROR("depth %d\n", t->txdepth); 2586 DRM_ERROR("depth %d\n", t->txdepth);
2760 DRM_ERROR("bpp %d\n", t->cpp); 2587 DRM_ERROR("bpp %d\n", t->cpp);
@@ -2814,15 +2641,17 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2814 else 2641 else
2815 w = track->textures[u].pitch / (1 << i); 2642 w = track->textures[u].pitch / (1 << i);
2816 } else { 2643 } else {
2817 w = track->textures[u].width / (1 << i); 2644 w = track->textures[u].width;
2818 if (rdev->family >= CHIP_RV515) 2645 if (rdev->family >= CHIP_RV515)
2819 w |= track->textures[u].width_11; 2646 w |= track->textures[u].width_11;
2647 w = w / (1 << i);
2820 if (track->textures[u].roundup_w) 2648 if (track->textures[u].roundup_w)
2821 w = roundup_pow_of_two(w); 2649 w = roundup_pow_of_two(w);
2822 } 2650 }
2823 h = track->textures[u].height / (1 << i); 2651 h = track->textures[u].height;
2824 if (rdev->family >= CHIP_RV515) 2652 if (rdev->family >= CHIP_RV515)
2825 h |= track->textures[u].height_11; 2653 h |= track->textures[u].height_11;
2654 h = h / (1 << i);
2826 if (track->textures[u].roundup_h) 2655 if (track->textures[u].roundup_h)
2827 h = roundup_pow_of_two(h); 2656 h = roundup_pow_of_two(h);
2828 size += w * h; 2657 size += w * h;
@@ -3157,7 +2986,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3157 WREG32(R_000740_CP_CSQ_CNTL, 0); 2986 WREG32(R_000740_CP_CSQ_CNTL, 0);
3158 2987
3159 /* Save few CRTC registers */ 2988 /* Save few CRTC registers */
3160 save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); 2989 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3161 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); 2990 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3162 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); 2991 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3163 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); 2992 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
@@ -3167,7 +2996,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3167 } 2996 }
3168 2997
3169 /* Disable VGA aperture access */ 2998 /* Disable VGA aperture access */
3170 WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); 2999 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3171 /* Disable cursor, overlay, crtc */ 3000 /* Disable cursor, overlay, crtc */
3172 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); 3001 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3173 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | 3002 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
@@ -3199,10 +3028,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3199 rdev->mc.vram_location); 3028 rdev->mc.vram_location);
3200 } 3029 }
3201 /* Restore CRTC registers */ 3030 /* Restore CRTC registers */
3202 WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); 3031 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3203 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); 3032 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3204 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); 3033 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3205 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3034 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3206 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); 3035 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3207 } 3036 }
3208} 3037}
3038
3039void r100_vga_render_disable(struct radeon_device *rdev)
3040{
3041 u32 tmp;
3042
3043 tmp = RREG8(R_0003C2_GENMO_WT);
3044 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3045}
3046
3047static void r100_debugfs(struct radeon_device *rdev)
3048{
3049 int r;
3050
3051 r = r100_debugfs_mc_info_init(rdev);
3052 if (r)
3053 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3054}
3055
3056static void r100_mc_program(struct radeon_device *rdev)
3057{
3058 struct r100_mc_save save;
3059
3060 /* Stops all mc clients */
3061 r100_mc_stop(rdev, &save);
3062 if (rdev->flags & RADEON_IS_AGP) {
3063 WREG32(R_00014C_MC_AGP_LOCATION,
3064 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3065 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3066 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3067 if (rdev->family > CHIP_RV200)
3068 WREG32(R_00015C_AGP_BASE_2,
3069 upper_32_bits(rdev->mc.agp_base) & 0xff);
3070 } else {
3071 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3072 WREG32(R_000170_AGP_BASE, 0);
3073 if (rdev->family > CHIP_RV200)
3074 WREG32(R_00015C_AGP_BASE_2, 0);
3075 }
3076 /* Wait for mc idle */
3077 if (r100_mc_wait_for_idle(rdev))
3078 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3079 /* Program MC, should be a 32bits limited address space */
3080 WREG32(R_000148_MC_FB_LOCATION,
3081 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3082 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3083 r100_mc_resume(rdev, &save);
3084}
3085
3086void r100_clock_startup(struct radeon_device *rdev)
3087{
3088 u32 tmp;
3089
3090 if (radeon_dynclks != -1 && radeon_dynclks)
3091 radeon_legacy_set_clock_gating(rdev, 1);
3092 /* We need to force on some of the block */
3093 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3094 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3095 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3096 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3097 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3098}
3099
3100static int r100_startup(struct radeon_device *rdev)
3101{
3102 int r;
3103
3104 r100_mc_program(rdev);
3105 /* Resume clock */
3106 r100_clock_startup(rdev);
3107 /* Initialize GPU configuration (# pipes, ...) */
3108 r100_gpu_init(rdev);
3109 /* Initialize GART (initialize after TTM so we can allocate
3110 * memory through TTM but finalize after TTM) */
3111 if (rdev->flags & RADEON_IS_PCI) {
3112 r = r100_pci_gart_enable(rdev);
3113 if (r)
3114 return r;
3115 }
3116 /* Enable IRQ */
3117 rdev->irq.sw_int = true;
3118 r100_irq_set(rdev);
3119 /* 1M ring buffer */
3120 r = r100_cp_init(rdev, 1024 * 1024);
3121 if (r) {
3122 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
3123 return r;
3124 }
3125 r = r100_wb_init(rdev);
3126 if (r)
3127 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
3128 r = r100_ib_init(rdev);
3129 if (r) {
3130 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
3131 return r;
3132 }
3133 return 0;
3134}
3135
3136int r100_resume(struct radeon_device *rdev)
3137{
3138 /* Make sur GART are not working */
3139 if (rdev->flags & RADEON_IS_PCI)
3140 r100_pci_gart_disable(rdev);
3141 /* Resume clock before doing reset */
3142 r100_clock_startup(rdev);
3143 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3144 if (radeon_gpu_reset(rdev)) {
3145 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3146 RREG32(R_000E40_RBBM_STATUS),
3147 RREG32(R_0007C0_CP_STAT));
3148 }
3149 /* post */
3150 radeon_combios_asic_init(rdev->ddev);
3151 /* Resume clock after posting */
3152 r100_clock_startup(rdev);
3153 return r100_startup(rdev);
3154}
3155
3156int r100_suspend(struct radeon_device *rdev)
3157{
3158 r100_cp_disable(rdev);
3159 r100_wb_disable(rdev);
3160 r100_irq_disable(rdev);
3161 if (rdev->flags & RADEON_IS_PCI)
3162 r100_pci_gart_disable(rdev);
3163 return 0;
3164}
3165
3166void r100_fini(struct radeon_device *rdev)
3167{
3168 r100_suspend(rdev);
3169 r100_cp_fini(rdev);
3170 r100_wb_fini(rdev);
3171 r100_ib_fini(rdev);
3172 radeon_gem_fini(rdev);
3173 if (rdev->flags & RADEON_IS_PCI)
3174 r100_pci_gart_fini(rdev);
3175 radeon_irq_kms_fini(rdev);
3176 radeon_fence_driver_fini(rdev);
3177 radeon_object_fini(rdev);
3178 radeon_atombios_fini(rdev);
3179 kfree(rdev->bios);
3180 rdev->bios = NULL;
3181}
3182
3183int r100_mc_init(struct radeon_device *rdev)
3184{
3185 int r;
3186 u32 tmp;
3187
3188 /* Setup GPU memory space */
3189 rdev->mc.vram_location = 0xFFFFFFFFUL;
3190 rdev->mc.gtt_location = 0xFFFFFFFFUL;
3191 if (rdev->flags & RADEON_IS_IGP) {
3192 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
3193 rdev->mc.vram_location = tmp << 16;
3194 }
3195 if (rdev->flags & RADEON_IS_AGP) {
3196 r = radeon_agp_init(rdev);
3197 if (r) {
3198 printk(KERN_WARNING "[drm] Disabling AGP\n");
3199 rdev->flags &= ~RADEON_IS_AGP;
3200 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3201 } else {
3202 rdev->mc.gtt_location = rdev->mc.agp_base;
3203 }
3204 }
3205 r = radeon_mc_setup(rdev);
3206 if (r)
3207 return r;
3208 return 0;
3209}
3210
3211int r100_init(struct radeon_device *rdev)
3212{
3213 int r;
3214
3215 /* Register debugfs file specific to this group of asics */
3216 r100_debugfs(rdev);
3217 /* Disable VGA */
3218 r100_vga_render_disable(rdev);
3219 /* Initialize scratch registers */
3220 radeon_scratch_init(rdev);
3221 /* Initialize surface registers */
3222 radeon_surface_init(rdev);
3223 /* TODO: disable VGA need to use VGA request */
3224 /* BIOS*/
3225 if (!radeon_get_bios(rdev)) {
3226 if (ASIC_IS_AVIVO(rdev))
3227 return -EINVAL;
3228 }
3229 if (rdev->is_atom_bios) {
3230 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
3231 return -EINVAL;
3232 } else {
3233 r = radeon_combios_init(rdev);
3234 if (r)
3235 return r;
3236 }
3237 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3238 if (radeon_gpu_reset(rdev)) {
3239 dev_warn(rdev->dev,
3240 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3241 RREG32(R_000E40_RBBM_STATUS),
3242 RREG32(R_0007C0_CP_STAT));
3243 }
3244 /* check if cards are posted or not */
3245 if (!radeon_card_posted(rdev) && rdev->bios) {
3246 DRM_INFO("GPU not posted. posting now...\n");
3247 radeon_combios_asic_init(rdev->ddev);
3248 }
3249 /* Set asic errata */
3250 r100_errata(rdev);
3251 /* Initialize clocks */
3252 radeon_get_clock_info(rdev->ddev);
3253 /* Get vram informations */
3254 r100_vram_info(rdev);
3255 /* Initialize memory controller (also test AGP) */
3256 r = r100_mc_init(rdev);
3257 if (r)
3258 return r;
3259 /* Fence driver */
3260 r = radeon_fence_driver_init(rdev);
3261 if (r)
3262 return r;
3263 r = radeon_irq_kms_init(rdev);
3264 if (r)
3265 return r;
3266 /* Memory manager */
3267 r = radeon_object_init(rdev);
3268 if (r)
3269 return r;
3270 if (rdev->flags & RADEON_IS_PCI) {
3271 r = r100_pci_gart_init(rdev);
3272 if (r)
3273 return r;
3274 }
3275 r100_set_safe_registers(rdev);
3276 rdev->accel_working = true;
3277 r = r100_startup(rdev);
3278 if (r) {
3279 /* Somethings want wront with the accel init stop accel */
3280 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3281 r100_suspend(rdev);
3282 r100_cp_fini(rdev);
3283 r100_wb_fini(rdev);
3284 r100_ib_fini(rdev);
3285 if (rdev->flags & RADEON_IS_PCI)
3286 r100_pci_gart_fini(rdev);
3287 radeon_irq_kms_fini(rdev);
3288 rdev->accel_working = false;
3289 }
3290 return 0;
3291}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 70a82eda394a..0daf0d76a891 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
84 struct radeon_cs_packet *pkt, 84 struct radeon_cs_packet *pkt,
85 unsigned idx, unsigned reg); 85 unsigned idx, unsigned reg);
86 86
87
88
87static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 89static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
88 struct radeon_cs_packet *pkt, 90 struct radeon_cs_packet *pkt,
89 unsigned idx, 91 unsigned idx,
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
93 u32 tile_flags = 0; 95 u32 tile_flags = 0;
94 u32 tmp; 96 u32 tmp;
95 struct radeon_cs_reloc *reloc; 97 struct radeon_cs_reloc *reloc;
96 struct radeon_cs_chunk *ib_chunk; 98 u32 value;
97
98 ib_chunk = &p->chunks[p->chunk_ib_idx];
99 99
100 r = r100_cs_packet_next_reloc(p, &reloc); 100 r = r100_cs_packet_next_reloc(p, &reloc);
101 if (r) { 101 if (r) {
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
104 r100_cs_dump_packet(p, pkt); 104 r100_cs_dump_packet(p, pkt);
105 return r; 105 return r;
106 } 106 }
107 tmp = ib_chunk->kdata[idx] & 0x003fffff; 107 value = radeon_get_ib_value(p, idx);
108 tmp = value & 0x003fffff;
108 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 109 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
109 110
110 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 111 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
119 } 120 }
120 121
121 tmp |= tile_flags; 122 tmp |= tile_flags;
122 p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; 123 p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
123 return 0; 124 return 0;
124} 125}
126
127static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
128 struct radeon_cs_packet *pkt,
129 int idx)
130{
131 unsigned c, i;
132 struct radeon_cs_reloc *reloc;
133 struct r100_cs_track *track;
134 int r = 0;
135 volatile uint32_t *ib;
136 u32 idx_value;
137
138 ib = p->ib->ptr;
139 track = (struct r100_cs_track *)p->track;
140 c = radeon_get_ib_value(p, idx++) & 0x1F;
141 track->num_arrays = c;
142 for (i = 0; i < (c - 1); i+=2, idx+=3) {
143 r = r100_cs_packet_next_reloc(p, &reloc);
144 if (r) {
145 DRM_ERROR("No reloc for packet3 %d\n",
146 pkt->opcode);
147 r100_cs_dump_packet(p, pkt);
148 return r;
149 }
150 idx_value = radeon_get_ib_value(p, idx);
151 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
152
153 track->arrays[i + 0].esize = idx_value >> 8;
154 track->arrays[i + 0].robj = reloc->robj;
155 track->arrays[i + 0].esize &= 0x7F;
156 r = r100_cs_packet_next_reloc(p, &reloc);
157 if (r) {
158 DRM_ERROR("No reloc for packet3 %d\n",
159 pkt->opcode);
160 r100_cs_dump_packet(p, pkt);
161 return r;
162 }
163 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
164 track->arrays[i + 1].robj = reloc->robj;
165 track->arrays[i + 1].esize = idx_value >> 24;
166 track->arrays[i + 1].esize &= 0x7F;
167 }
168 if (c & 1) {
169 r = r100_cs_packet_next_reloc(p, &reloc);
170 if (r) {
171 DRM_ERROR("No reloc for packet3 %d\n",
172 pkt->opcode);
173 r100_cs_dump_packet(p, pkt);
174 return r;
175 }
176 idx_value = radeon_get_ib_value(p, idx);
177 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
178 track->arrays[i + 0].robj = reloc->robj;
179 track->arrays[i + 0].esize = idx_value >> 8;
180 track->arrays[i + 0].esize &= 0x7F;
181 }
182 return r;
183}
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index c4b257ec920e..df29a630c466 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -381,6 +381,24 @@
381#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) 381#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24)
382#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) 382#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F)
383#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF 383#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF
384#define R_000148_MC_FB_LOCATION 0x000148
385#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
386#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
387#define C_000148_MC_FB_START 0xFFFF0000
388#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
389#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
390#define C_000148_MC_FB_TOP 0x0000FFFF
391#define R_00014C_MC_AGP_LOCATION 0x00014C
392#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
393#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
394#define C_00014C_MC_AGP_START 0xFFFF0000
395#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
396#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
397#define C_00014C_MC_AGP_TOP 0x0000FFFF
398#define R_000170_AGP_BASE 0x000170
399#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
400#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
401#define C_000170_AGP_BASE_ADDR 0x00000000
384#define R_00023C_DISPLAY_BASE_ADDR 0x00023C 402#define R_00023C_DISPLAY_BASE_ADDR 0x00023C
385#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) 403#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
386#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) 404#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -403,25 +421,25 @@
403#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) 421#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
404#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) 422#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
405#define C_000360_CUR2_LOCK 0x7FFFFFFF 423#define C_000360_CUR2_LOCK 0x7FFFFFFF
406#define R_0003C0_GENMO_WT 0x0003C0 424#define R_0003C2_GENMO_WT 0x0003C0
407#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) 425#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
408#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) 426#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
409#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE 427#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
410#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) 428#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1)
411#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) 429#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1)
412#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD 430#define C_0003C2_VGA_RAM_EN 0xFD
413#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) 431#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2)
414#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) 432#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3)
415#define C_0003C0_VGA_CKSEL 0xFFFFFFF3 433#define C_0003C2_VGA_CKSEL 0xF3
416#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) 434#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5)
417#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) 435#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1)
418#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF 436#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF
419#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) 437#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6)
420#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) 438#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1)
421#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF 439#define C_0003C2_VGA_HSYNC_POL 0xBF
422#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) 440#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7)
423#define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) 441#define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1)
424#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F 442#define C_0003C2_VGA_VSYNC_POL 0x7F
425#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 443#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8
426#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) 444#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0)
427#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) 445#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1)
@@ -545,6 +563,46 @@
545#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) 563#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5)
546#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) 564#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF)
547#define C_000774_SCRATCH_ADDR 0x0000001F 565#define C_000774_SCRATCH_ADDR 0x0000001F
566#define R_0007C0_CP_STAT 0x0007C0
567#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
568#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
569#define C_0007C0_MRU_BUSY 0xFFFFFFFE
570#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
571#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
572#define C_0007C0_MWU_BUSY 0xFFFFFFFD
573#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
574#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
575#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
576#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
577#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
578#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
579#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
580#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
581#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
582#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
583#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
584#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
585#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
586#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
587#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
588#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
589#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
590#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
591#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
592#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
593#define C_0007C0_CSI_BUSY 0xFFFFDFFF
594#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
595#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
596#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
597#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
598#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
599#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
600#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
601#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
602#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
603#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
604#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
605#define C_0007C0_CP_BUSY 0x7FFFFFFF
548#define R_000E40_RBBM_STATUS 0x000E40 606#define R_000E40_RBBM_STATUS 0x000E40
549#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) 607#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
550#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) 608#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
@@ -604,4 +662,53 @@
604#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) 662#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
605#define C_000E40_GUI_ACTIVE 0x7FFFFFFF 663#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
606 664
665
666#define R_00000D_SCLK_CNTL 0x00000D
667#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
668#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
669#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
670#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8)
671#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7)
672#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF
673#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
674#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
675#define C_00000D_FORCE_CP 0xFFFEFFFF
676#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
677#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
678#define C_00000D_FORCE_HDP 0xFFFDFFFF
679#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18)
680#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1)
681#define C_00000D_FORCE_DISP 0xFFFBFFFF
682#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
683#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
684#define C_00000D_FORCE_TOP 0xFFF7FFFF
685#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
686#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
687#define C_00000D_FORCE_E2 0xFFEFFFFF
688#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
689#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
690#define C_00000D_FORCE_SE 0xFFDFFFFF
691#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
692#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
693#define C_00000D_FORCE_IDCT 0xFFBFFFFF
694#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
695#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
696#define C_00000D_FORCE_VIP 0xFF7FFFFF
697#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
698#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
699#define C_00000D_FORCE_RE 0xFEFFFFFF
700#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
701#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
702#define C_00000D_FORCE_PB 0xFDFFFFFF
703#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
704#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
705#define C_00000D_FORCE_TAM 0xFBFFFFFF
706#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
707#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
708#define C_00000D_FORCE_TDM 0xF7FFFFFF
709#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
710#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
711#define C_00000D_FORCE_RB 0xEFFFFFFF
712
713
607#endif 714#endif
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 568c74bfba3d..eb740fc3549f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
96 struct radeon_cs_packet *pkt, 96 struct radeon_cs_packet *pkt,
97 unsigned idx, unsigned reg) 97 unsigned idx, unsigned reg)
98{ 98{
99 struct radeon_cs_chunk *ib_chunk;
100 struct radeon_cs_reloc *reloc; 99 struct radeon_cs_reloc *reloc;
101 struct r100_cs_track *track; 100 struct r100_cs_track *track;
102 volatile uint32_t *ib; 101 volatile uint32_t *ib;
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
105 int i; 104 int i;
106 int face; 105 int face;
107 u32 tile_flags = 0; 106 u32 tile_flags = 0;
107 u32 idx_value;
108 108
109 ib = p->ib->ptr; 109 ib = p->ib->ptr;
110 ib_chunk = &p->chunks[p->chunk_ib_idx];
111 track = (struct r100_cs_track *)p->track; 110 track = (struct r100_cs_track *)p->track;
112 111 idx_value = radeon_get_ib_value(p, idx);
113 switch (reg) { 112 switch (reg) {
114 case RADEON_CRTC_GUI_TRIG_VLINE: 113 case RADEON_CRTC_GUI_TRIG_VLINE:
115 r = r100_cs_packet_parse_vline(p); 114 r = r100_cs_packet_parse_vline(p);
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
137 return r; 136 return r;
138 } 137 }
139 track->zb.robj = reloc->robj; 138 track->zb.robj = reloc->robj;
140 track->zb.offset = ib_chunk->kdata[idx]; 139 track->zb.offset = idx_value;
141 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 140 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
142 break; 141 break;
143 case RADEON_RB3D_COLOROFFSET: 142 case RADEON_RB3D_COLOROFFSET:
144 r = r100_cs_packet_next_reloc(p, &reloc); 143 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
149 return r; 148 return r;
150 } 149 }
151 track->cb[0].robj = reloc->robj; 150 track->cb[0].robj = reloc->robj;
152 track->cb[0].offset = ib_chunk->kdata[idx]; 151 track->cb[0].offset = idx_value;
153 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 152 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
154 break; 153 break;
155 case R200_PP_TXOFFSET_0: 154 case R200_PP_TXOFFSET_0:
156 case R200_PP_TXOFFSET_1: 155 case R200_PP_TXOFFSET_1:
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
166 r100_cs_dump_packet(p, pkt); 165 r100_cs_dump_packet(p, pkt);
167 return r; 166 return r;
168 } 167 }
169 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 168 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
170 track->textures[i].robj = reloc->robj; 169 track->textures[i].robj = reloc->robj;
171 break; 170 break;
172 case R200_PP_CUBIC_OFFSET_F1_0: 171 case R200_PP_CUBIC_OFFSET_F1_0:
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
208 r100_cs_dump_packet(p, pkt); 207 r100_cs_dump_packet(p, pkt);
209 return r; 208 return r;
210 } 209 }
211 track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; 210 track->textures[i].cube_info[face - 1].offset = idx_value;
212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 211 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
213 track->textures[i].cube_info[face - 1].robj = reloc->robj; 212 track->textures[i].cube_info[face - 1].robj = reloc->robj;
214 break; 213 break;
215 case RADEON_RE_WIDTH_HEIGHT: 214 case RADEON_RE_WIDTH_HEIGHT:
216 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 215 track->maxy = ((idx_value >> 16) & 0x7FF);
217 break; 216 break;
218 case RADEON_RB3D_COLORPITCH: 217 case RADEON_RB3D_COLORPITCH:
219 r = r100_cs_packet_next_reloc(p, &reloc); 218 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
229 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 228 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
230 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 229 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
231 230
232 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 231 tmp = idx_value & ~(0x7 << 16);
233 tmp |= tile_flags; 232 tmp |= tile_flags;
234 ib[idx] = tmp; 233 ib[idx] = tmp;
235 234
236 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 235 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
237 break; 236 break;
238 case RADEON_RB3D_DEPTHPITCH: 237 case RADEON_RB3D_DEPTHPITCH:
239 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 238 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
240 break; 239 break;
241 case RADEON_RB3D_CNTL: 240 case RADEON_RB3D_CNTL:
242 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 241 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
243 case 7: 242 case 7:
244 case 8: 243 case 8:
245 case 9: 244 case 9:
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 break; 256 break;
258 default: 257 default:
259 DRM_ERROR("Invalid color buffer format (%d) !\n", 258 DRM_ERROR("Invalid color buffer format (%d) !\n",
260 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 259 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
261 return -EINVAL; 260 return -EINVAL;
262 } 261 }
263 if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { 262 if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
264 DRM_ERROR("No support for depth xy offset in kms\n"); 263 DRM_ERROR("No support for depth xy offset in kms\n");
265 return -EINVAL; 264 return -EINVAL;
266 } 265 }
267 266
268 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 267 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
269 break; 268 break;
270 case RADEON_RB3D_ZSTENCILCNTL: 269 case RADEON_RB3D_ZSTENCILCNTL:
271 switch (ib_chunk->kdata[idx] & 0xf) { 270 switch (idx_value & 0xf) {
272 case 0: 271 case 0:
273 track->zb.cpp = 2; 272 track->zb.cpp = 2;
274 break; 273 break;
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
292 r100_cs_dump_packet(p, pkt); 291 r100_cs_dump_packet(p, pkt);
293 return r; 292 return r;
294 } 293 }
295 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 294 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
296 break; 295 break;
297 case RADEON_PP_CNTL: 296 case RADEON_PP_CNTL:
298 { 297 {
299 uint32_t temp = ib_chunk->kdata[idx] >> 4; 298 uint32_t temp = idx_value >> 4;
300 for (i = 0; i < track->num_texture; i++) 299 for (i = 0; i < track->num_texture; i++)
301 track->textures[i].enabled = !!(temp & (1 << i)); 300 track->textures[i].enabled = !!(temp & (1 << i));
302 } 301 }
303 break; 302 break;
304 case RADEON_SE_VF_CNTL: 303 case RADEON_SE_VF_CNTL:
305 track->vap_vf_cntl = ib_chunk->kdata[idx]; 304 track->vap_vf_cntl = idx_value;
306 break; 305 break;
307 case 0x210c: 306 case 0x210c:
308 /* VAP_VF_MAX_VTX_INDX */ 307 /* VAP_VF_MAX_VTX_INDX */
309 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 308 track->max_indx = idx_value & 0x00FFFFFFUL;
310 break; 309 break;
311 case R200_SE_VTX_FMT_0: 310 case R200_SE_VTX_FMT_0:
312 track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); 311 track->vtx_size = r200_get_vtx_size_0(idx_value);
313 break; 312 break;
314 case R200_SE_VTX_FMT_1: 313 case R200_SE_VTX_FMT_1:
315 track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); 314 track->vtx_size += r200_get_vtx_size_1(idx_value);
316 break; 315 break;
317 case R200_PP_TXSIZE_0: 316 case R200_PP_TXSIZE_0:
318 case R200_PP_TXSIZE_1: 317 case R200_PP_TXSIZE_1:
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
321 case R200_PP_TXSIZE_4: 320 case R200_PP_TXSIZE_4:
322 case R200_PP_TXSIZE_5: 321 case R200_PP_TXSIZE_5:
323 i = (reg - R200_PP_TXSIZE_0) / 32; 322 i = (reg - R200_PP_TXSIZE_0) / 32;
324 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 323 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
325 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 324 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
326 break; 325 break;
327 case R200_PP_TXPITCH_0: 326 case R200_PP_TXPITCH_0:
328 case R200_PP_TXPITCH_1: 327 case R200_PP_TXPITCH_1:
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
331 case R200_PP_TXPITCH_4: 330 case R200_PP_TXPITCH_4:
332 case R200_PP_TXPITCH_5: 331 case R200_PP_TXPITCH_5:
333 i = (reg - R200_PP_TXPITCH_0) / 32; 332 i = (reg - R200_PP_TXPITCH_0) / 32;
334 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 333 track->textures[i].pitch = idx_value + 32;
335 break; 334 break;
336 case R200_PP_TXFILTER_0: 335 case R200_PP_TXFILTER_0:
337 case R200_PP_TXFILTER_1: 336 case R200_PP_TXFILTER_1:
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
340 case R200_PP_TXFILTER_4: 339 case R200_PP_TXFILTER_4:
341 case R200_PP_TXFILTER_5: 340 case R200_PP_TXFILTER_5:
342 i = (reg - R200_PP_TXFILTER_0) / 32; 341 i = (reg - R200_PP_TXFILTER_0) / 32;
343 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) 342 track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
344 >> R200_MAX_MIP_LEVEL_SHIFT); 343 >> R200_MAX_MIP_LEVEL_SHIFT);
345 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 344 tmp = (idx_value >> 23) & 0x7;
346 if (tmp == 2 || tmp == 6) 345 if (tmp == 2 || tmp == 6)
347 track->textures[i].roundup_w = false; 346 track->textures[i].roundup_w = false;
348 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 347 tmp = (idx_value >> 27) & 0x7;
349 if (tmp == 2 || tmp == 6) 348 if (tmp == 2 || tmp == 6)
350 track->textures[i].roundup_h = false; 349 track->textures[i].roundup_h = false;
351 break; 350 break;
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
364 case R200_PP_TXFORMAT_X_4: 363 case R200_PP_TXFORMAT_X_4:
365 case R200_PP_TXFORMAT_X_5: 364 case R200_PP_TXFORMAT_X_5:
366 i = (reg - R200_PP_TXFORMAT_X_0) / 32; 365 i = (reg - R200_PP_TXFORMAT_X_0) / 32;
367 track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; 366 track->textures[i].txdepth = idx_value & 0x7;
368 tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; 367 tmp = (idx_value >> 16) & 0x3;
369 /* 2D, 3D, CUBE */ 368 /* 2D, 3D, CUBE */
370 switch (tmp) { 369 switch (tmp) {
371 case 0: 370 case 0:
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
389 case R200_PP_TXFORMAT_4: 388 case R200_PP_TXFORMAT_4:
390 case R200_PP_TXFORMAT_5: 389 case R200_PP_TXFORMAT_5:
391 i = (reg - R200_PP_TXFORMAT_0) / 32; 390 i = (reg - R200_PP_TXFORMAT_0) / 32;
392 if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { 391 if (idx_value & R200_TXFORMAT_NON_POWER2) {
393 track->textures[i].use_pitch = 1; 392 track->textures[i].use_pitch = 1;
394 } else { 393 } else {
395 track->textures[i].use_pitch = 0; 394 track->textures[i].use_pitch = 0;
396 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 395 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
397 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 396 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
398 } 397 }
399 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 398 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
400 case R200_TXFORMAT_I8: 399 case R200_TXFORMAT_I8:
401 case R200_TXFORMAT_RGB332: 400 case R200_TXFORMAT_RGB332:
402 case R200_TXFORMAT_Y8: 401 case R200_TXFORMAT_Y8:
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
424 track->textures[i].cpp = 4; 423 track->textures[i].cpp = 4;
425 break; 424 break;
426 } 425 }
427 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 426 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
428 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 427 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
429 break; 428 break;
430 case R200_PP_CUBIC_FACES_0: 429 case R200_PP_CUBIC_FACES_0:
431 case R200_PP_CUBIC_FACES_1: 430 case R200_PP_CUBIC_FACES_1:
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
433 case R200_PP_CUBIC_FACES_3: 432 case R200_PP_CUBIC_FACES_3:
434 case R200_PP_CUBIC_FACES_4: 433 case R200_PP_CUBIC_FACES_4:
435 case R200_PP_CUBIC_FACES_5: 434 case R200_PP_CUBIC_FACES_5:
436 tmp = ib_chunk->kdata[idx]; 435 tmp = idx_value;
437 i = (reg - R200_PP_CUBIC_FACES_0) / 32; 436 i = (reg - R200_PP_CUBIC_FACES_0) / 32;
438 for (face = 0; face < 4; face++) { 437 for (face = 0; face < 4; face++) {
439 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 438 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -448,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
448 return 0; 447 return 0;
449} 448}
450 449
451int r200_init(struct radeon_device *rdev) 450void r200_set_safe_registers(struct radeon_device *rdev)
452{ 451{
453 rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; 452 rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
454 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); 453 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
455 return 0;
456} 454}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index bb151ecdf8fc..2f43ee8e4048 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -33,43 +33,16 @@
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "r100_track.h" 34#include "r100_track.h"
35#include "r300d.h" 35#include "r300d.h"
36 36#include "rv350d.h"
37#include "r300_reg_safe.h" 37#include "r300_reg_safe.h"
38 38
39/* r300,r350,rv350,rv370,rv380 depends on : */ 39/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
40void r100_hdp_reset(struct radeon_device *rdev);
41int r100_cp_reset(struct radeon_device *rdev);
42int r100_rb2d_reset(struct radeon_device *rdev);
43int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
44int r100_pci_gart_enable(struct radeon_device *rdev);
45void r100_mc_setup(struct radeon_device *rdev);
46void r100_mc_disable_clients(struct radeon_device *rdev);
47int r100_gui_wait_for_idle(struct radeon_device *rdev);
48int r100_cs_packet_parse(struct radeon_cs_parser *p,
49 struct radeon_cs_packet *pkt,
50 unsigned idx);
51int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
52int r100_cs_parse_packet0(struct radeon_cs_parser *p,
53 struct radeon_cs_packet *pkt,
54 const unsigned *auth, unsigned n,
55 radeon_packet0_check_t check);
56int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
57 struct radeon_cs_packet *pkt,
58 struct radeon_object *robj);
59
60/* This files gather functions specifics to:
61 * r300,r350,rv350,rv370,rv380
62 *
63 * Some of these functions might be used by newer ASICs.
64 */
65void r300_gpu_init(struct radeon_device *rdev);
66int r300_mc_wait_for_idle(struct radeon_device *rdev);
67int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
68
69 40
70/* 41/*
71 * rv370,rv380 PCIE GART 42 * rv370,rv380 PCIE GART
72 */ 43 */
44static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
45
73void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) 46void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
74{ 47{
75 uint32_t tmp; 48 uint32_t tmp;
@@ -140,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
140 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 113 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
141 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 114 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
142 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); 115 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
143 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096; 116 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
144 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); 117 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
145 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); 118 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
146 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); 119 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
@@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
182 radeon_gart_fini(rdev); 155 radeon_gart_fini(rdev);
183} 156}
184 157
185/*
186 * MC
187 */
188int r300_mc_init(struct radeon_device *rdev)
189{
190 int r;
191
192 if (r100_debugfs_rbbm_init(rdev)) {
193 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
194 }
195
196 r300_gpu_init(rdev);
197 r100_pci_gart_disable(rdev);
198 if (rdev->flags & RADEON_IS_PCIE) {
199 rv370_pcie_gart_disable(rdev);
200 }
201
202 /* Setup GPU memory space */
203 rdev->mc.vram_location = 0xFFFFFFFFUL;
204 rdev->mc.gtt_location = 0xFFFFFFFFUL;
205 if (rdev->flags & RADEON_IS_AGP) {
206 r = radeon_agp_init(rdev);
207 if (r) {
208 printk(KERN_WARNING "[drm] Disabling AGP\n");
209 rdev->flags &= ~RADEON_IS_AGP;
210 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
211 } else {
212 rdev->mc.gtt_location = rdev->mc.agp_base;
213 }
214 }
215 r = radeon_mc_setup(rdev);
216 if (r) {
217 return r;
218 }
219
220 /* Program GPU memory space */
221 r100_mc_disable_clients(rdev);
222 if (r300_mc_wait_for_idle(rdev)) {
223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n");
225 }
226 r100_mc_setup(rdev);
227 return 0;
228}
229
230void r300_mc_fini(struct radeon_device *rdev)
231{
232}
233
234
235/*
236 * Fence emission
237 */
238void r300_fence_ring_emit(struct radeon_device *rdev, 158void r300_fence_ring_emit(struct radeon_device *rdev,
239 struct radeon_fence *fence) 159 struct radeon_fence *fence)
240{ 160{
@@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
260 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 180 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
261} 181}
262 182
263
264/*
265 * Global GPU functions
266 */
267int r300_copy_dma(struct radeon_device *rdev, 183int r300_copy_dma(struct radeon_device *rdev,
268 uint64_t src_offset, 184 uint64_t src_offset,
269 uint64_t dst_offset, 185 uint64_t dst_offset,
@@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev)
582 r100_vram_init_sizes(rdev); 498 r100_vram_init_sizes(rdev);
583} 499}
584 500
585
586/*
587 * PCIE Lanes
588 */
589
590void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 501void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
591{ 502{
592 uint32_t link_width_cntl, mask; 503 uint32_t link_width_cntl, mask;
@@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
646 557
647} 558}
648 559
649
650/*
651 * Debugfs info
652 */
653#if defined(CONFIG_DEBUG_FS) 560#if defined(CONFIG_DEBUG_FS)
654static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) 561static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
655{ 562{
@@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = {
680}; 587};
681#endif 588#endif
682 589
683int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) 590static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
684{ 591{
685#if defined(CONFIG_DEBUG_FS) 592#if defined(CONFIG_DEBUG_FS)
686 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); 593 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
@@ -689,25 +596,22 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
689#endif 596#endif
690} 597}
691 598
692
693/*
694 * CS functions
695 */
696static int r300_packet0_check(struct radeon_cs_parser *p, 599static int r300_packet0_check(struct radeon_cs_parser *p,
697 struct radeon_cs_packet *pkt, 600 struct radeon_cs_packet *pkt,
698 unsigned idx, unsigned reg) 601 unsigned idx, unsigned reg)
699{ 602{
700 struct radeon_cs_chunk *ib_chunk;
701 struct radeon_cs_reloc *reloc; 603 struct radeon_cs_reloc *reloc;
702 struct r100_cs_track *track; 604 struct r100_cs_track *track;
703 volatile uint32_t *ib; 605 volatile uint32_t *ib;
704 uint32_t tmp, tile_flags = 0; 606 uint32_t tmp, tile_flags = 0;
705 unsigned i; 607 unsigned i;
706 int r; 608 int r;
609 u32 idx_value;
707 610
708 ib = p->ib->ptr; 611 ib = p->ib->ptr;
709 ib_chunk = &p->chunks[p->chunk_ib_idx];
710 track = (struct r100_cs_track *)p->track; 612 track = (struct r100_cs_track *)p->track;
613 idx_value = radeon_get_ib_value(p, idx);
614
711 switch(reg) { 615 switch(reg) {
712 case AVIVO_D1MODE_VLINE_START_END: 616 case AVIVO_D1MODE_VLINE_START_END:
713 case RADEON_CRTC_GUI_TRIG_VLINE: 617 case RADEON_CRTC_GUI_TRIG_VLINE:
@@ -738,8 +642,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
738 return r; 642 return r;
739 } 643 }
740 track->cb[i].robj = reloc->robj; 644 track->cb[i].robj = reloc->robj;
741 track->cb[i].offset = ib_chunk->kdata[idx]; 645 track->cb[i].offset = idx_value;
742 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 646 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
743 break; 647 break;
744 case R300_ZB_DEPTHOFFSET: 648 case R300_ZB_DEPTHOFFSET:
745 r = r100_cs_packet_next_reloc(p, &reloc); 649 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -750,8 +654,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
750 return r; 654 return r;
751 } 655 }
752 track->zb.robj = reloc->robj; 656 track->zb.robj = reloc->robj;
753 track->zb.offset = ib_chunk->kdata[idx]; 657 track->zb.offset = idx_value;
754 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 658 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
755 break; 659 break;
756 case R300_TX_OFFSET_0: 660 case R300_TX_OFFSET_0:
757 case R300_TX_OFFSET_0+4: 661 case R300_TX_OFFSET_0+4:
@@ -777,32 +681,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
777 r100_cs_dump_packet(p, pkt); 681 r100_cs_dump_packet(p, pkt);
778 return r; 682 return r;
779 } 683 }
780 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 684 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
781 track->textures[i].robj = reloc->robj; 685 track->textures[i].robj = reloc->robj;
782 break; 686 break;
783 /* Tracked registers */ 687 /* Tracked registers */
784 case 0x2084: 688 case 0x2084:
785 /* VAP_VF_CNTL */ 689 /* VAP_VF_CNTL */
786 track->vap_vf_cntl = ib_chunk->kdata[idx]; 690 track->vap_vf_cntl = idx_value;
787 break; 691 break;
788 case 0x20B4: 692 case 0x20B4:
789 /* VAP_VTX_SIZE */ 693 /* VAP_VTX_SIZE */
790 track->vtx_size = ib_chunk->kdata[idx] & 0x7F; 694 track->vtx_size = idx_value & 0x7F;
791 break; 695 break;
792 case 0x2134: 696 case 0x2134:
793 /* VAP_VF_MAX_VTX_INDX */ 697 /* VAP_VF_MAX_VTX_INDX */
794 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 698 track->max_indx = idx_value & 0x00FFFFFFUL;
795 break; 699 break;
796 case 0x43E4: 700 case 0x43E4:
797 /* SC_SCISSOR1 */ 701 /* SC_SCISSOR1 */
798 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; 702 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
799 if (p->rdev->family < CHIP_RV515) { 703 if (p->rdev->family < CHIP_RV515) {
800 track->maxy -= 1440; 704 track->maxy -= 1440;
801 } 705 }
802 break; 706 break;
803 case 0x4E00: 707 case 0x4E00:
804 /* RB3D_CCTL */ 708 /* RB3D_CCTL */
805 track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; 709 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
806 break; 710 break;
807 case 0x4E38: 711 case 0x4E38:
808 case 0x4E3C: 712 case 0x4E3C:
@@ -825,13 +729,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
825 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 729 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
826 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 730 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
827 731
828 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 732 tmp = idx_value & ~(0x7 << 16);
829 tmp |= tile_flags; 733 tmp |= tile_flags;
830 ib[idx] = tmp; 734 ib[idx] = tmp;
831 735
832 i = (reg - 0x4E38) >> 2; 736 i = (reg - 0x4E38) >> 2;
833 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; 737 track->cb[i].pitch = idx_value & 0x3FFE;
834 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { 738 switch (((idx_value >> 21) & 0xF)) {
835 case 9: 739 case 9:
836 case 11: 740 case 11:
837 case 12: 741 case 12:
@@ -854,13 +758,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
854 break; 758 break;
855 default: 759 default:
856 DRM_ERROR("Invalid color buffer format (%d) !\n", 760 DRM_ERROR("Invalid color buffer format (%d) !\n",
857 ((ib_chunk->kdata[idx] >> 21) & 0xF)); 761 ((idx_value >> 21) & 0xF));
858 return -EINVAL; 762 return -EINVAL;
859 } 763 }
860 break; 764 break;
861 case 0x4F00: 765 case 0x4F00:
862 /* ZB_CNTL */ 766 /* ZB_CNTL */
863 if (ib_chunk->kdata[idx] & 2) { 767 if (idx_value & 2) {
864 track->z_enabled = true; 768 track->z_enabled = true;
865 } else { 769 } else {
866 track->z_enabled = false; 770 track->z_enabled = false;
@@ -868,7 +772,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
868 break; 772 break;
869 case 0x4F10: 773 case 0x4F10:
870 /* ZB_FORMAT */ 774 /* ZB_FORMAT */
871 switch ((ib_chunk->kdata[idx] & 0xF)) { 775 switch ((idx_value & 0xF)) {
872 case 0: 776 case 0:
873 case 1: 777 case 1:
874 track->zb.cpp = 2; 778 track->zb.cpp = 2;
@@ -878,7 +782,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
878 break; 782 break;
879 default: 783 default:
880 DRM_ERROR("Invalid z buffer format (%d) !\n", 784 DRM_ERROR("Invalid z buffer format (%d) !\n",
881 (ib_chunk->kdata[idx] & 0xF)); 785 (idx_value & 0xF));
882 return -EINVAL; 786 return -EINVAL;
883 } 787 }
884 break; 788 break;
@@ -897,17 +801,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
897 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 801 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
898 tile_flags |= R300_DEPTHMICROTILE_TILED;; 802 tile_flags |= R300_DEPTHMICROTILE_TILED;;
899 803
900 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 804 tmp = idx_value & ~(0x7 << 16);
901 tmp |= tile_flags; 805 tmp |= tile_flags;
902 ib[idx] = tmp; 806 ib[idx] = tmp;
903 807
904 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 808 track->zb.pitch = idx_value & 0x3FFC;
905 break; 809 break;
906 case 0x4104: 810 case 0x4104:
907 for (i = 0; i < 16; i++) { 811 for (i = 0; i < 16; i++) {
908 bool enabled; 812 bool enabled;
909 813
910 enabled = !!(ib_chunk->kdata[idx] & (1 << i)); 814 enabled = !!(idx_value & (1 << i));
911 track->textures[i].enabled = enabled; 815 track->textures[i].enabled = enabled;
912 } 816 }
913 break; 817 break;
@@ -929,9 +833,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
929 case 0x44FC: 833 case 0x44FC:
930 /* TX_FORMAT1_[0-15] */ 834 /* TX_FORMAT1_[0-15] */
931 i = (reg - 0x44C0) >> 2; 835 i = (reg - 0x44C0) >> 2;
932 tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; 836 tmp = (idx_value >> 25) & 0x3;
933 track->textures[i].tex_coord_type = tmp; 837 track->textures[i].tex_coord_type = tmp;
934 switch ((ib_chunk->kdata[idx] & 0x1F)) { 838 switch ((idx_value & 0x1F)) {
935 case R300_TX_FORMAT_X8: 839 case R300_TX_FORMAT_X8:
936 case R300_TX_FORMAT_Y4X4: 840 case R300_TX_FORMAT_Y4X4:
937 case R300_TX_FORMAT_Z3Y3X2: 841 case R300_TX_FORMAT_Z3Y3X2:
@@ -971,7 +875,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
971 break; 875 break;
972 default: 876 default:
973 DRM_ERROR("Invalid texture format %u\n", 877 DRM_ERROR("Invalid texture format %u\n",
974 (ib_chunk->kdata[idx] & 0x1F)); 878 (idx_value & 0x1F));
975 return -EINVAL; 879 return -EINVAL;
976 break; 880 break;
977 } 881 }
@@ -994,11 +898,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
994 case 0x443C: 898 case 0x443C:
995 /* TX_FILTER0_[0-15] */ 899 /* TX_FILTER0_[0-15] */
996 i = (reg - 0x4400) >> 2; 900 i = (reg - 0x4400) >> 2;
997 tmp = ib_chunk->kdata[idx] & 0x7; 901 tmp = idx_value & 0x7;
998 if (tmp == 2 || tmp == 4 || tmp == 6) { 902 if (tmp == 2 || tmp == 4 || tmp == 6) {
999 track->textures[i].roundup_w = false; 903 track->textures[i].roundup_w = false;
1000 } 904 }
1001 tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; 905 tmp = (idx_value >> 3) & 0x7;
1002 if (tmp == 2 || tmp == 4 || tmp == 6) { 906 if (tmp == 2 || tmp == 4 || tmp == 6) {
1003 track->textures[i].roundup_h = false; 907 track->textures[i].roundup_h = false;
1004 } 908 }
@@ -1021,12 +925,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1021 case 0x453C: 925 case 0x453C:
1022 /* TX_FORMAT2_[0-15] */ 926 /* TX_FORMAT2_[0-15] */
1023 i = (reg - 0x4500) >> 2; 927 i = (reg - 0x4500) >> 2;
1024 tmp = ib_chunk->kdata[idx] & 0x3FFF; 928 tmp = idx_value & 0x3FFF;
1025 track->textures[i].pitch = tmp + 1; 929 track->textures[i].pitch = tmp + 1;
1026 if (p->rdev->family >= CHIP_RV515) { 930 if (p->rdev->family >= CHIP_RV515) {
1027 tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; 931 tmp = ((idx_value >> 15) & 1) << 11;
1028 track->textures[i].width_11 = tmp; 932 track->textures[i].width_11 = tmp;
1029 tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; 933 tmp = ((idx_value >> 16) & 1) << 11;
1030 track->textures[i].height_11 = tmp; 934 track->textures[i].height_11 = tmp;
1031 } 935 }
1032 break; 936 break;
@@ -1048,15 +952,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1048 case 0x44BC: 952 case 0x44BC:
1049 /* TX_FORMAT0_[0-15] */ 953 /* TX_FORMAT0_[0-15] */
1050 i = (reg - 0x4480) >> 2; 954 i = (reg - 0x4480) >> 2;
1051 tmp = ib_chunk->kdata[idx] & 0x7FF; 955 tmp = idx_value & 0x7FF;
1052 track->textures[i].width = tmp + 1; 956 track->textures[i].width = tmp + 1;
1053 tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; 957 tmp = (idx_value >> 11) & 0x7FF;
1054 track->textures[i].height = tmp + 1; 958 track->textures[i].height = tmp + 1;
1055 tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; 959 tmp = (idx_value >> 26) & 0xF;
1056 track->textures[i].num_levels = tmp; 960 track->textures[i].num_levels = tmp;
1057 tmp = ib_chunk->kdata[idx] & (1 << 31); 961 tmp = idx_value & (1 << 31);
1058 track->textures[i].use_pitch = !!tmp; 962 track->textures[i].use_pitch = !!tmp;
1059 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; 963 tmp = (idx_value >> 22) & 0xF;
1060 track->textures[i].txdepth = tmp; 964 track->textures[i].txdepth = tmp;
1061 break; 965 break;
1062 case R300_ZB_ZPASS_ADDR: 966 case R300_ZB_ZPASS_ADDR:
@@ -1067,7 +971,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1067 r100_cs_dump_packet(p, pkt); 971 r100_cs_dump_packet(p, pkt);
1068 return r; 972 return r;
1069 } 973 }
1070 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 974 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1071 break; 975 break;
1072 case 0x4be8: 976 case 0x4be8:
1073 /* valid register only on RV530 */ 977 /* valid register only on RV530 */
@@ -1085,60 +989,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1085static int r300_packet3_check(struct radeon_cs_parser *p, 989static int r300_packet3_check(struct radeon_cs_parser *p,
1086 struct radeon_cs_packet *pkt) 990 struct radeon_cs_packet *pkt)
1087{ 991{
1088 struct radeon_cs_chunk *ib_chunk;
1089
1090 struct radeon_cs_reloc *reloc; 992 struct radeon_cs_reloc *reloc;
1091 struct r100_cs_track *track; 993 struct r100_cs_track *track;
1092 volatile uint32_t *ib; 994 volatile uint32_t *ib;
1093 unsigned idx; 995 unsigned idx;
1094 unsigned i, c;
1095 int r; 996 int r;
1096 997
1097 ib = p->ib->ptr; 998 ib = p->ib->ptr;
1098 ib_chunk = &p->chunks[p->chunk_ib_idx];
1099 idx = pkt->idx + 1; 999 idx = pkt->idx + 1;
1100 track = (struct r100_cs_track *)p->track; 1000 track = (struct r100_cs_track *)p->track;
1101 switch(pkt->opcode) { 1001 switch(pkt->opcode) {
1102 case PACKET3_3D_LOAD_VBPNTR: 1002 case PACKET3_3D_LOAD_VBPNTR:
1103 c = ib_chunk->kdata[idx++] & 0x1F; 1003 r = r100_packet3_load_vbpntr(p, pkt, idx);
1104 track->num_arrays = c; 1004 if (r)
1105 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1005 return r;
1106 r = r100_cs_packet_next_reloc(p, &reloc);
1107 if (r) {
1108 DRM_ERROR("No reloc for packet3 %d\n",
1109 pkt->opcode);
1110 r100_cs_dump_packet(p, pkt);
1111 return r;
1112 }
1113 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1114 track->arrays[i + 0].robj = reloc->robj;
1115 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1116 track->arrays[i + 0].esize &= 0x7F;
1117 r = r100_cs_packet_next_reloc(p, &reloc);
1118 if (r) {
1119 DRM_ERROR("No reloc for packet3 %d\n",
1120 pkt->opcode);
1121 r100_cs_dump_packet(p, pkt);
1122 return r;
1123 }
1124 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1125 track->arrays[i + 1].robj = reloc->robj;
1126 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1127 track->arrays[i + 1].esize &= 0x7F;
1128 }
1129 if (c & 1) {
1130 r = r100_cs_packet_next_reloc(p, &reloc);
1131 if (r) {
1132 DRM_ERROR("No reloc for packet3 %d\n",
1133 pkt->opcode);
1134 r100_cs_dump_packet(p, pkt);
1135 return r;
1136 }
1137 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1138 track->arrays[i + 0].robj = reloc->robj;
1139 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1140 track->arrays[i + 0].esize &= 0x7F;
1141 }
1142 break; 1006 break;
1143 case PACKET3_INDX_BUFFER: 1007 case PACKET3_INDX_BUFFER:
1144 r = r100_cs_packet_next_reloc(p, &reloc); 1008 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1147,7 +1011,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1147 r100_cs_dump_packet(p, pkt); 1011 r100_cs_dump_packet(p, pkt);
1148 return r; 1012 return r;
1149 } 1013 }
1150 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1014 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
1151 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1015 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1152 if (r) { 1016 if (r) {
1153 return r; 1017 return r;
@@ -1158,11 +1022,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1158 /* Number of dwords is vtx_size * (num_vertices - 1) 1022 /* Number of dwords is vtx_size * (num_vertices - 1)
1159 * PRIM_WALK must be equal to 3 vertex data in embedded 1023 * PRIM_WALK must be equal to 3 vertex data in embedded
1160 * in cmd stream */ 1024 * in cmd stream */
1161 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1025 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1162 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1026 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1163 return -EINVAL; 1027 return -EINVAL;
1164 } 1028 }
1165 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1029 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1166 track->immd_dwords = pkt->count - 1; 1030 track->immd_dwords = pkt->count - 1;
1167 r = r100_cs_track_check(p->rdev, track); 1031 r = r100_cs_track_check(p->rdev, track);
1168 if (r) { 1032 if (r) {
@@ -1173,11 +1037,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1173 /* Number of dwords is vtx_size * (num_vertices - 1) 1037 /* Number of dwords is vtx_size * (num_vertices - 1)
1174 * PRIM_WALK must be equal to 3 vertex data in embedded 1038 * PRIM_WALK must be equal to 3 vertex data in embedded
1175 * in cmd stream */ 1039 * in cmd stream */
1176 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1040 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1177 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1041 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1178 return -EINVAL; 1042 return -EINVAL;
1179 } 1043 }
1180 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1044 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1181 track->immd_dwords = pkt->count; 1045 track->immd_dwords = pkt->count;
1182 r = r100_cs_track_check(p->rdev, track); 1046 r = r100_cs_track_check(p->rdev, track);
1183 if (r) { 1047 if (r) {
@@ -1185,28 +1049,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1185 } 1049 }
1186 break; 1050 break;
1187 case PACKET3_3D_DRAW_VBUF: 1051 case PACKET3_3D_DRAW_VBUF:
1188 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1052 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1189 r = r100_cs_track_check(p->rdev, track); 1053 r = r100_cs_track_check(p->rdev, track);
1190 if (r) { 1054 if (r) {
1191 return r; 1055 return r;
1192 } 1056 }
1193 break; 1057 break;
1194 case PACKET3_3D_DRAW_VBUF_2: 1058 case PACKET3_3D_DRAW_VBUF_2:
1195 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1059 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1196 r = r100_cs_track_check(p->rdev, track); 1060 r = r100_cs_track_check(p->rdev, track);
1197 if (r) { 1061 if (r) {
1198 return r; 1062 return r;
1199 } 1063 }
1200 break; 1064 break;
1201 case PACKET3_3D_DRAW_INDX: 1065 case PACKET3_3D_DRAW_INDX:
1202 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1066 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1203 r = r100_cs_track_check(p->rdev, track); 1067 r = r100_cs_track_check(p->rdev, track);
1204 if (r) { 1068 if (r) {
1205 return r; 1069 return r;
1206 } 1070 }
1207 break; 1071 break;
1208 case PACKET3_3D_DRAW_INDX_2: 1072 case PACKET3_3D_DRAW_INDX_2:
1209 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1073 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1210 r = r100_cs_track_check(p->rdev, track); 1074 r = r100_cs_track_check(p->rdev, track);
1211 if (r) { 1075 if (r) {
1212 return r; 1076 return r;
@@ -1265,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev)
1265 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); 1129 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
1266} 1130}
1267 1131
1268int r300_init(struct radeon_device *rdev)
1269{
1270 r300_set_reg_safe(rdev);
1271 return 0;
1272}
1273
1274void r300_mc_program(struct radeon_device *rdev) 1132void r300_mc_program(struct radeon_device *rdev)
1275{ 1133{
1276 struct r100_mc_save save; 1134 struct r100_mc_save save;
@@ -1304,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev)
1304 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 1162 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1305 r100_mc_resume(rdev, &save); 1163 r100_mc_resume(rdev, &save);
1306} 1164}
1165
1166void r300_clock_startup(struct radeon_device *rdev)
1167{
1168 u32 tmp;
1169
1170 if (radeon_dynclks != -1 && radeon_dynclks)
1171 radeon_legacy_set_clock_gating(rdev, 1);
1172 /* We need to force on some of the block */
1173 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
1174 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
1175 if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
1176 tmp |= S_00000D_FORCE_VAP(1);
1177 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
1178}
1179
1180static int r300_startup(struct radeon_device *rdev)
1181{
1182 int r;
1183
1184 r300_mc_program(rdev);
1185 /* Resume clock */
1186 r300_clock_startup(rdev);
1187 /* Initialize GPU configuration (# pipes, ...) */
1188 r300_gpu_init(rdev);
1189 /* Initialize GART (initialize after TTM so we can allocate
1190 * memory through TTM but finalize after TTM) */
1191 if (rdev->flags & RADEON_IS_PCIE) {
1192 r = rv370_pcie_gart_enable(rdev);
1193 if (r)
1194 return r;
1195 }
1196 if (rdev->flags & RADEON_IS_PCI) {
1197 r = r100_pci_gart_enable(rdev);
1198 if (r)
1199 return r;
1200 }
1201 /* Enable IRQ */
1202 rdev->irq.sw_int = true;
1203 r100_irq_set(rdev);
1204 /* 1M ring buffer */
1205 r = r100_cp_init(rdev, 1024 * 1024);
1206 if (r) {
1207 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
1208 return r;
1209 }
1210 r = r100_wb_init(rdev);
1211 if (r)
1212 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
1213 r = r100_ib_init(rdev);
1214 if (r) {
1215 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
1216 return r;
1217 }
1218 return 0;
1219}
1220
1221int r300_resume(struct radeon_device *rdev)
1222{
1223 /* Make sur GART are not working */
1224 if (rdev->flags & RADEON_IS_PCIE)
1225 rv370_pcie_gart_disable(rdev);
1226 if (rdev->flags & RADEON_IS_PCI)
1227 r100_pci_gart_disable(rdev);
1228 /* Resume clock before doing reset */
1229 r300_clock_startup(rdev);
1230 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1231 if (radeon_gpu_reset(rdev)) {
1232 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1233 RREG32(R_000E40_RBBM_STATUS),
1234 RREG32(R_0007C0_CP_STAT));
1235 }
1236 /* post */
1237 radeon_combios_asic_init(rdev->ddev);
1238 /* Resume clock after posting */
1239 r300_clock_startup(rdev);
1240 return r300_startup(rdev);
1241}
1242
1243int r300_suspend(struct radeon_device *rdev)
1244{
1245 r100_cp_disable(rdev);
1246 r100_wb_disable(rdev);
1247 r100_irq_disable(rdev);
1248 if (rdev->flags & RADEON_IS_PCIE)
1249 rv370_pcie_gart_disable(rdev);
1250 if (rdev->flags & RADEON_IS_PCI)
1251 r100_pci_gart_disable(rdev);
1252 return 0;
1253}
1254
1255void r300_fini(struct radeon_device *rdev)
1256{
1257 r300_suspend(rdev);
1258 r100_cp_fini(rdev);
1259 r100_wb_fini(rdev);
1260 r100_ib_fini(rdev);
1261 radeon_gem_fini(rdev);
1262 if (rdev->flags & RADEON_IS_PCIE)
1263 rv370_pcie_gart_fini(rdev);
1264 if (rdev->flags & RADEON_IS_PCI)
1265 r100_pci_gart_fini(rdev);
1266 radeon_irq_kms_fini(rdev);
1267 radeon_fence_driver_fini(rdev);
1268 radeon_object_fini(rdev);
1269 radeon_atombios_fini(rdev);
1270 kfree(rdev->bios);
1271 rdev->bios = NULL;
1272}
1273
1274int r300_init(struct radeon_device *rdev)
1275{
1276 int r;
1277
1278 /* Disable VGA */
1279 r100_vga_render_disable(rdev);
1280 /* Initialize scratch registers */
1281 radeon_scratch_init(rdev);
1282 /* Initialize surface registers */
1283 radeon_surface_init(rdev);
1284 /* TODO: disable VGA need to use VGA request */
1285 /* BIOS*/
1286 if (!radeon_get_bios(rdev)) {
1287 if (ASIC_IS_AVIVO(rdev))
1288 return -EINVAL;
1289 }
1290 if (rdev->is_atom_bios) {
1291 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
1292 return -EINVAL;
1293 } else {
1294 r = radeon_combios_init(rdev);
1295 if (r)
1296 return r;
1297 }
1298 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1299 if (radeon_gpu_reset(rdev)) {
1300 dev_warn(rdev->dev,
1301 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1302 RREG32(R_000E40_RBBM_STATUS),
1303 RREG32(R_0007C0_CP_STAT));
1304 }
1305 /* check if cards are posted or not */
1306 if (!radeon_card_posted(rdev) && rdev->bios) {
1307 DRM_INFO("GPU not posted. posting now...\n");
1308 radeon_combios_asic_init(rdev->ddev);
1309 }
1310 /* Set asic errata */
1311 r300_errata(rdev);
1312 /* Initialize clocks */
1313 radeon_get_clock_info(rdev->ddev);
1314 /* Get vram informations */
1315 r300_vram_info(rdev);
1316 /* Initialize memory controller (also test AGP) */
1317 r = r420_mc_init(rdev);
1318 if (r)
1319 return r;
1320 /* Fence driver */
1321 r = radeon_fence_driver_init(rdev);
1322 if (r)
1323 return r;
1324 r = radeon_irq_kms_init(rdev);
1325 if (r)
1326 return r;
1327 /* Memory manager */
1328 r = radeon_object_init(rdev);
1329 if (r)
1330 return r;
1331 if (rdev->flags & RADEON_IS_PCIE) {
1332 r = rv370_pcie_gart_init(rdev);
1333 if (r)
1334 return r;
1335 }
1336 if (rdev->flags & RADEON_IS_PCI) {
1337 r = r100_pci_gart_init(rdev);
1338 if (r)
1339 return r;
1340 }
1341 r300_set_reg_safe(rdev);
1342 rdev->accel_working = true;
1343 r = r300_startup(rdev);
1344 if (r) {
1345 /* Somethings want wront with the accel init stop accel */
1346 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1347 r300_suspend(rdev);
1348 r100_cp_fini(rdev);
1349 r100_wb_fini(rdev);
1350 r100_ib_fini(rdev);
1351 if (rdev->flags & RADEON_IS_PCIE)
1352 rv370_pcie_gart_fini(rdev);
1353 if (rdev->flags & RADEON_IS_PCI)
1354 r100_pci_gart_fini(rdev);
1355 radeon_irq_kms_fini(rdev);
1356 rdev->accel_working = false;
1357 }
1358 return 0;
1359}
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index d4fa3eb1074f..4c73114f0de9 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -96,6 +96,211 @@
96#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) 96#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
97#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) 97#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
98#define C_000170_AGP_BASE_ADDR 0x00000000 98#define C_000170_AGP_BASE_ADDR 0x00000000
99#define R_0007C0_CP_STAT 0x0007C0
100#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
101#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
102#define C_0007C0_MRU_BUSY 0xFFFFFFFE
103#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
104#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
105#define C_0007C0_MWU_BUSY 0xFFFFFFFD
106#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
107#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
108#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
109#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
110#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
111#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
112#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
113#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
114#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
115#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
116#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
117#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
118#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
119#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
120#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
121#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
122#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
123#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
124#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
125#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
126#define C_0007C0_CSI_BUSY 0xFFFFDFFF
127#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
128#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
129#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
130#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
131#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
132#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
133#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
134#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
135#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
136#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
137#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
138#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
139#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
140#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
141#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
142#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
143#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
144#define C_0007C0_CP_BUSY 0x7FFFFFFF
145#define R_000E40_RBBM_STATUS 0x000E40
146#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
147#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
148#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
149#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
150#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
151#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
152#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
153#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
154#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
155#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
156#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
157#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
158#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
159#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
160#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
161#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
162#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
163#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
164#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
165#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
166#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
167#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
168#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
169#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
170#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
171#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
172#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
173#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
174#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
175#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
176#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
177#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
178#define C_000E40_E2_BUSY 0xFFFDFFFF
179#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
180#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
181#define C_000E40_RB2D_BUSY 0xFFFBFFFF
182#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
183#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
184#define C_000E40_RB3D_BUSY 0xFFF7FFFF
185#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
186#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
187#define C_000E40_VAP_BUSY 0xFFEFFFFF
188#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
189#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
190#define C_000E40_RE_BUSY 0xFFDFFFFF
191#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
192#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
193#define C_000E40_TAM_BUSY 0xFFBFFFFF
194#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
195#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
196#define C_000E40_TDM_BUSY 0xFF7FFFFF
197#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
198#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
199#define C_000E40_PB_BUSY 0xFEFFFFFF
200#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
201#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
202#define C_000E40_TIM_BUSY 0xFDFFFFFF
203#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
204#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
205#define C_000E40_GA_BUSY 0xFBFFFFFF
206#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
207#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
208#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
99 212
100 213
214#define R_00000D_SCLK_CNTL 0x00000D
215#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
216#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
217#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
218#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
219#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
220#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
221#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
222#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
223#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
224#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
225#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
226#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
227#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
228#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
229#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
230#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
231#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
232#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
233#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
234#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
235#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
236#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
237#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
238#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
239#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
240#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
241#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
242#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
243#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
244#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
245#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
246#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
247#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
248#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
249#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
250#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
251#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
252#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
253#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
254#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
255#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
256#define C_00000D_FORCE_DISP2 0xFFFF7FFF
257#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
258#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
259#define C_00000D_FORCE_CP 0xFFFEFFFF
260#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
261#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
262#define C_00000D_FORCE_HDP 0xFFFDFFFF
263#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
264#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
265#define C_00000D_FORCE_DISP1 0xFFFBFFFF
266#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
267#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
268#define C_00000D_FORCE_TOP 0xFFF7FFFF
269#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
270#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
271#define C_00000D_FORCE_E2 0xFFEFFFFF
272#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
273#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
274#define C_00000D_FORCE_SE 0xFFDFFFFF
275#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
276#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
277#define C_00000D_FORCE_IDCT 0xFFBFFFFF
278#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
279#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
280#define C_00000D_FORCE_VIP 0xFF7FFFFF
281#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
282#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
283#define C_00000D_FORCE_RE 0xFEFFFFFF
284#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
285#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
286#define C_00000D_FORCE_PB 0xFDFFFFFF
287#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
288#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
289#define C_00000D_FORCE_TAM 0xFBFFFFFF
290#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
291#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
292#define C_00000D_FORCE_TDM 0xF7FFFFFF
293#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
294#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
295#define C_00000D_FORCE_RB 0xEFFFFFFF
296#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
297#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
298#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
299#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
300#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
301#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
302#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
303#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
304#define C_00000D_FORCE_OV0 0x7FFFFFFF
305
101#endif 306#endif
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 49a2fdc57d27..1cefdbcc0850 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev)
155static void r420_clock_resume(struct radeon_device *rdev) 155static void r420_clock_resume(struct radeon_device *rdev)
156{ 156{
157 u32 sclk_cntl; 157 u32 sclk_cntl;
158
159 if (radeon_dynclks != -1 && radeon_dynclks)
160 radeon_atom_set_clock_gating(rdev, 1);
158 sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); 161 sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
159 sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); 162 sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
160 if (rdev->family == CHIP_R420) 163 if (rdev->family == CHIP_R420)
@@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev)
167 int r; 170 int r;
168 171
169 r300_mc_program(rdev); 172 r300_mc_program(rdev);
173 /* Resume clock */
174 r420_clock_resume(rdev);
170 /* Initialize GART (initialize after TTM so we can allocate 175 /* Initialize GART (initialize after TTM so we can allocate
171 * memory through TTM but finalize after TTM) */ 176 * memory through TTM but finalize after TTM) */
172 if (rdev->flags & RADEON_IS_PCIE) { 177 if (rdev->flags & RADEON_IS_PCIE) {
@@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev)
267{ 272{
268 int r; 273 int r;
269 274
270 rdev->new_init_path = true;
271 /* Initialize scratch registers */ 275 /* Initialize scratch registers */
272 radeon_scratch_init(rdev); 276 radeon_scratch_init(rdev);
273 /* Initialize surface registers */ 277 /* Initialize surface registers */
@@ -307,6 +311,8 @@ int r420_init(struct radeon_device *rdev)
307 } 311 }
308 /* Initialize clocks */ 312 /* Initialize clocks */
309 radeon_get_clock_info(rdev->ddev); 313 radeon_get_clock_info(rdev->ddev);
314 /* Initialize power management */
315 radeon_pm_init(rdev);
310 /* Get vram informations */ 316 /* Get vram informations */
311 r300_vram_info(rdev); 317 r300_vram_info(rdev);
312 /* Initialize memory controller (also test AGP) */ 318 /* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h
index a48a7db1e2aa..fc78d31a0b4a 100644
--- a/drivers/gpu/drm/radeon/r420d.h
+++ b/drivers/gpu/drm/radeon/r420d.h
@@ -212,9 +212,9 @@
212#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) 212#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
213#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) 213#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
214#define C_00000D_FORCE_E2 0xFFEFFFFF 214#define C_00000D_FORCE_E2 0xFFEFFFFF
215#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) 215#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
216#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) 216#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
217#define C_00000D_FORCE_SE 0xFFDFFFFF 217#define C_00000D_FORCE_VAP 0xFFDFFFFF
218#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) 218#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
219#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) 219#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
220#define C_00000D_FORCE_IDCT 0xFFBFFFFF 220#define C_00000D_FORCE_IDCT 0xFFBFFFFF
@@ -224,24 +224,24 @@
224#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) 224#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
225#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) 225#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
226#define C_00000D_FORCE_RE 0xFEFFFFFF 226#define C_00000D_FORCE_RE 0xFEFFFFFF
227#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) 227#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
228#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) 228#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
229#define C_00000D_FORCE_PB 0xFDFFFFFF 229#define C_00000D_FORCE_SR 0xFDFFFFFF
230#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) 230#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
231#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) 231#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
232#define C_00000D_FORCE_PX 0xFBFFFFFF 232#define C_00000D_FORCE_PX 0xFBFFFFFF
233#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) 233#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
234#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) 234#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
235#define C_00000D_FORCE_TX 0xF7FFFFFF 235#define C_00000D_FORCE_TX 0xF7FFFFFF
236#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) 236#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
237#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) 237#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
238#define C_00000D_FORCE_RB 0xEFFFFFFF 238#define C_00000D_FORCE_US 0xEFFFFFFF
239#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) 239#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
240#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) 240#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
241#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF 241#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
242#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) 242#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
243#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) 243#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
244#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF 244#define C_00000D_FORCE_SU 0xBFFFFFFF
245#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) 245#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
246#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) 246#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
247#define C_00000D_FORCE_OV0 0x7FFFFFFF 247#define C_00000D_FORCE_OV0 0x7FFFFFFF
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index e1d5e0331e19..7baa73955563 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -384,9 +384,16 @@
384# define AVIVO_D1GRPH_TILED (1 << 20) 384# define AVIVO_D1GRPH_TILED (1 << 20)
385# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) 385# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
386 386
387/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
388 * block and vice versa. This applies to GRPH, CUR, etc.
389 */
387#define AVIVO_D1GRPH_LUT_SEL 0x6108 390#define AVIVO_D1GRPH_LUT_SEL 0x6108
388#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 391#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
392#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
393#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
389#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 394#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
395#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
396#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
390#define AVIVO_D1GRPH_PITCH 0x6120 397#define AVIVO_D1GRPH_PITCH 0x6120
391#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124 398#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
392#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128 399#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
@@ -404,6 +411,8 @@
404# define AVIVO_D1CURSOR_MODE_MASK (3 << 8) 411# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
405# define AVIVO_D1CURSOR_MODE_24BPP 2 412# define AVIVO_D1CURSOR_MODE_24BPP 2
406#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 413#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
414#define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c
415#define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c
407#define AVIVO_D1CUR_SIZE 0x6410 416#define AVIVO_D1CUR_SIZE 0x6410
408#define AVIVO_D1CUR_POSITION 0x6414 417#define AVIVO_D1CUR_POSITION 0x6414
409#define AVIVO_D1CUR_HOT_SPOT 0x6418 418#define AVIVO_D1CUR_HOT_SPOT 0x6418
@@ -445,6 +454,8 @@
445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534 454#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
446# define AVIVO_VBLANK_ACK (1 << 4) 455# define AVIVO_VBLANK_ACK (1 << 4)
447#define AVIVO_D1MODE_VLINE_START_END 0x6538 456#define AVIVO_D1MODE_VLINE_START_END 0x6538
457#define AVIVO_D1MODE_VLINE_STATUS 0x653c
458# define AVIVO_D1MODE_VLINE_STAT (1 << 12)
448#define AVIVO_DxMODE_INT_MASK 0x6540 459#define AVIVO_DxMODE_INT_MASK 0x6540
449# define AVIVO_D1MODE_INT_MASK (1 << 0) 460# define AVIVO_D1MODE_INT_MASK (1 << 0)
450# define AVIVO_D2MODE_INT_MASK (1 << 8) 461# define AVIVO_D2MODE_INT_MASK (1 << 8)
@@ -502,6 +513,7 @@
502 513
503#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 514#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
504#define AVIVO_D2MODE_VLINE_START_END 0x6d38 515#define AVIVO_D2MODE_VLINE_START_END 0x6d38
516#define AVIVO_D2MODE_VLINE_STATUS 0x6d3c
505#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 517#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
506#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 518#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
507#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 519#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index d4b0b9d2e39b..f7435185c0a6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -26,108 +26,13 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 29#include "radeon.h"
30#include "atom.h"
31#include "r520d.h"
31 32
32/* r520,rv530,rv560,rv570,r580 depends on : */ 33/* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */
33void r100_hdp_reset(struct radeon_device *rdev);
34void r420_pipes_init(struct radeon_device *rdev);
35void rs600_mc_disable_clients(struct radeon_device *rdev);
36void rs600_disable_vga(struct radeon_device *rdev);
37int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
38int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
39 34
40/* This files gather functions specifics to: 35static int r520_mc_wait_for_idle(struct radeon_device *rdev)
41 * r520,rv530,rv560,rv570,r580
42 *
43 * Some of these functions might be used by newer ASICs.
44 */
45void r520_gpu_init(struct radeon_device *rdev);
46int r520_mc_wait_for_idle(struct radeon_device *rdev);
47
48
49/*
50 * MC
51 */
52int r520_mc_init(struct radeon_device *rdev)
53{
54 uint32_t tmp;
55 int r;
56
57 if (r100_debugfs_rbbm_init(rdev)) {
58 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
59 }
60 if (rv515_debugfs_pipes_info_init(rdev)) {
61 DRM_ERROR("Failed to register debugfs file for pipes !\n");
62 }
63 if (rv515_debugfs_ga_info_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for pipes !\n");
65 }
66
67 r520_gpu_init(rdev);
68 rv370_pcie_gart_disable(rdev);
69
70 /* Setup GPU memory space */
71 rdev->mc.vram_location = 0xFFFFFFFFUL;
72 rdev->mc.gtt_location = 0xFFFFFFFFUL;
73 if (rdev->flags & RADEON_IS_AGP) {
74 r = radeon_agp_init(rdev);
75 if (r) {
76 printk(KERN_WARNING "[drm] Disabling AGP\n");
77 rdev->flags &= ~RADEON_IS_AGP;
78 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
79 } else {
80 rdev->mc.gtt_location = rdev->mc.agp_base;
81 }
82 }
83 r = radeon_mc_setup(rdev);
84 if (r) {
85 return r;
86 }
87
88 /* Program GPU memory space */
89 rs600_mc_disable_clients(rdev);
90 if (r520_mc_wait_for_idle(rdev)) {
91 printk(KERN_WARNING "Failed to wait MC idle while "
92 "programming pipes. Bad things might happen.\n");
93 }
94 /* Write VRAM size in case we are limiting it */
95 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
96 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
97 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
98 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
99 WREG32_MC(R520_MC_FB_LOCATION, tmp);
100 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
101 WREG32(0x310, rdev->mc.vram_location);
102 if (rdev->flags & RADEON_IS_AGP) {
103 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
104 tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
105 tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
106 WREG32_MC(R520_MC_AGP_LOCATION, tmp);
107 WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
108 WREG32_MC(R520_MC_AGP_BASE_2, 0);
109 } else {
110 WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
111 WREG32_MC(R520_MC_AGP_BASE, 0);
112 WREG32_MC(R520_MC_AGP_BASE_2, 0);
113 }
114 return 0;
115}
116
117void r520_mc_fini(struct radeon_device *rdev)
118{
119}
120
121
122/*
123 * Global GPU functions
124 */
125void r520_errata(struct radeon_device *rdev)
126{
127 rdev->pll_errata = 0;
128}
129
130int r520_mc_wait_for_idle(struct radeon_device *rdev)
131{ 36{
132 unsigned i; 37 unsigned i;
133 uint32_t tmp; 38 uint32_t tmp;
@@ -143,12 +48,12 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
143 return -1; 48 return -1;
144} 49}
145 50
146void r520_gpu_init(struct radeon_device *rdev) 51static void r520_gpu_init(struct radeon_device *rdev)
147{ 52{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 53 unsigned pipe_select_current, gb_pipe_select, tmp;
149 54
150 r100_hdp_reset(rdev); 55 r100_hdp_reset(rdev);
151 rs600_disable_vga(rdev); 56 rv515_vga_render_disable(rdev);
152 /* 57 /*
153 * DST_PIPE_CONFIG 0x170C 58 * DST_PIPE_CONFIG 0x170C
154 * GB_TILE_CONFIG 0x4018 59 * GB_TILE_CONFIG 0x4018
@@ -186,10 +91,6 @@ void r520_gpu_init(struct radeon_device *rdev)
186 } 91 }
187} 92}
188 93
189
190/*
191 * VRAM info
192 */
193static void r520_vram_get_type(struct radeon_device *rdev) 94static void r520_vram_get_type(struct radeon_device *rdev)
194{ 95{
195 uint32_t tmp; 96 uint32_t tmp;
@@ -233,7 +134,169 @@ void r520_vram_info(struct radeon_device *rdev)
233 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 134 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
234} 135}
235 136
236void r520_bandwidth_update(struct radeon_device *rdev) 137void r520_mc_program(struct radeon_device *rdev)
138{
139 struct rv515_mc_save save;
140
141 /* Stops all mc clients */
142 rv515_mc_stop(rdev, &save);
143
144 /* Wait for mc idle */
145 if (r520_mc_wait_for_idle(rdev))
146 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
147 /* Write VRAM size in case we are limiting it */
148 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
149 /* Program MC, should be a 32bits limited address space */
150 WREG32_MC(R_000004_MC_FB_LOCATION,
151 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
152 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
153 WREG32(R_000134_HDP_FB_LOCATION,
154 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
155 if (rdev->flags & RADEON_IS_AGP) {
156 WREG32_MC(R_000005_MC_AGP_LOCATION,
157 S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
158 S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
159 WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
160 WREG32_MC(R_000007_AGP_BASE_2,
161 S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
162 } else {
163 WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
164 WREG32_MC(R_000006_AGP_BASE, 0);
165 WREG32_MC(R_000007_AGP_BASE_2, 0);
166 }
167
168 rv515_mc_resume(rdev, &save);
169}
170
171static int r520_startup(struct radeon_device *rdev)
172{
173 int r;
174
175 r520_mc_program(rdev);
176 /* Resume clock */
177 rv515_clock_startup(rdev);
178 /* Initialize GPU configuration (# pipes, ...) */
179 r520_gpu_init(rdev);
180 /* Initialize GART (initialize after TTM so we can allocate
181 * memory through TTM but finalize after TTM) */
182 if (rdev->flags & RADEON_IS_PCIE) {
183 r = rv370_pcie_gart_enable(rdev);
184 if (r)
185 return r;
186 }
187 /* Enable IRQ */
188 rdev->irq.sw_int = true;
189 rs600_irq_set(rdev);
190 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024);
192 if (r) {
193 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
194 return r;
195 }
196 r = r100_wb_init(rdev);
197 if (r)
198 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
199 r = r100_ib_init(rdev);
200 if (r) {
201 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
202 return r;
203 }
204 return 0;
205}
206
207int r520_resume(struct radeon_device *rdev)
237{ 208{
238 rv515_bandwidth_avivo_update(rdev); 209 /* Make sur GART are not working */
210 if (rdev->flags & RADEON_IS_PCIE)
211 rv370_pcie_gart_disable(rdev);
212 /* Resume clock before doing reset */
213 rv515_clock_startup(rdev);
214 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
215 if (radeon_gpu_reset(rdev)) {
216 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
217 RREG32(R_000E40_RBBM_STATUS),
218 RREG32(R_0007C0_CP_STAT));
219 }
220 /* post */
221 atom_asic_init(rdev->mode_info.atom_context);
222 /* Resume clock after posting */
223 rv515_clock_startup(rdev);
224 return r520_startup(rdev);
225}
226
227int r520_init(struct radeon_device *rdev)
228{
229 int r;
230
231 /* Initialize scratch registers */
232 radeon_scratch_init(rdev);
233 /* Initialize surface registers */
234 radeon_surface_init(rdev);
235 /* TODO: disable VGA need to use VGA request */
236 /* BIOS*/
237 if (!radeon_get_bios(rdev)) {
238 if (ASIC_IS_AVIVO(rdev))
239 return -EINVAL;
240 }
241 if (rdev->is_atom_bios) {
242 r = radeon_atombios_init(rdev);
243 if (r)
244 return r;
245 } else {
246 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
247 return -EINVAL;
248 }
249 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
250 if (radeon_gpu_reset(rdev)) {
251 dev_warn(rdev->dev,
252 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
253 RREG32(R_000E40_RBBM_STATUS),
254 RREG32(R_0007C0_CP_STAT));
255 }
256 /* check if cards are posted or not */
257 if (!radeon_card_posted(rdev) && rdev->bios) {
258 DRM_INFO("GPU not posted. posting now...\n");
259 atom_asic_init(rdev->mode_info.atom_context);
260 }
261 /* Initialize clocks */
262 radeon_get_clock_info(rdev->ddev);
263 /* Initialize power management */
264 radeon_pm_init(rdev);
265 /* Get vram informations */
266 r520_vram_info(rdev);
267 /* Initialize memory controller (also test AGP) */
268 r = r420_mc_init(rdev);
269 if (r)
270 return r;
271 rv515_debugfs(rdev);
272 /* Fence driver */
273 r = radeon_fence_driver_init(rdev);
274 if (r)
275 return r;
276 r = radeon_irq_kms_init(rdev);
277 if (r)
278 return r;
279 /* Memory manager */
280 r = radeon_object_init(rdev);
281 if (r)
282 return r;
283 r = rv370_pcie_gart_init(rdev);
284 if (r)
285 return r;
286 rv515_set_safe_registers(rdev);
287 rdev->accel_working = true;
288 r = r520_startup(rdev);
289 if (r) {
290 /* Somethings want wront with the accel init stop accel */
291 dev_err(rdev->dev, "Disabling GPU acceleration\n");
292 rv515_suspend(rdev);
293 r100_cp_fini(rdev);
294 r100_wb_fini(rdev);
295 r100_ib_fini(rdev);
296 rv370_pcie_gart_fini(rdev);
297 radeon_agp_fini(rdev);
298 radeon_irq_kms_fini(rdev);
299 rdev->accel_working = false;
300 }
301 return 0;
239} 302}
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 000000000000..61af61f644bc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R520D_H__
29#define __R520D_H__
30
31/* Registers */
32#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
33#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
34#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
35#define C_0000F8_CONFIG_MEMSIZE 0x00000000
36#define R_000134_HDP_FB_LOCATION 0x000134
37#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
38#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
39#define C_000134_HDP_FB_START 0xFFFF0000
40#define R_0007C0_CP_STAT 0x0007C0
41#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
42#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
43#define C_0007C0_MRU_BUSY 0xFFFFFFFE
44#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
45#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
46#define C_0007C0_MWU_BUSY 0xFFFFFFFD
47#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
48#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
49#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
50#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
51#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
52#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
53#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
54#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
55#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
56#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
57#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
58#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
59#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
60#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
61#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
62#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
63#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
64#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
65#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
66#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
67#define C_0007C0_CSI_BUSY 0xFFFFDFFF
68#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
69#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
70#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
71#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
72#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
73#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
74#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
75#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
76#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
77#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
78#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
79#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
80#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
81#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
82#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
83#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
84#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
85#define C_0007C0_CP_BUSY 0x7FFFFFFF
86#define R_000E40_RBBM_STATUS 0x000E40
87#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
88#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
89#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
90#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
91#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
92#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
93#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
94#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
95#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
96#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
97#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
98#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
99#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
100#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
101#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
102#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
103#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
104#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
105#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
106#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
107#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
108#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
109#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
110#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
111#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
112#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
113#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
114#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
115#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
116#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
117#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
118#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
119#define C_000E40_E2_BUSY 0xFFFDFFFF
120#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
121#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
122#define C_000E40_RB2D_BUSY 0xFFFBFFFF
123#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
124#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
125#define C_000E40_RB3D_BUSY 0xFFF7FFFF
126#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
127#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
128#define C_000E40_VAP_BUSY 0xFFEFFFFF
129#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
130#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
131#define C_000E40_RE_BUSY 0xFFDFFFFF
132#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
133#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
134#define C_000E40_TAM_BUSY 0xFFBFFFFF
135#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
136#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
137#define C_000E40_TDM_BUSY 0xFF7FFFFF
138#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
139#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
140#define C_000E40_PB_BUSY 0xFEFFFFFF
141#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
142#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
143#define C_000E40_TIM_BUSY 0xFDFFFFFF
144#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
145#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
146#define C_000E40_GA_BUSY 0xFBFFFFFF
147#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
148#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
149#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
150#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
151#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
152#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
153#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
154#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
155#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
156#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
157#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
158#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
159#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
160#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
161#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
162
163
164#define R_000004_MC_FB_LOCATION 0x000004
165#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
166#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
167#define C_000004_MC_FB_START 0xFFFF0000
168#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
169#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
170#define C_000004_MC_FB_TOP 0x0000FFFF
171#define R_000005_MC_AGP_LOCATION 0x000005
172#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
173#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
174#define C_000005_MC_AGP_START 0xFFFF0000
175#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
176#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
177#define C_000005_MC_AGP_TOP 0x0000FFFF
178#define R_000006_AGP_BASE 0x000006
179#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
180#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
181#define C_000006_AGP_BASE_ADDR 0x00000000
182#define R_000007_AGP_BASE_2 0x000007
183#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
184#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
185#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
186
187#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index eab31c1d6df1..278f646bc18e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -33,8 +33,8 @@
33#include "radeon.h" 33#include "radeon.h"
34#include "radeon_mode.h" 34#include "radeon_mode.h"
35#include "r600d.h" 35#include "r600d.h"
36#include "avivod.h"
37#include "atom.h" 36#include "atom.h"
37#include "avivod.h"
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
65 65
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 66int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 67
68/* This files gather functions specifics to: 68/* r600,rv610,rv630,rv620,rv635,rv670 */
69 * r600,rv610,rv630,rv620,rv635,rv670
70 *
71 * Some of these functions might be used by newer ASICs.
72 */
73int r600_mc_wait_for_idle(struct radeon_device *rdev); 69int r600_mc_wait_for_idle(struct radeon_device *rdev);
74void r600_gpu_init(struct radeon_device *rdev); 70void r600_gpu_init(struct radeon_device *rdev);
75void r600_fini(struct radeon_device *rdev); 71void r600_fini(struct radeon_device *rdev);
76 72
77
78/* 73/*
79 * R600 PCIE GART 74 * R600 PCIE GART
80 */ 75 */
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
168 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 163 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
169 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 164 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
170 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 165 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
171 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); 166 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
172 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 167 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
173 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 168 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
174 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 169 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
225 radeon_gart_fini(rdev); 220 radeon_gart_fini(rdev);
226} 221}
227 222
223void r600_agp_enable(struct radeon_device *rdev)
224{
225 u32 tmp;
226 int i;
227
228 /* Setup L2 cache */
229 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
230 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
231 EFFECTIVE_L2_QUEUE_SIZE(7));
232 WREG32(VM_L2_CNTL2, 0);
233 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
234 /* Setup TLB control */
235 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
236 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
237 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
238 ENABLE_WAIT_L2_QUERY;
239 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
240 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
241 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
242 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
243 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
244 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
245 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
246 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
247 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
248 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
249 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
250 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
251 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
252 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
253 for (i = 0; i < 7; i++)
254 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
255}
256
228int r600_mc_wait_for_idle(struct radeon_device *rdev) 257int r600_mc_wait_for_idle(struct radeon_device *rdev)
229{ 258{
230 unsigned i; 259 unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
240 return -1; 269 return -1;
241} 270}
242 271
243static void r600_mc_resume(struct radeon_device *rdev) 272static void r600_mc_program(struct radeon_device *rdev)
244{ 273{
245 u32 d1vga_control, d2vga_control; 274 struct rv515_mc_save save;
246 u32 vga_render_control, vga_hdp_control;
247 u32 d1crtc_control, d2crtc_control;
248 u32 new_d1grph_primary, new_d1grph_secondary;
249 u32 new_d2grph_primary, new_d2grph_secondary;
250 u64 old_vram_start;
251 u32 tmp; 275 u32 tmp;
252 int i, j; 276 int i, j;
253 277
@@ -261,99 +285,64 @@ static void r600_mc_resume(struct radeon_device *rdev)
261 } 285 }
262 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 286 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
263 287
264 d1vga_control = RREG32(D1VGA_CONTROL); 288 rv515_mc_stop(rdev, &save);
265 d2vga_control = RREG32(D2VGA_CONTROL);
266 vga_render_control = RREG32(VGA_RENDER_CONTROL);
267 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
268 d1crtc_control = RREG32(D1CRTC_CONTROL);
269 d2crtc_control = RREG32(D2CRTC_CONTROL);
270 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
271 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
272 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
273 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
274 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
275 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
276 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
277 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
278 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
279
280 /* Stop all video */
281 WREG32(D1VGA_CONTROL, 0);
282 WREG32(D2VGA_CONTROL, 0);
283 WREG32(VGA_RENDER_CONTROL, 0);
284 WREG32(D1CRTC_UPDATE_LOCK, 1);
285 WREG32(D2CRTC_UPDATE_LOCK, 1);
286 WREG32(D1CRTC_CONTROL, 0);
287 WREG32(D2CRTC_CONTROL, 0);
288 WREG32(D1CRTC_UPDATE_LOCK, 0);
289 WREG32(D2CRTC_UPDATE_LOCK, 0);
290
291 mdelay(1);
292 if (r600_mc_wait_for_idle(rdev)) { 289 if (r600_mc_wait_for_idle(rdev)) {
293 printk(KERN_WARNING "[drm] MC not idle !\n"); 290 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
294 } 291 }
295 292 /* Lockout access through VGA aperture (doesn't exist before R600) */
296 /* Lockout access through VGA aperture*/
297 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 293 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
298
299 /* Update configuration */ 294 /* Update configuration */
300 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 295 if (rdev->flags & RADEON_IS_AGP) {
301 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); 296 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
297 /* VRAM before AGP */
298 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
299 rdev->mc.vram_start >> 12);
300 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
301 rdev->mc.gtt_end >> 12);
302 } else {
303 /* VRAM after AGP */
304 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
305 rdev->mc.gtt_start >> 12);
306 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
307 rdev->mc.vram_end >> 12);
308 }
309 } else {
310 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
311 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
312 }
302 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 313 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
303 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; 314 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
304 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 315 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
305 WREG32(MC_VM_FB_LOCATION, tmp); 316 WREG32(MC_VM_FB_LOCATION, tmp);
306 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 317 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
307 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 318 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
308 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 319 WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
309 if (rdev->flags & RADEON_IS_AGP) { 320 if (rdev->flags & RADEON_IS_AGP) {
310 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); 321 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
311 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 322 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
312 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 323 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
313 } else { 324 } else {
314 WREG32(MC_VM_AGP_BASE, 0); 325 WREG32(MC_VM_AGP_BASE, 0);
315 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 326 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
316 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 327 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
317 } 328 }
318 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
319 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
320 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
321 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
322 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
323
324 /* Unlock host access */
325 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
326
327 mdelay(1);
328 if (r600_mc_wait_for_idle(rdev)) { 329 if (r600_mc_wait_for_idle(rdev)) {
329 printk(KERN_WARNING "[drm] MC not idle !\n"); 330 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
330 } 331 }
331 332 rv515_mc_resume(rdev, &save);
332 /* Restore video state */
333 WREG32(D1CRTC_UPDATE_LOCK, 1);
334 WREG32(D2CRTC_UPDATE_LOCK, 1);
335 WREG32(D1CRTC_CONTROL, d1crtc_control);
336 WREG32(D2CRTC_CONTROL, d2crtc_control);
337 WREG32(D1CRTC_UPDATE_LOCK, 0);
338 WREG32(D2CRTC_UPDATE_LOCK, 0);
339 WREG32(D1VGA_CONTROL, d1vga_control);
340 WREG32(D2VGA_CONTROL, d2vga_control);
341 WREG32(VGA_RENDER_CONTROL, vga_render_control);
342
343 /* we need to own VRAM, so turn off the VGA renderer here 333 /* we need to own VRAM, so turn off the VGA renderer here
344 * to stop it overwriting our objects */ 334 * to stop it overwriting our objects */
345 radeon_avivo_vga_render_disable(rdev); 335 rv515_vga_render_disable(rdev);
346} 336}
347 337
348int r600_mc_init(struct radeon_device *rdev) 338int r600_mc_init(struct radeon_device *rdev)
349{ 339{
350 fixed20_12 a; 340 fixed20_12 a;
351 u32 tmp; 341 u32 tmp;
352 int chansize; 342 int chansize, numchan;
353 int r; 343 int r;
354 344
355 /* Get VRAM informations */ 345 /* Get VRAM informations */
356 rdev->mc.vram_width = 128;
357 rdev->mc.vram_is_ddr = true; 346 rdev->mc.vram_is_ddr = true;
358 tmp = RREG32(RAMCFG); 347 tmp = RREG32(RAMCFG);
359 if (tmp & CHANSIZE_OVERRIDE) { 348 if (tmp & CHANSIZE_OVERRIDE) {
@@ -363,23 +352,36 @@ int r600_mc_init(struct radeon_device *rdev)
363 } else { 352 } else {
364 chansize = 32; 353 chansize = 32;
365 } 354 }
366 if (rdev->family == CHIP_R600) { 355 tmp = RREG32(CHMAP);
367 rdev->mc.vram_width = 8 * chansize; 356 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
368 } else if (rdev->family == CHIP_RV670) { 357 case 0:
369 rdev->mc.vram_width = 4 * chansize; 358 default:
370 } else if ((rdev->family == CHIP_RV610) || 359 numchan = 1;
371 (rdev->family == CHIP_RV620)) { 360 break;
372 rdev->mc.vram_width = chansize; 361 case 1:
373 } else if ((rdev->family == CHIP_RV630) || 362 numchan = 2;
374 (rdev->family == CHIP_RV635)) { 363 break;
375 rdev->mc.vram_width = 2 * chansize; 364 case 2:
365 numchan = 4;
366 break;
367 case 3:
368 numchan = 8;
369 break;
376 } 370 }
371 rdev->mc.vram_width = numchan * chansize;
377 /* Could aper size report 0 ? */ 372 /* Could aper size report 0 ? */
378 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 373 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
379 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 374 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
380 /* Setup GPU memory space */ 375 /* Setup GPU memory space */
381 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 376 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
382 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 377 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
378
379 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
380 rdev->mc.mc_vram_size = rdev->mc.aper_size;
381
382 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
383 rdev->mc.real_vram_size = rdev->mc.aper_size;
384
383 if (rdev->flags & RADEON_IS_AGP) { 385 if (rdev->flags & RADEON_IS_AGP) {
384 r = radeon_agp_init(rdev); 386 r = radeon_agp_init(rdev);
385 if (r) 387 if (r)
@@ -407,40 +409,34 @@ int r600_mc_init(struct radeon_device *rdev)
407 rdev->mc.gtt_location = rdev->mc.mc_vram_size; 409 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
408 } 410 }
409 } else { 411 } else {
410 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { 412 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
411 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) & 413 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
412 0xFFFF) << 24; 414 0xFFFF) << 24;
413 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 415 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
414 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; 416 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
415 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { 417 /* Enough place after vram */
416 /* Enough place after vram */ 418 rdev->mc.gtt_location = tmp;
417 rdev->mc.gtt_location = tmp; 419 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
418 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) { 420 /* Enough place before vram */
419 /* Enough place before vram */ 421 rdev->mc.gtt_location = 0;
422 } else {
423 /* Not enough place after or before shrink
424 * gart size
425 */
426 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
420 rdev->mc.gtt_location = 0; 427 rdev->mc.gtt_location = 0;
428 rdev->mc.gtt_size = rdev->mc.vram_location;
421 } else { 429 } else {
422 /* Not enough place after or before shrink 430 rdev->mc.gtt_location = tmp;
423 * gart size 431 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
424 */
425 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
426 rdev->mc.gtt_location = 0;
427 rdev->mc.gtt_size = rdev->mc.vram_location;
428 } else {
429 rdev->mc.gtt_location = tmp;
430 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
431 }
432 } 432 }
433 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
434 } else {
435 rdev->mc.vram_location = 0x00000000UL;
436 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
437 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
438 } 433 }
434 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
439 } 435 }
440 rdev->mc.vram_start = rdev->mc.vram_location; 436 rdev->mc.vram_start = rdev->mc.vram_location;
441 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; 437 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
442 rdev->mc.gtt_start = rdev->mc.gtt_location; 438 rdev->mc.gtt_start = rdev->mc.gtt_location;
443 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; 439 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
444 /* FIXME: we should enforce default clock in case GPU is not in 440 /* FIXME: we should enforce default clock in case GPU is not in
445 * default setup 441 * default setup
446 */ 442 */
@@ -456,6 +452,7 @@ int r600_mc_init(struct radeon_device *rdev)
456 */ 452 */
457int r600_gpu_soft_reset(struct radeon_device *rdev) 453int r600_gpu_soft_reset(struct radeon_device *rdev)
458{ 454{
455 struct rv515_mc_save save;
459 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 456 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
460 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | 457 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
461 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | 458 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -473,13 +470,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
473 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | 470 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
474 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 471 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
475 u32 srbm_reset = 0; 472 u32 srbm_reset = 0;
473 u32 tmp;
476 474
475 dev_info(rdev->dev, "GPU softreset \n");
476 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
477 RREG32(R_008010_GRBM_STATUS));
478 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
479 RREG32(R_008014_GRBM_STATUS2));
480 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
481 RREG32(R_000E50_SRBM_STATUS));
482 rv515_mc_stop(rdev, &save);
483 if (r600_mc_wait_for_idle(rdev)) {
484 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
485 }
477 /* Disable CP parsing/prefetching */ 486 /* Disable CP parsing/prefetching */
478 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); 487 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
479 /* Check if any of the rendering block is busy and reset it */ 488 /* Check if any of the rendering block is busy and reset it */
480 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 489 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
481 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 490 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
482 WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | 491 tmp = S_008020_SOFT_RESET_CR(1) |
483 S_008020_SOFT_RESET_DB(1) | 492 S_008020_SOFT_RESET_DB(1) |
484 S_008020_SOFT_RESET_CB(1) | 493 S_008020_SOFT_RESET_CB(1) |
485 S_008020_SOFT_RESET_PA(1) | 494 S_008020_SOFT_RESET_PA(1) |
@@ -491,14 +500,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
491 S_008020_SOFT_RESET_TC(1) | 500 S_008020_SOFT_RESET_TC(1) |
492 S_008020_SOFT_RESET_TA(1) | 501 S_008020_SOFT_RESET_TA(1) |
493 S_008020_SOFT_RESET_VC(1) | 502 S_008020_SOFT_RESET_VC(1) |
494 S_008020_SOFT_RESET_VGT(1)); 503 S_008020_SOFT_RESET_VGT(1);
504 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
505 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
495 (void)RREG32(R_008020_GRBM_SOFT_RESET); 506 (void)RREG32(R_008020_GRBM_SOFT_RESET);
496 udelay(50); 507 udelay(50);
497 WREG32(R_008020_GRBM_SOFT_RESET, 0); 508 WREG32(R_008020_GRBM_SOFT_RESET, 0);
498 (void)RREG32(R_008020_GRBM_SOFT_RESET); 509 (void)RREG32(R_008020_GRBM_SOFT_RESET);
499 } 510 }
500 /* Reset CP (we always reset CP) */ 511 /* Reset CP (we always reset CP) */
501 WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); 512 tmp = S_008020_SOFT_RESET_CP(1);
513 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
514 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
502 (void)RREG32(R_008020_GRBM_SOFT_RESET); 515 (void)RREG32(R_008020_GRBM_SOFT_RESET);
503 udelay(50); 516 udelay(50);
504 WREG32(R_008020_GRBM_SOFT_RESET, 0); 517 WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -526,6 +539,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
526 srbm_reset |= S_000E60_SOFT_RESET_RLC(1); 539 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
527 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) 540 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
528 srbm_reset |= S_000E60_SOFT_RESET_SEM(1); 541 srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
542 if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
543 srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
544 dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
545 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
546 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
547 udelay(50);
548 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
549 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
529 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); 550 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
530 (void)RREG32(R_000E60_SRBM_SOFT_RESET); 551 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
531 udelay(50); 552 udelay(50);
@@ -533,6 +554,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
533 (void)RREG32(R_000E60_SRBM_SOFT_RESET); 554 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
534 /* Wait a little for things to settle down */ 555 /* Wait a little for things to settle down */
535 udelay(50); 556 udelay(50);
557 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
558 RREG32(R_008010_GRBM_STATUS));
559 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
560 RREG32(R_008014_GRBM_STATUS2));
561 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
562 RREG32(R_000E50_SRBM_STATUS));
563 /* After reset we need to reinit the asic as GPU often endup in an
564 * incoherent state.
565 */
566 atom_asic_init(rdev->mode_info.atom_context);
567 rv515_mc_resume(rdev, &save);
536 return 0; 568 return 0;
537} 569}
538 570
@@ -826,7 +858,8 @@ void r600_gpu_init(struct radeon_device *rdev)
826 ((rdev->family) == CHIP_RV630) || 858 ((rdev->family) == CHIP_RV630) ||
827 ((rdev->family) == CHIP_RV610) || 859 ((rdev->family) == CHIP_RV610) ||
828 ((rdev->family) == CHIP_RV620) || 860 ((rdev->family) == CHIP_RV620) ||
829 ((rdev->family) == CHIP_RS780)) { 861 ((rdev->family) == CHIP_RS780) ||
862 ((rdev->family) == CHIP_RS880)) {
830 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 863 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
831 } else { 864 } else {
832 WREG32(DB_DEBUG, 0); 865 WREG32(DB_DEBUG, 0);
@@ -843,7 +876,8 @@ void r600_gpu_init(struct radeon_device *rdev)
843 tmp = RREG32(SQ_MS_FIFO_SIZES); 876 tmp = RREG32(SQ_MS_FIFO_SIZES);
844 if (((rdev->family) == CHIP_RV610) || 877 if (((rdev->family) == CHIP_RV610) ||
845 ((rdev->family) == CHIP_RV620) || 878 ((rdev->family) == CHIP_RV620) ||
846 ((rdev->family) == CHIP_RS780)) { 879 ((rdev->family) == CHIP_RS780) ||
880 ((rdev->family) == CHIP_RS880)) {
847 tmp = (CACHE_FIFO_SIZE(0xa) | 881 tmp = (CACHE_FIFO_SIZE(0xa) |
848 FETCH_FIFO_HIWATER(0xa) | 882 FETCH_FIFO_HIWATER(0xa) |
849 DONE_FIFO_HIWATER(0xe0) | 883 DONE_FIFO_HIWATER(0xe0) |
@@ -886,7 +920,8 @@ void r600_gpu_init(struct radeon_device *rdev)
886 NUM_ES_STACK_ENTRIES(0)); 920 NUM_ES_STACK_ENTRIES(0));
887 } else if (((rdev->family) == CHIP_RV610) || 921 } else if (((rdev->family) == CHIP_RV610) ||
888 ((rdev->family) == CHIP_RV620) || 922 ((rdev->family) == CHIP_RV620) ||
889 ((rdev->family) == CHIP_RS780)) { 923 ((rdev->family) == CHIP_RS780) ||
924 ((rdev->family) == CHIP_RS880)) {
890 /* no vertex cache */ 925 /* no vertex cache */
891 sq_config &= ~VC_ENABLE; 926 sq_config &= ~VC_ENABLE;
892 927
@@ -943,7 +978,8 @@ void r600_gpu_init(struct radeon_device *rdev)
943 978
944 if (((rdev->family) == CHIP_RV610) || 979 if (((rdev->family) == CHIP_RV610) ||
945 ((rdev->family) == CHIP_RV620) || 980 ((rdev->family) == CHIP_RV620) ||
946 ((rdev->family) == CHIP_RS780)) { 981 ((rdev->family) == CHIP_RS780) ||
982 ((rdev->family) == CHIP_RS880)) {
947 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 983 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
948 } else { 984 } else {
949 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 985 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -969,8 +1005,9 @@ void r600_gpu_init(struct radeon_device *rdev)
969 tmp = rdev->config.r600.max_pipes * 16; 1005 tmp = rdev->config.r600.max_pipes * 16;
970 switch (rdev->family) { 1006 switch (rdev->family) {
971 case CHIP_RV610: 1007 case CHIP_RV610:
972 case CHIP_RS780:
973 case CHIP_RV620: 1008 case CHIP_RV620:
1009 case CHIP_RS780:
1010 case CHIP_RS880:
974 tmp += 32; 1011 tmp += 32;
975 break; 1012 break;
976 case CHIP_RV670: 1013 case CHIP_RV670:
@@ -1011,8 +1048,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1011 1048
1012 switch (rdev->family) { 1049 switch (rdev->family) {
1013 case CHIP_RV610: 1050 case CHIP_RV610:
1014 case CHIP_RS780:
1015 case CHIP_RV620: 1051 case CHIP_RV620:
1052 case CHIP_RS780:
1053 case CHIP_RS880:
1016 tmp = TC_L2_SIZE(8); 1054 tmp = TC_L2_SIZE(8);
1017 break; 1055 break;
1018 case CHIP_RV630: 1056 case CHIP_RV630:
@@ -1234,19 +1272,17 @@ int r600_cp_resume(struct radeon_device *rdev)
1234 1272
1235 /* Set ring buffer size */ 1273 /* Set ring buffer size */
1236 rb_bufsz = drm_order(rdev->cp.ring_size / 8); 1274 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1275 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1237#ifdef __BIG_ENDIAN 1276#ifdef __BIG_ENDIAN
1238 WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE | 1277 tmp |= BUF_SWAP_32BIT;
1239 (drm_order(4096/8) << 8) | rb_bufsz);
1240#else
1241 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
1242#endif 1278#endif
1279 WREG32(CP_RB_CNTL, tmp);
1243 WREG32(CP_SEM_WAIT_TIMER, 0x4); 1280 WREG32(CP_SEM_WAIT_TIMER, 0x4);
1244 1281
1245 /* Set the write pointer delay */ 1282 /* Set the write pointer delay */
1246 WREG32(CP_RB_WPTR_DELAY, 0); 1283 WREG32(CP_RB_WPTR_DELAY, 0);
1247 1284
1248 /* Initialize the ring buffer's read and write pointers */ 1285 /* Initialize the ring buffer's read and write pointers */
1249 tmp = RREG32(CP_RB_CNTL);
1250 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1286 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1251 WREG32(CP_RB_RPTR_WR, 0); 1287 WREG32(CP_RB_RPTR_WR, 0);
1252 WREG32(CP_RB_WPTR, 0); 1288 WREG32(CP_RB_WPTR, 0);
@@ -1343,32 +1379,47 @@ int r600_ring_test(struct radeon_device *rdev)
1343 return r; 1379 return r;
1344} 1380}
1345 1381
1346/* 1382void r600_wb_disable(struct radeon_device *rdev)
1347 * Writeback 1383{
1348 */ 1384 WREG32(SCRATCH_UMSK, 0);
1349int r600_wb_init(struct radeon_device *rdev) 1385 if (rdev->wb.wb_obj) {
1386 radeon_object_kunmap(rdev->wb.wb_obj);
1387 radeon_object_unpin(rdev->wb.wb_obj);
1388 }
1389}
1390
1391void r600_wb_fini(struct radeon_device *rdev)
1392{
1393 r600_wb_disable(rdev);
1394 if (rdev->wb.wb_obj) {
1395 radeon_object_unref(&rdev->wb.wb_obj);
1396 rdev->wb.wb = NULL;
1397 rdev->wb.wb_obj = NULL;
1398 }
1399}
1400
1401int r600_wb_enable(struct radeon_device *rdev)
1350{ 1402{
1351 int r; 1403 int r;
1352 1404
1353 if (rdev->wb.wb_obj == NULL) { 1405 if (rdev->wb.wb_obj == NULL) {
1354 r = radeon_object_create(rdev, NULL, 4096, 1406 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1355 true, 1407 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
1356 RADEON_GEM_DOMAIN_GTT,
1357 false, &rdev->wb.wb_obj);
1358 if (r) { 1408 if (r) {
1359 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); 1409 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
1360 return r; 1410 return r;
1361 } 1411 }
1362 r = radeon_object_pin(rdev->wb.wb_obj, 1412 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1363 RADEON_GEM_DOMAIN_GTT, 1413 &rdev->wb.gpu_addr);
1364 &rdev->wb.gpu_addr);
1365 if (r) { 1414 if (r) {
1366 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); 1415 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
1416 r600_wb_fini(rdev);
1367 return r; 1417 return r;
1368 } 1418 }
1369 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1419 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1370 if (r) { 1420 if (r) {
1371 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); 1421 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
1422 r600_wb_fini(rdev);
1372 return r; 1423 return r;
1373 } 1424 }
1374 } 1425 }
@@ -1379,21 +1430,6 @@ int r600_wb_init(struct radeon_device *rdev)
1379 return 0; 1430 return 0;
1380} 1431}
1381 1432
1382void r600_wb_fini(struct radeon_device *rdev)
1383{
1384 if (rdev->wb.wb_obj) {
1385 radeon_object_kunmap(rdev->wb.wb_obj);
1386 radeon_object_unpin(rdev->wb.wb_obj);
1387 radeon_object_unref(&rdev->wb.wb_obj);
1388 rdev->wb.wb = NULL;
1389 rdev->wb.wb_obj = NULL;
1390 }
1391}
1392
1393
1394/*
1395 * CS
1396 */
1397void r600_fence_ring_emit(struct radeon_device *rdev, 1433void r600_fence_ring_emit(struct radeon_device *rdev,
1398 struct radeon_fence *fence) 1434 struct radeon_fence *fence)
1399{ 1435{
@@ -1417,8 +1453,8 @@ int r600_copy_blit(struct radeon_device *rdev,
1417 uint64_t src_offset, uint64_t dst_offset, 1453 uint64_t src_offset, uint64_t dst_offset,
1418 unsigned num_pages, struct radeon_fence *fence) 1454 unsigned num_pages, struct radeon_fence *fence)
1419{ 1455{
1420 r600_blit_prepare_copy(rdev, num_pages * 4096); 1456 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1421 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096); 1457 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1422 r600_blit_done_copy(rdev, fence); 1458 r600_blit_done_copy(rdev, fence);
1423 return 0; 1459 return 0;
1424} 1460}
@@ -1470,11 +1506,14 @@ int r600_startup(struct radeon_device *rdev)
1470{ 1506{
1471 int r; 1507 int r;
1472 1508
1473 r600_gpu_reset(rdev); 1509 r600_mc_program(rdev);
1474 r600_mc_resume(rdev); 1510 if (rdev->flags & RADEON_IS_AGP) {
1475 r = r600_pcie_gart_enable(rdev); 1511 r600_agp_enable(rdev);
1476 if (r) 1512 } else {
1477 return r; 1513 r = r600_pcie_gart_enable(rdev);
1514 if (r)
1515 return r;
1516 }
1478 r600_gpu_init(rdev); 1517 r600_gpu_init(rdev);
1479 1518
1480 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1519 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1493,9 +1532,8 @@ int r600_startup(struct radeon_device *rdev)
1493 r = r600_cp_resume(rdev); 1532 r = r600_cp_resume(rdev);
1494 if (r) 1533 if (r)
1495 return r; 1534 return r;
1496 r = r600_wb_init(rdev); 1535 /* write back buffer are not vital so don't worry about failure */
1497 if (r) 1536 r600_wb_enable(rdev);
1498 return r;
1499 return 0; 1537 return 0;
1500} 1538}
1501 1539
@@ -1517,15 +1555,12 @@ int r600_resume(struct radeon_device *rdev)
1517{ 1555{
1518 int r; 1556 int r;
1519 1557
1520 if (radeon_gpu_reset(rdev)) { 1558 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
1521 /* FIXME: what do we want to do here ? */ 1559 * posting will perform necessary task to bring back GPU into good
1522 } 1560 * shape.
1561 */
1523 /* post card */ 1562 /* post card */
1524 if (rdev->is_atom_bios) { 1563 atom_asic_init(rdev->mode_info.atom_context);
1525 atom_asic_init(rdev->mode_info.atom_context);
1526 } else {
1527 radeon_combios_asic_init(rdev->ddev);
1528 }
1529 /* Initialize clocks */ 1564 /* Initialize clocks */
1530 r = radeon_clocks_init(rdev); 1565 r = radeon_clocks_init(rdev);
1531 if (r) { 1566 if (r) {
@@ -1538,7 +1573,7 @@ int r600_resume(struct radeon_device *rdev)
1538 return r; 1573 return r;
1539 } 1574 }
1540 1575
1541 r = radeon_ib_test(rdev); 1576 r = r600_ib_test(rdev);
1542 if (r) { 1577 if (r) {
1543 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1578 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1544 return r; 1579 return r;
@@ -1546,13 +1581,12 @@ int r600_resume(struct radeon_device *rdev)
1546 return r; 1581 return r;
1547} 1582}
1548 1583
1549
1550int r600_suspend(struct radeon_device *rdev) 1584int r600_suspend(struct radeon_device *rdev)
1551{ 1585{
1552 /* FIXME: we should wait for ring to be empty */ 1586 /* FIXME: we should wait for ring to be empty */
1553 r600_cp_stop(rdev); 1587 r600_cp_stop(rdev);
1554 rdev->cp.ready = false; 1588 rdev->cp.ready = false;
1555 1589 r600_wb_disable(rdev);
1556 r600_pcie_gart_disable(rdev); 1590 r600_pcie_gart_disable(rdev);
1557 /* unpin shaders bo */ 1591 /* unpin shaders bo */
1558 radeon_object_unpin(rdev->r600_blit.shader_obj); 1592 radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1569,7 +1603,6 @@ int r600_init(struct radeon_device *rdev)
1569{ 1603{
1570 int r; 1604 int r;
1571 1605
1572 rdev->new_init_path = true;
1573 r = radeon_dummy_page_init(rdev); 1606 r = radeon_dummy_page_init(rdev);
1574 if (r) 1607 if (r)
1575 return r; 1608 return r;
@@ -1586,8 +1619,10 @@ int r600_init(struct radeon_device *rdev)
1586 return -EINVAL; 1619 return -EINVAL;
1587 } 1620 }
1588 /* Must be an ATOMBIOS */ 1621 /* Must be an ATOMBIOS */
1589 if (!rdev->is_atom_bios) 1622 if (!rdev->is_atom_bios) {
1623 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1590 return -EINVAL; 1624 return -EINVAL;
1625 }
1591 r = radeon_atombios_init(rdev); 1626 r = radeon_atombios_init(rdev);
1592 if (r) 1627 if (r)
1593 return r; 1628 return r;
@@ -1600,24 +1635,20 @@ int r600_init(struct radeon_device *rdev)
1600 r600_scratch_init(rdev); 1635 r600_scratch_init(rdev);
1601 /* Initialize surface registers */ 1636 /* Initialize surface registers */
1602 radeon_surface_init(rdev); 1637 radeon_surface_init(rdev);
1638 /* Initialize clocks */
1603 radeon_get_clock_info(rdev->ddev); 1639 radeon_get_clock_info(rdev->ddev);
1604 r = radeon_clocks_init(rdev); 1640 r = radeon_clocks_init(rdev);
1605 if (r) 1641 if (r)
1606 return r; 1642 return r;
1643 /* Initialize power management */
1644 radeon_pm_init(rdev);
1607 /* Fence driver */ 1645 /* Fence driver */
1608 r = radeon_fence_driver_init(rdev); 1646 r = radeon_fence_driver_init(rdev);
1609 if (r) 1647 if (r)
1610 return r; 1648 return r;
1611 r = r600_mc_init(rdev); 1649 r = r600_mc_init(rdev);
1612 if (r) { 1650 if (r)
1613 if (rdev->flags & RADEON_IS_AGP) {
1614 /* Retry with disabling AGP */
1615 r600_fini(rdev);
1616 rdev->flags &= ~RADEON_IS_AGP;
1617 return r600_init(rdev);
1618 }
1619 return r; 1651 return r;
1620 }
1621 /* Memory manager */ 1652 /* Memory manager */
1622 r = radeon_object_init(rdev); 1653 r = radeon_object_init(rdev);
1623 if (r) 1654 if (r)
@@ -1646,12 +1677,10 @@ int r600_init(struct radeon_device *rdev)
1646 1677
1647 r = r600_startup(rdev); 1678 r = r600_startup(rdev);
1648 if (r) { 1679 if (r) {
1649 if (rdev->flags & RADEON_IS_AGP) { 1680 r600_suspend(rdev);
1650 /* Retry with disabling AGP */ 1681 r600_wb_fini(rdev);
1651 r600_fini(rdev); 1682 radeon_ring_fini(rdev);
1652 rdev->flags &= ~RADEON_IS_AGP; 1683 r600_pcie_gart_fini(rdev);
1653 return r600_init(rdev);
1654 }
1655 rdev->accel_working = false; 1684 rdev->accel_working = false;
1656 } 1685 }
1657 if (rdev->accel_working) { 1686 if (rdev->accel_working) {
@@ -1660,7 +1689,7 @@ int r600_init(struct radeon_device *rdev)
1660 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1689 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1661 rdev->accel_working = false; 1690 rdev->accel_working = false;
1662 } 1691 }
1663 r = radeon_ib_test(rdev); 1692 r = r600_ib_test(rdev);
1664 if (r) { 1693 if (r) {
1665 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1694 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1666 rdev->accel_working = false; 1695 rdev->accel_working = false;
@@ -1676,19 +1705,15 @@ void r600_fini(struct radeon_device *rdev)
1676 1705
1677 r600_blit_fini(rdev); 1706 r600_blit_fini(rdev);
1678 radeon_ring_fini(rdev); 1707 radeon_ring_fini(rdev);
1708 r600_wb_fini(rdev);
1679 r600_pcie_gart_fini(rdev); 1709 r600_pcie_gart_fini(rdev);
1680 radeon_gem_fini(rdev); 1710 radeon_gem_fini(rdev);
1681 radeon_fence_driver_fini(rdev); 1711 radeon_fence_driver_fini(rdev);
1682 radeon_clocks_fini(rdev); 1712 radeon_clocks_fini(rdev);
1683#if __OS_HAS_AGP
1684 if (rdev->flags & RADEON_IS_AGP) 1713 if (rdev->flags & RADEON_IS_AGP)
1685 radeon_agp_fini(rdev); 1714 radeon_agp_fini(rdev);
1686#endif
1687 radeon_object_fini(rdev); 1715 radeon_object_fini(rdev);
1688 if (rdev->is_atom_bios) 1716 radeon_atombios_fini(rdev);
1689 radeon_atombios_fini(rdev);
1690 else
1691 radeon_combios_fini(rdev);
1692 kfree(rdev->bios); 1717 kfree(rdev->bios);
1693 rdev->bios = NULL; 1718 rdev->bios = NULL;
1694 radeon_dummy_page_fini(rdev); 1719 radeon_dummy_page_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index d988eece0187..5ea432347589 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -774,11 +774,10 @@ r600_blit_swap(struct drm_device *dev,
774{ 774{
775 drm_radeon_private_t *dev_priv = dev->dev_private; 775 drm_radeon_private_t *dev_priv = dev->dev_private;
776 int cb_format, tex_format; 776 int cb_format, tex_format;
777 int sx2, sy2, dx2, dy2;
777 u64 vb_addr; 778 u64 vb_addr;
778 u32 *vb; 779 u32 *vb;
779 780
780 vb = r600_nomm_get_vb_ptr(dev);
781
782 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { 781 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
783 782
784 r600_nomm_put_vb(dev); 783 r600_nomm_put_vb(dev);
@@ -787,19 +786,13 @@ r600_blit_swap(struct drm_device *dev,
787 return; 786 return;
788 787
789 set_shaders(dev); 788 set_shaders(dev);
790 vb = r600_nomm_get_vb_ptr(dev);
791 } 789 }
790 vb = r600_nomm_get_vb_ptr(dev);
792 791
793 if (cpp == 4) { 792 sx2 = sx + w;
794 cb_format = COLOR_8_8_8_8; 793 sy2 = sy + h;
795 tex_format = FMT_8_8_8_8; 794 dx2 = dx + w;
796 } else if (cpp == 2) { 795 dy2 = dy + h;
797 cb_format = COLOR_5_6_5;
798 tex_format = FMT_5_6_5;
799 } else {
800 cb_format = COLOR_8;
801 tex_format = FMT_8;
802 }
803 796
804 vb[0] = i2f(dx); 797 vb[0] = i2f(dx);
805 vb[1] = i2f(dy); 798 vb[1] = i2f(dy);
@@ -807,31 +800,46 @@ r600_blit_swap(struct drm_device *dev,
807 vb[3] = i2f(sy); 800 vb[3] = i2f(sy);
808 801
809 vb[4] = i2f(dx); 802 vb[4] = i2f(dx);
810 vb[5] = i2f(dy + h); 803 vb[5] = i2f(dy2);
811 vb[6] = i2f(sx); 804 vb[6] = i2f(sx);
812 vb[7] = i2f(sy + h); 805 vb[7] = i2f(sy2);
806
807 vb[8] = i2f(dx2);
808 vb[9] = i2f(dy2);
809 vb[10] = i2f(sx2);
810 vb[11] = i2f(sy2);
813 811
814 vb[8] = i2f(dx + w); 812 switch(cpp) {
815 vb[9] = i2f(dy + h); 813 case 4:
816 vb[10] = i2f(sx + w); 814 cb_format = COLOR_8_8_8_8;
817 vb[11] = i2f(sy + h); 815 tex_format = FMT_8_8_8_8;
816 break;
817 case 2:
818 cb_format = COLOR_5_6_5;
819 tex_format = FMT_5_6_5;
820 break;
821 default:
822 cb_format = COLOR_8;
823 tex_format = FMT_8;
824 break;
825 }
818 826
819 /* src */ 827 /* src */
820 set_tex_resource(dev_priv, tex_format, 828 set_tex_resource(dev_priv, tex_format,
821 src_pitch / cpp, 829 src_pitch / cpp,
822 sy + h, src_pitch / cpp, 830 sy2, src_pitch / cpp,
823 src_gpu_addr); 831 src_gpu_addr);
824 832
825 cp_set_surface_sync(dev_priv, 833 cp_set_surface_sync(dev_priv,
826 R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr); 834 R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
827 835
828 /* dst */ 836 /* dst */
829 set_render_target(dev_priv, cb_format, 837 set_render_target(dev_priv, cb_format,
830 dst_pitch / cpp, dy + h, 838 dst_pitch / cpp, dy2,
831 dst_gpu_addr); 839 dst_gpu_addr);
832 840
833 /* scissors */ 841 /* scissors */
834 set_scissors(dev_priv, dx, dy, dx + w, dy + h); 842 set_scissors(dev_priv, dx, dy, dx2, dy2);
835 843
836 /* Vertex buffer setup */ 844 /* Vertex buffer setup */
837 vb_addr = dev_priv->gart_buffers_offset + 845 vb_addr = dev_priv->gart_buffers_offset +
@@ -844,7 +852,7 @@ r600_blit_swap(struct drm_device *dev,
844 852
845 cp_set_surface_sync(dev_priv, 853 cp_set_surface_sync(dev_priv,
846 R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, 854 R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
847 dst_pitch * (dy + h), dst_gpu_addr); 855 dst_pitch * dy2, dst_gpu_addr);
848 856
849 dev_priv->blit_vb->used += 12 * 4; 857 dev_priv->blit_vb->used += 12 * 4;
850} 858}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index acae33e2ad51..dbf716e1fbf3 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -368,7 +368,7 @@ set_default_state(struct radeon_device *rdev)
368 if ((rdev->family == CHIP_RV610) || 368 if ((rdev->family == CHIP_RV610) ||
369 (rdev->family == CHIP_RV620) || 369 (rdev->family == CHIP_RV620) ||
370 (rdev->family == CHIP_RS780) || 370 (rdev->family == CHIP_RS780) ||
371 (rdev->family == CHIP_RS780) || 371 (rdev->family == CHIP_RS880) ||
372 (rdev->family == CHIP_RV710)) 372 (rdev->family == CHIP_RV710))
373 sq_config = 0; 373 sq_config = 0;
374 else 374 else
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 33b89cd8743e..0d820764f340 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,7 +28,6 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "avivod.h"
32 31
33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
34 struct radeon_cs_reloc **cs_reloc); 33 struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
57 idx, ib_chunk->length_dw); 56 idx, ib_chunk->length_dw);
58 return -EINVAL; 57 return -EINVAL;
59 } 58 }
60 header = ib_chunk->kdata[idx]; 59 header = radeon_get_ib_value(p, idx);
61 pkt->idx = idx; 60 pkt->idx = idx;
62 pkt->type = CP_PACKET_GET_TYPE(header); 61 pkt->type = CP_PACKET_GET_TYPE(header);
63 pkt->count = CP_PACKET_GET_COUNT(header); 62 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
98static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 97static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
99 struct radeon_cs_reloc **cs_reloc) 98 struct radeon_cs_reloc **cs_reloc)
100{ 99{
101 struct radeon_cs_chunk *ib_chunk;
102 struct radeon_cs_chunk *relocs_chunk; 100 struct radeon_cs_chunk *relocs_chunk;
103 struct radeon_cs_packet p3reloc; 101 struct radeon_cs_packet p3reloc;
104 unsigned idx; 102 unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
109 return -EINVAL; 107 return -EINVAL;
110 } 108 }
111 *cs_reloc = NULL; 109 *cs_reloc = NULL;
112 ib_chunk = &p->chunks[p->chunk_ib_idx];
113 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 110 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
114 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 111 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
115 if (r) { 112 if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
121 p3reloc.idx); 118 p3reloc.idx);
122 return -EINVAL; 119 return -EINVAL;
123 } 120 }
124 idx = ib_chunk->kdata[p3reloc.idx + 1]; 121 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
125 if (idx >= relocs_chunk->length_dw) { 122 if (idx >= relocs_chunk->length_dw) {
126 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 123 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
127 idx, relocs_chunk->length_dw); 124 idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
146static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 143static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
147 struct radeon_cs_reloc **cs_reloc) 144 struct radeon_cs_reloc **cs_reloc)
148{ 145{
149 struct radeon_cs_chunk *ib_chunk;
150 struct radeon_cs_chunk *relocs_chunk; 146 struct radeon_cs_chunk *relocs_chunk;
151 struct radeon_cs_packet p3reloc; 147 struct radeon_cs_packet p3reloc;
152 unsigned idx; 148 unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
157 return -EINVAL; 153 return -EINVAL;
158 } 154 }
159 *cs_reloc = NULL; 155 *cs_reloc = NULL;
160 ib_chunk = &p->chunks[p->chunk_ib_idx];
161 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 156 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
162 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 157 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
163 if (r) { 158 if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
169 p3reloc.idx); 164 p3reloc.idx);
170 return -EINVAL; 165 return -EINVAL;
171 } 166 }
172 idx = ib_chunk->kdata[p3reloc.idx + 1]; 167 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
173 if (idx >= relocs_chunk->length_dw) { 168 if (idx >= relocs_chunk->length_dw) {
174 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 169 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
175 idx, relocs_chunk->length_dw); 170 idx, relocs_chunk->length_dw);
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
181 return 0; 176 return 0;
182} 177}
183 178
179/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context.
182 *
183 * Userspace sends a special sequence for VLINE waits.
184 * PACKET0 - VLINE_START_END + value
185 * PACKET3 - WAIT_REG_MEM poll vline status reg
186 * RELOC (P3) - crtc_id in reloc.
187 *
188 * This function parses this and relocates the VLINE START END
189 * and WAIT_REG_MEM packets to the correct crtc.
190 * It also detects a switched off crtc and nulls out the
191 * wait in that case.
192 */
193static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
194{
195 struct drm_mode_object *obj;
196 struct drm_crtc *crtc;
197 struct radeon_crtc *radeon_crtc;
198 struct radeon_cs_packet p3reloc, wait_reg_mem;
199 int crtc_id;
200 int r;
201 uint32_t header, h_idx, reg, wait_reg_mem_info;
202 volatile uint32_t *ib;
203
204 ib = p->ib->ptr;
205
206 /* parse the WAIT_REG_MEM */
207 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
208 if (r)
209 return r;
210
211 /* check its a WAIT_REG_MEM */
212 if (wait_reg_mem.type != PACKET_TYPE3 ||
213 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
214 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
215 r = -EINVAL;
216 return r;
217 }
218
219 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
220 /* bit 4 is reg (0) or mem (1) */
221 if (wait_reg_mem_info & 0x10) {
222 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
223 r = -EINVAL;
224 return r;
225 }
226 /* waiting for value to be equal */
227 if ((wait_reg_mem_info & 0x7) != 0x3) {
228 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
229 r = -EINVAL;
230 return r;
231 }
232 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
233 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
234 r = -EINVAL;
235 return r;
236 }
237
238 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
239 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
240 r = -EINVAL;
241 return r;
242 }
243
244 /* jump over the NOP */
245 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
246 if (r)
247 return r;
248
249 h_idx = p->idx - 2;
250 p->idx += wait_reg_mem.count + 2;
251 p->idx += p3reloc.count + 2;
252
253 header = radeon_get_ib_value(p, h_idx);
254 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
255 reg = CP_PACKET0_GET_REG(header);
256 mutex_lock(&p->rdev->ddev->mode_config.mutex);
257 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
258 if (!obj) {
259 DRM_ERROR("cannot find crtc %d\n", crtc_id);
260 r = -EINVAL;
261 goto out;
262 }
263 crtc = obj_to_crtc(obj);
264 radeon_crtc = to_radeon_crtc(crtc);
265 crtc_id = radeon_crtc->crtc_id;
266
267 if (!crtc->enabled) {
268 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
269 ib[h_idx + 2] = PACKET2(0);
270 ib[h_idx + 3] = PACKET2(0);
271 ib[h_idx + 4] = PACKET2(0);
272 ib[h_idx + 5] = PACKET2(0);
273 ib[h_idx + 6] = PACKET2(0);
274 ib[h_idx + 7] = PACKET2(0);
275 ib[h_idx + 8] = PACKET2(0);
276 } else if (crtc_id == 1) {
277 switch (reg) {
278 case AVIVO_D1MODE_VLINE_START_END:
279 header &= ~R600_CP_PACKET0_REG_MASK;
280 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
281 break;
282 default:
283 DRM_ERROR("unknown crtc reloc\n");
284 r = -EINVAL;
285 goto out;
286 }
287 ib[h_idx] = header;
288 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
289 }
290out:
291 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
292 return r;
293}
294
184static int r600_packet0_check(struct radeon_cs_parser *p, 295static int r600_packet0_check(struct radeon_cs_parser *p,
185 struct radeon_cs_packet *pkt, 296 struct radeon_cs_packet *pkt,
186 unsigned idx, unsigned reg) 297 unsigned idx, unsigned reg)
187{ 298{
299 int r;
300
188 switch (reg) { 301 switch (reg) {
189 case AVIVO_D1MODE_VLINE_START_END: 302 case AVIVO_D1MODE_VLINE_START_END:
190 case AVIVO_D2MODE_VLINE_START_END: 303 r = r600_cs_packet_parse_vline(p);
304 if (r) {
305 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
306 idx, reg);
307 return r;
308 }
191 break; 309 break;
192 default: 310 default:
193 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 311 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
218static int r600_packet3_check(struct radeon_cs_parser *p, 336static int r600_packet3_check(struct radeon_cs_parser *p,
219 struct radeon_cs_packet *pkt) 337 struct radeon_cs_packet *pkt)
220{ 338{
221 struct radeon_cs_chunk *ib_chunk;
222 struct radeon_cs_reloc *reloc; 339 struct radeon_cs_reloc *reloc;
223 volatile u32 *ib; 340 volatile u32 *ib;
224 unsigned idx; 341 unsigned idx;
225 unsigned i; 342 unsigned i;
226 unsigned start_reg, end_reg, reg; 343 unsigned start_reg, end_reg, reg;
227 int r; 344 int r;
345 u32 idx_value;
228 346
229 ib = p->ib->ptr; 347 ib = p->ib->ptr;
230 ib_chunk = &p->chunks[p->chunk_ib_idx];
231 idx = pkt->idx + 1; 348 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx);
350
232 switch (pkt->opcode) { 351 switch (pkt->opcode) {
233 case PACKET3_START_3D_CMDBUF: 352 case PACKET3_START_3D_CMDBUF:
234 if (p->family >= CHIP_RV770 || pkt->count) { 353 if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
259 DRM_ERROR("bad DRAW_INDEX\n"); 378 DRM_ERROR("bad DRAW_INDEX\n");
260 return -EINVAL; 379 return -EINVAL;
261 } 380 }
262 ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 381 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
263 ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 382 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
264 break; 383 break;
265 case PACKET3_DRAW_INDEX_AUTO: 384 case PACKET3_DRAW_INDEX_AUTO:
266 if (pkt->count != 1) { 385 if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
281 return -EINVAL; 400 return -EINVAL;
282 } 401 }
283 /* bit 4 is reg (0) or mem (1) */ 402 /* bit 4 is reg (0) or mem (1) */
284 if (ib_chunk->kdata[idx+0] & 0x10) { 403 if (idx_value & 0x10) {
285 r = r600_cs_packet_next_reloc(p, &reloc); 404 r = r600_cs_packet_next_reloc(p, &reloc);
286 if (r) { 405 if (r) {
287 DRM_ERROR("bad WAIT_REG_MEM\n"); 406 DRM_ERROR("bad WAIT_REG_MEM\n");
288 return -EINVAL; 407 return -EINVAL;
289 } 408 }
290 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 409 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
291 ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 410 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
292 } 411 }
293 break; 412 break;
294 case PACKET3_SURFACE_SYNC: 413 case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
297 return -EINVAL; 416 return -EINVAL;
298 } 417 }
299 /* 0xffffffff/0x0 is flush all cache flag */ 418 /* 0xffffffff/0x0 is flush all cache flag */
300 if (ib_chunk->kdata[idx+1] != 0xffffffff || 419 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
301 ib_chunk->kdata[idx+2] != 0) { 420 radeon_get_ib_value(p, idx + 2) != 0) {
302 r = r600_cs_packet_next_reloc(p, &reloc); 421 r = r600_cs_packet_next_reloc(p, &reloc);
303 if (r) { 422 if (r) {
304 DRM_ERROR("bad SURFACE_SYNC\n"); 423 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
319 return -EINVAL; 438 return -EINVAL;
320 } 439 }
321 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 440 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
322 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 441 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
323 } 442 }
324 break; 443 break;
325 case PACKET3_EVENT_WRITE_EOP: 444 case PACKET3_EVENT_WRITE_EOP:
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
333 return -EINVAL; 452 return -EINVAL;
334 } 453 }
335 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 454 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
336 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 455 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
337 break; 456 break;
338 case PACKET3_SET_CONFIG_REG: 457 case PACKET3_SET_CONFIG_REG:
339 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 458 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
340 end_reg = 4 * pkt->count + start_reg - 4; 459 end_reg = 4 * pkt->count + start_reg - 4;
341 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 460 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
342 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 461 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -347,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
347 for (i = 0; i < pkt->count; i++) { 466 for (i = 0; i < pkt->count; i++) {
348 reg = start_reg + (4 * i); 467 reg = start_reg + (4 * i);
349 switch (reg) { 468 switch (reg) {
469 case SQ_ESGS_RING_BASE:
470 case SQ_GSVS_RING_BASE:
471 case SQ_ESTMP_RING_BASE:
472 case SQ_GSTMP_RING_BASE:
473 case SQ_VSTMP_RING_BASE:
474 case SQ_PSTMP_RING_BASE:
475 case SQ_FBUF_RING_BASE:
476 case SQ_REDUC_RING_BASE:
477 case SX_MEMORY_EXPORT_BASE:
478 r = r600_cs_packet_next_reloc(p, &reloc);
479 if (r) {
480 DRM_ERROR("bad SET_CONFIG_REG "
481 "0x%04X\n", reg);
482 return -EINVAL;
483 }
484 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
485 break;
350 case CP_COHER_BASE: 486 case CP_COHER_BASE:
351 /* use PACKET3_SURFACE_SYNC */ 487 /* use PACKET3_SURFACE_SYNC */
352 return -EINVAL; 488 return -EINVAL;
@@ -356,7 +492,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
356 } 492 }
357 break; 493 break;
358 case PACKET3_SET_CONTEXT_REG: 494 case PACKET3_SET_CONTEXT_REG:
359 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 495 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
360 end_reg = 4 * pkt->count + start_reg - 4; 496 end_reg = 4 * pkt->count + start_reg - 4;
361 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 497 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
362 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 498 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -368,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
368 reg = start_reg + (4 * i); 504 reg = start_reg + (4 * i);
369 switch (reg) { 505 switch (reg) {
370 case DB_DEPTH_BASE: 506 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE:
371 case CB_COLOR0_BASE: 508 case CB_COLOR0_BASE:
372 case CB_COLOR1_BASE: 509 case CB_COLOR1_BASE:
373 case CB_COLOR2_BASE: 510 case CB_COLOR2_BASE:
@@ -421,7 +558,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad SET_RESOURCE\n"); 558 DRM_ERROR("bad SET_RESOURCE\n");
422 return -EINVAL; 559 return -EINVAL;
423 } 560 }
424 start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET; 561 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
425 end_reg = 4 * pkt->count + start_reg - 4; 562 end_reg = 4 * pkt->count + start_reg - 4;
426 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 563 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
427 (start_reg >= PACKET3_SET_RESOURCE_END) || 564 (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +567,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
430 return -EINVAL; 567 return -EINVAL;
431 } 568 }
432 for (i = 0; i < (pkt->count / 7); i++) { 569 for (i = 0; i < (pkt->count / 7); i++) {
433 switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) { 570 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
434 case SQ_TEX_VTX_VALID_TEXTURE: 571 case SQ_TEX_VTX_VALID_TEXTURE:
435 /* tex base */ 572 /* tex base */
436 r = r600_cs_packet_next_reloc(p, &reloc); 573 r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +592,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
455 return -EINVAL; 592 return -EINVAL;
456 } 593 }
457 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 594 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
458 ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 595 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
459 break; 596 break;
460 case SQ_TEX_VTX_INVALID_TEXTURE: 597 case SQ_TEX_VTX_INVALID_TEXTURE:
461 case SQ_TEX_VTX_INVALID_BUFFER: 598 case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +603,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
466 } 603 }
467 break; 604 break;
468 case PACKET3_SET_ALU_CONST: 605 case PACKET3_SET_ALU_CONST:
469 start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET; 606 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
470 end_reg = 4 * pkt->count + start_reg - 4; 607 end_reg = 4 * pkt->count + start_reg - 4;
471 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 608 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
472 (start_reg >= PACKET3_SET_ALU_CONST_END) || 609 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +613,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
476 } 613 }
477 break; 614 break;
478 case PACKET3_SET_BOOL_CONST: 615 case PACKET3_SET_BOOL_CONST:
479 start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 616 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
480 end_reg = 4 * pkt->count + start_reg - 4; 617 end_reg = 4 * pkt->count + start_reg - 4;
481 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 618 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
482 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 619 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +623,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
486 } 623 }
487 break; 624 break;
488 case PACKET3_SET_LOOP_CONST: 625 case PACKET3_SET_LOOP_CONST:
489 start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 626 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
490 end_reg = 4 * pkt->count + start_reg - 4; 627 end_reg = 4 * pkt->count + start_reg - 4;
491 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 628 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
492 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 629 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +633,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
496 } 633 }
497 break; 634 break;
498 case PACKET3_SET_CTL_CONST: 635 case PACKET3_SET_CTL_CONST:
499 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET; 636 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
500 end_reg = 4 * pkt->count + start_reg - 4; 637 end_reg = 4 * pkt->count + start_reg - 4;
501 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 638 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
502 (start_reg >= PACKET3_SET_CTL_CONST_END) || 639 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +647,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
510 DRM_ERROR("bad SET_SAMPLER\n"); 647 DRM_ERROR("bad SET_SAMPLER\n");
511 return -EINVAL; 648 return -EINVAL;
512 } 649 }
513 start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET; 650 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
514 end_reg = 4 * pkt->count + start_reg - 4; 651 end_reg = 4 * pkt->count + start_reg - 4;
515 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 652 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
516 (start_reg >= PACKET3_SET_SAMPLER_END) || 653 (start_reg >= PACKET3_SET_SAMPLER_END) ||
@@ -602,6 +739,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
602 kfree(parser->relocs); 739 kfree(parser->relocs);
603 for (i = 0; i < parser->nchunks; i++) { 740 for (i = 0; i < parser->nchunks; i++) {
604 kfree(parser->chunks[i].kdata); 741 kfree(parser->chunks[i].kdata);
742 kfree(parser->chunks[i].kpage[0]);
743 kfree(parser->chunks[i].kpage[1]);
605 } 744 }
606 kfree(parser->chunks); 745 kfree(parser->chunks);
607 kfree(parser->chunks_array); 746 kfree(parser->chunks_array);
@@ -639,7 +778,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
639 * uncached). */ 778 * uncached). */
640 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 779 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
641 parser.ib->length_dw = ib_chunk->length_dw; 780 parser.ib->length_dw = ib_chunk->length_dw;
642 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
643 *l = parser.ib->length_dw; 781 *l = parser.ib->length_dw;
644 r = r600_cs_parse(&parser); 782 r = r600_cs_parse(&parser);
645 if (r) { 783 if (r) {
@@ -647,6 +785,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
647 r600_cs_parser_fini(&parser, r); 785 r600_cs_parser_fini(&parser, r);
648 return r; 786 return r;
649 } 787 }
788 r = radeon_cs_finish_pages(&parser);
789 if (r) {
790 DRM_ERROR("Invalid command stream !\n");
791 r600_cs_parser_fini(&parser, r);
792 return r;
793 }
650 r600_cs_parser_fini(&parser, r); 794 r600_cs_parser_fini(&parser, r);
651 return r; 795 return r;
652} 796}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a9028a85c9b..27ab428b149b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -119,6 +119,7 @@
119#define DB_DEBUG 0x9830 119#define DB_DEBUG 0x9830
120#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) 120#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
121#define DB_DEPTH_BASE 0x2800C 121#define DB_DEPTH_BASE 0x2800C
122#define DB_HTILE_DATA_BASE 0x28014
122#define DB_WATERMARKS 0x9838 123#define DB_WATERMARKS 0x9838
123#define DEPTH_FREE(x) ((x) << 0) 124#define DEPTH_FREE(x) ((x) << 0)
124#define DEPTH_FLUSH(x) ((x) << 5) 125#define DEPTH_FLUSH(x) ((x) << 5)
@@ -171,6 +172,14 @@
171#define SQ_STACK_RESOURCE_MGMT_2 0x8c14 172#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
172# define NUM_GS_STACK_ENTRIES(x) ((x) << 0) 173# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
173# define NUM_ES_STACK_ENTRIES(x) ((x) << 16) 174# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
175#define SQ_ESGS_RING_BASE 0x8c40
176#define SQ_GSVS_RING_BASE 0x8c48
177#define SQ_ESTMP_RING_BASE 0x8c50
178#define SQ_GSTMP_RING_BASE 0x8c58
179#define SQ_VSTMP_RING_BASE 0x8c60
180#define SQ_PSTMP_RING_BASE 0x8c68
181#define SQ_FBUF_RING_BASE 0x8c70
182#define SQ_REDUC_RING_BASE 0x8c78
174 183
175#define GRBM_CNTL 0x8000 184#define GRBM_CNTL 0x8000
176# define GRBM_READ_TIMEOUT(x) ((x) << 0) 185# define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -271,6 +280,10 @@
271#define PCIE_PORT_INDEX 0x0038 280#define PCIE_PORT_INDEX 0x0038
272#define PCIE_PORT_DATA 0x003C 281#define PCIE_PORT_DATA 0x003C
273 282
283#define CHMAP 0x2004
284#define NOOFCHAN_SHIFT 12
285#define NOOFCHAN_MASK 0x00003000
286
274#define RAMCFG 0x2408 287#define RAMCFG 0x2408
275#define NOOFBANK_SHIFT 0 288#define NOOFBANK_SHIFT 0
276#define NOOFBANK_MASK 0x00000001 289#define NOOFBANK_MASK 0x00000001
@@ -352,6 +365,7 @@
352 365
353 366
354#define SX_MISC 0x28350 367#define SX_MISC 0x28350
368#define SX_MEMORY_EXPORT_BASE 0x9010
355#define SX_DEBUG_1 0x9054 369#define SX_DEBUG_1 0x9054
356#define SMX_EVENT_RELEASE (1 << 0) 370#define SMX_EVENT_RELEASE (1 << 0)
357#define ENABLE_NEW_SMX_ADDRESS (1 << 16) 371#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
@@ -643,6 +657,7 @@
643#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) 657#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
644#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) 658#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
645#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) 659#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
660#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
646#define R_000E60_SRBM_SOFT_RESET 0x0E60 661#define R_000E60_SRBM_SOFT_RESET 0x0E60
647#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) 662#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
648#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) 663#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 6311b1362594..757f5cd37744 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -44,6 +44,24 @@
44 * - TESTING, TESTING, TESTING 44 * - TESTING, TESTING, TESTING
45 */ 45 */
46 46
47/* Initialization path:
48 * We expect that acceleration initialization might fail for various
49 * reasons even thought we work hard to make it works on most
50 * configurations. In order to still have a working userspace in such
51 * situation the init path must succeed up to the memory controller
52 * initialization point. Failure before this point are considered as
53 * fatal error. Here is the init callchain :
54 * radeon_device_init perform common structure, mutex initialization
55 * asic_init setup the GPU memory layout and perform all
56 * one time initialization (failure in this
57 * function are considered fatal)
58 * asic_startup setup the GPU acceleration, in order to
59 * follow guideline the first thing this
60 * function should do is setting the GPU
61 * memory controller (only MC setup failure
62 * are considered as fatal)
63 */
64
47#include <asm/atomic.h> 65#include <asm/atomic.h>
48#include <linux/wait.h> 66#include <linux/wait.h>
49#include <linux/list.h> 67#include <linux/list.h>
@@ -121,6 +139,10 @@ struct radeon_clock {
121 uint32_t default_sclk; 139 uint32_t default_sclk;
122}; 140};
123 141
142/*
143 * Power management
144 */
145int radeon_pm_init(struct radeon_device *rdev);
124 146
125/* 147/*
126 * Fences. 148 * Fences.
@@ -258,6 +280,8 @@ union radeon_gart_table {
258 struct radeon_gart_table_vram vram; 280 struct radeon_gart_table_vram vram;
259}; 281};
260 282
283#define RADEON_GPU_PAGE_SIZE 4096
284
261struct radeon_gart { 285struct radeon_gart {
262 dma_addr_t table_addr; 286 dma_addr_t table_addr;
263 unsigned num_gpu_pages; 287 unsigned num_gpu_pages;
@@ -342,7 +366,7 @@ struct radeon_ib {
342 unsigned long idx; 366 unsigned long idx;
343 uint64_t gpu_addr; 367 uint64_t gpu_addr;
344 struct radeon_fence *fence; 368 struct radeon_fence *fence;
345 volatile uint32_t *ptr; 369 uint32_t *ptr;
346 uint32_t length_dw; 370 uint32_t length_dw;
347}; 371};
348 372
@@ -415,7 +439,12 @@ struct radeon_cs_reloc {
415struct radeon_cs_chunk { 439struct radeon_cs_chunk {
416 uint32_t chunk_id; 440 uint32_t chunk_id;
417 uint32_t length_dw; 441 uint32_t length_dw;
442 int kpage_idx[2];
443 uint32_t *kpage[2];
418 uint32_t *kdata; 444 uint32_t *kdata;
445 void __user *user_ptr;
446 int last_copied_page;
447 int last_page_index;
419}; 448};
420 449
421struct radeon_cs_parser { 450struct radeon_cs_parser {
@@ -438,8 +467,38 @@ struct radeon_cs_parser {
438 struct radeon_ib *ib; 467 struct radeon_ib *ib;
439 void *track; 468 void *track;
440 unsigned family; 469 unsigned family;
470 int parser_error;
441}; 471};
442 472
473extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
474extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
475
476
477static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
478{
479 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
480 u32 pg_idx, pg_offset;
481 u32 idx_value = 0;
482 int new_page;
483
484 pg_idx = (idx * 4) / PAGE_SIZE;
485 pg_offset = (idx * 4) % PAGE_SIZE;
486
487 if (ibc->kpage_idx[0] == pg_idx)
488 return ibc->kpage[0][pg_offset/4];
489 if (ibc->kpage_idx[1] == pg_idx)
490 return ibc->kpage[1][pg_offset/4];
491
492 new_page = radeon_cs_update_pages(p, pg_idx);
493 if (new_page < 0) {
494 p->parser_error = new_page;
495 return 0;
496 }
497
498 idx_value = ibc->kpage[new_page][pg_offset/4];
499 return idx_value;
500}
501
443struct radeon_cs_packet { 502struct radeon_cs_packet {
444 unsigned idx; 503 unsigned idx;
445 unsigned type; 504 unsigned type;
@@ -537,18 +596,8 @@ struct radeon_asic {
537 void (*fini)(struct radeon_device *rdev); 596 void (*fini)(struct radeon_device *rdev);
538 int (*resume)(struct radeon_device *rdev); 597 int (*resume)(struct radeon_device *rdev);
539 int (*suspend)(struct radeon_device *rdev); 598 int (*suspend)(struct radeon_device *rdev);
540 void (*errata)(struct radeon_device *rdev);
541 void (*vram_info)(struct radeon_device *rdev);
542 void (*vga_set_state)(struct radeon_device *rdev, bool state); 599 void (*vga_set_state)(struct radeon_device *rdev, bool state);
543 int (*gpu_reset)(struct radeon_device *rdev); 600 int (*gpu_reset)(struct radeon_device *rdev);
544 int (*mc_init)(struct radeon_device *rdev);
545 void (*mc_fini)(struct radeon_device *rdev);
546 int (*wb_init)(struct radeon_device *rdev);
547 void (*wb_fini)(struct radeon_device *rdev);
548 int (*gart_init)(struct radeon_device *rdev);
549 void (*gart_fini)(struct radeon_device *rdev);
550 int (*gart_enable)(struct radeon_device *rdev);
551 void (*gart_disable)(struct radeon_device *rdev);
552 void (*gart_tlb_flush)(struct radeon_device *rdev); 601 void (*gart_tlb_flush)(struct radeon_device *rdev);
553 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 602 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
554 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 603 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -558,7 +607,6 @@ struct radeon_asic {
558 void (*ring_start)(struct radeon_device *rdev); 607 void (*ring_start)(struct radeon_device *rdev);
559 int (*ring_test)(struct radeon_device *rdev); 608 int (*ring_test)(struct radeon_device *rdev);
560 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 609 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
561 int (*ib_test)(struct radeon_device *rdev);
562 int (*irq_set)(struct radeon_device *rdev); 610 int (*irq_set)(struct radeon_device *rdev);
563 int (*irq_process)(struct radeon_device *rdev); 611 int (*irq_process)(struct radeon_device *rdev);
564 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 612 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
@@ -579,7 +627,9 @@ struct radeon_asic {
579 uint64_t dst_offset, 627 uint64_t dst_offset,
580 unsigned num_pages, 628 unsigned num_pages,
581 struct radeon_fence *fence); 629 struct radeon_fence *fence);
630 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
582 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 631 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
632 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
583 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 633 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
584 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 634 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
585 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 635 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
@@ -736,12 +786,12 @@ struct radeon_device {
736 bool shutdown; 786 bool shutdown;
737 bool suspend; 787 bool suspend;
738 bool need_dma32; 788 bool need_dma32;
739 bool new_init_path;
740 bool accel_working; 789 bool accel_working;
741 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 790 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
742 const struct firmware *me_fw; /* all family ME firmware */ 791 const struct firmware *me_fw; /* all family ME firmware */
743 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 792 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
744 struct r600_blit r600_blit; 793 struct r600_blit r600_blit;
794 int msi_enabled; /* msi enabled */
745}; 795};
746 796
747int radeon_device_init(struct radeon_device *rdev, 797int radeon_device_init(struct radeon_device *rdev,
@@ -896,28 +946,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
896#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 946#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
897#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 947#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
898#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 948#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
899#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
900#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
901#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 949#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
902#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) 950#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
903#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
904#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
905#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
906#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
907#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev))
908#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev))
909#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
910#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
911#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 951#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
912#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 952#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
913#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
914#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
915#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
916#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) 953#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
917#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 954#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
918#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) 955#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
919#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) 956#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
920#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
921#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 957#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
922#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 958#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
923#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) 959#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
@@ -925,7 +961,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
925#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) 961#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
926#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) 962#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
927#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) 963#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
964#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
928#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 965#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
966#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
929#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 967#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
930#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 968#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
931#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 969#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
@@ -943,6 +981,8 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
943extern void radeon_scratch_init(struct radeon_device *rdev); 981extern void radeon_scratch_init(struct radeon_device *rdev);
944extern void radeon_surface_init(struct radeon_device *rdev); 982extern void radeon_surface_init(struct radeon_device *rdev);
945extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 983extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
984extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
985extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
946 986
947/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 987/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
948struct r100_mc_save { 988struct r100_mc_save {
@@ -974,23 +1014,71 @@ extern void r100_vram_init_sizes(struct radeon_device *rdev);
974extern void r100_wb_disable(struct radeon_device *rdev); 1014extern void r100_wb_disable(struct radeon_device *rdev);
975extern void r100_wb_fini(struct radeon_device *rdev); 1015extern void r100_wb_fini(struct radeon_device *rdev);
976extern int r100_wb_init(struct radeon_device *rdev); 1016extern int r100_wb_init(struct radeon_device *rdev);
1017extern void r100_hdp_reset(struct radeon_device *rdev);
1018extern int r100_rb2d_reset(struct radeon_device *rdev);
1019extern int r100_cp_reset(struct radeon_device *rdev);
1020extern void r100_vga_render_disable(struct radeon_device *rdev);
1021extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1022 struct radeon_cs_packet *pkt,
1023 struct radeon_object *robj);
1024extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1025 struct radeon_cs_packet *pkt,
1026 const unsigned *auth, unsigned n,
1027 radeon_packet0_check_t check);
1028extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
1029 struct radeon_cs_packet *pkt,
1030 unsigned idx);
1031
1032/* rv200,rv250,rv280 */
1033extern void r200_set_safe_registers(struct radeon_device *rdev);
977 1034
978/* r300,r350,rv350,rv370,rv380 */ 1035/* r300,r350,rv350,rv370,rv380 */
979extern void r300_set_reg_safe(struct radeon_device *rdev); 1036extern void r300_set_reg_safe(struct radeon_device *rdev);
980extern void r300_mc_program(struct radeon_device *rdev); 1037extern void r300_mc_program(struct radeon_device *rdev);
981extern void r300_vram_info(struct radeon_device *rdev); 1038extern void r300_vram_info(struct radeon_device *rdev);
1039extern void r300_clock_startup(struct radeon_device *rdev);
1040extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
982extern int rv370_pcie_gart_init(struct radeon_device *rdev); 1041extern int rv370_pcie_gart_init(struct radeon_device *rdev);
983extern void rv370_pcie_gart_fini(struct radeon_device *rdev); 1042extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
984extern int rv370_pcie_gart_enable(struct radeon_device *rdev); 1043extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
985extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1044extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
986 1045
987/* r420,r423,rv410 */ 1046/* r420,r423,rv410 */
1047extern int r420_mc_init(struct radeon_device *rdev);
988extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1048extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
989extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1049extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
990extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1050extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
1051extern void r420_pipes_init(struct radeon_device *rdev);
991 1052
992/* rv515 */ 1053/* rv515 */
1054struct rv515_mc_save {
1055 u32 d1vga_control;
1056 u32 d2vga_control;
1057 u32 vga_render_control;
1058 u32 vga_hdp_control;
1059 u32 d1crtc_control;
1060 u32 d2crtc_control;
1061};
993extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); 1062extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
1063extern void rv515_vga_render_disable(struct radeon_device *rdev);
1064extern void rv515_set_safe_registers(struct radeon_device *rdev);
1065extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
1066extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
1067extern void rv515_clock_startup(struct radeon_device *rdev);
1068extern void rv515_debugfs(struct radeon_device *rdev);
1069extern int rv515_suspend(struct radeon_device *rdev);
1070
1071/* rs400 */
1072extern int rs400_gart_init(struct radeon_device *rdev);
1073extern int rs400_gart_enable(struct radeon_device *rdev);
1074extern void rs400_gart_adjust_size(struct radeon_device *rdev);
1075extern void rs400_gart_disable(struct radeon_device *rdev);
1076extern void rs400_gart_fini(struct radeon_device *rdev);
1077
1078/* rs600 */
1079extern void rs600_set_safe_registers(struct radeon_device *rdev);
1080extern int rs600_irq_set(struct radeon_device *rdev);
1081extern void rs600_irq_disable(struct radeon_device *rdev);
994 1082
995/* rs690, rs740 */ 1083/* rs690, rs740 */
996extern void rs690_line_buffer_adjust(struct radeon_device *rdev, 1084extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -1009,8 +1097,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
1009extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 1097extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
1010extern int r600_ib_test(struct radeon_device *rdev); 1098extern int r600_ib_test(struct radeon_device *rdev);
1011extern int r600_ring_test(struct radeon_device *rdev); 1099extern int r600_ring_test(struct radeon_device *rdev);
1012extern int r600_wb_init(struct radeon_device *rdev);
1013extern void r600_wb_fini(struct radeon_device *rdev); 1100extern void r600_wb_fini(struct radeon_device *rdev);
1101extern int r600_wb_enable(struct radeon_device *rdev);
1102extern void r600_wb_disable(struct radeon_device *rdev);
1014extern void r600_scratch_init(struct radeon_device *rdev); 1103extern void r600_scratch_init(struct radeon_device *rdev);
1015extern int r600_blit_init(struct radeon_device *rdev); 1104extern int r600_blit_init(struct radeon_device *rdev);
1016extern void r600_blit_fini(struct radeon_device *rdev); 1105extern void r600_blit_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8968f78fa1e3..c18fbee387d7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -31,38 +31,30 @@
31/* 31/*
32 * common functions 32 * common functions
33 */ 33 */
34uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
34void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); 35void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
35void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); 36void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
36 37
38uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
37void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); 39void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
40uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
38void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); 41void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
39void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 42void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
40 43
41/* 44/*
42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 45 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
43 */ 46 */
44int r100_init(struct radeon_device *rdev); 47extern int r100_init(struct radeon_device *rdev);
45int r200_init(struct radeon_device *rdev); 48extern void r100_fini(struct radeon_device *rdev);
49extern int r100_suspend(struct radeon_device *rdev);
50extern int r100_resume(struct radeon_device *rdev);
46uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 51uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
47void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 52void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
48void r100_errata(struct radeon_device *rdev);
49void r100_vram_info(struct radeon_device *rdev);
50void r100_vga_set_state(struct radeon_device *rdev, bool state); 53void r100_vga_set_state(struct radeon_device *rdev, bool state);
51int r100_gpu_reset(struct radeon_device *rdev); 54int r100_gpu_reset(struct radeon_device *rdev);
52int r100_mc_init(struct radeon_device *rdev);
53void r100_mc_fini(struct radeon_device *rdev);
54u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 55u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
55int r100_wb_init(struct radeon_device *rdev);
56void r100_wb_fini(struct radeon_device *rdev);
57int r100_pci_gart_init(struct radeon_device *rdev);
58void r100_pci_gart_fini(struct radeon_device *rdev);
59int r100_pci_gart_enable(struct radeon_device *rdev);
60void r100_pci_gart_disable(struct radeon_device *rdev);
61void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 56void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
62int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 57int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
63int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
64void r100_cp_fini(struct radeon_device *rdev);
65void r100_cp_disable(struct radeon_device *rdev);
66void r100_cp_commit(struct radeon_device *rdev); 58void r100_cp_commit(struct radeon_device *rdev);
67void r100_ring_start(struct radeon_device *rdev); 59void r100_ring_start(struct radeon_device *rdev);
68int r100_irq_set(struct radeon_device *rdev); 60int r100_irq_set(struct radeon_device *rdev);
@@ -83,33 +75,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
83int r100_clear_surface_reg(struct radeon_device *rdev, int reg); 75int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
84void r100_bandwidth_update(struct radeon_device *rdev); 76void r100_bandwidth_update(struct radeon_device *rdev);
85void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 77void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
86int r100_ib_test(struct radeon_device *rdev);
87int r100_ring_test(struct radeon_device *rdev); 78int r100_ring_test(struct radeon_device *rdev);
88 79
89static struct radeon_asic r100_asic = { 80static struct radeon_asic r100_asic = {
90 .init = &r100_init, 81 .init = &r100_init,
91 .errata = &r100_errata, 82 .fini = &r100_fini,
92 .vram_info = &r100_vram_info, 83 .suspend = &r100_suspend,
84 .resume = &r100_resume,
93 .vga_set_state = &r100_vga_set_state, 85 .vga_set_state = &r100_vga_set_state,
94 .gpu_reset = &r100_gpu_reset, 86 .gpu_reset = &r100_gpu_reset,
95 .mc_init = &r100_mc_init,
96 .mc_fini = &r100_mc_fini,
97 .wb_init = &r100_wb_init,
98 .wb_fini = &r100_wb_fini,
99 .gart_init = &r100_pci_gart_init,
100 .gart_fini = &r100_pci_gart_fini,
101 .gart_enable = &r100_pci_gart_enable,
102 .gart_disable = &r100_pci_gart_disable,
103 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 87 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
104 .gart_set_page = &r100_pci_gart_set_page, 88 .gart_set_page = &r100_pci_gart_set_page,
105 .cp_init = &r100_cp_init,
106 .cp_fini = &r100_cp_fini,
107 .cp_disable = &r100_cp_disable,
108 .cp_commit = &r100_cp_commit, 89 .cp_commit = &r100_cp_commit,
109 .ring_start = &r100_ring_start, 90 .ring_start = &r100_ring_start,
110 .ring_test = &r100_ring_test, 91 .ring_test = &r100_ring_test,
111 .ring_ib_execute = &r100_ring_ib_execute, 92 .ring_ib_execute = &r100_ring_ib_execute,
112 .ib_test = &r100_ib_test,
113 .irq_set = &r100_irq_set, 93 .irq_set = &r100_irq_set,
114 .irq_process = &r100_irq_process, 94 .irq_process = &r100_irq_process,
115 .get_vblank_counter = &r100_get_vblank_counter, 95 .get_vblank_counter = &r100_get_vblank_counter,
@@ -118,7 +98,9 @@ static struct radeon_asic r100_asic = {
118 .copy_blit = &r100_copy_blit, 98 .copy_blit = &r100_copy_blit,
119 .copy_dma = NULL, 99 .copy_dma = NULL,
120 .copy = &r100_copy_blit, 100 .copy = &r100_copy_blit,
101 .get_engine_clock = &radeon_legacy_get_engine_clock,
121 .set_engine_clock = &radeon_legacy_set_engine_clock, 102 .set_engine_clock = &radeon_legacy_set_engine_clock,
103 .get_memory_clock = NULL,
122 .set_memory_clock = NULL, 104 .set_memory_clock = NULL,
123 .set_pcie_lanes = NULL, 105 .set_pcie_lanes = NULL,
124 .set_clock_gating = &radeon_legacy_set_clock_gating, 106 .set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -131,55 +113,38 @@ static struct radeon_asic r100_asic = {
131/* 113/*
132 * r300,r350,rv350,rv380 114 * r300,r350,rv350,rv380
133 */ 115 */
134int r300_init(struct radeon_device *rdev); 116extern int r300_init(struct radeon_device *rdev);
135void r300_errata(struct radeon_device *rdev); 117extern void r300_fini(struct radeon_device *rdev);
136void r300_vram_info(struct radeon_device *rdev); 118extern int r300_suspend(struct radeon_device *rdev);
137int r300_gpu_reset(struct radeon_device *rdev); 119extern int r300_resume(struct radeon_device *rdev);
138int r300_mc_init(struct radeon_device *rdev); 120extern int r300_gpu_reset(struct radeon_device *rdev);
139void r300_mc_fini(struct radeon_device *rdev); 121extern void r300_ring_start(struct radeon_device *rdev);
140void r300_ring_start(struct radeon_device *rdev); 122extern void r300_fence_ring_emit(struct radeon_device *rdev,
141void r300_fence_ring_emit(struct radeon_device *rdev, 123 struct radeon_fence *fence);
142 struct radeon_fence *fence); 124extern int r300_cs_parse(struct radeon_cs_parser *p);
143int r300_cs_parse(struct radeon_cs_parser *p); 125extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
144int rv370_pcie_gart_init(struct radeon_device *rdev); 126extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
145void rv370_pcie_gart_fini(struct radeon_device *rdev); 127extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
146int rv370_pcie_gart_enable(struct radeon_device *rdev); 128extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
147void rv370_pcie_gart_disable(struct radeon_device *rdev); 129extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
148void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 130extern int r300_copy_dma(struct radeon_device *rdev,
149int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 131 uint64_t src_offset,
150uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 132 uint64_t dst_offset,
151void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 133 unsigned num_pages,
152void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 134 struct radeon_fence *fence);
153int r300_copy_dma(struct radeon_device *rdev,
154 uint64_t src_offset,
155 uint64_t dst_offset,
156 unsigned num_pages,
157 struct radeon_fence *fence);
158
159static struct radeon_asic r300_asic = { 135static struct radeon_asic r300_asic = {
160 .init = &r300_init, 136 .init = &r300_init,
161 .errata = &r300_errata, 137 .fini = &r300_fini,
162 .vram_info = &r300_vram_info, 138 .suspend = &r300_suspend,
139 .resume = &r300_resume,
163 .vga_set_state = &r100_vga_set_state, 140 .vga_set_state = &r100_vga_set_state,
164 .gpu_reset = &r300_gpu_reset, 141 .gpu_reset = &r300_gpu_reset,
165 .mc_init = &r300_mc_init,
166 .mc_fini = &r300_mc_fini,
167 .wb_init = &r100_wb_init,
168 .wb_fini = &r100_wb_fini,
169 .gart_init = &r100_pci_gart_init,
170 .gart_fini = &r100_pci_gart_fini,
171 .gart_enable = &r100_pci_gart_enable,
172 .gart_disable = &r100_pci_gart_disable,
173 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 142 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
174 .gart_set_page = &r100_pci_gart_set_page, 143 .gart_set_page = &r100_pci_gart_set_page,
175 .cp_init = &r100_cp_init,
176 .cp_fini = &r100_cp_fini,
177 .cp_disable = &r100_cp_disable,
178 .cp_commit = &r100_cp_commit, 144 .cp_commit = &r100_cp_commit,
179 .ring_start = &r300_ring_start, 145 .ring_start = &r300_ring_start,
180 .ring_test = &r100_ring_test, 146 .ring_test = &r100_ring_test,
181 .ring_ib_execute = &r100_ring_ib_execute, 147 .ring_ib_execute = &r100_ring_ib_execute,
182 .ib_test = &r100_ib_test,
183 .irq_set = &r100_irq_set, 148 .irq_set = &r100_irq_set,
184 .irq_process = &r100_irq_process, 149 .irq_process = &r100_irq_process,
185 .get_vblank_counter = &r100_get_vblank_counter, 150 .get_vblank_counter = &r100_get_vblank_counter,
@@ -188,7 +153,9 @@ static struct radeon_asic r300_asic = {
188 .copy_blit = &r100_copy_blit, 153 .copy_blit = &r100_copy_blit,
189 .copy_dma = &r300_copy_dma, 154 .copy_dma = &r300_copy_dma,
190 .copy = &r100_copy_blit, 155 .copy = &r100_copy_blit,
156 .get_engine_clock = &radeon_legacy_get_engine_clock,
191 .set_engine_clock = &radeon_legacy_set_engine_clock, 157 .set_engine_clock = &radeon_legacy_set_engine_clock,
158 .get_memory_clock = NULL,
192 .set_memory_clock = NULL, 159 .set_memory_clock = NULL,
193 .set_pcie_lanes = &rv370_set_pcie_lanes, 160 .set_pcie_lanes = &rv370_set_pcie_lanes,
194 .set_clock_gating = &radeon_legacy_set_clock_gating, 161 .set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -209,26 +176,14 @@ static struct radeon_asic r420_asic = {
209 .fini = &r420_fini, 176 .fini = &r420_fini,
210 .suspend = &r420_suspend, 177 .suspend = &r420_suspend,
211 .resume = &r420_resume, 178 .resume = &r420_resume,
212 .errata = NULL,
213 .vram_info = NULL,
214 .vga_set_state = &r100_vga_set_state, 179 .vga_set_state = &r100_vga_set_state,
215 .gpu_reset = &r300_gpu_reset, 180 .gpu_reset = &r300_gpu_reset,
216 .mc_init = NULL,
217 .mc_fini = NULL,
218 .wb_init = NULL,
219 .wb_fini = NULL,
220 .gart_enable = NULL,
221 .gart_disable = NULL,
222 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 181 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
223 .gart_set_page = &rv370_pcie_gart_set_page, 182 .gart_set_page = &rv370_pcie_gart_set_page,
224 .cp_init = NULL,
225 .cp_fini = NULL,
226 .cp_disable = NULL,
227 .cp_commit = &r100_cp_commit, 183 .cp_commit = &r100_cp_commit,
228 .ring_start = &r300_ring_start, 184 .ring_start = &r300_ring_start,
229 .ring_test = &r100_ring_test, 185 .ring_test = &r100_ring_test,
230 .ring_ib_execute = &r100_ring_ib_execute, 186 .ring_ib_execute = &r100_ring_ib_execute,
231 .ib_test = NULL,
232 .irq_set = &r100_irq_set, 187 .irq_set = &r100_irq_set,
233 .irq_process = &r100_irq_process, 188 .irq_process = &r100_irq_process,
234 .get_vblank_counter = &r100_get_vblank_counter, 189 .get_vblank_counter = &r100_get_vblank_counter,
@@ -237,7 +192,9 @@ static struct radeon_asic r420_asic = {
237 .copy_blit = &r100_copy_blit, 192 .copy_blit = &r100_copy_blit,
238 .copy_dma = &r300_copy_dma, 193 .copy_dma = &r300_copy_dma,
239 .copy = &r100_copy_blit, 194 .copy = &r100_copy_blit,
195 .get_engine_clock = &radeon_atom_get_engine_clock,
240 .set_engine_clock = &radeon_atom_set_engine_clock, 196 .set_engine_clock = &radeon_atom_set_engine_clock,
197 .get_memory_clock = &radeon_atom_get_memory_clock,
241 .set_memory_clock = &radeon_atom_set_memory_clock, 198 .set_memory_clock = &radeon_atom_set_memory_clock,
242 .set_pcie_lanes = &rv370_set_pcie_lanes, 199 .set_pcie_lanes = &rv370_set_pcie_lanes,
243 .set_clock_gating = &radeon_atom_set_clock_gating, 200 .set_clock_gating = &radeon_atom_set_clock_gating,
@@ -250,42 +207,27 @@ static struct radeon_asic r420_asic = {
250/* 207/*
251 * rs400,rs480 208 * rs400,rs480
252 */ 209 */
253void rs400_errata(struct radeon_device *rdev); 210extern int rs400_init(struct radeon_device *rdev);
254void rs400_vram_info(struct radeon_device *rdev); 211extern void rs400_fini(struct radeon_device *rdev);
255int rs400_mc_init(struct radeon_device *rdev); 212extern int rs400_suspend(struct radeon_device *rdev);
256void rs400_mc_fini(struct radeon_device *rdev); 213extern int rs400_resume(struct radeon_device *rdev);
257int rs400_gart_init(struct radeon_device *rdev);
258void rs400_gart_fini(struct radeon_device *rdev);
259int rs400_gart_enable(struct radeon_device *rdev);
260void rs400_gart_disable(struct radeon_device *rdev);
261void rs400_gart_tlb_flush(struct radeon_device *rdev); 214void rs400_gart_tlb_flush(struct radeon_device *rdev);
262int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 215int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
263uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 216uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
264void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 217void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
265static struct radeon_asic rs400_asic = { 218static struct radeon_asic rs400_asic = {
266 .init = &r300_init, 219 .init = &rs400_init,
267 .errata = &rs400_errata, 220 .fini = &rs400_fini,
268 .vram_info = &rs400_vram_info, 221 .suspend = &rs400_suspend,
222 .resume = &rs400_resume,
269 .vga_set_state = &r100_vga_set_state, 223 .vga_set_state = &r100_vga_set_state,
270 .gpu_reset = &r300_gpu_reset, 224 .gpu_reset = &r300_gpu_reset,
271 .mc_init = &rs400_mc_init,
272 .mc_fini = &rs400_mc_fini,
273 .wb_init = &r100_wb_init,
274 .wb_fini = &r100_wb_fini,
275 .gart_init = &rs400_gart_init,
276 .gart_fini = &rs400_gart_fini,
277 .gart_enable = &rs400_gart_enable,
278 .gart_disable = &rs400_gart_disable,
279 .gart_tlb_flush = &rs400_gart_tlb_flush, 225 .gart_tlb_flush = &rs400_gart_tlb_flush,
280 .gart_set_page = &rs400_gart_set_page, 226 .gart_set_page = &rs400_gart_set_page,
281 .cp_init = &r100_cp_init,
282 .cp_fini = &r100_cp_fini,
283 .cp_disable = &r100_cp_disable,
284 .cp_commit = &r100_cp_commit, 227 .cp_commit = &r100_cp_commit,
285 .ring_start = &r300_ring_start, 228 .ring_start = &r300_ring_start,
286 .ring_test = &r100_ring_test, 229 .ring_test = &r100_ring_test,
287 .ring_ib_execute = &r100_ring_ib_execute, 230 .ring_ib_execute = &r100_ring_ib_execute,
288 .ib_test = &r100_ib_test,
289 .irq_set = &r100_irq_set, 231 .irq_set = &r100_irq_set,
290 .irq_process = &r100_irq_process, 232 .irq_process = &r100_irq_process,
291 .get_vblank_counter = &r100_get_vblank_counter, 233 .get_vblank_counter = &r100_get_vblank_counter,
@@ -294,7 +236,9 @@ static struct radeon_asic rs400_asic = {
294 .copy_blit = &r100_copy_blit, 236 .copy_blit = &r100_copy_blit,
295 .copy_dma = &r300_copy_dma, 237 .copy_dma = &r300_copy_dma,
296 .copy = &r100_copy_blit, 238 .copy = &r100_copy_blit,
239 .get_engine_clock = &radeon_legacy_get_engine_clock,
297 .set_engine_clock = &radeon_legacy_set_engine_clock, 240 .set_engine_clock = &radeon_legacy_set_engine_clock,
241 .get_memory_clock = NULL,
298 .set_memory_clock = NULL, 242 .set_memory_clock = NULL,
299 .set_pcie_lanes = NULL, 243 .set_pcie_lanes = NULL,
300 .set_clock_gating = &radeon_legacy_set_clock_gating, 244 .set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -307,18 +251,13 @@ static struct radeon_asic rs400_asic = {
307/* 251/*
308 * rs600. 252 * rs600.
309 */ 253 */
310int rs600_init(struct radeon_device *rdev); 254extern int rs600_init(struct radeon_device *rdev);
311void rs600_errata(struct radeon_device *rdev); 255extern void rs600_fini(struct radeon_device *rdev);
312void rs600_vram_info(struct radeon_device *rdev); 256extern int rs600_suspend(struct radeon_device *rdev);
313int rs600_mc_init(struct radeon_device *rdev); 257extern int rs600_resume(struct radeon_device *rdev);
314void rs600_mc_fini(struct radeon_device *rdev);
315int rs600_irq_set(struct radeon_device *rdev); 258int rs600_irq_set(struct radeon_device *rdev);
316int rs600_irq_process(struct radeon_device *rdev); 259int rs600_irq_process(struct radeon_device *rdev);
317u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 260u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
318int rs600_gart_init(struct radeon_device *rdev);
319void rs600_gart_fini(struct radeon_device *rdev);
320int rs600_gart_enable(struct radeon_device *rdev);
321void rs600_gart_disable(struct radeon_device *rdev);
322void rs600_gart_tlb_flush(struct radeon_device *rdev); 261void rs600_gart_tlb_flush(struct radeon_device *rdev);
323int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 262int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
324uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 263uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -326,28 +265,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
326void rs600_bandwidth_update(struct radeon_device *rdev); 265void rs600_bandwidth_update(struct radeon_device *rdev);
327static struct radeon_asic rs600_asic = { 266static struct radeon_asic rs600_asic = {
328 .init = &rs600_init, 267 .init = &rs600_init,
329 .errata = &rs600_errata, 268 .fini = &rs600_fini,
330 .vram_info = &rs600_vram_info, 269 .suspend = &rs600_suspend,
270 .resume = &rs600_resume,
331 .vga_set_state = &r100_vga_set_state, 271 .vga_set_state = &r100_vga_set_state,
332 .gpu_reset = &r300_gpu_reset, 272 .gpu_reset = &r300_gpu_reset,
333 .mc_init = &rs600_mc_init,
334 .mc_fini = &rs600_mc_fini,
335 .wb_init = &r100_wb_init,
336 .wb_fini = &r100_wb_fini,
337 .gart_init = &rs600_gart_init,
338 .gart_fini = &rs600_gart_fini,
339 .gart_enable = &rs600_gart_enable,
340 .gart_disable = &rs600_gart_disable,
341 .gart_tlb_flush = &rs600_gart_tlb_flush, 273 .gart_tlb_flush = &rs600_gart_tlb_flush,
342 .gart_set_page = &rs600_gart_set_page, 274 .gart_set_page = &rs600_gart_set_page,
343 .cp_init = &r100_cp_init,
344 .cp_fini = &r100_cp_fini,
345 .cp_disable = &r100_cp_disable,
346 .cp_commit = &r100_cp_commit, 275 .cp_commit = &r100_cp_commit,
347 .ring_start = &r300_ring_start, 276 .ring_start = &r300_ring_start,
348 .ring_test = &r100_ring_test, 277 .ring_test = &r100_ring_test,
349 .ring_ib_execute = &r100_ring_ib_execute, 278 .ring_ib_execute = &r100_ring_ib_execute,
350 .ib_test = &r100_ib_test,
351 .irq_set = &rs600_irq_set, 279 .irq_set = &rs600_irq_set,
352 .irq_process = &rs600_irq_process, 280 .irq_process = &rs600_irq_process,
353 .get_vblank_counter = &rs600_get_vblank_counter, 281 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -356,7 +284,9 @@ static struct radeon_asic rs600_asic = {
356 .copy_blit = &r100_copy_blit, 284 .copy_blit = &r100_copy_blit,
357 .copy_dma = &r300_copy_dma, 285 .copy_dma = &r300_copy_dma,
358 .copy = &r100_copy_blit, 286 .copy = &r100_copy_blit,
287 .get_engine_clock = &radeon_atom_get_engine_clock,
359 .set_engine_clock = &radeon_atom_set_engine_clock, 288 .set_engine_clock = &radeon_atom_set_engine_clock,
289 .get_memory_clock = &radeon_atom_get_memory_clock,
360 .set_memory_clock = &radeon_atom_set_memory_clock, 290 .set_memory_clock = &radeon_atom_set_memory_clock,
361 .set_pcie_lanes = NULL, 291 .set_pcie_lanes = NULL,
362 .set_clock_gating = &radeon_atom_set_clock_gating, 292 .set_clock_gating = &radeon_atom_set_clock_gating,
@@ -367,37 +297,26 @@ static struct radeon_asic rs600_asic = {
367/* 297/*
368 * rs690,rs740 298 * rs690,rs740
369 */ 299 */
370void rs690_errata(struct radeon_device *rdev); 300int rs690_init(struct radeon_device *rdev);
371void rs690_vram_info(struct radeon_device *rdev); 301void rs690_fini(struct radeon_device *rdev);
372int rs690_mc_init(struct radeon_device *rdev); 302int rs690_resume(struct radeon_device *rdev);
373void rs690_mc_fini(struct radeon_device *rdev); 303int rs690_suspend(struct radeon_device *rdev);
374uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 304uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
375void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 305void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
376void rs690_bandwidth_update(struct radeon_device *rdev); 306void rs690_bandwidth_update(struct radeon_device *rdev);
377static struct radeon_asic rs690_asic = { 307static struct radeon_asic rs690_asic = {
378 .init = &rs600_init, 308 .init = &rs690_init,
379 .errata = &rs690_errata, 309 .fini = &rs690_fini,
380 .vram_info = &rs690_vram_info, 310 .suspend = &rs690_suspend,
311 .resume = &rs690_resume,
381 .vga_set_state = &r100_vga_set_state, 312 .vga_set_state = &r100_vga_set_state,
382 .gpu_reset = &r300_gpu_reset, 313 .gpu_reset = &r300_gpu_reset,
383 .mc_init = &rs690_mc_init,
384 .mc_fini = &rs690_mc_fini,
385 .wb_init = &r100_wb_init,
386 .wb_fini = &r100_wb_fini,
387 .gart_init = &rs400_gart_init,
388 .gart_fini = &rs400_gart_fini,
389 .gart_enable = &rs400_gart_enable,
390 .gart_disable = &rs400_gart_disable,
391 .gart_tlb_flush = &rs400_gart_tlb_flush, 314 .gart_tlb_flush = &rs400_gart_tlb_flush,
392 .gart_set_page = &rs400_gart_set_page, 315 .gart_set_page = &rs400_gart_set_page,
393 .cp_init = &r100_cp_init,
394 .cp_fini = &r100_cp_fini,
395 .cp_disable = &r100_cp_disable,
396 .cp_commit = &r100_cp_commit, 316 .cp_commit = &r100_cp_commit,
397 .ring_start = &r300_ring_start, 317 .ring_start = &r300_ring_start,
398 .ring_test = &r100_ring_test, 318 .ring_test = &r100_ring_test,
399 .ring_ib_execute = &r100_ring_ib_execute, 319 .ring_ib_execute = &r100_ring_ib_execute,
400 .ib_test = &r100_ib_test,
401 .irq_set = &rs600_irq_set, 320 .irq_set = &rs600_irq_set,
402 .irq_process = &rs600_irq_process, 321 .irq_process = &rs600_irq_process,
403 .get_vblank_counter = &rs600_get_vblank_counter, 322 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -406,7 +325,9 @@ static struct radeon_asic rs690_asic = {
406 .copy_blit = &r100_copy_blit, 325 .copy_blit = &r100_copy_blit,
407 .copy_dma = &r300_copy_dma, 326 .copy_dma = &r300_copy_dma,
408 .copy = &r300_copy_dma, 327 .copy = &r300_copy_dma,
328 .get_engine_clock = &radeon_atom_get_engine_clock,
409 .set_engine_clock = &radeon_atom_set_engine_clock, 329 .set_engine_clock = &radeon_atom_set_engine_clock,
330 .get_memory_clock = &radeon_atom_get_memory_clock,
410 .set_memory_clock = &radeon_atom_set_memory_clock, 331 .set_memory_clock = &radeon_atom_set_memory_clock,
411 .set_pcie_lanes = NULL, 332 .set_pcie_lanes = NULL,
412 .set_clock_gating = &radeon_atom_set_clock_gating, 333 .set_clock_gating = &radeon_atom_set_clock_gating,
@@ -420,41 +341,29 @@ static struct radeon_asic rs690_asic = {
420 * rv515 341 * rv515
421 */ 342 */
422int rv515_init(struct radeon_device *rdev); 343int rv515_init(struct radeon_device *rdev);
423void rv515_errata(struct radeon_device *rdev); 344void rv515_fini(struct radeon_device *rdev);
424void rv515_vram_info(struct radeon_device *rdev);
425int rv515_gpu_reset(struct radeon_device *rdev); 345int rv515_gpu_reset(struct radeon_device *rdev);
426int rv515_mc_init(struct radeon_device *rdev);
427void rv515_mc_fini(struct radeon_device *rdev);
428uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 346uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
429void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 347void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
430void rv515_ring_start(struct radeon_device *rdev); 348void rv515_ring_start(struct radeon_device *rdev);
431uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 349uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
432void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 350void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
433void rv515_bandwidth_update(struct radeon_device *rdev); 351void rv515_bandwidth_update(struct radeon_device *rdev);
352int rv515_resume(struct radeon_device *rdev);
353int rv515_suspend(struct radeon_device *rdev);
434static struct radeon_asic rv515_asic = { 354static struct radeon_asic rv515_asic = {
435 .init = &rv515_init, 355 .init = &rv515_init,
436 .errata = &rv515_errata, 356 .fini = &rv515_fini,
437 .vram_info = &rv515_vram_info, 357 .suspend = &rv515_suspend,
358 .resume = &rv515_resume,
438 .vga_set_state = &r100_vga_set_state, 359 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset, 360 .gpu_reset = &rv515_gpu_reset,
440 .mc_init = &rv515_mc_init,
441 .mc_fini = &rv515_mc_fini,
442 .wb_init = &r100_wb_init,
443 .wb_fini = &r100_wb_fini,
444 .gart_init = &rv370_pcie_gart_init,
445 .gart_fini = &rv370_pcie_gart_fini,
446 .gart_enable = &rv370_pcie_gart_enable,
447 .gart_disable = &rv370_pcie_gart_disable,
448 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 361 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
449 .gart_set_page = &rv370_pcie_gart_set_page, 362 .gart_set_page = &rv370_pcie_gart_set_page,
450 .cp_init = &r100_cp_init,
451 .cp_fini = &r100_cp_fini,
452 .cp_disable = &r100_cp_disable,
453 .cp_commit = &r100_cp_commit, 363 .cp_commit = &r100_cp_commit,
454 .ring_start = &rv515_ring_start, 364 .ring_start = &rv515_ring_start,
455 .ring_test = &r100_ring_test, 365 .ring_test = &r100_ring_test,
456 .ring_ib_execute = &r100_ring_ib_execute, 366 .ring_ib_execute = &r100_ring_ib_execute,
457 .ib_test = &r100_ib_test,
458 .irq_set = &rs600_irq_set, 367 .irq_set = &rs600_irq_set,
459 .irq_process = &rs600_irq_process, 368 .irq_process = &rs600_irq_process,
460 .get_vblank_counter = &rs600_get_vblank_counter, 369 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -463,7 +372,9 @@ static struct radeon_asic rv515_asic = {
463 .copy_blit = &r100_copy_blit, 372 .copy_blit = &r100_copy_blit,
464 .copy_dma = &r300_copy_dma, 373 .copy_dma = &r300_copy_dma,
465 .copy = &r100_copy_blit, 374 .copy = &r100_copy_blit,
375 .get_engine_clock = &radeon_atom_get_engine_clock,
466 .set_engine_clock = &radeon_atom_set_engine_clock, 376 .set_engine_clock = &radeon_atom_set_engine_clock,
377 .get_memory_clock = &radeon_atom_get_memory_clock,
467 .set_memory_clock = &radeon_atom_set_memory_clock, 378 .set_memory_clock = &radeon_atom_set_memory_clock,
468 .set_pcie_lanes = &rv370_set_pcie_lanes, 379 .set_pcie_lanes = &rv370_set_pcie_lanes,
469 .set_clock_gating = &radeon_atom_set_clock_gating, 380 .set_clock_gating = &radeon_atom_set_clock_gating,
@@ -476,35 +387,21 @@ static struct radeon_asic rv515_asic = {
476/* 387/*
477 * r520,rv530,rv560,rv570,r580 388 * r520,rv530,rv560,rv570,r580
478 */ 389 */
479void r520_errata(struct radeon_device *rdev); 390int r520_init(struct radeon_device *rdev);
480void r520_vram_info(struct radeon_device *rdev); 391int r520_resume(struct radeon_device *rdev);
481int r520_mc_init(struct radeon_device *rdev);
482void r520_mc_fini(struct radeon_device *rdev);
483void r520_bandwidth_update(struct radeon_device *rdev);
484static struct radeon_asic r520_asic = { 392static struct radeon_asic r520_asic = {
485 .init = &rv515_init, 393 .init = &r520_init,
486 .errata = &r520_errata, 394 .fini = &rv515_fini,
487 .vram_info = &r520_vram_info, 395 .suspend = &rv515_suspend,
396 .resume = &r520_resume,
488 .vga_set_state = &r100_vga_set_state, 397 .vga_set_state = &r100_vga_set_state,
489 .gpu_reset = &rv515_gpu_reset, 398 .gpu_reset = &rv515_gpu_reset,
490 .mc_init = &r520_mc_init,
491 .mc_fini = &r520_mc_fini,
492 .wb_init = &r100_wb_init,
493 .wb_fini = &r100_wb_fini,
494 .gart_init = &rv370_pcie_gart_init,
495 .gart_fini = &rv370_pcie_gart_fini,
496 .gart_enable = &rv370_pcie_gart_enable,
497 .gart_disable = &rv370_pcie_gart_disable,
498 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 399 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
499 .gart_set_page = &rv370_pcie_gart_set_page, 400 .gart_set_page = &rv370_pcie_gart_set_page,
500 .cp_init = &r100_cp_init,
501 .cp_fini = &r100_cp_fini,
502 .cp_disable = &r100_cp_disable,
503 .cp_commit = &r100_cp_commit, 401 .cp_commit = &r100_cp_commit,
504 .ring_start = &rv515_ring_start, 402 .ring_start = &rv515_ring_start,
505 .ring_test = &r100_ring_test, 403 .ring_test = &r100_ring_test,
506 .ring_ib_execute = &r100_ring_ib_execute, 404 .ring_ib_execute = &r100_ring_ib_execute,
507 .ib_test = &r100_ib_test,
508 .irq_set = &rs600_irq_set, 405 .irq_set = &rs600_irq_set,
509 .irq_process = &rs600_irq_process, 406 .irq_process = &rs600_irq_process,
510 .get_vblank_counter = &rs600_get_vblank_counter, 407 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -513,13 +410,15 @@ static struct radeon_asic r520_asic = {
513 .copy_blit = &r100_copy_blit, 410 .copy_blit = &r100_copy_blit,
514 .copy_dma = &r300_copy_dma, 411 .copy_dma = &r300_copy_dma,
515 .copy = &r100_copy_blit, 412 .copy = &r100_copy_blit,
413 .get_engine_clock = &radeon_atom_get_engine_clock,
516 .set_engine_clock = &radeon_atom_set_engine_clock, 414 .set_engine_clock = &radeon_atom_set_engine_clock,
415 .get_memory_clock = &radeon_atom_get_memory_clock,
517 .set_memory_clock = &radeon_atom_set_memory_clock, 416 .set_memory_clock = &radeon_atom_set_memory_clock,
518 .set_pcie_lanes = &rv370_set_pcie_lanes, 417 .set_pcie_lanes = &rv370_set_pcie_lanes,
519 .set_clock_gating = &radeon_atom_set_clock_gating, 418 .set_clock_gating = &radeon_atom_set_clock_gating,
520 .set_surface_reg = r100_set_surface_reg, 419 .set_surface_reg = r100_set_surface_reg,
521 .clear_surface_reg = r100_clear_surface_reg, 420 .clear_surface_reg = r100_clear_surface_reg,
522 .bandwidth_update = &r520_bandwidth_update, 421 .bandwidth_update = &rv515_bandwidth_update,
523}; 422};
524 423
525/* 424/*
@@ -552,37 +451,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
552 uint32_t offset, uint32_t obj_size); 451 uint32_t offset, uint32_t obj_size);
553int r600_clear_surface_reg(struct radeon_device *rdev, int reg); 452int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
554void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 453void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
555int r600_ib_test(struct radeon_device *rdev);
556int r600_ring_test(struct radeon_device *rdev); 454int r600_ring_test(struct radeon_device *rdev);
557int r600_copy_blit(struct radeon_device *rdev, 455int r600_copy_blit(struct radeon_device *rdev,
558 uint64_t src_offset, uint64_t dst_offset, 456 uint64_t src_offset, uint64_t dst_offset,
559 unsigned num_pages, struct radeon_fence *fence); 457 unsigned num_pages, struct radeon_fence *fence);
560 458
561static struct radeon_asic r600_asic = { 459static struct radeon_asic r600_asic = {
562 .errata = NULL,
563 .init = &r600_init, 460 .init = &r600_init,
564 .fini = &r600_fini, 461 .fini = &r600_fini,
565 .suspend = &r600_suspend, 462 .suspend = &r600_suspend,
566 .resume = &r600_resume, 463 .resume = &r600_resume,
567 .cp_commit = &r600_cp_commit, 464 .cp_commit = &r600_cp_commit,
568 .vram_info = NULL,
569 .vga_set_state = &r600_vga_set_state, 465 .vga_set_state = &r600_vga_set_state,
570 .gpu_reset = &r600_gpu_reset, 466 .gpu_reset = &r600_gpu_reset,
571 .mc_init = NULL,
572 .mc_fini = NULL,
573 .wb_init = &r600_wb_init,
574 .wb_fini = &r600_wb_fini,
575 .gart_enable = NULL,
576 .gart_disable = NULL,
577 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 467 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
578 .gart_set_page = &rs600_gart_set_page, 468 .gart_set_page = &rs600_gart_set_page,
579 .cp_init = NULL,
580 .cp_fini = NULL,
581 .cp_disable = NULL,
582 .ring_start = NULL,
583 .ring_test = &r600_ring_test, 469 .ring_test = &r600_ring_test,
584 .ring_ib_execute = &r600_ring_ib_execute, 470 .ring_ib_execute = &r600_ring_ib_execute,
585 .ib_test = &r600_ib_test,
586 .irq_set = &r600_irq_set, 471 .irq_set = &r600_irq_set,
587 .irq_process = &r600_irq_process, 472 .irq_process = &r600_irq_process,
588 .fence_ring_emit = &r600_fence_ring_emit, 473 .fence_ring_emit = &r600_fence_ring_emit,
@@ -590,13 +475,15 @@ static struct radeon_asic r600_asic = {
590 .copy_blit = &r600_copy_blit, 475 .copy_blit = &r600_copy_blit,
591 .copy_dma = &r600_copy_blit, 476 .copy_dma = &r600_copy_blit,
592 .copy = &r600_copy_blit, 477 .copy = &r600_copy_blit,
478 .get_engine_clock = &radeon_atom_get_engine_clock,
593 .set_engine_clock = &radeon_atom_set_engine_clock, 479 .set_engine_clock = &radeon_atom_set_engine_clock,
480 .get_memory_clock = &radeon_atom_get_memory_clock,
594 .set_memory_clock = &radeon_atom_set_memory_clock, 481 .set_memory_clock = &radeon_atom_set_memory_clock,
595 .set_pcie_lanes = NULL, 482 .set_pcie_lanes = NULL,
596 .set_clock_gating = &radeon_atom_set_clock_gating, 483 .set_clock_gating = &radeon_atom_set_clock_gating,
597 .set_surface_reg = r600_set_surface_reg, 484 .set_surface_reg = r600_set_surface_reg,
598 .clear_surface_reg = r600_clear_surface_reg, 485 .clear_surface_reg = r600_clear_surface_reg,
599 .bandwidth_update = &r520_bandwidth_update, 486 .bandwidth_update = &rv515_bandwidth_update,
600}; 487};
601 488
602/* 489/*
@@ -609,30 +496,17 @@ int rv770_resume(struct radeon_device *rdev);
609int rv770_gpu_reset(struct radeon_device *rdev); 496int rv770_gpu_reset(struct radeon_device *rdev);
610 497
611static struct radeon_asic rv770_asic = { 498static struct radeon_asic rv770_asic = {
612 .errata = NULL,
613 .init = &rv770_init, 499 .init = &rv770_init,
614 .fini = &rv770_fini, 500 .fini = &rv770_fini,
615 .suspend = &rv770_suspend, 501 .suspend = &rv770_suspend,
616 .resume = &rv770_resume, 502 .resume = &rv770_resume,
617 .cp_commit = &r600_cp_commit, 503 .cp_commit = &r600_cp_commit,
618 .vram_info = NULL,
619 .gpu_reset = &rv770_gpu_reset, 504 .gpu_reset = &rv770_gpu_reset,
620 .vga_set_state = &r600_vga_set_state, 505 .vga_set_state = &r600_vga_set_state,
621 .mc_init = NULL,
622 .mc_fini = NULL,
623 .wb_init = &r600_wb_init,
624 .wb_fini = &r600_wb_fini,
625 .gart_enable = NULL,
626 .gart_disable = NULL,
627 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 506 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
628 .gart_set_page = &rs600_gart_set_page, 507 .gart_set_page = &rs600_gart_set_page,
629 .cp_init = NULL,
630 .cp_fini = NULL,
631 .cp_disable = NULL,
632 .ring_start = NULL,
633 .ring_test = &r600_ring_test, 508 .ring_test = &r600_ring_test,
634 .ring_ib_execute = &r600_ring_ib_execute, 509 .ring_ib_execute = &r600_ring_ib_execute,
635 .ib_test = &r600_ib_test,
636 .irq_set = &r600_irq_set, 510 .irq_set = &r600_irq_set,
637 .irq_process = &r600_irq_process, 511 .irq_process = &r600_irq_process,
638 .fence_ring_emit = &r600_fence_ring_emit, 512 .fence_ring_emit = &r600_fence_ring_emit,
@@ -640,13 +514,15 @@ static struct radeon_asic rv770_asic = {
640 .copy_blit = &r600_copy_blit, 514 .copy_blit = &r600_copy_blit,
641 .copy_dma = &r600_copy_blit, 515 .copy_dma = &r600_copy_blit,
642 .copy = &r600_copy_blit, 516 .copy = &r600_copy_blit,
517 .get_engine_clock = &radeon_atom_get_engine_clock,
643 .set_engine_clock = &radeon_atom_set_engine_clock, 518 .set_engine_clock = &radeon_atom_set_engine_clock,
519 .get_memory_clock = &radeon_atom_get_memory_clock,
644 .set_memory_clock = &radeon_atom_set_memory_clock, 520 .set_memory_clock = &radeon_atom_set_memory_clock,
645 .set_pcie_lanes = NULL, 521 .set_pcie_lanes = NULL,
646 .set_clock_gating = &radeon_atom_set_clock_gating, 522 .set_clock_gating = &radeon_atom_set_clock_gating,
647 .set_surface_reg = r600_set_surface_reg, 523 .set_surface_reg = r600_set_surface_reg,
648 .clear_surface_reg = r600_clear_surface_reg, 524 .clear_surface_reg = r600_clear_surface_reg,
649 .bandwidth_update = &r520_bandwidth_update, 525 .bandwidth_update = &rv515_bandwidth_update,
650}; 526};
651 527
652#endif 528#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 743742128307..2ed88a820935 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -46,7 +46,8 @@ radeon_add_atom_connector(struct drm_device *dev,
46 uint32_t supported_device, 46 uint32_t supported_device,
47 int connector_type, 47 int connector_type,
48 struct radeon_i2c_bus_rec *i2c_bus, 48 struct radeon_i2c_bus_rec *i2c_bus,
49 bool linkb, uint32_t igp_lane_info); 49 bool linkb, uint32_t igp_lane_info,
50 uint16_t connector_object_id);
50 51
51/* from radeon_legacy_encoder.c */ 52/* from radeon_legacy_encoder.c */
52extern void 53extern void
@@ -193,6 +194,23 @@ const int supported_devices_connector_convert[] = {
193 DRM_MODE_CONNECTOR_DisplayPort 194 DRM_MODE_CONNECTOR_DisplayPort
194}; 195};
195 196
197const uint16_t supported_devices_connector_object_id_convert[] = {
198 CONNECTOR_OBJECT_ID_NONE,
199 CONNECTOR_OBJECT_ID_VGA,
200 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
201 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
202 CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
203 CONNECTOR_OBJECT_ID_COMPOSITE,
204 CONNECTOR_OBJECT_ID_SVIDEO,
205 CONNECTOR_OBJECT_ID_LVDS,
206 CONNECTOR_OBJECT_ID_9PIN_DIN,
207 CONNECTOR_OBJECT_ID_9PIN_DIN,
208 CONNECTOR_OBJECT_ID_DISPLAYPORT,
209 CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
210 CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
211 CONNECTOR_OBJECT_ID_SVIDEO
212};
213
196const int object_connector_convert[] = { 214const int object_connector_convert[] = {
197 DRM_MODE_CONNECTOR_Unknown, 215 DRM_MODE_CONNECTOR_Unknown,
198 DRM_MODE_CONNECTOR_DVII, 216 DRM_MODE_CONNECTOR_DVII,
@@ -229,7 +247,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
229 ATOM_OBJECT_HEADER *obj_header; 247 ATOM_OBJECT_HEADER *obj_header;
230 int i, j, path_size, device_support; 248 int i, j, path_size, device_support;
231 int connector_type; 249 int connector_type;
232 uint16_t igp_lane_info, conn_id; 250 uint16_t igp_lane_info, conn_id, connector_object_id;
233 bool linkb; 251 bool linkb;
234 struct radeon_i2c_bus_rec ddc_bus; 252 struct radeon_i2c_bus_rec ddc_bus;
235 253
@@ -272,15 +290,13 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
272 (le16_to_cpu(path->usConnObjectId) & 290 (le16_to_cpu(path->usConnObjectId) &
273 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 291 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
274 292
275 if ((le16_to_cpu(path->usDeviceTag) == 293 /* TODO CV support */
276 ATOM_DEVICE_TV1_SUPPORT) 294 if (le16_to_cpu(path->usDeviceTag) ==
277 || (le16_to_cpu(path->usDeviceTag) == 295 ATOM_DEVICE_CV_SUPPORT)
278 ATOM_DEVICE_TV2_SUPPORT)
279 || (le16_to_cpu(path->usDeviceTag) ==
280 ATOM_DEVICE_CV_SUPPORT))
281 continue; 296 continue;
282 297
283 if ((rdev->family == CHIP_RS780) && 298 /* IGP chips */
299 if ((rdev->flags & RADEON_IS_IGP) &&
284 (con_obj_id == 300 (con_obj_id ==
285 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { 301 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
286 uint16_t igp_offset = 0; 302 uint16_t igp_offset = 0;
@@ -314,6 +330,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
314 connector_type = 330 connector_type =
315 object_connector_convert 331 object_connector_convert
316 [ct]; 332 [ct];
333 connector_object_id = ct;
317 igp_lane_info = 334 igp_lane_info =
318 slot_config & 0xffff; 335 slot_config & 0xffff;
319 } else 336 } else
@@ -324,6 +341,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
324 igp_lane_info = 0; 341 igp_lane_info = 0;
325 connector_type = 342 connector_type =
326 object_connector_convert[con_obj_id]; 343 object_connector_convert[con_obj_id];
344 connector_object_id = con_obj_id;
327 } 345 }
328 346
329 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 347 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -428,7 +446,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
428 le16_to_cpu(path-> 446 le16_to_cpu(path->
429 usDeviceTag), 447 usDeviceTag),
430 connector_type, &ddc_bus, 448 connector_type, &ddc_bus,
431 linkb, igp_lane_info); 449 linkb, igp_lane_info,
450 connector_object_id);
432 451
433 } 452 }
434 } 453 }
@@ -438,6 +457,45 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
438 return true; 457 return true;
439} 458}
440 459
460static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
461 int connector_type,
462 uint16_t devices)
463{
464 struct radeon_device *rdev = dev->dev_private;
465
466 if (rdev->flags & RADEON_IS_IGP) {
467 return supported_devices_connector_object_id_convert
468 [connector_type];
469 } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
470 (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
471 (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
472 struct radeon_mode_info *mode_info = &rdev->mode_info;
473 struct atom_context *ctx = mode_info->atom_context;
474 int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
475 uint16_t size, data_offset;
476 uint8_t frev, crev;
477 ATOM_XTMDS_INFO *xtmds;
478
479 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
480 xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
481
482 if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
483 if (connector_type == DRM_MODE_CONNECTOR_DVII)
484 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
485 else
486 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
487 } else {
488 if (connector_type == DRM_MODE_CONNECTOR_DVII)
489 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
490 else
491 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
492 }
493 } else {
494 return supported_devices_connector_object_id_convert
495 [connector_type];
496 }
497}
498
441struct bios_connector { 499struct bios_connector {
442 bool valid; 500 bool valid;
443 uint16_t line_mux; 501 uint16_t line_mux;
@@ -596,14 +654,20 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
596 654
597 /* add the connectors */ 655 /* add the connectors */
598 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { 656 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
599 if (bios_connectors[i].valid) 657 if (bios_connectors[i].valid) {
658 uint16_t connector_object_id =
659 atombios_get_connector_object_id(dev,
660 bios_connectors[i].connector_type,
661 bios_connectors[i].devices);
600 radeon_add_atom_connector(dev, 662 radeon_add_atom_connector(dev,
601 bios_connectors[i].line_mux, 663 bios_connectors[i].line_mux,
602 bios_connectors[i].devices, 664 bios_connectors[i].devices,
603 bios_connectors[i]. 665 bios_connectors[i].
604 connector_type, 666 connector_type,
605 &bios_connectors[i].ddc_bus, 667 &bios_connectors[i].ddc_bus,
606 false, 0); 668 false, 0,
669 connector_object_id);
670 }
607 } 671 }
608 672
609 radeon_link_encoder_connector(dev); 673 radeon_link_encoder_connector(dev);
@@ -644,8 +708,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
644 le16_to_cpu(firmware_info->info.usReferenceClock); 708 le16_to_cpu(firmware_info->info.usReferenceClock);
645 p1pll->reference_div = 0; 709 p1pll->reference_div = 0;
646 710
647 p1pll->pll_out_min = 711 if (crev < 2)
648 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); 712 p1pll->pll_out_min =
713 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
714 else
715 p1pll->pll_out_min =
716 le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
649 p1pll->pll_out_max = 717 p1pll->pll_out_max =
650 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); 718 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
651 719
@@ -654,6 +722,16 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
654 p1pll->pll_out_min = 64800; 722 p1pll->pll_out_min = 64800;
655 else 723 else
656 p1pll->pll_out_min = 20000; 724 p1pll->pll_out_min = 20000;
725 } else if (p1pll->pll_out_min > 64800) {
726 /* Limiting the pll output range is a good thing generally as
727 * it limits the number of possible pll combinations for a given
728 * frequency presumably to the ones that work best on each card.
729 * However, certain duallink DVI monitors seem to like
730 * pll combinations that would be limited by this at least on
731 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
732 * family.
733 */
734 p1pll->pll_out_min = 64800;
657 } 735 }
658 736
659 p1pll->pll_in_min = 737 p1pll->pll_in_min =
@@ -770,6 +848,46 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
770 return false; 848 return false;
771} 849}
772 850
851static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
852 radeon_encoder
853 *encoder,
854 int id)
855{
856 struct drm_device *dev = encoder->base.dev;
857 struct radeon_device *rdev = dev->dev_private;
858 struct radeon_mode_info *mode_info = &rdev->mode_info;
859 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
860 uint16_t data_offset;
861 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
862 uint8_t frev, crev;
863 struct radeon_atom_ss *ss = NULL;
864
865 if (id > ATOM_MAX_SS_ENTRY)
866 return NULL;
867
868 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
869 &crev, &data_offset);
870
871 ss_info =
872 (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
873
874 if (ss_info) {
875 ss =
876 kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
877
878 if (!ss)
879 return NULL;
880
881 ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
882 ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
883 ss->step = ss_info->asSS_Info[id].ucSS_Step;
884 ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
885 ss->range = ss_info->asSS_Info[id].ucSS_Range;
886 ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
887 }
888 return ss;
889}
890
773union lvds_info { 891union lvds_info {
774 struct _ATOM_LVDS_INFO info; 892 struct _ATOM_LVDS_INFO info;
775 struct _ATOM_LVDS_INFO_V12 info_12; 893 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -801,27 +919,31 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
801 if (!lvds) 919 if (!lvds)
802 return NULL; 920 return NULL;
803 921
804 lvds->native_mode.dotclock = 922 lvds->native_mode.clock =
805 le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; 923 le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
806 lvds->native_mode.panel_xres = 924 lvds->native_mode.hdisplay =
807 le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); 925 le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
808 lvds->native_mode.panel_yres = 926 lvds->native_mode.vdisplay =
809 le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); 927 le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
810 lvds->native_mode.hblank = 928 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
811 le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); 929 le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
812 lvds->native_mode.hoverplus = 930 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
813 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); 931 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
814 lvds->native_mode.hsync_width = 932 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
815 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); 933 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
816 lvds->native_mode.vblank = 934 lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
817 le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); 935 le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
818 lvds->native_mode.voverplus = 936 lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
819 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); 937 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
820 lvds->native_mode.vsync_width = 938 lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
821 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); 939 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
822 lvds->panel_pwr_delay = 940 lvds->panel_pwr_delay =
823 le16_to_cpu(lvds_info->info.usOffDelayInMs); 941 le16_to_cpu(lvds_info->info.usOffDelayInMs);
824 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; 942 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
943 /* set crtc values */
944 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
945
946 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
825 947
826 encoder->native_mode = lvds->native_mode; 948 encoder->native_mode = lvds->native_mode;
827 } 949 }
@@ -860,8 +982,7 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
860} 982}
861 983
862bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, 984bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
863 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing, 985 struct drm_display_mode *mode)
864 int32_t *pixel_clock)
865{ 986{
866 struct radeon_mode_info *mode_info = &rdev->mode_info; 987 struct radeon_mode_info *mode_info = &rdev->mode_info;
867 ATOM_ANALOG_TV_INFO *tv_info; 988 ATOM_ANALOG_TV_INFO *tv_info;
@@ -869,7 +990,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
869 ATOM_DTD_FORMAT *dtd_timings; 990 ATOM_DTD_FORMAT *dtd_timings;
870 int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); 991 int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
871 u8 frev, crev; 992 u8 frev, crev;
872 uint16_t data_offset; 993 u16 data_offset, misc;
873 994
874 atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); 995 atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
875 996
@@ -879,28 +1000,37 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
879 if (index > MAX_SUPPORTED_TV_TIMING) 1000 if (index > MAX_SUPPORTED_TV_TIMING)
880 return false; 1001 return false;
881 1002
882 crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); 1003 mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
883 crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); 1004 mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
884 crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); 1005 mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
885 crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); 1006 mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
886 1007 le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
887 crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); 1008
888 crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); 1009 mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
889 crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); 1010 mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
890 crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); 1011 mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
891 1012 mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
892 crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo; 1013 le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
893 1014
894 crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight); 1015 mode->flags = 0;
895 crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft); 1016 misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
896 crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom); 1017 if (misc & ATOM_VSYNC_POLARITY)
897 crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop); 1018 mode->flags |= DRM_MODE_FLAG_NVSYNC;
898 *pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; 1019 if (misc & ATOM_HSYNC_POLARITY)
1020 mode->flags |= DRM_MODE_FLAG_NHSYNC;
1021 if (misc & ATOM_COMPOSITESYNC)
1022 mode->flags |= DRM_MODE_FLAG_CSYNC;
1023 if (misc & ATOM_INTERLACE)
1024 mode->flags |= DRM_MODE_FLAG_INTERLACE;
1025 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1026 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
1027
1028 mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
899 1029
900 if (index == 1) { 1030 if (index == 1) {
901 /* PAL timings appear to have wrong values for totals */ 1031 /* PAL timings appear to have wrong values for totals */
902 crtc_timing->usH_Total -= 1; 1032 mode->crtc_htotal -= 1;
903 crtc_timing->usV_Total -= 1; 1033 mode->crtc_vtotal -= 1;
904 } 1034 }
905 break; 1035 break;
906 case 2: 1036 case 2:
@@ -909,17 +1039,36 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
909 return false; 1039 return false;
910 1040
911 dtd_timings = &tv_info_v1_2->aModeTimings[index]; 1041 dtd_timings = &tv_info_v1_2->aModeTimings[index];
912 crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time); 1042 mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
913 crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive); 1043 le16_to_cpu(dtd_timings->usHBlanking_Time);
914 crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset); 1044 mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
915 crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth); 1045 mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
916 crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time); 1046 le16_to_cpu(dtd_timings->usHSyncOffset);
917 crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive); 1047 mode->crtc_hsync_end = mode->crtc_hsync_start +
918 crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset); 1048 le16_to_cpu(dtd_timings->usHSyncWidth);
919 crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth); 1049
920 1050 mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
921 crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); 1051 le16_to_cpu(dtd_timings->usVBlanking_Time);
922 *pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10; 1052 mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
1053 mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
1054 le16_to_cpu(dtd_timings->usVSyncOffset);
1055 mode->crtc_vsync_end = mode->crtc_vsync_start +
1056 le16_to_cpu(dtd_timings->usVSyncWidth);
1057
1058 mode->flags = 0;
1059 misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
1060 if (misc & ATOM_VSYNC_POLARITY)
1061 mode->flags |= DRM_MODE_FLAG_NVSYNC;
1062 if (misc & ATOM_HSYNC_POLARITY)
1063 mode->flags |= DRM_MODE_FLAG_NHSYNC;
1064 if (misc & ATOM_COMPOSITESYNC)
1065 mode->flags |= DRM_MODE_FLAG_CSYNC;
1066 if (misc & ATOM_INTERLACE)
1067 mode->flags |= DRM_MODE_FLAG_INTERLACE;
1068 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1069 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
1070
1071 mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
923 break; 1072 break;
924 } 1073 }
925 return true; 1074 return true;
@@ -984,6 +1133,24 @@ void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
984 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1133 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
985} 1134}
986 1135
1136uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
1137{
1138 GET_ENGINE_CLOCK_PS_ALLOCATION args;
1139 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
1140
1141 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1142 return args.ulReturnEngineClock;
1143}
1144
1145uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
1146{
1147 GET_MEMORY_CLOCK_PS_ALLOCATION args;
1148 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
1149
1150 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1151 return args.ulReturnMemoryClock;
1152}
1153
987void radeon_atom_set_engine_clock(struct radeon_device *rdev, 1154void radeon_atom_set_engine_clock(struct radeon_device *rdev,
988 uint32_t eng_clock) 1155 uint32_t eng_clock)
989{ 1156{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 2e938f7496fb..10bd50a7db87 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
63 if (r) { 63 if (r) {
64 goto out_cleanup; 64 goto out_cleanup;
65 } 65 }
66 r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); 66 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
67 if (r) { 67 if (r) {
68 goto out_cleanup; 68 goto out_cleanup;
69 } 69 }
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
88 if (r) { 88 if (r) {
89 goto out_cleanup; 89 goto out_cleanup;
90 } 90 }
91 r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); 91 r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
92 if (r) { 92 if (r) {
93 goto out_cleanup; 93 goto out_cleanup;
94 } 94 }
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 96e37a6e7ce4..906921740c60 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -33,12 +33,47 @@
33/* 33/*
34 * BIOS. 34 * BIOS.
35 */ 35 */
36
37/* If you boot an IGP board with a discrete card as the primary,
38 * the IGP rom is not accessible via the rom bar as the IGP rom is
39 * part of the system bios. On boot, the system bios puts a
40 * copy of the igp rom at the start of vram if a discrete card is
41 * present.
42 */
43static bool igp_read_bios_from_vram(struct radeon_device *rdev)
44{
45 uint8_t __iomem *bios;
46 resource_size_t vram_base;
47 resource_size_t size = 256 * 1024; /* ??? */
48
49 rdev->bios = NULL;
50 vram_base = drm_get_resource_start(rdev->ddev, 0);
51 bios = ioremap(vram_base, size);
52 if (!bios) {
53 return false;
54 }
55
56 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
57 iounmap(bios);
58 return false;
59 }
60 rdev->bios = kmalloc(size, GFP_KERNEL);
61 if (rdev->bios == NULL) {
62 iounmap(bios);
63 return false;
64 }
65 memcpy(rdev->bios, bios, size);
66 iounmap(bios);
67 return true;
68}
69
36static bool radeon_read_bios(struct radeon_device *rdev) 70static bool radeon_read_bios(struct radeon_device *rdev)
37{ 71{
38 uint8_t __iomem *bios; 72 uint8_t __iomem *bios;
39 size_t size; 73 size_t size;
40 74
41 rdev->bios = NULL; 75 rdev->bios = NULL;
76 /* XXX: some cards may return 0 for rom size? ddx has a workaround */
42 bios = pci_map_rom(rdev->pdev, &size); 77 bios = pci_map_rom(rdev->pdev, &size);
43 if (!bios) { 78 if (!bios) {
44 return false; 79 return false;
@@ -341,7 +376,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
341 376
342static bool radeon_read_disabled_bios(struct radeon_device *rdev) 377static bool radeon_read_disabled_bios(struct radeon_device *rdev)
343{ 378{
344 if (rdev->family >= CHIP_RV770) 379 if (rdev->flags & RADEON_IS_IGP)
380 return igp_read_bios_from_vram(rdev);
381 else if (rdev->family >= CHIP_RV770)
345 return r700_read_disabled_bios(rdev); 382 return r700_read_disabled_bios(rdev);
346 else if (rdev->family >= CHIP_R600) 383 else if (rdev->family >= CHIP_R600)
347 return r600_read_disabled_bios(rdev); 384 return r600_read_disabled_bios(rdev);
@@ -356,7 +393,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
356 bool r; 393 bool r;
357 uint16_t tmp; 394 uint16_t tmp;
358 395
359 r = radeon_read_bios(rdev); 396 if (rdev->flags & RADEON_IS_IGP) {
397 r = igp_read_bios_from_vram(rdev);
398 if (r == false)
399 r = radeon_read_bios(rdev);
400 } else
401 r = radeon_read_bios(rdev);
360 if (r == false) { 402 if (r == false) {
361 r = radeon_read_disabled_bios(rdev); 403 r = radeon_read_disabled_bios(rdev);
362 } 404 }
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 152eef13197a..a81354167621 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -32,7 +32,7 @@
32#include "atom.h" 32#include "atom.h"
33 33
34/* 10 khz */ 34/* 10 khz */
35static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) 35uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
36{ 36{
37 struct radeon_pll *spll = &rdev->clock.spll; 37 struct radeon_pll *spll = &rdev->clock.spll;
38 uint32_t fb_div, ref_div, post_div, sclk; 38 uint32_t fb_div, ref_div, post_div, sclk;
@@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
411 R300_PIXCLK_TRANS_ALWAYS_ONb | 411 R300_PIXCLK_TRANS_ALWAYS_ONb |
412 R300_PIXCLK_TVO_ALWAYS_ONb | 412 R300_PIXCLK_TVO_ALWAYS_ONb |
413 R300_P2G2CLK_ALWAYS_ONb | 413 R300_P2G2CLK_ALWAYS_ONb |
414 R300_P2G2CLK_ALWAYS_ONb); 414 R300_P2G2CLK_DAC_ALWAYS_ONb);
415 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 415 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
416 } else if (rdev->family >= CHIP_RV350) { 416 } else if (rdev->family >= CHIP_RV350) {
417 tmp = RREG32_PLL(R300_SCLK_CNTL2); 417 tmp = RREG32_PLL(R300_SCLK_CNTL2);
@@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
464 R300_PIXCLK_TRANS_ALWAYS_ONb | 464 R300_PIXCLK_TRANS_ALWAYS_ONb |
465 R300_PIXCLK_TVO_ALWAYS_ONb | 465 R300_PIXCLK_TVO_ALWAYS_ONb |
466 R300_P2G2CLK_ALWAYS_ONb | 466 R300_P2G2CLK_ALWAYS_ONb |
467 R300_P2G2CLK_ALWAYS_ONb); 467 R300_P2G2CLK_DAC_ALWAYS_ONb);
468 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 468 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
469 469
470 tmp = RREG32_PLL(RADEON_MCLK_MISC); 470 tmp = RREG32_PLL(RADEON_MCLK_MISC);
@@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
654 R300_PIXCLK_TRANS_ALWAYS_ONb | 654 R300_PIXCLK_TRANS_ALWAYS_ONb |
655 R300_PIXCLK_TVO_ALWAYS_ONb | 655 R300_PIXCLK_TVO_ALWAYS_ONb |
656 R300_P2G2CLK_ALWAYS_ONb | 656 R300_P2G2CLK_ALWAYS_ONb |
657 R300_P2G2CLK_ALWAYS_ONb | 657 R300_P2G2CLK_DAC_ALWAYS_ONb |
658 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 658 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
659 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 659 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
660 } else if (rdev->family >= CHIP_RV350) { 660 } else if (rdev->family >= CHIP_RV350) {
@@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
705 R300_PIXCLK_TRANS_ALWAYS_ONb | 705 R300_PIXCLK_TRANS_ALWAYS_ONb |
706 R300_PIXCLK_TVO_ALWAYS_ONb | 706 R300_PIXCLK_TVO_ALWAYS_ONb |
707 R300_P2G2CLK_ALWAYS_ONb | 707 R300_P2G2CLK_ALWAYS_ONb |
708 R300_P2G2CLK_ALWAYS_ONb | 708 R300_P2G2CLK_DAC_ALWAYS_ONb |
709 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 709 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
710 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 710 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
711 } else { 711 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 748265a105b3..5253cbf6db1f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -49,7 +49,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
49 uint32_t connector_id, 49 uint32_t connector_id,
50 uint32_t supported_device, 50 uint32_t supported_device,
51 int connector_type, 51 int connector_type,
52 struct radeon_i2c_bus_rec *i2c_bus); 52 struct radeon_i2c_bus_rec *i2c_bus,
53 uint16_t connector_object_id);
53 54
54/* from radeon_legacy_encoder.c */ 55/* from radeon_legacy_encoder.c */
55extern void 56extern void
@@ -808,25 +809,25 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
808 lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf; 809 lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
809 810
810 if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) 811 if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
811 lvds->native_mode.panel_yres = 812 lvds->native_mode.vdisplay =
812 ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> 813 ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
813 RADEON_VERT_PANEL_SHIFT) + 1; 814 RADEON_VERT_PANEL_SHIFT) + 1;
814 else 815 else
815 lvds->native_mode.panel_yres = 816 lvds->native_mode.vdisplay =
816 (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1; 817 (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
817 818
818 if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE) 819 if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
819 lvds->native_mode.panel_xres = 820 lvds->native_mode.hdisplay =
820 (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >> 821 (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
821 RADEON_HORZ_PANEL_SHIFT) + 1) * 8; 822 RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
822 else 823 else
823 lvds->native_mode.panel_xres = 824 lvds->native_mode.hdisplay =
824 ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8; 825 ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
825 826
826 if ((lvds->native_mode.panel_xres < 640) || 827 if ((lvds->native_mode.hdisplay < 640) ||
827 (lvds->native_mode.panel_yres < 480)) { 828 (lvds->native_mode.vdisplay < 480)) {
828 lvds->native_mode.panel_xres = 640; 829 lvds->native_mode.hdisplay = 640;
829 lvds->native_mode.panel_yres = 480; 830 lvds->native_mode.vdisplay = 480;
830 } 831 }
831 832
832 ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3; 833 ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
@@ -846,8 +847,8 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
846 lvds->panel_vcc_delay = 200; 847 lvds->panel_vcc_delay = 200;
847 848
848 DRM_INFO("Panel info derived from registers\n"); 849 DRM_INFO("Panel info derived from registers\n");
849 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres, 850 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
850 lvds->native_mode.panel_yres); 851 lvds->native_mode.vdisplay);
851 852
852 return lvds; 853 return lvds;
853} 854}
@@ -882,11 +883,11 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
882 883
883 DRM_INFO("Panel ID String: %s\n", stmp); 884 DRM_INFO("Panel ID String: %s\n", stmp);
884 885
885 lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19); 886 lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
886 lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b); 887 lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
887 888
888 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres, 889 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
889 lvds->native_mode.panel_yres); 890 lvds->native_mode.vdisplay);
890 891
891 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 892 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
892 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 893 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
@@ -944,27 +945,25 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
944 if (tmp == 0) 945 if (tmp == 0)
945 break; 946 break;
946 947
947 if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) && 948 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
948 (RBIOS16(tmp + 2) == 949 (RBIOS16(tmp + 2) ==
949 lvds->native_mode.panel_yres)) { 950 lvds->native_mode.vdisplay)) {
950 lvds->native_mode.hblank = 951 lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
951 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; 952 lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
952 lvds->native_mode.hoverplus = 953 lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
953 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 954 RBIOS16(tmp + 21)) * 8;
954 1) * 8; 955
955 lvds->native_mode.hsync_width = 956 lvds->native_mode.vtotal = RBIOS16(tmp + 24);
956 RBIOS8(tmp + 23) * 8; 957 lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
957 958 lvds->native_mode.vsync_end =
958 lvds->native_mode.vblank = (RBIOS16(tmp + 24) - 959 ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
959 RBIOS16(tmp + 26)); 960 (RBIOS16(tmp + 28) & 0x7ff);
960 lvds->native_mode.voverplus = 961
961 ((RBIOS16(tmp + 28) & 0x7ff) - 962 lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
962 RBIOS16(tmp + 26));
963 lvds->native_mode.vsync_width =
964 ((RBIOS16(tmp + 28) & 0xf800) >> 11);
965 lvds->native_mode.dotclock =
966 RBIOS16(tmp + 9) * 10;
967 lvds->native_mode.flags = 0; 963 lvds->native_mode.flags = 0;
964 /* set crtc values */
965 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
966
968 } 967 }
969 } 968 }
970 } else { 969 } else {
@@ -1178,7 +1177,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1178 radeon_add_legacy_connector(dev, 0, 1177 radeon_add_legacy_connector(dev, 0,
1179 ATOM_DEVICE_CRT1_SUPPORT, 1178 ATOM_DEVICE_CRT1_SUPPORT,
1180 DRM_MODE_CONNECTOR_VGA, 1179 DRM_MODE_CONNECTOR_VGA,
1181 &ddc_i2c); 1180 &ddc_i2c,
1181 CONNECTOR_OBJECT_ID_VGA);
1182 } else if (rdev->flags & RADEON_IS_MOBILITY) { 1182 } else if (rdev->flags & RADEON_IS_MOBILITY) {
1183 /* LVDS */ 1183 /* LVDS */
1184 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); 1184 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
@@ -1190,7 +1190,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1190 radeon_add_legacy_connector(dev, 0, 1190 radeon_add_legacy_connector(dev, 0,
1191 ATOM_DEVICE_LCD1_SUPPORT, 1191 ATOM_DEVICE_LCD1_SUPPORT,
1192 DRM_MODE_CONNECTOR_LVDS, 1192 DRM_MODE_CONNECTOR_LVDS,
1193 &ddc_i2c); 1193 &ddc_i2c,
1194 CONNECTOR_OBJECT_ID_LVDS);
1194 1195
1195 /* VGA - primary dac */ 1196 /* VGA - primary dac */
1196 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1197 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1202,7 +1203,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1202 radeon_add_legacy_connector(dev, 1, 1203 radeon_add_legacy_connector(dev, 1,
1203 ATOM_DEVICE_CRT1_SUPPORT, 1204 ATOM_DEVICE_CRT1_SUPPORT,
1204 DRM_MODE_CONNECTOR_VGA, 1205 DRM_MODE_CONNECTOR_VGA,
1205 &ddc_i2c); 1206 &ddc_i2c,
1207 CONNECTOR_OBJECT_ID_VGA);
1206 } else { 1208 } else {
1207 /* DVI-I - tv dac, int tmds */ 1209 /* DVI-I - tv dac, int tmds */
1208 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1210 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
@@ -1220,7 +1222,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1220 ATOM_DEVICE_DFP1_SUPPORT | 1222 ATOM_DEVICE_DFP1_SUPPORT |
1221 ATOM_DEVICE_CRT2_SUPPORT, 1223 ATOM_DEVICE_CRT2_SUPPORT,
1222 DRM_MODE_CONNECTOR_DVII, 1224 DRM_MODE_CONNECTOR_DVII,
1223 &ddc_i2c); 1225 &ddc_i2c,
1226 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
1224 1227
1225 /* VGA - primary dac */ 1228 /* VGA - primary dac */
1226 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1229 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1232,7 +1235,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1232 radeon_add_legacy_connector(dev, 1, 1235 radeon_add_legacy_connector(dev, 1,
1233 ATOM_DEVICE_CRT1_SUPPORT, 1236 ATOM_DEVICE_CRT1_SUPPORT,
1234 DRM_MODE_CONNECTOR_VGA, 1237 DRM_MODE_CONNECTOR_VGA,
1235 &ddc_i2c); 1238 &ddc_i2c,
1239 CONNECTOR_OBJECT_ID_VGA);
1236 } 1240 }
1237 1241
1238 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { 1242 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
@@ -1245,7 +1249,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1245 radeon_add_legacy_connector(dev, 2, 1249 radeon_add_legacy_connector(dev, 2,
1246 ATOM_DEVICE_TV1_SUPPORT, 1250 ATOM_DEVICE_TV1_SUPPORT,
1247 DRM_MODE_CONNECTOR_SVIDEO, 1251 DRM_MODE_CONNECTOR_SVIDEO,
1248 &ddc_i2c); 1252 &ddc_i2c,
1253 CONNECTOR_OBJECT_ID_SVIDEO);
1249 } 1254 }
1250 break; 1255 break;
1251 case CT_IBOOK: 1256 case CT_IBOOK:
@@ -1259,7 +1264,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1259 0), 1264 0),
1260 ATOM_DEVICE_LCD1_SUPPORT); 1265 ATOM_DEVICE_LCD1_SUPPORT);
1261 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1266 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1262 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); 1267 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1268 CONNECTOR_OBJECT_ID_LVDS);
1263 /* VGA - TV DAC */ 1269 /* VGA - TV DAC */
1264 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1270 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1265 radeon_add_legacy_encoder(dev, 1271 radeon_add_legacy_encoder(dev,
@@ -1268,7 +1274,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1268 2), 1274 2),
1269 ATOM_DEVICE_CRT2_SUPPORT); 1275 ATOM_DEVICE_CRT2_SUPPORT);
1270 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1276 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1271 DRM_MODE_CONNECTOR_VGA, &ddc_i2c); 1277 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1278 CONNECTOR_OBJECT_ID_VGA);
1272 /* TV - TV DAC */ 1279 /* TV - TV DAC */
1273 radeon_add_legacy_encoder(dev, 1280 radeon_add_legacy_encoder(dev,
1274 radeon_get_encoder_id(dev, 1281 radeon_get_encoder_id(dev,
@@ -1277,7 +1284,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1277 ATOM_DEVICE_TV1_SUPPORT); 1284 ATOM_DEVICE_TV1_SUPPORT);
1278 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1285 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1279 DRM_MODE_CONNECTOR_SVIDEO, 1286 DRM_MODE_CONNECTOR_SVIDEO,
1280 &ddc_i2c); 1287 &ddc_i2c,
1288 CONNECTOR_OBJECT_ID_SVIDEO);
1281 break; 1289 break;
1282 case CT_POWERBOOK_EXTERNAL: 1290 case CT_POWERBOOK_EXTERNAL:
1283 DRM_INFO("Connector Table: %d (powerbook external tmds)\n", 1291 DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
@@ -1290,7 +1298,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1290 0), 1298 0),
1291 ATOM_DEVICE_LCD1_SUPPORT); 1299 ATOM_DEVICE_LCD1_SUPPORT);
1292 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1300 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1293 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); 1301 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1302 CONNECTOR_OBJECT_ID_LVDS);
1294 /* DVI-I - primary dac, ext tmds */ 1303 /* DVI-I - primary dac, ext tmds */
1295 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1304 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1296 radeon_add_legacy_encoder(dev, 1305 radeon_add_legacy_encoder(dev,
@@ -1303,10 +1312,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1303 ATOM_DEVICE_CRT1_SUPPORT, 1312 ATOM_DEVICE_CRT1_SUPPORT,
1304 1), 1313 1),
1305 ATOM_DEVICE_CRT1_SUPPORT); 1314 ATOM_DEVICE_CRT1_SUPPORT);
1315 /* XXX some are SL */
1306 radeon_add_legacy_connector(dev, 1, 1316 radeon_add_legacy_connector(dev, 1,
1307 ATOM_DEVICE_DFP2_SUPPORT | 1317 ATOM_DEVICE_DFP2_SUPPORT |
1308 ATOM_DEVICE_CRT1_SUPPORT, 1318 ATOM_DEVICE_CRT1_SUPPORT,
1309 DRM_MODE_CONNECTOR_DVII, &ddc_i2c); 1319 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1320 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
1310 /* TV - TV DAC */ 1321 /* TV - TV DAC */
1311 radeon_add_legacy_encoder(dev, 1322 radeon_add_legacy_encoder(dev,
1312 radeon_get_encoder_id(dev, 1323 radeon_get_encoder_id(dev,
@@ -1315,7 +1326,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1315 ATOM_DEVICE_TV1_SUPPORT); 1326 ATOM_DEVICE_TV1_SUPPORT);
1316 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1327 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1317 DRM_MODE_CONNECTOR_SVIDEO, 1328 DRM_MODE_CONNECTOR_SVIDEO,
1318 &ddc_i2c); 1329 &ddc_i2c,
1330 CONNECTOR_OBJECT_ID_SVIDEO);
1319 break; 1331 break;
1320 case CT_POWERBOOK_INTERNAL: 1332 case CT_POWERBOOK_INTERNAL:
1321 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", 1333 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
@@ -1328,7 +1340,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1328 0), 1340 0),
1329 ATOM_DEVICE_LCD1_SUPPORT); 1341 ATOM_DEVICE_LCD1_SUPPORT);
1330 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1342 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1331 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); 1343 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1344 CONNECTOR_OBJECT_ID_LVDS);
1332 /* DVI-I - primary dac, int tmds */ 1345 /* DVI-I - primary dac, int tmds */
1333 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1346 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1334 radeon_add_legacy_encoder(dev, 1347 radeon_add_legacy_encoder(dev,
@@ -1344,7 +1357,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1344 radeon_add_legacy_connector(dev, 1, 1357 radeon_add_legacy_connector(dev, 1,
1345 ATOM_DEVICE_DFP1_SUPPORT | 1358 ATOM_DEVICE_DFP1_SUPPORT |
1346 ATOM_DEVICE_CRT1_SUPPORT, 1359 ATOM_DEVICE_CRT1_SUPPORT,
1347 DRM_MODE_CONNECTOR_DVII, &ddc_i2c); 1360 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1361 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
1348 /* TV - TV DAC */ 1362 /* TV - TV DAC */
1349 radeon_add_legacy_encoder(dev, 1363 radeon_add_legacy_encoder(dev,
1350 radeon_get_encoder_id(dev, 1364 radeon_get_encoder_id(dev,
@@ -1353,7 +1367,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1353 ATOM_DEVICE_TV1_SUPPORT); 1367 ATOM_DEVICE_TV1_SUPPORT);
1354 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1368 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1355 DRM_MODE_CONNECTOR_SVIDEO, 1369 DRM_MODE_CONNECTOR_SVIDEO,
1356 &ddc_i2c); 1370 &ddc_i2c,
1371 CONNECTOR_OBJECT_ID_SVIDEO);
1357 break; 1372 break;
1358 case CT_POWERBOOK_VGA: 1373 case CT_POWERBOOK_VGA:
1359 DRM_INFO("Connector Table: %d (powerbook vga)\n", 1374 DRM_INFO("Connector Table: %d (powerbook vga)\n",
@@ -1366,7 +1381,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1366 0), 1381 0),
1367 ATOM_DEVICE_LCD1_SUPPORT); 1382 ATOM_DEVICE_LCD1_SUPPORT);
1368 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1383 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1369 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); 1384 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1385 CONNECTOR_OBJECT_ID_LVDS);
1370 /* VGA - primary dac */ 1386 /* VGA - primary dac */
1371 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1387 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1372 radeon_add_legacy_encoder(dev, 1388 radeon_add_legacy_encoder(dev,
@@ -1375,7 +1391,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1375 1), 1391 1),
1376 ATOM_DEVICE_CRT1_SUPPORT); 1392 ATOM_DEVICE_CRT1_SUPPORT);
1377 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, 1393 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
1378 DRM_MODE_CONNECTOR_VGA, &ddc_i2c); 1394 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1395 CONNECTOR_OBJECT_ID_VGA);
1379 /* TV - TV DAC */ 1396 /* TV - TV DAC */
1380 radeon_add_legacy_encoder(dev, 1397 radeon_add_legacy_encoder(dev,
1381 radeon_get_encoder_id(dev, 1398 radeon_get_encoder_id(dev,
@@ -1384,7 +1401,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1384 ATOM_DEVICE_TV1_SUPPORT); 1401 ATOM_DEVICE_TV1_SUPPORT);
1385 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1402 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1386 DRM_MODE_CONNECTOR_SVIDEO, 1403 DRM_MODE_CONNECTOR_SVIDEO,
1387 &ddc_i2c); 1404 &ddc_i2c,
1405 CONNECTOR_OBJECT_ID_SVIDEO);
1388 break; 1406 break;
1389 case CT_MINI_EXTERNAL: 1407 case CT_MINI_EXTERNAL:
1390 DRM_INFO("Connector Table: %d (mini external tmds)\n", 1408 DRM_INFO("Connector Table: %d (mini external tmds)\n",
@@ -1401,10 +1419,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1401 ATOM_DEVICE_CRT2_SUPPORT, 1419 ATOM_DEVICE_CRT2_SUPPORT,
1402 2), 1420 2),
1403 ATOM_DEVICE_CRT2_SUPPORT); 1421 ATOM_DEVICE_CRT2_SUPPORT);
1422 /* XXX are any DL? */
1404 radeon_add_legacy_connector(dev, 0, 1423 radeon_add_legacy_connector(dev, 0,
1405 ATOM_DEVICE_DFP2_SUPPORT | 1424 ATOM_DEVICE_DFP2_SUPPORT |
1406 ATOM_DEVICE_CRT2_SUPPORT, 1425 ATOM_DEVICE_CRT2_SUPPORT,
1407 DRM_MODE_CONNECTOR_DVII, &ddc_i2c); 1426 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1427 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
1408 /* TV - TV DAC */ 1428 /* TV - TV DAC */
1409 radeon_add_legacy_encoder(dev, 1429 radeon_add_legacy_encoder(dev,
1410 radeon_get_encoder_id(dev, 1430 radeon_get_encoder_id(dev,
@@ -1413,7 +1433,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1413 ATOM_DEVICE_TV1_SUPPORT); 1433 ATOM_DEVICE_TV1_SUPPORT);
1414 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1434 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1415 DRM_MODE_CONNECTOR_SVIDEO, 1435 DRM_MODE_CONNECTOR_SVIDEO,
1416 &ddc_i2c); 1436 &ddc_i2c,
1437 CONNECTOR_OBJECT_ID_SVIDEO);
1417 break; 1438 break;
1418 case CT_MINI_INTERNAL: 1439 case CT_MINI_INTERNAL:
1419 DRM_INFO("Connector Table: %d (mini internal tmds)\n", 1440 DRM_INFO("Connector Table: %d (mini internal tmds)\n",
@@ -1433,7 +1454,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1433 radeon_add_legacy_connector(dev, 0, 1454 radeon_add_legacy_connector(dev, 0,
1434 ATOM_DEVICE_DFP1_SUPPORT | 1455 ATOM_DEVICE_DFP1_SUPPORT |
1435 ATOM_DEVICE_CRT2_SUPPORT, 1456 ATOM_DEVICE_CRT2_SUPPORT,
1436 DRM_MODE_CONNECTOR_DVII, &ddc_i2c); 1457 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1458 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
1437 /* TV - TV DAC */ 1459 /* TV - TV DAC */
1438 radeon_add_legacy_encoder(dev, 1460 radeon_add_legacy_encoder(dev,
1439 radeon_get_encoder_id(dev, 1461 radeon_get_encoder_id(dev,
@@ -1442,7 +1464,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1442 ATOM_DEVICE_TV1_SUPPORT); 1464 ATOM_DEVICE_TV1_SUPPORT);
1443 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1465 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1444 DRM_MODE_CONNECTOR_SVIDEO, 1466 DRM_MODE_CONNECTOR_SVIDEO,
1445 &ddc_i2c); 1467 &ddc_i2c,
1468 CONNECTOR_OBJECT_ID_SVIDEO);
1446 break; 1469 break;
1447 case CT_IMAC_G5_ISIGHT: 1470 case CT_IMAC_G5_ISIGHT:
1448 DRM_INFO("Connector Table: %d (imac g5 isight)\n", 1471 DRM_INFO("Connector Table: %d (imac g5 isight)\n",
@@ -1455,7 +1478,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1455 0), 1478 0),
1456 ATOM_DEVICE_DFP1_SUPPORT); 1479 ATOM_DEVICE_DFP1_SUPPORT);
1457 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, 1480 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
1458 DRM_MODE_CONNECTOR_DVID, &ddc_i2c); 1481 DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
1482 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
1459 /* VGA - tv dac */ 1483 /* VGA - tv dac */
1460 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1484 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1461 radeon_add_legacy_encoder(dev, 1485 radeon_add_legacy_encoder(dev,
@@ -1464,7 +1488,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1464 2), 1488 2),
1465 ATOM_DEVICE_CRT2_SUPPORT); 1489 ATOM_DEVICE_CRT2_SUPPORT);
1466 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1490 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1467 DRM_MODE_CONNECTOR_VGA, &ddc_i2c); 1491 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1492 CONNECTOR_OBJECT_ID_VGA);
1468 /* TV - TV DAC */ 1493 /* TV - TV DAC */
1469 radeon_add_legacy_encoder(dev, 1494 radeon_add_legacy_encoder(dev,
1470 radeon_get_encoder_id(dev, 1495 radeon_get_encoder_id(dev,
@@ -1473,7 +1498,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1473 ATOM_DEVICE_TV1_SUPPORT); 1498 ATOM_DEVICE_TV1_SUPPORT);
1474 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1499 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1475 DRM_MODE_CONNECTOR_SVIDEO, 1500 DRM_MODE_CONNECTOR_SVIDEO,
1476 &ddc_i2c); 1501 &ddc_i2c,
1502 CONNECTOR_OBJECT_ID_SVIDEO);
1477 break; 1503 break;
1478 case CT_EMAC: 1504 case CT_EMAC:
1479 DRM_INFO("Connector Table: %d (emac)\n", 1505 DRM_INFO("Connector Table: %d (emac)\n",
@@ -1486,7 +1512,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1486 1), 1512 1),
1487 ATOM_DEVICE_CRT1_SUPPORT); 1513 ATOM_DEVICE_CRT1_SUPPORT);
1488 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, 1514 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1489 DRM_MODE_CONNECTOR_VGA, &ddc_i2c); 1515 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1516 CONNECTOR_OBJECT_ID_VGA);
1490 /* VGA - tv dac */ 1517 /* VGA - tv dac */
1491 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1518 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1492 radeon_add_legacy_encoder(dev, 1519 radeon_add_legacy_encoder(dev,
@@ -1495,7 +1522,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1495 2), 1522 2),
1496 ATOM_DEVICE_CRT2_SUPPORT); 1523 ATOM_DEVICE_CRT2_SUPPORT);
1497 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1524 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1498 DRM_MODE_CONNECTOR_VGA, &ddc_i2c); 1525 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1526 CONNECTOR_OBJECT_ID_VGA);
1499 /* TV - TV DAC */ 1527 /* TV - TV DAC */
1500 radeon_add_legacy_encoder(dev, 1528 radeon_add_legacy_encoder(dev,
1501 radeon_get_encoder_id(dev, 1529 radeon_get_encoder_id(dev,
@@ -1504,7 +1532,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1504 ATOM_DEVICE_TV1_SUPPORT); 1532 ATOM_DEVICE_TV1_SUPPORT);
1505 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1533 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1506 DRM_MODE_CONNECTOR_SVIDEO, 1534 DRM_MODE_CONNECTOR_SVIDEO,
1507 &ddc_i2c); 1535 &ddc_i2c,
1536 CONNECTOR_OBJECT_ID_SVIDEO);
1508 break; 1537 break;
1509 default: 1538 default:
1510 DRM_INFO("Connector table: %d (invalid)\n", 1539 DRM_INFO("Connector table: %d (invalid)\n",
@@ -1581,11 +1610,63 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1581 return true; 1610 return true;
1582} 1611}
1583 1612
1613static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
1614{
1615 /* Acer 5102 has non-existent TV port */
1616 if (dev->pdev->device == 0x5975 &&
1617 dev->pdev->subsystem_vendor == 0x1025 &&
1618 dev->pdev->subsystem_device == 0x009f)
1619 return false;
1620
1621 /* HP dc5750 has non-existent TV port */
1622 if (dev->pdev->device == 0x5974 &&
1623 dev->pdev->subsystem_vendor == 0x103c &&
1624 dev->pdev->subsystem_device == 0x280a)
1625 return false;
1626
1627 return true;
1628}
1629
1630static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
1631{
1632 struct radeon_device *rdev = dev->dev_private;
1633 uint32_t ext_tmds_info;
1634
1635 if (rdev->flags & RADEON_IS_IGP) {
1636 if (is_dvi_d)
1637 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
1638 else
1639 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
1640 }
1641 ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
1642 if (ext_tmds_info) {
1643 uint8_t rev = RBIOS8(ext_tmds_info);
1644 uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
1645 if (rev >= 3) {
1646 if (is_dvi_d)
1647 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
1648 else
1649 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
1650 } else {
1651 if (flags & 1) {
1652 if (is_dvi_d)
1653 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
1654 else
1655 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
1656 }
1657 }
1658 }
1659 if (is_dvi_d)
1660 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
1661 else
1662 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
1663}
1664
1584bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 1665bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1585{ 1666{
1586 struct radeon_device *rdev = dev->dev_private; 1667 struct radeon_device *rdev = dev->dev_private;
1587 uint32_t conn_info, entry, devices; 1668 uint32_t conn_info, entry, devices;
1588 uint16_t tmp; 1669 uint16_t tmp, connector_object_id;
1589 enum radeon_combios_ddc ddc_type; 1670 enum radeon_combios_ddc ddc_type;
1590 enum radeon_combios_connector connector; 1671 enum radeon_combios_connector connector;
1591 int i = 0; 1672 int i = 0;
@@ -1628,8 +1709,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1628 break; 1709 break;
1629 } 1710 }
1630 1711
1631 radeon_apply_legacy_quirks(dev, i, &connector, 1712 if (!radeon_apply_legacy_quirks(dev, i, &connector,
1632 &ddc_i2c); 1713 &ddc_i2c))
1714 continue;
1633 1715
1634 switch (connector) { 1716 switch (connector) {
1635 case CONNECTOR_PROPRIETARY_LEGACY: 1717 case CONNECTOR_PROPRIETARY_LEGACY:
@@ -1644,7 +1726,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1644 radeon_add_legacy_connector(dev, i, devices, 1726 radeon_add_legacy_connector(dev, i, devices,
1645 legacy_connector_convert 1727 legacy_connector_convert
1646 [connector], 1728 [connector],
1647 &ddc_i2c); 1729 &ddc_i2c,
1730 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
1648 break; 1731 break;
1649 case CONNECTOR_CRT_LEGACY: 1732 case CONNECTOR_CRT_LEGACY:
1650 if (tmp & 0x1) { 1733 if (tmp & 0x1) {
@@ -1669,7 +1752,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1669 devices, 1752 devices,
1670 legacy_connector_convert 1753 legacy_connector_convert
1671 [connector], 1754 [connector],
1672 &ddc_i2c); 1755 &ddc_i2c,
1756 CONNECTOR_OBJECT_ID_VGA);
1673 break; 1757 break;
1674 case CONNECTOR_DVI_I_LEGACY: 1758 case CONNECTOR_DVI_I_LEGACY:
1675 devices = 0; 1759 devices = 0;
@@ -1698,6 +1782,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1698 ATOM_DEVICE_DFP2_SUPPORT, 1782 ATOM_DEVICE_DFP2_SUPPORT,
1699 0), 1783 0),
1700 ATOM_DEVICE_DFP2_SUPPORT); 1784 ATOM_DEVICE_DFP2_SUPPORT);
1785 connector_object_id = combios_check_dl_dvi(dev, 0);
1701 } else { 1786 } else {
1702 devices |= ATOM_DEVICE_DFP1_SUPPORT; 1787 devices |= ATOM_DEVICE_DFP1_SUPPORT;
1703 radeon_add_legacy_encoder(dev, 1788 radeon_add_legacy_encoder(dev,
@@ -1706,19 +1791,24 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1706 ATOM_DEVICE_DFP1_SUPPORT, 1791 ATOM_DEVICE_DFP1_SUPPORT,
1707 0), 1792 0),
1708 ATOM_DEVICE_DFP1_SUPPORT); 1793 ATOM_DEVICE_DFP1_SUPPORT);
1794 connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
1709 } 1795 }
1710 radeon_add_legacy_connector(dev, 1796 radeon_add_legacy_connector(dev,
1711 i, 1797 i,
1712 devices, 1798 devices,
1713 legacy_connector_convert 1799 legacy_connector_convert
1714 [connector], 1800 [connector],
1715 &ddc_i2c); 1801 &ddc_i2c,
1802 connector_object_id);
1716 break; 1803 break;
1717 case CONNECTOR_DVI_D_LEGACY: 1804 case CONNECTOR_DVI_D_LEGACY:
1718 if ((tmp >> 4) & 0x1) 1805 if ((tmp >> 4) & 0x1) {
1719 devices = ATOM_DEVICE_DFP2_SUPPORT; 1806 devices = ATOM_DEVICE_DFP2_SUPPORT;
1720 else 1807 connector_object_id = combios_check_dl_dvi(dev, 1);
1808 } else {
1721 devices = ATOM_DEVICE_DFP1_SUPPORT; 1809 devices = ATOM_DEVICE_DFP1_SUPPORT;
1810 connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
1811 }
1722 radeon_add_legacy_encoder(dev, 1812 radeon_add_legacy_encoder(dev,
1723 radeon_get_encoder_id 1813 radeon_get_encoder_id
1724 (dev, devices, 0), 1814 (dev, devices, 0),
@@ -1726,7 +1816,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1726 radeon_add_legacy_connector(dev, i, devices, 1816 radeon_add_legacy_connector(dev, i, devices,
1727 legacy_connector_convert 1817 legacy_connector_convert
1728 [connector], 1818 [connector],
1729 &ddc_i2c); 1819 &ddc_i2c,
1820 connector_object_id);
1730 break; 1821 break;
1731 case CONNECTOR_CTV_LEGACY: 1822 case CONNECTOR_CTV_LEGACY:
1732 case CONNECTOR_STV_LEGACY: 1823 case CONNECTOR_STV_LEGACY:
@@ -1740,7 +1831,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1740 ATOM_DEVICE_TV1_SUPPORT, 1831 ATOM_DEVICE_TV1_SUPPORT,
1741 legacy_connector_convert 1832 legacy_connector_convert
1742 [connector], 1833 [connector],
1743 &ddc_i2c); 1834 &ddc_i2c,
1835 CONNECTOR_OBJECT_ID_SVIDEO);
1744 break; 1836 break;
1745 default: 1837 default:
1746 DRM_ERROR("Unknown connector type: %d\n", 1838 DRM_ERROR("Unknown connector type: %d\n",
@@ -1772,10 +1864,29 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1772 ATOM_DEVICE_CRT1_SUPPORT | 1864 ATOM_DEVICE_CRT1_SUPPORT |
1773 ATOM_DEVICE_DFP1_SUPPORT, 1865 ATOM_DEVICE_DFP1_SUPPORT,
1774 DRM_MODE_CONNECTOR_DVII, 1866 DRM_MODE_CONNECTOR_DVII,
1775 &ddc_i2c); 1867 &ddc_i2c,
1868 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
1776 } else { 1869 } else {
1777 DRM_DEBUG("No connector info found\n"); 1870 uint16_t crt_info =
1778 return false; 1871 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
1872 DRM_DEBUG("Found CRT table, assuming VGA connector\n");
1873 if (crt_info) {
1874 radeon_add_legacy_encoder(dev,
1875 radeon_get_encoder_id(dev,
1876 ATOM_DEVICE_CRT1_SUPPORT,
1877 1),
1878 ATOM_DEVICE_CRT1_SUPPORT);
1879 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1880 radeon_add_legacy_connector(dev,
1881 0,
1882 ATOM_DEVICE_CRT1_SUPPORT,
1883 DRM_MODE_CONNECTOR_VGA,
1884 &ddc_i2c,
1885 CONNECTOR_OBJECT_ID_VGA);
1886 } else {
1887 DRM_DEBUG("No connector info found\n");
1888 return false;
1889 }
1779 } 1890 }
1780 } 1891 }
1781 1892
@@ -1870,7 +1981,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1870 5, 1981 5,
1871 ATOM_DEVICE_LCD1_SUPPORT, 1982 ATOM_DEVICE_LCD1_SUPPORT,
1872 DRM_MODE_CONNECTOR_LVDS, 1983 DRM_MODE_CONNECTOR_LVDS,
1873 &ddc_i2c); 1984 &ddc_i2c,
1985 CONNECTOR_OBJECT_ID_LVDS);
1874 } 1986 }
1875 } 1987 }
1876 1988
@@ -1880,16 +1992,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1880 combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 1992 combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
1881 if (tv_info) { 1993 if (tv_info) {
1882 if (RBIOS8(tv_info + 6) == 'T') { 1994 if (RBIOS8(tv_info + 6) == 'T') {
1883 radeon_add_legacy_encoder(dev, 1995 if (radeon_apply_legacy_tv_quirks(dev)) {
1884 radeon_get_encoder_id 1996 radeon_add_legacy_encoder(dev,
1885 (dev, 1997 radeon_get_encoder_id
1886 ATOM_DEVICE_TV1_SUPPORT, 1998 (dev,
1887 2), 1999 ATOM_DEVICE_TV1_SUPPORT,
1888 ATOM_DEVICE_TV1_SUPPORT); 2000 2),
1889 radeon_add_legacy_connector(dev, 6, 2001 ATOM_DEVICE_TV1_SUPPORT);
1890 ATOM_DEVICE_TV1_SUPPORT, 2002 radeon_add_legacy_connector(dev, 6,
1891 DRM_MODE_CONNECTOR_SVIDEO, 2003 ATOM_DEVICE_TV1_SUPPORT,
1892 &ddc_i2c); 2004 DRM_MODE_CONNECTOR_SVIDEO,
2005 &ddc_i2c,
2006 CONNECTOR_OBJECT_ID_SVIDEO);
2007 }
1893 } 2008 }
1894 } 2009 }
1895 } 2010 }
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index af1d551f1a8f..fce4c4087fda 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "drm_edid.h" 27#include "drm_edid.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "drm_fb_helper.h"
29#include "radeon_drm.h" 30#include "radeon_drm.h"
30#include "radeon.h" 31#include "radeon.h"
31#include "atom.h" 32#include "atom.h"
@@ -177,25 +178,12 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
177 struct drm_device *dev = encoder->dev; 178 struct drm_device *dev = encoder->dev;
178 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 179 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
179 struct drm_display_mode *mode = NULL; 180 struct drm_display_mode *mode = NULL;
180 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; 181 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
181
182 if (native_mode->panel_xres != 0 &&
183 native_mode->panel_yres != 0 &&
184 native_mode->dotclock != 0) {
185 mode = drm_mode_create(dev);
186
187 mode->hdisplay = native_mode->panel_xres;
188 mode->vdisplay = native_mode->panel_yres;
189
190 mode->htotal = mode->hdisplay + native_mode->hblank;
191 mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
192 mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
193 mode->vtotal = mode->vdisplay + native_mode->vblank;
194 mode->vsync_start = mode->vdisplay + native_mode->voverplus;
195 mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
196 mode->clock = native_mode->dotclock;
197 mode->flags = 0;
198 182
183 if (native_mode->hdisplay != 0 &&
184 native_mode->vdisplay != 0 &&
185 native_mode->clock != 0) {
186 mode = drm_mode_duplicate(dev, native_mode);
199 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; 187 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
200 drm_mode_set_name(mode); 188 drm_mode_set_name(mode);
201 189
@@ -209,7 +197,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
209 struct drm_device *dev = encoder->dev; 197 struct drm_device *dev = encoder->dev;
210 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 198 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
211 struct drm_display_mode *mode = NULL; 199 struct drm_display_mode *mode = NULL;
212 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; 200 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
213 int i; 201 int i;
214 struct mode_size { 202 struct mode_size {
215 int w; 203 int w;
@@ -235,17 +223,22 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
235 }; 223 };
236 224
237 for (i = 0; i < 17; i++) { 225 for (i = 0; i < 17; i++) {
226 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
227 if (common_modes[i].w > 1024 ||
228 common_modes[i].h > 768)
229 continue;
230 }
238 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 231 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
239 if (common_modes[i].w > native_mode->panel_xres || 232 if (common_modes[i].w > native_mode->hdisplay ||
240 common_modes[i].h > native_mode->panel_yres || 233 common_modes[i].h > native_mode->vdisplay ||
241 (common_modes[i].w == native_mode->panel_xres && 234 (common_modes[i].w == native_mode->hdisplay &&
242 common_modes[i].h == native_mode->panel_yres)) 235 common_modes[i].h == native_mode->vdisplay))
243 continue; 236 continue;
244 } 237 }
245 if (common_modes[i].w < 320 || common_modes[i].h < 200) 238 if (common_modes[i].w < 320 || common_modes[i].h < 200)
246 continue; 239 continue;
247 240
248 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); 241 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
249 drm_mode_probed_add(connector, mode); 242 drm_mode_probed_add(connector, mode);
250 } 243 }
251} 244}
@@ -343,28 +336,23 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
343 struct drm_connector *connector) 336 struct drm_connector *connector)
344{ 337{
345 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 338 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
346 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; 339 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
347 340
348 /* Try to get native mode details from EDID if necessary */ 341 /* Try to get native mode details from EDID if necessary */
349 if (!native_mode->dotclock) { 342 if (!native_mode->clock) {
350 struct drm_display_mode *t, *mode; 343 struct drm_display_mode *t, *mode;
351 344
352 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { 345 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
353 if (mode->hdisplay == native_mode->panel_xres && 346 if (mode->hdisplay == native_mode->hdisplay &&
354 mode->vdisplay == native_mode->panel_yres) { 347 mode->vdisplay == native_mode->vdisplay) {
355 native_mode->hblank = mode->htotal - mode->hdisplay; 348 *native_mode = *mode;
356 native_mode->hoverplus = mode->hsync_start - mode->hdisplay; 349 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
357 native_mode->hsync_width = mode->hsync_end - mode->hsync_start;
358 native_mode->vblank = mode->vtotal - mode->vdisplay;
359 native_mode->voverplus = mode->vsync_start - mode->vdisplay;
360 native_mode->vsync_width = mode->vsync_end - mode->vsync_start;
361 native_mode->dotclock = mode->clock;
362 DRM_INFO("Determined LVDS native mode details from EDID\n"); 350 DRM_INFO("Determined LVDS native mode details from EDID\n");
363 break; 351 break;
364 } 352 }
365 } 353 }
366 } 354 }
367 if (!native_mode->dotclock) { 355 if (!native_mode->clock) {
368 DRM_INFO("No LVDS native mode details, disabling RMX\n"); 356 DRM_INFO("No LVDS native mode details, disabling RMX\n");
369 radeon_encoder->rmx_type = RMX_OFF; 357 radeon_encoder->rmx_type = RMX_OFF;
370 } 358 }
@@ -409,13 +397,64 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
409static int radeon_lvds_mode_valid(struct drm_connector *connector, 397static int radeon_lvds_mode_valid(struct drm_connector *connector,
410 struct drm_display_mode *mode) 398 struct drm_display_mode *mode)
411{ 399{
400 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
401
402 if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
403 return MODE_PANEL;
404
405 if (encoder) {
406 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
407 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
408
409 /* AVIVO hardware supports downscaling modes larger than the panel
410 * to the panel size, but I'm not sure this is desirable.
411 */
412 if ((mode->hdisplay > native_mode->hdisplay) ||
413 (mode->vdisplay > native_mode->vdisplay))
414 return MODE_PANEL;
415
416 /* if scaling is disabled, block non-native modes */
417 if (radeon_encoder->rmx_type == RMX_OFF) {
418 if ((mode->hdisplay != native_mode->hdisplay) ||
419 (mode->vdisplay != native_mode->vdisplay))
420 return MODE_PANEL;
421 }
422 }
423
412 return MODE_OK; 424 return MODE_OK;
413} 425}
414 426
415static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) 427static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
416{ 428{
417 enum drm_connector_status ret = connector_status_connected; 429 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
430 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
431 enum drm_connector_status ret = connector_status_disconnected;
432
433 if (encoder) {
434 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
435 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
436
437 /* check if panel is valid */
438 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
439 ret = connector_status_connected;
440
441 }
442
443 /* check for edid as well */
444 if (radeon_connector->edid)
445 ret = connector_status_connected;
446 else {
447 if (radeon_connector->ddc_bus) {
448 radeon_i2c_do_lock(radeon_connector, 1);
449 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
450 &radeon_connector->ddc_bus->adapter);
451 radeon_i2c_do_lock(radeon_connector, 0);
452 if (radeon_connector->edid)
453 ret = connector_status_connected;
454 }
455 }
418 /* check acpi lid status ??? */ 456 /* check acpi lid status ??? */
457
419 radeon_connector_update_scratch_regs(connector, ret); 458 radeon_connector_update_scratch_regs(connector, ret);
420 return ret; 459 return ret;
421} 460}
@@ -426,6 +465,8 @@ static void radeon_connector_destroy(struct drm_connector *connector)
426 465
427 if (radeon_connector->ddc_bus) 466 if (radeon_connector->ddc_bus)
428 radeon_i2c_destroy(radeon_connector->ddc_bus); 467 radeon_i2c_destroy(radeon_connector->ddc_bus);
468 if (radeon_connector->edid)
469 kfree(radeon_connector->edid);
429 kfree(radeon_connector->con_priv); 470 kfree(radeon_connector->con_priv);
430 drm_sysfs_connector_remove(connector); 471 drm_sysfs_connector_remove(connector);
431 drm_connector_cleanup(connector); 472 drm_connector_cleanup(connector);
@@ -495,6 +536,8 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
495static int radeon_vga_mode_valid(struct drm_connector *connector, 536static int radeon_vga_mode_valid(struct drm_connector *connector,
496 struct drm_display_mode *mode) 537 struct drm_display_mode *mode)
497{ 538{
539 /* XXX check mode bandwidth */
540 /* XXX verify against max DAC output frequency */
498 return MODE_OK; 541 return MODE_OK;
499} 542}
500 543
@@ -513,9 +556,32 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
513 radeon_i2c_do_lock(radeon_connector, 1); 556 radeon_i2c_do_lock(radeon_connector, 1);
514 dret = radeon_ddc_probe(radeon_connector); 557 dret = radeon_ddc_probe(radeon_connector);
515 radeon_i2c_do_lock(radeon_connector, 0); 558 radeon_i2c_do_lock(radeon_connector, 0);
516 if (dret) 559 if (dret) {
517 ret = connector_status_connected; 560 if (radeon_connector->edid) {
518 else { 561 kfree(radeon_connector->edid);
562 radeon_connector->edid = NULL;
563 }
564 radeon_i2c_do_lock(radeon_connector, 1);
565 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
566 radeon_i2c_do_lock(radeon_connector, 0);
567
568 if (!radeon_connector->edid) {
569 DRM_ERROR("DDC responded but not EDID found for %s\n",
570 drm_get_connector_name(connector));
571 } else {
572 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
573
574 /* some oems have boards with separate digital and analog connectors
575 * with a shared ddc line (often vga + hdmi)
576 */
577 if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
578 kfree(radeon_connector->edid);
579 radeon_connector->edid = NULL;
580 ret = connector_status_disconnected;
581 } else
582 ret = connector_status_connected;
583 }
584 } else {
519 if (radeon_connector->dac_load_detect) { 585 if (radeon_connector->dac_load_detect) {
520 encoder_funcs = encoder->helper_private; 586 encoder_funcs = encoder->helper_private;
521 ret = encoder_funcs->detect(encoder, connector); 587 ret = encoder_funcs->detect(encoder, connector);
@@ -559,7 +625,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
559 radeon_add_common_modes(encoder, connector); 625 radeon_add_common_modes(encoder, connector);
560 else { 626 else {
561 /* only 800x600 is supported right now on pre-avivo chips */ 627 /* only 800x600 is supported right now on pre-avivo chips */
562 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); 628 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
563 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 629 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
564 drm_mode_probed_add(connector, tv_mode); 630 drm_mode_probed_add(connector, tv_mode);
565 } 631 }
@@ -569,6 +635,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
569static int radeon_tv_mode_valid(struct drm_connector *connector, 635static int radeon_tv_mode_valid(struct drm_connector *connector,
570 struct drm_display_mode *mode) 636 struct drm_display_mode *mode)
571{ 637{
638 if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
639 return MODE_CLOCK_RANGE;
572 return MODE_OK; 640 return MODE_OK;
573} 641}
574 642
@@ -643,6 +711,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
643 dret = radeon_ddc_probe(radeon_connector); 711 dret = radeon_ddc_probe(radeon_connector);
644 radeon_i2c_do_lock(radeon_connector, 0); 712 radeon_i2c_do_lock(radeon_connector, 0);
645 if (dret) { 713 if (dret) {
714 if (radeon_connector->edid) {
715 kfree(radeon_connector->edid);
716 radeon_connector->edid = NULL;
717 }
646 radeon_i2c_do_lock(radeon_connector, 1); 718 radeon_i2c_do_lock(radeon_connector, 1);
647 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 719 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
648 radeon_i2c_do_lock(radeon_connector, 0); 720 radeon_i2c_do_lock(radeon_connector, 0);
@@ -653,10 +725,15 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
653 } else { 725 } else {
654 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 726 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
655 727
656 /* if this isn't a digital monitor 728 /* some oems have boards with separate digital and analog connectors
657 then we need to make sure we don't have any 729 * with a shared ddc line (often vga + hdmi)
658 TV conflicts */ 730 */
659 ret = connector_status_connected; 731 if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
732 kfree(radeon_connector->edid);
733 radeon_connector->edid = NULL;
734 ret = connector_status_disconnected;
735 } else
736 ret = connector_status_connected;
660 } 737 }
661 } 738 }
662 739
@@ -743,9 +820,36 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
743 return NULL; 820 return NULL;
744} 821}
745 822
823static void radeon_dvi_force(struct drm_connector *connector)
824{
825 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
826 if (connector->force == DRM_FORCE_ON)
827 radeon_connector->use_digital = false;
828 if (connector->force == DRM_FORCE_ON_DIGITAL)
829 radeon_connector->use_digital = true;
830}
831
832static int radeon_dvi_mode_valid(struct drm_connector *connector,
833 struct drm_display_mode *mode)
834{
835 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
836
837 /* XXX check mode bandwidth */
838
839 if (radeon_connector->use_digital && (mode->clock > 165000)) {
840 if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
841 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
842 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
843 return MODE_OK;
844 else
845 return MODE_CLOCK_HIGH;
846 }
847 return MODE_OK;
848}
849
746struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { 850struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
747 .get_modes = radeon_dvi_get_modes, 851 .get_modes = radeon_dvi_get_modes,
748 .mode_valid = radeon_vga_mode_valid, 852 .mode_valid = radeon_dvi_mode_valid,
749 .best_encoder = radeon_dvi_encoder, 853 .best_encoder = radeon_dvi_encoder,
750}; 854};
751 855
@@ -755,6 +859,7 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
755 .fill_modes = drm_helper_probe_single_connector_modes, 859 .fill_modes = drm_helper_probe_single_connector_modes,
756 .set_property = radeon_connector_set_property, 860 .set_property = radeon_connector_set_property,
757 .destroy = radeon_connector_destroy, 861 .destroy = radeon_connector_destroy,
862 .force = radeon_dvi_force,
758}; 863};
759 864
760void 865void
@@ -764,13 +869,16 @@ radeon_add_atom_connector(struct drm_device *dev,
764 int connector_type, 869 int connector_type,
765 struct radeon_i2c_bus_rec *i2c_bus, 870 struct radeon_i2c_bus_rec *i2c_bus,
766 bool linkb, 871 bool linkb,
767 uint32_t igp_lane_info) 872 uint32_t igp_lane_info,
873 uint16_t connector_object_id)
768{ 874{
769 struct radeon_device *rdev = dev->dev_private; 875 struct radeon_device *rdev = dev->dev_private;
770 struct drm_connector *connector; 876 struct drm_connector *connector;
771 struct radeon_connector *radeon_connector; 877 struct radeon_connector *radeon_connector;
772 struct radeon_connector_atom_dig *radeon_dig_connector; 878 struct radeon_connector_atom_dig *radeon_dig_connector;
773 uint32_t subpixel_order = SubPixelNone; 879 uint32_t subpixel_order = SubPixelNone;
880 bool shared_ddc = false;
881 int ret;
774 882
775 /* fixme - tv/cv/din */ 883 /* fixme - tv/cv/din */
776 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 884 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -783,6 +891,13 @@ radeon_add_atom_connector(struct drm_device *dev,
783 radeon_connector->devices |= supported_device; 891 radeon_connector->devices |= supported_device;
784 return; 892 return;
785 } 893 }
894 if (radeon_connector->ddc_bus && i2c_bus->valid) {
895 if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
896 sizeof(struct radeon_i2c_bus_rec)) == 0) {
897 radeon_connector->shared_ddc = true;
898 shared_ddc = true;
899 }
900 }
786 } 901 }
787 902
788 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); 903 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
@@ -793,27 +908,35 @@ radeon_add_atom_connector(struct drm_device *dev,
793 908
794 radeon_connector->connector_id = connector_id; 909 radeon_connector->connector_id = connector_id;
795 radeon_connector->devices = supported_device; 910 radeon_connector->devices = supported_device;
911 radeon_connector->shared_ddc = shared_ddc;
912 radeon_connector->connector_object_id = connector_object_id;
796 switch (connector_type) { 913 switch (connector_type) {
797 case DRM_MODE_CONNECTOR_VGA: 914 case DRM_MODE_CONNECTOR_VGA:
798 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 915 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
799 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 916 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
917 if (ret)
918 goto failed;
800 if (i2c_bus->valid) { 919 if (i2c_bus->valid) {
801 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 920 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
802 if (!radeon_connector->ddc_bus) 921 if (!radeon_connector->ddc_bus)
803 goto failed; 922 goto failed;
804 } 923 }
924 radeon_connector->dac_load_detect = true;
805 drm_connector_attach_property(&radeon_connector->base, 925 drm_connector_attach_property(&radeon_connector->base,
806 rdev->mode_info.load_detect_property, 926 rdev->mode_info.load_detect_property,
807 1); 927 1);
808 break; 928 break;
809 case DRM_MODE_CONNECTOR_DVIA: 929 case DRM_MODE_CONNECTOR_DVIA:
810 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 930 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
811 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 931 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
932 if (ret)
933 goto failed;
812 if (i2c_bus->valid) { 934 if (i2c_bus->valid) {
813 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 935 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
814 if (!radeon_connector->ddc_bus) 936 if (!radeon_connector->ddc_bus)
815 goto failed; 937 goto failed;
816 } 938 }
939 radeon_connector->dac_load_detect = true;
817 drm_connector_attach_property(&radeon_connector->base, 940 drm_connector_attach_property(&radeon_connector->base,
818 rdev->mode_info.load_detect_property, 941 rdev->mode_info.load_detect_property,
819 1); 942 1);
@@ -827,7 +950,9 @@ radeon_add_atom_connector(struct drm_device *dev,
827 radeon_dig_connector->igp_lane_info = igp_lane_info; 950 radeon_dig_connector->igp_lane_info = igp_lane_info;
828 radeon_connector->con_priv = radeon_dig_connector; 951 radeon_connector->con_priv = radeon_dig_connector;
829 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 952 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
830 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 953 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
954 if (ret)
955 goto failed;
831 if (i2c_bus->valid) { 956 if (i2c_bus->valid) {
832 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 957 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
833 if (!radeon_connector->ddc_bus) 958 if (!radeon_connector->ddc_bus)
@@ -837,6 +962,7 @@ radeon_add_atom_connector(struct drm_device *dev,
837 drm_connector_attach_property(&radeon_connector->base, 962 drm_connector_attach_property(&radeon_connector->base,
838 rdev->mode_info.coherent_mode_property, 963 rdev->mode_info.coherent_mode_property,
839 1); 964 1);
965 radeon_connector->dac_load_detect = true;
840 drm_connector_attach_property(&radeon_connector->base, 966 drm_connector_attach_property(&radeon_connector->base,
841 rdev->mode_info.load_detect_property, 967 rdev->mode_info.load_detect_property,
842 1); 968 1);
@@ -850,7 +976,9 @@ radeon_add_atom_connector(struct drm_device *dev,
850 radeon_dig_connector->igp_lane_info = igp_lane_info; 976 radeon_dig_connector->igp_lane_info = igp_lane_info;
851 radeon_connector->con_priv = radeon_dig_connector; 977 radeon_connector->con_priv = radeon_dig_connector;
852 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 978 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
853 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 979 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
980 if (ret)
981 goto failed;
854 if (i2c_bus->valid) { 982 if (i2c_bus->valid) {
855 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); 983 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
856 if (!radeon_connector->ddc_bus) 984 if (!radeon_connector->ddc_bus)
@@ -869,7 +997,9 @@ radeon_add_atom_connector(struct drm_device *dev,
869 radeon_dig_connector->igp_lane_info = igp_lane_info; 997 radeon_dig_connector->igp_lane_info = igp_lane_info;
870 radeon_connector->con_priv = radeon_dig_connector; 998 radeon_connector->con_priv = radeon_dig_connector;
871 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 999 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
872 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1000 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1001 if (ret)
1002 goto failed;
873 if (i2c_bus->valid) { 1003 if (i2c_bus->valid) {
874 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 1004 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
875 if (!radeon_connector->ddc_bus) 1005 if (!radeon_connector->ddc_bus)
@@ -882,11 +1012,14 @@ radeon_add_atom_connector(struct drm_device *dev,
882 case DRM_MODE_CONNECTOR_9PinDIN: 1012 case DRM_MODE_CONNECTOR_9PinDIN:
883 if (radeon_tv == 1) { 1013 if (radeon_tv == 1) {
884 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1014 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
885 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1015 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1016 if (ret)
1017 goto failed;
1018 radeon_connector->dac_load_detect = true;
1019 drm_connector_attach_property(&radeon_connector->base,
1020 rdev->mode_info.load_detect_property,
1021 1);
886 } 1022 }
887 drm_connector_attach_property(&radeon_connector->base,
888 rdev->mode_info.load_detect_property,
889 1);
890 break; 1023 break;
891 case DRM_MODE_CONNECTOR_LVDS: 1024 case DRM_MODE_CONNECTOR_LVDS:
892 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1025 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -896,7 +1029,9 @@ radeon_add_atom_connector(struct drm_device *dev,
896 radeon_dig_connector->igp_lane_info = igp_lane_info; 1029 radeon_dig_connector->igp_lane_info = igp_lane_info;
897 radeon_connector->con_priv = radeon_dig_connector; 1030 radeon_connector->con_priv = radeon_dig_connector;
898 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1031 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
899 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1032 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1033 if (ret)
1034 goto failed;
900 if (i2c_bus->valid) { 1035 if (i2c_bus->valid) {
901 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1036 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
902 if (!radeon_connector->ddc_bus) 1037 if (!radeon_connector->ddc_bus)
@@ -926,12 +1061,14 @@ radeon_add_legacy_connector(struct drm_device *dev,
926 uint32_t connector_id, 1061 uint32_t connector_id,
927 uint32_t supported_device, 1062 uint32_t supported_device,
928 int connector_type, 1063 int connector_type,
929 struct radeon_i2c_bus_rec *i2c_bus) 1064 struct radeon_i2c_bus_rec *i2c_bus,
1065 uint16_t connector_object_id)
930{ 1066{
931 struct radeon_device *rdev = dev->dev_private; 1067 struct radeon_device *rdev = dev->dev_private;
932 struct drm_connector *connector; 1068 struct drm_connector *connector;
933 struct radeon_connector *radeon_connector; 1069 struct radeon_connector *radeon_connector;
934 uint32_t subpixel_order = SubPixelNone; 1070 uint32_t subpixel_order = SubPixelNone;
1071 int ret;
935 1072
936 /* fixme - tv/cv/din */ 1073 /* fixme - tv/cv/din */
937 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1074 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -954,27 +1091,34 @@ radeon_add_legacy_connector(struct drm_device *dev,
954 1091
955 radeon_connector->connector_id = connector_id; 1092 radeon_connector->connector_id = connector_id;
956 radeon_connector->devices = supported_device; 1093 radeon_connector->devices = supported_device;
1094 radeon_connector->connector_object_id = connector_object_id;
957 switch (connector_type) { 1095 switch (connector_type) {
958 case DRM_MODE_CONNECTOR_VGA: 1096 case DRM_MODE_CONNECTOR_VGA:
959 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1097 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
960 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1098 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1099 if (ret)
1100 goto failed;
961 if (i2c_bus->valid) { 1101 if (i2c_bus->valid) {
962 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 1102 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
963 if (!radeon_connector->ddc_bus) 1103 if (!radeon_connector->ddc_bus)
964 goto failed; 1104 goto failed;
965 } 1105 }
1106 radeon_connector->dac_load_detect = true;
966 drm_connector_attach_property(&radeon_connector->base, 1107 drm_connector_attach_property(&radeon_connector->base,
967 rdev->mode_info.load_detect_property, 1108 rdev->mode_info.load_detect_property,
968 1); 1109 1);
969 break; 1110 break;
970 case DRM_MODE_CONNECTOR_DVIA: 1111 case DRM_MODE_CONNECTOR_DVIA:
971 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1112 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
972 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1113 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1114 if (ret)
1115 goto failed;
973 if (i2c_bus->valid) { 1116 if (i2c_bus->valid) {
974 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1117 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
975 if (!radeon_connector->ddc_bus) 1118 if (!radeon_connector->ddc_bus)
976 goto failed; 1119 goto failed;
977 } 1120 }
1121 radeon_connector->dac_load_detect = true;
978 drm_connector_attach_property(&radeon_connector->base, 1122 drm_connector_attach_property(&radeon_connector->base,
979 rdev->mode_info.load_detect_property, 1123 rdev->mode_info.load_detect_property,
980 1); 1124 1);
@@ -982,11 +1126,14 @@ radeon_add_legacy_connector(struct drm_device *dev,
982 case DRM_MODE_CONNECTOR_DVII: 1126 case DRM_MODE_CONNECTOR_DVII:
983 case DRM_MODE_CONNECTOR_DVID: 1127 case DRM_MODE_CONNECTOR_DVID:
984 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1128 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
985 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1129 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1130 if (ret)
1131 goto failed;
986 if (i2c_bus->valid) { 1132 if (i2c_bus->valid) {
987 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1133 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
988 if (!radeon_connector->ddc_bus) 1134 if (!radeon_connector->ddc_bus)
989 goto failed; 1135 goto failed;
1136 radeon_connector->dac_load_detect = true;
990 drm_connector_attach_property(&radeon_connector->base, 1137 drm_connector_attach_property(&radeon_connector->base,
991 rdev->mode_info.load_detect_property, 1138 rdev->mode_info.load_detect_property,
992 1); 1139 1);
@@ -998,7 +1145,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
998 case DRM_MODE_CONNECTOR_9PinDIN: 1145 case DRM_MODE_CONNECTOR_9PinDIN:
999 if (radeon_tv == 1) { 1146 if (radeon_tv == 1) {
1000 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1147 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1001 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1148 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1149 if (ret)
1150 goto failed;
1151 radeon_connector->dac_load_detect = true;
1002 drm_connector_attach_property(&radeon_connector->base, 1152 drm_connector_attach_property(&radeon_connector->base,
1003 rdev->mode_info.load_detect_property, 1153 rdev->mode_info.load_detect_property,
1004 1); 1154 1);
@@ -1006,7 +1156,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
1006 break; 1156 break;
1007 case DRM_MODE_CONNECTOR_LVDS: 1157 case DRM_MODE_CONNECTOR_LVDS:
1008 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1158 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1009 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1159 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1160 if (ret)
1161 goto failed;
1010 if (i2c_bus->valid) { 1162 if (i2c_bus->valid) {
1011 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1163 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1012 if (!radeon_connector->ddc_bus) 1164 if (!radeon_connector->ddc_bus)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 12f5990c2d2a..5ab2cf96a264 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
142 } 142 }
143 143
144 p->chunks[i].length_dw = user_chunk.length_dw; 144 p->chunks[i].length_dw = user_chunk.length_dw;
145 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
146 146
147 size = p->chunks[i].length_dw * sizeof(uint32_t); 147 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
148 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 148 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
149 if (p->chunks[i].kdata == NULL) { 149 size = p->chunks[i].length_dw * sizeof(uint32_t);
150 return -ENOMEM; 150 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
151 } 151 if (p->chunks[i].kdata == NULL) {
152 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { 152 return -ENOMEM;
153 return -EFAULT; 153 }
154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
155 p->chunks[i].user_ptr, size)) {
156 return -EFAULT;
157 }
158 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
161 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
162 kfree(p->chunks[i].kpage[0]);
163 kfree(p->chunks[i].kpage[1]);
164 return -ENOMEM;
165 }
166 p->chunks[i].kpage_idx[0] = -1;
167 p->chunks[i].kpage_idx[1] = -1;
168 p->chunks[i].last_copied_page = -1;
169 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
154 } 170 }
155 } 171 }
156 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { 172 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
190 kfree(parser->relocs_ptr); 206 kfree(parser->relocs_ptr);
191 for (i = 0; i < parser->nchunks; i++) { 207 for (i = 0; i < parser->nchunks; i++) {
192 kfree(parser->chunks[i].kdata); 208 kfree(parser->chunks[i].kdata);
209 kfree(parser->chunks[i].kpage[0]);
210 kfree(parser->chunks[i].kpage[1]);
193 } 211 }
194 kfree(parser->chunks); 212 kfree(parser->chunks);
195 kfree(parser->chunks_array); 213 kfree(parser->chunks_array);
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
238 * uncached). */ 256 * uncached). */
239 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 257 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
240 parser.ib->length_dw = ib_chunk->length_dw; 258 parser.ib->length_dw = ib_chunk->length_dw;
241 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
242 r = radeon_cs_parse(&parser); 259 r = radeon_cs_parse(&parser);
260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex);
264 return r;
265 }
266 r = radeon_cs_finish_pages(&parser);
243 if (r) { 267 if (r) {
244 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
245 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
@@ -254,3 +278,64 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
254 mutex_unlock(&rdev->cs_mutex); 278 mutex_unlock(&rdev->cs_mutex);
255 return r; 279 return r;
256} 280}
281
282int radeon_cs_finish_pages(struct radeon_cs_parser *p)
283{
284 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
285 int i;
286 int size = PAGE_SIZE;
287
288 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
289 if (i == ibc->last_page_index) {
290 size = (ibc->length_dw * 4) % PAGE_SIZE;
291 if (size == 0)
292 size = PAGE_SIZE;
293 }
294
295 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
296 ibc->user_ptr + (i * PAGE_SIZE),
297 size))
298 return -EFAULT;
299 }
300 return 0;
301}
302
303int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
304{
305 int new_page;
306 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
307 int i;
308 int size = PAGE_SIZE;
309
310 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
311 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
312 ibc->user_ptr + (i * PAGE_SIZE),
313 PAGE_SIZE)) {
314 p->parser_error = -EFAULT;
315 return 0;
316 }
317 }
318
319 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
320
321 if (pg_idx == ibc->last_page_index) {
322 size = (ibc->length_dw * 4) % PAGE_SIZE;
323 if (size == 0)
324 size = PAGE_SIZE;
325 }
326
327 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
328 ibc->user_ptr + (pg_idx * PAGE_SIZE),
329 size)) {
330 p->parser_error = -EFAULT;
331 return 0;
332 }
333
334 /* copy to IB here */
335 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
336
337 ibc->last_copied_page = pg_idx;
338 ibc->kpage_idx[new_page] = pg_idx;
339
340 return new_page;
341}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b13c79e38bc0..28772a37009c 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -109,9 +109,15 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private; 110 struct radeon_device *rdev = crtc->dev->dev_private;
111 111
112 if (ASIC_IS_AVIVO(rdev)) 112 if (ASIC_IS_AVIVO(rdev)) {
113 if (rdev->family >= CHIP_RV770) {
114 if (radeon_crtc->crtc_id)
115 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
116 else
117 WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
118 }
113 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); 119 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
114 else { 120 } else {
115 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; 121 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
116 /* offset is from DISP(2)_BASE_ADDRESS */ 122 /* offset is from DISP(2)_BASE_ADDRESS */
117 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); 123 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index daf5db780956..e3f9edfa40fe 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev)
322 case CHIP_RV380: 322 case CHIP_RV380:
323 rdev->asic = &r300_asic; 323 rdev->asic = &r300_asic;
324 if (rdev->flags & RADEON_IS_PCIE) { 324 if (rdev->flags & RADEON_IS_PCIE) {
325 rdev->asic->gart_init = &rv370_pcie_gart_init;
326 rdev->asic->gart_fini = &rv370_pcie_gart_fini;
327 rdev->asic->gart_enable = &rv370_pcie_gart_enable;
328 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
329 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 325 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
330 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 326 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
331 } 327 }
@@ -448,20 +444,24 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
448 return r; 444 return r;
449} 445}
450 446
451static struct card_info atom_card_info = {
452 .dev = NULL,
453 .reg_read = cail_reg_read,
454 .reg_write = cail_reg_write,
455 .mc_read = cail_mc_read,
456 .mc_write = cail_mc_write,
457 .pll_read = cail_pll_read,
458 .pll_write = cail_pll_write,
459};
460
461int radeon_atombios_init(struct radeon_device *rdev) 447int radeon_atombios_init(struct radeon_device *rdev)
462{ 448{
463 atom_card_info.dev = rdev->ddev; 449 struct card_info *atom_card_info =
464 rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios); 450 kzalloc(sizeof(struct card_info), GFP_KERNEL);
451
452 if (!atom_card_info)
453 return -ENOMEM;
454
455 rdev->mode_info.atom_card_info = atom_card_info;
456 atom_card_info->dev = rdev->ddev;
457 atom_card_info->reg_read = cail_reg_read;
458 atom_card_info->reg_write = cail_reg_write;
459 atom_card_info->mc_read = cail_mc_read;
460 atom_card_info->mc_write = cail_mc_write;
461 atom_card_info->pll_read = cail_pll_read;
462 atom_card_info->pll_write = cail_pll_write;
463
464 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
465 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 465 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
466 return 0; 466 return 0;
467} 467}
@@ -469,6 +469,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
469void radeon_atombios_fini(struct radeon_device *rdev) 469void radeon_atombios_fini(struct radeon_device *rdev)
470{ 470{
471 kfree(rdev->mode_info.atom_context); 471 kfree(rdev->mode_info.atom_context);
472 kfree(rdev->mode_info.atom_card_info);
472} 473}
473 474
474int radeon_combios_init(struct radeon_device *rdev) 475int radeon_combios_init(struct radeon_device *rdev)
@@ -485,7 +486,6 @@ void radeon_combios_fini(struct radeon_device *rdev)
485static unsigned int radeon_vga_set_decode(void *cookie, bool state) 486static unsigned int radeon_vga_set_decode(void *cookie, bool state)
486{ 487{
487 struct radeon_device *rdev = cookie; 488 struct radeon_device *rdev = cookie;
488
489 radeon_vga_set_state(rdev, state); 489 radeon_vga_set_state(rdev, state);
490 if (state) 490 if (state)
491 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 491 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -493,6 +493,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
493 else 493 else
494 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 494 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
495} 495}
496
497void radeon_agp_disable(struct radeon_device *rdev)
498{
499 rdev->flags &= ~RADEON_IS_AGP;
500 if (rdev->family >= CHIP_R600) {
501 DRM_INFO("Forcing AGP to PCIE mode\n");
502 rdev->flags |= RADEON_IS_PCIE;
503 } else if (rdev->family >= CHIP_RV515 ||
504 rdev->family == CHIP_RV380 ||
505 rdev->family == CHIP_RV410 ||
506 rdev->family == CHIP_R423) {
507 DRM_INFO("Forcing AGP to PCIE mode\n");
508 rdev->flags |= RADEON_IS_PCIE;
509 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
510 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
511 } else {
512 DRM_INFO("Forcing AGP to PCI mode\n");
513 rdev->flags |= RADEON_IS_PCI;
514 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
515 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
516 }
517}
518
496/* 519/*
497 * Radeon device. 520 * Radeon device.
498 */ 521 */
@@ -531,29 +554,7 @@ int radeon_device_init(struct radeon_device *rdev,
531 } 554 }
532 555
533 if (radeon_agpmode == -1) { 556 if (radeon_agpmode == -1) {
534 rdev->flags &= ~RADEON_IS_AGP; 557 radeon_agp_disable(rdev);
535 if (rdev->family >= CHIP_RV515 ||
536 rdev->family == CHIP_RV380 ||
537 rdev->family == CHIP_RV410 ||
538 rdev->family == CHIP_R423) {
539 DRM_INFO("Forcing AGP to PCIE mode\n");
540 rdev->flags |= RADEON_IS_PCIE;
541 rdev->asic->gart_init = &rv370_pcie_gart_init;
542 rdev->asic->gart_fini = &rv370_pcie_gart_fini;
543 rdev->asic->gart_enable = &rv370_pcie_gart_enable;
544 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
545 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
546 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
547 } else {
548 DRM_INFO("Forcing AGP to PCI mode\n");
549 rdev->flags |= RADEON_IS_PCI;
550 rdev->asic->gart_init = &r100_pci_gart_init;
551 rdev->asic->gart_fini = &r100_pci_gart_fini;
552 rdev->asic->gart_enable = &r100_pci_gart_enable;
553 rdev->asic->gart_disable = &r100_pci_gart_disable;
554 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
555 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
556 }
557 } 558 }
558 559
559 /* set DMA mask + need_dma32 flags. 560 /* set DMA mask + need_dma32 flags.
@@ -585,111 +586,26 @@ int radeon_device_init(struct radeon_device *rdev,
585 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 586 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
586 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 587 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
587 588
588 rdev->new_init_path = false;
589 r = radeon_init(rdev);
590 if (r) {
591 return r;
592 }
593
594 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 589 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
595 r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 590 /* this will fail for cards that aren't VGA class devices, just
596 if (r) { 591 * ignore it */
597 return -EINVAL; 592 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
598 }
599 593
600 if (!rdev->new_init_path) { 594 r = radeon_init(rdev);
601 /* Setup errata flags */ 595 if (r)
602 radeon_errata(rdev); 596 return r;
603 /* Initialize scratch registers */
604 radeon_scratch_init(rdev);
605 /* Initialize surface registers */
606 radeon_surface_init(rdev);
607
608 /* BIOS*/
609 if (!radeon_get_bios(rdev)) {
610 if (ASIC_IS_AVIVO(rdev))
611 return -EINVAL;
612 }
613 if (rdev->is_atom_bios) {
614 r = radeon_atombios_init(rdev);
615 if (r) {
616 return r;
617 }
618 } else {
619 r = radeon_combios_init(rdev);
620 if (r) {
621 return r;
622 }
623 }
624 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
625 if (radeon_gpu_reset(rdev)) {
626 /* FIXME: what do we want to do here ? */
627 }
628 /* check if cards are posted or not */
629 if (!radeon_card_posted(rdev) && rdev->bios) {
630 DRM_INFO("GPU not posted. posting now...\n");
631 if (rdev->is_atom_bios) {
632 atom_asic_init(rdev->mode_info.atom_context);
633 } else {
634 radeon_combios_asic_init(rdev->ddev);
635 }
636 }
637 /* Get clock & vram information */
638 radeon_get_clock_info(rdev->ddev);
639 radeon_vram_info(rdev);
640 /* Initialize clocks */
641 r = radeon_clocks_init(rdev);
642 if (r) {
643 return r;
644 }
645 597
646 /* Initialize memory controller (also test AGP) */ 598 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
647 r = radeon_mc_init(rdev); 599 /* Acceleration not working on AGP card try again
648 if (r) { 600 * with fallback to PCI or PCIE GART
649 return r; 601 */
650 } 602 radeon_gpu_reset(rdev);
651 /* Fence driver */ 603 radeon_fini(rdev);
652 r = radeon_fence_driver_init(rdev); 604 radeon_agp_disable(rdev);
653 if (r) { 605 r = radeon_init(rdev);
654 return r;
655 }
656 r = radeon_irq_kms_init(rdev);
657 if (r) {
658 return r;
659 }
660 /* Memory manager */
661 r = radeon_object_init(rdev);
662 if (r) {
663 return r;
664 }
665 r = radeon_gpu_gart_init(rdev);
666 if (r) 606 if (r)
667 return r; 607 return r;
668 /* Initialize GART (initialize after TTM so we can allocate
669 * memory through TTM but finalize after TTM) */
670 r = radeon_gart_enable(rdev);
671 if (r)
672 return 0;
673 r = radeon_gem_init(rdev);
674 if (r)
675 return 0;
676
677 /* 1M ring buffer */
678 r = radeon_cp_init(rdev, 1024 * 1024);
679 if (r)
680 return 0;
681 r = radeon_wb_init(rdev);
682 if (r)
683 DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
684 r = radeon_ib_pool_init(rdev);
685 if (r)
686 return 0;
687 r = radeon_ib_test(rdev);
688 if (r)
689 return 0;
690 rdev->accel_working = true;
691 } 608 }
692 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
693 if (radeon_testing) { 609 if (radeon_testing) {
694 radeon_test_moves(rdev); 610 radeon_test_moves(rdev);
695 } 611 }
@@ -703,32 +619,8 @@ void radeon_device_fini(struct radeon_device *rdev)
703{ 619{
704 DRM_INFO("radeon: finishing device.\n"); 620 DRM_INFO("radeon: finishing device.\n");
705 rdev->shutdown = true; 621 rdev->shutdown = true;
706 /* Order matter so becarefull if you rearrange anythings */ 622 radeon_fini(rdev);
707 if (!rdev->new_init_path) { 623 vga_client_register(rdev->pdev, NULL, NULL, NULL);
708 radeon_ib_pool_fini(rdev);
709 radeon_cp_fini(rdev);
710 radeon_wb_fini(rdev);
711 radeon_gpu_gart_fini(rdev);
712 radeon_gem_fini(rdev);
713 radeon_mc_fini(rdev);
714#if __OS_HAS_AGP
715 radeon_agp_fini(rdev);
716#endif
717 radeon_irq_kms_fini(rdev);
718 vga_client_register(rdev->pdev, NULL, NULL, NULL);
719 radeon_fence_driver_fini(rdev);
720 radeon_clocks_fini(rdev);
721 radeon_object_fini(rdev);
722 if (rdev->is_atom_bios) {
723 radeon_atombios_fini(rdev);
724 } else {
725 radeon_combios_fini(rdev);
726 }
727 kfree(rdev->bios);
728 rdev->bios = NULL;
729 } else {
730 radeon_fini(rdev);
731 }
732 iounmap(rdev->rmmio); 624 iounmap(rdev->rmmio);
733 rdev->rmmio = NULL; 625 rdev->rmmio = NULL;
734} 626}
@@ -768,14 +660,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
768 660
769 radeon_save_bios_scratch_regs(rdev); 661 radeon_save_bios_scratch_regs(rdev);
770 662
771 if (!rdev->new_init_path) { 663 radeon_suspend(rdev);
772 radeon_cp_disable(rdev);
773 radeon_gart_disable(rdev);
774 rdev->irq.sw_int = false;
775 radeon_irq_set(rdev);
776 } else {
777 radeon_suspend(rdev);
778 }
779 /* evict remaining vram memory */ 664 /* evict remaining vram memory */
780 radeon_object_evict_vram(rdev); 665 radeon_object_evict_vram(rdev);
781 666
@@ -794,7 +679,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
794int radeon_resume_kms(struct drm_device *dev) 679int radeon_resume_kms(struct drm_device *dev)
795{ 680{
796 struct radeon_device *rdev = dev->dev_private; 681 struct radeon_device *rdev = dev->dev_private;
797 int r;
798 682
799 acquire_console_sem(); 683 acquire_console_sem();
800 pci_set_power_state(dev->pdev, PCI_D0); 684 pci_set_power_state(dev->pdev, PCI_D0);
@@ -804,43 +688,7 @@ int radeon_resume_kms(struct drm_device *dev)
804 return -1; 688 return -1;
805 } 689 }
806 pci_set_master(dev->pdev); 690 pci_set_master(dev->pdev);
807 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 691 radeon_resume(rdev);
808 if (!rdev->new_init_path) {
809 if (radeon_gpu_reset(rdev)) {
810 /* FIXME: what do we want to do here ? */
811 }
812 /* post card */
813 if (rdev->is_atom_bios) {
814 atom_asic_init(rdev->mode_info.atom_context);
815 } else {
816 radeon_combios_asic_init(rdev->ddev);
817 }
818 /* Initialize clocks */
819 r = radeon_clocks_init(rdev);
820 if (r) {
821 release_console_sem();
822 return r;
823 }
824 /* Enable IRQ */
825 rdev->irq.sw_int = true;
826 radeon_irq_set(rdev);
827 /* Initialize GPU Memory Controller */
828 r = radeon_mc_init(rdev);
829 if (r) {
830 goto out;
831 }
832 r = radeon_gart_enable(rdev);
833 if (r) {
834 goto out;
835 }
836 r = radeon_cp_init(rdev, rdev->cp.ring_size);
837 if (r) {
838 goto out;
839 }
840 } else {
841 radeon_resume(rdev);
842 }
843out:
844 radeon_restore_bios_scratch_regs(rdev); 692 radeon_restore_bios_scratch_regs(rdev);
845 fb_set_suspend(rdev->fbdev_info, 0); 693 fb_set_suspend(rdev->fbdev_info, 0);
846 release_console_sem(); 694 release_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5d8141b13765..c85df4afcb7a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -106,51 +106,44 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
106 legacy_crtc_load_lut(crtc); 106 legacy_crtc_load_lut(crtc);
107} 107}
108 108
109/** Sets the color ramps on behalf of RandR */ 109/** Sets the color ramps on behalf of fbcon */
110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 111 u16 blue, int regno)
112{ 112{
113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
114 114
115 if (regno == 0)
116 DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
117 radeon_crtc->lut_r[regno] = red >> 6; 115 radeon_crtc->lut_r[regno] = red >> 6;
118 radeon_crtc->lut_g[regno] = green >> 6; 116 radeon_crtc->lut_g[regno] = green >> 6;
119 radeon_crtc->lut_b[regno] = blue >> 6; 117 radeon_crtc->lut_b[regno] = blue >> 6;
120} 118}
121 119
120/** Gets the color ramps on behalf of fbcon */
121void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
122 u16 *blue, int regno)
123{
124 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
125
126 *red = radeon_crtc->lut_r[regno] << 6;
127 *green = radeon_crtc->lut_g[regno] << 6;
128 *blue = radeon_crtc->lut_b[regno] << 6;
129}
130
122static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 131static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
123 u16 *blue, uint32_t size) 132 u16 *blue, uint32_t size)
124{ 133{
125 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 134 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
126 int i, j; 135 int i;
127 136
128 if (size != 256) { 137 if (size != 256) {
129 return; 138 return;
130 } 139 }
131 if (crtc->fb == NULL) {
132 return;
133 }
134 140
135 if (crtc->fb->depth == 16) { 141 /* userspace palettes are always correct as is */
136 for (i = 0; i < 64; i++) { 142 for (i = 0; i < 256; i++) {
137 if (i <= 31) { 143 radeon_crtc->lut_r[i] = red[i] >> 6;
138 for (j = 0; j < 8; j++) { 144 radeon_crtc->lut_g[i] = green[i] >> 6;
139 radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; 145 radeon_crtc->lut_b[i] = blue[i] >> 6;
140 radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
141 }
142 }
143 for (j = 0; j < 4; j++)
144 radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
145 }
146 } else {
147 for (i = 0; i < 256; i++) {
148 radeon_crtc->lut_r[i] = red[i] >> 6;
149 radeon_crtc->lut_g[i] = green[i] >> 6;
150 radeon_crtc->lut_b[i] = blue[i] >> 6;
151 }
152 } 146 }
153
154 radeon_crtc_load_lut(crtc); 147 radeon_crtc_load_lut(crtc);
155} 148}
156 149
@@ -341,27 +334,19 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
341 334
342int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) 335int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
343{ 336{
344 struct edid *edid;
345 int ret = 0; 337 int ret = 0;
346 338
347 if (!radeon_connector->ddc_bus) 339 if (!radeon_connector->ddc_bus)
348 return -1; 340 return -1;
349 if (!radeon_connector->edid) { 341 if (!radeon_connector->edid) {
350 radeon_i2c_do_lock(radeon_connector, 1); 342 radeon_i2c_do_lock(radeon_connector, 1);
351 edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 343 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
352 radeon_i2c_do_lock(radeon_connector, 0); 344 radeon_i2c_do_lock(radeon_connector, 0);
353 } else 345 }
354 edid = radeon_connector->edid;
355 346
356 if (edid) { 347 if (radeon_connector->edid) {
357 /* update digital bits here */ 348 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
358 if (edid->input & DRM_EDID_INPUT_DIGITAL) 349 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
359 radeon_connector->use_digital = 1;
360 else
361 radeon_connector->use_digital = 0;
362 drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
363 ret = drm_add_edid_modes(&radeon_connector->base, edid);
364 kfree(edid);
365 return ret; 350 return ret;
366 } 351 }
367 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); 352 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -724,7 +709,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
724 if (ret) { 709 if (ret) {
725 return ret; 710 return ret;
726 } 711 }
727 /* allocate crtcs - TODO single crtc */ 712
713 if (rdev->flags & RADEON_SINGLE_CRTC)
714 num_crtc = 1;
715
716 /* allocate crtcs */
728 for (i = 0; i < num_crtc; i++) { 717 for (i = 0; i < num_crtc; i++) {
729 radeon_crtc_init(rdev->ddev, i); 718 radeon_crtc_init(rdev->ddev, i);
730 } 719 }
@@ -764,7 +753,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
764 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 753 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
765 memcpy(&radeon_crtc->native_mode, 754 memcpy(&radeon_crtc->native_mode,
766 &radeon_encoder->native_mode, 755 &radeon_encoder->native_mode,
767 sizeof(struct radeon_native_mode)); 756 sizeof(struct drm_display_mode));
768 first = false; 757 first = false;
769 } else { 758 } else {
770 if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { 759 if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
@@ -782,10 +771,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
782 if (radeon_crtc->rmx_type != RMX_OFF) { 771 if (radeon_crtc->rmx_type != RMX_OFF) {
783 fixed20_12 a, b; 772 fixed20_12 a, b;
784 a.full = rfixed_const(crtc->mode.vdisplay); 773 a.full = rfixed_const(crtc->mode.vdisplay);
785 b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); 774 b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
786 radeon_crtc->vsc.full = rfixed_div(a, b); 775 radeon_crtc->vsc.full = rfixed_div(a, b);
787 a.full = rfixed_const(crtc->mode.hdisplay); 776 a.full = rfixed_const(crtc->mode.hdisplay);
788 b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); 777 b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
789 radeon_crtc->hsc.full = rfixed_div(a, b); 778 radeon_crtc->hsc.full = rfixed_div(a, b);
790 } else { 779 } else {
791 radeon_crtc->vsc.full = rfixed_const(1); 780 radeon_crtc->vsc.full = rfixed_const(1);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 50fce498910c..7f50fb864af8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -62,9 +62,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
62int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 62int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
63void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 63void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
65int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
66void radeon_master_destroy_kms(struct drm_device *dev,
67 struct drm_master *master);
68int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 65int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
69 struct drm_file *file_priv); 66 struct drm_file *file_priv);
70int radeon_gem_object_init(struct drm_gem_object *obj); 67int radeon_gem_object_init(struct drm_gem_object *obj);
@@ -260,8 +257,6 @@ static struct drm_driver kms_driver = {
260 .get_vblank_counter = radeon_get_vblank_counter_kms, 257 .get_vblank_counter = radeon_get_vblank_counter_kms,
261 .enable_vblank = radeon_enable_vblank_kms, 258 .enable_vblank = radeon_enable_vblank_kms,
262 .disable_vblank = radeon_disable_vblank_kms, 259 .disable_vblank = radeon_disable_vblank_kms,
263 .master_create = radeon_master_create_kms,
264 .master_destroy = radeon_master_destroy_kms,
265#if defined(CONFIG_DEBUG_FS) 260#if defined(CONFIG_DEBUG_FS)
266 .debugfs_init = radeon_debugfs_init, 261 .debugfs_init = radeon_debugfs_init,
267 .debugfs_cleanup = radeon_debugfs_cleanup, 262 .debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 621646752cd2..d42bc512d75a 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -31,6 +31,10 @@
31 31
32extern int atom_debug; 32extern int atom_debug;
33 33
34/* evil but including atombios.h is much worse */
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 struct drm_display_mode *mode);
37
34uint32_t 38uint32_t
35radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) 39radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
36{ 40{
@@ -167,49 +171,17 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
167 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 171 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
168 struct drm_device *dev = encoder->dev; 172 struct drm_device *dev = encoder->dev;
169 struct radeon_device *rdev = dev->dev_private; 173 struct radeon_device *rdev = dev->dev_private;
170 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; 174 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
171 175
172 if (mode->hdisplay < native_mode->panel_xres || 176 if (mode->hdisplay < native_mode->hdisplay ||
173 mode->vdisplay < native_mode->panel_yres) { 177 mode->vdisplay < native_mode->vdisplay) {
174 if (ASIC_IS_AVIVO(rdev)) { 178 int mode_id = adjusted_mode->base.id;
175 adjusted_mode->hdisplay = native_mode->panel_xres; 179 *adjusted_mode = *native_mode;
176 adjusted_mode->vdisplay = native_mode->panel_yres; 180 if (!ASIC_IS_AVIVO(rdev)) {
177 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank; 181 adjusted_mode->hdisplay = mode->hdisplay;
178 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus; 182 adjusted_mode->vdisplay = mode->vdisplay;
179 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
180 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
181 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
182 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
183 /* update crtc values */
184 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
185 /* adjust crtc values */
186 adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
187 adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
188 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
189 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
190 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
191 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
192 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
193 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
194 } else {
195 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
196 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
197 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
198 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
199 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
200 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
201 /* update crtc values */
202 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
203 /* adjust crtc values */
204 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
205 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
206 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
207 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
208 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
209 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
210 } 183 }
211 adjusted_mode->flags = native_mode->flags; 184 adjusted_mode->base.id = mode_id;
212 adjusted_mode->clock = native_mode->dotclock;
213 } 185 }
214} 186}
215 187
@@ -219,7 +191,11 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
219 struct drm_display_mode *adjusted_mode) 191 struct drm_display_mode *adjusted_mode)
220{ 192{
221 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct drm_device *dev = encoder->dev;
195 struct radeon_device *rdev = dev->dev_private;
222 196
197 /* set the active encoder to connector routing */
198 radeon_encoder_set_active_device(encoder);
223 drm_mode_set_crtcinfo(adjusted_mode, 0); 199 drm_mode_set_crtcinfo(adjusted_mode, 0);
224 200
225 if (radeon_encoder->rmx_type != RMX_OFF) 201 if (radeon_encoder->rmx_type != RMX_OFF)
@@ -230,6 +206,18 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
230 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 206 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
231 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 207 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
232 208
209 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
210 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
211 if (tv_dac) {
212 if (tv_dac->tv_std == TV_STD_NTSC ||
213 tv_dac->tv_std == TV_STD_NTSC_J ||
214 tv_dac->tv_std == TV_STD_PAL_M)
215 radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
216 else
217 radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
218 }
219 }
220
233 return true; 221 return true;
234} 222}
235 223
@@ -461,7 +449,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
461 case 1: 449 case 1:
462 args.v1.ucMisc = 0; 450 args.v1.ucMisc = 0;
463 args.v1.ucAction = action; 451 args.v1.ucAction = action;
464 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) 452 if (drm_detect_hdmi_monitor(radeon_connector->edid))
465 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 453 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
466 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 454 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
467 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 455 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -486,7 +474,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
486 if (dig->coherent_mode) 474 if (dig->coherent_mode)
487 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; 475 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
488 } 476 }
489 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) 477 if (drm_detect_hdmi_monitor(radeon_connector->edid))
490 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 478 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
491 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 479 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
492 args.v2.ucTruncate = 0; 480 args.v2.ucTruncate = 0;
@@ -544,7 +532,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
544 switch (connector->connector_type) { 532 switch (connector->connector_type) {
545 case DRM_MODE_CONNECTOR_DVII: 533 case DRM_MODE_CONNECTOR_DVII:
546 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 534 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
547 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) 535 if (drm_detect_hdmi_monitor(radeon_connector->edid))
548 return ATOM_ENCODER_MODE_HDMI; 536 return ATOM_ENCODER_MODE_HDMI;
549 else if (radeon_connector->use_digital) 537 else if (radeon_connector->use_digital)
550 return ATOM_ENCODER_MODE_DVI; 538 return ATOM_ENCODER_MODE_DVI;
@@ -554,7 +542,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
554 case DRM_MODE_CONNECTOR_DVID: 542 case DRM_MODE_CONNECTOR_DVID:
555 case DRM_MODE_CONNECTOR_HDMIA: 543 case DRM_MODE_CONNECTOR_HDMIA:
556 default: 544 default:
557 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) 545 if (drm_detect_hdmi_monitor(radeon_connector->edid))
558 return ATOM_ENCODER_MODE_HDMI; 546 return ATOM_ENCODER_MODE_HDMI;
559 else 547 else
560 return ATOM_ENCODER_MODE_DVI; 548 return ATOM_ENCODER_MODE_DVI;
@@ -566,7 +554,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
566 /*if (radeon_output->MonType == MT_DP) 554 /*if (radeon_output->MonType == MT_DP)
567 return ATOM_ENCODER_MODE_DP; 555 return ATOM_ENCODER_MODE_DP;
568 else*/ 556 else*/
569 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) 557 if (drm_detect_hdmi_monitor(radeon_connector->edid))
570 return ATOM_ENCODER_MODE_HDMI; 558 return ATOM_ENCODER_MODE_HDMI;
571 else 559 else
572 return ATOM_ENCODER_MODE_DVI; 560 return ATOM_ENCODER_MODE_DVI;
@@ -734,14 +722,17 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
734 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 722 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
735 723
736 args.v1.ucAction = action; 724 args.v1.ucAction = action;
737 725 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
726 args.v1.usInitInfo = radeon_connector->connector_object_id;
727 } else {
728 if (radeon_encoder->pixel_clock > 165000)
729 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
730 else
731 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
732 }
738 if (ASIC_IS_DCE32(rdev)) { 733 if (ASIC_IS_DCE32(rdev)) {
739 if (radeon_encoder->pixel_clock > 165000) { 734 if (radeon_encoder->pixel_clock > 165000)
740 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100); 735 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
741 args.v2.acConfig.fDualLinkConnector = 1;
742 } else {
743 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
744 }
745 if (dig->dig_block) 736 if (dig->dig_block)
746 args.v2.acConfig.ucEncoderSel = 1; 737 args.v2.acConfig.ucEncoderSel = 1;
747 738
@@ -766,7 +757,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
766 } 757 }
767 } else { 758 } else {
768 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 759 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
769 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
770 760
771 switch (radeon_encoder->encoder_id) { 761 switch (radeon_encoder->encoder_id) {
772 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 762 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -874,16 +864,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
874 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 864 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
875 int index = 0; 865 int index = 0;
876 bool is_dig = false; 866 bool is_dig = false;
877 int devices;
878 867
879 memset(&args, 0, sizeof(args)); 868 memset(&args, 0, sizeof(args));
880 869
881 /* on DPMS off we have no idea if active device is meaningful */
882 if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device)
883 devices = radeon_encoder->devices;
884 else
885 devices = radeon_encoder->active_device;
886
887 DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", 870 DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
888 radeon_encoder->encoder_id, mode, radeon_encoder->devices, 871 radeon_encoder->encoder_id, mode, radeon_encoder->devices,
889 radeon_encoder->active_device); 872 radeon_encoder->active_device);
@@ -914,18 +897,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
914 break; 897 break;
915 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 898 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
916 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 899 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
917 if (devices & (ATOM_DEVICE_TV_SUPPORT)) 900 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
918 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); 901 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
919 else if (devices & (ATOM_DEVICE_CV_SUPPORT)) 902 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
920 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); 903 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
921 else 904 else
922 index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); 905 index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
923 break; 906 break;
924 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 907 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
925 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 908 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
926 if (devices & (ATOM_DEVICE_TV_SUPPORT)) 909 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
927 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); 910 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
928 else if (devices & (ATOM_DEVICE_CV_SUPPORT)) 911 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
929 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); 912 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
930 else 913 else
931 index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); 914 index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
@@ -1104,8 +1087,11 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1104 } 1087 }
1105 1088
1106 /* set scaler clears this on some chips */ 1089 /* set scaler clears this on some chips */
1107 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1090 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
1108 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN); 1091 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1092 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1093 AVIVO_D1MODE_INTERLEAVE_EN);
1094 }
1109} 1095}
1110 1096
1111static void 1097static void
@@ -1153,6 +1139,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1153 1139
1154 /* setup and enable the encoder and transmitter */ 1140 /* setup and enable the encoder and transmitter */
1155 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1141 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1142 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
1156 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); 1143 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
1157 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); 1144 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
1158 break; 1145 break;
@@ -1268,8 +1255,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1268{ 1255{
1269 radeon_atom_output_lock(encoder, true); 1256 radeon_atom_output_lock(encoder, true);
1270 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1257 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1271
1272 radeon_encoder_set_active_device(encoder);
1273} 1258}
1274 1259
1275static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 1260static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1345,6 +1330,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1345void 1330void
1346radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1331radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1347{ 1332{
1333 struct radeon_device *rdev = dev->dev_private;
1348 struct drm_encoder *encoder; 1334 struct drm_encoder *encoder;
1349 struct radeon_encoder *radeon_encoder; 1335 struct radeon_encoder *radeon_encoder;
1350 1336
@@ -1364,7 +1350,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1364 return; 1350 return;
1365 1351
1366 encoder = &radeon_encoder->base; 1352 encoder = &radeon_encoder->base;
1367 encoder->possible_crtcs = 0x3; 1353 if (rdev->flags & RADEON_SINGLE_CRTC)
1354 encoder->possible_crtcs = 0x1;
1355 else
1356 encoder->possible_crtcs = 0x3;
1368 encoder->possible_clones = 0; 1357 encoder->possible_clones = 0;
1369 1358
1370 radeon_encoder->enc_priv = NULL; 1359 radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 944e4fa78db5..b38c4c8e2c61 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = {
55 .fb_imageblit = cfb_imageblit, 55 .fb_imageblit = cfb_imageblit,
56 .fb_pan_display = drm_fb_helper_pan_display, 56 .fb_pan_display = drm_fb_helper_pan_display,
57 .fb_blank = drm_fb_helper_blank, 57 .fb_blank = drm_fb_helper_blank,
58 .fb_setcmap = drm_fb_helper_setcmap,
58}; 59};
59 60
60/** 61/**
@@ -123,11 +124,13 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
123 124
124static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 125static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
125 .gamma_set = radeon_crtc_fb_gamma_set, 126 .gamma_set = radeon_crtc_fb_gamma_set,
127 .gamma_get = radeon_crtc_fb_gamma_get,
126}; 128};
127 129
128int radeonfb_create(struct drm_device *dev, 130int radeonfb_create(struct drm_device *dev,
129 uint32_t fb_width, uint32_t fb_height, 131 uint32_t fb_width, uint32_t fb_height,
130 uint32_t surface_width, uint32_t surface_height, 132 uint32_t surface_width, uint32_t surface_height,
133 uint32_t surface_depth, uint32_t surface_bpp,
131 struct drm_framebuffer **fb_p) 134 struct drm_framebuffer **fb_p)
132{ 135{
133 struct radeon_device *rdev = dev->dev_private; 136 struct radeon_device *rdev = dev->dev_private;
@@ -145,13 +148,19 @@ int radeonfb_create(struct drm_device *dev,
145 unsigned long tmp; 148 unsigned long tmp;
146 bool fb_tiled = false; /* useful for testing */ 149 bool fb_tiled = false; /* useful for testing */
147 u32 tiling_flags = 0; 150 u32 tiling_flags = 0;
151 int crtc_count;
148 152
149 mode_cmd.width = surface_width; 153 mode_cmd.width = surface_width;
150 mode_cmd.height = surface_height; 154 mode_cmd.height = surface_height;
151 mode_cmd.bpp = 32; 155
156 /* avivo can't scanout real 24bpp */
157 if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
158 surface_bpp = 32;
159
160 mode_cmd.bpp = surface_bpp;
152 /* need to align pitch with crtc limits */ 161 /* need to align pitch with crtc limits */
153 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 162 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
154 mode_cmd.depth = 24; 163 mode_cmd.depth = surface_depth;
155 164
156 size = mode_cmd.pitch * mode_cmd.height; 165 size = mode_cmd.pitch * mode_cmd.height;
157 aligned_size = ALIGN(size, PAGE_SIZE); 166 aligned_size = ALIGN(size, PAGE_SIZE);
@@ -216,7 +225,11 @@ int radeonfb_create(struct drm_device *dev,
216 rfbdev = info->par; 225 rfbdev = info->par;
217 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 226 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
218 rfbdev->helper.dev = dev; 227 rfbdev->helper.dev = dev;
219 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, 228 if (rdev->flags & RADEON_SINGLE_CRTC)
229 crtc_count = 1;
230 else
231 crtc_count = 2;
232 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
220 RADEONFB_CONN_LIMIT); 233 RADEONFB_CONN_LIMIT);
221 if (ret) 234 if (ret)
222 goto out_unref; 235 goto out_unref;
@@ -233,7 +246,7 @@ int radeonfb_create(struct drm_device *dev,
233 246
234 strcpy(info->fix.id, "radeondrmfb"); 247 strcpy(info->fix.id, "radeondrmfb");
235 248
236 drm_fb_helper_fill_fix(info, fb->pitch); 249 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
237 250
238 info->flags = FBINFO_DEFAULT; 251 info->flags = FBINFO_DEFAULT;
239 info->fbops = &radeonfb_ops; 252 info->fbops = &radeonfb_ops;
@@ -290,13 +303,26 @@ out:
290 return ret; 303 return ret;
291} 304}
292 305
306static char *mode_option;
307int radeon_parse_options(char *options)
308{
309 char *this_opt;
310
311 if (!options || !*options)
312 return 0;
313
314 while ((this_opt = strsep(&options, ",")) != NULL) {
315 if (!*this_opt)
316 continue;
317 mode_option = this_opt;
318 }
319 return 0;
320}
321
293int radeonfb_probe(struct drm_device *dev) 322int radeonfb_probe(struct drm_device *dev)
294{ 323{
295 int ret; 324 return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
296 ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
297 return ret;
298} 325}
299EXPORT_SYMBOL(radeonfb_probe);
300 326
301int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 327int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
302{ 328{
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a931af065dd4..a68d7566178c 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
140 WARN(1, "trying to unbind memory to unitialized GART !\n"); 140 WARN(1, "trying to unbind memory to unitialized GART !\n");
141 return; 141 return;
142 } 142 }
143 t = offset / 4096; 143 t = offset / RADEON_GPU_PAGE_SIZE;
144 p = t / (PAGE_SIZE / 4096); 144 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
145 for (i = 0; i < pages; i++, p++) { 145 for (i = 0; i < pages; i++, p++) {
146 if (rdev->gart.pages[p]) { 146 if (rdev->gart.pages[p]) {
147 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 147 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
148 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 148 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
149 rdev->gart.pages[p] = NULL; 149 rdev->gart.pages[p] = NULL;
150 rdev->gart.pages_addr[p] = 0; 150 rdev->gart.pages_addr[p] = 0;
151 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { 151 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
152 radeon_gart_set_page(rdev, t, 0); 152 radeon_gart_set_page(rdev, t, 0);
153 } 153 }
154 } 154 }
@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
169 DRM_ERROR("trying to bind memory to unitialized GART !\n"); 169 DRM_ERROR("trying to bind memory to unitialized GART !\n");
170 return -EINVAL; 170 return -EINVAL;
171 } 171 }
172 t = offset / 4096; 172 t = offset / RADEON_GPU_PAGE_SIZE;
173 p = t / (PAGE_SIZE / 4096); 173 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
174 174
175 for (i = 0; i < pages; i++, p++) { 175 for (i = 0; i < pages; i++, p++) {
176 /* we need to support large memory configurations */ 176 /* we need to support large memory configurations */
@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
185 } 185 }
186 rdev->gart.pages[p] = pagelist[i]; 186 rdev->gart.pages[p] = pagelist[i];
187 page_base = rdev->gart.pages_addr[p]; 187 page_base = rdev->gart.pages_addr[p];
188 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { 188 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
189 radeon_gart_set_page(rdev, t, page_base); 189 radeon_gart_set_page(rdev, t, page_base);
190 page_base += 4096; 190 page_base += RADEON_GPU_PAGE_SIZE;
191 } 191 }
192 } 192 }
193 mb(); 193 mb();
@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev)
200 if (rdev->gart.pages) { 200 if (rdev->gart.pages) {
201 return 0; 201 return 0;
202 } 202 }
203 /* We need PAGE_SIZE >= 4096 */ 203 /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
204 if (PAGE_SIZE < 4096) { 204 if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
205 DRM_ERROR("Page size is smaller than GPU page size!\n"); 205 DRM_ERROR("Page size is smaller than GPU page size!\n");
206 return -EINVAL; 206 return -EINVAL;
207 } 207 }
208 /* Compute table size */ 208 /* Compute table size */
209 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; 209 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
210 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096; 210 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
211 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", 211 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
212 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); 212 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
213 /* Allocate pages table */ 213 /* Allocate pages table */
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1841145a7c4f..a0fe6232dcb6 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -83,11 +83,22 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
83int radeon_irq_kms_init(struct radeon_device *rdev) 83int radeon_irq_kms_init(struct radeon_device *rdev)
84{ 84{
85 int r = 0; 85 int r = 0;
86 int num_crtc = 2;
86 87
87 r = drm_vblank_init(rdev->ddev, 2); 88 if (rdev->flags & RADEON_SINGLE_CRTC)
89 num_crtc = 1;
90
91 r = drm_vblank_init(rdev->ddev, num_crtc);
88 if (r) { 92 if (r) {
89 return r; 93 return r;
90 } 94 }
95 /* enable msi */
96 rdev->msi_enabled = 0;
97 if (rdev->family >= CHIP_RV380) {
98 int ret = pci_enable_msi(rdev->pdev);
99 if (!ret)
100 rdev->msi_enabled = 1;
101 }
91 drm_irq_install(rdev->ddev); 102 drm_irq_install(rdev->ddev);
92 rdev->irq.installed = true; 103 rdev->irq.installed = true;
93 DRM_INFO("radeon: irq initialized.\n"); 104 DRM_INFO("radeon: irq initialized.\n");
@@ -99,5 +110,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
99 if (rdev->irq.installed) { 110 if (rdev->irq.installed) {
100 rdev->irq.installed = false; 111 rdev->irq.installed = false;
101 drm_irq_uninstall(rdev->ddev); 112 drm_irq_uninstall(rdev->ddev);
113 if (rdev->msi_enabled)
114 pci_disable_msi(rdev->pdev);
102 } 115 }
103} 116}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 709bd892b3a9..ba128621057a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,55 +201,6 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
201 201
202 202
203/* 203/*
204 * For multiple master (like multiple X).
205 */
206struct drm_radeon_master_private {
207 drm_local_map_t *sarea;
208 drm_radeon_sarea_t *sarea_priv;
209};
210
211int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
212{
213 struct drm_radeon_master_private *master_priv;
214 unsigned long sareapage;
215 int ret;
216
217 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
218 if (master_priv == NULL) {
219 return -ENOMEM;
220 }
221 /* prebuild the SAREA */
222 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
223 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
224 _DRM_CONTAINS_LOCK,
225 &master_priv->sarea);
226 if (ret) {
227 DRM_ERROR("SAREA setup failed\n");
228 return ret;
229 }
230 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
231 master_priv->sarea_priv->pfCurrentPage = 0;
232 master->driver_priv = master_priv;
233 return 0;
234}
235
236void radeon_master_destroy_kms(struct drm_device *dev,
237 struct drm_master *master)
238{
239 struct drm_radeon_master_private *master_priv = master->driver_priv;
240
241 if (master_priv == NULL) {
242 return;
243 }
244 if (master_priv->sarea) {
245 drm_rmmap_locked(dev, master_priv->sarea);
246 }
247 kfree(master_priv);
248 master->driver_priv = NULL;
249}
250
251
252/*
253 * IOCTL. 204 * IOCTL.
254 */ 205 */
255int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 206int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 2b997a15fb1f..8d0b7aa87fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -48,7 +48,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
48 u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; 48 u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
49 u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; 49 u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
50 u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; 50 u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
51 struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; 51 struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
52 52
53 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & 53 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
54 (RADEON_VERT_STRETCH_RESERVED | 54 (RADEON_VERT_STRETCH_RESERVED |
@@ -95,19 +95,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
95 95
96 fp_horz_vert_active = 0; 96 fp_horz_vert_active = 0;
97 97
98 if (native_mode->panel_xres == 0 || 98 if (native_mode->hdisplay == 0 ||
99 native_mode->panel_yres == 0) { 99 native_mode->vdisplay == 0) {
100 hscale = false; 100 hscale = false;
101 vscale = false; 101 vscale = false;
102 } else { 102 } else {
103 if (xres > native_mode->panel_xres) 103 if (xres > native_mode->hdisplay)
104 xres = native_mode->panel_xres; 104 xres = native_mode->hdisplay;
105 if (yres > native_mode->panel_yres) 105 if (yres > native_mode->vdisplay)
106 yres = native_mode->panel_yres; 106 yres = native_mode->vdisplay;
107 107
108 if (xres == native_mode->panel_xres) 108 if (xres == native_mode->hdisplay)
109 hscale = false; 109 hscale = false;
110 if (yres == native_mode->panel_yres) 110 if (yres == native_mode->vdisplay)
111 vscale = false; 111 vscale = false;
112 } 112 }
113 113
@@ -119,11 +119,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
119 else { 119 else {
120 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; 120 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
121 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) 121 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
122 / native_mode->panel_xres + 1; 122 / native_mode->hdisplay + 1;
123 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | 123 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
124 RADEON_HORZ_STRETCH_BLEND | 124 RADEON_HORZ_STRETCH_BLEND |
125 RADEON_HORZ_STRETCH_ENABLE | 125 RADEON_HORZ_STRETCH_ENABLE |
126 ((native_mode->panel_xres/8-1) << 16)); 126 ((native_mode->hdisplay/8-1) << 16));
127 } 127 }
128 128
129 if (!vscale) 129 if (!vscale)
@@ -131,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
131 else { 131 else {
132 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; 132 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
133 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) 133 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
134 / native_mode->panel_yres + 1; 134 / native_mode->vdisplay + 1;
135 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | 135 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
136 RADEON_VERT_STRETCH_ENABLE | 136 RADEON_VERT_STRETCH_ENABLE |
137 RADEON_VERT_STRETCH_BLEND | 137 RADEON_VERT_STRETCH_BLEND |
138 ((native_mode->panel_yres-1) << 12)); 138 ((native_mode->vdisplay-1) << 12));
139 } 139 }
140 break; 140 break;
141 case RMX_CENTER: 141 case RMX_CENTER:
@@ -175,8 +175,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
175 ? RADEON_CRTC_V_SYNC_POL 175 ? RADEON_CRTC_V_SYNC_POL
176 : 0))); 176 : 0)));
177 177
178 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | 178 fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
179 (((native_mode->panel_xres / 8) & 0x1ff) << 16)); 179 (((native_mode->hdisplay / 8) & 0x1ff) << 16));
180 break; 180 break;
181 case RMX_OFF: 181 case RMX_OFF:
182 default: 182 default:
@@ -532,6 +532,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
532 radeon_fb = to_radeon_framebuffer(old_fb); 532 radeon_fb = to_radeon_framebuffer(old_fb);
533 radeon_gem_object_unpin(radeon_fb->obj); 533 radeon_gem_object_unpin(radeon_fb->obj);
534 } 534 }
535
536 /* Bytes per pixel may have changed */
537 radeon_bandwidth_update(rdev);
538
535 return 0; 539 return 0;
536} 540}
537 541
@@ -664,6 +668,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
664 668
665 WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); 669 WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
666 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 670 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
671
672 WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
673 WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
667 } else { 674 } else {
668 uint32_t crtc_gen_cntl; 675 uint32_t crtc_gen_cntl;
669 uint32_t crtc_ext_cntl; 676 uint32_t crtc_ext_cntl;
@@ -1015,14 +1022,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1015 int x, int y, struct drm_framebuffer *old_fb) 1022 int x, int y, struct drm_framebuffer *old_fb)
1016{ 1023{
1017 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1024 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1018 struct drm_device *dev = crtc->dev;
1019 struct radeon_device *rdev = dev->dev_private;
1020 1025
1021 /* TODO TV */ 1026 /* TODO TV */
1022 radeon_crtc_set_base(crtc, x, y, old_fb); 1027 radeon_crtc_set_base(crtc, x, y, old_fb);
1023 radeon_set_crtc_timing(crtc, adjusted_mode); 1028 radeon_set_crtc_timing(crtc, adjusted_mode);
1024 radeon_set_pll(crtc, adjusted_mode); 1029 radeon_set_pll(crtc, adjusted_mode);
1025 radeon_bandwidth_update(rdev);
1026 if (radeon_crtc->crtc_id == 0) { 1030 if (radeon_crtc->crtc_id == 0) {
1027 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); 1031 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
1028 } else { 1032 } else {
@@ -1053,6 +1057,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1053 .mode_set_base = radeon_crtc_set_base, 1057 .mode_set_base = radeon_crtc_set_base,
1054 .prepare = radeon_crtc_prepare, 1058 .prepare = radeon_crtc_prepare,
1055 .commit = radeon_crtc_commit, 1059 .commit = radeon_crtc_commit,
1060 .load_lut = radeon_crtc_load_lut,
1056}; 1061};
1057 1062
1058 1063
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b1547f700d73..00382122869b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
107 else 107 else
108 radeon_combios_output_lock(encoder, true); 108 radeon_combios_output_lock(encoder, true);
109 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); 109 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
110
111 radeon_encoder_set_active_device(encoder);
112} 110}
113 111
114static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) 112static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
@@ -192,6 +190,8 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
192{ 190{
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 192
193 /* set the active encoder to connector routing */
194 radeon_encoder_set_active_device(encoder);
195 drm_mode_set_crtcinfo(adjusted_mode, 0); 195 drm_mode_set_crtcinfo(adjusted_mode, 0);
196 196
197 if (radeon_encoder->rmx_type != RMX_OFF) 197 if (radeon_encoder->rmx_type != RMX_OFF)
@@ -218,7 +218,8 @@ static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
218 struct drm_display_mode *mode, 218 struct drm_display_mode *mode,
219 struct drm_display_mode *adjusted_mode) 219 struct drm_display_mode *adjusted_mode)
220{ 220{
221 221 /* set the active encoder to connector routing */
222 radeon_encoder_set_active_device(encoder);
222 drm_mode_set_crtcinfo(adjusted_mode, 0); 223 drm_mode_set_crtcinfo(adjusted_mode, 0);
223 224
224 return true; 225 return true;
@@ -272,7 +273,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
272 else 273 else
273 radeon_combios_output_lock(encoder, true); 274 radeon_combios_output_lock(encoder, true);
274 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); 275 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
275 radeon_encoder_set_active_device(encoder);
276} 276}
277 277
278static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) 278static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
@@ -468,7 +468,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
468 else 468 else
469 radeon_combios_output_lock(encoder, true); 469 radeon_combios_output_lock(encoder, true);
470 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); 470 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
471 radeon_encoder_set_active_device(encoder);
472} 471}
473 472
474static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) 473static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
@@ -543,6 +542,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
543 542
544 fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); 543 fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
545 544
545 fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
546 RADEON_FP_DFP_SYNC_SEL |
547 RADEON_FP_CRT_SYNC_SEL |
548 RADEON_FP_CRTC_LOCK_8DOT |
549 RADEON_FP_USE_SHADOW_EN |
550 RADEON_FP_CRTC_USE_SHADOW_VEND |
551 RADEON_FP_CRT_SYNC_ALT);
552
546 if (1) /* FIXME rgbBits == 8 */ 553 if (1) /* FIXME rgbBits == 8 */
547 fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ 554 fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
548 else 555 else
@@ -556,7 +563,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
556 else 563 else
557 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; 564 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
558 } else 565 } else
559 fp_gen_cntl |= RADEON_FP_SEL_CRTC1; 566 fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
560 } else { 567 } else {
561 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { 568 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
562 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; 569 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
@@ -593,7 +600,8 @@ static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
593 struct drm_display_mode *mode, 600 struct drm_display_mode *mode,
594 struct drm_display_mode *adjusted_mode) 601 struct drm_display_mode *adjusted_mode)
595{ 602{
596 603 /* set the active encoder to connector routing */
604 radeon_encoder_set_active_device(encoder);
597 drm_mode_set_crtcinfo(adjusted_mode, 0); 605 drm_mode_set_crtcinfo(adjusted_mode, 0);
598 606
599 return true; 607 return true;
@@ -636,7 +644,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
636 else 644 else
637 radeon_combios_output_lock(encoder, true); 645 radeon_combios_output_lock(encoder, true);
638 radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); 646 radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
639 radeon_encoder_set_active_device(encoder);
640} 647}
641 648
642static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) 649static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
@@ -735,7 +742,8 @@ static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
735 struct drm_display_mode *mode, 742 struct drm_display_mode *mode,
736 struct drm_display_mode *adjusted_mode) 743 struct drm_display_mode *adjusted_mode)
737{ 744{
738 745 /* set the active encoder to connector routing */
746 radeon_encoder_set_active_device(encoder);
739 drm_mode_set_crtcinfo(adjusted_mode, 0); 747 drm_mode_set_crtcinfo(adjusted_mode, 0);
740 748
741 return true; 749 return true;
@@ -839,7 +847,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
839 else 847 else
840 radeon_combios_output_lock(encoder, true); 848 radeon_combios_output_lock(encoder, true);
841 radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); 849 radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
842 radeon_encoder_set_active_device(encoder);
843} 850}
844 851
845static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) 852static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
@@ -881,7 +888,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
881 R420_TV_DAC_DACADJ_MASK | 888 R420_TV_DAC_DACADJ_MASK |
882 R420_TV_DAC_RDACPD | 889 R420_TV_DAC_RDACPD |
883 R420_TV_DAC_GDACPD | 890 R420_TV_DAC_GDACPD |
884 R420_TV_DAC_GDACPD | 891 R420_TV_DAC_BDACPD |
885 R420_TV_DAC_TVENABLE); 892 R420_TV_DAC_TVENABLE);
886 } else { 893 } else {
887 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 894 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
@@ -889,7 +896,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
889 RADEON_TV_DAC_DACADJ_MASK | 896 RADEON_TV_DAC_DACADJ_MASK |
890 RADEON_TV_DAC_RDACPD | 897 RADEON_TV_DAC_RDACPD |
891 RADEON_TV_DAC_GDACPD | 898 RADEON_TV_DAC_GDACPD |
892 RADEON_TV_DAC_GDACPD); 899 RADEON_TV_DAC_BDACPD);
893 } 900 }
894 901
895 /* FIXME TV */ 902 /* FIXME TV */
@@ -1318,7 +1325,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1318 return; 1325 return;
1319 1326
1320 encoder = &radeon_encoder->base; 1327 encoder = &radeon_encoder->base;
1321 encoder->possible_crtcs = 0x3; 1328 if (rdev->flags & RADEON_SINGLE_CRTC)
1329 encoder->possible_crtcs = 0x1;
1330 else
1331 encoder->possible_crtcs = 0x3;
1322 encoder->possible_clones = 0; 1332 encoder->possible_clones = 0;
1323 1333
1324 radeon_encoder->enc_priv = NULL; 1334 radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 570a58729daf..ace726aa0d76 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -172,6 +172,7 @@ enum radeon_connector_table {
172 172
173struct radeon_mode_info { 173struct radeon_mode_info {
174 struct atom_context *atom_context; 174 struct atom_context *atom_context;
175 struct card_info *atom_card_info;
175 enum radeon_connector_table connector_table; 176 enum radeon_connector_table connector_table;
176 bool mode_config_initialized; 177 bool mode_config_initialized;
177 struct radeon_crtc *crtcs[2]; 178 struct radeon_crtc *crtcs[2];
@@ -186,17 +187,6 @@ struct radeon_mode_info {
186 187
187}; 188};
188 189
189struct radeon_native_mode {
190 /* preferred mode */
191 uint32_t panel_xres, panel_yres;
192 uint32_t hoverplus, hsync_width;
193 uint32_t hblank;
194 uint32_t voverplus, vsync_width;
195 uint32_t vblank;
196 uint32_t dotclock;
197 uint32_t flags;
198};
199
200#define MAX_H_CODE_TIMING_LEN 32 190#define MAX_H_CODE_TIMING_LEN 32
201#define MAX_V_CODE_TIMING_LEN 32 191#define MAX_V_CODE_TIMING_LEN 32
202 192
@@ -228,7 +218,7 @@ struct radeon_crtc {
228 enum radeon_rmx_type rmx_type; 218 enum radeon_rmx_type rmx_type;
229 fixed20_12 vsc; 219 fixed20_12 vsc;
230 fixed20_12 hsc; 220 fixed20_12 hsc;
231 struct radeon_native_mode native_mode; 221 struct drm_display_mode native_mode;
232}; 222};
233 223
234struct radeon_encoder_primary_dac { 224struct radeon_encoder_primary_dac {
@@ -248,7 +238,7 @@ struct radeon_encoder_lvds {
248 bool use_bios_dividers; 238 bool use_bios_dividers;
249 uint32_t lvds_gen_cntl; 239 uint32_t lvds_gen_cntl;
250 /* panel mode */ 240 /* panel mode */
251 struct radeon_native_mode native_mode; 241 struct drm_display_mode native_mode;
252}; 242};
253 243
254struct radeon_encoder_tv_dac { 244struct radeon_encoder_tv_dac {
@@ -271,6 +261,16 @@ struct radeon_encoder_int_tmds {
271 struct radeon_tmds_pll tmds_pll[4]; 261 struct radeon_tmds_pll tmds_pll[4];
272}; 262};
273 263
264/* spread spectrum */
265struct radeon_atom_ss {
266 uint16_t percentage;
267 uint8_t type;
268 uint8_t step;
269 uint8_t delay;
270 uint8_t range;
271 uint8_t refdiv;
272};
273
274struct radeon_encoder_atom_dig { 274struct radeon_encoder_atom_dig {
275 /* atom dig */ 275 /* atom dig */
276 bool coherent_mode; 276 bool coherent_mode;
@@ -278,8 +278,9 @@ struct radeon_encoder_atom_dig {
278 /* atom lvds */ 278 /* atom lvds */
279 uint32_t lvds_misc; 279 uint32_t lvds_misc;
280 uint16_t panel_pwr_delay; 280 uint16_t panel_pwr_delay;
281 struct radeon_atom_ss *ss;
281 /* panel mode */ 282 /* panel mode */
282 struct radeon_native_mode native_mode; 283 struct drm_display_mode native_mode;
283}; 284};
284 285
285struct radeon_encoder_atom_dac { 286struct radeon_encoder_atom_dac {
@@ -294,7 +295,7 @@ struct radeon_encoder {
294 uint32_t flags; 295 uint32_t flags;
295 uint32_t pixel_clock; 296 uint32_t pixel_clock;
296 enum radeon_rmx_type rmx_type; 297 enum radeon_rmx_type rmx_type;
297 struct radeon_native_mode native_mode; 298 struct drm_display_mode native_mode;
298 void *enc_priv; 299 void *enc_priv;
299}; 300};
300 301
@@ -308,12 +309,15 @@ struct radeon_connector {
308 uint32_t connector_id; 309 uint32_t connector_id;
309 uint32_t devices; 310 uint32_t devices;
310 struct radeon_i2c_chan *ddc_bus; 311 struct radeon_i2c_chan *ddc_bus;
312 /* some systems have a an hdmi and vga port with a shared ddc line */
313 bool shared_ddc;
311 bool use_digital; 314 bool use_digital;
312 /* we need to mind the EDID between detect 315 /* we need to mind the EDID between detect
313 and get modes due to analog/digital/tvencoder */ 316 and get modes due to analog/digital/tvencoder */
314 struct edid *edid; 317 struct edid *edid;
315 void *con_priv; 318 void *con_priv;
316 bool dac_load_detect; 319 bool dac_load_detect;
320 uint16_t connector_object_id;
317}; 321};
318 322
319struct radeon_framebuffer { 323struct radeon_framebuffer {
@@ -407,6 +411,8 @@ extern void
407radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); 411radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
408extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 412extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
409 u16 blue, int regno); 413 u16 blue, int regno);
414extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
415 u16 *blue, int regno);
410struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, 416struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
411 struct drm_mode_fb_cmd *mode_cmd, 417 struct drm_mode_fb_cmd *mode_cmd,
412 struct drm_gem_object *obj); 418 struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 73af463b7a59..1f056dadc5c2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
400int radeon_object_list_reserve(struct list_head *head) 400int radeon_object_list_reserve(struct list_head *head)
401{ 401{
402 struct radeon_object_list *lobj; 402 struct radeon_object_list *lobj;
403 struct list_head *i;
404 int r; 403 int r;
405 404
406 list_for_each(i, head) { 405 list_for_each_entry(lobj, head, list){
407 lobj = list_entry(i, struct radeon_object_list, list);
408 if (!lobj->robj->pin_count) { 406 if (!lobj->robj->pin_count) {
409 r = radeon_object_reserve(lobj->robj, true); 407 r = radeon_object_reserve(lobj->robj, true);
410 if (unlikely(r != 0)) { 408 if (unlikely(r != 0)) {
@@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head)
420void radeon_object_list_unreserve(struct list_head *head) 418void radeon_object_list_unreserve(struct list_head *head)
421{ 419{
422 struct radeon_object_list *lobj; 420 struct radeon_object_list *lobj;
423 struct list_head *i;
424 421
425 list_for_each(i, head) { 422 list_for_each_entry(lobj, head, list) {
426 lobj = list_entry(i, struct radeon_object_list, list);
427 if (!lobj->robj->pin_count) { 423 if (!lobj->robj->pin_count) {
428 radeon_object_unreserve(lobj->robj); 424 radeon_object_unreserve(lobj->robj);
429 } else {
430 } 425 }
431 } 426 }
432} 427}
@@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
436 struct radeon_object_list *lobj; 431 struct radeon_object_list *lobj;
437 struct radeon_object *robj; 432 struct radeon_object *robj;
438 struct radeon_fence *old_fence = NULL; 433 struct radeon_fence *old_fence = NULL;
439 struct list_head *i;
440 int r; 434 int r;
441 435
442 r = radeon_object_list_reserve(head); 436 r = radeon_object_list_reserve(head);
@@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
444 radeon_object_list_unreserve(head); 438 radeon_object_list_unreserve(head);
445 return r; 439 return r;
446 } 440 }
447 list_for_each(i, head) { 441 list_for_each_entry(lobj, head, list) {
448 lobj = list_entry(i, struct radeon_object_list, list);
449 robj = lobj->robj; 442 robj = lobj->robj;
450 if (!robj->pin_count) { 443 if (!robj->pin_count) {
451 if (lobj->wdomain) { 444 if (lobj->wdomain) {
@@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head)
482{ 475{
483 struct radeon_object_list *lobj; 476 struct radeon_object_list *lobj;
484 struct radeon_fence *old_fence = NULL; 477 struct radeon_fence *old_fence = NULL;
485 struct list_head *i;
486 478
487 list_for_each(i, head) { 479 list_for_each_entry(lobj, head, list) {
488 lobj = list_entry(i, struct radeon_object_list, list);
489 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; 480 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
490 lobj->robj->tobj.sync_obj = NULL; 481 lobj->robj->tobj.sync_obj = NULL;
491 if (old_fence) { 482 if (old_fence) {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 000000000000..46146c6a2a06
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,65 @@
1/*
2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation
5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 * and/or sell copies of the Software, and to permit persons to whom the
7 * Software is furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 * OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 */
22#include "drmP.h"
23#include "radeon.h"
24
25int radeon_debugfs_pm_init(struct radeon_device *rdev);
26
27int radeon_pm_init(struct radeon_device *rdev)
28{
29 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for CP !\n");
31 }
32
33 return 0;
34}
35
36/*
37 * Debugfs info
38 */
39#if defined(CONFIG_DEBUG_FS)
40
41static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
42{
43 struct drm_info_node *node = (struct drm_info_node *) m->private;
44 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private;
46
47 seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
48 seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
49
50 return 0;
51}
52
53static struct drm_info_list radeon_pm_info_list[] = {
54 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
55};
56#endif
57
58int radeon_debugfs_pm_init(struct radeon_device *rdev)
59{
60#if defined(CONFIG_DEBUG_FS)
61 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
62#else
63 return 0;
64#endif
65}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 21da871a793c..29ab75903ec1 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -290,6 +290,8 @@
290#define RADEON_BUS_CNTL 0x0030 290#define RADEON_BUS_CNTL 0x0030
291# define RADEON_BUS_MASTER_DIS (1 << 6) 291# define RADEON_BUS_MASTER_DIS (1 << 6)
292# define RADEON_BUS_BIOS_DIS_ROM (1 << 12) 292# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
293# define RS600_BUS_MASTER_DIS (1 << 14)
294# define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */
293# define RADEON_BUS_RD_DISCARD_EN (1 << 24) 295# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
294# define RADEON_BUS_RD_ABORT_EN (1 << 25) 296# define RADEON_BUS_RD_ABORT_EN (1 << 25)
295# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) 297# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
@@ -297,6 +299,9 @@
297# define RADEON_BUS_READ_BURST (1 << 30) 299# define RADEON_BUS_READ_BURST (1 << 30)
298#define RADEON_BUS_CNTL1 0x0034 300#define RADEON_BUS_CNTL1 0x0034
299# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) 301# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
302/* rv370/rv380, rv410, r423/r430/r480, r5xx */
303#define RADEON_MSI_REARM_EN 0x0160
304# define RV370_MSI_REARM_EN (1 << 0)
300 305
301/* #define RADEON_PCIE_INDEX 0x0030 */ 306/* #define RADEON_PCIE_INDEX 0x0030 */
302/* #define RADEON_PCIE_DATA 0x0034 */ 307/* #define RADEON_PCIE_DATA 0x0034 */
@@ -3311,6 +3316,7 @@
3311#define RADEON_AIC_CNTL 0x01d0 3316#define RADEON_AIC_CNTL 0x01d0
3312# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) 3317# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
3313# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) 3318# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
3319# define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */
3314#define RADEON_AIC_LO_ADDR 0x01dc 3320#define RADEON_AIC_LO_ADDR 0x01dc
3315#define RADEON_AIC_PT_BASE 0x01d8 3321#define RADEON_AIC_PT_BASE 0x01d8
3316#define RADEON_AIC_HI_ADDR 0x01e0 3322#define RADEON_AIC_HI_ADDR 0x01e0
@@ -3333,6 +3339,7 @@
3333# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) 3339# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
3334# define RADEON_CP_PACKET0_REG_MASK 0x000007ff 3340# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
3335# define R300_CP_PACKET0_REG_MASK 0x00001fff 3341# define R300_CP_PACKET0_REG_MASK 0x00001fff
3342# define R600_CP_PACKET0_REG_MASK 0x0000ffff
3336# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff 3343# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
3337# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 3344# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
3338 3345
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 03c33cf4e14c..f8a465d9a1cf 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev)
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffer) / test size
44 */ 44 */
45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - 45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
46 rdev->cp.ring_size) / size; 46 rdev->cp.ring_size) / size;
47 47
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev)
102 goto out_cleanup; 102 goto out_cleanup;
103 } 103 }
104 104
105 r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); 105 r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
106 if (r) { 106 if (r) {
107 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 107 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
108 goto out_cleanup; 108 goto out_cleanup;
@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev)
145 goto out_cleanup; 145 goto out_cleanup;
146 } 146 }
147 147
148 r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); 148 r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
149 if (r) { 149 if (r) {
150 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 150 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
151 goto out_cleanup; 151 goto out_cleanup;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index acd889c94549..1381e06d6af3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
295 if (unlikely(r)) { 295 if (unlikely(r)) {
296 return r; 296 return r;
297 } 297 }
298
299 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
300 if (unlikely(r)) {
301 goto out_cleanup;
302 }
303
298 r = ttm_tt_bind(bo->ttm, &tmp_mem); 304 r = ttm_tt_bind(bo->ttm, &tmp_mem);
299 if (unlikely(r)) { 305 if (unlikely(r)) {
300 goto out_cleanup; 306 goto out_cleanup;
@@ -530,7 +536,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
530} 536}
531 537
532static struct vm_operations_struct radeon_ttm_vm_ops; 538static struct vm_operations_struct radeon_ttm_vm_ops;
533static struct vm_operations_struct *ttm_vm_ops = NULL; 539static const struct vm_operations_struct *ttm_vm_ops = NULL;
534 540
535static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 541static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
536{ 542{
@@ -689,9 +695,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
689 695
690#define RADEON_DEBUGFS_MEM_TYPES 2 696#define RADEON_DEBUGFS_MEM_TYPES 2
691 697
692static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
693static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
694
695#if defined(CONFIG_DEBUG_FS) 698#if defined(CONFIG_DEBUG_FS)
696static int radeon_mm_dump_table(struct seq_file *m, void *data) 699static int radeon_mm_dump_table(struct seq_file *m, void *data)
697{ 700{
@@ -711,9 +714,11 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
711 714
712static int radeon_ttm_debugfs_init(struct radeon_device *rdev) 715static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
713{ 716{
717#if defined(CONFIG_DEBUG_FS)
718 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
719 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
714 unsigned i; 720 unsigned i;
715 721
716#if defined(CONFIG_DEBUG_FS)
717 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { 722 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
718 if (i == 0) 723 if (i == 0)
719 sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); 724 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 000000000000..48a913a06cfd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS100D_H__
29#define __RS100D_H__
30
31/* Registers */
32#define R_00015C_NB_TOM 0x00015C
33#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
34#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
35#define C_00015C_MC_FB_START 0xFFFF0000
36#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
37#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
38#define C_00015C_MC_FB_TOP 0x0000FFFF
39
40#endif
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a3fbdad938c7..ca037160a582 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -27,27 +27,12 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "radeon_reg.h"
31#include "radeon.h" 30#include "radeon.h"
31#include "rs400d.h"
32 32
33/* rs400,rs480 depends on : */ 33/* This files gather functions specifics to : rs400,rs480 */
34void r100_hdp_reset(struct radeon_device *rdev); 34static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
35void r100_mc_disable_clients(struct radeon_device *rdev);
36int r300_mc_wait_for_idle(struct radeon_device *rdev);
37void r420_pipes_init(struct radeon_device *rdev);
38 35
39/* This files gather functions specifics to :
40 * rs400,rs480
41 *
42 * Some of these functions might be used by newer ASICs.
43 */
44void rs400_gpu_init(struct radeon_device *rdev);
45int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
46
47
48/*
49 * GART functions.
50 */
51void rs400_gart_adjust_size(struct radeon_device *rdev) 36void rs400_gart_adjust_size(struct radeon_device *rdev)
52{ 37{
53 /* Check gart size */ 38 /* Check gart size */
@@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
238 return 0; 223 return 0;
239} 224}
240 225
241
242/*
243 * MC functions.
244 */
245int rs400_mc_init(struct radeon_device *rdev)
246{
247 uint32_t tmp;
248 int r;
249
250 if (r100_debugfs_rbbm_init(rdev)) {
251 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
252 }
253
254 rs400_gpu_init(rdev);
255 rs400_gart_disable(rdev);
256 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
257 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
258 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
259 r = radeon_mc_setup(rdev);
260 if (r) {
261 return r;
262 }
263
264 r100_mc_disable_clients(rdev);
265 if (r300_mc_wait_for_idle(rdev)) {
266 printk(KERN_WARNING "Failed to wait MC idle while "
267 "programming pipes. Bad things might happen.\n");
268 }
269
270 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
271 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
272 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
273 WREG32(RADEON_MC_FB_LOCATION, tmp);
274 tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
275 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
276 (void)RREG32(RADEON_HOST_PATH_CNTL);
277 WREG32(RADEON_HOST_PATH_CNTL, tmp);
278 (void)RREG32(RADEON_HOST_PATH_CNTL);
279
280 return 0;
281}
282
283void rs400_mc_fini(struct radeon_device *rdev)
284{
285}
286
287
288/*
289 * Global GPU functions
290 */
291void rs400_errata(struct radeon_device *rdev)
292{
293 rdev->pll_errata = 0;
294}
295
296void rs400_gpu_init(struct radeon_device *rdev) 226void rs400_gpu_init(struct radeon_device *rdev)
297{ 227{
298 /* FIXME: HDP same place on rs400 ? */ 228 /* FIXME: HDP same place on rs400 ? */
@@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev)
305 } 235 }
306} 236}
307 237
308
309/*
310 * VRAM info.
311 */
312void rs400_vram_info(struct radeon_device *rdev) 238void rs400_vram_info(struct radeon_device *rdev)
313{ 239{
314 rs400_gart_adjust_size(rdev); 240 rs400_gart_adjust_size(rdev);
@@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev)
319 r100_vram_init_sizes(rdev); 245 r100_vram_init_sizes(rdev);
320} 246}
321 247
322
323/*
324 * Indirect registers accessor
325 */
326uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 248uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
327{ 249{
328 uint32_t r; 250 uint32_t r;
@@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
340 WREG32(RS480_NB_MC_INDEX, 0xff); 262 WREG32(RS480_NB_MC_INDEX, 0xff);
341} 263}
342 264
343
344/*
345 * Debugfs info
346 */
347#if defined(CONFIG_DEBUG_FS) 265#if defined(CONFIG_DEBUG_FS)
348static int rs400_debugfs_gart_info(struct seq_file *m, void *data) 266static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
349{ 267{
@@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = {
419}; 337};
420#endif 338#endif
421 339
422int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) 340static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
423{ 341{
424#if defined(CONFIG_DEBUG_FS) 342#if defined(CONFIG_DEBUG_FS)
425 return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); 343 return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
@@ -427,3 +345,190 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
427 return 0; 345 return 0;
428#endif 346#endif
429} 347}
348
349static int rs400_mc_init(struct radeon_device *rdev)
350{
351 int r;
352 u32 tmp;
353
354 /* Setup GPU memory space */
355 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
356 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
357 rdev->mc.gtt_location = 0xFFFFFFFFUL;
358 r = radeon_mc_setup(rdev);
359 if (r)
360 return r;
361 return 0;
362}
363
364void rs400_mc_program(struct radeon_device *rdev)
365{
366 struct r100_mc_save save;
367
368 /* Stops all mc clients */
369 r100_mc_stop(rdev, &save);
370
371 /* Wait for mc idle */
372 if (r300_mc_wait_for_idle(rdev))
373 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
374 WREG32(R_000148_MC_FB_LOCATION,
375 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
376 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
377
378 r100_mc_resume(rdev, &save);
379}
380
381static int rs400_startup(struct radeon_device *rdev)
382{
383 int r;
384
385 rs400_mc_program(rdev);
386 /* Resume clock */
387 r300_clock_startup(rdev);
388 /* Initialize GPU configuration (# pipes, ...) */
389 rs400_gpu_init(rdev);
390 /* Initialize GART (initialize after TTM so we can allocate
391 * memory through TTM but finalize after TTM) */
392 r = rs400_gart_enable(rdev);
393 if (r)
394 return r;
395 /* Enable IRQ */
396 rdev->irq.sw_int = true;
397 r100_irq_set(rdev);
398 /* 1M ring buffer */
399 r = r100_cp_init(rdev, 1024 * 1024);
400 if (r) {
401 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
402 return r;
403 }
404 r = r100_wb_init(rdev);
405 if (r)
406 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
407 r = r100_ib_init(rdev);
408 if (r) {
409 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
410 return r;
411 }
412 return 0;
413}
414
415int rs400_resume(struct radeon_device *rdev)
416{
417 /* Make sur GART are not working */
418 rs400_gart_disable(rdev);
419 /* Resume clock before doing reset */
420 r300_clock_startup(rdev);
421 /* setup MC before calling post tables */
422 rs400_mc_program(rdev);
423 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
424 if (radeon_gpu_reset(rdev)) {
425 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
426 RREG32(R_000E40_RBBM_STATUS),
427 RREG32(R_0007C0_CP_STAT));
428 }
429 /* post */
430 radeon_combios_asic_init(rdev->ddev);
431 /* Resume clock after posting */
432 r300_clock_startup(rdev);
433 return rs400_startup(rdev);
434}
435
436int rs400_suspend(struct radeon_device *rdev)
437{
438 r100_cp_disable(rdev);
439 r100_wb_disable(rdev);
440 r100_irq_disable(rdev);
441 rs400_gart_disable(rdev);
442 return 0;
443}
444
445void rs400_fini(struct radeon_device *rdev)
446{
447 rs400_suspend(rdev);
448 r100_cp_fini(rdev);
449 r100_wb_fini(rdev);
450 r100_ib_fini(rdev);
451 radeon_gem_fini(rdev);
452 rs400_gart_fini(rdev);
453 radeon_irq_kms_fini(rdev);
454 radeon_fence_driver_fini(rdev);
455 radeon_object_fini(rdev);
456 radeon_atombios_fini(rdev);
457 kfree(rdev->bios);
458 rdev->bios = NULL;
459}
460
461int rs400_init(struct radeon_device *rdev)
462{
463 int r;
464
465 /* Disable VGA */
466 r100_vga_render_disable(rdev);
467 /* Initialize scratch registers */
468 radeon_scratch_init(rdev);
469 /* Initialize surface registers */
470 radeon_surface_init(rdev);
471 /* TODO: disable VGA need to use VGA request */
472 /* BIOS*/
473 if (!radeon_get_bios(rdev)) {
474 if (ASIC_IS_AVIVO(rdev))
475 return -EINVAL;
476 }
477 if (rdev->is_atom_bios) {
478 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
479 return -EINVAL;
480 } else {
481 r = radeon_combios_init(rdev);
482 if (r)
483 return r;
484 }
485 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
486 if (radeon_gpu_reset(rdev)) {
487 dev_warn(rdev->dev,
488 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
489 RREG32(R_000E40_RBBM_STATUS),
490 RREG32(R_0007C0_CP_STAT));
491 }
492 /* check if cards are posted or not */
493 if (!radeon_card_posted(rdev) && rdev->bios) {
494 DRM_INFO("GPU not posted. posting now...\n");
495 radeon_combios_asic_init(rdev->ddev);
496 }
497 /* Initialize clocks */
498 radeon_get_clock_info(rdev->ddev);
499 /* Get vram informations */
500 rs400_vram_info(rdev);
501 /* Initialize memory controller (also test AGP) */
502 r = rs400_mc_init(rdev);
503 if (r)
504 return r;
505 /* Fence driver */
506 r = radeon_fence_driver_init(rdev);
507 if (r)
508 return r;
509 r = radeon_irq_kms_init(rdev);
510 if (r)
511 return r;
512 /* Memory manager */
513 r = radeon_object_init(rdev);
514 if (r)
515 return r;
516 r = rs400_gart_init(rdev);
517 if (r)
518 return r;
519 r300_set_reg_safe(rdev);
520 rdev->accel_working = true;
521 r = rs400_startup(rdev);
522 if (r) {
523 /* Somethings want wront with the accel init stop accel */
524 dev_err(rdev->dev, "Disabling GPU acceleration\n");
525 rs400_suspend(rdev);
526 r100_cp_fini(rdev);
527 r100_wb_fini(rdev);
528 r100_ib_fini(rdev);
529 rs400_gart_fini(rdev);
530 radeon_irq_kms_fini(rdev);
531 rdev->accel_working = false;
532 }
533 return 0;
534}
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 000000000000..6d8bac58ced9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS400D_H__
29#define __RS400D_H__
30
31/* Registers */
32#define R_000148_MC_FB_LOCATION 0x000148
33#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
34#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
35#define C_000148_MC_FB_START 0xFFFF0000
36#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
37#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
38#define C_000148_MC_FB_TOP 0x0000FFFF
39#define R_00015C_NB_TOM 0x00015C
40#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
41#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
42#define C_00015C_MC_FB_START 0xFFFF0000
43#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
44#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
45#define C_00015C_MC_FB_TOP 0x0000FFFF
46#define R_0007C0_CP_STAT 0x0007C0
47#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
48#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
49#define C_0007C0_MRU_BUSY 0xFFFFFFFE
50#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
51#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
52#define C_0007C0_MWU_BUSY 0xFFFFFFFD
53#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
54#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
55#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
56#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
57#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
58#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
59#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
60#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
61#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
62#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
63#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
64#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
65#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
66#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
67#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
68#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
69#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
70#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
71#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
72#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
73#define C_0007C0_CSI_BUSY 0xFFFFDFFF
74#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
75#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
76#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
77#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
78#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
79#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
80#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
81#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
82#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
83#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
84#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
85#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
86#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
87#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
88#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
89#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
90#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
91#define C_0007C0_CP_BUSY 0x7FFFFFFF
92#define R_000E40_RBBM_STATUS 0x000E40
93#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
94#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
95#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
96#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
97#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
98#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
99#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
100#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
101#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
102#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
103#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
104#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
105#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
106#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
107#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
108#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
109#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
110#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
111#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
112#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
113#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
114#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
115#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
116#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
117#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
118#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
119#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
120#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
121#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
122#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
123#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
124#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
125#define C_000E40_E2_BUSY 0xFFFDFFFF
126#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
127#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
128#define C_000E40_RB2D_BUSY 0xFFFBFFFF
129#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
130#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
131#define C_000E40_RB3D_BUSY 0xFFF7FFFF
132#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
133#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
134#define C_000E40_VAP_BUSY 0xFFEFFFFF
135#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
136#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
137#define C_000E40_RE_BUSY 0xFFDFFFFF
138#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
139#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
140#define C_000E40_TAM_BUSY 0xFFBFFFFF
141#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
142#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
143#define C_000E40_TDM_BUSY 0xFF7FFFFF
144#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
145#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
146#define C_000E40_PB_BUSY 0xFEFFFFFF
147#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
148#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
149#define C_000E40_TIM_BUSY 0xFDFFFFFF
150#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
151#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
152#define C_000E40_GA_BUSY 0xFBFFFFFF
153#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
154#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
155#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
156#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
157#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
158#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
159
160#endif
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0e791e26def3..5f117cd8736a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -25,28 +25,25 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28/* RS600 / Radeon X1250/X1270 integrated GPU
29 *
30 * This file gather function specific to RS600 which is the IGP of
31 * the X1250/X1270 family supporting intel CPU (while RS690/RS740
32 * is the X1250/X1270 supporting AMD CPU). The display engine are
33 * the avivo one, bios is an atombios, 3D block are the one of the
34 * R4XX family. The GART is different from the RS400 one and is very
35 * close to the one of the R600 family (R600 likely being an evolution
36 * of the RS600 GART block).
37 */
28#include "drmP.h" 38#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 39#include "radeon.h"
31#include "avivod.h" 40#include "atom.h"
41#include "rs600d.h"
32 42
33#include "rs600_reg_safe.h" 43#include "rs600_reg_safe.h"
34 44
35/* rs600 depends on : */
36void r100_hdp_reset(struct radeon_device *rdev);
37int r100_gui_wait_for_idle(struct radeon_device *rdev);
38int r300_mc_wait_for_idle(struct radeon_device *rdev);
39void r420_pipes_init(struct radeon_device *rdev);
40
41/* This files gather functions specifics to :
42 * rs600
43 *
44 * Some of these functions might be used by newer ASICs.
45 */
46void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48void rs600_disable_vga(struct radeon_device *rdev);
49
50 47
51/* 48/*
52 * GART. 49 * GART.
@@ -55,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
55{ 52{
56 uint32_t tmp; 53 uint32_t tmp;
57 54
58 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 55 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
59 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 56 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
60 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 57 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
61 58
62 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 59 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
63 tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; 60 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
64 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 61 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
65 62
66 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 63 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
67 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 64 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
68 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 65 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
69 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 66 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
70} 67}
71 68
72int rs600_gart_init(struct radeon_device *rdev) 69int rs600_gart_init(struct radeon_device *rdev)
@@ -88,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
88 85
89int rs600_gart_enable(struct radeon_device *rdev) 86int rs600_gart_enable(struct radeon_device *rdev)
90{ 87{
91 uint32_t tmp; 88 u32 tmp;
92 int r, i; 89 int r, i;
93 90
94 if (rdev->gart.table.vram.robj == NULL) { 91 if (rdev->gart.table.vram.robj == NULL) {
@@ -98,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
98 r = radeon_gart_table_vram_pin(rdev); 95 r = radeon_gart_table_vram_pin(rdev);
99 if (r) 96 if (r)
100 return r; 97 return r;
98 /* Enable bus master */
99 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
100 WREG32(R_00004C_BUS_CNTL, tmp);
101 /* FIXME: setup default page */ 101 /* FIXME: setup default page */
102 WREG32_MC(RS600_MC_PT0_CNTL, 102 WREG32_MC(R_000100_MC_PT0_CNTL,
103 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | 103 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
104 RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); 104 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
105 for (i = 0; i < 19; i++) { 105 for (i = 0; i < 19; i++) {
106 WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, 106 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
107 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | 107 S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
108 RS600_SYSTEM_ACCESS_MODE_IN_SYS | 108 S_00016C_SYSTEM_ACCESS_MODE_MASK(
109 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | 109 V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
110 RS600_EFFECTIVE_L1_CACHE_SIZE(3) | 110 S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
111 RS600_ENABLE_FRAGMENT_PROCESSING | 111 V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
112 RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); 112 S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
113 S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
114 S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
113 } 115 }
114 116
115 /* System context map to GART space */ 117 /* System context map to GART space */
116 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); 118 WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
117 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 119 WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
118 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
119 120
120 /* enable first context */ 121 /* enable first context */
121 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); 122 WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
122 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 123 WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
123 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); 124 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
124 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, 125 S_000102_ENABLE_PAGE_TABLE(1) |
125 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); 126 S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
126 /* disable all other contexts */ 127 /* disable all other contexts */
127 for (i = 1; i < 8; i++) { 128 for (i = 1; i < 8; i++) {
128 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); 129 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
129 } 130 }
130 131
131 /* setup the page table */ 132 /* setup the page table */
132 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, 133 WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
133 rdev->gart.table_addr); 134 rdev->gart.table_addr);
134 WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); 135 WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
135 136
136 /* enable page tables */ 137 /* enable page tables */
137 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 138 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
138 WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); 139 WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
139 tmp = RREG32_MC(RS600_MC_CNTL1); 140 tmp = RREG32_MC(R_000009_MC_CNTL1);
140 WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); 141 WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
141 rs600_gart_tlb_flush(rdev); 142 rs600_gart_tlb_flush(rdev);
142 rdev->gart.ready = true; 143 rdev->gart.ready = true;
143 return 0; 144 return 0;
@@ -148,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
148 uint32_t tmp; 149 uint32_t tmp;
149 150
150 /* FIXME: disable out of gart access */ 151 /* FIXME: disable out of gart access */
151 WREG32_MC(RS600_MC_PT0_CNTL, 0); 152 WREG32_MC(R_000100_MC_PT0_CNTL, 0);
152 tmp = RREG32_MC(RS600_MC_CNTL1); 153 tmp = RREG32_MC(R_000009_MC_CNTL1);
153 tmp &= ~RS600_ENABLE_PAGE_TABLES; 154 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
154 WREG32_MC(RS600_MC_CNTL1, tmp);
155 if (rdev->gart.table.vram.robj) { 155 if (rdev->gart.table.vram.robj) {
156 radeon_object_kunmap(rdev->gart.table.vram.robj); 156 radeon_object_kunmap(rdev->gart.table.vram.robj);
157 radeon_object_unpin(rdev->gart.table.vram.robj); 157 radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -185,132 +185,64 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
185 return 0; 185 return 0;
186} 186}
187 187
188
189/*
190 * MC.
191 */
192void rs600_mc_disable_clients(struct radeon_device *rdev)
193{
194 unsigned tmp;
195
196 if (r100_gui_wait_for_idle(rdev)) {
197 printk(KERN_WARNING "Failed to wait GUI idle while "
198 "programming pipes. Bad things might happen.\n");
199 }
200
201 radeon_avivo_vga_render_disable(rdev);
202
203 tmp = RREG32(AVIVO_D1VGA_CONTROL);
204 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
205 tmp = RREG32(AVIVO_D2VGA_CONTROL);
206 WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
207
208 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
209 WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
210 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
211 WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
212
213 /* make sure all previous write got through */
214 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
215
216 mdelay(1);
217}
218
219int rs600_mc_init(struct radeon_device *rdev)
220{
221 uint32_t tmp;
222 int r;
223
224 if (r100_debugfs_rbbm_init(rdev)) {
225 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
226 }
227
228 rs600_gpu_init(rdev);
229 rs600_gart_disable(rdev);
230
231 /* Setup GPU memory space */
232 rdev->mc.vram_location = 0xFFFFFFFFUL;
233 rdev->mc.gtt_location = 0xFFFFFFFFUL;
234 r = radeon_mc_setup(rdev);
235 if (r) {
236 return r;
237 }
238
239 /* Program GPU memory space */
240 /* Enable bus master */
241 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
242 WREG32(RADEON_BUS_CNTL, tmp);
243 /* FIXME: What does AGP means for such chipset ? */
244 WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
245 /* FIXME: are this AGP reg in indirect MC range ? */
246 WREG32_MC(RS600_MC_AGP_BASE, 0);
247 WREG32_MC(RS600_MC_AGP_BASE_2, 0);
248 rs600_mc_disable_clients(rdev);
249 if (rs600_mc_wait_for_idle(rdev)) {
250 printk(KERN_WARNING "Failed to wait MC idle while "
251 "programming pipes. Bad things might happen.\n");
252 }
253 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
254 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
255 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
256 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
257 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
258 return 0;
259}
260
261void rs600_mc_fini(struct radeon_device *rdev)
262{
263}
264
265
266/*
267 * Interrupts
268 */
269int rs600_irq_set(struct radeon_device *rdev) 188int rs600_irq_set(struct radeon_device *rdev)
270{ 189{
271 uint32_t tmp = 0; 190 uint32_t tmp = 0;
272 uint32_t mode_int = 0; 191 uint32_t mode_int = 0;
273 192
274 if (rdev->irq.sw_int) { 193 if (rdev->irq.sw_int) {
275 tmp |= RADEON_SW_INT_ENABLE; 194 tmp |= S_000040_SW_INT_EN(1);
276 } 195 }
277 if (rdev->irq.crtc_vblank_int[0]) { 196 if (rdev->irq.crtc_vblank_int[0]) {
278 mode_int |= AVIVO_D1MODE_INT_MASK; 197 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
279 } 198 }
280 if (rdev->irq.crtc_vblank_int[1]) { 199 if (rdev->irq.crtc_vblank_int[1]) {
281 mode_int |= AVIVO_D2MODE_INT_MASK; 200 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
282 } 201 }
283 WREG32(RADEON_GEN_INT_CNTL, tmp); 202 WREG32(R_000040_GEN_INT_CNTL, tmp);
284 WREG32(AVIVO_DxMODE_INT_MASK, mode_int); 203 WREG32(R_006540_DxMODE_INT_MASK, mode_int);
285 return 0; 204 return 0;
286} 205}
287 206
288static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) 207static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
289{ 208{
290 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 209 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
291 uint32_t irq_mask = RADEON_SW_INT_TEST; 210 uint32_t irq_mask = ~C_000044_SW_INT;
292 211
293 if (irqs & AVIVO_DISPLAY_INT_STATUS) { 212 if (G_000044_DISPLAY_INT_STAT(irqs)) {
294 *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); 213 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
295 if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 214 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
296 WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 215 WREG32(R_006534_D1MODE_VBLANK_STATUS,
216 S_006534_D1MODE_VBLANK_ACK(1));
297 } 217 }
298 if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { 218 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
299 WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 219 WREG32(R_006D34_D2MODE_VBLANK_STATUS,
220 S_006D34_D2MODE_VBLANK_ACK(1));
300 } 221 }
301 } else { 222 } else {
302 *r500_disp_int = 0; 223 *r500_disp_int = 0;
303 } 224 }
304 225
305 if (irqs) { 226 if (irqs) {
306 WREG32(RADEON_GEN_INT_STATUS, irqs); 227 WREG32(R_000044_GEN_INT_STATUS, irqs);
307 } 228 }
308 return irqs & irq_mask; 229 return irqs & irq_mask;
309} 230}
310 231
232void rs600_irq_disable(struct radeon_device *rdev)
233{
234 u32 tmp;
235
236 WREG32(R_000040_GEN_INT_CNTL, 0);
237 WREG32(R_006540_DxMODE_INT_MASK, 0);
238 /* Wait and acknowledge irq */
239 mdelay(1);
240 rs600_irq_ack(rdev, &tmp);
241}
242
311int rs600_irq_process(struct radeon_device *rdev) 243int rs600_irq_process(struct radeon_device *rdev)
312{ 244{
313 uint32_t status; 245 uint32_t status, msi_rearm;
314 uint32_t r500_disp_int; 246 uint32_t r500_disp_int;
315 247
316 status = rs600_irq_ack(rdev, &r500_disp_int); 248 status = rs600_irq_ack(rdev, &r500_disp_int);
@@ -319,85 +251,65 @@ int rs600_irq_process(struct radeon_device *rdev)
319 } 251 }
320 while (status || r500_disp_int) { 252 while (status || r500_disp_int) {
321 /* SW interrupt */ 253 /* SW interrupt */
322 if (status & RADEON_SW_INT_TEST) { 254 if (G_000040_SW_INT_EN(status))
323 radeon_fence_process(rdev); 255 radeon_fence_process(rdev);
324 }
325 /* Vertical blank interrupts */ 256 /* Vertical blank interrupts */
326 if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 257 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
327 drm_handle_vblank(rdev->ddev, 0); 258 drm_handle_vblank(rdev->ddev, 0);
328 } 259 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
329 if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
330 drm_handle_vblank(rdev->ddev, 1); 260 drm_handle_vblank(rdev->ddev, 1);
331 }
332 status = rs600_irq_ack(rdev, &r500_disp_int); 261 status = rs600_irq_ack(rdev, &r500_disp_int);
333 } 262 }
263 if (rdev->msi_enabled) {
264 switch (rdev->family) {
265 case CHIP_RS600:
266 case CHIP_RS690:
267 case CHIP_RS740:
268 msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
269 WREG32(RADEON_BUS_CNTL, msi_rearm);
270 WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
271 break;
272 default:
273 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
274 WREG32(RADEON_MSI_REARM_EN, msi_rearm);
275 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
276 break;
277 }
278 }
334 return IRQ_HANDLED; 279 return IRQ_HANDLED;
335} 280}
336 281
337u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) 282u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
338{ 283{
339 if (crtc == 0) 284 if (crtc == 0)
340 return RREG32(AVIVO_D1CRTC_FRAME_COUNT); 285 return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
341 else 286 else
342 return RREG32(AVIVO_D2CRTC_FRAME_COUNT); 287 return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
343}
344
345
346/*
347 * Global GPU functions
348 */
349void rs600_disable_vga(struct radeon_device *rdev)
350{
351 unsigned tmp;
352
353 WREG32(0x330, 0);
354 WREG32(0x338, 0);
355 tmp = RREG32(0x300);
356 tmp &= ~(3 << 16);
357 WREG32(0x300, tmp);
358 WREG32(0x308, (1 << 8));
359 WREG32(0x310, rdev->mc.vram_location);
360 WREG32(0x594, 0);
361} 288}
362 289
363int rs600_mc_wait_for_idle(struct radeon_device *rdev) 290int rs600_mc_wait_for_idle(struct radeon_device *rdev)
364{ 291{
365 unsigned i; 292 unsigned i;
366 uint32_t tmp;
367 293
368 for (i = 0; i < rdev->usec_timeout; i++) { 294 for (i = 0; i < rdev->usec_timeout; i++) {
369 /* read MC_STATUS */ 295 if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
370 tmp = RREG32_MC(RS600_MC_STATUS);
371 if (tmp & RS600_MC_STATUS_IDLE) {
372 return 0; 296 return 0;
373 } 297 udelay(1);
374 DRM_UDELAY(1);
375 } 298 }
376 return -1; 299 return -1;
377} 300}
378 301
379void rs600_errata(struct radeon_device *rdev)
380{
381 rdev->pll_errata = 0;
382}
383
384void rs600_gpu_init(struct radeon_device *rdev) 302void rs600_gpu_init(struct radeon_device *rdev)
385{ 303{
386 /* FIXME: HDP same place on rs600 ? */ 304 /* FIXME: HDP same place on rs600 ? */
387 r100_hdp_reset(rdev); 305 r100_hdp_reset(rdev);
388 rs600_disable_vga(rdev);
389 /* FIXME: is this correct ? */ 306 /* FIXME: is this correct ? */
390 r420_pipes_init(rdev); 307 r420_pipes_init(rdev);
391 if (rs600_mc_wait_for_idle(rdev)) { 308 /* Wait for mc idle */
392 printk(KERN_WARNING "Failed to wait MC idle while " 309 if (rs600_mc_wait_for_idle(rdev))
393 "programming pipes. Bad things might happen.\n"); 310 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
394 }
395} 311}
396 312
397
398/*
399 * VRAM info.
400 */
401void rs600_vram_info(struct radeon_device *rdev) 313void rs600_vram_info(struct radeon_device *rdev)
402{ 314{
403 /* FIXME: to do or is these values sane ? */ 315 /* FIXME: to do or is these values sane ? */
@@ -410,31 +322,208 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
410 /* FIXME: implement, should this be like rs690 ? */ 322 /* FIXME: implement, should this be like rs690 ? */
411} 323}
412 324
413
414/*
415 * Indirect registers accessor
416 */
417uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 325uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
418{ 326{
419 uint32_t r; 327 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
420 328 S_000070_MC_IND_CITF_ARB0(1));
421 WREG32(RS600_MC_INDEX, 329 return RREG32(R_000074_MC_IND_DATA);
422 ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
423 r = RREG32(RS600_MC_DATA);
424 return r;
425} 330}
426 331
427void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 332void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
428{ 333{
429 WREG32(RS600_MC_INDEX, 334 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
430 RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | 335 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
431 ((reg) & RS600_MC_ADDR_MASK)); 336 WREG32(R_000074_MC_IND_DATA, v);
432 WREG32(RS600_MC_DATA, v);
433} 337}
434 338
435int rs600_init(struct radeon_device *rdev) 339void rs600_debugfs(struct radeon_device *rdev)
340{
341 if (r100_debugfs_rbbm_init(rdev))
342 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
343}
344
345void rs600_set_safe_registers(struct radeon_device *rdev)
436{ 346{
437 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; 347 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
438 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); 348 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
349}
350
351static void rs600_mc_program(struct radeon_device *rdev)
352{
353 struct rv515_mc_save save;
354
355 /* Stops all mc clients */
356 rv515_mc_stop(rdev, &save);
357
358 /* Wait for mc idle */
359 if (rs600_mc_wait_for_idle(rdev))
360 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
361
362 /* FIXME: What does AGP means for such chipset ? */
363 WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
364 WREG32_MC(R_000006_AGP_BASE, 0);
365 WREG32_MC(R_000007_AGP_BASE_2, 0);
366 /* Program MC */
367 WREG32_MC(R_000004_MC_FB_LOCATION,
368 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
369 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
370 WREG32(R_000134_HDP_FB_LOCATION,
371 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
372
373 rv515_mc_resume(rdev, &save);
374}
375
376static int rs600_startup(struct radeon_device *rdev)
377{
378 int r;
379
380 rs600_mc_program(rdev);
381 /* Resume clock */
382 rv515_clock_startup(rdev);
383 /* Initialize GPU configuration (# pipes, ...) */
384 rs600_gpu_init(rdev);
385 /* Initialize GART (initialize after TTM so we can allocate
386 * memory through TTM but finalize after TTM) */
387 r = rs600_gart_enable(rdev);
388 if (r)
389 return r;
390 /* Enable IRQ */
391 rdev->irq.sw_int = true;
392 rs600_irq_set(rdev);
393 /* 1M ring buffer */
394 r = r100_cp_init(rdev, 1024 * 1024);
395 if (r) {
396 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
397 return r;
398 }
399 r = r100_wb_init(rdev);
400 if (r)
401 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
402 r = r100_ib_init(rdev);
403 if (r) {
404 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
405 return r;
406 }
407 return 0;
408}
409
410int rs600_resume(struct radeon_device *rdev)
411{
412 /* Make sur GART are not working */
413 rs600_gart_disable(rdev);
414 /* Resume clock before doing reset */
415 rv515_clock_startup(rdev);
416 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
417 if (radeon_gpu_reset(rdev)) {
418 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
419 RREG32(R_000E40_RBBM_STATUS),
420 RREG32(R_0007C0_CP_STAT));
421 }
422 /* post */
423 atom_asic_init(rdev->mode_info.atom_context);
424 /* Resume clock after posting */
425 rv515_clock_startup(rdev);
426 return rs600_startup(rdev);
427}
428
429int rs600_suspend(struct radeon_device *rdev)
430{
431 r100_cp_disable(rdev);
432 r100_wb_disable(rdev);
433 rs600_irq_disable(rdev);
434 rs600_gart_disable(rdev);
435 return 0;
436}
437
438void rs600_fini(struct radeon_device *rdev)
439{
440 rs600_suspend(rdev);
441 r100_cp_fini(rdev);
442 r100_wb_fini(rdev);
443 r100_ib_fini(rdev);
444 radeon_gem_fini(rdev);
445 rs600_gart_fini(rdev);
446 radeon_irq_kms_fini(rdev);
447 radeon_fence_driver_fini(rdev);
448 radeon_object_fini(rdev);
449 radeon_atombios_fini(rdev);
450 kfree(rdev->bios);
451 rdev->bios = NULL;
452}
453
454int rs600_init(struct radeon_device *rdev)
455{
456 int r;
457
458 /* Disable VGA */
459 rv515_vga_render_disable(rdev);
460 /* Initialize scratch registers */
461 radeon_scratch_init(rdev);
462 /* Initialize surface registers */
463 radeon_surface_init(rdev);
464 /* BIOS */
465 if (!radeon_get_bios(rdev)) {
466 if (ASIC_IS_AVIVO(rdev))
467 return -EINVAL;
468 }
469 if (rdev->is_atom_bios) {
470 r = radeon_atombios_init(rdev);
471 if (r)
472 return r;
473 } else {
474 dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
475 return -EINVAL;
476 }
477 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
478 if (radeon_gpu_reset(rdev)) {
479 dev_warn(rdev->dev,
480 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
481 RREG32(R_000E40_RBBM_STATUS),
482 RREG32(R_0007C0_CP_STAT));
483 }
484 /* check if cards are posted or not */
485 if (!radeon_card_posted(rdev) && rdev->bios) {
486 DRM_INFO("GPU not posted. posting now...\n");
487 atom_asic_init(rdev->mode_info.atom_context);
488 }
489 /* Initialize clocks */
490 radeon_get_clock_info(rdev->ddev);
491 /* Initialize power management */
492 radeon_pm_init(rdev);
493 /* Get vram informations */
494 rs600_vram_info(rdev);
495 /* Initialize memory controller (also test AGP) */
496 r = r420_mc_init(rdev);
497 if (r)
498 return r;
499 rs600_debugfs(rdev);
500 /* Fence driver */
501 r = radeon_fence_driver_init(rdev);
502 if (r)
503 return r;
504 r = radeon_irq_kms_init(rdev);
505 if (r)
506 return r;
507 /* Memory manager */
508 r = radeon_object_init(rdev);
509 if (r)
510 return r;
511 r = rs600_gart_init(rdev);
512 if (r)
513 return r;
514 rs600_set_safe_registers(rdev);
515 rdev->accel_working = true;
516 r = rs600_startup(rdev);
517 if (r) {
518 /* Somethings want wront with the accel init stop accel */
519 dev_err(rdev->dev, "Disabling GPU acceleration\n");
520 rs600_suspend(rdev);
521 r100_cp_fini(rdev);
522 r100_wb_fini(rdev);
523 r100_ib_fini(rdev);
524 rs600_gart_fini(rdev);
525 radeon_irq_kms_fini(rdev);
526 rdev->accel_working = false;
527 }
439 return 0; 528 return 0;
440} 529}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 000000000000..81308924859a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,470 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS600D_H__
29#define __RS600D_H__
30
31/* Registers */
32#define R_000040_GEN_INT_CNTL 0x000040
33#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
34#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
35#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
36#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
37#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
38#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
39#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
40#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
41#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
42#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
43#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
44#define C_000040_SNAPSHOT2 0xFFFFFF7F
45#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
46#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
47#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
48#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
49#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
50#define C_000040_FP2_DETECT 0xFFFFFBFF
51#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
52#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
53#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
54#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
55#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
56#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
57#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
58#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
59#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
60#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
61#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
62#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
63#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
64#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
65#define C_000040_I2C_INT_EN 0xFFFDFFFF
66#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
67#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
68#define C_000040_GUI_IDLE 0xFFF7FFFF
69#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
70#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
71#define C_000040_VIPH_INT_EN 0xFEFFFFFF
72#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
73#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
74#define C_000040_SW_INT_EN 0xFDFFFFFF
75#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
76#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
77#define C_000040_GEYSERVILLE 0xF7FFFFFF
78#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
79#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
80#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
81#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
82#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
83#define C_000040_DVI_I2C_INT 0xDFFFFFFF
84#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
85#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
86#define C_000040_GUIDMA 0xBFFFFFFF
87#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
88#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
89#define C_000040_VIDDMA 0x7FFFFFFF
90#define R_000044_GEN_INT_STATUS 0x000044
91#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0)
92#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1)
93#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE
94#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1)
95#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1)
96#define C_000044_VGA_INT_STAT 0xFFFFFFFD
97#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8)
98#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1)
99#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF
100#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12)
101#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1)
102#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF
103#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13)
104#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1)
105#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF
106#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14)
107#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1)
108#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF
109#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15)
110#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1)
111#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF
112#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16)
113#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1)
114#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF
115#define S_000044_I2C_INT(x) (((x) & 0x1) << 17)
116#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1)
117#define C_000044_I2C_INT 0xFFFDFFFF
118#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18)
119#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1)
120#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF
121#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19)
122#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1)
123#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF
124#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20)
125#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1)
126#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF
127#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21)
128#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1)
129#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF
130#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22)
131#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1)
132#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF
133#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23)
134#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1)
135#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF
136#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24)
137#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1)
138#define C_000044_VIPH_INT 0xFEFFFFFF
139#define S_000044_SW_INT(x) (((x) & 0x1) << 25)
140#define G_000044_SW_INT(x) (((x) >> 25) & 0x1)
141#define C_000044_SW_INT 0xFDFFFFFF
142#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26)
143#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1)
144#define C_000044_SW_INT_SET 0xFBFFFFFF
145#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27)
146#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1)
147#define C_000044_IDCT_INT_STAT 0xF7FFFFFF
148#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30)
149#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1)
150#define C_000044_GUIDMA_STAT 0xBFFFFFFF
151#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31)
152#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1)
153#define C_000044_VIDDMA_STAT 0x7FFFFFFF
154#define R_00004C_BUS_CNTL 0x00004C
155#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14)
156#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1)
157#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF
158#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20)
159#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1)
160#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF
161#define R_000070_MC_IND_INDEX 0x000070
162#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0)
163#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF)
164#define C_000070_MC_IND_ADDR 0xFFFF0000
165#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16)
166#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1)
167#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF
168#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17)
169#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1)
170#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF
171#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18)
172#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1)
173#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF
174#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19)
175#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1)
176#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF
177#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20)
178#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1)
179#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF
180#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21)
181#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1)
182#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF
183#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22)
184#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1)
185#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF
186#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23)
187#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1)
188#define C_000070_MC_IND_WR_EN 0xFF7FFFFF
189#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24)
190#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1)
191#define C_000070_MC_IND_RD_INV 0xFEFFFFFF
192#define R_000074_MC_IND_DATA 0x000074
193#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
194#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
195#define C_000074_MC_IND_DATA 0x00000000
196#define R_000134_HDP_FB_LOCATION 0x000134
197#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
198#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
199#define C_000134_HDP_FB_START 0xFFFF0000
200#define R_0007C0_CP_STAT 0x0007C0
201#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
202#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
203#define C_0007C0_MRU_BUSY 0xFFFFFFFE
204#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
205#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
206#define C_0007C0_MWU_BUSY 0xFFFFFFFD
207#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
208#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
209#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
210#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
211#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
212#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
213#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
214#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
215#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
216#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
217#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
218#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
219#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
220#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
221#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
222#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
223#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
224#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
225#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
226#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
227#define C_0007C0_CSI_BUSY 0xFFFFDFFF
228#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
229#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
230#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
231#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
232#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
233#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
234#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
235#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
236#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
237#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
238#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
239#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
240#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
241#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
242#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
243#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
244#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
245#define C_0007C0_CP_BUSY 0x7FFFFFFF
246#define R_000E40_RBBM_STATUS 0x000E40
247#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
248#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
249#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
250#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
251#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
252#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
253#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
254#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
255#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
256#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
257#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
258#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
259#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
260#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
261#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
262#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
263#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
264#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
265#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
266#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
267#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
268#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
269#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
270#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
271#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
272#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
273#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
274#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
275#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
276#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
277#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
278#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
279#define C_000E40_E2_BUSY 0xFFFDFFFF
280#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
281#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
282#define C_000E40_RB2D_BUSY 0xFFFBFFFF
283#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
284#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
285#define C_000E40_RB3D_BUSY 0xFFF7FFFF
286#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
287#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
288#define C_000E40_VAP_BUSY 0xFFEFFFFF
289#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
290#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
291#define C_000E40_RE_BUSY 0xFFDFFFFF
292#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
293#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
294#define C_000E40_TAM_BUSY 0xFFBFFFFF
295#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
296#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
297#define C_000E40_TDM_BUSY 0xFF7FFFFF
298#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
299#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
300#define C_000E40_PB_BUSY 0xFEFFFFFF
301#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
302#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
303#define C_000E40_TIM_BUSY 0xFDFFFFFF
304#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
305#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
306#define C_000E40_GA_BUSY 0xFBFFFFFF
307#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
308#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
309#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
310#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
311#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
312#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
313#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4
314#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
315#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
316#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000
317#define R_006534_D1MODE_VBLANK_STATUS 0x006534
318#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
319#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
320#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE
321#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
322#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
323#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF
324#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
325#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
326#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF
327#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
328#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
329#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF
330#define R_006540_DxMODE_INT_MASK 0x006540
331#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
332#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1)
333#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE
334#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4)
335#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1)
336#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF
337#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)
338#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1)
339#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF
340#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12)
341#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1)
342#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF
343#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30)
344#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1)
345#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF
346#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31)
347#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1)
348#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF
349#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4
350#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
351#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
352#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000
353#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34
354#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
355#define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
356#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE
357#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
358#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
359#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF
360#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
361#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
362#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF
363#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
364#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
365#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF
366#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC
367#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4)
368#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1)
369#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF
370#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
371#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
372#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
373
374
375/* MC registers */
376#define R_000000_MC_STATUS 0x000000
377#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0)
378#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1)
379#define C_000000_MC_IDLE 0xFFFFFFFE
380#define R_000004_MC_FB_LOCATION 0x000004
381#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
382#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
383#define C_000004_MC_FB_START 0xFFFF0000
384#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
385#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
386#define C_000004_MC_FB_TOP 0x0000FFFF
387#define R_000005_MC_AGP_LOCATION 0x000005
388#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
389#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
390#define C_000005_MC_AGP_START 0xFFFF0000
391#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
392#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
393#define C_000005_MC_AGP_TOP 0x0000FFFF
394#define R_000006_AGP_BASE 0x000006
395#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
396#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
397#define C_000006_AGP_BASE_ADDR 0x00000000
398#define R_000007_AGP_BASE_2 0x000007
399#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
400#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
401#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
402#define R_000009_MC_CNTL1 0x000009
403#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26)
404#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1)
405#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF
406/* FIXME don't know the various field size need feedback from AMD */
407#define R_000100_MC_PT0_CNTL 0x000100
408#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0)
409#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1)
410#define C_000100_ENABLE_PT 0xFFFFFFFE
411#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15)
412#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7)
413#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF
414#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21)
415#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7)
416#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF
417#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
418#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1)
419#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF
420#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29)
421#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1)
422#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF
423#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102
424#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0)
425#define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1)
426#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE
427#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1)
428#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3)
429#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9
430#define V_000102_PAGE_TABLE_FLAT 0
431/* R600 documentation suggest that this should be a number of pages */
432#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112
433#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114
434#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C
435#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C
436#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C
437#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C
438#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C
439#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
440#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
441#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE
442#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1)
443#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1)
444#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD
445#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8)
446#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3)
447#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF
448#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0
449#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1
450#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2
451#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3
452#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10)
453#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1)
454#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF
455#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0
456#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
457#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11)
458#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7)
459#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF
460#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14)
461#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1)
462#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF
463#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15)
464#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7)
465#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF
466#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20)
467#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
468#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
469
470#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0f585ca8276d..27547175cf93 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,106 +26,29 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 29#include "radeon.h"
31#include "rs690r.h"
32#include "atom.h" 30#include "atom.h"
33#include "atom-bits.h" 31#include "rs690d.h"
34
35/* rs690,rs740 depends on : */
36void r100_hdp_reset(struct radeon_device *rdev);
37int r300_mc_wait_for_idle(struct radeon_device *rdev);
38void r420_pipes_init(struct radeon_device *rdev);
39void rs400_gart_disable(struct radeon_device *rdev);
40int rs400_gart_enable(struct radeon_device *rdev);
41void rs400_gart_adjust_size(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev);
43void rs600_disable_vga(struct radeon_device *rdev);
44
45/* This files gather functions specifics to :
46 * rs690,rs740
47 *
48 * Some of these functions might be used by newer ASICs.
49 */
50void rs690_gpu_init(struct radeon_device *rdev);
51int rs690_mc_wait_for_idle(struct radeon_device *rdev);
52
53 32
54/* 33static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
55 * MC functions.
56 */
57int rs690_mc_init(struct radeon_device *rdev)
58{
59 uint32_t tmp;
60 int r;
61
62 if (r100_debugfs_rbbm_init(rdev)) {
63 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
64 }
65
66 rs690_gpu_init(rdev);
67 rs400_gart_disable(rdev);
68
69 /* Setup GPU memory space */
70 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
71 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
72 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
73 rdev->mc.vram_location = 0xFFFFFFFFUL;
74 r = radeon_mc_setup(rdev);
75 if (r) {
76 return r;
77 }
78
79 /* Program GPU memory space */
80 rs600_mc_disable_clients(rdev);
81 if (rs690_mc_wait_for_idle(rdev)) {
82 printk(KERN_WARNING "Failed to wait MC idle while "
83 "programming pipes. Bad things might happen.\n");
84 }
85 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
86 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
87 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
88 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
89 /* FIXME: Does this reg exist on RS480,RS740 ? */
90 WREG32(0x310, rdev->mc.vram_location);
91 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
92 return 0;
93}
94
95void rs690_mc_fini(struct radeon_device *rdev)
96{
97}
98
99
100/*
101 * Global GPU functions
102 */
103int rs690_mc_wait_for_idle(struct radeon_device *rdev)
104{ 34{
105 unsigned i; 35 unsigned i;
106 uint32_t tmp; 36 uint32_t tmp;
107 37
108 for (i = 0; i < rdev->usec_timeout; i++) { 38 for (i = 0; i < rdev->usec_timeout; i++) {
109 /* read MC_STATUS */ 39 /* read MC_STATUS */
110 tmp = RREG32_MC(RS690_MC_STATUS); 40 tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
111 if (tmp & RS690_MC_STATUS_IDLE) { 41 if (G_000090_MC_SYSTEM_IDLE(tmp))
112 return 0; 42 return 0;
113 } 43 udelay(1);
114 DRM_UDELAY(1);
115 } 44 }
116 return -1; 45 return -1;
117} 46}
118 47
119void rs690_errata(struct radeon_device *rdev) 48static void rs690_gpu_init(struct radeon_device *rdev)
120{
121 rdev->pll_errata = 0;
122}
123
124void rs690_gpu_init(struct radeon_device *rdev)
125{ 49{
126 /* FIXME: HDP same place on rs690 ? */ 50 /* FIXME: HDP same place on rs690 ? */
127 r100_hdp_reset(rdev); 51 r100_hdp_reset(rdev);
128 rs600_disable_vga(rdev);
129 /* FIXME: is this correct ? */ 52 /* FIXME: is this correct ? */
130 r420_pipes_init(rdev); 53 r420_pipes_init(rdev);
131 if (rs690_mc_wait_for_idle(rdev)) { 54 if (rs690_mc_wait_for_idle(rdev)) {
@@ -134,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev)
134 } 57 }
135} 58}
136 59
137
138/*
139 * VRAM info.
140 */
141void rs690_pm_info(struct radeon_device *rdev) 60void rs690_pm_info(struct radeon_device *rdev)
142{ 61{
143 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 62 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
@@ -251,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
251 /* 170 /*
252 * Line Buffer Setup 171 * Line Buffer Setup
253 * There is a single line buffer shared by both display controllers. 172 * There is a single line buffer shared by both display controllers.
254 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between 173 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
255 * the display controllers. The paritioning can either be done 174 * the display controllers. The paritioning can either be done
256 * manually or via one of four preset allocations specified in bits 1:0: 175 * manually or via one of four preset allocations specified in bits 1:0:
257 * 0 - line buffer is divided in half and shared between crtc 176 * 0 - line buffer is divided in half and shared between crtc
258 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 177 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
259 * 2 - D1 gets the whole buffer 178 * 2 - D1 gets the whole buffer
260 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 179 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
261 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual 180 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual
262 * allocation mode. In manual allocation mode, D1 always starts at 0, 181 * allocation mode. In manual allocation mode, D1 always starts at 0,
263 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 182 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
264 */ 183 */
265 tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; 184 tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
266 tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; 185 tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
267 /* auto */ 186 /* auto */
268 if (mode1 && mode2) { 187 if (mode1 && mode2) {
269 if (mode1->hdisplay > mode2->hdisplay) { 188 if (mode1->hdisplay > mode2->hdisplay) {
270 if (mode1->hdisplay > 2560) 189 if (mode1->hdisplay > 2560)
271 tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; 190 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
272 else 191 else
273 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 192 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
274 } else if (mode2->hdisplay > mode1->hdisplay) { 193 } else if (mode2->hdisplay > mode1->hdisplay) {
275 if (mode2->hdisplay > 2560) 194 if (mode2->hdisplay > 2560)
276 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 195 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
277 else 196 else
278 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 197 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
279 } else 198 } else
280 tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 199 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
281 } else if (mode1) { 200 } else if (mode1) {
282 tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; 201 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
283 } else if (mode2) { 202 } else if (mode2) {
284 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 203 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
285 } 204 }
286 WREG32(DC_LB_MEMORY_SPLIT, tmp); 205 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
287} 206}
288 207
289struct rs690_watermark { 208struct rs690_watermark {
@@ -488,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
488 * option. 407 * option.
489 */ 408 */
490 if (rdev->disp_priority == 2) { 409 if (rdev->disp_priority == 2) {
491 tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); 410 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
492 tmp &= ~MC_DISP1R_INIT_LAT_MASK; 411 tmp &= C_000104_MC_DISP0R_INIT_LAT;
493 tmp &= ~MC_DISP0R_INIT_LAT_MASK; 412 tmp &= C_000104_MC_DISP1R_INIT_LAT;
494 if (mode1)
495 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
496 if (mode0) 413 if (mode0)
497 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); 414 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
498 WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); 415 if (mode1)
416 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
417 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
499 } 418 }
500 rs690_line_buffer_adjust(rdev, mode0, mode1); 419 rs690_line_buffer_adjust(rdev, mode0, mode1);
501 420
502 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) 421 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
503 WREG32(DCP_CONTROL, 0); 422 WREG32(R_006C9C_DCP_CONTROL, 0);
504 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 423 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
505 WREG32(DCP_CONTROL, 2); 424 WREG32(R_006C9C_DCP_CONTROL, 2);
506 425
507 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); 426 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
508 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); 427 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
509 428
510 tmp = (wm0.lb_request_fifo_depth - 1); 429 tmp = (wm0.lb_request_fifo_depth - 1);
511 tmp |= (wm1.lb_request_fifo_depth - 1) << 16; 430 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
512 WREG32(LB_MAX_REQ_OUTSTANDING, tmp); 431 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
513 432
514 if (mode0 && mode1) { 433 if (mode0 && mode1) {
515 if (rfixed_trunc(wm0.dbpp) > 64) 434 if (rfixed_trunc(wm0.dbpp) > 64)
@@ -562,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
562 priority_mark12.full = 0; 481 priority_mark12.full = 0;
563 if (wm1.priority_mark_max.full > priority_mark12.full) 482 if (wm1.priority_mark_max.full > priority_mark12.full)
564 priority_mark12.full = wm1.priority_mark_max.full; 483 priority_mark12.full = wm1.priority_mark_max.full;
565 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 484 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
566 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 485 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
567 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 486 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
568 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 487 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
569 } else if (mode0) { 488 } else if (mode0) {
570 if (rfixed_trunc(wm0.dbpp) > 64) 489 if (rfixed_trunc(wm0.dbpp) > 64)
571 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); 490 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -592,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
592 priority_mark02.full = 0; 511 priority_mark02.full = 0;
593 if (wm0.priority_mark_max.full > priority_mark02.full) 512 if (wm0.priority_mark_max.full > priority_mark02.full)
594 priority_mark02.full = wm0.priority_mark_max.full; 513 priority_mark02.full = wm0.priority_mark_max.full;
595 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 514 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
596 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 515 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
597 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 516 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
598 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 517 S_006D48_D2MODE_PRIORITY_A_OFF(1));
518 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
519 S_006D4C_D2MODE_PRIORITY_B_OFF(1));
599 } else { 520 } else {
600 if (rfixed_trunc(wm1.dbpp) > 64) 521 if (rfixed_trunc(wm1.dbpp) > 64)
601 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); 522 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
@@ -622,30 +543,205 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
622 priority_mark12.full = 0; 543 priority_mark12.full = 0;
623 if (wm1.priority_mark_max.full > priority_mark12.full) 544 if (wm1.priority_mark_max.full > priority_mark12.full)
624 priority_mark12.full = wm1.priority_mark_max.full; 545 priority_mark12.full = wm1.priority_mark_max.full;
625 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 546 WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
626 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 547 S_006548_D1MODE_PRIORITY_A_OFF(1));
627 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 548 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
628 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 549 S_00654C_D1MODE_PRIORITY_B_OFF(1));
550 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
551 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
629 } 552 }
630} 553}
631 554
632/*
633 * Indirect registers accessor
634 */
635uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 555uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
636{ 556{
637 uint32_t r; 557 uint32_t r;
638 558
639 WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); 559 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
640 r = RREG32(RS690_MC_DATA); 560 r = RREG32(R_00007C_MC_DATA);
641 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); 561 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
642 return r; 562 return r;
643} 563}
644 564
645void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 565void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
646{ 566{
647 WREG32(RS690_MC_INDEX, 567 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
648 RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); 568 S_000078_MC_IND_WR_EN(1));
649 WREG32(RS690_MC_DATA, v); 569 WREG32(R_00007C_MC_DATA, v);
650 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); 570 WREG32(R_000078_MC_INDEX, 0x7F);
571}
572
573void rs690_mc_program(struct radeon_device *rdev)
574{
575 struct rv515_mc_save save;
576
577 /* Stops all mc clients */
578 rv515_mc_stop(rdev, &save);
579
580 /* Wait for mc idle */
581 if (rs690_mc_wait_for_idle(rdev))
582 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
583 /* Program MC, should be a 32bits limited address space */
584 WREG32_MC(R_000100_MCCFG_FB_LOCATION,
585 S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
586 S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
587 WREG32(R_000134_HDP_FB_LOCATION,
588 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
589
590 rv515_mc_resume(rdev, &save);
591}
592
593static int rs690_startup(struct radeon_device *rdev)
594{
595 int r;
596
597 rs690_mc_program(rdev);
598 /* Resume clock */
599 rv515_clock_startup(rdev);
600 /* Initialize GPU configuration (# pipes, ...) */
601 rs690_gpu_init(rdev);
602 /* Initialize GART (initialize after TTM so we can allocate
603 * memory through TTM but finalize after TTM) */
604 r = rs400_gart_enable(rdev);
605 if (r)
606 return r;
607 /* Enable IRQ */
608 rdev->irq.sw_int = true;
609 rs600_irq_set(rdev);
610 /* 1M ring buffer */
611 r = r100_cp_init(rdev, 1024 * 1024);
612 if (r) {
613 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
614 return r;
615 }
616 r = r100_wb_init(rdev);
617 if (r)
618 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
619 r = r100_ib_init(rdev);
620 if (r) {
621 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
622 return r;
623 }
624 return 0;
625}
626
627int rs690_resume(struct radeon_device *rdev)
628{
629 /* Make sur GART are not working */
630 rs400_gart_disable(rdev);
631 /* Resume clock before doing reset */
632 rv515_clock_startup(rdev);
633 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
634 if (radeon_gpu_reset(rdev)) {
635 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
636 RREG32(R_000E40_RBBM_STATUS),
637 RREG32(R_0007C0_CP_STAT));
638 }
639 /* post */
640 atom_asic_init(rdev->mode_info.atom_context);
641 /* Resume clock after posting */
642 rv515_clock_startup(rdev);
643 return rs690_startup(rdev);
644}
645
646int rs690_suspend(struct radeon_device *rdev)
647{
648 r100_cp_disable(rdev);
649 r100_wb_disable(rdev);
650 rs600_irq_disable(rdev);
651 rs400_gart_disable(rdev);
652 return 0;
653}
654
655void rs690_fini(struct radeon_device *rdev)
656{
657 rs690_suspend(rdev);
658 r100_cp_fini(rdev);
659 r100_wb_fini(rdev);
660 r100_ib_fini(rdev);
661 radeon_gem_fini(rdev);
662 rs400_gart_fini(rdev);
663 radeon_irq_kms_fini(rdev);
664 radeon_fence_driver_fini(rdev);
665 radeon_object_fini(rdev);
666 radeon_atombios_fini(rdev);
667 kfree(rdev->bios);
668 rdev->bios = NULL;
669}
670
671int rs690_init(struct radeon_device *rdev)
672{
673 int r;
674
675 /* Disable VGA */
676 rv515_vga_render_disable(rdev);
677 /* Initialize scratch registers */
678 radeon_scratch_init(rdev);
679 /* Initialize surface registers */
680 radeon_surface_init(rdev);
681 /* TODO: disable VGA need to use VGA request */
682 /* BIOS*/
683 if (!radeon_get_bios(rdev)) {
684 if (ASIC_IS_AVIVO(rdev))
685 return -EINVAL;
686 }
687 if (rdev->is_atom_bios) {
688 r = radeon_atombios_init(rdev);
689 if (r)
690 return r;
691 } else {
692 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
693 return -EINVAL;
694 }
695 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
696 if (radeon_gpu_reset(rdev)) {
697 dev_warn(rdev->dev,
698 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
699 RREG32(R_000E40_RBBM_STATUS),
700 RREG32(R_0007C0_CP_STAT));
701 }
702 /* check if cards are posted or not */
703 if (!radeon_card_posted(rdev) && rdev->bios) {
704 DRM_INFO("GPU not posted. posting now...\n");
705 atom_asic_init(rdev->mode_info.atom_context);
706 }
707 /* Initialize clocks */
708 radeon_get_clock_info(rdev->ddev);
709 /* Initialize power management */
710 radeon_pm_init(rdev);
711 /* Get vram informations */
712 rs690_vram_info(rdev);
713 /* Initialize memory controller (also test AGP) */
714 r = r420_mc_init(rdev);
715 if (r)
716 return r;
717 rv515_debugfs(rdev);
718 /* Fence driver */
719 r = radeon_fence_driver_init(rdev);
720 if (r)
721 return r;
722 r = radeon_irq_kms_init(rdev);
723 if (r)
724 return r;
725 /* Memory manager */
726 r = radeon_object_init(rdev);
727 if (r)
728 return r;
729 r = rs400_gart_init(rdev);
730 if (r)
731 return r;
732 rs600_set_safe_registers(rdev);
733 rdev->accel_working = true;
734 r = rs690_startup(rdev);
735 if (r) {
736 /* Somethings want wront with the accel init stop accel */
737 dev_err(rdev->dev, "Disabling GPU acceleration\n");
738 rs690_suspend(rdev);
739 r100_cp_fini(rdev);
740 r100_wb_fini(rdev);
741 r100_ib_fini(rdev);
742 rs400_gart_fini(rdev);
743 radeon_irq_kms_fini(rdev);
744 rdev->accel_working = false;
745 }
746 return 0;
651} 747}
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 000000000000..62d31e7a897f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,307 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS690D_H__
29#define __RS690D_H__
30
31/* Registers */
32#define R_000078_MC_INDEX 0x000078
33#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
34#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
35#define C_000078_MC_IND_ADDR 0xFFFFFE00
36#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
37#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1)
38#define C_000078_MC_IND_WR_EN 0xFFFFFDFF
39#define R_00007C_MC_DATA 0x00007C
40#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0)
41#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
42#define C_00007C_MC_DATA 0x00000000
43#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
44#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
45#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
46#define C_0000F8_CONFIG_MEMSIZE 0x00000000
47#define R_000134_HDP_FB_LOCATION 0x000134
48#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
49#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
50#define C_000134_HDP_FB_START 0xFFFF0000
51#define R_0007C0_CP_STAT 0x0007C0
52#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
53#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
54#define C_0007C0_MRU_BUSY 0xFFFFFFFE
55#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
56#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
57#define C_0007C0_MWU_BUSY 0xFFFFFFFD
58#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
59#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
60#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
61#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
62#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
63#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
64#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
65#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
66#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
67#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
68#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
69#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
70#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
71#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
72#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
73#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
74#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
75#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
76#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
77#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
78#define C_0007C0_CSI_BUSY 0xFFFFDFFF
79#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
80#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
81#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
82#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
83#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
84#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
85#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
86#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
87#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
88#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
89#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
90#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
91#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
92#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
93#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
94#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
95#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
96#define C_0007C0_CP_BUSY 0x7FFFFFFF
97#define R_000E40_RBBM_STATUS 0x000E40
98#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
99#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
100#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
101#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
102#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
103#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
104#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
105#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
106#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
107#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
108#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
109#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
110#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
111#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
112#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
113#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
114#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
115#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
116#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
117#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
118#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
119#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
120#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
121#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
122#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
123#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
124#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
125#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
126#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
127#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
128#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
129#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
130#define C_000E40_E2_BUSY 0xFFFDFFFF
131#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
132#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
133#define C_000E40_RB2D_BUSY 0xFFFBFFFF
134#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
135#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
136#define C_000E40_RB3D_BUSY 0xFFF7FFFF
137#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
138#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
139#define C_000E40_VAP_BUSY 0xFFEFFFFF
140#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
141#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
142#define C_000E40_RE_BUSY 0xFFDFFFFF
143#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
144#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
145#define C_000E40_TAM_BUSY 0xFFBFFFFF
146#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
147#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
148#define C_000E40_TDM_BUSY 0xFF7FFFFF
149#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
150#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
151#define C_000E40_PB_BUSY 0xFEFFFFFF
152#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
153#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
154#define C_000E40_TIM_BUSY 0xFDFFFFFF
155#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
156#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
157#define C_000E40_GA_BUSY 0xFBFFFFFF
158#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
159#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
160#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
161#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
162#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
163#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
164#define R_006520_DC_LB_MEMORY_SPLIT 0x006520
165#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0)
166#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3)
167#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC
168#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2)
169#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1)
170#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB
171#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
172#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
173#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2
174#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
175#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4)
176#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF)
177#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F
178#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
179#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
180#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
181#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
182#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
183#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
184#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
185#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
186#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
187#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
188#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
189#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
190#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
191#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
192#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
193#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
194#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
195#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
196#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
197#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
198#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
199#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
200#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
201#define R_006C9C_DCP_CONTROL 0x006C9C
202#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
203#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
204#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
205#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
206#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
207#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
208#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
209#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
210#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
211#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
212#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
213#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
214#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
215#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
216#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
217#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
218#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
219#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
220#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
221#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
222#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
223#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
224#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
225#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
226#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
227#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
228#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58
229#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0)
230#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF)
231#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0
232#define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16)
233#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF)
234#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF
235
236
237#define R_000090_MC_SYSTEM_STATUS 0x000090
238#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0)
239#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1)
240#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE
241#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1)
242#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1)
243#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD
244#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2)
245#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1)
246#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB
247#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3)
248#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1)
249#define C_000090_MC_SELECT_PM 0xFFFFFFF7
250#define S_000090_RESERVED4(x) (((x) & 0xF) << 4)
251#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF)
252#define C_000090_RESERVED4 0xFFFFFF0F
253#define S_000090_RESERVED8(x) (((x) & 0xF) << 8)
254#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF)
255#define C_000090_RESERVED8 0xFFFFF0FF
256#define S_000090_RESERVED12(x) (((x) & 0xF) << 12)
257#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF)
258#define C_000090_RESERVED12 0xFFFF0FFF
259#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16)
260#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1)
261#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF
262#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17)
263#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1)
264#define C_000090_MCA_IDLE 0xFFFDFFFF
265#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18)
266#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1)
267#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF
268#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19)
269#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1)
270#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF
271#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20)
272#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF)
273#define C_000090_RESERVED20 0x000FFFFF
274#define R_000100_MCCFG_FB_LOCATION 0x000100
275#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0)
276#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
277#define C_000100_MC_FB_START 0xFFFF0000
278#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
279#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
280#define C_000100_MC_FB_TOP 0x0000FFFF
281#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104
282#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0)
283#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF)
284#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0
285#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4)
286#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF)
287#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F
288#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8)
289#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF)
290#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF
291#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12)
292#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF)
293#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF
294#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16)
295#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF)
296#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF
297#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20)
298#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF)
299#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF
300#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24)
301#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF)
302#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF
303#define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28)
304#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF)
305#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF
306
307#endif
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
deleted file mode 100644
index c0d9faa2175b..000000000000
--- a/drivers/gpu/drm/radeon/rs690r.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RS690R_H
29#define RS690R_H
30
31/* RS690/RS740 registers */
32#define MC_INDEX 0x0078
33# define MC_INDEX_MASK 0x1FF
34# define MC_INDEX_WR_EN (1 << 9)
35# define MC_INDEX_WR_ACK 0x7F
36#define MC_DATA 0x007C
37#define HDP_FB_LOCATION 0x0134
38#define DC_LB_MEMORY_SPLIT 0x6520
39#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
40#define DC_LB_MEMORY_SPLIT_SHIFT 0
41#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
42#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
43#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
44#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
45#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
46#define DC_LB_DISP1_END_ADR_SHIFT 4
47#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
48#define D1MODE_PRIORITY_A_CNT 0x6548
49#define MODE_PRIORITY_MARK_MASK 0x00007FFF
50#define MODE_PRIORITY_OFF (1 << 16)
51#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
52#define MODE_PRIORITY_FORCE_MASK (1 << 24)
53#define D1MODE_PRIORITY_B_CNT 0x654C
54#define LB_MAX_REQ_OUTSTANDING 0x6D58
55#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
56#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
57#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
58#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
59#define DCP_CONTROL 0x6C9C
60#define D2MODE_PRIORITY_A_CNT 0x6D48
61#define D2MODE_PRIORITY_B_CNT 0x6D4C
62
63/* MC indirect registers */
64#define MC_STATUS_IDLE (1 << 0)
65#define MC_MISC_CNTL 0x18
66#define DISABLE_GTW (1 << 1)
67#define GART_INDEX_REG_EN (1 << 12)
68#define BLOCK_GFX_D3_EN (1 << 14)
69#define GART_FEATURE_ID 0x2B
70#define HANG_EN (1 << 11)
71#define TLB_ENABLE (1 << 18)
72#define P2P_ENABLE (1 << 19)
73#define GTW_LAC_EN (1 << 25)
74#define LEVEL2_GART (0 << 30)
75#define LEVEL1_GART (1 << 30)
76#define PDC_EN (1 << 31)
77#define GART_BASE 0x2C
78#define GART_CACHE_CNTRL 0x2E
79# define GART_CACHE_INVALIDATE (1 << 0)
80#define MC_STATUS 0x90
81#define MCCFG_FB_LOCATION 0x100
82#define MC_FB_START_MASK 0x0000FFFF
83#define MC_FB_START_SHIFT 0
84#define MC_FB_TOP_MASK 0xFFFF0000
85#define MC_FB_TOP_SHIFT 16
86#define MCCFG_AGP_LOCATION 0x101
87#define MC_AGP_START_MASK 0x0000FFFF
88#define MC_AGP_START_SHIFT 0
89#define MC_AGP_TOP_MASK 0xFFFF0000
90#define MC_AGP_TOP_SHIFT 16
91#define MCCFG_AGP_BASE 0x102
92#define MCCFG_AGP_BASE_2 0x103
93#define MC_INIT_MISC_LAT_TIMER 0x104
94#define MC_DISP0R_INIT_LAT_SHIFT 8
95#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
96#define MC_DISP1R_INIT_LAT_SHIFT 12
97#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
98
99#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 000000000000..c5b398330c26
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV200D_H__
29#define __RV200D_H__
30
31#define R_00015C_AGP_BASE_2 0x00015C
32#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
33#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
34#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
35
36#endif
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 000000000000..e5a70b06fe1f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV250D_H__
29#define __RV250D_H__
30
31#define R_00000D_SCLK_CNTL_M6 0x00000D
32#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
33#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
34#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
35#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
36#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
37#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
38#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
39#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
40#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
41#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
42#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
43#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
44#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
45#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
46#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
47#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
48#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
49#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
50#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
51#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
52#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
53#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
54#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
55#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
56#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
57#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
58#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
59#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
60#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
61#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
62#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
63#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
64#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
65#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
66#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
67#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
68#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
69#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
70#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
71#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
72#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
73#define C_00000D_FORCE_DISP2 0xFFFF7FFF
74#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
75#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
76#define C_00000D_FORCE_CP 0xFFFEFFFF
77#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
78#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
79#define C_00000D_FORCE_HDP 0xFFFDFFFF
80#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
81#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
82#define C_00000D_FORCE_DISP1 0xFFFBFFFF
83#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
84#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
85#define C_00000D_FORCE_TOP 0xFFF7FFFF
86#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
87#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
88#define C_00000D_FORCE_E2 0xFFEFFFFF
89#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
90#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
91#define C_00000D_FORCE_SE 0xFFDFFFFF
92#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
93#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
94#define C_00000D_FORCE_IDCT 0xFFBFFFFF
95#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
96#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
97#define C_00000D_FORCE_VIP 0xFF7FFFFF
98#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
99#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
100#define C_00000D_FORCE_RE 0xFEFFFFFF
101#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
102#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
103#define C_00000D_FORCE_PB 0xFDFFFFFF
104#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
105#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
106#define C_00000D_FORCE_TAM 0xFBFFFFFF
107#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
108#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
109#define C_00000D_FORCE_TDM 0xF7FFFFFF
110#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
111#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
112#define C_00000D_FORCE_RB 0xEFFFFFFF
113#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
114#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
115#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
116#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
117#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
118#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
119#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
120#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
121#define C_00000D_FORCE_OV0 0x7FFFFFFF
122
123#endif
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 000000000000..c75c5ed9e654
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV350D_H__
29#define __RV350D_H__
30
31/* RV350, RV380 registers */
32/* #define R_00000D_SCLK_CNTL 0x00000D */
33#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
34#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
35#define C_00000D_FORCE_VAP 0xFFDFFFFF
36#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
37#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
38#define C_00000D_FORCE_SR 0xFDFFFFFF
39#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
40#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
41#define C_00000D_FORCE_PX 0xFBFFFFFF
42#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
43#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
44#define C_00000D_FORCE_TX 0xF7FFFFFF
45#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
46#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
47#define C_00000D_FORCE_US 0xEFFFFFFF
48#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
49#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
50#define C_00000D_FORCE_SU 0xBFFFFFFF
51
52#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index fd799748e7d8..7935f793bf62 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -29,37 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "rv515d.h" 30#include "rv515d.h"
31#include "radeon.h" 31#include "radeon.h"
32 32#include "atom.h"
33#include "rv515_reg_safe.h" 33#include "rv515_reg_safe.h"
34/* rv515 depends on : */ 34
35void r100_hdp_reset(struct radeon_device *rdev); 35/* This files gather functions specifics to: rv515 */
36int r100_cp_reset(struct radeon_device *rdev);
37int r100_rb2d_reset(struct radeon_device *rdev);
38int r100_gui_wait_for_idle(struct radeon_device *rdev);
39int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40void r420_pipes_init(struct radeon_device *rdev);
41void rs600_mc_disable_clients(struct radeon_device *rdev);
42void rs600_disable_vga(struct radeon_device *rdev);
43
44/* This files gather functions specifics to:
45 * rv515
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); 36int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
50int rv515_debugfs_ga_info_init(struct radeon_device *rdev); 37int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
51void rv515_gpu_init(struct radeon_device *rdev); 38void rv515_gpu_init(struct radeon_device *rdev);
52int rv515_mc_wait_for_idle(struct radeon_device *rdev); 39int rv515_mc_wait_for_idle(struct radeon_device *rdev);
53 40
54 41void rv515_debugfs(struct radeon_device *rdev)
55/*
56 * MC
57 */
58int rv515_mc_init(struct radeon_device *rdev)
59{ 42{
60 uint32_t tmp;
61 int r;
62
63 if (r100_debugfs_rbbm_init(rdev)) { 43 if (r100_debugfs_rbbm_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for RBBM !\n"); 44 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
65 } 45 }
@@ -69,67 +49,8 @@ int rv515_mc_init(struct radeon_device *rdev)
69 if (rv515_debugfs_ga_info_init(rdev)) { 49 if (rv515_debugfs_ga_info_init(rdev)) {
70 DRM_ERROR("Failed to register debugfs file for pipes !\n"); 50 DRM_ERROR("Failed to register debugfs file for pipes !\n");
71 } 51 }
72
73 rv515_gpu_init(rdev);
74 rv370_pcie_gart_disable(rdev);
75
76 /* Setup GPU memory space */
77 rdev->mc.vram_location = 0xFFFFFFFFUL;
78 rdev->mc.gtt_location = 0xFFFFFFFFUL;
79 if (rdev->flags & RADEON_IS_AGP) {
80 r = radeon_agp_init(rdev);
81 if (r) {
82 printk(KERN_WARNING "[drm] Disabling AGP\n");
83 rdev->flags &= ~RADEON_IS_AGP;
84 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
85 } else {
86 rdev->mc.gtt_location = rdev->mc.agp_base;
87 }
88 }
89 r = radeon_mc_setup(rdev);
90 if (r) {
91 return r;
92 }
93
94 /* Program GPU memory space */
95 rs600_mc_disable_clients(rdev);
96 if (rv515_mc_wait_for_idle(rdev)) {
97 printk(KERN_WARNING "Failed to wait MC idle while "
98 "programming pipes. Bad things might happen.\n");
99 }
100 /* Write VRAM size in case we are limiting it */
101 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
102 tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
103 WREG32(0x134, tmp);
104 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
105 tmp = REG_SET(MC_FB_TOP, tmp >> 16);
106 tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
107 WREG32_MC(MC_FB_LOCATION, tmp);
108 WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
109 WREG32(0x310, rdev->mc.vram_location);
110 if (rdev->flags & RADEON_IS_AGP) {
111 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
112 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
113 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
114 WREG32_MC(MC_AGP_LOCATION, tmp);
115 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
116 WREG32_MC(MC_AGP_BASE_2, 0);
117 } else {
118 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
119 WREG32_MC(MC_AGP_BASE, 0);
120 WREG32_MC(MC_AGP_BASE_2, 0);
121 }
122 return 0;
123}
124
125void rv515_mc_fini(struct radeon_device *rdev)
126{
127} 52}
128 53
129
130/*
131 * Global GPU functions
132 */
133void rv515_ring_start(struct radeon_device *rdev) 54void rv515_ring_start(struct radeon_device *rdev)
134{ 55{
135 int r; 56 int r;
@@ -198,11 +119,6 @@ void rv515_ring_start(struct radeon_device *rdev)
198 radeon_ring_unlock_commit(rdev); 119 radeon_ring_unlock_commit(rdev);
199} 120}
200 121
201void rv515_errata(struct radeon_device *rdev)
202{
203 rdev->pll_errata = 0;
204}
205
206int rv515_mc_wait_for_idle(struct radeon_device *rdev) 122int rv515_mc_wait_for_idle(struct radeon_device *rdev)
207{ 123{
208 unsigned i; 124 unsigned i;
@@ -219,6 +135,14 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
219 return -1; 135 return -1;
220} 136}
221 137
138void rv515_vga_render_disable(struct radeon_device *rdev)
139{
140 WREG32(R_000330_D1VGA_CONTROL, 0);
141 WREG32(R_000338_D2VGA_CONTROL, 0);
142 WREG32(R_000300_VGA_RENDER_CONTROL,
143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
144}
145
222void rv515_gpu_init(struct radeon_device *rdev) 146void rv515_gpu_init(struct radeon_device *rdev)
223{ 147{
224 unsigned pipe_select_current, gb_pipe_select, tmp; 148 unsigned pipe_select_current, gb_pipe_select, tmp;
@@ -231,7 +155,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
231 "reseting GPU. Bad things might happen.\n"); 155 "reseting GPU. Bad things might happen.\n");
232 } 156 }
233 157
234 rs600_disable_vga(rdev); 158 rv515_vga_render_disable(rdev);
235 159
236 r420_pipes_init(rdev); 160 r420_pipes_init(rdev);
237 gb_pipe_select = RREG32(0x402C); 161 gb_pipe_select = RREG32(0x402C);
@@ -335,10 +259,6 @@ int rv515_gpu_reset(struct radeon_device *rdev)
335 return 0; 259 return 0;
336} 260}
337 261
338
339/*
340 * VRAM info
341 */
342static void rv515_vram_get_type(struct radeon_device *rdev) 262static void rv515_vram_get_type(struct radeon_device *rdev)
343{ 263{
344 uint32_t tmp; 264 uint32_t tmp;
@@ -374,10 +294,6 @@ void rv515_vram_info(struct radeon_device *rdev)
374 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 294 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
375} 295}
376 296
377
378/*
379 * Indirect registers accessor
380 */
381uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 297uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
382{ 298{
383 uint32_t r; 299 uint32_t r;
@@ -395,9 +311,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
395 WREG32(MC_IND_INDEX, 0); 311 WREG32(MC_IND_INDEX, 0);
396} 312}
397 313
398/*
399 * Debugfs info
400 */
401#if defined(CONFIG_DEBUG_FS) 314#if defined(CONFIG_DEBUG_FS)
402static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) 315static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
403{ 316{
@@ -459,13 +372,259 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
459#endif 372#endif
460} 373}
461 374
462/* 375void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
463 * Asic initialization 376{
464 */ 377 save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
465int rv515_init(struct radeon_device *rdev) 378 save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
379 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
380 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
381 save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
382 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
383
384 /* Stop all video */
385 WREG32(R_000330_D1VGA_CONTROL, 0);
386 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
387 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
388 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
389 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
390 WREG32(R_006080_D1CRTC_CONTROL, 0);
391 WREG32(R_006880_D2CRTC_CONTROL, 0);
392 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
393 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
394}
395
396void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
397{
398 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
399 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
400 WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
401 WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
402 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
403 /* Unlock host access */
404 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
405 mdelay(1);
406 /* Restore video state */
407 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
408 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
409 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
410 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
411 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
412 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
413 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
414 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
415 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
416}
417
418void rv515_mc_program(struct radeon_device *rdev)
419{
420 struct rv515_mc_save save;
421
422 /* Stops all mc clients */
423 rv515_mc_stop(rdev, &save);
424
425 /* Wait for mc idle */
426 if (rv515_mc_wait_for_idle(rdev))
427 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
428 /* Write VRAM size in case we are limiting it */
429 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
430 /* Program MC, should be a 32bits limited address space */
431 WREG32_MC(R_000001_MC_FB_LOCATION,
432 S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
433 S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
434 WREG32(R_000134_HDP_FB_LOCATION,
435 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
436 if (rdev->flags & RADEON_IS_AGP) {
437 WREG32_MC(R_000002_MC_AGP_LOCATION,
438 S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
439 S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
440 WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
441 WREG32_MC(R_000004_MC_AGP_BASE_2,
442 S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
443 } else {
444 WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
445 WREG32_MC(R_000003_MC_AGP_BASE, 0);
446 WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
447 }
448
449 rv515_mc_resume(rdev, &save);
450}
451
452void rv515_clock_startup(struct radeon_device *rdev)
453{
454 if (radeon_dynclks != -1 && radeon_dynclks)
455 radeon_atom_set_clock_gating(rdev, 1);
456 /* We need to force on some of the block */
457 WREG32_PLL(R_00000F_CP_DYN_CNTL,
458 RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
459 WREG32_PLL(R_000011_E2_DYN_CNTL,
460 RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
461 WREG32_PLL(R_000013_IDCT_DYN_CNTL,
462 RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
463}
464
465static int rv515_startup(struct radeon_device *rdev)
466{
467 int r;
468
469 rv515_mc_program(rdev);
470 /* Resume clock */
471 rv515_clock_startup(rdev);
472 /* Initialize GPU configuration (# pipes, ...) */
473 rv515_gpu_init(rdev);
474 /* Initialize GART (initialize after TTM so we can allocate
475 * memory through TTM but finalize after TTM) */
476 if (rdev->flags & RADEON_IS_PCIE) {
477 r = rv370_pcie_gart_enable(rdev);
478 if (r)
479 return r;
480 }
481 /* Enable IRQ */
482 rdev->irq.sw_int = true;
483 rs600_irq_set(rdev);
484 /* 1M ring buffer */
485 r = r100_cp_init(rdev, 1024 * 1024);
486 if (r) {
487 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
488 return r;
489 }
490 r = r100_wb_init(rdev);
491 if (r)
492 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
493 r = r100_ib_init(rdev);
494 if (r) {
495 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
496 return r;
497 }
498 return 0;
499}
500
501int rv515_resume(struct radeon_device *rdev)
502{
503 /* Make sur GART are not working */
504 if (rdev->flags & RADEON_IS_PCIE)
505 rv370_pcie_gart_disable(rdev);
506 /* Resume clock before doing reset */
507 rv515_clock_startup(rdev);
508 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
509 if (radeon_gpu_reset(rdev)) {
510 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
511 RREG32(R_000E40_RBBM_STATUS),
512 RREG32(R_0007C0_CP_STAT));
513 }
514 /* post */
515 atom_asic_init(rdev->mode_info.atom_context);
516 /* Resume clock after posting */
517 rv515_clock_startup(rdev);
518 return rv515_startup(rdev);
519}
520
521int rv515_suspend(struct radeon_device *rdev)
522{
523 r100_cp_disable(rdev);
524 r100_wb_disable(rdev);
525 rs600_irq_disable(rdev);
526 if (rdev->flags & RADEON_IS_PCIE)
527 rv370_pcie_gart_disable(rdev);
528 return 0;
529}
530
531void rv515_set_safe_registers(struct radeon_device *rdev)
466{ 532{
467 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; 533 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
468 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); 534 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
535}
536
537void rv515_fini(struct radeon_device *rdev)
538{
539 rv515_suspend(rdev);
540 r100_cp_fini(rdev);
541 r100_wb_fini(rdev);
542 r100_ib_fini(rdev);
543 radeon_gem_fini(rdev);
544 rv370_pcie_gart_fini(rdev);
545 radeon_agp_fini(rdev);
546 radeon_irq_kms_fini(rdev);
547 radeon_fence_driver_fini(rdev);
548 radeon_object_fini(rdev);
549 radeon_atombios_fini(rdev);
550 kfree(rdev->bios);
551 rdev->bios = NULL;
552}
553
554int rv515_init(struct radeon_device *rdev)
555{
556 int r;
557
558 /* Initialize scratch registers */
559 radeon_scratch_init(rdev);
560 /* Initialize surface registers */
561 radeon_surface_init(rdev);
562 /* TODO: disable VGA need to use VGA request */
563 /* BIOS*/
564 if (!radeon_get_bios(rdev)) {
565 if (ASIC_IS_AVIVO(rdev))
566 return -EINVAL;
567 }
568 if (rdev->is_atom_bios) {
569 r = radeon_atombios_init(rdev);
570 if (r)
571 return r;
572 } else {
573 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
574 return -EINVAL;
575 }
576 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
577 if (radeon_gpu_reset(rdev)) {
578 dev_warn(rdev->dev,
579 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
580 RREG32(R_000E40_RBBM_STATUS),
581 RREG32(R_0007C0_CP_STAT));
582 }
583 /* check if cards are posted or not */
584 if (!radeon_card_posted(rdev) && rdev->bios) {
585 DRM_INFO("GPU not posted. posting now...\n");
586 atom_asic_init(rdev->mode_info.atom_context);
587 }
588 /* Initialize clocks */
589 radeon_get_clock_info(rdev->ddev);
590 /* Initialize power management */
591 radeon_pm_init(rdev);
592 /* Get vram informations */
593 rv515_vram_info(rdev);
594 /* Initialize memory controller (also test AGP) */
595 r = r420_mc_init(rdev);
596 if (r)
597 return r;
598 rv515_debugfs(rdev);
599 /* Fence driver */
600 r = radeon_fence_driver_init(rdev);
601 if (r)
602 return r;
603 r = radeon_irq_kms_init(rdev);
604 if (r)
605 return r;
606 /* Memory manager */
607 r = radeon_object_init(rdev);
608 if (r)
609 return r;
610 r = rv370_pcie_gart_init(rdev);
611 if (r)
612 return r;
613 rv515_set_safe_registers(rdev);
614 rdev->accel_working = true;
615 r = rv515_startup(rdev);
616 if (r) {
617 /* Somethings want wront with the accel init stop accel */
618 dev_err(rdev->dev, "Disabling GPU acceleration\n");
619 rv515_suspend(rdev);
620 r100_cp_fini(rdev);
621 r100_wb_fini(rdev);
622 r100_ib_fini(rdev);
623 rv370_pcie_gart_fini(rdev);
624 radeon_agp_fini(rdev);
625 radeon_irq_kms_fini(rdev);
626 rdev->accel_working = false;
627 }
469 return 0; 628 return 0;
470} 629}
471 630
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index a65e17ec1c08..fc216e49384d 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -216,5 +216,388 @@
216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) 216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218 218
219#endif 219/* Registers */
220#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
221#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
222#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
223#define C_0000F8_CONFIG_MEMSIZE 0x00000000
224#define R_000134_HDP_FB_LOCATION 0x000134
225#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
226#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
227#define C_000134_HDP_FB_START 0xFFFF0000
228#define R_000300_VGA_RENDER_CONTROL 0x000300
229#define S_000300_VGA_BLINK_RATE(x) (((x) & 0x1F) << 0)
230#define G_000300_VGA_BLINK_RATE(x) (((x) >> 0) & 0x1F)
231#define C_000300_VGA_BLINK_RATE 0xFFFFFFE0
232#define S_000300_VGA_BLINK_MODE(x) (((x) & 0x3) << 5)
233#define G_000300_VGA_BLINK_MODE(x) (((x) >> 5) & 0x3)
234#define C_000300_VGA_BLINK_MODE 0xFFFFFF9F
235#define S_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) & 0x1) << 7)
236#define G_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) >> 7) & 0x1)
237#define C_000300_VGA_CURSOR_BLINK_INVERT 0xFFFFFF7F
238#define S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) & 0x1) << 8)
239#define G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) >> 8) & 0x1)
240#define C_000300_VGA_EXTD_ADDR_COUNT_ENABLE 0xFFFFFEFF
241#define S_000300_VGA_VSTATUS_CNTL(x) (((x) & 0x3) << 16)
242#define G_000300_VGA_VSTATUS_CNTL(x) (((x) >> 16) & 0x3)
243#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
244#define S_000300_VGA_LOCK_8DOT(x) (((x) & 0x1) << 24)
245#define G_000300_VGA_LOCK_8DOT(x) (((x) >> 24) & 0x1)
246#define C_000300_VGA_LOCK_8DOT 0xFEFFFFFF
247#define S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
248#define G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
249#define C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL 0xFDFFFFFF
250#define R_000310_VGA_MEMORY_BASE_ADDRESS 0x000310
251#define S_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
252#define G_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
253#define C_000310_VGA_MEMORY_BASE_ADDRESS 0x00000000
254#define R_000328_VGA_HDP_CONTROL 0x000328
255#define S_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) & 0x1) << 0)
256#define G_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) >> 0) & 0x1)
257#define C_000328_VGA_MEM_PAGE_SELECT_EN 0xFFFFFFFE
258#define S_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) & 0x1) << 8)
259#define G_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) >> 8) & 0x1)
260#define C_000328_VGA_RBBM_LOCK_DISABLE 0xFFFFFEFF
261#define S_000328_VGA_SOFT_RESET(x) (((x) & 0x1) << 16)
262#define G_000328_VGA_SOFT_RESET(x) (((x) >> 16) & 0x1)
263#define C_000328_VGA_SOFT_RESET 0xFFFEFFFF
264#define S_000328_VGA_TEST_RESET_CONTROL(x) (((x) & 0x1) << 24)
265#define G_000328_VGA_TEST_RESET_CONTROL(x) (((x) >> 24) & 0x1)
266#define C_000328_VGA_TEST_RESET_CONTROL 0xFEFFFFFF
267#define R_000330_D1VGA_CONTROL 0x000330
268#define S_000330_D1VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
269#define G_000330_D1VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
270#define C_000330_D1VGA_MODE_ENABLE 0xFFFFFFFE
271#define S_000330_D1VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
272#define G_000330_D1VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
273#define C_000330_D1VGA_TIMING_SELECT 0xFFFFFEFF
274#define S_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
275#define G_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
276#define C_000330_D1VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
277#define S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
278#define G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
279#define C_000330_D1VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
280#define S_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
281#define G_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
282#define C_000330_D1VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
283#define S_000330_D1VGA_ROTATE(x) (((x) & 0x3) << 24)
284#define G_000330_D1VGA_ROTATE(x) (((x) >> 24) & 0x3)
285#define C_000330_D1VGA_ROTATE 0xFCFFFFFF
286#define R_000338_D2VGA_CONTROL 0x000338
287#define S_000338_D2VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
288#define G_000338_D2VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
289#define C_000338_D2VGA_MODE_ENABLE 0xFFFFFFFE
290#define S_000338_D2VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
291#define G_000338_D2VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
292#define C_000338_D2VGA_TIMING_SELECT 0xFFFFFEFF
293#define S_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
294#define G_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
295#define C_000338_D2VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
296#define S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
297#define G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
298#define C_000338_D2VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
299#define S_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
300#define G_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
301#define C_000338_D2VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
302#define S_000338_D2VGA_ROTATE(x) (((x) & 0x3) << 24)
303#define G_000338_D2VGA_ROTATE(x) (((x) >> 24) & 0x3)
304#define C_000338_D2VGA_ROTATE 0xFCFFFFFF
305#define R_0007C0_CP_STAT 0x0007C0
306#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
307#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
308#define C_0007C0_MRU_BUSY 0xFFFFFFFE
309#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
310#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
311#define C_0007C0_MWU_BUSY 0xFFFFFFFD
312#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
313#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
314#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
315#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
316#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
317#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
318#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
319#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
320#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
321#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
322#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
323#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
324#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
325#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
326#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
327#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
328#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
329#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
330#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
331#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
332#define C_0007C0_CSI_BUSY 0xFFFFDFFF
333#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
334#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
335#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
336#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
337#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
338#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
339#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
340#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
341#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
342#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
343#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
344#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
345#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
346#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
347#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
348#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
349#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
350#define C_0007C0_CP_BUSY 0x7FFFFFFF
351#define R_000E40_RBBM_STATUS 0x000E40
352#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
353#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
354#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
355#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
356#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
357#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
358#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
359#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
360#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
361#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
362#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
363#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
364#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
365#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
366#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
367#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
368#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
369#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
370#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
371#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
372#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
373#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
374#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
375#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
376#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
377#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
378#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
379#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
380#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
381#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
382#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
383#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
384#define C_000E40_E2_BUSY 0xFFFDFFFF
385#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
386#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
387#define C_000E40_RB2D_BUSY 0xFFFBFFFF
388#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
389#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
390#define C_000E40_RB3D_BUSY 0xFFF7FFFF
391#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
392#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
393#define C_000E40_VAP_BUSY 0xFFEFFFFF
394#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
395#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
396#define C_000E40_RE_BUSY 0xFFDFFFFF
397#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
398#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
399#define C_000E40_TAM_BUSY 0xFFBFFFFF
400#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
401#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
402#define C_000E40_TDM_BUSY 0xFF7FFFFF
403#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
404#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
405#define C_000E40_PB_BUSY 0xFEFFFFFF
406#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
407#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
408#define C_000E40_TIM_BUSY 0xFDFFFFFF
409#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
410#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
411#define C_000E40_GA_BUSY 0xFBFFFFFF
412#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
413#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
414#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
415#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
416#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
417#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
418#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
419#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
420#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
421#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
422#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
423#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
424#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
425#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
426#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
427#define R_006080_D1CRTC_CONTROL 0x006080
428#define S_006080_D1CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
429#define G_006080_D1CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
430#define C_006080_D1CRTC_MASTER_EN 0xFFFFFFFE
431#define S_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
432#define G_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
433#define C_006080_D1CRTC_SYNC_RESET_SEL 0xFFFFFFEF
434#define S_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
435#define G_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
436#define C_006080_D1CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
437#define S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
438#define G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
439#define C_006080_D1CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
440#define S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
441#define G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
442#define C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
443#define R_0060E8_D1CRTC_UPDATE_LOCK 0x0060E8
444#define S_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
445#define G_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
446#define C_0060E8_D1CRTC_UPDATE_LOCK 0xFFFFFFFE
447#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x006110
448#define S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
449#define G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
450#define C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
451#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x006118
452#define S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
453#define G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
454#define C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
455#define R_006880_D2CRTC_CONTROL 0x006880
456#define S_006880_D2CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
457#define G_006880_D2CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
458#define C_006880_D2CRTC_MASTER_EN 0xFFFFFFFE
459#define S_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
460#define G_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
461#define C_006880_D2CRTC_SYNC_RESET_SEL 0xFFFFFFEF
462#define S_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
463#define G_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
464#define C_006880_D2CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
465#define S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
466#define G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
467#define C_006880_D2CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
468#define S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
469#define G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
470#define C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
471#define R_0068E8_D2CRTC_UPDATE_LOCK 0x0068E8
472#define S_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
473#define G_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
474#define C_0068E8_D2CRTC_UPDATE_LOCK 0xFFFFFFFE
475#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x006910
476#define S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
477#define G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
478#define C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
479#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x006918
480#define S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
481#define G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
482#define C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
483
484
485#define R_000001_MC_FB_LOCATION 0x000001
486#define S_000001_MC_FB_START(x) (((x) & 0xFFFF) << 0)
487#define G_000001_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
488#define C_000001_MC_FB_START 0xFFFF0000
489#define S_000001_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
490#define G_000001_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
491#define C_000001_MC_FB_TOP 0x0000FFFF
492#define R_000002_MC_AGP_LOCATION 0x000002
493#define S_000002_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
494#define G_000002_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
495#define C_000002_MC_AGP_START 0xFFFF0000
496#define S_000002_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
497#define G_000002_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
498#define C_000002_MC_AGP_TOP 0x0000FFFF
499#define R_000003_MC_AGP_BASE 0x000003
500#define S_000003_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
501#define G_000003_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
502#define C_000003_AGP_BASE_ADDR 0x00000000
503#define R_000004_MC_AGP_BASE_2 0x000004
504#define S_000004_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
505#define G_000004_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
506#define C_000004_AGP_BASE_ADDR_2 0xFFFFFFF0
220 507
508
509#define R_00000F_CP_DYN_CNTL 0x00000F
510#define S_00000F_CP_FORCEON(x) (((x) & 0x1) << 0)
511#define G_00000F_CP_FORCEON(x) (((x) >> 0) & 0x1)
512#define C_00000F_CP_FORCEON 0xFFFFFFFE
513#define S_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
514#define G_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
515#define C_00000F_CP_MAX_DYN_STOP_LAT 0xFFFFFFFD
516#define S_00000F_CP_CLOCK_STATUS(x) (((x) & 0x1) << 2)
517#define G_00000F_CP_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
518#define C_00000F_CP_CLOCK_STATUS 0xFFFFFFFB
519#define S_00000F_CP_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
520#define G_00000F_CP_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
521#define C_00000F_CP_PROG_SHUTOFF 0xFFFFFFF7
522#define S_00000F_CP_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
523#define G_00000F_CP_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
524#define C_00000F_CP_PROG_DELAY_VALUE 0xFFFFF00F
525#define S_00000F_CP_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
526#define G_00000F_CP_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
527#define C_00000F_CP_LOWER_POWER_IDLE 0xFFF00FFF
528#define S_00000F_CP_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
529#define G_00000F_CP_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
530#define C_00000F_CP_LOWER_POWER_IGNORE 0xFFEFFFFF
531#define S_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
532#define G_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
533#define C_00000F_CP_NORMAL_POWER_IGNORE 0xFFDFFFFF
534#define S_00000F_SPARE(x) (((x) & 0x3) << 22)
535#define G_00000F_SPARE(x) (((x) >> 22) & 0x3)
536#define C_00000F_SPARE 0xFF3FFFFF
537#define S_00000F_CP_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
538#define G_00000F_CP_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
539#define C_00000F_CP_NORMAL_POWER_BUSY 0x00FFFFFF
540#define R_000011_E2_DYN_CNTL 0x000011
541#define S_000011_E2_FORCEON(x) (((x) & 0x1) << 0)
542#define G_000011_E2_FORCEON(x) (((x) >> 0) & 0x1)
543#define C_000011_E2_FORCEON 0xFFFFFFFE
544#define S_000011_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
545#define G_000011_E2_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
546#define C_000011_E2_MAX_DYN_STOP_LAT 0xFFFFFFFD
547#define S_000011_E2_CLOCK_STATUS(x) (((x) & 0x1) << 2)
548#define G_000011_E2_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
549#define C_000011_E2_CLOCK_STATUS 0xFFFFFFFB
550#define S_000011_E2_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
551#define G_000011_E2_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
552#define C_000011_E2_PROG_SHUTOFF 0xFFFFFFF7
553#define S_000011_E2_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
554#define G_000011_E2_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
555#define C_000011_E2_PROG_DELAY_VALUE 0xFFFFF00F
556#define S_000011_E2_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
557#define G_000011_E2_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
558#define C_000011_E2_LOWER_POWER_IDLE 0xFFF00FFF
559#define S_000011_E2_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
560#define G_000011_E2_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
561#define C_000011_E2_LOWER_POWER_IGNORE 0xFFEFFFFF
562#define S_000011_E2_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
563#define G_000011_E2_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
564#define C_000011_E2_NORMAL_POWER_IGNORE 0xFFDFFFFF
565#define S_000011_SPARE(x) (((x) & 0x3) << 22)
566#define G_000011_SPARE(x) (((x) >> 22) & 0x3)
567#define C_000011_SPARE 0xFF3FFFFF
568#define S_000011_E2_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
569#define G_000011_E2_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
570#define C_000011_E2_NORMAL_POWER_BUSY 0x00FFFFFF
571#define R_000013_IDCT_DYN_CNTL 0x000013
572#define S_000013_IDCT_FORCEON(x) (((x) & 0x1) << 0)
573#define G_000013_IDCT_FORCEON(x) (((x) >> 0) & 0x1)
574#define C_000013_IDCT_FORCEON 0xFFFFFFFE
575#define S_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
576#define G_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
577#define C_000013_IDCT_MAX_DYN_STOP_LAT 0xFFFFFFFD
578#define S_000013_IDCT_CLOCK_STATUS(x) (((x) & 0x1) << 2)
579#define G_000013_IDCT_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
580#define C_000013_IDCT_CLOCK_STATUS 0xFFFFFFFB
581#define S_000013_IDCT_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
582#define G_000013_IDCT_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
583#define C_000013_IDCT_PROG_SHUTOFF 0xFFFFFFF7
584#define S_000013_IDCT_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
585#define G_000013_IDCT_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
586#define C_000013_IDCT_PROG_DELAY_VALUE 0xFFFFF00F
587#define S_000013_IDCT_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
588#define G_000013_IDCT_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
589#define C_000013_IDCT_LOWER_POWER_IDLE 0xFFF00FFF
590#define S_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
591#define G_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
592#define C_000013_IDCT_LOWER_POWER_IGNORE 0xFFEFFFFF
593#define S_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
594#define G_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
595#define C_000013_IDCT_NORMAL_POWER_IGNORE 0xFFDFFFFF
596#define S_000013_SPARE(x) (((x) & 0x3) << 22)
597#define G_000013_SPARE(x) (((x) >> 22) & 0x3)
598#define C_000013_SPARE 0xFF3FFFFF
599#define S_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
600#define G_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
601#define C_000013_IDCT_NORMAL_POWER_BUSY 0x00FFFFFF
602
603#endif
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b574c73a5109..b0efd0ddae7a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -31,8 +31,8 @@
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_drm.h" 32#include "radeon_drm.h"
33#include "rv770d.h" 33#include "rv770d.h"
34#include "avivod.h"
35#include "atom.h" 34#include "atom.h"
35#include "avivod.h"
36 36
37#define R700_PFP_UCODE_SIZE 848 37#define R700_PFP_UCODE_SIZE 848
38#define R700_PM4_UCODE_SIZE 1360 38#define R700_PM4_UCODE_SIZE 1360
@@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
75 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 75 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
76 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 76 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
77 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 77 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
78 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); 78 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
79 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 79 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
80 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 80 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
81 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 81 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
126} 126}
127 127
128 128
129/* 129void rv770_agp_enable(struct radeon_device *rdev)
130 * MC 130{
131 */ 131 u32 tmp;
132static void rv770_mc_resume(struct radeon_device *rdev) 132 int i;
133
134 /* Setup L2 cache */
135 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
136 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
137 EFFECTIVE_L2_QUEUE_SIZE(7));
138 WREG32(VM_L2_CNTL2, 0);
139 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
140 /* Setup TLB control */
141 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
142 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
143 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
144 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
145 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
146 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
147 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
148 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
149 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
150 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
151 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
152 for (i = 0; i < 7; i++)
153 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
154}
155
156static void rv770_mc_program(struct radeon_device *rdev)
133{ 157{
134 u32 d1vga_control, d2vga_control; 158 struct rv515_mc_save save;
135 u32 vga_render_control, vga_hdp_control;
136 u32 d1crtc_control, d2crtc_control;
137 u32 new_d1grph_primary, new_d1grph_secondary;
138 u32 new_d2grph_primary, new_d2grph_secondary;
139 u64 old_vram_start;
140 u32 tmp; 159 u32 tmp;
141 int i, j; 160 int i, j;
142 161
@@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev)
150 } 169 }
151 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 170 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
152 171
153 d1vga_control = RREG32(D1VGA_CONTROL); 172 rv515_mc_stop(rdev, &save);
154 d2vga_control = RREG32(D2VGA_CONTROL);
155 vga_render_control = RREG32(VGA_RENDER_CONTROL);
156 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
157 d1crtc_control = RREG32(D1CRTC_CONTROL);
158 d2crtc_control = RREG32(D2CRTC_CONTROL);
159 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
160 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
161 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
162 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
163 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
164 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
165 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
166 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
167 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
168
169 /* Stop all video */
170 WREG32(D1VGA_CONTROL, 0);
171 WREG32(D2VGA_CONTROL, 0);
172 WREG32(VGA_RENDER_CONTROL, 0);
173 WREG32(D1CRTC_UPDATE_LOCK, 1);
174 WREG32(D2CRTC_UPDATE_LOCK, 1);
175 WREG32(D1CRTC_CONTROL, 0);
176 WREG32(D2CRTC_CONTROL, 0);
177 WREG32(D1CRTC_UPDATE_LOCK, 0);
178 WREG32(D2CRTC_UPDATE_LOCK, 0);
179
180 mdelay(1);
181 if (r600_mc_wait_for_idle(rdev)) { 173 if (r600_mc_wait_for_idle(rdev)) {
182 printk(KERN_WARNING "[drm] MC not idle !\n"); 174 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
183 } 175 }
184
185 /* Lockout access through VGA aperture*/ 176 /* Lockout access through VGA aperture*/
186 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 177 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
187
188 /* Update configuration */ 178 /* Update configuration */
189 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 179 if (rdev->flags & RADEON_IS_AGP) {
190 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); 180 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
181 /* VRAM before AGP */
182 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
183 rdev->mc.vram_start >> 12);
184 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
185 rdev->mc.gtt_end >> 12);
186 } else {
187 /* VRAM after AGP */
188 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
189 rdev->mc.gtt_start >> 12);
190 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
191 rdev->mc.vram_end >> 12);
192 }
193 } else {
194 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
195 rdev->mc.vram_start >> 12);
196 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
197 rdev->mc.vram_end >> 12);
198 }
191 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 199 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
192 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; 200 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
193 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 201 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
194 WREG32(MC_VM_FB_LOCATION, tmp); 202 WREG32(MC_VM_FB_LOCATION, tmp);
195 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 203 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
196 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 204 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
197 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 205 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
198 if (rdev->flags & RADEON_IS_AGP) { 206 if (rdev->flags & RADEON_IS_AGP) {
199 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); 207 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
200 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 208 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
201 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 209 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
202 } else { 210 } else {
@@ -204,34 +212,13 @@ static void rv770_mc_resume(struct radeon_device *rdev)
204 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 212 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
205 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 213 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
206 } 214 }
207 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
208 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
209 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
210 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
211 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
212
213 /* Unlock host access */
214 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
215
216 mdelay(1);
217 if (r600_mc_wait_for_idle(rdev)) { 215 if (r600_mc_wait_for_idle(rdev)) {
218 printk(KERN_WARNING "[drm] MC not idle !\n"); 216 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
219 } 217 }
220 218 rv515_mc_resume(rdev, &save);
221 /* Restore video state */
222 WREG32(D1CRTC_UPDATE_LOCK, 1);
223 WREG32(D2CRTC_UPDATE_LOCK, 1);
224 WREG32(D1CRTC_CONTROL, d1crtc_control);
225 WREG32(D2CRTC_CONTROL, d2crtc_control);
226 WREG32(D1CRTC_UPDATE_LOCK, 0);
227 WREG32(D2CRTC_UPDATE_LOCK, 0);
228 WREG32(D1VGA_CONTROL, d1vga_control);
229 WREG32(D2VGA_CONTROL, d2vga_control);
230 WREG32(VGA_RENDER_CONTROL, vga_render_control);
231
232 /* we need to own VRAM, so turn off the VGA renderer here 219 /* we need to own VRAM, so turn off the VGA renderer here
233 * to stop it overwriting our objects */ 220 * to stop it overwriting our objects */
234 radeon_avivo_vga_render_disable(rdev); 221 rv515_vga_render_disable(rdev);
235} 222}
236 223
237 224
@@ -542,11 +529,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
542 if (rdev->family == CHIP_RV770) 529 if (rdev->family == CHIP_RV770)
543 gb_tiling_config |= BANK_TILING(1); 530 gb_tiling_config |= BANK_TILING(1);
544 else 531 else
545 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK); 532 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
546 533
547 gb_tiling_config |= GROUP_SIZE(0); 534 gb_tiling_config |= GROUP_SIZE(0);
548 535
549 if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) { 536 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
550 gb_tiling_config |= ROW_TILING(3); 537 gb_tiling_config |= ROW_TILING(3);
551 gb_tiling_config |= SAMPLE_SPLIT(3); 538 gb_tiling_config |= SAMPLE_SPLIT(3);
552 } else { 539 } else {
@@ -592,14 +579,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
592 579
593 /* set HW defaults for 3D engine */ 580 /* set HW defaults for 3D engine */
594 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 581 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
595 ROQ_IB2_START(0x2b))); 582 ROQ_IB2_START(0x2b)));
596 583
597 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 584 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
598 585
599 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 586 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
600 SYNC_GRADIENT | 587 SYNC_GRADIENT |
601 SYNC_WALKER | 588 SYNC_WALKER |
602 SYNC_ALIGNER)); 589 SYNC_ALIGNER));
603 590
604 sx_debug_1 = RREG32(SX_DEBUG_1); 591 sx_debug_1 = RREG32(SX_DEBUG_1);
605 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 592 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -611,9 +598,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
611 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 598 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
612 599
613 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | 600 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
614 GS_FLUSH_CTL(4) | 601 GS_FLUSH_CTL(4) |
615 ACK_FLUSH_CTL(3) | 602 ACK_FLUSH_CTL(3) |
616 SYNC_FLUSH_CTL)); 603 SYNC_FLUSH_CTL));
617 604
618 if (rdev->family == CHIP_RV770) 605 if (rdev->family == CHIP_RV770)
619 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); 606 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
@@ -624,12 +611,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
624 } 611 }
625 612
626 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | 613 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
627 POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | 614 POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
628 SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); 615 SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
629 616
630 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | 617 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
631 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | 618 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
632 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); 619 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
633 620
634 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 621 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
635 622
@@ -787,20 +774,49 @@ int rv770_mc_init(struct radeon_device *rdev)
787{ 774{
788 fixed20_12 a; 775 fixed20_12 a;
789 u32 tmp; 776 u32 tmp;
777 int chansize, numchan;
790 int r; 778 int r;
791 779
792 /* Get VRAM informations */ 780 /* Get VRAM informations */
793 /* FIXME: Don't know how to determine vram width, need to check
794 * vram_width usage
795 */
796 rdev->mc.vram_width = 128;
797 rdev->mc.vram_is_ddr = true; 781 rdev->mc.vram_is_ddr = true;
782 tmp = RREG32(MC_ARB_RAMCFG);
783 if (tmp & CHANSIZE_OVERRIDE) {
784 chansize = 16;
785 } else if (tmp & CHANSIZE_MASK) {
786 chansize = 64;
787 } else {
788 chansize = 32;
789 }
790 tmp = RREG32(MC_SHARED_CHMAP);
791 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
792 case 0:
793 default:
794 numchan = 1;
795 break;
796 case 1:
797 numchan = 2;
798 break;
799 case 2:
800 numchan = 4;
801 break;
802 case 3:
803 numchan = 8;
804 break;
805 }
806 rdev->mc.vram_width = numchan * chansize;
798 /* Could aper size report 0 ? */ 807 /* Could aper size report 0 ? */
799 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 808 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
800 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 809 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
801 /* Setup GPU memory space */ 810 /* Setup GPU memory space */
802 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 811 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
803 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 812 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
813
814 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
815 rdev->mc.mc_vram_size = rdev->mc.aper_size;
816
817 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
818 rdev->mc.real_vram_size = rdev->mc.aper_size;
819
804 if (rdev->flags & RADEON_IS_AGP) { 820 if (rdev->flags & RADEON_IS_AGP) {
805 r = radeon_agp_init(rdev); 821 r = radeon_agp_init(rdev);
806 if (r) 822 if (r)
@@ -833,9 +849,9 @@ int rv770_mc_init(struct radeon_device *rdev)
833 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 849 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
834 } 850 }
835 rdev->mc.vram_start = rdev->mc.vram_location; 851 rdev->mc.vram_start = rdev->mc.vram_location;
836 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; 852 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
837 rdev->mc.gtt_start = rdev->mc.gtt_location; 853 rdev->mc.gtt_start = rdev->mc.gtt_location;
838 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; 854 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
839 /* FIXME: we should enforce default clock in case GPU is not in 855 /* FIXME: we should enforce default clock in case GPU is not in
840 * default setup 856 * default setup
841 */ 857 */
@@ -854,11 +870,14 @@ static int rv770_startup(struct radeon_device *rdev)
854{ 870{
855 int r; 871 int r;
856 872
857 radeon_gpu_reset(rdev); 873 rv770_mc_program(rdev);
858 rv770_mc_resume(rdev); 874 if (rdev->flags & RADEON_IS_AGP) {
859 r = rv770_pcie_gart_enable(rdev); 875 rv770_agp_enable(rdev);
860 if (r) 876 } else {
861 return r; 877 r = rv770_pcie_gart_enable(rdev);
878 if (r)
879 return r;
880 }
862 rv770_gpu_init(rdev); 881 rv770_gpu_init(rdev);
863 882
864 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 883 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -877,9 +896,8 @@ static int rv770_startup(struct radeon_device *rdev)
877 r = r600_cp_resume(rdev); 896 r = r600_cp_resume(rdev);
878 if (r) 897 if (r)
879 return r; 898 return r;
880 r = r600_wb_init(rdev); 899 /* write back buffer are not vital so don't worry about failure */
881 if (r) 900 r600_wb_enable(rdev);
882 return r;
883 return 0; 901 return 0;
884} 902}
885 903
@@ -887,15 +905,12 @@ int rv770_resume(struct radeon_device *rdev)
887{ 905{
888 int r; 906 int r;
889 907
890 if (radeon_gpu_reset(rdev)) { 908 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
891 /* FIXME: what do we want to do here ? */ 909 * posting will perform necessary task to bring back GPU into good
892 } 910 * shape.
911 */
893 /* post card */ 912 /* post card */
894 if (rdev->is_atom_bios) { 913 atom_asic_init(rdev->mode_info.atom_context);
895 atom_asic_init(rdev->mode_info.atom_context);
896 } else {
897 radeon_combios_asic_init(rdev->ddev);
898 }
899 /* Initialize clocks */ 914 /* Initialize clocks */
900 r = radeon_clocks_init(rdev); 915 r = radeon_clocks_init(rdev);
901 if (r) { 916 if (r) {
@@ -908,7 +923,7 @@ int rv770_resume(struct radeon_device *rdev)
908 return r; 923 return r;
909 } 924 }
910 925
911 r = radeon_ib_test(rdev); 926 r = r600_ib_test(rdev);
912 if (r) { 927 if (r) {
913 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 928 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
914 return r; 929 return r;
@@ -922,8 +937,8 @@ int rv770_suspend(struct radeon_device *rdev)
922 /* FIXME: we should wait for ring to be empty */ 937 /* FIXME: we should wait for ring to be empty */
923 r700_cp_stop(rdev); 938 r700_cp_stop(rdev);
924 rdev->cp.ready = false; 939 rdev->cp.ready = false;
940 r600_wb_disable(rdev);
925 rv770_pcie_gart_disable(rdev); 941 rv770_pcie_gart_disable(rdev);
926
927 /* unpin shaders bo */ 942 /* unpin shaders bo */
928 radeon_object_unpin(rdev->r600_blit.shader_obj); 943 radeon_object_unpin(rdev->r600_blit.shader_obj);
929 return 0; 944 return 0;
@@ -939,7 +954,6 @@ int rv770_init(struct radeon_device *rdev)
939{ 954{
940 int r; 955 int r;
941 956
942 rdev->new_init_path = true;
943 r = radeon_dummy_page_init(rdev); 957 r = radeon_dummy_page_init(rdev);
944 if (r) 958 if (r)
945 return r; 959 return r;
@@ -953,8 +967,10 @@ int rv770_init(struct radeon_device *rdev)
953 return -EINVAL; 967 return -EINVAL;
954 } 968 }
955 /* Must be an ATOMBIOS */ 969 /* Must be an ATOMBIOS */
956 if (!rdev->is_atom_bios) 970 if (!rdev->is_atom_bios) {
971 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
957 return -EINVAL; 972 return -EINVAL;
973 }
958 r = radeon_atombios_init(rdev); 974 r = radeon_atombios_init(rdev);
959 if (r) 975 if (r)
960 return r; 976 return r;
@@ -967,24 +983,20 @@ int rv770_init(struct radeon_device *rdev)
967 r600_scratch_init(rdev); 983 r600_scratch_init(rdev);
968 /* Initialize surface registers */ 984 /* Initialize surface registers */
969 radeon_surface_init(rdev); 985 radeon_surface_init(rdev);
986 /* Initialize clocks */
970 radeon_get_clock_info(rdev->ddev); 987 radeon_get_clock_info(rdev->ddev);
971 r = radeon_clocks_init(rdev); 988 r = radeon_clocks_init(rdev);
972 if (r) 989 if (r)
973 return r; 990 return r;
991 /* Initialize power management */
992 radeon_pm_init(rdev);
974 /* Fence driver */ 993 /* Fence driver */
975 r = radeon_fence_driver_init(rdev); 994 r = radeon_fence_driver_init(rdev);
976 if (r) 995 if (r)
977 return r; 996 return r;
978 r = rv770_mc_init(rdev); 997 r = rv770_mc_init(rdev);
979 if (r) { 998 if (r)
980 if (rdev->flags & RADEON_IS_AGP) {
981 /* Retry with disabling AGP */
982 rv770_fini(rdev);
983 rdev->flags &= ~RADEON_IS_AGP;
984 return rv770_init(rdev);
985 }
986 return r; 999 return r;
987 }
988 /* Memory manager */ 1000 /* Memory manager */
989 r = radeon_object_init(rdev); 1001 r = radeon_object_init(rdev);
990 if (r) 1002 if (r)
@@ -1013,12 +1025,10 @@ int rv770_init(struct radeon_device *rdev)
1013 1025
1014 r = rv770_startup(rdev); 1026 r = rv770_startup(rdev);
1015 if (r) { 1027 if (r) {
1016 if (rdev->flags & RADEON_IS_AGP) { 1028 rv770_suspend(rdev);
1017 /* Retry with disabling AGP */ 1029 r600_wb_fini(rdev);
1018 rv770_fini(rdev); 1030 radeon_ring_fini(rdev);
1019 rdev->flags &= ~RADEON_IS_AGP; 1031 rv770_pcie_gart_fini(rdev);
1020 return rv770_init(rdev);
1021 }
1022 rdev->accel_working = false; 1032 rdev->accel_working = false;
1023 } 1033 }
1024 if (rdev->accel_working) { 1034 if (rdev->accel_working) {
@@ -1027,7 +1037,7 @@ int rv770_init(struct radeon_device *rdev)
1027 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1037 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1028 rdev->accel_working = false; 1038 rdev->accel_working = false;
1029 } 1039 }
1030 r = radeon_ib_test(rdev); 1040 r = r600_ib_test(rdev);
1031 if (r) { 1041 if (r) {
1032 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1042 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1033 rdev->accel_working = false; 1043 rdev->accel_working = false;
@@ -1042,20 +1052,15 @@ void rv770_fini(struct radeon_device *rdev)
1042 1052
1043 r600_blit_fini(rdev); 1053 r600_blit_fini(rdev);
1044 radeon_ring_fini(rdev); 1054 radeon_ring_fini(rdev);
1055 r600_wb_fini(rdev);
1045 rv770_pcie_gart_fini(rdev); 1056 rv770_pcie_gart_fini(rdev);
1046 radeon_gem_fini(rdev); 1057 radeon_gem_fini(rdev);
1047 radeon_fence_driver_fini(rdev); 1058 radeon_fence_driver_fini(rdev);
1048 radeon_clocks_fini(rdev); 1059 radeon_clocks_fini(rdev);
1049#if __OS_HAS_AGP
1050 if (rdev->flags & RADEON_IS_AGP) 1060 if (rdev->flags & RADEON_IS_AGP)
1051 radeon_agp_fini(rdev); 1061 radeon_agp_fini(rdev);
1052#endif
1053 radeon_object_fini(rdev); 1062 radeon_object_fini(rdev);
1054 if (rdev->is_atom_bios) { 1063 radeon_atombios_fini(rdev);
1055 radeon_atombios_fini(rdev);
1056 } else {
1057 radeon_combios_fini(rdev);
1058 }
1059 kfree(rdev->bios); 1064 kfree(rdev->bios);
1060 rdev->bios = NULL; 1065 rdev->bios = NULL;
1061 radeon_dummy_page_fini(rdev); 1066 radeon_dummy_page_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 4b9c3d6396ff..a1367ab6f261 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -129,6 +129,10 @@
129#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 129#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
130#define HDP_TILING_CONFIG 0x2F3C 130#define HDP_TILING_CONFIG 0x2F3C
131 131
132#define MC_SHARED_CHMAP 0x2004
133#define NOOFCHAN_SHIFT 12
134#define NOOFCHAN_MASK 0x00003000
135
132#define MC_ARB_RAMCFG 0x2760 136#define MC_ARB_RAMCFG 0x2760
133#define NOOFBANK_SHIFT 0 137#define NOOFBANK_SHIFT 0
134#define NOOFBANK_MASK 0x00000003 138#define NOOFBANK_MASK 0x00000003
@@ -142,6 +146,7 @@
142#define CHANSIZE_MASK 0x00000100 146#define CHANSIZE_MASK 0x00000100
143#define BURSTLENGTH_SHIFT 9 147#define BURSTLENGTH_SHIFT 9
144#define BURSTLENGTH_MASK 0x00000200 148#define BURSTLENGTH_MASK 0x00000200
149#define CHANSIZE_OVERRIDE (1 << 11)
145#define MC_VM_AGP_TOP 0x2028 150#define MC_VM_AGP_TOP 0x2028
146#define MC_VM_AGP_BOT 0x202C 151#define MC_VM_AGP_BOT 0x202C
147#define MC_VM_AGP_BASE 0x2030 152#define MC_VM_AGP_BASE 0x2030
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33de7637c0c6..1c040d040338 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
228 vma->vm_private_data = NULL; 228 vma->vm_private_data = NULL;
229} 229}
230 230
231static struct vm_operations_struct ttm_bo_vm_ops = { 231static const struct vm_operations_struct ttm_bo_vm_ops = {
232 .fault = ttm_bo_vm_fault, 232 .fault = ttm_bo_vm_fault,
233 .open = ttm_bo_vm_open, 233 .open = ttm_bo_vm_open,
234 .close = ttm_bo_vm_close 234 .close = ttm_bo_vm_close
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 541744d00d3e..b17007178a36 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
82 if (unlikely(ret != 0)) 82 if (unlikely(ret != 0))
83 goto out_err; 83 goto out_err;
84 84
85 ++item->refcount;
86 } 85 }
86 ++item->refcount;
87 ref->object = item->object; 87 ref->object = item->object;
88 object = item->object; 88 object = item->object;
89 mutex_unlock(&item->mutex); 89 mutex_unlock(&item->mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a55ee1a56c16..7bcb89f39ce8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -279,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
279 279
280 return ttm_tt_set_caching(ttm, state); 280 return ttm_tt_set_caching(ttm, state);
281} 281}
282EXPORT_SYMBOL(ttm_tt_set_placement_caching);
282 283
283static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) 284static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
284{ 285{
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index be34d32906bd..7d05c4bb201e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
1066 * @type: HID report type (HID_*_REPORT) 1066 * @type: HID report type (HID_*_REPORT)
1067 * @data: report contents 1067 * @data: report contents
1068 * @size: size of data parameter 1068 * @size: size of data parameter
1069 * @interrupt: called from atomic? 1069 * @interrupt: distinguish between interrupt and control transfers
1070 * 1070 *
1071 * This is data entry for lower layers. 1071 * This is data entry for lower layers.
1072 */ 1072 */
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index b05f602c051e..c40afc57fc8f 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = {
132 .input_mapping = twinhan_input_mapping, 132 .input_mapping = twinhan_input_mapping,
133}; 133};
134 134
135static int twinhan_init(void) 135static int __init twinhan_init(void)
136{ 136{
137 return hid_register_driver(&twinhan_driver); 137 return hid_register_driver(&twinhan_driver);
138} 138}
139 139
140static void twinhan_exit(void) 140static void __exit twinhan_exit(void)
141{ 141{
142 hid_unregister_driver(&twinhan_driver); 142 hid_unregister_driver(&twinhan_driver);
143} 143}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6639ea03dd..cdd136942bca 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -30,6 +30,7 @@
30#include <linux/major.h> 30#include <linux/major.h>
31#include <linux/hid.h> 31#include <linux/hid.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <linux/sched.h>
33#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
34 35
35#include <linux/hidraw.h> 36#include <linux/hidraw.h>
@@ -47,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
47 char *report; 48 char *report;
48 DECLARE_WAITQUEUE(wait, current); 49 DECLARE_WAITQUEUE(wait, current);
49 50
50 while (ret == 0) { 51 mutex_lock(&list->read_mutex);
51
52 mutex_lock(&list->read_mutex);
53 52
53 while (ret == 0) {
54 if (list->head == list->tail) { 54 if (list->head == list->tail) {
55 add_wait_queue(&list->hidraw->wait, &wait); 55 add_wait_queue(&list->hidraw->wait, &wait);
56 set_current_state(TASK_INTERRUPTIBLE); 56 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6857560144bd..700e93adeb33 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -675,7 +675,7 @@ config SENSORS_SHT15
675 675
676config SENSORS_S3C 676config SENSORS_S3C
677 tristate "S3C24XX/S3C64XX Inbuilt ADC" 677 tristate "S3C24XX/S3C64XX Inbuilt ADC"
678 depends on ARCH_S3C2410 || ARCH_S3C64XX 678 depends on ARCH_S3C2410
679 help 679 help
680 If you say yes here you get support for the on-board ADCs of 680 If you say yes here you get support for the on-board ADCs of
681 the Samsung S3C24XX or S3C64XX series of SoC 681 the Samsung S3C24XX or S3C64XX series of SoC
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index d39877a7da63..b5a95193c694 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -350,8 +350,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
350 350
351 case FAULT: 351 case FAULT:
352 /* Note - only for remote1 and remote2 */ 352 /* Note - only for remote1 and remote2 */
353 out = data->alarms & (sattr->index ? 0x8000 : 0x4000); 353 out = !!(data->alarms & (sattr->index ? 0x8000 : 0x4000));
354 out = out ? 0 : 1;
355 break; 354 break;
356 355
357 default: 356 default:
@@ -863,7 +862,7 @@ static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
863 set_pwmfreq, INPUT, 0); 862 set_pwmfreq, INPUT, 0);
864static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 863static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
865 set_pwmctrl, INPUT, 0); 864 set_pwmctrl, INPUT, 0);
866static SENSOR_DEVICE_ATTR_2(pwm1_auto_channel_temp, S_IRUGO | S_IWUSR, 865static SENSOR_DEVICE_ATTR_2(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
867 show_pwmchan, set_pwmchan, INPUT, 0); 866 show_pwmchan, set_pwmchan, INPUT, 0);
868static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 867static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
869 set_pwm, MIN, 0); 868 set_pwm, MIN, 0);
@@ -875,7 +874,7 @@ static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
875 set_pwmfreq, INPUT, 1); 874 set_pwmfreq, INPUT, 1);
876static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 875static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
877 set_pwmctrl, INPUT, 1); 876 set_pwmctrl, INPUT, 1);
878static SENSOR_DEVICE_ATTR_2(pwm2_auto_channel_temp, S_IRUGO | S_IWUSR, 877static SENSOR_DEVICE_ATTR_2(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
879 show_pwmchan, set_pwmchan, INPUT, 1); 878 show_pwmchan, set_pwmchan, INPUT, 1);
880static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 879static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
881 set_pwm, MIN, 1); 880 set_pwm, MIN, 1);
@@ -887,7 +886,7 @@ static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
887 set_pwmfreq, INPUT, 2); 886 set_pwmfreq, INPUT, 2);
888static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 887static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
889 set_pwmctrl, INPUT, 2); 888 set_pwmctrl, INPUT, 2);
890static SENSOR_DEVICE_ATTR_2(pwm3_auto_channel_temp, S_IRUGO | S_IWUSR, 889static SENSOR_DEVICE_ATTR_2(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
891 show_pwmchan, set_pwmchan, INPUT, 2); 890 show_pwmchan, set_pwmchan, INPUT, 2);
892static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 891static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
893 set_pwm, MIN, 2); 892 set_pwm, MIN, 2);
@@ -947,19 +946,19 @@ static struct attribute *adt7475_attrs[] = {
947 &sensor_dev_attr_pwm1.dev_attr.attr, 946 &sensor_dev_attr_pwm1.dev_attr.attr,
948 &sensor_dev_attr_pwm1_freq.dev_attr.attr, 947 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
949 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 948 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
950 &sensor_dev_attr_pwm1_auto_channel_temp.dev_attr.attr, 949 &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
951 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, 950 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
952 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, 951 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
953 &sensor_dev_attr_pwm2.dev_attr.attr, 952 &sensor_dev_attr_pwm2.dev_attr.attr,
954 &sensor_dev_attr_pwm2_freq.dev_attr.attr, 953 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
955 &sensor_dev_attr_pwm2_enable.dev_attr.attr, 954 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
956 &sensor_dev_attr_pwm2_auto_channel_temp.dev_attr.attr, 955 &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
957 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, 956 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
958 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr, 957 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
959 &sensor_dev_attr_pwm3.dev_attr.attr, 958 &sensor_dev_attr_pwm3.dev_attr.attr,
960 &sensor_dev_attr_pwm3_freq.dev_attr.attr, 959 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
961 &sensor_dev_attr_pwm3_enable.dev_attr.attr, 960 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
962 &sensor_dev_attr_pwm3_auto_channel_temp.dev_attr.attr, 961 &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
963 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, 962 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
964 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr, 963 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
965 NULL, 964 NULL,
@@ -1152,7 +1151,7 @@ static struct adt7475_data *adt7475_update_device(struct device *dev)
1152 } 1151 }
1153 1152
1154 /* Limits and settings, should never change update every 60 seconds */ 1153 /* Limits and settings, should never change update every 60 seconds */
1155 if (time_after(jiffies, data->limits_updated + HZ * 2) || 1154 if (time_after(jiffies, data->limits_updated + HZ * 60) ||
1156 !data->valid) { 1155 !data->valid) {
1157 data->config5 = adt7475_read(REG_CONFIG5); 1156 data->config5 = adt7475_read(REG_CONFIG5);
1158 1157
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index fe4fa29c9219..5a3ee00c0e7d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -35,18 +35,22 @@
35#define METHOD_OLD_ENUM_FAN "FSIF" 35#define METHOD_OLD_ENUM_FAN "FSIF"
36 36
37#define ATK_MUX_HWMON 0x00000006ULL 37#define ATK_MUX_HWMON 0x00000006ULL
38#define ATK_MUX_MGMT 0x00000011ULL
38 39
39#define ATK_CLASS_MASK 0xff000000ULL 40#define ATK_CLASS_MASK 0xff000000ULL
40#define ATK_CLASS_FREQ_CTL 0x03000000ULL 41#define ATK_CLASS_FREQ_CTL 0x03000000ULL
41#define ATK_CLASS_FAN_CTL 0x04000000ULL 42#define ATK_CLASS_FAN_CTL 0x04000000ULL
42#define ATK_CLASS_HWMON 0x06000000ULL 43#define ATK_CLASS_HWMON 0x06000000ULL
44#define ATK_CLASS_MGMT 0x11000000ULL
43 45
44#define ATK_TYPE_MASK 0x00ff0000ULL 46#define ATK_TYPE_MASK 0x00ff0000ULL
45#define HWMON_TYPE_VOLT 0x00020000ULL 47#define HWMON_TYPE_VOLT 0x00020000ULL
46#define HWMON_TYPE_TEMP 0x00030000ULL 48#define HWMON_TYPE_TEMP 0x00030000ULL
47#define HWMON_TYPE_FAN 0x00040000ULL 49#define HWMON_TYPE_FAN 0x00040000ULL
48 50
49#define HWMON_SENSOR_ID_MASK 0x0000ffffULL 51#define ATK_ELEMENT_ID_MASK 0x0000ffffULL
52
53#define ATK_EC_ID 0x11060004ULL
50 54
51enum atk_pack_member { 55enum atk_pack_member {
52 HWMON_PACK_FLAGS, 56 HWMON_PACK_FLAGS,
@@ -89,6 +93,9 @@ struct atk_data {
89 /* new inteface */ 93 /* new inteface */
90 acpi_handle enumerate_handle; 94 acpi_handle enumerate_handle;
91 acpi_handle read_handle; 95 acpi_handle read_handle;
96 acpi_handle write_handle;
97
98 bool disable_ec;
92 99
93 int voltage_count; 100 int voltage_count;
94 int temperature_count; 101 int temperature_count;
@@ -129,9 +136,22 @@ struct atk_sensor_data {
129 char const *acpi_name; 136 char const *acpi_name;
130}; 137};
131 138
132struct atk_acpi_buffer_u64 { 139/* Return buffer format:
133 union acpi_object buf; 140 * [0-3] "value" is valid flag
134 u64 value; 141 * [4-7] value
142 * [8- ] unknown stuff on newer mobos
143 */
144struct atk_acpi_ret_buffer {
145 u32 flags;
146 u32 value;
147 u8 data[];
148};
149
150/* Input buffer used for GITM and SITM methods */
151struct atk_acpi_input_buf {
152 u32 id;
153 u32 param1;
154 u32 param2;
135}; 155};
136 156
137static int atk_add(struct acpi_device *device); 157static int atk_add(struct acpi_device *device);
@@ -439,52 +459,147 @@ static int atk_read_value_old(struct atk_sensor_data *sensor, u64 *value)
439 return 0; 459 return 0;
440} 460}
441 461
442static int atk_read_value_new(struct atk_sensor_data *sensor, u64 *value) 462static union acpi_object *atk_ggrp(struct atk_data *data, u16 mux)
443{ 463{
444 struct atk_data *data = sensor->data;
445 struct device *dev = &data->acpi_dev->dev; 464 struct device *dev = &data->acpi_dev->dev;
465 struct acpi_buffer buf;
466 acpi_status ret;
446 struct acpi_object_list params; 467 struct acpi_object_list params;
447 struct acpi_buffer ret;
448 union acpi_object id; 468 union acpi_object id;
449 struct atk_acpi_buffer_u64 tmp; 469 union acpi_object *pack;
450 acpi_status status;
451 470
452 id.type = ACPI_TYPE_INTEGER; 471 id.type = ACPI_TYPE_INTEGER;
453 id.integer.value = sensor->id; 472 id.integer.value = mux;
454
455 params.count = 1; 473 params.count = 1;
456 params.pointer = &id; 474 params.pointer = &id;
457 475
458 tmp.buf.type = ACPI_TYPE_BUFFER; 476 buf.length = ACPI_ALLOCATE_BUFFER;
459 tmp.buf.buffer.pointer = (u8 *)&tmp.value; 477 ret = acpi_evaluate_object(data->enumerate_handle, NULL, &params, &buf);
460 tmp.buf.buffer.length = sizeof(u64); 478 if (ret != AE_OK) {
461 ret.length = sizeof(tmp); 479 dev_err(dev, "GGRP[%#x] ACPI exception: %s\n", mux,
462 ret.pointer = &tmp; 480 acpi_format_exception(ret));
481 return ERR_PTR(-EIO);
482 }
483 pack = buf.pointer;
484 if (pack->type != ACPI_TYPE_PACKAGE) {
485 /* Execution was successful, but the id was not found */
486 ACPI_FREE(pack);
487 return ERR_PTR(-ENOENT);
488 }
489
490 if (pack->package.count < 1) {
491 dev_err(dev, "GGRP[%#x] package is too small\n", mux);
492 ACPI_FREE(pack);
493 return ERR_PTR(-EIO);
494 }
495 return pack;
496}
497
498static union acpi_object *atk_gitm(struct atk_data *data, u64 id)
499{
500 struct device *dev = &data->acpi_dev->dev;
501 struct atk_acpi_input_buf buf;
502 union acpi_object tmp;
503 struct acpi_object_list params;
504 struct acpi_buffer ret;
505 union acpi_object *obj;
506 acpi_status status;
507
508 buf.id = id;
509 buf.param1 = 0;
510 buf.param2 = 0;
463 511
512 tmp.type = ACPI_TYPE_BUFFER;
513 tmp.buffer.pointer = (u8 *)&buf;
514 tmp.buffer.length = sizeof(buf);
515
516 params.count = 1;
517 params.pointer = (void *)&tmp;
518
519 ret.length = ACPI_ALLOCATE_BUFFER;
464 status = acpi_evaluate_object_typed(data->read_handle, NULL, &params, 520 status = acpi_evaluate_object_typed(data->read_handle, NULL, &params,
465 &ret, ACPI_TYPE_BUFFER); 521 &ret, ACPI_TYPE_BUFFER);
466 if (status != AE_OK) { 522 if (status != AE_OK) {
467 dev_warn(dev, "%s: ACPI exception: %s\n", __func__, 523 dev_warn(dev, "GITM[%#llx] ACPI exception: %s\n", id,
468 acpi_format_exception(status)); 524 acpi_format_exception(status));
469 return -EIO; 525 return ERR_PTR(-EIO);
526 }
527 obj = ret.pointer;
528
529 /* Sanity check */
530 if (obj->buffer.length < 8) {
531 dev_warn(dev, "Unexpected ASBF length: %u\n",
532 obj->buffer.length);
533 ACPI_FREE(obj);
534 return ERR_PTR(-EIO);
470 } 535 }
536 return obj;
537}
471 538
472 /* Return buffer format: 539static union acpi_object *atk_sitm(struct atk_data *data,
473 * [0-3] "value" is valid flag 540 struct atk_acpi_input_buf *buf)
474 * [4-7] value 541{
475 */ 542 struct device *dev = &data->acpi_dev->dev;
476 if (!(tmp.value & 0xffffffff)) { 543 struct acpi_object_list params;
544 union acpi_object tmp;
545 struct acpi_buffer ret;
546 union acpi_object *obj;
547 acpi_status status;
548
549 tmp.type = ACPI_TYPE_BUFFER;
550 tmp.buffer.pointer = (u8 *)buf;
551 tmp.buffer.length = sizeof(*buf);
552
553 params.count = 1;
554 params.pointer = &tmp;
555
556 ret.length = ACPI_ALLOCATE_BUFFER;
557 status = acpi_evaluate_object_typed(data->write_handle, NULL, &params,
558 &ret, ACPI_TYPE_BUFFER);
559 if (status != AE_OK) {
560 dev_warn(dev, "SITM[%#x] ACPI exception: %s\n", buf->id,
561 acpi_format_exception(status));
562 return ERR_PTR(-EIO);
563 }
564 obj = ret.pointer;
565
566 /* Sanity check */
567 if (obj->buffer.length < 8) {
568 dev_warn(dev, "Unexpected ASBF length: %u\n",
569 obj->buffer.length);
570 ACPI_FREE(obj);
571 return ERR_PTR(-EIO);
572 }
573 return obj;
574}
575
576static int atk_read_value_new(struct atk_sensor_data *sensor, u64 *value)
577{
578 struct atk_data *data = sensor->data;
579 struct device *dev = &data->acpi_dev->dev;
580 union acpi_object *obj;
581 struct atk_acpi_ret_buffer *buf;
582 int err = 0;
583
584 obj = atk_gitm(data, sensor->id);
585 if (IS_ERR(obj))
586 return PTR_ERR(obj);
587
588 buf = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
589 if (buf->flags == 0) {
477 /* The reading is not valid, possible causes: 590 /* The reading is not valid, possible causes:
478 * - sensor failure 591 * - sensor failure
479 * - enumeration was FUBAR (and we didn't notice) 592 * - enumeration was FUBAR (and we didn't notice)
480 */ 593 */
481 dev_info(dev, "Failure: %#llx\n", tmp.value); 594 dev_warn(dev, "Read failed, sensor = %#llx\n", sensor->id);
482 return -EIO; 595 err = -EIO;
596 goto out;
483 } 597 }
484 598
485 *value = (tmp.value & 0xffffffff00000000ULL) >> 32; 599 *value = buf->value;
486 600out:
487 return 0; 601 ACPI_FREE(obj);
602 return err;
488} 603}
489 604
490static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) 605static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
@@ -713,43 +828,141 @@ cleanup:
713 return ret; 828 return ret;
714} 829}
715 830
716static int atk_enumerate_new_hwmon(struct atk_data *data) 831static int atk_ec_present(struct atk_data *data)
717{ 832{
718 struct device *dev = &data->acpi_dev->dev; 833 struct device *dev = &data->acpi_dev->dev;
719 struct acpi_buffer buf;
720 acpi_status ret;
721 struct acpi_object_list params;
722 union acpi_object id;
723 union acpi_object *pack; 834 union acpi_object *pack;
724 int err; 835 union acpi_object *ec;
836 int ret;
725 int i; 837 int i;
726 838
727 dev_dbg(dev, "Enumerating hwmon sensors\n"); 839 pack = atk_ggrp(data, ATK_MUX_MGMT);
840 if (IS_ERR(pack)) {
841 if (PTR_ERR(pack) == -ENOENT) {
842 /* The MGMT class does not exists - that's ok */
843 dev_dbg(dev, "Class %#llx not found\n", ATK_MUX_MGMT);
844 return 0;
845 }
846 return PTR_ERR(pack);
847 }
728 848
729 id.type = ACPI_TYPE_INTEGER; 849 /* Search the EC */
730 id.integer.value = ATK_MUX_HWMON; 850 ec = NULL;
731 params.count = 1; 851 for (i = 0; i < pack->package.count; i++) {
732 params.pointer = &id; 852 union acpi_object *obj = &pack->package.elements[i];
853 union acpi_object *id;
733 854
734 buf.length = ACPI_ALLOCATE_BUFFER; 855 if (obj->type != ACPI_TYPE_PACKAGE)
735 ret = acpi_evaluate_object_typed(data->enumerate_handle, NULL, &params, 856 continue;
736 &buf, ACPI_TYPE_PACKAGE); 857
737 if (ret != AE_OK) { 858 id = &obj->package.elements[0];
738 dev_warn(dev, METHOD_ENUMERATE ": ACPI exception: %s\n", 859 if (id->type != ACPI_TYPE_INTEGER)
739 acpi_format_exception(ret)); 860 continue;
740 return -ENODEV; 861
862 if (id->integer.value == ATK_EC_ID) {
863 ec = obj;
864 break;
865 }
741 } 866 }
742 867
743 /* Result must be a package */ 868 ret = (ec != NULL);
744 pack = buf.pointer; 869 if (!ret)
870 /* The system has no EC */
871 dev_dbg(dev, "EC not found\n");
745 872
746 if (pack->package.count < 1) { 873 ACPI_FREE(pack);
747 dev_dbg(dev, "%s: hwmon package is too small: %d\n", __func__, 874 return ret;
748 pack->package.count); 875}
749 err = -EINVAL; 876
750 goto out; 877static int atk_ec_enabled(struct atk_data *data)
878{
879 struct device *dev = &data->acpi_dev->dev;
880 union acpi_object *obj;
881 struct atk_acpi_ret_buffer *buf;
882 int err;
883
884 obj = atk_gitm(data, ATK_EC_ID);
885 if (IS_ERR(obj)) {
886 dev_err(dev, "Unable to query EC status\n");
887 return PTR_ERR(obj);
888 }
889 buf = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
890
891 if (buf->flags == 0) {
892 dev_err(dev, "Unable to query EC status\n");
893 err = -EIO;
894 } else {
895 err = (buf->value != 0);
896 dev_dbg(dev, "EC is %sabled\n",
897 err ? "en" : "dis");
898 }
899
900 ACPI_FREE(obj);
901 return err;
902}
903
904static int atk_ec_ctl(struct atk_data *data, int enable)
905{
906 struct device *dev = &data->acpi_dev->dev;
907 union acpi_object *obj;
908 struct atk_acpi_input_buf sitm;
909 struct atk_acpi_ret_buffer *ec_ret;
910 int err = 0;
911
912 sitm.id = ATK_EC_ID;
913 sitm.param1 = enable;
914 sitm.param2 = 0;
915
916 obj = atk_sitm(data, &sitm);
917 if (IS_ERR(obj)) {
918 dev_err(dev, "Failed to %sable the EC\n",
919 enable ? "en" : "dis");
920 return PTR_ERR(obj);
921 }
922 ec_ret = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
923 if (ec_ret->flags == 0) {
924 dev_err(dev, "Failed to %sable the EC\n",
925 enable ? "en" : "dis");
926 err = -EIO;
927 } else {
928 dev_info(dev, "EC %sabled\n",
929 enable ? "en" : "dis");
930 }
931
932 ACPI_FREE(obj);
933 return err;
934}
935
936static int atk_enumerate_new_hwmon(struct atk_data *data)
937{
938 struct device *dev = &data->acpi_dev->dev;
939 union acpi_object *pack;
940 int err;
941 int i;
942
943 err = atk_ec_present(data);
944 if (err < 0)
945 return err;
946 if (err) {
947 err = atk_ec_enabled(data);
948 if (err < 0)
949 return err;
950 /* If the EC was disabled we will disable it again on unload */
951 data->disable_ec = err;
952
953 err = atk_ec_ctl(data, 1);
954 if (err) {
955 data->disable_ec = false;
956 return err;
957 }
751 } 958 }
752 959
960 dev_dbg(dev, "Enumerating hwmon sensors\n");
961
962 pack = atk_ggrp(data, ATK_MUX_HWMON);
963 if (IS_ERR(pack))
964 return PTR_ERR(pack);
965
753 for (i = 0; i < pack->package.count; i++) { 966 for (i = 0; i < pack->package.count; i++) {
754 union acpi_object *obj = &pack->package.elements[i]; 967 union acpi_object *obj = &pack->package.elements[i];
755 968
@@ -758,8 +971,7 @@ static int atk_enumerate_new_hwmon(struct atk_data *data)
758 971
759 err = data->voltage_count + data->temperature_count + data->fan_count; 972 err = data->voltage_count + data->temperature_count + data->fan_count;
760 973
761out: 974 ACPI_FREE(pack);
762 ACPI_FREE(buf.pointer);
763 return err; 975 return err;
764} 976}
765 977
@@ -895,6 +1107,15 @@ static int atk_check_new_if(struct atk_data *data)
895 } 1107 }
896 data->read_handle = ret; 1108 data->read_handle = ret;
897 1109
1110 /* De-multiplexer (write) */
1111 status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret);
1112 if (status != AE_OK) {
1113 dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
1114 acpi_format_exception(status));
1115 return -ENODEV;
1116 }
1117 data->write_handle = ret;
1118
898 return 0; 1119 return 0;
899} 1120}
900 1121
@@ -915,6 +1136,7 @@ static int atk_add(struct acpi_device *device)
915 data->acpi_dev = device; 1136 data->acpi_dev = device;
916 data->atk_handle = device->handle; 1137 data->atk_handle = device->handle;
917 INIT_LIST_HEAD(&data->sensor_list); 1138 INIT_LIST_HEAD(&data->sensor_list);
1139 data->disable_ec = false;
918 1140
919 buf.length = ACPI_ALLOCATE_BUFFER; 1141 buf.length = ACPI_ALLOCATE_BUFFER;
920 ret = acpi_evaluate_object_typed(data->atk_handle, BOARD_ID, NULL, 1142 ret = acpi_evaluate_object_typed(data->atk_handle, BOARD_ID, NULL,
@@ -973,6 +1195,8 @@ static int atk_add(struct acpi_device *device)
973cleanup: 1195cleanup:
974 atk_free_sensors(data); 1196 atk_free_sensors(data);
975out: 1197out:
1198 if (data->disable_ec)
1199 atk_ec_ctl(data, 0);
976 kfree(data); 1200 kfree(data);
977 return err; 1201 return err;
978} 1202}
@@ -988,6 +1212,11 @@ static int atk_remove(struct acpi_device *device, int type)
988 atk_free_sensors(data); 1212 atk_free_sensors(data);
989 hwmon_device_unregister(data->hwmon_dev); 1213 hwmon_device_unregister(data->hwmon_dev);
990 1214
1215 if (data->disable_ec) {
1216 if (atk_ec_ctl(data, 0))
1217 dev_err(&device->dev, "Failed to disable EC\n");
1218 }
1219
991 kfree(data); 1220 kfree(data);
992 1221
993 return 0; 1222 return 0;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 2c2cb1ec94c5..27d62574284f 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -572,7 +572,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
572 572
573 /* Sample register contents every 1 sec */ 573 /* Sample register contents every 1 sec */
574 if (time_after(jiffies, data->last_update + HZ) || !data->valid) { 574 if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
575 if (data->type != sch5027) { 575 if (data->type == dme1737) {
576 data->vid = dme1737_read(data, DME1737_REG_VID) & 576 data->vid = dme1737_read(data, DME1737_REG_VID) &
577 0x3f; 577 0x3f;
578 } 578 }
@@ -1621,9 +1621,6 @@ static struct attribute *dme1737_misc_attr[] = {
1621 &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, 1621 &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
1622 &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, 1622 &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
1623 &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, 1623 &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
1624 /* Misc */
1625 &dev_attr_vrm.attr,
1626 &dev_attr_cpu0_vid.attr,
1627 NULL 1624 NULL
1628}; 1625};
1629 1626
@@ -1631,6 +1628,18 @@ static const struct attribute_group dme1737_misc_group = {
1631 .attrs = dme1737_misc_attr, 1628 .attrs = dme1737_misc_attr,
1632}; 1629};
1633 1630
1631/* The following struct holds VID-related attributes. Their creation
1632 depends on the chip type which is determined during module load. */
1633static struct attribute *dme1737_vid_attr[] = {
1634 &dev_attr_vrm.attr,
1635 &dev_attr_cpu0_vid.attr,
1636 NULL
1637};
1638
1639static const struct attribute_group dme1737_vid_group = {
1640 .attrs = dme1737_vid_attr,
1641};
1642
1634/* The following structs hold the PWM attributes, some of which are optional. 1643/* The following structs hold the PWM attributes, some of which are optional.
1635 * Their creation depends on the chip configuration which is determined during 1644 * Their creation depends on the chip configuration which is determined during
1636 * module load. */ 1645 * module load. */
@@ -1902,6 +1911,9 @@ static void dme1737_remove_files(struct device *dev)
1902 if (data->type != sch5027) { 1911 if (data->type != sch5027) {
1903 sysfs_remove_group(&dev->kobj, &dme1737_misc_group); 1912 sysfs_remove_group(&dev->kobj, &dme1737_misc_group);
1904 } 1913 }
1914 if (data->type == dme1737) {
1915 sysfs_remove_group(&dev->kobj, &dme1737_vid_group);
1916 }
1905 1917
1906 sysfs_remove_group(&dev->kobj, &dme1737_group); 1918 sysfs_remove_group(&dev->kobj, &dme1737_group);
1907 1919
@@ -1933,6 +1945,13 @@ static int dme1737_create_files(struct device *dev)
1933 goto exit_remove; 1945 goto exit_remove;
1934 } 1946 }
1935 1947
1948 /* Create VID-related sysfs attributes */
1949 if ((data->type == dme1737) &&
1950 (err = sysfs_create_group(&dev->kobj,
1951 &dme1737_vid_group))) {
1952 goto exit_remove;
1953 }
1954
1936 /* Create fan sysfs attributes */ 1955 /* Create fan sysfs attributes */
1937 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { 1956 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
1938 if (data->has_fan & (1 << ix)) { 1957 if (data->has_fan & (1 << ix)) {
@@ -2127,7 +2146,7 @@ static int dme1737_init_device(struct device *dev)
2127 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ 2146 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
2128 2147
2129 /* Set VRM */ 2148 /* Set VRM */
2130 if (data->type != sch5027) { 2149 if (data->type == dme1737) {
2131 data->vrm = vid_which_vrm(); 2150 data->vrm = vid_which_vrm();
2132 } 2151 }
2133 2152
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index ea955edde87e..da1b1f9488af 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -819,7 +819,7 @@ static int watchdog_release(struct inode *inode, struct file *filp)
819static ssize_t watchdog_write(struct file *filp, const char __user *buf, 819static ssize_t watchdog_write(struct file *filp, const char __user *buf,
820 size_t count, loff_t *offset) 820 size_t count, loff_t *offset)
821{ 821{
822 size_t ret; 822 int ret;
823 struct fschmd_data *data = filp->private_data; 823 struct fschmd_data *data = filp->private_data;
824 824
825 if (count) { 825 if (count) {
@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
915 return ret; 915 return ret;
916} 916}
917 917
918static struct file_operations watchdog_fops = { 918static const struct file_operations watchdog_fops = {
919 .owner = THIS_MODULE, 919 .owner = THIS_MODULE,
920 .llseek = no_llseek, 920 .llseek = no_llseek,
921 .open = watchdog_open, 921 .open = watchdog_open,
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 6679854c85b0..be475e844c2a 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -197,11 +197,13 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
197 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left), 197 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
198 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted), 198 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
199 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd), 199 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
200 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd), 200 AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd),
201 AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap),
201 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right), 202 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
202 AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted), 203 AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
203 AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted), 204 AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
204 AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right), 205 AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
206 AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
205 /* Intel-based HP Pavilion dv5 */ 207 /* Intel-based HP Pavilion dv5 */
206 AXIS_DMI_MATCH2("HPDV5_I", 208 AXIS_DMI_MATCH2("HPDV5_I",
207 PRODUCT_NAME, "HP Pavilion dv5", 209 PRODUCT_NAME, "HP Pavilion dv5",
@@ -214,6 +216,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
214 y_inverted), 216 y_inverted),
215 AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), 217 AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
216 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 218 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
219 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
217 { NULL, } 220 { NULL, }
218/* Laptop models without axis info (yet): 221/* Laptop models without axis info (yet):
219 * "NC6910" "HP Compaq 6910" 222 * "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index ffeb2a10e1a7..a3749cb0f181 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -1028,12 +1028,11 @@ static int __init it87_find(unsigned short *address,
1028 chip_type, *address, sio_data->revision); 1028 chip_type, *address, sio_data->revision);
1029 1029
1030 /* Read GPIO config and VID value from LDN 7 (GPIO) */ 1030 /* Read GPIO config and VID value from LDN 7 (GPIO) */
1031 if (chip_type != IT8705F_DEVID) { 1031 if (sio_data->type != it87) {
1032 int reg; 1032 int reg;
1033 1033
1034 superio_select(GPIO); 1034 superio_select(GPIO);
1035 if ((chip_type == it8718) || 1035 if (sio_data->type == it8718 || sio_data->type == it8720)
1036 (chip_type == it8720))
1037 sio_data->vid_value = superio_inb(IT87_SIO_VID_REG); 1036 sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
1038 1037
1039 reg = superio_inb(IT87_SIO_PINX2_REG); 1038 reg = superio_inb(IT87_SIO_PINX2_REG);
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index ecd739534f6a..82b16808a274 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
83 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 83 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
84 lis3lv02d_joystick_disable(); 84 lis3lv02d_joystick_disable();
85 lis3lv02d_poweroff(lis3); 85 lis3lv02d_poweroff(lis3);
86 return 0; 86
87 return lis3lv02d_remove_fs(&lis3_dev);
87} 88}
88 89
89#ifdef CONFIG_PM 90#ifdef CONFIG_PM
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 6c9a04136e0a..00d975eb5b83 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -20,11 +20,6 @@
20#include <linux/hwmon.h> 20#include <linux/hwmon.h>
21#include <linux/hwmon-sysfs.h> 21#include <linux/hwmon-sysfs.h>
22 22
23static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
24
25/* Insmod parameters */
26I2C_CLIENT_INSMOD_1(ltc4215);
27
28/* Here are names of the chip's registers (a.k.a. commands) */ 23/* Here are names of the chip's registers (a.k.a. commands) */
29enum ltc4215_cmd { 24enum ltc4215_cmd {
30 LTC4215_CONTROL = 0x00, /* rw */ 25 LTC4215_CONTROL = 0x00, /* rw */
@@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = {
246static int ltc4215_probe(struct i2c_client *client, 241static int ltc4215_probe(struct i2c_client *client,
247 const struct i2c_device_id *id) 242 const struct i2c_device_id *id)
248{ 243{
244 struct i2c_adapter *adapter = client->adapter;
249 struct ltc4215_data *data; 245 struct ltc4215_data *data;
250 int ret; 246 int ret;
251 247
248 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
249 return -ENODEV;
250
252 data = kzalloc(sizeof(*data), GFP_KERNEL); 251 data = kzalloc(sizeof(*data), GFP_KERNEL);
253 if (!data) { 252 if (!data) {
254 ret = -ENOMEM; 253 ret = -ENOMEM;
@@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client)
294 return 0; 293 return 0;
295} 294}
296 295
297static int ltc4215_detect(struct i2c_client *client,
298 int kind,
299 struct i2c_board_info *info)
300{
301 struct i2c_adapter *adapter = client->adapter;
302
303 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
304 return -ENODEV;
305
306 if (kind < 0) { /* probed detection - check the chip type */
307 s32 v; /* 8 bits from the chip, or -ERRNO */
308
309 /*
310 * Register 0x01 bit b7 is reserved, expect 0
311 * Register 0x03 bit b6 and b7 are reserved, expect 0
312 */
313 v = i2c_smbus_read_byte_data(client, LTC4215_ALERT);
314 if (v < 0 || (v & (1 << 7)) != 0)
315 return -ENODEV;
316
317 v = i2c_smbus_read_byte_data(client, LTC4215_FAULT);
318 if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0)
319 return -ENODEV;
320 }
321
322 strlcpy(info->type, "ltc4215", I2C_NAME_SIZE);
323 dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n",
324 kind < 0 ? "probed" : "forced",
325 client->addr);
326
327 return 0;
328}
329
330static const struct i2c_device_id ltc4215_id[] = { 296static const struct i2c_device_id ltc4215_id[] = {
331 { "ltc4215", ltc4215 }, 297 { "ltc4215", 0 },
332 { } 298 { }
333}; 299};
334MODULE_DEVICE_TABLE(i2c, ltc4215_id); 300MODULE_DEVICE_TABLE(i2c, ltc4215_id);
335 301
336/* This is the driver that will be inserted */ 302/* This is the driver that will be inserted */
337static struct i2c_driver ltc4215_driver = { 303static struct i2c_driver ltc4215_driver = {
338 .class = I2C_CLASS_HWMON,
339 .driver = { 304 .driver = {
340 .name = "ltc4215", 305 .name = "ltc4215",
341 }, 306 },
342 .probe = ltc4215_probe, 307 .probe = ltc4215_probe,
343 .remove = ltc4215_remove, 308 .remove = ltc4215_remove,
344 .id_table = ltc4215_id, 309 .id_table = ltc4215_id,
345 .detect = ltc4215_detect,
346 .address_data = &addr_data,
347}; 310};
348 311
349static int __init ltc4215_init(void) 312static int __init ltc4215_init(void)
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index e38964333612..65c232a9d0c5 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -22,15 +22,6 @@
22#include <linux/hwmon.h> 22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h> 23#include <linux/hwmon-sysfs.h>
24 24
25/* Valid addresses are 0x20 - 0x3f
26 *
27 * For now, we do not probe, since some of these addresses
28 * are known to be unfriendly to probing */
29static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
30
31/* Insmod parameters */
32I2C_CLIENT_INSMOD_1(ltc4245);
33
34/* Here are names of the chip's registers (a.k.a. commands) */ 25/* Here are names of the chip's registers (a.k.a. commands) */
35enum ltc4245_cmd { 26enum ltc4245_cmd {
36 LTC4245_STATUS = 0x00, /* readonly */ 27 LTC4245_STATUS = 0x00, /* readonly */
@@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = {
369static int ltc4245_probe(struct i2c_client *client, 360static int ltc4245_probe(struct i2c_client *client,
370 const struct i2c_device_id *id) 361 const struct i2c_device_id *id)
371{ 362{
363 struct i2c_adapter *adapter = client->adapter;
372 struct ltc4245_data *data; 364 struct ltc4245_data *data;
373 int ret; 365 int ret;
374 366
367 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
368 return -ENODEV;
369
375 data = kzalloc(sizeof(*data), GFP_KERNEL); 370 data = kzalloc(sizeof(*data), GFP_KERNEL);
376 if (!data) { 371 if (!data) {
377 ret = -ENOMEM; 372 ret = -ENOMEM;
@@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client)
418 return 0; 413 return 0;
419} 414}
420 415
421/* Check that some bits in a control register appear at all possible
422 * locations without changing value
423 *
424 * @client: the i2c client to use
425 * @reg: the register to read
426 * @bits: the bits to check (0xff checks all bits,
427 * 0x03 checks only the last two bits)
428 *
429 * return -ERRNO if the register read failed
430 * return -ENODEV if the register value doesn't stay constant at all
431 * possible addresses
432 *
433 * return 0 for success
434 */
435static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits)
436{
437 int i;
438 s32 v, voff1, voff2;
439
440 /* Read register and check for error */
441 v = i2c_smbus_read_byte_data(client, reg);
442 if (v < 0)
443 return v;
444
445 v &= bits;
446
447 for (i = 0x00; i < 0xff; i += 0x20) {
448
449 voff1 = i2c_smbus_read_byte_data(client, reg + i);
450 if (voff1 < 0)
451 return voff1;
452
453 voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08);
454 if (voff2 < 0)
455 return voff2;
456
457 voff1 &= bits;
458 voff2 &= bits;
459
460 if (v != voff1 || v != voff2)
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int ltc4245_detect(struct i2c_client *client,
468 int kind,
469 struct i2c_board_info *info)
470{
471 struct i2c_adapter *adapter = client->adapter;
472
473 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
474 return -ENODEV;
475
476 if (kind < 0) { /* probed detection - check the chip type */
477 s32 v; /* 8 bits from the chip, or -ERRNO */
478
479 /* Chip registers 0x00-0x07 are control registers
480 * Chip registers 0x10-0x1f are data registers
481 *
482 * Address bits b7-b5 are ignored. This makes the chip "repeat"
483 * in steps of 0x20. Any control registers should appear with
484 * the same values across all duplicated addresses.
485 *
486 * Register 0x02 bit b2 is reserved, expect 0
487 * Register 0x07 bits b7 to b4 are reserved, expect 0
488 *
489 * Registers 0x01, 0x02 are control registers and should not
490 * change on their own.
491 *
492 * Register 0x06 bits b6 and b7 are control bits, and should
493 * not change on their own.
494 *
495 * Register 0x07 bits b3 to b0 are control bits, and should
496 * not change on their own.
497 */
498
499 /* read register 0x02 reserved bit, expect 0 */
500 v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL);
501 if (v < 0 || (v & 0x04) != 0)
502 return -ENODEV;
503
504 /* read register 0x07 reserved bits, expect 0 */
505 v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR);
506 if (v < 0 || (v & 0xf0) != 0)
507 return -ENODEV;
508
509 /* check that the alert register appears at all locations */
510 if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff))
511 return -ENODEV;
512
513 /* check that the control register appears at all locations */
514 if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff))
515 return -ENODEV;
516
517 /* check that register 0x06 bits b6 and b7 stay constant */
518 if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0))
519 return -ENODEV;
520
521 /* check that register 0x07 bits b3-b0 stay constant */
522 if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f))
523 return -ENODEV;
524 }
525
526 strlcpy(info->type, "ltc4245", I2C_NAME_SIZE);
527 dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n",
528 kind < 0 ? "probed" : "forced",
529 client->addr);
530
531 return 0;
532}
533
534static const struct i2c_device_id ltc4245_id[] = { 416static const struct i2c_device_id ltc4245_id[] = {
535 { "ltc4245", ltc4245 }, 417 { "ltc4245", 0 },
536 { } 418 { }
537}; 419};
538MODULE_DEVICE_TABLE(i2c, ltc4245_id); 420MODULE_DEVICE_TABLE(i2c, ltc4245_id);
539 421
540/* This is the driver that will be inserted */ 422/* This is the driver that will be inserted */
541static struct i2c_driver ltc4245_driver = { 423static struct i2c_driver ltc4245_driver = {
542 .class = I2C_CLASS_HWMON,
543 .driver = { 424 .driver = {
544 .name = "ltc4245", 425 .name = "ltc4245",
545 }, 426 },
546 .probe = ltc4245_probe, 427 .probe = ltc4245_probe,
547 .remove = ltc4245_remove, 428 .remove = ltc4245_remove,
548 .id_table = ltc4245_id, 429 .id_table = ltc4245_id,
549 .detect = ltc4245_detect,
550 .address_data = &addr_data,
551}; 430};
552 431
553static int __init ltc4245_init(void) 432static int __init ltc4245_init(void)
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 3a524f2fe493..71835412529f 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -323,14 +323,21 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
323 } 323 }
324 324
325 for (i = 0; i < ARRAY_SIZE(pdata->in); i++) { 325 for (i = 0; i < ARRAY_SIZE(pdata->in); i++) {
326 if (!pdata->in[i]) 326 struct s3c24xx_adc_hwmon_incfg *cfg = pdata->in[i];
327
328 if (!cfg)
327 continue; 329 continue;
328 330
329 if (pdata->in[i]->mult >= 0x10000) 331 if (cfg->mult >= 0x10000)
330 dev_warn(&dev->dev, 332 dev_warn(&dev->dev,
331 "channel %d multiplier too large\n", 333 "channel %d multiplier too large\n",
332 i); 334 i);
333 335
336 if (cfg->divider == 0) {
337 dev_err(&dev->dev, "channel %d divider zero\n", i);
338 continue;
339 }
340
334 ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i], 341 ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i],
335 &hwmon->attrs[i], i); 342 &hwmon->attrs[i], i);
336 if (ret) { 343 if (ret) {
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 303c02694c3c..ebe38b680ee3 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -30,6 +30,7 @@
30#include <linux/hwmon-sysfs.h> 30#include <linux/hwmon-sysfs.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/sched.h>
33#include <linux/delay.h> 34#include <linux/delay.h>
34#include <linux/jiffies.h> 35#include <linux/jiffies.h>
35#include <linux/err.h> 36#include <linux/err.h>
@@ -622,7 +623,12 @@ static int __devexit sht15_remove(struct platform_device *pdev)
622} 623}
623 624
624 625
625static struct platform_driver sht_drivers[] = { 626/*
627 * sht_drivers simultaneously refers to __devinit and __devexit function
628 * which causes spurious section mismatch warning. So use __refdata to
629 * get rid from this.
630 */
631static struct platform_driver __refdata sht_drivers[] = {
626 { 632 {
627 .driver = { 633 .driver = {
628 .name = "sht10", 634 .name = "sht10",
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6bedd2fcfc15..e8fe7f169e25 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -128,7 +128,7 @@ config I2C_PIIX4
128 ATI SB600 128 ATI SB600
129 ATI SB700 129 ATI SB700
130 ATI SB800 130 ATI SB800
131 AMD SB900 131 AMD Hudson-2
132 Serverworks OSB4 132 Serverworks OSB4
133 Serverworks CSB5 133 Serverworks CSB5
134 Serverworks CSB6 134 Serverworks CSB6
@@ -477,8 +477,8 @@ config I2C_PNX
477 will be called i2c-pnx. 477 will be called i2c-pnx.
478 478
479config I2C_PXA 479config I2C_PXA
480 tristate "Intel PXA2XX I2C adapter (EXPERIMENTAL)" 480 tristate "Intel PXA2XX I2C adapter"
481 depends on EXPERIMENTAL && ARCH_PXA 481 depends on ARCH_PXA || ARCH_MMP
482 help 482 help
483 If you have devices in the PXA I2C bus, say yes to this option. 483 If you have devices in the PXA I2C bus, say yes to this option.
484 This driver can also be built as a module. If so, the module 484 This driver can also be built as a module. If so, the module
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index f7d6fe9c49ba..8f0b90ef8c76 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE, 364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE,
365 amd756_driver.name); 365 amd756_driver.name);
366 if (error) 366 if (error)
367 return error; 367 return -ENODEV;
368 368
369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { 369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) {
370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", 370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n",
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a7c59908c457..5b4ad86ca166 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
376 smbus->size = pci_resource_len(dev, 0); 376 smbus->size = pci_resource_len(dev, 0);
377 377
378 error = acpi_check_resource_conflict(&dev->resource[0]); 378 error = acpi_check_resource_conflict(&dev->resource[0]);
379 if (error) 379 if (error) {
380 error = -ENODEV;
380 goto out_kfree; 381 goto out_kfree;
382 }
381 383
382 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { 384 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) {
383 error = -EBUSY; 385 error = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9d2c5adf5d4f..55edcfe5b851 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
732 } 732 }
733 733
734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); 734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
735 if (err) 735 if (err) {
736 err = -ENODEV;
736 goto exit; 737 goto exit;
738 }
737 739
738 err = pci_request_region(dev, SMBBAR, i801_driver.name); 740 err = pci_request_region(dev, SMBBAR, i801_driver.name);
739 if (err) { 741 if (err) {
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 4afba3ec2a61..e3654d683e15 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -120,19 +120,26 @@ struct imx_i2c_struct {
120 wait_queue_head_t queue; 120 wait_queue_head_t queue;
121 unsigned long i2csr; 121 unsigned long i2csr;
122 unsigned int disable_delay; 122 unsigned int disable_delay;
123 int stopped;
124 unsigned int ifdr; /* IMX_I2C_IFDR */
123}; 125};
124 126
125/** Functions for IMX I2C adapter driver *************************************** 127/** Functions for IMX I2C adapter driver ***************************************
126*******************************************************************************/ 128*******************************************************************************/
127 129
128static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx) 130static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
129{ 131{
130 unsigned long orig_jiffies = jiffies; 132 unsigned long orig_jiffies = jiffies;
133 unsigned int temp;
131 134
132 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 135 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
133 136
134 /* wait for bus not busy */ 137 while (1) {
135 while (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_IBB) { 138 temp = readb(i2c_imx->base + IMX_I2C_I2SR);
139 if (for_busy && (temp & I2SR_IBB))
140 break;
141 if (!for_busy && !(temp & I2SR_IBB))
142 break;
136 if (signal_pending(current)) { 143 if (signal_pending(current)) {
137 dev_dbg(&i2c_imx->adapter.dev, 144 dev_dbg(&i2c_imx->adapter.dev,
138 "<%s> I2C Interrupted\n", __func__); 145 "<%s> I2C Interrupted\n", __func__);
@@ -179,41 +186,62 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
179 return 0; 186 return 0;
180} 187}
181 188
182static void i2c_imx_start(struct imx_i2c_struct *i2c_imx) 189static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
183{ 190{
184 unsigned int temp = 0; 191 unsigned int temp = 0;
192 int result;
185 193
186 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 194 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
187 195
196 clk_enable(i2c_imx->clk);
197 writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR);
188 /* Enable I2C controller */ 198 /* Enable I2C controller */
199 writeb(0, i2c_imx->base + IMX_I2C_I2SR);
189 writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); 200 writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR);
201
202 /* Wait controller to be stable */
203 udelay(50);
204
190 /* Start I2C transaction */ 205 /* Start I2C transaction */
191 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 206 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
192 temp |= I2CR_MSTA; 207 temp |= I2CR_MSTA;
193 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 208 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
209 result = i2c_imx_bus_busy(i2c_imx, 1);
210 if (result)
211 return result;
212 i2c_imx->stopped = 0;
213
194 temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; 214 temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
195 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 215 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
216 return result;
196} 217}
197 218
198static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) 219static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
199{ 220{
200 unsigned int temp = 0; 221 unsigned int temp = 0;
201 222
202 /* Stop I2C transaction */ 223 if (!i2c_imx->stopped) {
203 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 224 /* Stop I2C transaction */
204 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 225 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
205 temp &= ~I2CR_MSTA; 226 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
206 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 227 temp &= ~(I2CR_MSTA | I2CR_MTX);
207 /* setup chip registers to defaults */ 228 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
208 writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); 229 i2c_imx->stopped = 1;
209 writeb(0, i2c_imx->base + IMX_I2C_I2SR); 230 }
210 /* 231 if (cpu_is_mx1()) {
211 * This delay caused by an i.MXL hardware bug. 232 /*
212 * If no (or too short) delay, no "STOP" bit will be generated. 233 * This delay caused by an i.MXL hardware bug.
213 */ 234 * If no (or too short) delay, no "STOP" bit will be generated.
214 udelay(i2c_imx->disable_delay); 235 */
236 udelay(i2c_imx->disable_delay);
237 }
238
239 if (!i2c_imx->stopped)
240 i2c_imx_bus_busy(i2c_imx, 0);
241
215 /* Disable I2C controller */ 242 /* Disable I2C controller */
216 writeb(0, i2c_imx->base + IMX_I2C_I2CR); 243 writeb(0, i2c_imx->base + IMX_I2C_I2CR);
244 clk_disable(i2c_imx->clk);
217} 245}
218 246
219static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, 247static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
@@ -233,8 +261,8 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
233 else 261 else
234 for (i = 0; i2c_clk_div[i][0] < div; i++); 262 for (i = 0; i2c_clk_div[i][0] < div; i++);
235 263
236 /* Write divider value to register */ 264 /* Store divider value */
237 writeb(i2c_clk_div[i][1], i2c_imx->base + IMX_I2C_IFDR); 265 i2c_imx->ifdr = i2c_clk_div[i][1];
238 266
239 /* 267 /*
240 * There dummy delay is calculated. 268 * There dummy delay is calculated.
@@ -341,11 +369,15 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
341 if (result) 369 if (result)
342 return result; 370 return result;
343 if (i == (msgs->len - 1)) { 371 if (i == (msgs->len - 1)) {
372 /* It must generate STOP before read I2DR to prevent
373 controller from generating another clock cycle */
344 dev_dbg(&i2c_imx->adapter.dev, 374 dev_dbg(&i2c_imx->adapter.dev,
345 "<%s> clear MSTA\n", __func__); 375 "<%s> clear MSTA\n", __func__);
346 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 376 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
347 temp &= ~I2CR_MSTA; 377 temp &= ~(I2CR_MSTA | I2CR_MTX);
348 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 378 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
379 i2c_imx_bus_busy(i2c_imx, 0);
380 i2c_imx->stopped = 1;
349 } else if (i == (msgs->len - 2)) { 381 } else if (i == (msgs->len - 2)) {
350 dev_dbg(&i2c_imx->adapter.dev, 382 dev_dbg(&i2c_imx->adapter.dev,
351 "<%s> set TXAK\n", __func__); 383 "<%s> set TXAK\n", __func__);
@@ -370,14 +402,11 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
370 402
371 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 403 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
372 404
373 /* Check if i2c bus is not busy */ 405 /* Start I2C transfer */
374 result = i2c_imx_bus_busy(i2c_imx); 406 result = i2c_imx_start(i2c_imx);
375 if (result) 407 if (result)
376 goto fail0; 408 goto fail0;
377 409
378 /* Start I2C transfer */
379 i2c_imx_start(i2c_imx);
380
381 /* read/write data */ 410 /* read/write data */
382 for (i = 0; i < num; i++) { 411 for (i = 0; i < num; i++) {
383 if (i) { 412 if (i) {
@@ -386,6 +415,9 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
386 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 415 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
387 temp |= I2CR_RSTA; 416 temp |= I2CR_RSTA;
388 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 417 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
418 result = i2c_imx_bus_busy(i2c_imx, 1);
419 if (result)
420 goto fail0;
389 } 421 }
390 dev_dbg(&i2c_imx->adapter.dev, 422 dev_dbg(&i2c_imx->adapter.dev,
391 "<%s> transfer message: %d\n", __func__, i); 423 "<%s> transfer message: %d\n", __func__, i);
@@ -500,7 +532,6 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
500 dev_err(&pdev->dev, "can't get I2C clock\n"); 532 dev_err(&pdev->dev, "can't get I2C clock\n");
501 goto fail3; 533 goto fail3;
502 } 534 }
503 clk_enable(i2c_imx->clk);
504 535
505 /* Request IRQ */ 536 /* Request IRQ */
506 ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx); 537 ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx);
@@ -549,7 +580,6 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
549fail5: 580fail5:
550 free_irq(i2c_imx->irq, i2c_imx); 581 free_irq(i2c_imx->irq, i2c_imx);
551fail4: 582fail4:
552 clk_disable(i2c_imx->clk);
553 clk_put(i2c_imx->clk); 583 clk_put(i2c_imx->clk);
554fail3: 584fail3:
555 release_mem_region(i2c_imx->res->start, resource_size(res)); 585 release_mem_region(i2c_imx->res->start, resource_size(res));
@@ -586,8 +616,6 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
586 if (pdata && pdata->exit) 616 if (pdata && pdata->exit)
587 pdata->exit(&pdev->dev); 617 pdata->exit(&pdev->dev);
588 618
589 /* Disable I2C clock */
590 clk_disable(i2c_imx->clk);
591 clk_put(i2c_imx->clk); 619 clk_put(i2c_imx->clk);
592 620
593 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); 621 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 9f6b8e0f8632..dba6eb053e2f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev,
281 return -ENODEV; 281 return -ENODEV;
282 } 282 }
283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) 283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name))
284 return -EBUSY; 284 return -ENODEV;
285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { 285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) {
286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", 286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
287 sch_smba); 287 sch_smba);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index d325e86e3103..f627001108b8 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -365,9 +365,6 @@ static int mpc_write(struct mpc_i2c *i2c, int target,
365 unsigned timeout = i2c->adap.timeout; 365 unsigned timeout = i2c->adap.timeout;
366 u32 flags = restart ? CCR_RSTA : 0; 366 u32 flags = restart ? CCR_RSTA : 0;
367 367
368 /* Start with MEN */
369 if (!restart)
370 writeccr(i2c, CCR_MEN);
371 /* Start as master */ 368 /* Start as master */
372 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags); 369 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
373 /* Write target byte */ 370 /* Write target byte */
@@ -396,9 +393,6 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
396 int i, result; 393 int i, result;
397 u32 flags = restart ? CCR_RSTA : 0; 394 u32 flags = restart ? CCR_RSTA : 0;
398 395
399 /* Start with MEN */
400 if (!restart)
401 writeccr(i2c, CCR_MEN);
402 /* Switch to read - restart */ 396 /* Switch to read - restart */
403 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags); 397 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
404 /* Write target address byte - this time with the read flag set */ 398 /* Write target address byte - this time with the read flag set */
@@ -425,9 +419,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
425 /* Generate txack on next to last byte */ 419 /* Generate txack on next to last byte */
426 if (i == length - 2) 420 if (i == length - 2)
427 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK); 421 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
428 /* Generate stop on last byte */ 422 /* Do not generate stop on last byte */
429 if (i == length - 1) 423 if (i == length - 1)
430 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_TXAK); 424 writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX);
431 data[i] = readb(i2c->base + MPC_I2C_DR); 425 data[i] = readb(i2c->base + MPC_I2C_DR);
432 } 426 }
433 427
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a782c7a08f9e..1e245e9cad31 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,7 +22,7 @@
22 Intel PIIX4, 440MX 22 Intel PIIX4, 440MX
23 Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 23 Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
24 ATI IXP200, IXP300, IXP400, SB600, SB700, SB800 24 ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
25 AMD SB900 25 AMD Hudson-2
26 SMSC Victory66 26 SMSC Victory66
27 27
28 Note: we assume there can only be one device, with one SMBus interface. 28 Note: we assume there can only be one device, with one SMBus interface.
@@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
169 } 169 }
170 170
171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
172 return -EBUSY; 172 return -ENODEV;
173 173
174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
@@ -233,9 +233,9 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
233 unsigned short smba_idx = 0xcd6; 233 unsigned short smba_idx = 0xcd6;
234 u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c; 234 u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
235 235
236 /* SB800 SMBus does not support forcing address */ 236 /* SB800 and later SMBus does not support forcing address */
237 if (force || force_addr) { 237 if (force || force_addr) {
238 dev_err(&PIIX4_dev->dev, "SB800 SMBus does not support " 238 dev_err(&PIIX4_dev->dev, "SMBus does not support "
239 "forcing address!\n"); 239 "forcing address!\n");
240 return -EINVAL; 240 return -EINVAL;
241 } 241 }
@@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
260 260
261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; 261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
263 return -EBUSY; 263 return -ENODEV;
264 264
265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
@@ -480,7 +480,7 @@ static struct pci_device_id piix4_ids[] = {
480 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, 480 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
481 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, 481 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
482 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, 482 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
483 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SMBUS) }, 483 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
484 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 484 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
485 PCI_DEVICE_ID_SERVERWORKS_OSB4) }, 485 PCI_DEVICE_ID_SERVERWORKS_OSB4) },
486 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 486 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 276a046ac93f..b4a55d407bf5 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -369,9 +369,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
369 goto err; 369 goto err;
370 370
371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), 371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
372 "SMBus CMI adapter %s (%s)", 372 "SMBus CMI adapter %s",
373 acpi_device_name(device), 373 acpi_device_name(device));
374 acpi_device_uid(device));
375 smbus_cmi->adapter.owner = THIS_MODULE; 374 smbus_cmi->adapter.owner = THIS_MODULE;
376 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm; 375 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
377 smbus_cmi->adapter.algo_data = smbus_cmi; 376 smbus_cmi->adapter.algo_data = smbus_cmi;
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 8295885b2fdb..1649963b00dc 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
280 280
281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); 281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
282 if (retval) 282 if (retval)
283 return retval; 283 return -ENODEV;
284 284
285 /* Everything is happy, let's grab the memory and set things up. */ 285 /* Everything is happy, let's grab the memory and set things up. */
286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE, 286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 54d810a4d00f..e4b1543015af 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
365found: 365found:
366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name); 366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
367 if (error) 367 if (error)
368 return error; 368 return -ENODEV;
369 369
370 if (!request_region(vt596_smba, 8, vt596_driver.name)) { 370 if (!request_region(vt596_smba, 8, vt596_driver.name)) {
371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", 371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 6396c3ad3252..837322b10a4c 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -177,7 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = {
177 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 }, 177 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
178 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 }, 178 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
179 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 }, 179 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
180 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 }, 180 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 },
181 { 0, }, 181 { 0, },
182}; 182};
183MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl); 183MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 680e5975217f..ca0c46f6580a 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -379,7 +379,8 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
379 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, 379 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
380 .port_ops = &cmd64x_port_ops, 380 .port_ops = &cmd64x_port_ops,
381 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | 381 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
382 IDE_HFLAG_ABUSE_PREFETCH, 382 IDE_HFLAG_ABUSE_PREFETCH |
383 IDE_HFLAG_SERIALIZE,
383 .pio_mask = ATA_PIO5, 384 .pio_mask = ATA_PIO5,
384 .mwdma_mask = ATA_MWDMA2, 385 .mwdma_mask = ATA_MWDMA2,
385 .udma_mask = 0x00, /* no udma */ 386 .udma_mask = 0x00, /* no udma */
@@ -389,7 +390,8 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
389 .init_chipset = init_chipset_cmd64x, 390 .init_chipset = init_chipset_cmd64x,
390 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 391 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
391 .port_ops = &cmd648_port_ops, 392 .port_ops = &cmd648_port_ops,
392 .host_flags = IDE_HFLAG_ABUSE_PREFETCH, 393 .host_flags = IDE_HFLAG_ABUSE_PREFETCH |
394 IDE_HFLAG_SERIALIZE,
393 .pio_mask = ATA_PIO5, 395 .pio_mask = ATA_PIO5,
394 .mwdma_mask = ATA_MWDMA2, 396 .mwdma_mask = ATA_MWDMA2,
395 .udma_mask = ATA_UDMA2, 397 .udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63c53d65e875..4d76ba473097 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1046,15 +1046,6 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1046 if (port_ops && port_ops->init_dev) 1046 if (port_ops && port_ops->init_dev)
1047 port_ops->init_dev(drive); 1047 port_ops->init_dev(drive);
1048 } 1048 }
1049
1050 ide_port_for_each_dev(i, drive, hwif) {
1051 /*
1052 * default to PIO Mode 0 before we figure out
1053 * the most suited mode for the attached device
1054 */
1055 if (port_ops && port_ops->set_pio_mode)
1056 port_ops->set_pio_mode(drive, 0);
1057 }
1058} 1049}
1059 1050
1060static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1051static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 28d09a5d8450..017c09540c2f 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = {
273 273
274static void proc_ide_settings_warn(void) 274static void proc_ide_settings_warn(void)
275{ 275{
276 static int warned; 276 printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
277
278 if (warned)
279 return;
280
281 printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
282 "obsolete, and will be removed soon!\n"); 277 "obsolete, and will be removed soon!\n");
283 warned = 1;
284} 278}
285 279
286static int ide_settings_proc_show(struct seq_file *m, void *v) 280static int ide_settings_proc_show(struct seq_file *m, void *v)
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index afca22beaadf..3b88eba04c9c 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer 3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> 4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
5 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 * 8 *
@@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive)
281 281
282 pci_read_config_byte(dev, 0x4b, &reg4bh); 282 pci_read_config_byte(dev, 0x4b, &reg4bh);
283 283
284 rw_prefetch = reg4bh & ~(0x11 << drive->dn);
285
284 if (drive->media == ide_disk) 286 if (drive->media == ide_disk)
285 rw_prefetch = 0x11 << drive->dn; 287 rw_prefetch |= 0x11 << drive->dn;
286 288
287 if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) 289 if (reg4bh != rw_prefetch)
288 pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); 290 pci_write_config_byte(dev, 0x4b, rw_prefetch);
289} 291}
290 292
291static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) 293static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index 1aba8c13fe8f..8e7e3344c4b3 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -247,7 +247,7 @@ static int dma_region_pagefault(struct vm_area_struct *vma,
247 return 0; 247 return 0;
248} 248}
249 249
250static struct vm_operations_struct dma_region_vm_ops = { 250static const struct vm_operations_struct dma_region_vm_ops = {
251 .fault = dma_region_pagefault, 251 .fault = dma_region_pagefault,
252}; 252};
253 253
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 0bc3d78ce7b1..8aa56ac07e29 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/sched.h>
32#include <linux/string.h> 33#include <linux/string.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/fs.h> 35#include <linux/fs.h>
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index d287ba79821d..949064a05675 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -30,6 +30,7 @@
30 */ 30 */
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/interrupt.h> 35#include <linux/interrupt.h>
35#include <linux/wait.h> 36#include <linux/wait.h>
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index 96a2959ce877..7c544f7c74c4 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -260,15 +260,12 @@ static int ieee802154_fake_close(struct net_device *dev)
260static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, 260static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
261 struct net_device *dev) 261 struct net_device *dev)
262{ 262{
263 skb->iif = dev->ifindex;
264 skb->dev = dev;
265 dev->stats.tx_packets++; 263 dev->stats.tx_packets++;
266 dev->stats.tx_bytes += skb->len; 264 dev->stats.tx_bytes += skb->len;
267 265
268 dev->trans_start = jiffies;
269
270 /* FIXME: do hardware work here ... */ 266 /* FIXME: do hardware work here ... */
271 267
268 dev_kfree_skb(skb);
272 return NETDEV_TX_OK; 269 return NETDEV_TX_OK;
273} 270}
274 271
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 5be1bd4fc7ed..bd07803e9183 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -393,7 +393,7 @@ static int addr_resolve_local(struct sockaddr *src_in,
393 393
394 for_each_netdev(&init_net, dev) 394 for_each_netdev(&init_net, dev)
395 if (ipv6_chk_addr(&init_net, 395 if (ipv6_chk_addr(&init_net,
396 &((struct sockaddr_in6 *) addr)->sin6_addr, 396 &((struct sockaddr_in6 *) dst_in)->sin6_addr,
397 dev, 1)) 397 dev, 1))
398 break; 398 break;
399 399
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 55d093a36ae4..0f89909abce9 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -40,6 +40,7 @@
40#include <linux/idr.h> 40#include <linux/idr.h>
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/rbtree.h> 42#include <linux/rbtree.h>
43#include <linux/sched.h>
43#include <linux/spinlock.h> 44#include <linux/spinlock.h>
44#include <linux/workqueue.h> 45#include <linux/workqueue.h>
45#include <linux/completion.h> 46#include <linux/completion.h>
@@ -362,7 +363,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
362 * In either case, must tell the provider to reject. 363 * In either case, must tell the provider to reject.
363 */ 364 */
364 cm_id_priv->state = IW_CM_STATE_DESTROYING; 365 cm_id_priv->state = IW_CM_STATE_DESTROYING;
366 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
365 cm_id->device->iwcm->reject(cm_id, NULL, 0); 367 cm_id->device->iwcm->reject(cm_id, NULL, 0);
368 spin_lock_irqsave(&cm_id_priv->lock, flags);
366 break; 369 break;
367 case IW_CM_STATE_CONN_SENT: 370 case IW_CM_STATE_CONN_SENT:
368 case IW_CM_STATE_DESTROYING: 371 case IW_CM_STATE_DESTROYING:
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 57a3c6f947b2..4e0f2829e0e5 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -37,7 +37,8 @@
37enum rmpp_state { 37enum rmpp_state {
38 RMPP_STATE_ACTIVE, 38 RMPP_STATE_ACTIVE,
39 RMPP_STATE_TIMEOUT, 39 RMPP_STATE_TIMEOUT,
40 RMPP_STATE_COMPLETE 40 RMPP_STATE_COMPLETE,
41 RMPP_STATE_CANCELING
41}; 42};
42 43
43struct mad_rmpp_recv { 44struct mad_rmpp_recv {
@@ -87,18 +88,22 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
87 88
88 spin_lock_irqsave(&agent->lock, flags); 89 spin_lock_irqsave(&agent->lock, flags);
89 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { 90 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
91 if (rmpp_recv->state != RMPP_STATE_COMPLETE)
92 ib_free_recv_mad(rmpp_recv->rmpp_wc);
93 rmpp_recv->state = RMPP_STATE_CANCELING;
94 }
95 spin_unlock_irqrestore(&agent->lock, flags);
96
97 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
90 cancel_delayed_work(&rmpp_recv->timeout_work); 98 cancel_delayed_work(&rmpp_recv->timeout_work);
91 cancel_delayed_work(&rmpp_recv->cleanup_work); 99 cancel_delayed_work(&rmpp_recv->cleanup_work);
92 } 100 }
93 spin_unlock_irqrestore(&agent->lock, flags);
94 101
95 flush_workqueue(agent->qp_info->port_priv->wq); 102 flush_workqueue(agent->qp_info->port_priv->wq);
96 103
97 list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, 104 list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
98 &agent->rmpp_list, list) { 105 &agent->rmpp_list, list) {
99 list_del(&rmpp_recv->list); 106 list_del(&rmpp_recv->list);
100 if (rmpp_recv->state != RMPP_STATE_COMPLETE)
101 ib_free_recv_mad(rmpp_recv->rmpp_wc);
102 destroy_rmpp_recv(rmpp_recv); 107 destroy_rmpp_recv(rmpp_recv);
103 } 108 }
104} 109}
@@ -260,6 +265,10 @@ static void recv_cleanup_handler(struct work_struct *work)
260 unsigned long flags; 265 unsigned long flags;
261 266
262 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); 267 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
268 if (rmpp_recv->state == RMPP_STATE_CANCELING) {
269 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
270 return;
271 }
263 list_del(&rmpp_recv->list); 272 list_del(&rmpp_recv->list);
264 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); 273 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
265 destroy_rmpp_recv(rmpp_recv); 274 destroy_rmpp_recv(rmpp_recv);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 51bd9669cb1f..f504c9b00c1b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -38,6 +38,7 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/err.h> 39#include <linux/err.h>
40#include <linux/poll.h> 40#include <linux/poll.h>
41#include <linux/sched.h>
41#include <linux/file.h> 42#include <linux/file.h>
42#include <linux/mount.h> 43#include <linux/mount.h>
43#include <linux/cdev.h> 44#include <linux/cdev.h>
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 4346a24568fb..bb96d3c4b0f4 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -34,6 +34,7 @@
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/poll.h> 36#include <linux/poll.h>
37#include <linux/sched.h>
37#include <linux/idr.h> 38#include <linux/idr.h>
38#include <linux/in.h> 39#include <linux/in.h>
39#include <linux/in6.h> 40#include <linux/in6.h>
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8c46f2257098..7de02969ed7d 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -44,6 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/kref.h> 45#include <linux/kref.h>
46#include <linux/compat.h> 46#include <linux/compat.h>
47#include <linux/sched.h>
47#include <linux/semaphore.h> 48#include <linux/semaphore.h>
48 49
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d3fff9e008a3..aec0fbdfe7f0 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -40,6 +40,7 @@
40#include <linux/err.h> 40#include <linux/err.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h>
43#include <linux/file.h> 44#include <linux/file.h>
44#include <linux/mount.h> 45#include <linux/mount.h>
45#include <linux/cdev.h> 46#include <linux/cdev.h>
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 6895523779d0..ed7175549ebd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -37,6 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/list.h> 39#include <linux/list.h>
40#include <linux/sched.h>
40#include <linux/spinlock.h> 41#include <linux/spinlock.h>
41#include <linux/ethtool.h> 42#include <linux/ethtool.h>
42#include <linux/rtnetlink.h> 43#include <linux/rtnetlink.h>
@@ -1199,11 +1200,14 @@ static int iwch_query_port(struct ib_device *ibdev,
1199 props->state = IB_PORT_DOWN; 1200 props->state = IB_PORT_DOWN;
1200 else { 1201 else {
1201 inetdev = in_dev_get(netdev); 1202 inetdev = in_dev_get(netdev);
1202 if (inetdev->ifa_list) 1203 if (inetdev) {
1203 props->state = IB_PORT_ACTIVE; 1204 if (inetdev->ifa_list)
1204 else 1205 props->state = IB_PORT_ACTIVE;
1206 else
1207 props->state = IB_PORT_INIT;
1208 in_dev_put(inetdev);
1209 } else
1205 props->state = IB_PORT_INIT; 1210 props->state = IB_PORT_INIT;
1206 in_dev_put(inetdev);
1207 } 1211 }
1208 1212
1209 props->port_cap_flags = 1213 props->port_cap_flags =
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6e8653471941..1cecf98829ac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -29,6 +29,7 @@
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32#include <linux/sched.h>
32#include "iwch_provider.h" 33#include "iwch_provider.h"
33#include "iwch.h" 34#include "iwch.h"
34#include "iwch_cm.h" 35#include "iwch_cm.h"
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 3cb688d29131..f1565cae8ec6 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -95,7 +95,7 @@ static void ehca_mm_close(struct vm_area_struct *vma)
95 vma->vm_start, vma->vm_end, *count); 95 vma->vm_start, vma->vm_end, *count);
96} 96}
97 97
98static struct vm_operations_struct vm_ops = { 98static const struct vm_operations_struct vm_ops = {
99 .open = ehca_mm_open, 99 .open = ehca_mm_open,
100 .close = ehca_mm_close, 100 .close = ehca_mm_close,
101}; 101};
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 04e88b600558..013d1380e77c 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,7 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/sched.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/idr.h> 36#include <linux/idr.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 38a287006612..40dbe54056c7 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1151,7 +1151,7 @@ static int ipath_file_vma_fault(struct vm_area_struct *vma,
1151 return 0; 1151 return 0;
1152} 1152}
1153 1153
1154static struct vm_operations_struct ipath_file_vm_ops = { 1154static const struct vm_operations_struct ipath_file_vm_ops = {
1155 .fault = ipath_file_vma_fault, 1155 .fault = ipath_file_vma_fault,
1156}; 1156};
1157 1157
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index b2a9d4c155d1..a805402dd4ae 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -37,6 +37,7 @@
37 37
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/sched.h>
40#include <linux/delay.h> 41#include <linux/delay.h>
41#include <linux/io.h> 42#include <linux/io.h>
42#include <rdma/ib_verbs.h> 43#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 6c21b4b5ec71..c0a03ac03ee7 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h>
36 37
37#include "ipath_kernel.h" 38#include "ipath_kernel.h"
38#include "ipath_verbs.h" 39#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index fa830e22002f..b28865faf435 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -74,7 +74,7 @@ static void ipath_vma_close(struct vm_area_struct *vma)
74 kref_put(&ip->ref, ipath_release_mmap_info); 74 kref_put(&ip->ref, ipath_release_mmap_info);
75} 75}
76 76
77static struct vm_operations_struct ipath_vm_ops = { 77static const struct vm_operations_struct ipath_vm_ops = {
78 .open = ipath_vma_open, 78 .open = ipath_vma_open,
79 .close = ipath_vma_close, 79 .close = ipath_vma_close,
80}; 80};
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 3a5a89b609c4..cb2d3ef2ae12 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/sched.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36 37
37#include "ipath_verbs.h" 38#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 2296832f94da..1f95bbaf7602 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -31,6 +31,7 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/sched.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35 36
36#include "ipath_verbs.h" 37#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 6076cb61bf6a..7420715256a9 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -31,6 +31,7 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/sched.h>
34#include <rdma/ib_smi.h> 35#include <rdma/ib_smi.h>
35 36
36#include "ipath_verbs.h" 37#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 855911e7396d..82878e348627 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/sched.h>
36 37
37#include "ipath_kernel.h" 38#include "ipath_kernel.h"
38 39
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 7bff4b9baa0a..be78f6643c06 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -33,6 +33,7 @@
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/dmapool.h> 35#include <linux/dmapool.h>
36#include <linux/sched.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
37#include <linux/list.h> 38#include <linux/list.h>
38#include <linux/highmem.h> 39#include <linux/highmem.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index d73e32232879..6923e1d986da 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/rculist.h> 34#include <linux/rculist.h>
35#include <linux/sched.h>
35 36
36#include "ipath_verbs.h" 37#include "ipath_verbs.h"
37 38
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 056b2a4c6970..0aa0110e4b6c 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -68,11 +68,16 @@ static void catas_reset(struct work_struct *work)
68 spin_unlock_irq(&catas_lock); 68 spin_unlock_irq(&catas_lock);
69 69
70 list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { 70 list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
71 struct pci_dev *pdev = dev->pdev;
71 ret = __mthca_restart_one(dev->pdev); 72 ret = __mthca_restart_one(dev->pdev);
73 /* 'dev' now is not valid */
72 if (ret) 74 if (ret)
73 mthca_err(dev, "Reset failed (%d)\n", ret); 75 printk(KERN_ERR "mthca %s: Reset failed (%d)\n",
74 else 76 pci_name(pdev), ret);
75 mthca_dbg(dev, "Reset succeeded\n"); 77 else {
78 struct mthca_dev *d = pci_get_drvdata(pdev);
79 mthca_dbg(d, "Reset succeeded\n");
80 }
76 } 81 }
77 82
78 mutex_unlock(&mthca_device_mutex); 83 mutex_unlock(&mthca_device_mutex);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 538e409d4515..e593af3354b8 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1566,7 +1566,6 @@ static const struct net_device_ops nes_netdev_ops = {
1566 .ndo_set_mac_address = nes_netdev_set_mac_address, 1566 .ndo_set_mac_address = nes_netdev_set_mac_address,
1567 .ndo_set_multicast_list = nes_netdev_set_multicast_list, 1567 .ndo_set_multicast_list = nes_netdev_set_multicast_list,
1568 .ndo_change_mtu = nes_netdev_change_mtu, 1568 .ndo_change_mtu = nes_netdev_change_mtu,
1569 .ndo_set_mac_address = eth_mac_addr,
1570 .ndo_validate_addr = eth_validate_addr, 1569 .ndo_validate_addr = eth_validate_addr,
1571 .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, 1570 .ndo_vlan_rx_register = nes_netdev_vlan_rx_register,
1572}; 1571};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25874fc680c9..8763c1ea5eb4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -362,12 +362,19 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
362{ 362{
363 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 363 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
364 carrier_on_task); 364 carrier_on_task);
365 struct ib_port_attr attr;
365 366
366 /* 367 /*
367 * Take rtnl_lock to avoid racing with ipoib_stop() and 368 * Take rtnl_lock to avoid racing with ipoib_stop() and
368 * turning the carrier back on while a device is being 369 * turning the carrier back on while a device is being
369 * removed. 370 * removed.
370 */ 371 */
372 if (ib_query_port(priv->ca, priv->port, &attr) ||
373 attr.state != IB_PORT_ACTIVE) {
374 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
375 return;
376 }
377
371 rtnl_lock(); 378 rtnl_lock();
372 netif_carrier_on(priv->dev); 379 netif_carrier_on(priv->dev);
373 rtnl_unlock(); 380 rtnl_unlock();
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ba6ec876296..add9188663ff 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -426,7 +426,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
426 * because we preallocate so many resources 426 * because we preallocate so many resources
427 */ 427 */
428 cls_session = iscsi_session_setup(&iscsi_iser_transport, shost, 428 cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
429 ISCSI_DEF_XMIT_CMDS_MAX, 429 ISCSI_DEF_XMIT_CMDS_MAX, 0,
430 sizeof(struct iscsi_iser_task), 430 sizeof(struct iscsi_iser_task),
431 initial_cmdsn, 0); 431 initial_cmdsn, 0);
432 if (!cls_session) 432 if (!cls_session)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 1148140d08a1..dee6706038aa 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -13,6 +13,7 @@
13#define EVDEV_BUFFER_SIZE 64 13#define EVDEV_BUFFER_SIZE 64
14 14
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 72c63e5dd630..38df81fcdc3a 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -337,16 +337,16 @@ int input_ff_create(struct input_dev *dev, int max_effects)
337 dev->ff = ff; 337 dev->ff = ff;
338 dev->flush = flush_effects; 338 dev->flush = flush_effects;
339 dev->event = input_ff_event; 339 dev->event = input_ff_event;
340 set_bit(EV_FF, dev->evbit); 340 __set_bit(EV_FF, dev->evbit);
341 341
342 /* Copy "true" bits into ff device bitmap */ 342 /* Copy "true" bits into ff device bitmap */
343 for (i = 0; i <= FF_MAX; i++) 343 for (i = 0; i <= FF_MAX; i++)
344 if (test_bit(i, dev->ffbit)) 344 if (test_bit(i, dev->ffbit))
345 set_bit(i, ff->ffbit); 345 __set_bit(i, ff->ffbit);
346 346
347 /* we can emulate RUMBLE with periodic effects */ 347 /* we can emulate RUMBLE with periodic effects */
348 if (test_bit(FF_PERIODIC, ff->ffbit)) 348 if (test_bit(FF_PERIODIC, ff->ffbit))
349 set_bit(FF_RUMBLE, dev->ffbit); 349 __set_bit(FF_RUMBLE, dev->ffbit);
350 350
351 return 0; 351 return 0;
352} 352}
@@ -362,12 +362,14 @@ EXPORT_SYMBOL_GPL(input_ff_create);
362 */ 362 */
363void input_ff_destroy(struct input_dev *dev) 363void input_ff_destroy(struct input_dev *dev)
364{ 364{
365 clear_bit(EV_FF, dev->evbit); 365 struct ff_device *ff = dev->ff;
366 if (dev->ff) { 366
367 if (dev->ff->destroy) 367 __clear_bit(EV_FF, dev->evbit);
368 dev->ff->destroy(dev->ff); 368 if (ff) {
369 kfree(dev->ff->private); 369 if (ff->destroy)
370 kfree(dev->ff); 370 ff->destroy(ff);
371 kfree(ff->private);
372 kfree(ff);
371 dev->ff = NULL; 373 dev->ff = NULL;
372 } 374 }
373} 375}
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 2d1415e16834..b483b2995fa9 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -61,7 +61,6 @@ struct ml_device {
61 struct ml_effect_state states[FF_MEMLESS_EFFECTS]; 61 struct ml_effect_state states[FF_MEMLESS_EFFECTS];
62 int gain; 62 int gain;
63 struct timer_list timer; 63 struct timer_list timer;
64 spinlock_t timer_lock;
65 struct input_dev *dev; 64 struct input_dev *dev;
66 65
67 int (*play_effect)(struct input_dev *dev, void *data, 66 int (*play_effect)(struct input_dev *dev, void *data,
@@ -368,38 +367,38 @@ static void ml_effect_timer(unsigned long timer_data)
368{ 367{
369 struct input_dev *dev = (struct input_dev *)timer_data; 368 struct input_dev *dev = (struct input_dev *)timer_data;
370 struct ml_device *ml = dev->ff->private; 369 struct ml_device *ml = dev->ff->private;
370 unsigned long flags;
371 371
372 debug("timer: updating effects"); 372 debug("timer: updating effects");
373 373
374 spin_lock(&ml->timer_lock); 374 spin_lock_irqsave(&dev->event_lock, flags);
375 ml_play_effects(ml); 375 ml_play_effects(ml);
376 spin_unlock(&ml->timer_lock); 376 spin_unlock_irqrestore(&dev->event_lock, flags);
377} 377}
378 378
379/*
380 * Sets requested gain for FF effects. Called with dev->event_lock held.
381 */
379static void ml_ff_set_gain(struct input_dev *dev, u16 gain) 382static void ml_ff_set_gain(struct input_dev *dev, u16 gain)
380{ 383{
381 struct ml_device *ml = dev->ff->private; 384 struct ml_device *ml = dev->ff->private;
382 int i; 385 int i;
383 386
384 spin_lock_bh(&ml->timer_lock);
385
386 ml->gain = gain; 387 ml->gain = gain;
387 388
388 for (i = 0; i < FF_MEMLESS_EFFECTS; i++) 389 for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
389 __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); 390 __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags);
390 391
391 ml_play_effects(ml); 392 ml_play_effects(ml);
392
393 spin_unlock_bh(&ml->timer_lock);
394} 393}
395 394
395/*
396 * Start/stop specified FF effect. Called with dev->event_lock held.
397 */
396static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) 398static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
397{ 399{
398 struct ml_device *ml = dev->ff->private; 400 struct ml_device *ml = dev->ff->private;
399 struct ml_effect_state *state = &ml->states[effect_id]; 401 struct ml_effect_state *state = &ml->states[effect_id];
400 unsigned long flags;
401
402 spin_lock_irqsave(&ml->timer_lock, flags);
403 402
404 if (value > 0) { 403 if (value > 0) {
405 debug("initiated play"); 404 debug("initiated play");
@@ -425,8 +424,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
425 ml_play_effects(ml); 424 ml_play_effects(ml);
426 } 425 }
427 426
428 spin_unlock_irqrestore(&ml->timer_lock, flags);
429
430 return 0; 427 return 0;
431} 428}
432 429
@@ -436,7 +433,7 @@ static int ml_ff_upload(struct input_dev *dev,
436 struct ml_device *ml = dev->ff->private; 433 struct ml_device *ml = dev->ff->private;
437 struct ml_effect_state *state = &ml->states[effect->id]; 434 struct ml_effect_state *state = &ml->states[effect->id];
438 435
439 spin_lock_bh(&ml->timer_lock); 436 spin_lock_irq(&dev->event_lock);
440 437
441 if (test_bit(FF_EFFECT_STARTED, &state->flags)) { 438 if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
442 __clear_bit(FF_EFFECT_PLAYING, &state->flags); 439 __clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -448,7 +445,7 @@ static int ml_ff_upload(struct input_dev *dev,
448 ml_schedule_timer(ml); 445 ml_schedule_timer(ml);
449 } 446 }
450 447
451 spin_unlock_bh(&ml->timer_lock); 448 spin_unlock_irq(&dev->event_lock);
452 449
453 return 0; 450 return 0;
454} 451}
@@ -482,7 +479,6 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
482 ml->private = data; 479 ml->private = data;
483 ml->play_effect = play_effect; 480 ml->play_effect = play_effect;
484 ml->gain = 0xffff; 481 ml->gain = 0xffff;
485 spin_lock_init(&ml->timer_lock);
486 setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); 482 setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev);
487 483
488 set_bit(FF_GAIN, dev->ffbit); 484 set_bit(FF_GAIN, dev->ffbit);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e828aab7dace..2266ecbfbc01 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -17,6 +17,7 @@
17#include <linux/random.h> 17#include <linux/random.h>
18#include <linux/major.h> 18#include <linux/major.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/sched.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
21#include <linux/poll.h> 22#include <linux/poll.h>
22#include <linux/device.h> 23#include <linux/device.h>
@@ -781,10 +782,29 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
781 return 0; 782 return 0;
782} 783}
783 784
785union input_seq_state {
786 struct {
787 unsigned short pos;
788 bool mutex_acquired;
789 };
790 void *p;
791};
792
784static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) 793static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
785{ 794{
786 if (mutex_lock_interruptible(&input_mutex)) 795 union input_seq_state *state = (union input_seq_state *)&seq->private;
787 return NULL; 796 int error;
797
798 /* We need to fit into seq->private pointer */
799 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
800
801 error = mutex_lock_interruptible(&input_mutex);
802 if (error) {
803 state->mutex_acquired = false;
804 return ERR_PTR(error);
805 }
806
807 state->mutex_acquired = true;
788 808
789 return seq_list_start(&input_dev_list, *pos); 809 return seq_list_start(&input_dev_list, *pos);
790} 810}
@@ -794,9 +814,12 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
794 return seq_list_next(v, &input_dev_list, pos); 814 return seq_list_next(v, &input_dev_list, pos);
795} 815}
796 816
797static void input_devices_seq_stop(struct seq_file *seq, void *v) 817static void input_seq_stop(struct seq_file *seq, void *v)
798{ 818{
799 mutex_unlock(&input_mutex); 819 union input_seq_state *state = (union input_seq_state *)&seq->private;
820
821 if (state->mutex_acquired)
822 mutex_unlock(&input_mutex);
800} 823}
801 824
802static void input_seq_print_bitmap(struct seq_file *seq, const char *name, 825static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
@@ -860,7 +883,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
860static const struct seq_operations input_devices_seq_ops = { 883static const struct seq_operations input_devices_seq_ops = {
861 .start = input_devices_seq_start, 884 .start = input_devices_seq_start,
862 .next = input_devices_seq_next, 885 .next = input_devices_seq_next,
863 .stop = input_devices_seq_stop, 886 .stop = input_seq_stop,
864 .show = input_devices_seq_show, 887 .show = input_devices_seq_show,
865}; 888};
866 889
@@ -880,40 +903,49 @@ static const struct file_operations input_devices_fileops = {
880 903
881static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) 904static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
882{ 905{
883 if (mutex_lock_interruptible(&input_mutex)) 906 union input_seq_state *state = (union input_seq_state *)&seq->private;
884 return NULL; 907 int error;
908
909 /* We need to fit into seq->private pointer */
910 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
911
912 error = mutex_lock_interruptible(&input_mutex);
913 if (error) {
914 state->mutex_acquired = false;
915 return ERR_PTR(error);
916 }
917
918 state->mutex_acquired = true;
919 state->pos = *pos;
885 920
886 seq->private = (void *)(unsigned long)*pos;
887 return seq_list_start(&input_handler_list, *pos); 921 return seq_list_start(&input_handler_list, *pos);
888} 922}
889 923
890static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) 924static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
891{ 925{
892 seq->private = (void *)(unsigned long)(*pos + 1); 926 union input_seq_state *state = (union input_seq_state *)&seq->private;
893 return seq_list_next(v, &input_handler_list, pos);
894}
895 927
896static void input_handlers_seq_stop(struct seq_file *seq, void *v) 928 state->pos = *pos + 1;
897{ 929 return seq_list_next(v, &input_handler_list, pos);
898 mutex_unlock(&input_mutex);
899} 930}
900 931
901static int input_handlers_seq_show(struct seq_file *seq, void *v) 932static int input_handlers_seq_show(struct seq_file *seq, void *v)
902{ 933{
903 struct input_handler *handler = container_of(v, struct input_handler, node); 934 struct input_handler *handler = container_of(v, struct input_handler, node);
935 union input_seq_state *state = (union input_seq_state *)&seq->private;
904 936
905 seq_printf(seq, "N: Number=%ld Name=%s", 937 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
906 (unsigned long)seq->private, handler->name);
907 if (handler->fops) 938 if (handler->fops)
908 seq_printf(seq, " Minor=%d", handler->minor); 939 seq_printf(seq, " Minor=%d", handler->minor);
909 seq_putc(seq, '\n'); 940 seq_putc(seq, '\n');
910 941
911 return 0; 942 return 0;
912} 943}
944
913static const struct seq_operations input_handlers_seq_ops = { 945static const struct seq_operations input_handlers_seq_ops = {
914 .start = input_handlers_seq_start, 946 .start = input_handlers_seq_start,
915 .next = input_handlers_seq_next, 947 .next = input_handlers_seq_next,
916 .stop = input_handlers_seq_stop, 948 .stop = input_seq_stop,
917 .show = input_handlers_seq_show, 949 .show = input_handlers_seq_show,
918}; 950};
919 951
@@ -1260,19 +1292,27 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1260 return 0; 1292 return 0;
1261} 1293}
1262 1294
1263#define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1295#define INPUT_DO_TOGGLE(dev, type, bits, on) \
1264 do { \ 1296 do { \
1265 int i; \ 1297 int i; \
1266 if (!test_bit(EV_##type, dev->evbit)) \ 1298 bool active; \
1267 break; \ 1299 \
1268 for (i = 0; i < type##_MAX; i++) { \ 1300 if (!test_bit(EV_##type, dev->evbit)) \
1269 if (!test_bit(i, dev->bits##bit) || \ 1301 break; \
1270 !test_bit(i, dev->bits)) \ 1302 \
1271 continue; \ 1303 for (i = 0; i < type##_MAX; i++) { \
1272 dev->event(dev, EV_##type, i, on); \ 1304 if (!test_bit(i, dev->bits##bit)) \
1273 } \ 1305 continue; \
1306 \
1307 active = test_bit(i, dev->bits); \
1308 if (!active && !on) \
1309 continue; \
1310 \
1311 dev->event(dev, EV_##type, i, on ? active : 0); \
1312 } \
1274 } while (0) 1313 } while (0)
1275 1314
1315#ifdef CONFIG_PM
1276static void input_dev_reset(struct input_dev *dev, bool activate) 1316static void input_dev_reset(struct input_dev *dev, bool activate)
1277{ 1317{
1278 if (!dev->event) 1318 if (!dev->event)
@@ -1287,7 +1327,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1287 } 1327 }
1288} 1328}
1289 1329
1290#ifdef CONFIG_PM
1291static int input_dev_suspend(struct device *dev) 1330static int input_dev_suspend(struct device *dev)
1292{ 1331{
1293 struct input_dev *input_dev = to_input_dev(dev); 1332 struct input_dev *input_dev = to_input_dev(dev);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 901b2525993e..b1bd6dd32286 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -18,6 +18,7 @@
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/miscdevice.h> 24#include <linux/miscdevice.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 2388cf578a62..79e3edcced1a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -143,6 +143,7 @@ static const struct xpad_device {
143 { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 143 { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
144 { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", MAP_DPAD_TO_AXES, XTYPE_XBOX360 }, 144 { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
145 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 145 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
146 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
146 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", MAP_DPAD_TO_AXES, XTYPE_XBOX360 }, 147 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
147 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 148 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
148 { 0xffff, 0xffff, "Chinese-made Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, 149 { 0xffff, 0xffff, "Chinese-made Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
@@ -209,6 +210,7 @@ static struct usb_device_id xpad_table [] = {
209 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 210 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
210 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 211 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
211 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 212 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
213 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
212 XPAD_XBOX360_VENDOR(0x1bad), /* Rock Band Drums */ 214 XPAD_XBOX360_VENDOR(0x1bad), /* Rock Band Drums */
213 { } 215 { }
214}; 216};
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4709e15af607..28e6110d1ff8 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -233,6 +233,7 @@ struct atkbd {
233 */ 233 */
234static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); 234static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
235static void *atkbd_platform_fixup_data; 235static void *atkbd_platform_fixup_data;
236static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
236 237
237static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 238static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
238 ssize_t (*handler)(struct atkbd *, char *)); 239 ssize_t (*handler)(struct atkbd *, char *));
@@ -393,6 +394,9 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
393 394
394 input_event(dev, EV_MSC, MSC_RAW, code); 395 input_event(dev, EV_MSC, MSC_RAW, code);
395 396
397 if (atkbd_platform_scancode_fixup)
398 code = atkbd_platform_scancode_fixup(atkbd, code);
399
396 if (atkbd->translated) { 400 if (atkbd->translated) {
397 401
398 if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) { 402 if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) {
@@ -574,11 +578,22 @@ static void atkbd_event_work(struct work_struct *work)
574 578
575 mutex_lock(&atkbd->event_mutex); 579 mutex_lock(&atkbd->event_mutex);
576 580
577 if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) 581 if (!atkbd->enabled) {
578 atkbd_set_leds(atkbd); 582 /*
583 * Serio ports are resumed asynchronously so while driver core
584 * thinks that device is already fully operational in reality
585 * it may not be ready yet. In this case we need to keep
586 * rescheduling till reconnect completes.
587 */
588 schedule_delayed_work(&atkbd->event_work,
589 msecs_to_jiffies(100));
590 } else {
591 if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
592 atkbd_set_leds(atkbd);
579 593
580 if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) 594 if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
581 atkbd_set_repeat_rate(atkbd); 595 atkbd_set_repeat_rate(atkbd);
596 }
582 597
583 mutex_unlock(&atkbd->event_mutex); 598 mutex_unlock(&atkbd->event_mutex);
584} 599}
@@ -770,6 +785,30 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
770 return 3; 785 return 3;
771} 786}
772 787
788static int atkbd_reset_state(struct atkbd *atkbd)
789{
790 struct ps2dev *ps2dev = &atkbd->ps2dev;
791 unsigned char param[1];
792
793/*
794 * Set the LEDs to a predefined state (all off).
795 */
796
797 param[0] = 0;
798 if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
799 return -1;
800
801/*
802 * Set autorepeat to fastest possible.
803 */
804
805 param[0] = 0;
806 if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP))
807 return -1;
808
809 return 0;
810}
811
773static int atkbd_activate(struct atkbd *atkbd) 812static int atkbd_activate(struct atkbd *atkbd)
774{ 813{
775 struct ps2dev *ps2dev = &atkbd->ps2dev; 814 struct ps2dev *ps2dev = &atkbd->ps2dev;
@@ -852,29 +891,6 @@ static unsigned int atkbd_hp_forced_release_keys[] = {
852}; 891};
853 892
854/* 893/*
855 * Inventec system with broken key release on volume keys
856 */
857static unsigned int atkbd_inventec_forced_release_keys[] = {
858 0xae, 0xb0, -1U
859};
860
861/*
862 * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
863 * for its volume buttons
864 */
865static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
866 0xae, 0xb0, -1U
867};
868
869/*
870 * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
871 * release for their volume buttons
872 */
873static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
874 0xae, 0xb0, -1U
875};
876
877/*
878 * Samsung NC10,NC20 with Fn+F? key release not working 894 * Samsung NC10,NC20 with Fn+F? key release not working
879 */ 895 */
880static unsigned int atkbd_samsung_forced_release_keys[] = { 896static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -882,14 +898,6 @@ static unsigned int atkbd_samsung_forced_release_keys[] = {
882}; 898};
883 899
884/* 900/*
885 * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
886 * do not generate release events so we have to do it ourselves.
887 */
888static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
889 0xb0, 0xae, -1U
890};
891
892/*
893 * Amilo Pi 3525 key release for Fn+Volume keys not working 901 * Amilo Pi 3525 key release for Fn+Volume keys not working
894 */ 902 */
895static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = { 903static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
@@ -911,6 +919,30 @@ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
911}; 919};
912 920
913/* 921/*
922 * Many notebooks don't send key release event for volume up/down
923 * keys, with key list below common among them
924 */
925static unsigned int atkbd_volume_forced_release_keys[] = {
926 0xae, 0xb0, -1U
927};
928
929/*
930 * OQO 01+ multimedia keys (64--66) generate e0 6x upon release whereas
931 * they should be generating e4-e6 (0x80 | code).
932 */
933static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
934 unsigned int code)
935{
936 if (atkbd->translated && atkbd->emul == 1 &&
937 (code == 0x64 || code == 0x65 || code == 0x66)) {
938 atkbd->emul = 0;
939 code |= 0x80;
940 }
941
942 return code;
943}
944
945/*
914 * atkbd_set_keycode_table() initializes keyboard's keycode table 946 * atkbd_set_keycode_table() initializes keyboard's keycode table
915 * according to the selected scancode set 947 * according to the selected scancode set
916 */ 948 */
@@ -1087,6 +1119,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
1087 } 1119 }
1088 1120
1089 atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra); 1121 atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
1122 atkbd_reset_state(atkbd);
1090 atkbd_activate(atkbd); 1123 atkbd_activate(atkbd);
1091 1124
1092 } else { 1125 } else {
@@ -1141,6 +1174,18 @@ static int atkbd_reconnect(struct serio *serio)
1141 return -1; 1174 return -1;
1142 1175
1143 atkbd_activate(atkbd); 1176 atkbd_activate(atkbd);
1177
1178 /*
1179 * Restore LED state and repeat rate. While input core
1180 * will do this for us at resume time reconnect may happen
1181 * because user requested it via sysfs or simply because
1182 * keyboard was unplugged and plugged in again so we need
1183 * to do it ourselves here.
1184 */
1185 atkbd_set_leds(atkbd);
1186 if (!atkbd->softrepeat)
1187 atkbd_set_repeat_rate(atkbd);
1188
1144 } 1189 }
1145 1190
1146 atkbd_enable(atkbd); 1191 atkbd_enable(atkbd);
@@ -1267,6 +1312,7 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
1267 1312
1268 atkbd->dev = new_dev; 1313 atkbd->dev = new_dev;
1269 atkbd->set = atkbd_select_set(atkbd, atkbd->set, value); 1314 atkbd->set = atkbd_select_set(atkbd, atkbd->set, value);
1315 atkbd_reset_state(atkbd);
1270 atkbd_activate(atkbd); 1316 atkbd_activate(atkbd);
1271 atkbd_set_keycode_table(atkbd); 1317 atkbd_set_keycode_table(atkbd);
1272 atkbd_set_device_attrs(atkbd); 1318 atkbd_set_device_attrs(atkbd);
@@ -1388,6 +1434,7 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
1388 1434
1389 atkbd->dev = new_dev; 1435 atkbd->dev = new_dev;
1390 atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra); 1436 atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra);
1437 atkbd_reset_state(atkbd);
1391 atkbd_activate(atkbd); 1438 atkbd_activate(atkbd);
1392 atkbd_set_keycode_table(atkbd); 1439 atkbd_set_keycode_table(atkbd);
1393 atkbd_set_device_attrs(atkbd); 1440 atkbd_set_device_attrs(atkbd);
@@ -1513,6 +1560,13 @@ static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
1513 return 0; 1560 return 0;
1514} 1561}
1515 1562
1563static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
1564{
1565 atkbd_platform_scancode_fixup = id->driver_data;
1566
1567 return 0;
1568}
1569
1516static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { 1570static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1517 { 1571 {
1518 .ident = "Dell Laptop", 1572 .ident = "Dell Laptop",
@@ -1548,7 +1602,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1548 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), 1602 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
1549 }, 1603 },
1550 .callback = atkbd_setup_forced_release, 1604 .callback = atkbd_setup_forced_release,
1551 .driver_data = atkbd_hp_zv6100_forced_release_keys, 1605 .driver_data = atkbd_volume_forced_release_keys,
1552 }, 1606 },
1553 { 1607 {
1554 .ident = "HP Presario R4000", 1608 .ident = "HP Presario R4000",
@@ -1557,7 +1611,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1557 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"), 1611 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
1558 }, 1612 },
1559 .callback = atkbd_setup_forced_release, 1613 .callback = atkbd_setup_forced_release,
1560 .driver_data = atkbd_hp_r4000_forced_release_keys, 1614 .driver_data = atkbd_volume_forced_release_keys,
1561 }, 1615 },
1562 { 1616 {
1563 .ident = "HP Presario R4100", 1617 .ident = "HP Presario R4100",
@@ -1566,7 +1620,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1566 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"), 1620 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
1567 }, 1621 },
1568 .callback = atkbd_setup_forced_release, 1622 .callback = atkbd_setup_forced_release,
1569 .driver_data = atkbd_hp_r4000_forced_release_keys, 1623 .driver_data = atkbd_volume_forced_release_keys,
1570 }, 1624 },
1571 { 1625 {
1572 .ident = "HP Presario R4200", 1626 .ident = "HP Presario R4200",
@@ -1575,7 +1629,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1575 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"), 1629 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
1576 }, 1630 },
1577 .callback = atkbd_setup_forced_release, 1631 .callback = atkbd_setup_forced_release,
1578 .driver_data = atkbd_hp_r4000_forced_release_keys, 1632 .driver_data = atkbd_volume_forced_release_keys,
1579 }, 1633 },
1580 { 1634 {
1581 .ident = "Inventec Symphony", 1635 .ident = "Inventec Symphony",
@@ -1584,7 +1638,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1584 DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), 1638 DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
1585 }, 1639 },
1586 .callback = atkbd_setup_forced_release, 1640 .callback = atkbd_setup_forced_release,
1587 .driver_data = atkbd_inventec_forced_release_keys, 1641 .driver_data = atkbd_volume_forced_release_keys,
1588 }, 1642 },
1589 { 1643 {
1590 .ident = "Samsung NC10", 1644 .ident = "Samsung NC10",
@@ -1620,7 +1674,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1620 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"), 1674 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
1621 }, 1675 },
1622 .callback = atkbd_setup_forced_release, 1676 .callback = atkbd_setup_forced_release,
1623 .driver_data = atkbd_amilo_pa1510_forced_release_keys, 1677 .driver_data = atkbd_volume_forced_release_keys,
1624 }, 1678 },
1625 { 1679 {
1626 .ident = "Fujitsu Amilo Pi 3525", 1680 .ident = "Fujitsu Amilo Pi 3525",
@@ -1649,6 +1703,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1649 .callback = atkbd_setup_forced_release, 1703 .callback = atkbd_setup_forced_release,
1650 .driver_data = atkdb_soltech_ta12_forced_release_keys, 1704 .driver_data = atkdb_soltech_ta12_forced_release_keys,
1651 }, 1705 },
1706 {
1707 .ident = "OQO Model 01+",
1708 .matches = {
1709 DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
1710 DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
1711 },
1712 .callback = atkbd_setup_scancode_fixup,
1713 .driver_data = atkbd_oqo_01plus_scancode_fixup,
1714 },
1652 { } 1715 { }
1653}; 1716};
1654 1717
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index a88aff3816a0..77d130914259 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -147,6 +147,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
147 } 147 }
148 148
149 error = request_irq(irq, gpio_keys_isr, 149 error = request_irq(irq, gpio_keys_isr,
150 IRQF_SHARED |
150 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 151 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
151 button->desc ? button->desc : "gpio_keys", 152 button->desc ? button->desc : "gpio_keys",
152 bdata); 153 bdata);
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index e9d639ec283d..5f72440b50c8 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/hil.h> 25#include <linux/hil.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/sched.h>
27#include <linux/spinlock.h> 28#include <linux/spinlock.h>
28#include <asm/irq.h> 29#include <asm/irq.h>
29#ifdef CONFIG_HP300 30#ifdef CONFIG_HP300
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index 472b56639cdb..a99a04b03ee4 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/sched.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/module.h> 32#include <linux/module.h>
32#include <linux/interrupt.h> 33#include <linux/interrupt.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 02f4f8f1db6f..a9bb2544b2de 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -227,6 +227,7 @@ config INPUT_WINBOND_CIR
227 depends on X86 && PNP 227 depends on X86 && PNP
228 select NEW_LEDS 228 select NEW_LEDS
229 select LEDS_CLASS 229 select LEDS_CLASS
230 select LEDS_TRIGGERS
230 select BITREVERSE 231 select BITREVERSE
231 help 232 help
232 Say Y here if you want to use the IR remote functionality found 233 Say Y here if you want to use the IR remote functionality found
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 216a559f55ea..ea821b546969 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -209,7 +209,7 @@ static inline int hp_sdc_rtc_read_rt(struct timeval *res) {
209 209
210/* Read the i8042 fast handshake timer */ 210/* Read the i8042 fast handshake timer */
211static inline int hp_sdc_rtc_read_fhs(struct timeval *res) { 211static inline int hp_sdc_rtc_read_fhs(struct timeval *res) {
212 uint64_t raw; 212 int64_t raw;
213 unsigned int tenms; 213 unsigned int tenms;
214 214
215 raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2); 215 raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index c806fbf1e174..3b9f588fc747 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -106,8 +106,8 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
106 struct input_dev *input; 106 struct input_dev *input;
107 int err; 107 int err;
108 108
109 if (!pdata || !pdata->steps) { 109 if (!pdata) {
110 dev_err(&pdev->dev, "invalid platform data\n"); 110 dev_err(&pdev->dev, "missing platform data\n");
111 return -ENOENT; 111 return -ENOENT;
112 } 112 }
113 113
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index c4f42311fdec..b064419b90a2 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -230,7 +230,7 @@ out_err:
230 return err; 230 return err;
231} 231}
232 232
233static int bbc_remove(struct of_device *op) 233static int __devexit bbc_remove(struct of_device *op)
234{ 234{
235 struct sparcspkr_state *state = dev_get_drvdata(&op->dev); 235 struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
236 struct input_dev *input_dev = state->input_dev; 236 struct input_dev *input_dev = state->input_dev;
@@ -308,7 +308,7 @@ out_err:
308 return err; 308 return err;
309} 309}
310 310
311static int grover_remove(struct of_device *op) 311static int __devexit grover_remove(struct of_device *op)
312{ 312{
313 struct sparcspkr_state *state = dev_get_drvdata(&op->dev); 313 struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
314 struct grover_beep_info *info = &state->u.grover; 314 struct grover_beep_info *info = &state->u.grover;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index c5a49aba418f..d3f57245420a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -30,6 +30,7 @@
30 * - first public version 30 * - first public version
31 */ 31 */
32#include <linux/poll.h> 32#include <linux/poll.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/init.h> 36#include <linux/init.h>
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 11fd038a078f..a932179c4c9e 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -936,6 +936,15 @@ static struct dmi_system_id dmi_ids[] __initdata = {
936 }, 936 },
937 { 937 {
938 .callback = dmi_matched, 938 .callback = dmi_matched,
939 .ident = "Medion MD 42200",
940 .matches = {
941 DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
942 DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2030"),
943 },
944 .driver_data = keymap_fs_amilo_pro_v2000
945 },
946 {
947 .callback = dmi_matched,
939 .ident = "Medion MD 96500", 948 .ident = "Medion MD 96500",
940 .matches = { 949 .matches = {
941 DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"), 950 DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"),
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 5e6308694408..82811558ec33 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -107,8 +107,7 @@ static const struct dmi_system_id lifebook_dmi_table[] = {
107 .matches = { 107 .matches = {
108 DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"), 108 DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"),
109 }, 109 },
110 .callback = lifebook_set_serio_phys, 110 .callback = lifebook_set_6byte_proto,
111 .driver_data = "isa0060/serio3",
112 }, 111 },
113 { 112 {
114 .ident = "Lifebook B142", 113 .ident = "Lifebook B142",
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index de745d751162..ab5dc5f5fd83 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -219,7 +219,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
219 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | 219 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
220 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL }, 220 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
221 { 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */ 221 { 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */
222 { 73, 0, PS2PP_SIDE_BTN }, 222 { 73, PS2PP_KIND_TRACKMAN, PS2PP_SIDE_BTN }, /* TrackMan FX */
223 { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 223 { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
224 { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 224 { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
225 { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */ 225 { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 690aed905436..07c53798301a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -581,7 +581,7 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
581static int psmouse_extensions(struct psmouse *psmouse, 581static int psmouse_extensions(struct psmouse *psmouse,
582 unsigned int max_proto, bool set_properties) 582 unsigned int max_proto, bool set_properties)
583{ 583{
584 bool synaptics_hardware = true; 584 bool synaptics_hardware = false;
585 585
586/* 586/*
587 * We always check for lifebook because it does not disturb mouse 587 * We always check for lifebook because it does not disturb mouse
@@ -1673,7 +1673,7 @@ static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
1673{ 1673{
1674 int type = *((unsigned int *)kp->arg); 1674 int type = *((unsigned int *)kp->arg);
1675 1675
1676 return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); 1676 return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
1677} 1677}
1678 1678
1679static int __init psmouse_init(void) 1679static int __init psmouse_init(void)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b66ff1ac7dea..f4a61252bcc9 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -652,6 +652,16 @@ static const struct dmi_system_id toshiba_dmi_table[] = {
652 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 652 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
653 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), 653 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
654 }, 654 },
655
656 },
657 {
658 .ident = "Toshiba Portege M300",
659 .matches = {
660 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
661 DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
662 DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
663 },
664
655 }, 665 },
656 { } 666 { }
657}; 667};
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 966b8868f792..a13d80f7da17 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -13,6 +13,7 @@
13#define MOUSEDEV_MINORS 32 13#define MOUSEDEV_MINORS 32
14#define MOUSEDEV_MIX 31 14#define MOUSEDEV_MIX 31
15 15
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include <linux/poll.h> 19#include <linux/poll.h>
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index c4b3fbd1a80f..aa533ceffe34 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -4,7 +4,7 @@
4config SERIO 4config SERIO
5 tristate "Serial I/O support" if EMBEDDED || !X86 5 tristate "Serial I/O support" if EMBEDDED || !X86
6 default y 6 default y
7 ---help--- 7 help
8 Say Yes here if you have any input device that uses serial I/O to 8 Say Yes here if you have any input device that uses serial I/O to
9 communicate with the system. This includes the 9 communicate with the system. This includes the
10 * standard AT keyboard and PS/2 mouse * 10 * standard AT keyboard and PS/2 mouse *
@@ -22,7 +22,7 @@ config SERIO_I8042
22 tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86 22 tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
23 default y 23 default y
24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && !M68K && !BLACKFIN 24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && !M68K && !BLACKFIN
25 ---help--- 25 help
26 i8042 is the chip over which the standard AT keyboard and PS/2 26 i8042 is the chip over which the standard AT keyboard and PS/2
27 mouse are connected to the computer. If you use these devices, 27 mouse are connected to the computer. If you use these devices,
28 you'll need to say Y here. 28 you'll need to say Y here.
@@ -35,7 +35,7 @@ config SERIO_I8042
35config SERIO_SERPORT 35config SERIO_SERPORT
36 tristate "Serial port line discipline" 36 tristate "Serial port line discipline"
37 default y 37 default y
38 ---help--- 38 help
39 Say Y here if you plan to use an input device (mouse, joystick, 39 Say Y here if you plan to use an input device (mouse, joystick,
40 tablet, 6dof) that communicates over the RS232 serial (COM) port. 40 tablet, 6dof) that communicates over the RS232 serial (COM) port.
41 41
@@ -49,7 +49,7 @@ config SERIO_SERPORT
49config SERIO_CT82C710 49config SERIO_CT82C710
50 tristate "ct82c710 Aux port controller" 50 tristate "ct82c710 Aux port controller"
51 depends on X86 51 depends on X86
52 ---help--- 52 help
53 Say Y here if you have a Texas Instruments TravelMate notebook 53 Say Y here if you have a Texas Instruments TravelMate notebook
54 equipped with the ct82c710 chip and want to use a mouse connected 54 equipped with the ct82c710 chip and want to use a mouse connected
55 to the "QuickPort". 55 to the "QuickPort".
@@ -66,7 +66,7 @@ config SERIO_Q40KBD
66config SERIO_PARKBD 66config SERIO_PARKBD
67 tristate "Parallel port keyboard adapter" 67 tristate "Parallel port keyboard adapter"
68 depends on PARPORT 68 depends on PARPORT
69 ---help--- 69 help
70 Say Y here if you built a simple parallel port adapter to attach 70 Say Y here if you built a simple parallel port adapter to attach
71 an additional AT keyboard, XT keyboard or PS/2 mouse. 71 an additional AT keyboard, XT keyboard or PS/2 mouse.
72 72
@@ -124,7 +124,7 @@ config HP_SDC
124 tristate "HP System Device Controller i8042 Support" 124 tristate "HP System Device Controller i8042 Support"
125 depends on (GSC || HP300) && SERIO 125 depends on (GSC || HP300) && SERIO
126 default y 126 default y
127 ---help--- 127 help
128 This option enables support for the "System Device 128 This option enables support for the "System Device
129 Controller", an i8042 carrying microcode to manage a 129 Controller", an i8042 carrying microcode to manage a
130 few miscellaneous devices on some Hewlett Packard systems. 130 few miscellaneous devices on some Hewlett Packard systems.
@@ -168,6 +168,7 @@ config SERIO_MACEPS2
168 168
169config SERIO_LIBPS2 169config SERIO_LIBPS2
170 tristate "PS/2 driver library" if EMBEDDED 170 tristate "PS/2 driver library" if EMBEDDED
171 depends on SERIO_I8042 || SERIO_I8042=n
171 help 172 help
172 Say Y here if you are using a driver for device connected 173 Say Y here if you are using a driver for device connected
173 to a PS/2 port, such as PS/2 mouse or standard AT keyboard. 174 to a PS/2 port, such as PS/2 mouse or standard AT keyboard.
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a39bc4eb902b..a537925f7651 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -327,6 +327,17 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
327 }, 327 },
328 }, 328 },
329 { 329 {
330 /*
331 * Reset and GET ID commands issued via KBD port are
332 * sometimes being delivered to AUX3.
333 */
334 .ident = "Sony Vaio FZ-240E",
335 .matches = {
336 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
337 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
338 },
339 },
340 {
330 .ident = "Amoi M636/A737", 341 .ident = "Amoi M636/A737",
331 .matches = { 342 .matches = {
332 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), 343 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
@@ -661,7 +672,7 @@ static void i8042_pnp_exit(void)
661static int __init i8042_pnp_init(void) 672static int __init i8042_pnp_init(void)
662{ 673{
663 char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 }; 674 char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
664 int pnp_data_busted = false; 675 bool pnp_data_busted = false;
665 int err; 676 int err;
666 677
667#ifdef CONFIG_X86 678#ifdef CONFIG_X86
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index bc56e52b945f..1df02d25aca5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -609,6 +609,8 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
609 str = i8042_read_status(); 609 str = i8042_read_status();
610 if (str & I8042_STR_OBF) { 610 if (str & I8042_STR_OBF) {
611 data = i8042_read_data(); 611 data = i8042_read_data();
612 dbg("%02x <- i8042 (aux_test_irq, %s)",
613 data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
612 if (i8042_irq_being_tested && 614 if (i8042_irq_being_tested &&
613 data == 0xa5 && (str & I8042_STR_AUXDATA)) 615 data == 0xa5 && (str & I8042_STR_AUXDATA))
614 complete(&i8042_aux_irq_delivered); 616 complete(&i8042_aux_irq_delivered);
@@ -750,6 +752,7 @@ static int __init i8042_check_aux(void)
750 * AUX IRQ was never delivered so we need to flush the controller to 752 * AUX IRQ was never delivered so we need to flush the controller to
751 * get rid of the byte we put there; otherwise keyboard may not work. 753 * get rid of the byte we put there; otherwise keyboard may not work.
752 */ 754 */
755 dbg(" -- i8042 (aux irq test timeout)");
753 i8042_flush(); 756 i8042_flush();
754 retval = -1; 757 retval = -1;
755 } 758 }
@@ -833,17 +836,32 @@ static int i8042_controller_selftest(void)
833static int i8042_controller_init(void) 836static int i8042_controller_init(void)
834{ 837{
835 unsigned long flags; 838 unsigned long flags;
839 int n = 0;
840 unsigned char ctr[2];
836 841
837/* 842/*
838 * Save the CTR for restoral on unload / reboot. 843 * Save the CTR for restore on unload / reboot.
839 */ 844 */
840 845
841 if (i8042_command(&i8042_ctr, I8042_CMD_CTL_RCTR)) { 846 do {
842 printk(KERN_ERR "i8042.c: Can't read CTR while initializing i8042.\n"); 847 if (n >= 10) {
843 return -EIO; 848 printk(KERN_ERR
844 } 849 "i8042.c: Unable to get stable CTR read.\n");
850 return -EIO;
851 }
852
853 if (n != 0)
854 udelay(50);
855
856 if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
857 printk(KERN_ERR
858 "i8042.c: Can't read CTR while initializing i8042.\n");
859 return -EIO;
860 }
845 861
846 i8042_initial_ctr = i8042_ctr; 862 } while (n < 2 || ctr[0] != ctr[1]);
863
864 i8042_initial_ctr = i8042_ctr = ctr[0];
847 865
848/* 866/*
849 * Disable the keyboard interface and interrupt. 867 * Disable the keyboard interface and interrupt.
@@ -892,6 +910,12 @@ static int i8042_controller_init(void)
892 return -EIO; 910 return -EIO;
893 } 911 }
894 912
913/*
914 * Flush whatever accumulated while we were disabling keyboard port.
915 */
916
917 i8042_flush();
918
895 return 0; 919 return 0;
896} 920}
897 921
@@ -911,7 +935,7 @@ static void i8042_controller_reset(void)
911 i8042_ctr |= I8042_CTR_KBDDIS | I8042_CTR_AUXDIS; 935 i8042_ctr |= I8042_CTR_KBDDIS | I8042_CTR_AUXDIS;
912 i8042_ctr &= ~(I8042_CTR_KBDINT | I8042_CTR_AUXINT); 936 i8042_ctr &= ~(I8042_CTR_KBDINT | I8042_CTR_AUXINT);
913 937
914 if (i8042_command(&i8042_initial_ctr, I8042_CMD_CTL_WCTR)) 938 if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR))
915 printk(KERN_WARNING "i8042.c: Can't write CTR while resetting.\n"); 939 printk(KERN_WARNING "i8042.c: Can't write CTR while resetting.\n");
916 940
917/* 941/*
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 769ba65a585a..f3876acc3e83 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/input.h> 19#include <linux/input.h>
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index b03009bb7468..27fdaaffbb40 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/sched.h>
12#include <linux/slab.h> 13#include <linux/slab.h>
13#include <linux/smp_lock.h> 14#include <linux/smp_lock.h>
14#include <linux/poll.h> 15#include <linux/poll.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index b9694b6445d0..6d345112bcb7 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/sched.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index f06332c9e21b..c21e6d3a8844 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -645,7 +645,7 @@ static int __devinit ad7879_probe(struct spi_device *spi)
645 kfree(ts); 645 kfree(ts);
646 } 646 }
647 647
648 return 0; 648 return error;
649} 649}
650 650
651static int __devexit ad7879_remove(struct spi_device *spi) 651static int __devexit ad7879_remove(struct spi_device *spi)
@@ -732,7 +732,7 @@ static int __devinit ad7879_probe(struct i2c_client *client,
732 kfree(ts); 732 kfree(ts);
733 } 733 }
734 734
735 return 0; 735 return error;
736} 736}
737 737
738static int __devexit ad7879_remove(struct i2c_client *client) 738static int __devexit ad7879_remove(struct i2c_client *client)
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 57d26360f64e..dc506ab99cac 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/sched.h>
21#include <linux/seq_file.h> 22#include <linux/seq_file.h>
22#include <linux/skbuff.h> 23#include <linux/skbuff.h>
23#include <linux/workqueue.h> 24#include <linux/workqueue.h>
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 8b256a617c8a..3697c409bec6 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -16,6 +16,7 @@
16#else 16#else
17#include <linux/fs.h> 17#include <linux/fs.h>
18#endif 18#endif
19#include <linux/sched.h>
19#include <linux/isdnif.h> 20#include <linux/isdnif.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include "isdn_divert.h" 22#include "isdn_divert.h"
diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c
index d30ce5b978c2..85a8fd8dd0b7 100644
--- a/drivers/isdn/hisax/arcofi.c
+++ b/drivers/isdn/hisax/arcofi.c
@@ -10,6 +10,7 @@
10 * 10 *
11 */ 11 */
12 12
13#include <linux/sched.h>
13#include "hisax.h" 14#include "hisax.h"
14#include "isdnl1.h" 15#include "isdnl1.h"
15#include "isac.h" 16#include "isac.h"
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 5c46a7130e06..8d22f50760eb 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h>
14#include "hisax.h" 15#include "hisax.h"
15#include "hfc_2bds0.h" 16#include "hfc_2bds0.h"
16#include "isdnl1.h" 17#include "isdnl1.h"
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index d110a77940a4..10914731b304 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -20,6 +20,7 @@
20#include "hfc_pci.h" 20#include "hfc_pci.h"
21#include "isdnl1.h" 21#include "isdnl1.h"
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/sched.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24 25
25static const char *hfcpci_revision = "$Revision: 1.48.2.4 $"; 26static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 8f9f4912de32..90b35e1a4b7e 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -11,6 +11,7 @@
11 * 11 *
12 */ 12 */
13 13
14#include <linux/cred.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/poll.h> 16#include <linux/poll.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 8991d2c8ee4a..8bcae28c4409 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/poll.h> 14#include <linux/poll.h>
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/sched.h>
16#include <linux/smp_lock.h> 17#include <linux/smp_lock.h>
17 18
18#include "hysdn_defs.h" 19#include "hysdn_defs.h"
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 8c66bcb953a1..123c1d6c43b4 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/sched.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/mm.h> 28#include <linux/mm.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index e075e8d2fce0..30f0f45e3139 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -27,6 +27,7 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/sched.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/interrupt.h> 32#include <linux/interrupt.h>
32#include <linux/workqueue.h> 33#include <linux/workqueue.h>
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index dd0acd06750b..5a0774880d56 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/sched.h>
11#include "includes.h" 12#include "includes.h"
12#include "hardware.h" 13#include "hardware.h"
13#include "card.h" 14#include "card.h"
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7c8e7122aaa9..e4f599f20e38 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -150,9 +150,9 @@ config LEDS_LP3944
150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" 150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
151 depends on LEDS_CLASS && I2C 151 depends on LEDS_CLASS && I2C
152 help 152 help
153 This option enables support for LEDs connected to the National 153 This option enables support for LEDs connected to the National
154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as 154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as
155 Fun Light Chip. 155 Fun Light Chip.
156 156
157 To compile this driver as a module, choose M here: the 157 To compile this driver as a module, choose M here: the
158 module will be called leds-lp3944. 158 module will be called leds-lp3944.
@@ -195,6 +195,13 @@ config LEDS_PCA955X
195 LED driver chips accessed via the I2C bus. Supported 195 LED driver chips accessed via the I2C bus. Supported
196 devices include PCA9550, PCA9551, PCA9552, and PCA9553. 196 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
197 197
198config LEDS_WM831X_STATUS
199 tristate "LED support for status LEDs on WM831x PMICs"
200 depends on LEDS_CLASS && MFD_WM831X
201 help
202 This option enables support for the status LEDs of the WM831x
203 series of PMICs.
204
198config LEDS_WM8350 205config LEDS_WM8350
199 tristate "LED Support for WM8350 AudioPlus PMIC" 206 tristate "LED Support for WM8350 AudioPlus PMIC"
200 depends on LEDS_CLASS && MFD_WM8350 207 depends on LEDS_CLASS && MFD_WM8350
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e8cdcf77a4c3..46d72704d606 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
29obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
29obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 30obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
30obj-$(CONFIG_LEDS_PWM) += leds-pwm.o 31obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
31 32
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index f2242db54016..a498135a4e80 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
153 .flags = LED_CORE_SUSPENDRESUME, 153 .flags = LED_CORE_SUSPENDRESUME,
154}; 154};
155 155
156static int __init clevo_mail_led_probe(struct platform_device *pdev) 156static int __devinit clevo_mail_led_probe(struct platform_device *pdev)
157{ 157{
158 return led_classdev_register(&pdev->dev, &clevo_mail_led); 158 return led_classdev_register(&pdev->dev, &clevo_mail_led);
159} 159}
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 059aa2924b1c..8816806accd2 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -28,7 +28,7 @@ static void qube_front_led_set(struct led_classdev *led_cdev,
28} 28}
29 29
30static struct led_classdev qube_front_led = { 30static struct led_classdev qube_front_led = {
31 .name = "qube-front", 31 .name = "qube::front",
32 .brightness = LED_FULL, 32 .brightness = LED_FULL,
33 .brightness_set = qube_front_led_set, 33 .brightness_set = qube_front_led_set,
34 .default_trigger = "ide-disk", 34 .default_trigger = "ide-disk",
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 5f1ce810815f..defc212105f3 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -49,7 +49,7 @@ static void raq_web_led_set(struct led_classdev *led_cdev,
49} 49}
50 50
51static struct led_classdev raq_web_led = { 51static struct led_classdev raq_web_led = {
52 .name = "raq-web", 52 .name = "raq::web",
53 .brightness_set = raq_web_led_set, 53 .brightness_set = raq_web_led_set,
54}; 54};
55 55
@@ -70,7 +70,7 @@ static void raq_power_off_led_set(struct led_classdev *led_cdev,
70} 70}
71 71
72static struct led_classdev raq_power_off_led = { 72static struct led_classdev raq_power_off_led = {
73 .name = "raq-power-off", 73 .name = "raq::power-off",
74 .brightness_set = raq_power_off_led_set, 74 .brightness_set = raq_power_off_led_set,
75 .default_trigger = "power-off", 75 .default_trigger = "power-off",
76}; 76};
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 6b06638eb5b4..e5225d28f392 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -78,9 +78,11 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
78{ 78{
79 int ret, state; 79 int ret, state;
80 80
81 led_dat->gpio = -1;
82
81 /* skip leds that aren't available */ 83 /* skip leds that aren't available */
82 if (!gpio_is_valid(template->gpio)) { 84 if (!gpio_is_valid(template->gpio)) {
83 printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", 85 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
84 template->gpio, template->name); 86 template->gpio, template->name);
85 return 0; 87 return 0;
86 } 88 }
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index dba8921240f2..adc561eb59d2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -19,9 +19,6 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/leds-pca9532.h> 20#include <linux/leds-pca9532.h>
21 21
22static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
23I2C_CLIENT_INSMOD_1(pca9532);
24
25#define PCA9532_REG_PSC(i) (0x2+(i)*2) 22#define PCA9532_REG_PSC(i) (0x2+(i)*2)
26#define PCA9532_REG_PWM(i) (0x3+(i)*2) 23#define PCA9532_REG_PWM(i) (0x3+(i)*2)
27#define PCA9532_REG_LS0 0x6 24#define PCA9532_REG_LS0 0x6
@@ -34,7 +31,7 @@ struct pca9532_data {
34 struct i2c_client *client; 31 struct i2c_client *client;
35 struct pca9532_led leds[16]; 32 struct pca9532_led leds[16];
36 struct mutex update_lock; 33 struct mutex update_lock;
37 struct input_dev *idev; 34 struct input_dev *idev;
38 struct work_struct work; 35 struct work_struct work;
39 u8 pwm[2]; 36 u8 pwm[2];
40 u8 psc[2]; 37 u8 psc[2];
@@ -53,9 +50,9 @@ MODULE_DEVICE_TABLE(i2c, pca9532_id);
53 50
54static struct i2c_driver pca9532_driver = { 51static struct i2c_driver pca9532_driver = {
55 .driver = { 52 .driver = {
56 .name = "pca9532", 53 .name = "pca9532",
57 }, 54 },
58 .probe = pca9532_probe, 55 .probe = pca9532_probe,
59 .remove = pca9532_remove, 56 .remove = pca9532_remove,
60 .id_table = pca9532_id, 57 .id_table = pca9532_id,
61}; 58};
@@ -149,7 +146,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
149 146
150 if (*delay_on == 0 && *delay_off == 0) { 147 if (*delay_on == 0 && *delay_off == 0) {
151 /* led subsystem ask us for a blink rate */ 148 /* led subsystem ask us for a blink rate */
152 *delay_on = 1000; 149 *delay_on = 1000;
153 *delay_off = 1000; 150 *delay_off = 1000;
154 } 151 }
155 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6) 152 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
@@ -227,7 +224,7 @@ static int pca9532_configure(struct i2c_client *client,
227 break; 224 break;
228 case PCA9532_TYPE_LED: 225 case PCA9532_TYPE_LED:
229 led->state = pled->state; 226 led->state = pled->state;
230 led->name = pled->name; 227 led->name = pled->name;
231 led->ldev.name = led->name; 228 led->ldev.name = led->name;
232 led->ldev.brightness = LED_OFF; 229 led->ldev.brightness = LED_OFF;
233 led->ldev.brightness_set = pca9532_set_brightness; 230 led->ldev.brightness_set = pca9532_set_brightness;
@@ -254,7 +251,7 @@ static int pca9532_configure(struct i2c_client *client,
254 data->idev->name = pled->name; 251 data->idev->name = pled->name;
255 data->idev->phys = "i2c/pca9532"; 252 data->idev->phys = "i2c/pca9532";
256 data->idev->id.bustype = BUS_HOST; 253 data->idev->id.bustype = BUS_HOST;
257 data->idev->id.vendor = 0x001f; 254 data->idev->id.vendor = 0x001f;
258 data->idev->id.product = 0x0001; 255 data->idev->id.product = 0x0001;
259 data->idev->id.version = 0x0100; 256 data->idev->id.version = 0x0100;
260 data->idev->evbit[0] = BIT_MASK(EV_SND); 257 data->idev->evbit[0] = BIT_MASK(EV_SND);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
new file mode 100644
index 000000000000..c586d05e336a
--- /dev/null
+++ b/drivers/leds/leds-wm831x-status.c
@@ -0,0 +1,341 @@
1/*
2 * LED driver for WM831x status LEDs
3 *
4 * Copyright(C) 2009 Wolfson Microelectronics PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/leds.h>
16#include <linux/err.h>
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/status.h>
20
21
22struct wm831x_status {
23 struct led_classdev cdev;
24 struct wm831x *wm831x;
25 struct work_struct work;
26 struct mutex mutex;
27
28 spinlock_t value_lock;
29 int reg; /* Control register */
30 int reg_val; /* Control register value */
31
32 int blink;
33 int blink_time;
34 int blink_cyc;
35 int src;
36 enum led_brightness brightness;
37};
38
39#define to_wm831x_status(led_cdev) \
40 container_of(led_cdev, struct wm831x_status, cdev)
41
42static void wm831x_status_work(struct work_struct *work)
43{
44 struct wm831x_status *led = container_of(work, struct wm831x_status,
45 work);
46 unsigned long flags;
47
48 mutex_lock(&led->mutex);
49
50 led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK |
51 WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK);
52
53 spin_lock_irqsave(&led->value_lock, flags);
54
55 led->reg_val |= led->src << WM831X_LED_SRC_SHIFT;
56 if (led->blink) {
57 led->reg_val |= 2 << WM831X_LED_MODE_SHIFT;
58 led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT;
59 led->reg_val |= led->blink_cyc;
60 } else {
61 if (led->brightness != LED_OFF)
62 led->reg_val |= 1 << WM831X_LED_MODE_SHIFT;
63 }
64
65 spin_unlock_irqrestore(&led->value_lock, flags);
66
67 wm831x_reg_write(led->wm831x, led->reg, led->reg_val);
68
69 mutex_unlock(&led->mutex);
70}
71
72static void wm831x_status_set(struct led_classdev *led_cdev,
73 enum led_brightness value)
74{
75 struct wm831x_status *led = to_wm831x_status(led_cdev);
76 unsigned long flags;
77
78 spin_lock_irqsave(&led->value_lock, flags);
79 led->brightness = value;
80 if (value == LED_OFF)
81 led->blink = 0;
82 schedule_work(&led->work);
83 spin_unlock_irqrestore(&led->value_lock, flags);
84}
85
86static int wm831x_status_blink_set(struct led_classdev *led_cdev,
87 unsigned long *delay_on,
88 unsigned long *delay_off)
89{
90 struct wm831x_status *led = to_wm831x_status(led_cdev);
91 unsigned long flags;
92 int ret = 0;
93
94 /* Pick some defaults if we've not been given times */
95 if (*delay_on == 0 && *delay_off == 0) {
96 *delay_on = 250;
97 *delay_off = 250;
98 }
99
100 spin_lock_irqsave(&led->value_lock, flags);
101
102 /* We only have a limited selection of settings, see if we can
103 * support the configuration we're being given */
104 switch (*delay_on) {
105 case 1000:
106 led->blink_time = 0;
107 break;
108 case 250:
109 led->blink_time = 1;
110 break;
111 case 125:
112 led->blink_time = 2;
113 break;
114 case 62:
115 case 63:
116 /* Actually 62.5ms */
117 led->blink_time = 3;
118 break;
119 default:
120 ret = -EINVAL;
121 break;
122 }
123
124 if (ret == 0) {
125 switch (*delay_off / *delay_on) {
126 case 1:
127 led->blink_cyc = 0;
128 break;
129 case 3:
130 led->blink_cyc = 1;
131 break;
132 case 4:
133 led->blink_cyc = 2;
134 break;
135 case 8:
136 led->blink_cyc = 3;
137 break;
138 default:
139 ret = -EINVAL;
140 break;
141 }
142 }
143
144 if (ret == 0)
145 led->blink = 1;
146 else
147 led->blink = 0;
148
149 /* Always update; if we fail turn off blinking since we expect
150 * a software fallback. */
151 schedule_work(&led->work);
152
153 spin_unlock_irqrestore(&led->value_lock, flags);
154
155 return ret;
156}
157
158static const char *led_src_texts[] = {
159 "otp",
160 "power",
161 "charger",
162 "soft",
163};
164
165static ssize_t wm831x_status_src_show(struct device *dev,
166 struct device_attribute *attr, char *buf)
167{
168 struct led_classdev *led_cdev = dev_get_drvdata(dev);
169 struct wm831x_status *led = to_wm831x_status(led_cdev);
170 int i;
171 ssize_t ret = 0;
172
173 mutex_lock(&led->mutex);
174
175 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++)
176 if (i == led->src)
177 ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]);
178 else
179 ret += sprintf(&buf[ret], "%s ", led_src_texts[i]);
180
181 mutex_unlock(&led->mutex);
182
183 ret += sprintf(&buf[ret], "\n");
184
185 return ret;
186}
187
188static ssize_t wm831x_status_src_store(struct device *dev,
189 struct device_attribute *attr,
190 const char *buf, size_t size)
191{
192 struct led_classdev *led_cdev = dev_get_drvdata(dev);
193 struct wm831x_status *led = to_wm831x_status(led_cdev);
194 char name[20];
195 int i;
196 size_t len;
197
198 name[sizeof(name) - 1] = '\0';
199 strncpy(name, buf, sizeof(name) - 1);
200 len = strlen(name);
201
202 if (len && name[len - 1] == '\n')
203 name[len - 1] = '\0';
204
205 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) {
206 if (!strcmp(name, led_src_texts[i])) {
207 mutex_lock(&led->mutex);
208
209 led->src = i;
210 schedule_work(&led->work);
211
212 mutex_unlock(&led->mutex);
213 }
214 }
215
216 return size;
217}
218
219static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
220
221static int wm831x_status_probe(struct platform_device *pdev)
222{
223 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
224 struct wm831x_pdata *chip_pdata;
225 struct wm831x_status_pdata pdata;
226 struct wm831x_status *drvdata;
227 struct resource *res;
228 int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
229 int ret;
230
231 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
232 if (res == NULL) {
233 dev_err(&pdev->dev, "No I/O resource\n");
234 ret = -EINVAL;
235 goto err;
236 }
237
238 drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
239 if (!drvdata)
240 return -ENOMEM;
241 dev_set_drvdata(&pdev->dev, drvdata);
242
243 drvdata->wm831x = wm831x;
244 drvdata->reg = res->start;
245
246 if (wm831x->dev->platform_data)
247 chip_pdata = wm831x->dev->platform_data;
248 else
249 chip_pdata = NULL;
250
251 memset(&pdata, 0, sizeof(pdata));
252 if (chip_pdata && chip_pdata->status[id])
253 memcpy(&pdata, chip_pdata->status[id], sizeof(pdata));
254 else
255 pdata.name = dev_name(&pdev->dev);
256
257 mutex_init(&drvdata->mutex);
258 INIT_WORK(&drvdata->work, wm831x_status_work);
259 spin_lock_init(&drvdata->value_lock);
260
261 /* We cache the configuration register and read startup values
262 * from it. */
263 drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg);
264
265 if (drvdata->reg_val & WM831X_LED_MODE_MASK)
266 drvdata->brightness = LED_FULL;
267 else
268 drvdata->brightness = LED_OFF;
269
270 /* Set a default source if configured, otherwise leave the
271 * current hardware setting.
272 */
273 if (pdata.default_src == WM831X_STATUS_PRESERVE) {
274 drvdata->src = drvdata->reg_val;
275 drvdata->src &= WM831X_LED_SRC_MASK;
276 drvdata->src >>= WM831X_LED_SRC_SHIFT;
277 } else {
278 drvdata->src = pdata.default_src - 1;
279 }
280
281 drvdata->cdev.name = pdata.name;
282 drvdata->cdev.default_trigger = pdata.default_trigger;
283 drvdata->cdev.brightness_set = wm831x_status_set;
284 drvdata->cdev.blink_set = wm831x_status_blink_set;
285
286 ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
287 if (ret < 0) {
288 dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
289 goto err_led;
290 }
291
292 ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
293 if (ret != 0)
294 dev_err(&pdev->dev,
295 "No source control for LED: %d\n", ret);
296
297 return 0;
298
299err_led:
300 led_classdev_unregister(&drvdata->cdev);
301 kfree(drvdata);
302err:
303 return ret;
304}
305
306static int wm831x_status_remove(struct platform_device *pdev)
307{
308 struct wm831x_status *drvdata = platform_get_drvdata(pdev);
309
310 device_remove_file(drvdata->cdev.dev, &dev_attr_src);
311 led_classdev_unregister(&drvdata->cdev);
312 kfree(drvdata);
313
314 return 0;
315}
316
317static struct platform_driver wm831x_status_driver = {
318 .driver = {
319 .name = "wm831x-status",
320 .owner = THIS_MODULE,
321 },
322 .probe = wm831x_status_probe,
323 .remove = wm831x_status_remove,
324};
325
326static int __devinit wm831x_status_init(void)
327{
328 return platform_driver_register(&wm831x_status_driver);
329}
330module_init(wm831x_status_init);
331
332static void wm831x_status_exit(void)
333{
334 platform_driver_unregister(&wm831x_status_driver);
335}
336module_exit(wm831x_status_exit);
337
338MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
339MODULE_DESCRIPTION("WM831x status LED driver");
340MODULE_LICENSE("GPL");
341MODULE_ALIAS("platform:wm831x-status");
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 1bc5db4ece0d..f5913372d691 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -44,22 +44,22 @@ static void gpio_trig_work(struct work_struct *work)
44 struct gpio_trig_data, work); 44 struct gpio_trig_data, work);
45 int tmp; 45 int tmp;
46 46
47 if (!gpio_data->gpio) 47 if (!gpio_data->gpio)
48 return; 48 return;
49 49
50 tmp = gpio_get_value(gpio_data->gpio); 50 tmp = gpio_get_value(gpio_data->gpio);
51 if (gpio_data->inverted) 51 if (gpio_data->inverted)
52 tmp = !tmp; 52 tmp = !tmp;
53 53
54 if (tmp) { 54 if (tmp) {
55 if (gpio_data->desired_brightness) 55 if (gpio_data->desired_brightness)
56 led_set_brightness(gpio_data->led, 56 led_set_brightness(gpio_data->led,
57 gpio_data->desired_brightness); 57 gpio_data->desired_brightness);
58 else 58 else
59 led_set_brightness(gpio_data->led, LED_FULL); 59 led_set_brightness(gpio_data->led, LED_FULL);
60 } else { 60 } else {
61 led_set_brightness(gpio_data->led, LED_OFF); 61 led_set_brightness(gpio_data->led, LED_OFF);
62 } 62 }
63} 63}
64 64
65static ssize_t gpio_trig_brightness_show(struct device *dev, 65static ssize_t gpio_trig_brightness_show(struct device *dev,
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 18648180db02..daaf86631647 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -16,6 +16,7 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/sched.h>
19#include "lg.h" 20#include "lg.h"
20 21
21/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */ 22/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b4d3f7ca554f..bd1632388e4a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file)
508 * uses: reading and writing a character device called /dev/lguest. All the 508 * uses: reading and writing a character device called /dev/lguest. All the
509 * work happens in the read(), write() and close() routines: 509 * work happens in the read(), write() and close() routines:
510 */ 510 */
511static struct file_operations lguest_fops = { 511static const struct file_operations lguest_fops = {
512 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
513 .release = close, 513 .release = close,
514 .write = write, 514 .write = write,
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index fde377c60cca..556f0feaa4df 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg)
124 return data; 124 return data;
125} 125}
126 126
127static struct i2c_driver thermostat_driver;
128
127static int 129static int
128attach_thermostat(struct i2c_adapter *adapter) 130attach_thermostat(struct i2c_adapter *adapter)
129{ 131{
@@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter)
148 * Let i2c-core delete that device on driver removal. 150 * Let i2c-core delete that device on driver removal.
149 * This is safe because i2c-core holds the core_lock mutex for us. 151 * This is safe because i2c-core holds the core_lock mutex for us.
150 */ 152 */
151 list_add_tail(&client->detected, &client->driver->clients); 153 list_add_tail(&client->detected, &thermostat_driver.clients);
152 return 0; 154 return 0;
153} 155}
154 156
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index a028598af2d3..ea32c7e5a9af 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -286,6 +286,8 @@ struct fcu_fan_table fcu_fans[] = {
286 }, 286 },
287}; 287};
288 288
289static struct i2c_driver therm_pm72_driver;
290
289/* 291/*
290 * Utility function to create an i2c_client structure and 292 * Utility function to create an i2c_client structure and
291 * attach it to one of u3 adapters 293 * attach it to one of u3 adapters
@@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
318 * Let i2c-core delete that device on driver removal. 320 * Let i2c-core delete that device on driver removal.
319 * This is safe because i2c-core holds the core_lock mutex for us. 321 * This is safe because i2c-core holds the core_lock mutex for us.
320 */ 322 */
321 list_add_tail(&clt->detected, &clt->driver->clients); 323 list_add_tail(&clt->detected, &therm_pm72_driver.clients);
322 return clt; 324 return clt;
323} 325}
324 326
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 55ad95671387..d242976bcfe7 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -72,7 +72,7 @@ static void pmu_led_set(struct led_classdev *led_cdev,
72} 72}
73 73
74static struct led_classdev pmu_led = { 74static struct led_classdev pmu_led = {
75 .name = "pmu-front-led", 75 .name = "pmu-led::front",
76#ifdef CONFIG_ADB_PMU_LED_IDE 76#ifdef CONFIG_ADB_PMU_LED_IDE
77 .default_trigger = "ide-disk", 77 .default_trigger = "ide-disk",
78#endif 78#endif
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index b40fb9b6c862..6f308a4757ee 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -405,7 +405,11 @@ static int __init via_pmu_start(void)
405 printk(KERN_ERR "via-pmu: can't map interrupt\n"); 405 printk(KERN_ERR "via-pmu: can't map interrupt\n");
406 return -ENODEV; 406 return -ENODEV;
407 } 407 }
408 if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) { 408 /* We set IRQF_TIMER because we don't want the interrupt to be disabled
409 * between the 2 passes of driver suspend, we control our own disabling
410 * for that one
411 */
412 if (request_irq(irq, via_pmu_interrupt, IRQF_TIMER, "VIA-PMU", (void *)0)) {
409 printk(KERN_ERR "via-pmu: can't request irq %d\n", irq); 413 printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
410 return -ENODEV; 414 return -ENODEV;
411 } 415 }
@@ -419,7 +423,7 @@ static int __init via_pmu_start(void)
419 gpio_irq = irq_of_parse_and_map(gpio_node, 0); 423 gpio_irq = irq_of_parse_and_map(gpio_node, 0);
420 424
421 if (gpio_irq != NO_IRQ) { 425 if (gpio_irq != NO_IRQ) {
422 if (request_irq(gpio_irq, gpio1_interrupt, 0, 426 if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER,
423 "GPIO1 ADB", (void *)0)) 427 "GPIO1 ADB", (void *)0))
424 printk(KERN_ERR "pmu: can't get irq %d" 428 printk(KERN_ERR "pmu: can't get irq %d"
425 " (GPIO1)\n", gpio_irq); 429 " (GPIO1)\n", gpio_irq);
@@ -925,8 +929,7 @@ proc_write_options(struct file *file, const char __user *buffer,
925 929
926#ifdef CONFIG_ADB 930#ifdef CONFIG_ADB
927/* Send an ADB command */ 931/* Send an ADB command */
928static int 932static int pmu_send_request(struct adb_request *req, int sync)
929pmu_send_request(struct adb_request *req, int sync)
930{ 933{
931 int i, ret; 934 int i, ret;
932 935
@@ -1005,16 +1008,11 @@ pmu_send_request(struct adb_request *req, int sync)
1005} 1008}
1006 1009
1007/* Enable/disable autopolling */ 1010/* Enable/disable autopolling */
1008static int 1011static int __pmu_adb_autopoll(int devs)
1009pmu_adb_autopoll(int devs)
1010{ 1012{
1011 struct adb_request req; 1013 struct adb_request req;
1012 1014
1013 if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
1014 return -ENXIO;
1015
1016 if (devs) { 1015 if (devs) {
1017 adb_dev_map = devs;
1018 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, 1016 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
1019 adb_dev_map >> 8, adb_dev_map); 1017 adb_dev_map >> 8, adb_dev_map);
1020 pmu_adb_flags = 2; 1018 pmu_adb_flags = 2;
@@ -1027,9 +1025,17 @@ pmu_adb_autopoll(int devs)
1027 return 0; 1025 return 0;
1028} 1026}
1029 1027
1028static int pmu_adb_autopoll(int devs)
1029{
1030 if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
1031 return -ENXIO;
1032
1033 adb_dev_map = devs;
1034 return __pmu_adb_autopoll(devs);
1035}
1036
1030/* Reset the ADB bus */ 1037/* Reset the ADB bus */
1031static int 1038static int pmu_adb_reset_bus(void)
1032pmu_adb_reset_bus(void)
1033{ 1039{
1034 struct adb_request req; 1040 struct adb_request req;
1035 int save_autopoll = adb_dev_map; 1041 int save_autopoll = adb_dev_map;
@@ -1038,13 +1044,13 @@ pmu_adb_reset_bus(void)
1038 return -ENXIO; 1044 return -ENXIO;
1039 1045
1040 /* anyone got a better idea?? */ 1046 /* anyone got a better idea?? */
1041 pmu_adb_autopoll(0); 1047 __pmu_adb_autopoll(0);
1042 1048
1043 req.nbytes = 5; 1049 req.nbytes = 4;
1044 req.done = NULL; 1050 req.done = NULL;
1045 req.data[0] = PMU_ADB_CMD; 1051 req.data[0] = PMU_ADB_CMD;
1046 req.data[1] = 0; 1052 req.data[1] = ADB_BUSRESET;
1047 req.data[2] = ADB_BUSRESET; 1053 req.data[2] = 0;
1048 req.data[3] = 0; 1054 req.data[3] = 0;
1049 req.data[4] = 0; 1055 req.data[4] = 0;
1050 req.reply_len = 0; 1056 req.reply_len = 0;
@@ -1056,7 +1062,7 @@ pmu_adb_reset_bus(void)
1056 pmu_wait_complete(&req); 1062 pmu_wait_complete(&req);
1057 1063
1058 if (save_autopoll != 0) 1064 if (save_autopoll != 0)
1059 pmu_adb_autopoll(save_autopoll); 1065 __pmu_adb_autopoll(save_autopoll);
1060 1066
1061 return 0; 1067 return 0;
1062} 1068}
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 529886c7a826..ed6426a10773 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client,
115 return rc; 115 return rc;
116} 116}
117 117
118static struct i2c_driver wf_lm75_driver;
119
118static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, 120static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
119 u8 addr, int ds1775, 121 u8 addr, int ds1775,
120 const char *loc) 122 const char *loc)
@@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
157 * Let i2c-core delete that device on driver removal. 159 * Let i2c-core delete that device on driver removal.
158 * This is safe because i2c-core holds the core_lock mutex for us. 160 * This is safe because i2c-core holds the core_lock mutex for us.
159 */ 161 */
160 list_add_tail(&client->detected, &client->driver->clients); 162 list_add_tail(&client->detected, &wf_lm75_driver.clients);
161 return client; 163 return client;
162 fail: 164 fail:
163 return NULL; 165 return NULL;
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index e2a55ecda2b2..a67b349319e9 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client,
88 return rc; 88 return rc;
89} 89}
90 90
91static struct i2c_driver wf_max6690_driver;
92
91static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, 93static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
92 u8 addr, const char *loc) 94 u8 addr, const char *loc)
93{ 95{
@@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
119 * Let i2c-core delete that device on driver removal. 121 * Let i2c-core delete that device on driver removal.
120 * This is safe because i2c-core holds the core_lock mutex for us. 122 * This is safe because i2c-core holds the core_lock mutex for us.
121 */ 123 */
122 list_add_tail(&client->detected, &client->driver->clients); 124 list_add_tail(&client->detected, &wf_max6690_driver.clients);
123 return client; 125 return client;
124 126
125 fail: 127 fail:
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 5da729e58f99..e20330a28959 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = {
194 .owner = THIS_MODULE, 194 .owner = THIS_MODULE,
195}; 195};
196 196
197static struct i2c_driver wf_sat_driver;
198
197static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) 199static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
198{ 200{
199 struct i2c_board_info info; 201 struct i2c_board_info info;
@@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
222 * Let i2c-core delete that device on driver removal. 224 * Let i2c-core delete that device on driver removal.
223 * This is safe because i2c-core holds the core_lock mutex for us. 225 * This is safe because i2c-core holds the core_lock mutex for us.
224 */ 226 */
225 list_add_tail(&client->detected, &client->driver->clients); 227 list_add_tail(&client->detected, &wf_sat_driver.clients);
226} 228}
227 229
228static int wf_sat_probe(struct i2c_client *client, 230static int wf_sat_probe(struct i2c_client *client,
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1dc4185bd781..e355e7f6a536 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
46obj-$(CONFIG_DM_ZERO) += dm-zero.o 46obj-$(CONFIG_DM_ZERO) += dm-zero.o
47 47
48quiet_cmd_unroll = UNROLL $@ 48quiet_cmd_unroll = UNROLL $@
49 cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \ 49 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
50 < $< > $@ || ( rm -f $@ && exit 1 ) 50 < $< > $@ || ( rm -f $@ && exit 1 )
51 51
52ifeq ($(CONFIG_ALTIVEC),y) 52ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
59 59
60targets += raid6int1.c 60targets += raid6int1.c
61$(obj)/raid6int1.c: UNROLL := 1 61$(obj)/raid6int1.c: UNROLL := 1
62$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 62$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
63 $(call if_changed,unroll) 63 $(call if_changed,unroll)
64 64
65targets += raid6int2.c 65targets += raid6int2.c
66$(obj)/raid6int2.c: UNROLL := 2 66$(obj)/raid6int2.c: UNROLL := 2
67$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 67$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
68 $(call if_changed,unroll) 68 $(call if_changed,unroll)
69 69
70targets += raid6int4.c 70targets += raid6int4.c
71$(obj)/raid6int4.c: UNROLL := 4 71$(obj)/raid6int4.c: UNROLL := 4
72$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 72$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
73 $(call if_changed,unroll) 73 $(call if_changed,unroll)
74 74
75targets += raid6int8.c 75targets += raid6int8.c
76$(obj)/raid6int8.c: UNROLL := 8 76$(obj)/raid6int8.c: UNROLL := 8
77$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 77$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
78 $(call if_changed,unroll) 78 $(call if_changed,unroll)
79 79
80targets += raid6int16.c 80targets += raid6int16.c
81$(obj)/raid6int16.c: UNROLL := 16 81$(obj)/raid6int16.c: UNROLL := 16
82$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 82$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
83 $(call if_changed,unroll) 83 $(call if_changed,unroll)
84 84
85targets += raid6int32.c 85targets += raid6int32.c
86$(obj)/raid6int32.c: UNROLL := 32 86$(obj)/raid6int32.c: UNROLL := 32
87$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 87$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
88 $(call if_changed,unroll) 88 $(call if_changed,unroll)
89 89
90CFLAGS_raid6altivec1.o += $(altivec_flags) 90CFLAGS_raid6altivec1.o += $(altivec_flags)
91targets += raid6altivec1.c 91targets += raid6altivec1.c
92$(obj)/raid6altivec1.c: UNROLL := 1 92$(obj)/raid6altivec1.c: UNROLL := 1
93$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 93$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
94 $(call if_changed,unroll) 94 $(call if_changed,unroll)
95 95
96CFLAGS_raid6altivec2.o += $(altivec_flags) 96CFLAGS_raid6altivec2.o += $(altivec_flags)
97targets += raid6altivec2.c 97targets += raid6altivec2.c
98$(obj)/raid6altivec2.c: UNROLL := 2 98$(obj)/raid6altivec2.c: UNROLL := 2
99$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 99$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
100 $(call if_changed,unroll) 100 $(call if_changed,unroll)
101 101
102CFLAGS_raid6altivec4.o += $(altivec_flags) 102CFLAGS_raid6altivec4.o += $(altivec_flags)
103targets += raid6altivec4.c 103targets += raid6altivec4.c
104$(obj)/raid6altivec4.c: UNROLL := 4 104$(obj)/raid6altivec4.c: UNROLL := 4
105$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 105$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
106 $(call if_changed,unroll) 106 $(call if_changed,unroll)
107 107
108CFLAGS_raid6altivec8.o += $(altivec_flags) 108CFLAGS_raid6altivec8.o += $(altivec_flags)
109targets += raid6altivec8.c 109targets += raid6altivec8.c
110$(obj)/raid6altivec8.c: UNROLL := 8 110$(obj)/raid6altivec8.c: UNROLL := 8
111$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 111$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
112 $(call if_changed,unroll) 112 $(call if_changed,unroll)
113 113
114quiet_cmd_mktable = TABLE $@ 114quiet_cmd_mktable = TABLE $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6986b0059d23..60e2b322db11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
1624 bitmap->offset = mddev->bitmap_offset; 1624 bitmap->offset = mddev->bitmap_offset;
1625 if (file) { 1625 if (file) {
1626 get_file(file); 1626 get_file(file);
1627 do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX, 1627 /* As future accesses to this file will use bmap,
1628 SYNC_FILE_RANGE_WAIT_BEFORE | 1628 * and bypass the page cache, we must sync the file
1629 SYNC_FILE_RANGE_WRITE | 1629 * first.
1630 SYNC_FILE_RANGE_WAIT_AFTER); 1630 */
1631 vfs_fsync(file, file->f_dentry, 1);
1631 } 1632 }
1632 /* read superblock from bitmap file (this sets bitmap->chunksize) */ 1633 /* read superblock from bitmap file (this sets bitmap->chunksize) */
1633 err = bitmap_read_sb(bitmap); 1634 err = bitmap_read_sb(bitmap);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 556acff3952f..7dbe652efb5a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
138} 138}
139EXPORT_SYMBOL(dm_exception_store_type_unregister); 139EXPORT_SYMBOL(dm_exception_store_type_unregister);
140 140
141/*
142 * Round a number up to the nearest 'size' boundary. size must
143 * be a power of 2.
144 */
145static ulong round_up(ulong n, ulong size)
146{
147 size--;
148 return (n + size) & ~size;
149}
150
151static int set_chunk_size(struct dm_exception_store *store, 141static int set_chunk_size(struct dm_exception_store *store,
152 const char *chunk_size_arg, char **error) 142 const char *chunk_size_arg, char **error)
153{ 143{
@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
155 char *value; 145 char *value;
156 146
157 chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10); 147 chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
158 if (*chunk_size_arg == '\0' || *value != '\0') { 148 if (*chunk_size_arg == '\0' || *value != '\0' ||
149 chunk_size_ulong > UINT_MAX) {
159 *error = "Invalid chunk size"; 150 *error = "Invalid chunk size";
160 return -EINVAL; 151 return -EINVAL;
161 } 152 }
@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
165 return 0; 156 return 0;
166 } 157 }
167 158
168 /* 159 return dm_exception_store_set_chunk_size(store,
169 * Chunk size must be multiple of page size. Silently 160 (unsigned) chunk_size_ulong,
170 * round up if it's not.
171 */
172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
173
174 return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
175 error); 161 error);
176} 162}
177 163
178int dm_exception_store_set_chunk_size(struct dm_exception_store *store, 164int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
179 unsigned long chunk_size_ulong, 165 unsigned chunk_size,
180 char **error) 166 char **error)
181{ 167{
182 /* Check chunk_size is a power of 2 */ 168 /* Check chunk_size is a power of 2 */
183 if (!is_power_of_2(chunk_size_ulong)) { 169 if (!is_power_of_2(chunk_size)) {
184 *error = "Chunk size is not a power of 2"; 170 *error = "Chunk size is not a power of 2";
185 return -EINVAL; 171 return -EINVAL;
186 } 172 }
187 173
188 /* Validate the chunk size against the device block size */ 174 /* Validate the chunk size against the device block size */
189 if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) { 175 if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
190 *error = "Chunk size is not a multiple of device blocksize"; 176 *error = "Chunk size is not a multiple of device blocksize";
191 return -EINVAL; 177 return -EINVAL;
192 } 178 }
193 179
194 if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) { 180 if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
195 *error = "Chunk size is too high"; 181 *error = "Chunk size is too high";
196 return -EINVAL; 182 return -EINVAL;
197 } 183 }
198 184
199 store->chunk_size = chunk_size_ulong; 185 store->chunk_size = chunk_size;
200 store->chunk_mask = chunk_size_ulong - 1; 186 store->chunk_mask = chunk_size - 1;
201 store->chunk_shift = ffs(chunk_size_ulong) - 1; 187 store->chunk_shift = ffs(chunk_size) - 1;
202 188
203 return 0; 189 return 0;
204} 190}
@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
251 237
252 r = set_chunk_size(tmp_store, argv[2], &ti->error); 238 r = set_chunk_size(tmp_store, argv[2], &ti->error);
253 if (r) 239 if (r)
254 goto bad_cow; 240 goto bad_ctr;
255 241
256 r = type->ctr(tmp_store, 0, NULL); 242 r = type->ctr(tmp_store, 0, NULL);
257 if (r) { 243 if (r) {
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 812c71872ba0..8a223a48802c 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -101,9 +101,9 @@ struct dm_exception_store {
101 struct dm_dev *cow; 101 struct dm_dev *cow;
102 102
103 /* Size of data blocks saved - must be a power of 2 */ 103 /* Size of data blocks saved - must be a power of 2 */
104 chunk_t chunk_size; 104 unsigned chunk_size;
105 chunk_t chunk_mask; 105 unsigned chunk_mask;
106 chunk_t chunk_shift; 106 unsigned chunk_shift;
107 107
108 void *context; 108 void *context;
109}; 109};
@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
169int dm_exception_store_type_unregister(struct dm_exception_store_type *type); 169int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
170 170
171int dm_exception_store_set_chunk_size(struct dm_exception_store *store, 171int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
172 unsigned long chunk_size_ulong, 172 unsigned chunk_size,
173 char **error); 173 char **error);
174 174
175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 652bd33109e3..7ac2c1450d10 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
156 } 156 }
157 157
158 /* The ptr value is sufficient for local unique id */ 158 /* The ptr value is sufficient for local unique id */
159 lc->luid = (uint64_t)lc; 159 lc->luid = (unsigned long)lc;
160 160
161 lc->ti = ti; 161 lc->ti = ti;
162 162
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d5b2e08750d5..0c746420c008 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
284{ 284{
285 int r; 285 int r;
286 struct disk_header *dh; 286 struct disk_header *dh;
287 chunk_t chunk_size; 287 unsigned chunk_size;
288 int chunk_size_supplied = 1; 288 int chunk_size_supplied = 1;
289 char *chunk_err; 289 char *chunk_err;
290 290
291 /* 291 /*
292 * Use default chunk size (or hardsect_size, if larger) if none supplied 292 * Use default chunk size (or logical_block_size, if larger)
293 * if none supplied
293 */ 294 */
294 if (!ps->store->chunk_size) { 295 if (!ps->store->chunk_size) {
295 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 296 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
334 return 0; 335 return 0;
335 336
336 if (chunk_size_supplied) 337 if (chunk_size_supplied)
337 DMWARN("chunk size %llu in device metadata overrides " 338 DMWARN("chunk size %u in device metadata overrides "
338 "table chunk size of %llu.", 339 "table chunk size of %u.",
339 (unsigned long long)chunk_size, 340 chunk_size, ps->store->chunk_size);
340 (unsigned long long)ps->store->chunk_size);
341 341
342 /* We had a bogus chunk_size. Fix stuff up. */ 342 /* We had a bogus chunk_size. Fix stuff up. */
343 free_area(ps); 343 free_area(ps);
@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
345 r = dm_exception_store_set_chunk_size(ps->store, chunk_size, 345 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
346 &chunk_err); 346 &chunk_err);
347 if (r) { 347 if (r) {
348 DMERR("invalid on-disk chunk size %llu: %s.", 348 DMERR("invalid on-disk chunk size %u: %s.",
349 (unsigned long long)chunk_size, chunk_err); 349 chunk_size, chunk_err);
350 return r; 350 return r;
351 } 351 }
352 352
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 57f1bf7f3b7a..3a3ba46e6d4b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
296 */ 296 */
297static int register_snapshot(struct dm_snapshot *snap) 297static int register_snapshot(struct dm_snapshot *snap)
298{ 298{
299 struct dm_snapshot *l;
299 struct origin *o, *new_o; 300 struct origin *o, *new_o;
300 struct block_device *bdev = snap->origin->bdev; 301 struct block_device *bdev = snap->origin->bdev;
301 302
@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
319 __insert_origin(o); 320 __insert_origin(o);
320 } 321 }
321 322
322 list_add_tail(&snap->list, &o->snapshots); 323 /* Sort the list according to chunk size, largest-first smallest-last */
324 list_for_each_entry(l, &o->snapshots, list)
325 if (l->store->chunk_size < snap->store->chunk_size)
326 break;
327 list_add_tail(&snap->list, &l->list);
323 328
324 up_write(&_origins_lock); 329 up_write(&_origins_lock);
325 return 0; 330 return 0;
@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
668 bio_list_init(&s->queued_bios); 673 bio_list_init(&s->queued_bios);
669 INIT_WORK(&s->queued_bios_work, flush_queued_bios); 674 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
670 675
676 if (!s->store->chunk_size) {
677 ti->error = "Chunk size not set";
678 goto bad_load_and_register;
679 }
680
671 /* Add snapshot to the list of snapshots for this origin */ 681 /* Add snapshot to the list of snapshots for this origin */
672 /* Exceptions aren't triggered till snapshot_resume() is called */ 682 /* Exceptions aren't triggered till snapshot_resume() is called */
673 if (register_snapshot(s)) { 683 if (register_snapshot(s)) {
@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
951 961
952 src.bdev = bdev; 962 src.bdev = bdev;
953 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); 963 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
954 src.count = min(s->store->chunk_size, dev_size - src.sector); 964 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
955 965
956 dest.bdev = s->store->cow->bdev; 966 dest.bdev = s->store->cow->bdev;
957 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); 967 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1142 unsigned sz = 0; 1152 unsigned sz = 0;
1143 struct dm_snapshot *snap = ti->private; 1153 struct dm_snapshot *snap = ti->private;
1144 1154
1155 down_write(&snap->lock);
1156
1145 switch (type) { 1157 switch (type) {
1146 case STATUSTYPE_INFO: 1158 case STATUSTYPE_INFO:
1147 if (!snap->valid) 1159 if (!snap->valid)
@@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1173 break; 1185 break;
1174 } 1186 }
1175 1187
1188 up_write(&snap->lock);
1189
1176 return 0; 1190 return 0;
1177} 1191}
1178 1192
@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
1388 struct dm_dev *dev = ti->private; 1402 struct dm_dev *dev = ti->private;
1389 struct dm_snapshot *snap; 1403 struct dm_snapshot *snap;
1390 struct origin *o; 1404 struct origin *o;
1391 chunk_t chunk_size = 0; 1405 unsigned chunk_size = 0;
1392 1406
1393 down_read(&_origins_lock); 1407 down_read(&_origins_lock);
1394 o = __lookup_origin(dev->bdev); 1408 o = __lookup_origin(dev->bdev);
@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
1465 r = dm_register_target(&snapshot_target); 1479 r = dm_register_target(&snapshot_target);
1466 if (r) { 1480 if (r) {
1467 DMERR("snapshot target register failed %d", r); 1481 DMERR("snapshot target register failed %d", r);
1468 return r; 1482 goto bad_register_snapshot_target;
1469 } 1483 }
1470 1484
1471 r = dm_register_target(&origin_target); 1485 r = dm_register_target(&origin_target);
@@ -1522,6 +1536,9 @@ bad2:
1522 dm_unregister_target(&origin_target); 1536 dm_unregister_target(&origin_target);
1523bad1: 1537bad1:
1524 dm_unregister_target(&snapshot_target); 1538 dm_unregister_target(&snapshot_target);
1539
1540bad_register_snapshot_target:
1541 dm_exception_store_exit();
1525 return r; 1542 return r;
1526} 1543}
1527 1544
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 376f1ab48a24..724efc63904d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
47 atomic_t io_count; 47 atomic_t io_count;
48 struct bio *bio; 48 struct bio *bio;
49 unsigned long start_time; 49 unsigned long start_time;
50 spinlock_t endio_lock;
50}; 51};
51 52
52/* 53/*
@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
578 struct mapped_device *md = io->md; 579 struct mapped_device *md = io->md;
579 580
580 /* Push-back supersedes any I/O errors */ 581 /* Push-back supersedes any I/O errors */
581 if (error && !(io->error > 0 && __noflush_suspending(md))) 582 if (unlikely(error)) {
582 io->error = error; 583 spin_lock_irqsave(&io->endio_lock, flags);
584 if (!(io->error > 0 && __noflush_suspending(md)))
585 io->error = error;
586 spin_unlock_irqrestore(&io->endio_lock, flags);
587 }
583 588
584 if (atomic_dec_and_test(&io->io_count)) { 589 if (atomic_dec_and_test(&io->io_count)) {
585 if (io->error == DM_ENDIO_REQUEUE) { 590 if (io->error == DM_ENDIO_REQUEUE) {
@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1226 atomic_set(&ci.io->io_count, 1); 1231 atomic_set(&ci.io->io_count, 1);
1227 ci.io->bio = bio; 1232 ci.io->bio = bio;
1228 ci.io->md = md; 1233 ci.io->md = md;
1234 spin_lock_init(&ci.io->endio_lock);
1229 ci.sector = bio->bi_sector; 1235 ci.sector = bio->bi_sector;
1230 ci.sector_count = bio_sectors(bio); 1236 ci.sector_count = bio_sectors(bio);
1231 if (unlikely(bio_empty_barrier(bio))) 1237 if (unlikely(bio_empty_barrier(bio)))
@@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
1822bad_bdev: 1828bad_bdev:
1823 destroy_workqueue(md->wq); 1829 destroy_workqueue(md->wq);
1824bad_thread: 1830bad_thread:
1831 del_gendisk(md->disk);
1825 put_disk(md->disk); 1832 put_disk(md->disk);
1826bad_disk: 1833bad_disk:
1827 blk_cleanup_queue(md->queue); 1834 blk_cleanup_queue(md->queue);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 26ba42a79129..b182f86a19dd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -944,6 +944,14 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
944 desc->raid_disk < mddev->raid_disks */) { 944 desc->raid_disk < mddev->raid_disks */) {
945 set_bit(In_sync, &rdev->flags); 945 set_bit(In_sync, &rdev->flags);
946 rdev->raid_disk = desc->raid_disk; 946 rdev->raid_disk = desc->raid_disk;
947 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
948 /* active but not in sync implies recovery up to
949 * reshape position. We don't know exactly where
950 * that is, so set to zero for now */
951 if (mddev->minor_version >= 91) {
952 rdev->recovery_offset = 0;
953 rdev->raid_disk = desc->raid_disk;
954 }
947 } 955 }
948 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 956 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
949 set_bit(WriteMostly, &rdev->flags); 957 set_bit(WriteMostly, &rdev->flags);
@@ -1032,8 +1040,19 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1032 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1040 list_for_each_entry(rdev2, &mddev->disks, same_set) {
1033 mdp_disk_t *d; 1041 mdp_disk_t *d;
1034 int desc_nr; 1042 int desc_nr;
1035 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1043 int is_active = test_bit(In_sync, &rdev2->flags);
1036 && !test_bit(Faulty, &rdev2->flags)) 1044
1045 if (rdev2->raid_disk >= 0 &&
1046 sb->minor_version >= 91)
1047 /* we have nowhere to store the recovery_offset,
1048 * but if it is not below the reshape_position,
1049 * we can piggy-back on that.
1050 */
1051 is_active = 1;
1052 if (rdev2->raid_disk < 0 ||
1053 test_bit(Faulty, &rdev2->flags))
1054 is_active = 0;
1055 if (is_active)
1037 desc_nr = rdev2->raid_disk; 1056 desc_nr = rdev2->raid_disk;
1038 else 1057 else
1039 desc_nr = next_spare++; 1058 desc_nr = next_spare++;
@@ -1043,16 +1062,16 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1043 d->number = rdev2->desc_nr; 1062 d->number = rdev2->desc_nr;
1044 d->major = MAJOR(rdev2->bdev->bd_dev); 1063 d->major = MAJOR(rdev2->bdev->bd_dev);
1045 d->minor = MINOR(rdev2->bdev->bd_dev); 1064 d->minor = MINOR(rdev2->bdev->bd_dev);
1046 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1065 if (is_active)
1047 && !test_bit(Faulty, &rdev2->flags))
1048 d->raid_disk = rdev2->raid_disk; 1066 d->raid_disk = rdev2->raid_disk;
1049 else 1067 else
1050 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1068 d->raid_disk = rdev2->desc_nr; /* compatibility */
1051 if (test_bit(Faulty, &rdev2->flags)) 1069 if (test_bit(Faulty, &rdev2->flags))
1052 d->state = (1<<MD_DISK_FAULTY); 1070 d->state = (1<<MD_DISK_FAULTY);
1053 else if (test_bit(In_sync, &rdev2->flags)) { 1071 else if (is_active) {
1054 d->state = (1<<MD_DISK_ACTIVE); 1072 d->state = (1<<MD_DISK_ACTIVE);
1055 d->state |= (1<<MD_DISK_SYNC); 1073 if (test_bit(In_sync, &rdev2->flags))
1074 d->state |= (1<<MD_DISK_SYNC);
1056 active++; 1075 active++;
1057 working++; 1076 working++;
1058 } else { 1077 } else {
@@ -1382,8 +1401,6 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1382 1401
1383 if (rdev->raid_disk >= 0 && 1402 if (rdev->raid_disk >= 0 &&
1384 !test_bit(In_sync, &rdev->flags)) { 1403 !test_bit(In_sync, &rdev->flags)) {
1385 if (mddev->curr_resync_completed > rdev->recovery_offset)
1386 rdev->recovery_offset = mddev->curr_resync_completed;
1387 if (rdev->recovery_offset > 0) { 1404 if (rdev->recovery_offset > 0) {
1388 sb->feature_map |= 1405 sb->feature_map |=
1389 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1406 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1917,6 +1934,14 @@ static void sync_sbs(mddev_t * mddev, int nospares)
1917 */ 1934 */
1918 mdk_rdev_t *rdev; 1935 mdk_rdev_t *rdev;
1919 1936
1937 /* First make sure individual recovery_offsets are correct */
1938 list_for_each_entry(rdev, &mddev->disks, same_set) {
1939 if (rdev->raid_disk >= 0 &&
1940 !test_bit(In_sync, &rdev->flags) &&
1941 mddev->curr_resync_completed > rdev->recovery_offset)
1942 rdev->recovery_offset = mddev->curr_resync_completed;
1943
1944 }
1920 list_for_each_entry(rdev, &mddev->disks, same_set) { 1945 list_for_each_entry(rdev, &mddev->disks, same_set) {
1921 if (rdev->sb_events == mddev->events || 1946 if (rdev->sb_events == mddev->events ||
1922 (nospares && 1947 (nospares &&
@@ -2631,7 +2656,7 @@ static void analyze_sbs(mddev_t * mddev)
2631 rdev->desc_nr = i++; 2656 rdev->desc_nr = i++;
2632 rdev->raid_disk = rdev->desc_nr; 2657 rdev->raid_disk = rdev->desc_nr;
2633 set_bit(In_sync, &rdev->flags); 2658 set_bit(In_sync, &rdev->flags);
2634 } else if (rdev->raid_disk >= mddev->raid_disks) { 2659 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2635 rdev->raid_disk = -1; 2660 rdev->raid_disk = -1;
2636 clear_bit(In_sync, &rdev->flags); 2661 clear_bit(In_sync, &rdev->flags);
2637 } 2662 }
@@ -6504,8 +6529,9 @@ void md_do_sync(mddev_t *mddev)
6504 skip: 6529 skip:
6505 mddev->curr_resync = 0; 6530 mddev->curr_resync = 0;
6506 mddev->curr_resync_completed = 0; 6531 mddev->curr_resync_completed = 0;
6507 mddev->resync_min = 0; 6532 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6508 mddev->resync_max = MaxSector; 6533 /* We completed so max setting can be forgotten. */
6534 mddev->resync_max = MaxSector;
6509 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6535 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6510 wake_up(&resync_wait); 6536 wake_up(&resync_wait);
6511 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6537 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d1b9bd5fd4f6..a053423785c9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
64 64
65 /* allocate a r1bio with room for raid_disks entries in the bios array */ 65 /* allocate a r1bio with room for raid_disks entries in the bios array */
66 r1_bio = kzalloc(size, gfp_flags); 66 r1_bio = kzalloc(size, gfp_flags);
67 if (!r1_bio) 67 if (!r1_bio && pi->mddev)
68 unplug_slaves(pi->mddev); 68 unplug_slaves(pi->mddev);
69 69
70 return r1_bio; 70 return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
1683 generic_make_request(bio); 1683 generic_make_request(bio);
1684 } 1684 }
1685 } 1685 }
1686 cond_resched();
1686 } 1687 }
1687 if (unplug) 1688 if (unplug)
1688 unplug_slaves(mddev); 1689 unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
1978 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 1979 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1979 if (!conf->poolinfo) 1980 if (!conf->poolinfo)
1980 goto out_no_mem; 1981 goto out_no_mem;
1981 conf->poolinfo->mddev = mddev; 1982 conf->poolinfo->mddev = NULL;
1982 conf->poolinfo->raid_disks = mddev->raid_disks; 1983 conf->poolinfo->raid_disks = mddev->raid_disks;
1983 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 1984 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1984 r1bio_pool_free, 1985 r1bio_pool_free,
1985 conf->poolinfo); 1986 conf->poolinfo);
1986 if (!conf->r1bio_pool) 1987 if (!conf->r1bio_pool)
1987 goto out_no_mem; 1988 goto out_no_mem;
1989 conf->poolinfo->mddev = mddev;
1988 1990
1989 spin_lock_init(&conf->device_lock); 1991 spin_lock_init(&conf->device_lock);
1990 mddev->queue->queue_lock = &conf->device_lock; 1992 mddev->queue->queue_lock = &conf->device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 51c4c5c4d87a..c2cb7b87b440 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
68 68
69 /* allocate a r10bio with room for raid_disks entries in the bios array */ 69 /* allocate a r10bio with room for raid_disks entries in the bios array */
70 r10_bio = kzalloc(size, gfp_flags); 70 r10_bio = kzalloc(size, gfp_flags);
71 if (!r10_bio) 71 if (!r10_bio && conf->mddev)
72 unplug_slaves(conf->mddev); 72 unplug_slaves(conf->mddev);
73 73
74 return r10_bio; 74 return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
1632 generic_make_request(bio); 1632 generic_make_request(bio);
1633 } 1633 }
1634 } 1634 }
1635 cond_resched();
1635 } 1636 }
1636 if (unplug) 1637 if (unplug)
1637 unplug_slaves(mddev); 1638 unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
2095 if (!conf->tmppage) 2096 if (!conf->tmppage)
2096 goto out_free_conf; 2097 goto out_free_conf;
2097 2098
2098 conf->mddev = mddev;
2099 conf->raid_disks = mddev->raid_disks; 2099 conf->raid_disks = mddev->raid_disks;
2100 conf->near_copies = nc; 2100 conf->near_copies = nc;
2101 conf->far_copies = fc; 2101 conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
2132 goto out_free_conf; 2132 goto out_free_conf;
2133 } 2133 }
2134 2134
2135 conf->mddev = mddev;
2135 spin_lock_init(&conf->device_lock); 2136 spin_lock_init(&conf->device_lock);
2136 mddev->queue->queue_lock = &conf->device_lock; 2137 mddev->queue->queue_lock = &conf->device_lock;
2137 2138
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 94829804ab7f..d29215d966da 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
156static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 156static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
157 int *count, int syndrome_disks) 157 int *count, int syndrome_disks)
158{ 158{
159 int slot; 159 int slot = *count;
160 160
161 if (sh->ddf_layout)
162 (*count)++;
161 if (idx == sh->pd_idx) 163 if (idx == sh->pd_idx)
162 return syndrome_disks; 164 return syndrome_disks;
163 if (idx == sh->qd_idx) 165 if (idx == sh->qd_idx)
164 return syndrome_disks + 1; 166 return syndrome_disks + 1;
165 slot = (*count)++; 167 if (!sh->ddf_layout)
168 (*count)++;
166 return slot; 169 return slot;
167} 170}
168 171
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
717 int i; 720 int i;
718 721
719 for (i = 0; i < disks; i++) 722 for (i = 0; i < disks; i++)
720 srcs[i] = (void *)raid6_empty_zero_page; 723 srcs[i] = NULL;
721 724
722 count = 0; 725 count = 0;
723 i = d0_idx; 726 i = d0_idx;
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
727 srcs[slot] = sh->dev[i].page; 730 srcs[slot] = sh->dev[i].page;
728 i = raid6_next_disk(i, disks); 731 i = raid6_next_disk(i, disks);
729 } while (i != d0_idx); 732 } while (i != d0_idx);
730 BUG_ON(count != syndrome_disks);
731 733
732 return count; 734 return syndrome_disks;
733} 735}
734 736
735static struct dma_async_tx_descriptor * 737static struct dma_async_tx_descriptor *
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
814 * slot number conversion for 'faila' and 'failb' 816 * slot number conversion for 'faila' and 'failb'
815 */ 817 */
816 for (i = 0; i < disks ; i++) 818 for (i = 0; i < disks ; i++)
817 blocks[i] = (void *)raid6_empty_zero_page; 819 blocks[i] = NULL;
818 count = 0; 820 count = 0;
819 i = d0_idx; 821 i = d0_idx;
820 do { 822 do {
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
828 failb = slot; 830 failb = slot;
829 i = raid6_next_disk(i, disks); 831 i = raid6_next_disk(i, disks);
830 } while (i != d0_idx); 832 } while (i != d0_idx);
831 BUG_ON(count != syndrome_disks);
832 833
833 BUG_ON(faila == failb); 834 BUG_ON(faila == failb);
834 if (failb < faila) 835 if (failb < faila)
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
845 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 846 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
846 ops_complete_compute, sh, 847 ops_complete_compute, sh,
847 to_addr_conv(sh, percpu)); 848 to_addr_conv(sh, percpu));
848 return async_gen_syndrome(blocks, 0, count+2, 849 return async_gen_syndrome(blocks, 0, syndrome_disks+2,
849 STRIPE_SIZE, &submit); 850 STRIPE_SIZE, &submit);
850 } else { 851 } else {
851 struct page *dest; 852 struct page *dest;
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
1139 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1140 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1140} 1141}
1141 1142
1142static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1143static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1143{ 1144{
1144 int overlap_clear = 0, i, disks = sh->disks; 1145 int overlap_clear = 0, i, disks = sh->disks;
1145 struct dma_async_tx_descriptor *tx = NULL; 1146 struct dma_async_tx_descriptor *tx = NULL;
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1204 put_cpu(); 1205 put_cpu();
1205} 1206}
1206 1207
1208#ifdef CONFIG_MULTICORE_RAID456
1209static void async_run_ops(void *param, async_cookie_t cookie)
1210{
1211 struct stripe_head *sh = param;
1212 unsigned long ops_request = sh->ops.request;
1213
1214 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1215 wake_up(&sh->ops.wait_for_ops);
1216
1217 __raid_run_ops(sh, ops_request);
1218 release_stripe(sh);
1219}
1220
1221static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1222{
1223 /* since handle_stripe can be called outside of raid5d context
1224 * we need to ensure sh->ops.request is de-staged before another
1225 * request arrives
1226 */
1227 wait_event(sh->ops.wait_for_ops,
1228 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1229 sh->ops.request = ops_request;
1230
1231 atomic_inc(&sh->count);
1232 async_schedule(async_run_ops, sh);
1233}
1234#else
1235#define raid_run_ops __raid_run_ops
1236#endif
1237
1207static int grow_one_stripe(raid5_conf_t *conf) 1238static int grow_one_stripe(raid5_conf_t *conf)
1208{ 1239{
1209 struct stripe_head *sh; 1240 struct stripe_head *sh;
1241 int disks = max(conf->raid_disks, conf->previous_raid_disks);
1210 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 1242 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
1211 if (!sh) 1243 if (!sh)
1212 return 0; 1244 return 0;
1213 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 1245 memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
1214 sh->raid_conf = conf; 1246 sh->raid_conf = conf;
1215 spin_lock_init(&sh->lock); 1247 spin_lock_init(&sh->lock);
1248 #ifdef CONFIG_MULTICORE_RAID456
1249 init_waitqueue_head(&sh->ops.wait_for_ops);
1250 #endif
1216 1251
1217 if (grow_buffers(sh, conf->raid_disks)) { 1252 if (grow_buffers(sh, disks)) {
1218 shrink_buffers(sh, conf->raid_disks); 1253 shrink_buffers(sh, disks);
1219 kmem_cache_free(conf->slab_cache, sh); 1254 kmem_cache_free(conf->slab_cache, sh);
1220 return 0; 1255 return 0;
1221 } 1256 }
1222 sh->disks = conf->raid_disks;
1223 /* we just created an active stripe so... */ 1257 /* we just created an active stripe so... */
1224 atomic_set(&sh->count, 1); 1258 atomic_set(&sh->count, 1);
1225 atomic_inc(&conf->active_stripes); 1259 atomic_inc(&conf->active_stripes);
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
1231static int grow_stripes(raid5_conf_t *conf, int num) 1265static int grow_stripes(raid5_conf_t *conf, int num)
1232{ 1266{
1233 struct kmem_cache *sc; 1267 struct kmem_cache *sc;
1234 int devs = conf->raid_disks; 1268 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1235 1269
1236 sprintf(conf->cache_name[0], 1270 sprintf(conf->cache_name[0],
1237 "raid%d-%s", conf->level, mdname(conf->mddev)); 1271 "raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
1329 1363
1330 nsh->raid_conf = conf; 1364 nsh->raid_conf = conf;
1331 spin_lock_init(&nsh->lock); 1365 spin_lock_init(&nsh->lock);
1366 #ifdef CONFIG_MULTICORE_RAID456
1367 init_waitqueue_head(&nsh->ops.wait_for_ops);
1368 #endif
1332 1369
1333 list_add(&nsh->lru, &newstripes); 1370 list_add(&nsh->lru, &newstripes);
1334 } 1371 }
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1899 case ALGORITHM_PARITY_N: 1936 case ALGORITHM_PARITY_N:
1900 break; 1937 break;
1901 case ALGORITHM_ROTATING_N_CONTINUE: 1938 case ALGORITHM_ROTATING_N_CONTINUE:
1939 /* Like left_symmetric, but P is before Q */
1902 if (sh->pd_idx == 0) 1940 if (sh->pd_idx == 0)
1903 i--; /* P D D D Q */ 1941 i--; /* P D D D Q */
1904 else if (i > sh->pd_idx) 1942 else {
1905 i -= 2; /* D D Q P D */ 1943 /* D D Q P D */
1944 if (i < sh->pd_idx)
1945 i += raid_disks;
1946 i -= (sh->pd_idx + 1);
1947 }
1906 break; 1948 break;
1907 case ALGORITHM_LEFT_ASYMMETRIC_6: 1949 case ALGORITHM_LEFT_ASYMMETRIC_6:
1908 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1950 case ALGORITHM_RIGHT_ASYMMETRIC_6:
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2896 * 2938 *
2897 */ 2939 */
2898 2940
2899static bool handle_stripe5(struct stripe_head *sh) 2941static void handle_stripe5(struct stripe_head *sh)
2900{ 2942{
2901 raid5_conf_t *conf = sh->raid_conf; 2943 raid5_conf_t *conf = sh->raid_conf;
2902 int disks = sh->disks, i; 2944 int disks = sh->disks, i;
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh)
3167 ops_run_io(sh, &s); 3209 ops_run_io(sh, &s);
3168 3210
3169 return_io(return_bi); 3211 return_io(return_bi);
3170
3171 return blocked_rdev == NULL;
3172} 3212}
3173 3213
3174static bool handle_stripe6(struct stripe_head *sh) 3214static void handle_stripe6(struct stripe_head *sh)
3175{ 3215{
3176 raid5_conf_t *conf = sh->raid_conf; 3216 raid5_conf_t *conf = sh->raid_conf;
3177 int disks = sh->disks; 3217 int disks = sh->disks;
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh)
3455 ops_run_io(sh, &s); 3495 ops_run_io(sh, &s);
3456 3496
3457 return_io(return_bi); 3497 return_io(return_bi);
3458
3459 return blocked_rdev == NULL;
3460} 3498}
3461 3499
3462/* returns true if the stripe was handled */ 3500static void handle_stripe(struct stripe_head *sh)
3463static bool handle_stripe(struct stripe_head *sh)
3464{ 3501{
3465 if (sh->raid_conf->level == 6) 3502 if (sh->raid_conf->level == 6)
3466 return handle_stripe6(sh); 3503 handle_stripe6(sh);
3467 else 3504 else
3468 return handle_stripe5(sh); 3505 handle_stripe5(sh);
3469} 3506}
3470 3507
3471static void raid5_activate_delayed(raid5_conf_t *conf) 3508static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev)
3503{ 3540{
3504 raid5_conf_t *conf = mddev->private; 3541 raid5_conf_t *conf = mddev->private;
3505 int i; 3542 int i;
3543 int devs = max(conf->raid_disks, conf->previous_raid_disks);
3506 3544
3507 rcu_read_lock(); 3545 rcu_read_lock();
3508 for (i = 0; i < conf->raid_disks; i++) { 3546 for (i = 0; i < devs; i++) {
3509 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3547 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3510 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3548 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3511 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3549 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -4011,6 +4049,8 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
4011 sector_nr = conf->reshape_progress; 4049 sector_nr = conf->reshape_progress;
4012 sector_div(sector_nr, new_data_disks); 4050 sector_div(sector_nr, new_data_disks);
4013 if (sector_nr) { 4051 if (sector_nr) {
4052 mddev->curr_resync_completed = sector_nr;
4053 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4014 *skipped = 1; 4054 *skipped = 1;
4015 return sector_nr; 4055 return sector_nr;
4016 } 4056 }
@@ -4277,9 +4317,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4277 clear_bit(STRIPE_INSYNC, &sh->state); 4317 clear_bit(STRIPE_INSYNC, &sh->state);
4278 spin_unlock(&sh->lock); 4318 spin_unlock(&sh->lock);
4279 4319
4280 /* wait for any blocked device to be handled */ 4320 handle_stripe(sh);
4281 while (unlikely(!handle_stripe(sh)))
4282 ;
4283 release_stripe(sh); 4321 release_stripe(sh);
4284 4322
4285 return STRIPE_SECTORS; 4323 return STRIPE_SECTORS;
@@ -4349,37 +4387,6 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4349 return handled; 4387 return handled;
4350} 4388}
4351 4389
4352#ifdef CONFIG_MULTICORE_RAID456
4353static void __process_stripe(void *param, async_cookie_t cookie)
4354{
4355 struct stripe_head *sh = param;
4356
4357 handle_stripe(sh);
4358 release_stripe(sh);
4359}
4360
4361static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4362{
4363 async_schedule_domain(__process_stripe, sh, domain);
4364}
4365
4366static void synchronize_stripe_processing(struct list_head *domain)
4367{
4368 async_synchronize_full_domain(domain);
4369}
4370#else
4371static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4372{
4373 handle_stripe(sh);
4374 release_stripe(sh);
4375 cond_resched();
4376}
4377
4378static void synchronize_stripe_processing(struct list_head *domain)
4379{
4380}
4381#endif
4382
4383 4390
4384/* 4391/*
4385 * This is our raid5 kernel thread. 4392 * This is our raid5 kernel thread.
@@ -4393,7 +4400,6 @@ static void raid5d(mddev_t *mddev)
4393 struct stripe_head *sh; 4400 struct stripe_head *sh;
4394 raid5_conf_t *conf = mddev->private; 4401 raid5_conf_t *conf = mddev->private;
4395 int handled; 4402 int handled;
4396 LIST_HEAD(raid_domain);
4397 4403
4398 pr_debug("+++ raid5d active\n"); 4404 pr_debug("+++ raid5d active\n");
4399 4405
@@ -4430,7 +4436,9 @@ static void raid5d(mddev_t *mddev)
4430 spin_unlock_irq(&conf->device_lock); 4436 spin_unlock_irq(&conf->device_lock);
4431 4437
4432 handled++; 4438 handled++;
4433 process_stripe(sh, &raid_domain); 4439 handle_stripe(sh);
4440 release_stripe(sh);
4441 cond_resched();
4434 4442
4435 spin_lock_irq(&conf->device_lock); 4443 spin_lock_irq(&conf->device_lock);
4436 } 4444 }
@@ -4438,7 +4446,6 @@ static void raid5d(mddev_t *mddev)
4438 4446
4439 spin_unlock_irq(&conf->device_lock); 4447 spin_unlock_irq(&conf->device_lock);
4440 4448
4441 synchronize_stripe_processing(&raid_domain);
4442 async_tx_issue_pending_all(); 4449 async_tx_issue_pending_all();
4443 unplug_slaves(mddev); 4450 unplug_slaves(mddev);
4444 4451
@@ -4558,13 +4565,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4558 4565
4559 if (!sectors) 4566 if (!sectors)
4560 sectors = mddev->dev_sectors; 4567 sectors = mddev->dev_sectors;
4561 if (!raid_disks) { 4568 if (!raid_disks)
4562 /* size is defined by the smallest of previous and new size */ 4569 /* size is defined by the smallest of previous and new size */
4563 if (conf->raid_disks < conf->previous_raid_disks) 4570 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4564 raid_disks = conf->raid_disks;
4565 else
4566 raid_disks = conf->previous_raid_disks;
4567 }
4568 4571
4569 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4572 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4570 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4573 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
@@ -4665,7 +4668,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4665 } 4668 }
4666 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; 4669 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4667 } 4670 }
4668 scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); 4671 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4669 if (!scribble) { 4672 if (!scribble) {
4670 err = -ENOMEM; 4673 err = -ENOMEM;
4671 break; 4674 break;
@@ -4686,7 +4689,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4686static raid5_conf_t *setup_conf(mddev_t *mddev) 4689static raid5_conf_t *setup_conf(mddev_t *mddev)
4687{ 4690{
4688 raid5_conf_t *conf; 4691 raid5_conf_t *conf;
4689 int raid_disk, memory; 4692 int raid_disk, memory, max_disks;
4690 mdk_rdev_t *rdev; 4693 mdk_rdev_t *rdev;
4691 struct disk_info *disk; 4694 struct disk_info *disk;
4692 4695
@@ -4722,15 +4725,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4722 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); 4725 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4723 if (conf == NULL) 4726 if (conf == NULL)
4724 goto abort; 4727 goto abort;
4728 spin_lock_init(&conf->device_lock);
4729 init_waitqueue_head(&conf->wait_for_stripe);
4730 init_waitqueue_head(&conf->wait_for_overlap);
4731 INIT_LIST_HEAD(&conf->handle_list);
4732 INIT_LIST_HEAD(&conf->hold_list);
4733 INIT_LIST_HEAD(&conf->delayed_list);
4734 INIT_LIST_HEAD(&conf->bitmap_list);
4735 INIT_LIST_HEAD(&conf->inactive_list);
4736 atomic_set(&conf->active_stripes, 0);
4737 atomic_set(&conf->preread_active_stripes, 0);
4738 atomic_set(&conf->active_aligned_reads, 0);
4739 conf->bypass_threshold = BYPASS_THRESHOLD;
4725 4740
4726 conf->raid_disks = mddev->raid_disks; 4741 conf->raid_disks = mddev->raid_disks;
4727 conf->scribble_len = scribble_len(conf->raid_disks);
4728 if (mddev->reshape_position == MaxSector) 4742 if (mddev->reshape_position == MaxSector)
4729 conf->previous_raid_disks = mddev->raid_disks; 4743 conf->previous_raid_disks = mddev->raid_disks;
4730 else 4744 else
4731 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4745 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4746 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4747 conf->scribble_len = scribble_len(max_disks);
4732 4748
4733 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4749 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4734 GFP_KERNEL); 4750 GFP_KERNEL);
4735 if (!conf->disks) 4751 if (!conf->disks)
4736 goto abort; 4752 goto abort;
@@ -4744,24 +4760,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4744 if (raid5_alloc_percpu(conf) != 0) 4760 if (raid5_alloc_percpu(conf) != 0)
4745 goto abort; 4761 goto abort;
4746 4762
4747 spin_lock_init(&conf->device_lock);
4748 init_waitqueue_head(&conf->wait_for_stripe);
4749 init_waitqueue_head(&conf->wait_for_overlap);
4750 INIT_LIST_HEAD(&conf->handle_list);
4751 INIT_LIST_HEAD(&conf->hold_list);
4752 INIT_LIST_HEAD(&conf->delayed_list);
4753 INIT_LIST_HEAD(&conf->bitmap_list);
4754 INIT_LIST_HEAD(&conf->inactive_list);
4755 atomic_set(&conf->active_stripes, 0);
4756 atomic_set(&conf->preread_active_stripes, 0);
4757 atomic_set(&conf->active_aligned_reads, 0);
4758 conf->bypass_threshold = BYPASS_THRESHOLD;
4759
4760 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4763 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4761 4764
4762 list_for_each_entry(rdev, &mddev->disks, same_set) { 4765 list_for_each_entry(rdev, &mddev->disks, same_set) {
4763 raid_disk = rdev->raid_disk; 4766 raid_disk = rdev->raid_disk;
4764 if (raid_disk >= conf->raid_disks 4767 if (raid_disk >= max_disks
4765 || raid_disk < 0) 4768 || raid_disk < 0)
4766 continue; 4769 continue;
4767 disk = conf->disks + raid_disk; 4770 disk = conf->disks + raid_disk;
@@ -4793,7 +4796,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4793 } 4796 }
4794 4797
4795 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4798 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4796 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4799 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4797 if (grow_stripes(conf, conf->max_nr_stripes)) { 4800 if (grow_stripes(conf, conf->max_nr_stripes)) {
4798 printk(KERN_ERR 4801 printk(KERN_ERR
4799 "raid5: couldn't allocate %dkB for buffers\n", memory); 4802 "raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4820,11 +4823,40 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4820 return ERR_PTR(-ENOMEM); 4823 return ERR_PTR(-ENOMEM);
4821} 4824}
4822 4825
4826
4827static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4828{
4829 switch (algo) {
4830 case ALGORITHM_PARITY_0:
4831 if (raid_disk < max_degraded)
4832 return 1;
4833 break;
4834 case ALGORITHM_PARITY_N:
4835 if (raid_disk >= raid_disks - max_degraded)
4836 return 1;
4837 break;
4838 case ALGORITHM_PARITY_0_6:
4839 if (raid_disk == 0 ||
4840 raid_disk == raid_disks - 1)
4841 return 1;
4842 break;
4843 case ALGORITHM_LEFT_ASYMMETRIC_6:
4844 case ALGORITHM_RIGHT_ASYMMETRIC_6:
4845 case ALGORITHM_LEFT_SYMMETRIC_6:
4846 case ALGORITHM_RIGHT_SYMMETRIC_6:
4847 if (raid_disk == raid_disks - 1)
4848 return 1;
4849 }
4850 return 0;
4851}
4852
4823static int run(mddev_t *mddev) 4853static int run(mddev_t *mddev)
4824{ 4854{
4825 raid5_conf_t *conf; 4855 raid5_conf_t *conf;
4826 int working_disks = 0, chunk_size; 4856 int working_disks = 0, chunk_size;
4857 int dirty_parity_disks = 0;
4827 mdk_rdev_t *rdev; 4858 mdk_rdev_t *rdev;
4859 sector_t reshape_offset = 0;
4828 4860
4829 if (mddev->recovery_cp != MaxSector) 4861 if (mddev->recovery_cp != MaxSector)
4830 printk(KERN_NOTICE "raid5: %s is not clean" 4862 printk(KERN_NOTICE "raid5: %s is not clean"
@@ -4858,6 +4890,7 @@ static int run(mddev_t *mddev)
4858 "on a stripe boundary\n"); 4890 "on a stripe boundary\n");
4859 return -EINVAL; 4891 return -EINVAL;
4860 } 4892 }
4893 reshape_offset = here_new * mddev->new_chunk_sectors;
4861 /* here_new is the stripe we will write to */ 4894 /* here_new is the stripe we will write to */
4862 here_old = mddev->reshape_position; 4895 here_old = mddev->reshape_position;
4863 sector_div(here_old, mddev->chunk_sectors * 4896 sector_div(here_old, mddev->chunk_sectors *
@@ -4913,12 +4946,54 @@ static int run(mddev_t *mddev)
4913 /* 4946 /*
4914 * 0 for a fully functional array, 1 or 2 for a degraded array. 4947 * 0 for a fully functional array, 1 or 2 for a degraded array.
4915 */ 4948 */
4916 list_for_each_entry(rdev, &mddev->disks, same_set) 4949 list_for_each_entry(rdev, &mddev->disks, same_set) {
4917 if (rdev->raid_disk >= 0 && 4950 if (rdev->raid_disk < 0)
4918 test_bit(In_sync, &rdev->flags)) 4951 continue;
4952 if (test_bit(In_sync, &rdev->flags))
4919 working_disks++; 4953 working_disks++;
4954 /* This disc is not fully in-sync. However if it
4955 * just stored parity (beyond the recovery_offset),
4956 * when we don't need to be concerned about the
4957 * array being dirty.
4958 * When reshape goes 'backwards', we never have
4959 * partially completed devices, so we only need
4960 * to worry about reshape going forwards.
4961 */
4962 /* Hack because v0.91 doesn't store recovery_offset properly. */
4963 if (mddev->major_version == 0 &&
4964 mddev->minor_version > 90)
4965 rdev->recovery_offset = reshape_offset;
4966
4967 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
4968 rdev->raid_disk, working_disks, conf->prev_algo,
4969 conf->previous_raid_disks, conf->max_degraded,
4970 conf->algorithm, conf->raid_disks,
4971 only_parity(rdev->raid_disk,
4972 conf->prev_algo,
4973 conf->previous_raid_disks,
4974 conf->max_degraded),
4975 only_parity(rdev->raid_disk,
4976 conf->algorithm,
4977 conf->raid_disks,
4978 conf->max_degraded));
4979 if (rdev->recovery_offset < reshape_offset) {
4980 /* We need to check old and new layout */
4981 if (!only_parity(rdev->raid_disk,
4982 conf->algorithm,
4983 conf->raid_disks,
4984 conf->max_degraded))
4985 continue;
4986 }
4987 if (!only_parity(rdev->raid_disk,
4988 conf->prev_algo,
4989 conf->previous_raid_disks,
4990 conf->max_degraded))
4991 continue;
4992 dirty_parity_disks++;
4993 }
4920 4994
4921 mddev->degraded = conf->raid_disks - working_disks; 4995 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4996 - working_disks);
4922 4997
4923 if (mddev->degraded > conf->max_degraded) { 4998 if (mddev->degraded > conf->max_degraded) {
4924 printk(KERN_ERR "raid5: not enough operational devices for %s" 4999 printk(KERN_ERR "raid5: not enough operational devices for %s"
@@ -4931,7 +5006,7 @@ static int run(mddev_t *mddev)
4931 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 5006 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4932 mddev->resync_max_sectors = mddev->dev_sectors; 5007 mddev->resync_max_sectors = mddev->dev_sectors;
4933 5008
4934 if (mddev->degraded > 0 && 5009 if (mddev->degraded > dirty_parity_disks &&
4935 mddev->recovery_cp != MaxSector) { 5010 mddev->recovery_cp != MaxSector) {
4936 if (mddev->ok_start_degraded) 5011 if (mddev->ok_start_degraded)
4937 printk(KERN_WARNING 5012 printk(KERN_WARNING
@@ -5357,9 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5357 !test_bit(Faulty, &rdev->flags)) { 5432 !test_bit(Faulty, &rdev->flags)) {
5358 if (raid5_add_disk(mddev, rdev) == 0) { 5433 if (raid5_add_disk(mddev, rdev) == 0) {
5359 char nm[20]; 5434 char nm[20];
5360 set_bit(In_sync, &rdev->flags); 5435 if (rdev->raid_disk >= conf->previous_raid_disks)
5436 set_bit(In_sync, &rdev->flags);
5437 else
5438 rdev->recovery_offset = 0;
5361 added_devices++; 5439 added_devices++;
5362 rdev->recovery_offset = 0;
5363 sprintf(nm, "rd%d", rdev->raid_disk); 5440 sprintf(nm, "rd%d", rdev->raid_disk);
5364 if (sysfs_create_link(&mddev->kobj, 5441 if (sysfs_create_link(&mddev->kobj,
5365 &rdev->kobj, nm)) 5442 &rdev->kobj, nm))
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2390e0e83daf..dd708359b451 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -214,12 +214,20 @@ struct stripe_head {
214 int disks; /* disks in stripe */ 214 int disks; /* disks in stripe */
215 enum check_states check_state; 215 enum check_states check_state;
216 enum reconstruct_states reconstruct_state; 216 enum reconstruct_states reconstruct_state;
217 /* stripe_operations 217 /**
218 * struct stripe_operations
218 * @target - STRIPE_OP_COMPUTE_BLK target 219 * @target - STRIPE_OP_COMPUTE_BLK target
220 * @target2 - 2nd compute target in the raid6 case
221 * @zero_sum_result - P and Q verification flags
222 * @request - async service request flags for raid_run_ops
219 */ 223 */
220 struct stripe_operations { 224 struct stripe_operations {
221 int target, target2; 225 int target, target2;
222 enum sum_check_flags zero_sum_result; 226 enum sum_check_flags zero_sum_result;
227 #ifdef CONFIG_MULTICORE_RAID456
228 unsigned long request;
229 wait_queue_head_t wait_for_ops;
230 #endif
223 } ops; 231 } ops;
224 struct r5dev { 232 struct r5dev {
225 struct bio req; 233 struct bio req;
@@ -294,6 +302,8 @@ struct r6_state {
294#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ 302#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
295#define STRIPE_BIOFILL_RUN 14 303#define STRIPE_BIOFILL_RUN 14
296#define STRIPE_COMPUTE_RUN 15 304#define STRIPE_COMPUTE_RUN 15
305#define STRIPE_OPS_REQ_PENDING 16
306
297/* 307/*
298 * Operation request flags 308 * Operation request flags
299 */ 309 */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
478{ 488{
479 return (layout >= 0 && layout <= 5) 489 return (layout >= 0 && layout <= 5)
480 || 490 ||
481 (layout == 8 || layout == 10) 491 (layout >= 8 && layout <= 10)
482 || 492 ||
483 (layout >= 16 && layout <= 20); 493 (layout >= 16 && layout <= 20);
484} 494}
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 699dfeee4944..2654d5c854be 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -15,7 +15,7 @@
15 * 15 *
16 * $#-way unrolled portable integer math RAID-6 instruction set 16 * $#-way unrolled portable integer math RAID-6 instruction set
17 * 17 *
18 * This file is postprocessed using unroll.pl 18 * This file is postprocessed using unroll.awk
19 * 19 *
20 * <benh> hpa: in process, 20 * <benh> hpa: in process,
21 * you can just "steal" the vec unit with enable_kernel_altivec() (but 21 * you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
index f9bf9cba357f..d1e276a14fab 100644
--- a/drivers/md/raid6int.uc
+++ b/drivers/md/raid6int.uc
@@ -15,7 +15,7 @@
15 * 15 *
16 * $#-way unrolled portable integer math RAID-6 instruction set 16 * $#-way unrolled portable integer math RAID-6 instruction set
17 * 17 *
18 * This file is postprocessed using unroll.pl 18 * This file is postprocessed using unroll.awk
19 */ 19 */
20 20
21#include <linux/raid/pq.h> 21#include <linux/raid/pq.h>
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 58ffdf4f5161..2874cbef529d 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -7,7 +7,7 @@ CC = gcc
7OPTFLAGS = -O2 # Adjust as desired 7OPTFLAGS = -O2 # Adjust as desired
8CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) 8CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
9LD = ld 9LD = ld
10PERL = perl 10AWK = awk
11AR = ar 11AR = ar
12RANLIB = ranlib 12RANLIB = ranlib
13 13
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
35raid6test: test.c raid6.a 35raid6test: test.c raid6.a
36 $(CC) $(CFLAGS) -o raid6test $^ 36 $(CC) $(CFLAGS) -o raid6test $^
37 37
38raid6altivec1.c: raid6altivec.uc ../unroll.pl 38raid6altivec1.c: raid6altivec.uc ../unroll.awk
39 $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@ 39 $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
40 40
41raid6altivec2.c: raid6altivec.uc ../unroll.pl 41raid6altivec2.c: raid6altivec.uc ../unroll.awk
42 $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@ 42 $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
43 43
44raid6altivec4.c: raid6altivec.uc ../unroll.pl 44raid6altivec4.c: raid6altivec.uc ../unroll.awk
45 $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@ 45 $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
46 46
47raid6altivec8.c: raid6altivec.uc ../unroll.pl 47raid6altivec8.c: raid6altivec.uc ../unroll.awk
48 $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@ 48 $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
49 49
50raid6int1.c: raid6int.uc ../unroll.pl 50raid6int1.c: raid6int.uc ../unroll.awk
51 $(PERL) ../unroll.pl 1 < raid6int.uc > $@ 51 $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
52 52
53raid6int2.c: raid6int.uc ../unroll.pl 53raid6int2.c: raid6int.uc ../unroll.awk
54 $(PERL) ../unroll.pl 2 < raid6int.uc > $@ 54 $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
55 55
56raid6int4.c: raid6int.uc ../unroll.pl 56raid6int4.c: raid6int.uc ../unroll.awk
57 $(PERL) ../unroll.pl 4 < raid6int.uc > $@ 57 $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
58 58
59raid6int8.c: raid6int.uc ../unroll.pl 59raid6int8.c: raid6int.uc ../unroll.awk
60 $(PERL) ../unroll.pl 8 < raid6int.uc > $@ 60 $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
61 61
62raid6int16.c: raid6int.uc ../unroll.pl 62raid6int16.c: raid6int.uc ../unroll.awk
63 $(PERL) ../unroll.pl 16 < raid6int.uc > $@ 63 $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
64 64
65raid6int32.c: raid6int.uc ../unroll.pl 65raid6int32.c: raid6int.uc ../unroll.awk
66 $(PERL) ../unroll.pl 32 < raid6int.uc > $@ 66 $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
67 67
68raid6tables.c: mktables 68raid6tables.c: mktables
69 ./mktables > raid6tables.c 69 ./mktables > raid6tables.c
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
new file mode 100644
index 000000000000..c6aa03631df8
--- /dev/null
+++ b/drivers/md/unroll.awk
@@ -0,0 +1,20 @@
1
2# This filter requires one command line option of form -vN=n
3# where n must be a decimal number.
4#
5# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
6# Replace each $# with n, and each $* with a single $.
7
8BEGIN {
9 n = N + 0
10}
11{
12 if (/\$\$/) { rep = n } else { rep = 1 }
13 for (i = 0; i < rep; ++i) {
14 tmp = $0
15 gsub(/\$\$/, i, tmp)
16 gsub(/\$\#/, n, tmp)
17 gsub(/\$\*/, "$", tmp)
18 print tmp
19 }
20}
diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl
deleted file mode 100644
index 3acc710a20ea..000000000000
--- a/drivers/md/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
1#!/usr/bin/perl
2#
3# Take a piece of C code and for each line which contains the sequence $$
4# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
5# by the unrolling factor, and $* with a single $
6#
7
8($n) = @ARGV;
9$n += 0;
10
11while ( defined($line = <STDIN>) ) {
12 if ( $line =~ /\$\$/ ) {
13 $rep = $n;
14 } else {
15 $rep = 1;
16 }
17 for ( $i = 0 ; $i < $rep ; $i++ ) {
18 $tmp = $line;
19 $tmp =~ s/\$\$/$i/g;
20 $tmp =~ s/\$\#/$n/g;
21 $tmp =~ s/\$\*/\$/g;
22 print $tmp;
23 }
24}
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 64595112000d..3a50ce96fcb9 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -616,13 +616,13 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
616 case RF2: 616 case RF2:
617 map[i].rf_a1 = (prog_cal[RF2] - prog_tab[RF2] - 617 map[i].rf_a1 = (prog_cal[RF2] - prog_tab[RF2] -
618 prog_cal[RF1] + prog_tab[RF1]) / 618 prog_cal[RF1] + prog_tab[RF1]) /
619 ((rf_freq[RF2] - rf_freq[RF1]) / 1000); 619 (s32)((rf_freq[RF2] - rf_freq[RF1]) / 1000);
620 map[i].rf2 = rf_freq[RF2] / 1000; 620 map[i].rf2 = rf_freq[RF2] / 1000;
621 break; 621 break;
622 case RF3: 622 case RF3:
623 map[i].rf_a2 = (prog_cal[RF3] - prog_tab[RF3] - 623 map[i].rf_a2 = (prog_cal[RF3] - prog_tab[RF3] -
624 prog_cal[RF2] + prog_tab[RF2]) / 624 prog_cal[RF2] + prog_tab[RF2]) /
625 ((rf_freq[RF3] - rf_freq[RF2]) / 1000); 625 (s32)((rf_freq[RF3] - rf_freq[RF2]) / 1000);
626 map[i].rf_b2 = prog_cal[RF2] - prog_tab[RF2]; 626 map[i].rf_b2 = prog_cal[RF2] - prog_tab[RF2];
627 map[i].rf3 = rf_freq[RF3] / 1000; 627 map[i].rf3 = rf_freq[RF3] / 1000;
628 break; 628 break;
@@ -1000,12 +1000,12 @@ static int tda18271_set_analog_params(struct dvb_frontend *fe,
1000 struct tda18271_std_map_item *map; 1000 struct tda18271_std_map_item *map;
1001 char *mode; 1001 char *mode;
1002 int ret; 1002 int ret;
1003 u32 freq = params->frequency * 62500; 1003 u32 freq = params->frequency * 125 *
1004 ((params->mode == V4L2_TUNER_RADIO) ? 1 : 1000) / 2;
1004 1005
1005 priv->mode = TDA18271_ANALOG; 1006 priv->mode = TDA18271_ANALOG;
1006 1007
1007 if (params->mode == V4L2_TUNER_RADIO) { 1008 if (params->mode == V4L2_TUNER_RADIO) {
1008 freq = freq / 1000;
1009 map = &std_map->fm_radio; 1009 map = &std_map->fm_radio;
1010 mode = "fm"; 1010 mode = "fm";
1011 } else if (params->std & V4L2_STD_MN) { 1011 } else if (params->std & V4L2_STD_MN) {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 3750ff48cba1..c37790ad92d0 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/sched.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
@@ -1203,7 +1204,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
1203 return mask; 1204 return mask;
1204} 1205}
1205 1206
1206static struct file_operations dvb_dvr_fops = { 1207static const struct file_operations dvb_dvr_fops = {
1207 .owner = THIS_MODULE, 1208 .owner = THIS_MODULE,
1208 .read = dvb_dvr_read, 1209 .read = dvb_dvr_read,
1209 .write = dvb_dvr_write, 1210 .write = dvb_dvr_write,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index eef6d3616626..91c537bca8ad 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -21,6 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/sched.h>
24#include <linux/spinlock.h> 25#include <linux/spinlock.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8c9ae0a3a272..0241a7c5c34a 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -63,6 +63,7 @@
63#include <asm/uaccess.h> 63#include <asm/uaccess.h>
64#include <linux/crc32.h> 64#include <linux/crc32.h>
65#include <linux/mutex.h> 65#include <linux/mutex.h>
66#include <linux/sched.h>
66 67
67#include "dvb_demux.h" 68#include "dvb_demux.h"
68#include "dvb_net.h" 69#include "dvb_net.h"
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 9744b0692417..0e4b97fba384 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -75,7 +75,7 @@ config DVB_USB_DIB0700
75 select DVB_DIB3000MC if !DVB_FE_CUSTOMISE 75 select DVB_DIB3000MC if !DVB_FE_CUSTOMISE
76 select DVB_S5H1411 if !DVB_FE_CUSTOMISE 76 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
77 select DVB_LGDT3305 if !DVB_FE_CUSTOMISE 77 select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
78 select DVB_TUNER_DIB0070 78 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
79 select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE 79 select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE
80 select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE 80 select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
81 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 81 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/dvb/dvb-usb/ce6230.c b/drivers/media/dvb/dvb-usb/ce6230.c
index 0737c6377892..3df2045b7d2d 100644
--- a/drivers/media/dvb/dvb-usb/ce6230.c
+++ b/drivers/media/dvb/dvb-usb/ce6230.c
@@ -105,7 +105,7 @@ static int ce6230_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
105 int i = 0; 105 int i = 0;
106 struct req_t req; 106 struct req_t req;
107 int ret = 0; 107 int ret = 0;
108 memset(&req, 0, sizeof(&req)); 108 memset(&req, 0, sizeof(req));
109 109
110 if (num > 2) 110 if (num > 2)
111 return -EINVAL; 111 return -EINVAL;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 0b2812aa30a4..6bd8951ea02b 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -1925,7 +1925,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1925 { NULL }, 1925 { NULL },
1926 }, 1926 },
1927 { "Leadtek Winfast DTV Dongle (STK7700P based)", 1927 { "Leadtek Winfast DTV Dongle (STK7700P based)",
1928 { &dib0700_usb_id_table[8] }, 1928 { &dib0700_usb_id_table[8], &dib0700_usb_id_table[34] },
1929 { NULL }, 1929 { NULL },
1930 }, 1930 },
1931 { "AVerMedia AVerTV DVB-T Express", 1931 { "AVerMedia AVerTV DVB-T Express",
@@ -2064,7 +2064,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2064 }, 2064 },
2065 }, 2065 },
2066 2066
2067 .num_device_descs = 12, 2067 .num_device_descs = 11,
2068 .devices = { 2068 .devices = {
2069 { "DiBcom STK7070P reference design", 2069 { "DiBcom STK7070P reference design",
2070 { &dib0700_usb_id_table[15], NULL }, 2070 { &dib0700_usb_id_table[15], NULL },
@@ -2098,11 +2098,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2098 { &dib0700_usb_id_table[30], NULL }, 2098 { &dib0700_usb_id_table[30], NULL },
2099 { NULL }, 2099 { NULL },
2100 }, 2100 },
2101 { "Terratec Cinergy T USB XXS/ T3",
2102 { &dib0700_usb_id_table[33],
2103 &dib0700_usb_id_table[52], NULL },
2104 { NULL },
2105 },
2106 { "Elgato EyeTV DTT", 2101 { "Elgato EyeTV DTT",
2107 { &dib0700_usb_id_table[49], NULL }, 2102 { &dib0700_usb_id_table[49], NULL },
2108 { NULL }, 2103 { NULL },
@@ -2343,8 +2338,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2343 { &dib0700_usb_id_table[59], NULL }, 2338 { &dib0700_usb_id_table[59], NULL },
2344 { NULL }, 2339 { NULL },
2345 }, 2340 },
2346 { "Terratec Cinergy T USB XXS (HD)", 2341 { "Terratec Cinergy T USB XXS (HD)/ T3",
2347 { &dib0700_usb_id_table[34], &dib0700_usb_id_table[60] }, 2342 { &dib0700_usb_id_table[33],
2343 &dib0700_usb_id_table[52],
2344 &dib0700_usb_id_table[60], NULL},
2348 { NULL }, 2345 { NULL },
2349 }, 2346 },
2350 }, 2347 },
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index d1b67fe0f011..485d061319ab 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1050,28 +1050,28 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
1050 c->operand[4] = 0; /* slot */ 1050 c->operand[4] = 0; /* slot */
1051 c->operand[5] = SFE_VENDOR_TAG_CA_PMT; /* ca tag */ 1051 c->operand[5] = SFE_VENDOR_TAG_CA_PMT; /* ca tag */
1052 c->operand[6] = 0; /* more/last */ 1052 c->operand[6] = 0; /* more/last */
1053 /* c->operand[7] = XXXprogram_info_length + 17; */ /* length */ 1053 /* Use three bytes for length field in case length > 127 */
1054 c->operand[8] = list_management; 1054 c->operand[10] = list_management;
1055 c->operand[9] = 0x01; /* pmt_cmd=OK_descramble */ 1055 c->operand[11] = 0x01; /* pmt_cmd=OK_descramble */
1056 1056
1057 /* TS program map table */ 1057 /* TS program map table */
1058 1058
1059 c->operand[10] = 0x02; /* Table id=2 */ 1059 c->operand[12] = 0x02; /* Table id=2 */
1060 c->operand[11] = 0x80; /* Section syntax + length */ 1060 c->operand[13] = 0x80; /* Section syntax + length */
1061 /* c->operand[12] = XXXprogram_info_length + 12; */ 1061 /* c->operand[14] = XXXprogram_info_length + 12; */
1062 c->operand[13] = msg[1]; /* Program number */ 1062 c->operand[15] = msg[1]; /* Program number */
1063 c->operand[14] = msg[2]; 1063 c->operand[16] = msg[2];
1064 c->operand[15] = 0x01; /* Version number=0 + current/next=1 */ 1064 c->operand[17] = 0x01; /* Version number=0 + current/next=1 */
1065 c->operand[16] = 0x00; /* Section number=0 */ 1065 c->operand[18] = 0x00; /* Section number=0 */
1066 c->operand[17] = 0x00; /* Last section number=0 */ 1066 c->operand[19] = 0x00; /* Last section number=0 */
1067 c->operand[18] = 0x1f; /* PCR_PID=1FFF */ 1067 c->operand[20] = 0x1f; /* PCR_PID=1FFF */
1068 c->operand[19] = 0xff; 1068 c->operand[21] = 0xff;
1069 c->operand[20] = (program_info_length >> 8); /* Program info length */ 1069 c->operand[22] = (program_info_length >> 8); /* Program info length */
1070 c->operand[21] = (program_info_length & 0xff); 1070 c->operand[23] = (program_info_length & 0xff);
1071 1071
1072 /* CA descriptors at programme level */ 1072 /* CA descriptors at programme level */
1073 read_pos = 6; 1073 read_pos = 6;
1074 write_pos = 22; 1074 write_pos = 24;
1075 if (program_info_length > 0) { 1075 if (program_info_length > 0) {
1076 pmt_cmd_id = msg[read_pos++]; 1076 pmt_cmd_id = msg[read_pos++];
1077 if (pmt_cmd_id != 1 && pmt_cmd_id != 4) 1077 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
@@ -1113,8 +1113,10 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
1113 c->operand[write_pos++] = 0x00; 1113 c->operand[write_pos++] = 0x00;
1114 c->operand[write_pos++] = 0x00; 1114 c->operand[write_pos++] = 0x00;
1115 1115
1116 c->operand[7] = write_pos - 8; 1116 c->operand[7] = 0x82;
1117 c->operand[12] = write_pos - 13; 1117 c->operand[8] = (write_pos - 10) >> 8;
1118 c->operand[9] = (write_pos - 10) & 0xff;
1119 c->operand[14] = write_pos - 15;
1118 1120
1119 crc32_csum = crc32_be(0, &c->operand[10], c->operand[12] - 1); 1121 crc32_csum = crc32_be(0, &c->operand[10], c->operand[12] - 1);
1120 c->operand[write_pos - 4] = (crc32_csum >> 24) & 0xff; 1122 c->operand[write_pos - 4] = (crc32_csum >> 24) & 0xff;
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index eeb80d0ea3ff..853e04b7cb36 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
215 return POLLIN; 215 return POLLIN;
216} 216}
217 217
218static struct file_operations fdtv_ca_fops = { 218static const struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl, 220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open, 221 .open = dvb_generic_open,
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
index 7ba43630a25d..e49cdc88b0c7 100644
--- a/drivers/media/dvb/firewire/firedtv-fe.c
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -141,18 +141,12 @@ static int fdtv_read_uncorrected_blocks(struct dvb_frontend *fe, u32 *ucblocks)
141 return -EOPNOTSUPP; 141 return -EOPNOTSUPP;
142} 142}
143 143
144#define ACCEPTED 0x9
145
146static int fdtv_set_frontend(struct dvb_frontend *fe, 144static int fdtv_set_frontend(struct dvb_frontend *fe,
147 struct dvb_frontend_parameters *params) 145 struct dvb_frontend_parameters *params)
148{ 146{
149 struct firedtv *fdtv = fe->sec_priv; 147 struct firedtv *fdtv = fe->sec_priv;
150 148
151 /* FIXME: avc_tuner_dsd never returns ACCEPTED. Check status? */ 149 return avc_tuner_dsd(fdtv, params);
152 if (avc_tuner_dsd(fdtv, params) != ACCEPTED)
153 return -EINVAL;
154 else
155 return 0; /* not sure of this... */
156} 150}
157 151
158static int fdtv_get_frontend(struct dvb_frontend *fe, 152static int fdtv_get_frontend(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb/frontends/dib0070.h b/drivers/media/dvb/frontends/dib0070.h
index 8a2e1e710adb..eec9e52ffa75 100644
--- a/drivers/media/dvb/frontends/dib0070.h
+++ b/drivers/media/dvb/frontends/dib0070.h
@@ -51,6 +51,7 @@ struct dib0070_config {
51#if defined(CONFIG_DVB_TUNER_DIB0070) || (defined(CONFIG_DVB_TUNER_DIB0070_MODULE) && defined(MODULE)) 51#if defined(CONFIG_DVB_TUNER_DIB0070) || (defined(CONFIG_DVB_TUNER_DIB0070_MODULE) && defined(MODULE))
52extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg); 52extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
53extern u16 dib0070_wbd_offset(struct dvb_frontend *); 53extern u16 dib0070_wbd_offset(struct dvb_frontend *);
54extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
54#else 55#else
55static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg) 56static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg)
56{ 57{
@@ -63,7 +64,11 @@ static inline u16 dib0070_wbd_offset(struct dvb_frontend *fe)
63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
64 return -ENODEV; 65 return -ENODEV;
65} 66}
67
68static inline void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
69{
70 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
71}
66#endif 72#endif
67extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
68 73
69#endif 74#endif
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 55ef6eeb0769..0781f94e05d2 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -1375,6 +1375,11 @@ struct dvb_frontend * dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
1375 if (dib7000p_identify(st) != 0) 1375 if (dib7000p_identify(st) != 0)
1376 goto error; 1376 goto error;
1377 1377
1378 /* FIXME: make sure the dev.parent field is initialized, or else
1379 request_firmware() will hit an OOPS (this should be moved somewhere
1380 more common) */
1381 st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
1382
1378 dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr); 1383 dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
1379 1384
1380 dib7000p_demod_reset(st); 1385 dib7000p_demod_reset(st);
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 81e623a90f09..1fd8306371e2 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -27,6 +27,7 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/kthread.h> 28#include <linux/kthread.h>
29#include <linux/freezer.h> 29#include <linux/freezer.h>
30#include <linux/vmalloc.h>
30 31
31#include "dvbdev.h" 32#include "dvbdev.h"
32#include "dvb_demux.h" 33#include "dvb_demux.h"
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index cb8a358b7310..8f88a586b0dd 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -529,6 +529,12 @@ struct usb_device_id smsusb_id_table[] = {
529 .driver_info = SMS1XXX_BOARD_SIANO_NICE }, 529 .driver_info = SMS1XXX_BOARD_SIANO_NICE },
530 { USB_DEVICE(0x187f, 0x0301), 530 { USB_DEVICE(0x187f, 0x0301),
531 .driver_info = SMS1XXX_BOARD_SIANO_VENICE }, 531 .driver_info = SMS1XXX_BOARD_SIANO_VENICE },
532 { USB_DEVICE(0x2040, 0xb900),
533 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
534 { USB_DEVICE(0x2040, 0xb910),
535 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
536 { USB_DEVICE(0x2040, 0xc000),
537 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
532 { } /* Terminating entry */ 538 { } /* Terminating entry */
533 }; 539 };
534 540
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 8b1440136c45..482d0f3be5ff 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -38,6 +38,7 @@
38#include <linux/videodev2.h> /* V4L2 API defs */ 38#include <linux/videodev2.h> /* V4L2 API defs */
39#include <linux/param.h> 39#include <linux/param.h>
40#include <linux/pnp.h> 40#include <linux/pnp.h>
41#include <linux/sched.h>
41#include <linux/io.h> /* outb, outb_p */ 42#include <linux/io.h> /* outb, outb_p */
42#include <media/v4l2-device.h> 43#include <media/v4l2-device.h>
43#include <media/v4l2-ioctl.h> 44#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 939d1e512974..a6724019c66f 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1299,7 +1299,7 @@ set_tvnorm(struct bttv *btv, unsigned int norm)
1299 1299
1300 tvnorm = &bttv_tvnorms[norm]; 1300 tvnorm = &bttv_tvnorms[norm];
1301 1301
1302 if (!memcmp(&bttv_tvnorms[btv->tvnorm].cropcap, &tvnorm->cropcap, 1302 if (memcmp(&bttv_tvnorms[btv->tvnorm].cropcap, &tvnorm->cropcap,
1303 sizeof (tvnorm->cropcap))) { 1303 sizeof (tvnorm->cropcap))) {
1304 bttv_crop_reset(&btv->crop[0], norm); 1304 bttv_crop_reset(&btv->crop[0], norm);
1305 btv->crop[1] = btv->crop[0]; /* current = default */ 1305 btv->crop[1] = btv->crop[0]; /* current = default */
@@ -3800,11 +3800,34 @@ bttv_irq_next_video(struct bttv *btv, struct bttv_buffer_set *set)
3800 if (!V4L2_FIELD_HAS_BOTH(item->vb.field) && 3800 if (!V4L2_FIELD_HAS_BOTH(item->vb.field) &&
3801 (item->vb.queue.next != &btv->capture)) { 3801 (item->vb.queue.next != &btv->capture)) {
3802 item = list_entry(item->vb.queue.next, struct bttv_buffer, vb.queue); 3802 item = list_entry(item->vb.queue.next, struct bttv_buffer, vb.queue);
3803 /* Mike Isely <isely@pobox.com> - Only check
3804 * and set up the bottom field in the logic
3805 * below. Don't ever do the top field. This
3806 * of course means that if we set up the
3807 * bottom field in the above code that we'll
3808 * actually skip a field. But that's OK.
3809 * Having processed only a single buffer this
3810 * time, then the next time around the first
3811 * available buffer should be for a top field.
3812 * That will then cause us here to set up a
3813 * top then a bottom field in the normal way.
3814 * The alternative to this understanding is
3815 * that we set up the second available buffer
3816 * as a top field, but that's out of order
3817 * since this driver always processes the top
3818 * field first - the effect will be the two
3819 * buffers being returned in the wrong order,
3820 * with the second buffer also being delayed
3821 * by one field time (owing to the fifo nature
3822 * of videobuf). Worse still, we'll be stuck
3823 * doing fields out of order now every time
3824 * until something else causes a field to be
3825 * dropped. By effectively forcing a field to
3826 * drop this way then we always get back into
3827 * sync within a single frame time. (Out of
3828 * order fields can screw up deinterlacing
3829 * algorithms.) */
3803 if (!V4L2_FIELD_HAS_BOTH(item->vb.field)) { 3830 if (!V4L2_FIELD_HAS_BOTH(item->vb.field)) {
3804 if (NULL == set->top &&
3805 V4L2_FIELD_TOP == item->vb.field) {
3806 set->top = item;
3807 }
3808 if (NULL == set->bottom && 3831 if (NULL == set->bottom &&
3809 V4L2_FIELD_BOTTOM == item->vb.field) { 3832 V4L2_FIELD_BOTTOM == item->vb.field) {
3810 set->bottom = item; 3833 set->bottom = item;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 657c481d255c..10230cb3d210 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1325,7 +1325,7 @@ static void cafe_v4l_vm_close(struct vm_area_struct *vma)
1325 mutex_unlock(&sbuf->cam->s_mutex); 1325 mutex_unlock(&sbuf->cam->s_mutex);
1326} 1326}
1327 1327
1328static struct vm_operations_struct cafe_v4l_vm_ops = { 1328static const struct vm_operations_struct cafe_v4l_vm_ops = {
1329 .open = cafe_v4l_vm_open, 1329 .open = cafe_v4l_vm_open,
1330 .close = cafe_v4l_vm_close 1330 .close = cafe_v4l_vm_close
1331}; 1331};
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 43ab0adf3b61..2377313c041a 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/sched.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
36#include <linux/ctype.h> 37#include <linux/ctype.h>
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 7bd8a70f0a0b..ac947aecb9c3 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -383,6 +383,11 @@ static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream)
383 383
384static int snd_em28xx_prepare(struct snd_pcm_substream *substream) 384static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
385{ 385{
386 struct em28xx *dev = snd_pcm_substream_chip(substream);
387
388 dev->adev.hwptr_done_capture = 0;
389 dev->adev.capture_transfer_done = 0;
390
386 return 0; 391 return 0;
387} 392}
388 393
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 74092f436be6..88987a57cf7b 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1496,7 +1496,7 @@ static void et61x251_vm_close(struct vm_area_struct* vma)
1496} 1496}
1497 1497
1498 1498
1499static struct vm_operations_struct et61x251_vm_ops = { 1499static const struct vm_operations_struct et61x251_vm_ops = {
1500 .open = et61x251_vm_open, 1500 .open = et61x251_vm_open,
1501 .close = et61x251_vm_close, 1501 .close = et61x251_vm_close,
1502}; 1502};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index cf6540da1e42..23d3fb776918 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -99,7 +99,7 @@ static void gspca_vm_close(struct vm_area_struct *vma)
99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED; 99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED;
100} 100}
101 101
102static struct vm_operations_struct gspca_vm_ops = { 102static const struct vm_operations_struct gspca_vm_ops = {
103 .open = gspca_vm_open, 103 .open = gspca_vm_open,
104 .close = gspca_vm_close, 104 .close = gspca_vm_close,
105}; 105};
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 59400e858965..a27afeb6f39b 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -35,12 +35,25 @@ static
35 const 35 const
36 struct dmi_system_id s5k4aa_vflip_dmi_table[] = { 36 struct dmi_system_id s5k4aa_vflip_dmi_table[] = {
37 { 37 {
38 .ident = "BRUNEINIT",
39 .matches = {
40 DMI_MATCH(DMI_SYS_VENDOR, "BRUNENIT"),
41 DMI_MATCH(DMI_PRODUCT_NAME, "BRUNENIT"),
42 DMI_MATCH(DMI_BOARD_VERSION, "00030D0000000001")
43 }
44 }, {
38 .ident = "Fujitsu-Siemens Amilo Xa 2528", 45 .ident = "Fujitsu-Siemens Amilo Xa 2528",
39 .matches = { 46 .matches = {
40 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 47 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
41 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528") 48 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528")
42 } 49 }
43 }, { 50 }, {
51 .ident = "Fujitsu-Siemens Amilo Xi 2528",
52 .matches = {
53 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
54 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 2528")
55 }
56 }, {
44 .ident = "Fujitsu-Siemens Amilo Xi 2550", 57 .ident = "Fujitsu-Siemens Amilo Xi 2550",
45 .matches = { 58 .matches = {
46 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 59 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
@@ -57,6 +70,13 @@ static
57 .matches = { 70 .matches = {
58 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), 71 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
59 DMI_MATCH(DMI_PRODUCT_NAME, "GX700"), 72 DMI_MATCH(DMI_PRODUCT_NAME, "GX700"),
73 DMI_MATCH(DMI_BIOS_DATE, "12/02/2008")
74 }
75 }, {
76 .ident = "MSI GX700",
77 .matches = {
78 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
79 DMI_MATCH(DMI_PRODUCT_NAME, "GX700"),
60 DMI_MATCH(DMI_BIOS_DATE, "07/26/2007") 80 DMI_MATCH(DMI_BIOS_DATE, "07/26/2007")
61 } 81 }
62 }, { 82 }, {
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 140c8f320e47..f8328b9efae5 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -483,7 +483,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
483 data[3] = 0x2c; /* reg 2, H size/8 */ 483 data[3] = 0x2c; /* reg 2, H size/8 */
484 data[4] = 0x48; /* reg 3, V size/4 */ 484 data[4] = 0x48; /* reg 3, V size/4 */
485 data[6] = 0x06; /* reg 5, H start */ 485 data[6] = 0x06; /* reg 5, H start */
486 data[8] = 0x06 + sd->sensor_type; /* reg 7, V start */ 486 data[8] = 0x06 - sd->sensor_type; /* reg 7, V start */
487 break; 487 break;
488 } 488 }
489 err_code = mr_write(gspca_dev, 11); 489 err_code = mr_write(gspca_dev, 11);
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 2f6e135d94bc..a5c190e93799 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2919,7 +2919,7 @@ static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
2919 /* A false positive here is likely, until OVT gives me 2919 /* A false positive here is likely, until OVT gives me
2920 * the definitive SOF/EOF format */ 2920 * the definitive SOF/EOF format */
2921 if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { 2921 if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
2922 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0); 2922 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
2923 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0); 2923 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
2924 sd->packet_nr = 0; 2924 sd->packet_nr = 0;
2925 } 2925 }
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 65489d6b0d89..bfae63f5584c 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -394,7 +394,8 @@ frame_data:
394 PDEBUG(D_PACK, "End of frame detected"); 394 PDEBUG(D_PACK, "End of frame detected");
395 395
396 /* Complete the last frame (if any) */ 396 /* Complete the last frame (if any) */
397 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0); 397 frame = gspca_frame_add(gspca_dev, LAST_PACKET,
398 frame, data, 0);
398 399
399 if (chunk_len) 400 if (chunk_len)
400 PDEBUG(D_ERR, "Chunk length is " 401 PDEBUG(D_ERR, "Chunk length is "
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index d0765bed79c9..01e1eefcf1eb 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -28,6 +28,7 @@
28 */ 28 */
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/sched.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/videodev.h> 33#include <linux/videodev.h>
33#include <media/v4l2-common.h> 34#include <media/v4l2-common.h>
@@ -1589,7 +1590,7 @@ static void meye_vm_close(struct vm_area_struct *vma)
1589 meye.vma_use_count[idx]--; 1590 meye.vma_use_count[idx]--;
1590} 1591}
1591 1592
1592static struct vm_operations_struct meye_vm_ops = { 1593static const struct vm_operations_struct meye_vm_ops = {
1593 .open = meye_vm_open, 1594 .open = meye_vm_open,
1594 .close = meye_vm_close, 1595 .close = meye_vm_close,
1595}; 1596};
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 6952e9602d5d..51b683c63b70 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -26,6 +26,7 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/sched.h>
29 30
30#include <media/v4l2-common.h> 31#include <media/v4l2-common.h>
31#include <media/v4l2-dev.h> 32#include <media/v4l2-dev.h>
@@ -1432,7 +1433,9 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1432 icd->sense = &sense; 1433 icd->sense = &sense;
1433 1434
1434 cam_f.fmt.pix.pixelformat = cam_fmt->fourcc; 1435 cam_f.fmt.pix.pixelformat = cam_fmt->fourcc;
1435 ret = v4l2_subdev_call(sd, video, s_fmt, f); 1436 ret = v4l2_subdev_call(sd, video, s_fmt, &cam_f);
1437 cam_f.fmt.pix.pixelformat = pix->pixelformat;
1438 *pix = cam_f.fmt.pix;
1436 1439
1437 icd->sense = NULL; 1440 icd->sense = NULL;
1438 1441
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 9e3262c0ba37..2c0bb06cab3b 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -598,11 +598,6 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
598 buf = list_entry(dma_q->active.next, 598 buf = list_entry(dma_q->active.next,
599 struct s2255_buffer, vb.queue); 599 struct s2255_buffer, vb.queue);
600 600
601 if (!waitqueue_active(&buf->vb.done)) {
602 /* no one active */
603 rc = -1;
604 goto unlock;
605 }
606 list_del(&buf->vb.queue); 601 list_del(&buf->vb.queue);
607 do_gettimeofday(&buf->vb.ts); 602 do_gettimeofday(&buf->vb.ts);
608 dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i); 603 dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i);
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 71145bff94fa..09013229d4aa 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -3428,6 +3428,7 @@ struct saa7134_board saa7134_boards[] = {
3428 .tuner_config = 3, 3428 .tuner_config = 3,
3429 .mpeg = SAA7134_MPEG_DVB, 3429 .mpeg = SAA7134_MPEG_DVB,
3430 .ts_type = SAA7134_MPEG_TS_SERIAL, 3430 .ts_type = SAA7134_MPEG_TS_SERIAL,
3431 .ts_force_val = 1,
3431 .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ 3432 .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */
3432 .inputs = {{ 3433 .inputs = {{
3433 .name = name_tv, 3434 .name = name_tv,
diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
index 3fa652279ac0..03488ba4c99c 100644
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -262,11 +262,13 @@ int saa7134_ts_start(struct saa7134_dev *dev)
262 switch (saa7134_boards[dev->board].ts_type) { 262 switch (saa7134_boards[dev->board].ts_type) {
263 case SAA7134_MPEG_TS_PARALLEL: 263 case SAA7134_MPEG_TS_PARALLEL:
264 saa_writeb(SAA7134_TS_SERIAL0, 0x40); 264 saa_writeb(SAA7134_TS_SERIAL0, 0x40);
265 saa_writeb(SAA7134_TS_PARALLEL, 0xec); 265 saa_writeb(SAA7134_TS_PARALLEL, 0xec |
266 (saa7134_boards[dev->board].ts_force_val << 4));
266 break; 267 break;
267 case SAA7134_MPEG_TS_SERIAL: 268 case SAA7134_MPEG_TS_SERIAL:
268 saa_writeb(SAA7134_TS_SERIAL0, 0xd8); 269 saa_writeb(SAA7134_TS_SERIAL0, 0xd8);
269 saa_writeb(SAA7134_TS_PARALLEL, 0x6c); 270 saa_writeb(SAA7134_TS_PARALLEL, 0x6c |
271 (saa7134_boards[dev->board].ts_force_val << 4));
270 saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc); 272 saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc);
271 saa_writeb(SAA7134_TS_SERIAL1, 0x02); 273 saa_writeb(SAA7134_TS_SERIAL1, 0x02);
272 break; 274 break;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 6ee3e9b7769e..f8697d46ff5f 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -360,6 +360,7 @@ struct saa7134_board {
360 enum saa7134_mpeg_type mpeg; 360 enum saa7134_mpeg_type mpeg;
361 enum saa7134_mpeg_ts_type ts_type; 361 enum saa7134_mpeg_ts_type ts_type;
362 unsigned int vid_port_opts; 362 unsigned int vid_port_opts;
363 unsigned int ts_force_val:1;
363}; 364};
364 365
365#define card_has_radio(dev) (NULL != saa7134_boards[dev->board].radio.name) 366#define card_has_radio(dev) (NULL != saa7134_boards[dev->board].radio.name)
diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
index c45966edc0cf..9c1d3ac43869 100644
--- a/drivers/media/video/saa7164/saa7164-cmd.c
+++ b/drivers/media/video/saa7164/saa7164-cmd.c
@@ -347,7 +347,7 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, tmComResCmd_t command,
347 347
348 /* Prepare some basic command/response structures */ 348 /* Prepare some basic command/response structures */
349 memset(&command_t, 0, sizeof(command_t)); 349 memset(&command_t, 0, sizeof(command_t));
350 memset(&response_t, 0, sizeof(&response_t)); 350 memset(&response_t, 0, sizeof(response_t));
351 pcommand_t = &command_t; 351 pcommand_t = &command_t;
352 presponse_t = &response_t; 352 presponse_t = &response_t;
353 command_t.id = id; 353 command_t.id = id;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 5ab7c5aefd62..2f78b4f263f5 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -404,7 +404,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
404 "SuperH Mobile CEU driver attached to camera %d\n", 404 "SuperH Mobile CEU driver attached to camera %d\n",
405 icd->devnum); 405 icd->devnum);
406 406
407 clk_enable(pcdev->clk); 407 pm_runtime_get_sync(ici->v4l2_dev.dev);
408 408
409 ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ 409 ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
410 while (ceu_read(pcdev, CSTSR) & 1) 410 while (ceu_read(pcdev, CSTSR) & 1)
@@ -438,7 +438,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
438 } 438 }
439 spin_unlock_irqrestore(&pcdev->lock, flags); 439 spin_unlock_irqrestore(&pcdev->lock, flags);
440 440
441 clk_disable(pcdev->clk); 441 pm_runtime_put_sync(ici->v4l2_dev.dev);
442 442
443 dev_info(icd->dev.parent, 443 dev_info(icd->dev.parent,
444 "SuperH Mobile CEU driver detached from camera %d\n", 444 "SuperH Mobile CEU driver detached from camera %d\n",
@@ -1173,8 +1173,8 @@ static int get_scales(struct soc_camera_device *icd,
1173 width_in = scale_up(cam->ceu_rect.width, *scale_h); 1173 width_in = scale_up(cam->ceu_rect.width, *scale_h);
1174 height_in = scale_up(cam->ceu_rect.height, *scale_v); 1174 height_in = scale_up(cam->ceu_rect.height, *scale_v);
1175 1175
1176 *scale_h = calc_generic_scale(cam->ceu_rect.width, icd->user_width); 1176 *scale_h = calc_generic_scale(width_in, icd->user_width);
1177 *scale_v = calc_generic_scale(cam->ceu_rect.height, icd->user_height); 1177 *scale_v = calc_generic_scale(height_in, icd->user_height);
1178 1178
1179 return 0; 1179 return 0;
1180} 1180}
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 9d84c94e8a40..4a7711c3e745 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2077,7 +2077,7 @@ static void sn9c102_vm_close(struct vm_area_struct* vma)
2077} 2077}
2078 2078
2079 2079
2080static struct vm_operations_struct sn9c102_vm_ops = { 2080static const struct vm_operations_struct sn9c102_vm_ops = {
2081 .open = sn9c102_vm_open, 2081 .open = sn9c102_vm_open,
2082 .close = sn9c102_vm_close, 2082 .close = sn9c102_vm_close,
2083}; 2083};
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 59aa7a3694c2..36e617bd13c7 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1160,13 +1160,15 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1160 if (icd->iface == ici->nr) { 1160 if (icd->iface == ici->nr) {
1161 /* The bus->remove will be called */ 1161 /* The bus->remove will be called */
1162 device_unregister(&icd->dev); 1162 device_unregister(&icd->dev);
1163 /* Not before device_unregister(), .remove 1163 /*
1164 * needs parent to call ici->ops->remove() */ 1164 * Not before device_unregister(), .remove
1165 icd->dev.parent = NULL; 1165 * needs parent to call ici->ops->remove().
1166 1166 * If the host module is loaded again, device_register()
1167 /* If the host module is loaded again, device_register() 1167 * would complain "already initialised," since 2.6.32
1168 * would complain "already initialised" */ 1168 * this is also needed to prevent use-after-free of the
1169 memset(&icd->dev.kobj, 0, sizeof(icd->dev.kobj)); 1169 * device private data.
1170 */
1171 memset(&icd->dev, 0, sizeof(icd->dev));
1170 } 1172 }
1171 } 1173 }
1172 1174
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 0b996ea4134e..6b41865f42bd 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -790,7 +790,7 @@ static void stk_v4l_vm_close(struct vm_area_struct *vma)
790 if (sbuf->mapcount == 0) 790 if (sbuf->mapcount == 0)
791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED; 791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
792} 792}
793static struct vm_operations_struct stk_v4l_vm_ops = { 793static const struct vm_operations_struct stk_v4l_vm_ops = {
794 .open = stk_v4l_vm_open, 794 .open = stk_v4l_vm_open,
795 .close = stk_v4l_vm_close 795 .close = stk_v4l_vm_close
796}; 796};
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index c3225a561748..1b89735e62fd 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -348,7 +348,7 @@ static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
348 __s32 value, __u8 *data) 348 __s32 value, __u8 *data)
349{ 349{
350 data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff; 350 data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
351 data[2] = min(abs(value), 0xff); 351 data[2] = min((int)abs(value), 0xff);
352} 352}
353 353
354static struct uvc_control_mapping uvc_ctrl_mappings[] = { 354static struct uvc_control_mapping uvc_ctrl_mappings[] = {
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 9e7351569b5d..a2bdd806efab 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1069,7 +1069,7 @@ static void uvc_vm_close(struct vm_area_struct *vma)
1069 buffer->vma_use_count--; 1069 buffer->vma_use_count--;
1070} 1070}
1071 1071
1072static struct vm_operations_struct uvc_vm_ops = { 1072static const struct vm_operations_struct uvc_vm_ops = {
1073 .open = uvc_vm_open, 1073 .open = uvc_vm_open,
1074 .close = uvc_vm_close, 1074 .close = uvc_vm_close,
1075}; 1075};
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index f960e8ea4f17..a6e41d12b221 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -90,7 +90,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
90 ctrl->dwMaxVideoFrameSize = 90 ctrl->dwMaxVideoFrameSize =
91 frame->dwMaxVideoFrameBufferSize; 91 frame->dwMaxVideoFrameBufferSize;
92 92
93 if (stream->dev->quirks & UVC_QUIRK_FIX_BANDWIDTH && 93 if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) &&
94 stream->dev->quirks & UVC_QUIRK_FIX_BANDWIDTH &&
94 stream->intf->num_altsetting > 1) { 95 stream->intf->num_altsetting > 1) {
95 u32 interval; 96 u32 interval;
96 u32 bandwidth; 97 u32 bandwidth;
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index f1ccf98c0a6f..8e93c6f25c83 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/sched.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
22 23
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index d09ce83a9429..635ffc7b0391 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -105,7 +105,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
105 } 105 }
106} 106}
107 107
108static struct vm_operations_struct videobuf_vm_ops = { 108static const struct vm_operations_struct videobuf_vm_ops = {
109 .open = videobuf_vm_open, 109 .open = videobuf_vm_open,
110 .close = videobuf_vm_close, 110 .close = videobuf_vm_close,
111}; 111};
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index a8dd22ace3fb..032ebae0134a 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/sched.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26 27
@@ -394,7 +395,7 @@ videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
394 return 0; 395 return 0;
395} 396}
396 397
397static struct vm_operations_struct videobuf_vm_ops = 398static const struct vm_operations_struct videobuf_vm_ops =
398{ 399{
399 .open = videobuf_vm_open, 400 .open = videobuf_vm_open,
400 .close = videobuf_vm_close, 401 .close = videobuf_vm_close,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 30ae30f99ccc..35f3900c5633 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -116,7 +116,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
116 return; 116 return;
117} 117}
118 118
119static struct vm_operations_struct videobuf_vm_ops = 119static const struct vm_operations_struct videobuf_vm_ops =
120{ 120{
121 .open = videobuf_vm_open, 121 .open = videobuf_vm_open,
122 .close = videobuf_vm_close, 122 .close = videobuf_vm_close,
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index cd6a3446ab7e..b034a81d2b1c 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -3857,7 +3857,7 @@ static void vino_vm_close(struct vm_area_struct *vma)
3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count); 3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count);
3858} 3858}
3859 3859
3860static struct vm_operations_struct vino_vm_ops = { 3860static const struct vm_operations_struct vino_vm_ops = {
3861 .open = vino_vm_open, 3861 .open = vino_vm_open,
3862 .close = vino_vm_close, 3862 .close = vino_vm_close,
3863}; 3863};
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index b3c6436b33ba..312a71336fd0 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -935,7 +935,7 @@ static void zc0301_vm_close(struct vm_area_struct* vma)
935} 935}
936 936
937 937
938static struct vm_operations_struct zc0301_vm_ops = { 938static const struct vm_operations_struct zc0301_vm_ops = {
939 .open = zc0301_vm_open, 939 .open = zc0301_vm_open,
940 .close = zc0301_vm_close, 940 .close = zc0301_vm_close,
941}; 941};
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index bcdefb1bcb3d..47137deafcfd 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3172,7 +3172,7 @@ zoran_vm_close (struct vm_area_struct *vma)
3172 mutex_unlock(&zr->resource_lock); 3172 mutex_unlock(&zr->resource_lock);
3173} 3173}
3174 3174
3175static struct vm_operations_struct zoran_vm_ops = { 3175static const struct vm_operations_struct zoran_vm_ops = {
3176 .open = zoran_vm_open, 3176 .open = zoran_vm_open,
3177 .close = zoran_vm_close, 3177 .close = zoran_vm_close,
3178}; 3178};
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index bc2ec2182c00..34f3f36f819b 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -56,6 +56,7 @@
56#include <linux/init.h> 56#include <linux/init.h>
57#include <linux/module.h> 57#include <linux/module.h>
58#include <linux/fs.h> 58#include <linux/fs.h>
59#include <linux/sched.h>
59 60
60#define my_VERSION MPT_LINUX_VERSION_COMMON 61#define my_VERSION MPT_LINUX_VERSION_COMMON
61#define MYNAM "mptlan" 62#define MYNAM "mptlan"
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 5447da16a170..613481028272 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -57,8 +57,6 @@
57 * The AB3100 is usually assigned address 0x48 (7-bit) 57 * The AB3100 is usually assigned address 0x48 (7-bit)
58 * The chip is defined in the platform i2c_board_data section. 58 * The chip is defined in the platform i2c_board_data section.
59 */ 59 */
60static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
61I2C_CLIENT_INSMOD_1(ab3100);
62 60
63u8 ab3100_get_chip_type(struct ab3100 *ab3100) 61u8 ab3100_get_chip_type(struct ab3100 *ab3100)
64{ 62{
@@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
966} 964}
967 965
968static const struct i2c_device_id ab3100_id[] = { 966static const struct i2c_device_id ab3100_id[] = {
969 { "ab3100", ab3100 }, 967 { "ab3100", 0 },
970 { } 968 { }
971}; 969};
972MODULE_DEVICE_TABLE(i2c, ab3100_id); 970MODULE_DEVICE_TABLE(i2c, ab3100_id);
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index e424cf6d8e9e..a1c47ee95c0e 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -480,7 +480,6 @@ static int
480add_children(struct twl4030_platform_data *pdata, unsigned long features) 480add_children(struct twl4030_platform_data *pdata, unsigned long features)
481{ 481{
482 struct device *child; 482 struct device *child;
483 struct device *usb_transceiver = NULL;
484 483
485 if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) { 484 if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
486 child = add_child(3, "twl4030_bci", 485 child = add_child(3, "twl4030_bci",
@@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
532 } 531 }
533 532
534 if (twl_has_usb() && pdata->usb) { 533 if (twl_has_usb() && pdata->usb) {
534
535 static struct regulator_consumer_supply usb1v5 = {
536 .supply = "usb1v5",
537 };
538 static struct regulator_consumer_supply usb1v8 = {
539 .supply = "usb1v8",
540 };
541 static struct regulator_consumer_supply usb3v1 = {
542 .supply = "usb3v1",
543 };
544
545 /* First add the regulators so that they can be used by transceiver */
546 if (twl_has_regulator()) {
547 /* this is a template that gets copied */
548 struct regulator_init_data usb_fixed = {
549 .constraints.valid_modes_mask =
550 REGULATOR_MODE_NORMAL
551 | REGULATOR_MODE_STANDBY,
552 .constraints.valid_ops_mask =
553 REGULATOR_CHANGE_MODE
554 | REGULATOR_CHANGE_STATUS,
555 };
556
557 child = add_regulator_linked(TWL4030_REG_VUSB1V5,
558 &usb_fixed, &usb1v5, 1);
559 if (IS_ERR(child))
560 return PTR_ERR(child);
561
562 child = add_regulator_linked(TWL4030_REG_VUSB1V8,
563 &usb_fixed, &usb1v8, 1);
564 if (IS_ERR(child))
565 return PTR_ERR(child);
566
567 child = add_regulator_linked(TWL4030_REG_VUSB3V1,
568 &usb_fixed, &usb3v1, 1);
569 if (IS_ERR(child))
570 return PTR_ERR(child);
571
572 }
573
535 child = add_child(0, "twl4030_usb", 574 child = add_child(0, "twl4030_usb",
536 pdata->usb, sizeof(*pdata->usb), 575 pdata->usb, sizeof(*pdata->usb),
537 true, 576 true,
538 /* irq0 = USB_PRES, irq1 = USB */ 577 /* irq0 = USB_PRES, irq1 = USB */
539 pdata->irq_base + 8 + 2, pdata->irq_base + 4); 578 pdata->irq_base + 8 + 2, pdata->irq_base + 4);
579
540 if (IS_ERR(child)) 580 if (IS_ERR(child))
541 return PTR_ERR(child); 581 return PTR_ERR(child);
542 582
543 /* we need to connect regulators to this transceiver */ 583 /* we need to connect regulators to this transceiver */
544 usb_transceiver = child; 584 if (twl_has_regulator() && child) {
585 usb1v5.dev = child;
586 usb1v8.dev = child;
587 usb3v1.dev = child;
588 }
545 } 589 }
546 590
547 if (twl_has_watchdog()) { 591 if (twl_has_watchdog()) {
@@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
580 return PTR_ERR(child); 624 return PTR_ERR(child);
581 } 625 }
582 626
583 if (twl_has_regulator() && usb_transceiver) {
584 static struct regulator_consumer_supply usb1v5 = {
585 .supply = "usb1v5",
586 };
587 static struct regulator_consumer_supply usb1v8 = {
588 .supply = "usb1v8",
589 };
590 static struct regulator_consumer_supply usb3v1 = {
591 .supply = "usb3v1",
592 };
593
594 /* this is a template that gets copied */
595 struct regulator_init_data usb_fixed = {
596 .constraints.valid_modes_mask =
597 REGULATOR_MODE_NORMAL
598 | REGULATOR_MODE_STANDBY,
599 .constraints.valid_ops_mask =
600 REGULATOR_CHANGE_MODE
601 | REGULATOR_CHANGE_STATUS,
602 };
603
604 usb1v5.dev = usb_transceiver;
605 usb1v8.dev = usb_transceiver;
606 usb3v1.dev = usb_transceiver;
607
608 child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed,
609 &usb1v5, 1);
610 if (IS_ERR(child))
611 return PTR_ERR(child);
612
613 child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed,
614 &usb1v8, 1);
615 if (IS_ERR(child))
616 return PTR_ERR(child);
617
618 child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed,
619 &usb3v1, 1);
620 if (IS_ERR(child))
621 return PTR_ERR(child);
622 }
623
624 /* maybe add LDOs that are omitted on cost-reduced parts */ 627 /* maybe add LDOs that are omitted on cost-reduced parts */
625 if (twl_has_regulator() && !(features & TPS_SUBSET)) { 628 if (twl_has_regulator() && !(features & TPS_SUBSET)) {
626 child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); 629 child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
@@ -792,7 +795,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
792 twl->client = i2c_new_dummy(client->adapter, 795 twl->client = i2c_new_dummy(client->adapter,
793 twl->address); 796 twl->address);
794 if (!twl->client) { 797 if (!twl->client) {
795 dev_err(&twl->client->dev, 798 dev_err(&client->dev,
796 "can't attach client %d\n", i); 799 "can't attach client %d\n", i);
797 status = -ENOMEM; 800 status = -ENOMEM;
798 goto fail; 801 goto fail;
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 2afc08006e6d..fa294b6d600a 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/sched.h>
24#include <linux/ucb1400.h> 25#include <linux/ucb1400.h>
25 26
26unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, 27unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fea9085fe52c..60c3988f3cf3 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/errno.h> 24#include <linux/errno.h>
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index d3015dfb9134..ac056ea6b66e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -507,6 +507,8 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
507{ 507{
508 int i, ret; 508 int i, ret;
509 509
510 mutex_init(&wm831x->irq_lock);
511
510 if (!irq) { 512 if (!irq) {
511 dev_warn(wm831x->dev, 513 dev_warn(wm831x->dev,
512 "No interrupt specified - functionality limited\n"); 514 "No interrupt specified - functionality limited\n");
@@ -521,7 +523,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
521 } 523 }
522 524
523 wm831x->irq = irq; 525 wm831x->irq = irq;
524 mutex_init(&wm831x->irq_lock);
525 INIT_WORK(&wm831x->irq_work, wm831x_irq_worker); 526 INIT_WORK(&wm831x->irq_work, wm831x_irq_worker);
526 527
527 /* Mask the individual interrupt sources */ 528 /* Mask the individual interrupt sources */
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 3c0c58eed347..5a6b2bce8ad5 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -33,12 +33,6 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
37static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
38
39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(max6875);
41
42/* The MAX6875 can only read/write 16 bytes at a time */ 36/* The MAX6875 can only read/write 16 bytes at a time */
43#define SLICE_SIZE 16 37#define SLICE_SIZE 16
44#define SLICE_BITS 4 38#define SLICE_BITS 4
@@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = {
146 .read = max6875_read, 140 .read = max6875_read,
147}; 141};
148 142
149/* Return 0 if detection is successful, -ENODEV otherwise */ 143static int max6875_probe(struct i2c_client *client,
150static int max6875_detect(struct i2c_client *client, int kind, 144 const struct i2c_device_id *id)
151 struct i2c_board_info *info)
152{ 145{
153 struct i2c_adapter *adapter = client->adapter; 146 struct i2c_adapter *adapter = client->adapter;
147 struct max6875_data *data;
148 int err;
154 149
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA 150 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
156 | I2C_FUNC_SMBUS_READ_BYTE)) 151 | I2C_FUNC_SMBUS_READ_BYTE))
157 return -ENODEV; 152 return -ENODEV;
158 153
159 /* Only check even addresses */ 154 /* Only bind to even addresses */
160 if (client->addr & 1) 155 if (client->addr & 1)
161 return -ENODEV; 156 return -ENODEV;
162 157
163 strlcpy(info->type, "max6875", I2C_NAME_SIZE);
164
165 return 0;
166}
167
168static int max6875_probe(struct i2c_client *client,
169 const struct i2c_device_id *id)
170{
171 struct max6875_data *data;
172 int err;
173
174 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) 158 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
175 return -ENOMEM; 159 return -ENOMEM;
176 160
@@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = {
222 .probe = max6875_probe, 206 .probe = max6875_probe,
223 .remove = max6875_remove, 207 .remove = max6875_remove,
224 .id_table = max6875_id, 208 .id_table = max6875_id,
225
226 .detect = max6875_detect,
227 .address_data = &addr_data,
228}; 209};
229 210
230static int __init max6875_init(void) 211static int __init max6875_init(void)
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 1ad27c6abcca..a92a3a742b43 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -18,6 +18,7 @@
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/file.h> 19#include <linux/file.h>
20#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/sched.h>
21#include <linux/spinlock.h> 22#include <linux/spinlock.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/uaccess.h> 24#include <linux/uaccess.h>
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 276d3fb68094..e2031739aa29 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -22,6 +22,7 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/sched.h>
25#include "ibmasm.h" 26#include "ibmasm.h"
26#include "lowlevel.h" 27#include "lowlevel.h"
27 28
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 68a0a5b94795..572d41ffc186 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -22,6 +22,7 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/sched.h>
25#include "ibmasm.h" 26#include "ibmasm.h"
26#include "lowlevel.h" 27#include "lowlevel.h"
27 28
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index bec9e2c44bef..2de487ac788c 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/sched.h>
23#include "ibmasm.h" 24#include "ibmasm.h"
24#include "dot_command.h" 25#include "dot_command.h"
25 26
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index fa57b67593ae..04c27266f567 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/cdev.h> 23#include <linux/cdev.h>
24#include <linux/phantom.h> 24#include <linux/phantom.h>
25#include <linux/sched.h>
25#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
26 27
27#include <asm/atomic.h> 28#include <asm/atomic.h>
@@ -271,7 +272,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
271 return mask; 272 return mask;
272} 273}
273 274
274static struct file_operations phantom_file_ops = { 275static const struct file_operations phantom_file_ops = {
275 .open = phantom_open, 276 .open = phantom_open,
276 .release = phantom_release, 277 .release = phantom_release,
277 .unlocked_ioctl = phantom_ioctl, 278 .unlocked_ioctl = phantom_ioctl,
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index aed609832bc2..41c8fe2a928c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -53,7 +53,6 @@ struct gru_stats_s gru_stats;
53/* Guaranteed user available resources on each node */ 53/* Guaranteed user available resources on each node */
54static int max_user_cbrs, max_user_dsr_bytes; 54static int max_user_cbrs, max_user_dsr_bytes;
55 55
56static struct file_operations gru_fops;
57static struct miscdevice gru_miscdev; 56static struct miscdevice gru_miscdev;
58 57
59 58
@@ -426,7 +425,7 @@ static void __exit gru_exit(void)
426 gru_proc_exit(); 425 gru_proc_exit();
427} 426}
428 427
429static struct file_operations gru_fops = { 428static const struct file_operations gru_fops = {
430 .owner = THIS_MODULE, 429 .owner = THIS_MODULE,
431 .unlocked_ioctl = gru_file_unlocked_ioctl, 430 .unlocked_ioctl = gru_file_unlocked_ioctl,
432 .mmap = gru_file_mmap, 431 .mmap = gru_file_mmap,
@@ -438,7 +437,7 @@ static struct miscdevice gru_miscdev = {
438 .fops = &gru_fops, 437 .fops = &gru_fops,
439}; 438};
440 439
441struct vm_operations_struct gru_vm_ops = { 440const struct vm_operations_struct gru_vm_ops = {
442 .close = gru_vma_close, 441 .close = gru_vma_close,
443 .fault = gru_fault, 442 .fault = gru_fault,
444}; 443};
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index ccd4408a26c7..3f2375c5ba5b 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -161,14 +161,15 @@ static int options_show(struct seq_file *s, void *p)
161static ssize_t options_write(struct file *file, const char __user *userbuf, 161static ssize_t options_write(struct file *file, const char __user *userbuf,
162 size_t count, loff_t *data) 162 size_t count, loff_t *data)
163{ 163{
164 unsigned long val; 164 char buf[20];
165 char buf[80];
166 165
167 if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0) 166 if (count >= sizeof(buf))
167 return -EINVAL;
168 if (copy_from_user(buf, userbuf, count))
168 return -EFAULT; 169 return -EFAULT;
169 buf[count - 1] = '\0'; 170 buf[count] = '\0';
170 if (!strict_strtoul(buf, 10, &val)) 171 if (strict_strtoul(buf, 0, &gru_options))
171 gru_options = val; 172 return -EINVAL;
172 173
173 return count; 174 return count;
174} 175}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 34ab3d453919..46990bcfa536 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -624,7 +624,7 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
624 */ 624 */
625struct gru_unload_context_req; 625struct gru_unload_context_req;
626 626
627extern struct vm_operations_struct gru_vm_ops; 627extern const struct vm_operations_struct gru_vm_ops;
628extern struct device *grudev; 628extern struct device *grudev;
629 629
630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, 630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 610dbd1fcc82..96d10f40fb23 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file)
240 return 0; 240 return 0;
241} 241}
242 242
243static struct file_operations mmc_dbg_ext_csd_fops = { 243static const struct file_operations mmc_dbg_ext_csd_fops = {
244 .open = mmc_ext_csd_open, 244 .open = mmc_ext_csd_open,
245 .read = mmc_ext_csd_read, 245 .read = mmc_ext_csd_read,
246 .release = mmc_ext_csd_release, 246 .release = mmc_ext_csd_release,
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6636354b48ce..f85dcd536508 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
29 unsigned i, nr_strings; 29 unsigned i, nr_strings;
30 char **buffer, *string; 30 char **buffer, *string;
31 31
32 /* Find all null-terminated (including zero length) strings in
33 the TPLLV1_INFO field. Trailing garbage is ignored. */
32 buf += 2; 34 buf += 2;
33 size -= 2; 35 size -= 2;
34 36
@@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
39 if (buf[i] == 0) 41 if (buf[i] == 0)
40 nr_strings++; 42 nr_strings++;
41 } 43 }
42 44 if (nr_strings == 0)
43 if (nr_strings < 4) {
44 printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
45 return 0; 45 return 0;
46 }
47 46
48 size = i; 47 size = i;
49 48
@@ -98,6 +97,22 @@ static const unsigned char speed_val[16] =
98static const unsigned int speed_unit[8] = 97static const unsigned int speed_unit[8] =
99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; 98 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
100 99
100/* FUNCE tuples with these types get passed to SDIO drivers */
101static const unsigned char funce_type_whitelist[] = {
102 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
103};
104
105static int cistpl_funce_whitelisted(unsigned char type)
106{
107 int i;
108
109 for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
110 if (funce_type_whitelist[i] == type)
111 return 1;
112 }
113 return 0;
114}
115
101static int cistpl_funce_common(struct mmc_card *card, 116static int cistpl_funce_common(struct mmc_card *card,
102 const unsigned char *buf, unsigned size) 117 const unsigned char *buf, unsigned size)
103{ 118{
@@ -120,6 +135,10 @@ static int cistpl_funce_func(struct sdio_func *func,
120 unsigned vsn; 135 unsigned vsn;
121 unsigned min_size; 136 unsigned min_size;
122 137
138 /* let SDIO drivers take care of whitelisted FUNCE tuples */
139 if (cistpl_funce_whitelisted(buf[0]))
140 return -EILSEQ;
141
123 vsn = func->card->cccr.sdio_vsn; 142 vsn = func->card->cccr.sdio_vsn;
124 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; 143 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
125 144
@@ -154,13 +173,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
154 else 173 else
155 ret = cistpl_funce_common(card, buf, size); 174 ret = cistpl_funce_common(card, buf, size);
156 175
157 if (ret) { 176 if (ret && ret != -EILSEQ) {
158 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " 177 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
159 "type %u\n", mmc_hostname(card->host), size, buf[0]); 178 "type %u\n", mmc_hostname(card->host), size, buf[0]);
160 return ret;
161 } 179 }
162 180
163 return 0; 181 return ret;
164} 182}
165 183
166typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, 184typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
@@ -253,21 +271,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
253 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) 271 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
254 if (cis_tpl_list[i].code == tpl_code) 272 if (cis_tpl_list[i].code == tpl_code)
255 break; 273 break;
256 if (i >= ARRAY_SIZE(cis_tpl_list)) { 274 if (i < ARRAY_SIZE(cis_tpl_list)) {
257 /* this tuple is unknown to the core */
258 this->next = NULL;
259 this->code = tpl_code;
260 this->size = tpl_link;
261 *prev = this;
262 prev = &this->next;
263 printk(KERN_DEBUG
264 "%s: queuing CIS tuple 0x%02x length %u\n",
265 mmc_hostname(card->host), tpl_code, tpl_link);
266 } else {
267 const struct cis_tpl *tpl = cis_tpl_list + i; 275 const struct cis_tpl *tpl = cis_tpl_list + i;
268 if (tpl_link < tpl->min_size) { 276 if (tpl_link < tpl->min_size) {
269 printk(KERN_ERR 277 printk(KERN_ERR
270 "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", 278 "%s: bad CIS tuple 0x%02x"
279 " (length = %u, expected >= %u)\n",
271 mmc_hostname(card->host), 280 mmc_hostname(card->host),
272 tpl_code, tpl_link, tpl->min_size); 281 tpl_code, tpl_link, tpl->min_size);
273 ret = -EINVAL; 282 ret = -EINVAL;
@@ -275,7 +284,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
275 ret = tpl->parse(card, func, 284 ret = tpl->parse(card, func,
276 this->data, tpl_link); 285 this->data, tpl_link);
277 } 286 }
278 kfree(this); 287 /*
288 * We don't need the tuple anymore if it was
289 * successfully parsed by the SDIO core or if it is
290 * not going to be parsed by SDIO drivers.
291 */
292 if (!ret || ret != -EILSEQ)
293 kfree(this);
294 } else {
295 /* unknown tuple */
296 ret = -EILSEQ;
297 }
298
299 if (ret == -EILSEQ) {
300 /* this tuple is unknown to the core or whitelisted */
301 this->next = NULL;
302 this->code = tpl_code;
303 this->size = tpl_link;
304 *prev = this;
305 prev = &this->next;
306 printk(KERN_DEBUG
307 "%s: queuing CIS tuple 0x%02x length %u\n",
308 mmc_hostname(card->host), tpl_code, tpl_link);
309 /* keep on analyzing tuples */
310 ret = 0;
279 } 311 }
280 312
281 ptr += tpl_link; 313 ptr += tpl_link;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7cb057f3f883..432ae8358c86 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -276,6 +276,47 @@ config MMC_S3C
276 276
277 If unsure, say N. 277 If unsure, say N.
278 278
279config MMC_S3C_HW_SDIO_IRQ
280 bool "Hardware support for SDIO IRQ"
281 depends on MMC_S3C
282 help
283 Enable the hardware support for SDIO interrupts instead of using
284 the generic polling code.
285
286choice
287 prompt "Samsung S3C SD/MMC transfer code"
288 depends on MMC_S3C
289
290config MMC_S3C_PIO
291 bool "Use PIO transfers only"
292 help
293 Use PIO to transfer data between memory and the hardware.
294
295 PIO is slower than DMA as it requires CPU instructions to
296 move the data. This has been the traditional default for
297 the S3C MCI driver.
298
299config MMC_S3C_DMA
300 bool "Use DMA transfers only (EXPERIMENTAL)"
301 depends on EXPERIMENTAL
302 help
 303 Use DMA to transfer data between memory and the hardware.
304
305 Currently, the DMA support in this driver seems to not be
306 working properly and needs to be debugged before this
307 option is useful.
308
309config MMC_S3C_PIODMA
310 bool "Support for both PIO and DMA (EXPERIMENTAL)"
311 help
312 Compile both the PIO and DMA transfer routines into the
313 driver and let the platform select at run-time which one
314 is best.
315
316 See notes for the DMA option.
317
318endchoice
319
279config MMC_SDRICOH_CS 320config MMC_SDRICOH_CS
280 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 321 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
281 depends on EXPERIMENTAL && PCI && PCMCIA 322 depends on EXPERIMENTAL && PCI && PCMCIA
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index e556d42cc45a..63924e0c7ea9 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -72,7 +72,6 @@
72#include <asm/irq.h> 72#include <asm/irq.h>
73#include <asm/gpio.h> 73#include <asm/gpio.h>
74 74
75#include <asm/mach/mmc.h>
76#include <mach/board.h> 75#include <mach/board.h>
77#include <mach/cpu.h> 76#include <mach/cpu.h>
78#include <mach/at91_mci.h> 77#include <mach/at91_mci.h>
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 8741d0f5146a..705a5894a6bb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -22,12 +22,13 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/amba/mmci.h>
26#include <linux/regulator/consumer.h>
25 27
26#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
27#include <asm/div64.h> 29#include <asm/div64.h>
28#include <asm/io.h> 30#include <asm/io.h>
29#include <asm/sizes.h> 31#include <asm/sizes.h>
30#include <asm/mach/mmc.h>
31 32
32#include "mmci.h" 33#include "mmci.h"
33 34
@@ -38,6 +39,36 @@
38 39
39static unsigned int fmax = 515633; 40static unsigned int fmax = 515633;
40 41
42/*
43 * This must be called with host->lock held
44 */
45static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
46{
47 u32 clk = 0;
48
49 if (desired) {
50 if (desired >= host->mclk) {
51 clk = MCI_CLK_BYPASS;
52 host->cclk = host->mclk;
53 } else {
54 clk = host->mclk / (2 * desired) - 1;
55 if (clk >= 256)
56 clk = 255;
57 host->cclk = host->mclk / (2 * (clk + 1));
58 }
59 if (host->hw_designer == 0x80)
60 clk |= MCI_FCEN; /* Bug fix in ST IP block */
61 clk |= MCI_CLK_ENABLE;
62 /* This hasn't proven to be worthwhile */
63 /* clk |= MCI_CLK_PWRSAVE; */
64 }
65
66 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
67 clk |= MCI_WIDE_BUS;
68
69 writel(clk, host->base + MMCICLOCK);
70}
71
41static void 72static void
42mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 73mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
43{ 74{
@@ -419,30 +450,31 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
419static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 450static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
420{ 451{
421 struct mmci_host *host = mmc_priv(mmc); 452 struct mmci_host *host = mmc_priv(mmc);
422 u32 clk = 0, pwr = 0; 453 u32 pwr = 0;
423 454 unsigned long flags;
424 if (ios->clock) {
425 if (ios->clock >= host->mclk) {
426 clk = MCI_CLK_BYPASS;
427 host->cclk = host->mclk;
428 } else {
429 clk = host->mclk / (2 * ios->clock) - 1;
430 if (clk >= 256)
431 clk = 255;
432 host->cclk = host->mclk / (2 * (clk + 1));
433 }
434 if (host->hw_designer == AMBA_VENDOR_ST)
435 clk |= MCI_FCEN; /* Bug fix in ST IP block */
436 clk |= MCI_CLK_ENABLE;
437 }
438
439 if (host->plat->translate_vdd)
440 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
441 455
442 switch (ios->power_mode) { 456 switch (ios->power_mode) {
443 case MMC_POWER_OFF: 457 case MMC_POWER_OFF:
458 if(host->vcc &&
459 regulator_is_enabled(host->vcc))
460 regulator_disable(host->vcc);
444 break; 461 break;
445 case MMC_POWER_UP: 462 case MMC_POWER_UP:
463#ifdef CONFIG_REGULATOR
464 if (host->vcc)
465 /* This implicitly enables the regulator */
466 mmc_regulator_set_ocr(host->vcc, ios->vdd);
467#endif
468 /*
469 * The translate_vdd function is not used if you have
470 * an external regulator, or your design is really weird.
471 * Using it would mean sending in power control BOTH using
472 * a regulator AND the 4 MMCIPWR bits. If we don't have
473 * a regulator, we might have some other platform specific
474 * power control behind this translate function.
475 */
476 if (!host->vcc && host->plat->translate_vdd)
477 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
446 /* The ST version does not have this, fall through to POWER_ON */ 478 /* The ST version does not have this, fall through to POWER_ON */
447 if (host->hw_designer != AMBA_VENDOR_ST) { 479 if (host->hw_designer != AMBA_VENDOR_ST) {
448 pwr |= MCI_PWR_UP; 480 pwr |= MCI_PWR_UP;
@@ -465,12 +497,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
465 } 497 }
466 } 498 }
467 499
468 writel(clk, host->base + MMCICLOCK); 500 spin_lock_irqsave(&host->lock, flags);
501
502 mmci_set_clkreg(host, ios->clock);
469 503
470 if (host->pwr != pwr) { 504 if (host->pwr != pwr) {
471 host->pwr = pwr; 505 host->pwr = pwr;
472 writel(pwr, host->base + MMCIPOWER); 506 writel(pwr, host->base + MMCIPOWER);
473 } 507 }
508
509 spin_unlock_irqrestore(&host->lock, flags);
474} 510}
475 511
476static int mmci_get_ro(struct mmc_host *mmc) 512static int mmci_get_ro(struct mmc_host *mmc)
@@ -517,7 +553,7 @@ static void mmci_check_status(unsigned long data)
517 553
518static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 554static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
519{ 555{
520 struct mmc_platform_data *plat = dev->dev.platform_data; 556 struct mmci_platform_data *plat = dev->dev.platform_data;
521 struct mmci_host *host; 557 struct mmci_host *host;
522 struct mmc_host *mmc; 558 struct mmc_host *mmc;
523 int ret; 559 int ret;
@@ -583,7 +619,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
583 mmc->ops = &mmci_ops; 619 mmc->ops = &mmci_ops;
584 mmc->f_min = (host->mclk + 511) / 512; 620 mmc->f_min = (host->mclk + 511) / 512;
585 mmc->f_max = min(host->mclk, fmax); 621 mmc->f_max = min(host->mclk, fmax);
586 mmc->ocr_avail = plat->ocr_mask; 622#ifdef CONFIG_REGULATOR
623 /* If we're using the regulator framework, try to fetch a regulator */
624 host->vcc = regulator_get(&dev->dev, "vmmc");
625 if (IS_ERR(host->vcc))
626 host->vcc = NULL;
627 else {
628 int mask = mmc_regulator_get_ocrmask(host->vcc);
629
630 if (mask < 0)
631 dev_err(&dev->dev, "error getting OCR mask (%d)\n",
632 mask);
633 else {
634 host->mmc->ocr_avail = (u32) mask;
635 if (plat->ocr_mask)
636 dev_warn(&dev->dev,
637 "Provided ocr_mask/setpower will not be used "
638 "(using regulator instead)\n");
639 }
640 }
641#endif
642 /* Fall back to platform data if no regulator is found */
643 if (host->vcc == NULL)
644 mmc->ocr_avail = plat->ocr_mask;
645 mmc->caps = plat->capabilities;
587 646
588 /* 647 /*
589 * We can do SGIO 648 * We can do SGIO
@@ -619,7 +678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
619 writel(0, host->base + MMCIMASK1); 678 writel(0, host->base + MMCIMASK1);
620 writel(0xfff, host->base + MMCICLEAR); 679 writel(0xfff, host->base + MMCICLEAR);
621 680
622#ifdef CONFIG_GPIOLIB
623 if (gpio_is_valid(plat->gpio_cd)) { 681 if (gpio_is_valid(plat->gpio_cd)) {
624 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 682 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
625 if (ret == 0) 683 if (ret == 0)
@@ -638,7 +696,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
638 else if (ret != -ENOSYS) 696 else if (ret != -ENOSYS)
639 goto err_gpio_wp; 697 goto err_gpio_wp;
640 } 698 }
641#endif
642 699
643 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 700 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
644 if (ret) 701 if (ret)
@@ -720,6 +777,10 @@ static int __devexit mmci_remove(struct amba_device *dev)
720 clk_disable(host->clk); 777 clk_disable(host->clk);
721 clk_put(host->clk); 778 clk_put(host->clk);
722 779
780 if (regulator_is_enabled(host->vcc))
781 regulator_disable(host->vcc);
782 regulator_put(host->vcc);
783
723 mmc_free_host(mmc); 784 mmc_free_host(mmc);
724 785
725 amba_release_regions(dev); 786 amba_release_regions(dev);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 839f264c9725..1ceb9a90f59b 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -161,7 +161,7 @@ struct mmci_host {
161 unsigned int mclk; 161 unsigned int mclk;
162 unsigned int cclk; 162 unsigned int cclk;
163 u32 pwr; 163 u32 pwr;
164 struct mmc_platform_data *plat; 164 struct mmci_platform_data *plat;
165 165
166 u8 hw_designer; 166 u8 hw_designer;
167 u8 hw_revision:4; 167 u8 hw_revision:4;
@@ -175,6 +175,7 @@ struct mmci_host {
175 struct scatterlist *sg_ptr; 175 struct scatterlist *sg_ptr;
176 unsigned int sg_off; 176 unsigned int sg_off;
177 unsigned int size; 177 unsigned int size;
178 struct regulator *vcc;
178}; 179};
179 180
180static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 181static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index e7a331de5733..b8fd7af1ceeb 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1529,6 +1529,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
1529 host->pdata->cleanup(&pdev->dev); 1529 host->pdata->cleanup(&pdev->dev);
1530 1530
1531 mmc_omap_fclk_enable(host, 0); 1531 mmc_omap_fclk_enable(host, 0);
1532 free_irq(host->irq, host);
1532 clk_put(host->fclk); 1533 clk_put(host->fclk);
1533 clk_disable(host->iclk); 1534 clk_disable(host->iclk);
1534 clk_put(host->iclk); 1535 clk_put(host->iclk);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4487cc097911..0aecaaebef3d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2013,7 +2013,7 @@ static struct platform_driver omap_hsmmc_driver = {
2013static int __init omap_hsmmc_init(void) 2013static int __init omap_hsmmc_init(void)
2014{ 2014{
2015 /* Register the MMC driver */ 2015 /* Register the MMC driver */
2016 return platform_driver_register(&omap_hsmmc_driver); 2016 return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe);
2017} 2017}
2018 2018
2019static void __exit omap_hsmmc_cleanup(void) 2019static void __exit omap_hsmmc_cleanup(void)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e55ac792d68c..b00d67319058 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
31#include <linux/gpio.h>
31 32
32#include <asm/sizes.h> 33#include <asm/sizes.h>
33 34
@@ -96,10 +97,18 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
96 97
97static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) 98static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
98{ 99{
100 int on;
101
99#ifdef CONFIG_REGULATOR 102#ifdef CONFIG_REGULATOR
100 if (host->vcc) 103 if (host->vcc)
101 mmc_regulator_set_ocr(host->vcc, vdd); 104 mmc_regulator_set_ocr(host->vcc, vdd);
102#endif 105#endif
106 if (!host->vcc && host->pdata &&
107 gpio_is_valid(host->pdata->gpio_power)) {
108 on = ((1 << vdd) & host->pdata->ocr_mask);
109 gpio_set_value(host->pdata->gpio_power,
110 !!on ^ host->pdata->gpio_power_invert);
111 }
103 if (!host->vcc && host->pdata && host->pdata->setpower) 112 if (!host->vcc && host->pdata && host->pdata->setpower)
104 host->pdata->setpower(mmc_dev(host->mmc), vdd); 113 host->pdata->setpower(mmc_dev(host->mmc), vdd);
105} 114}
@@ -421,6 +430,12 @@ static int pxamci_get_ro(struct mmc_host *mmc)
421{ 430{
422 struct pxamci_host *host = mmc_priv(mmc); 431 struct pxamci_host *host = mmc_priv(mmc);
423 432
433 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
434 if (host->pdata->gpio_card_ro_invert)
435 return !gpio_get_value(host->pdata->gpio_card_ro);
436 else
437 return gpio_get_value(host->pdata->gpio_card_ro);
438 }
424 if (host->pdata && host->pdata->get_ro) 439 if (host->pdata && host->pdata->get_ro)
425 return !!host->pdata->get_ro(mmc_dev(mmc)); 440 return !!host->pdata->get_ro(mmc_dev(mmc));
426 /* 441 /*
@@ -534,7 +549,7 @@ static int pxamci_probe(struct platform_device *pdev)
534 struct mmc_host *mmc; 549 struct mmc_host *mmc;
535 struct pxamci_host *host = NULL; 550 struct pxamci_host *host = NULL;
536 struct resource *r, *dmarx, *dmatx; 551 struct resource *r, *dmarx, *dmatx;
537 int ret, irq; 552 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
538 553
539 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 554 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 irq = platform_get_irq(pdev, 0); 555 irq = platform_get_irq(pdev, 0);
@@ -661,13 +676,63 @@ static int pxamci_probe(struct platform_device *pdev)
661 } 676 }
662 host->dma_drcmrtx = dmatx->start; 677 host->dma_drcmrtx = dmatx->start;
663 678
679 if (host->pdata) {
680 gpio_cd = host->pdata->gpio_card_detect;
681 gpio_ro = host->pdata->gpio_card_ro;
682 gpio_power = host->pdata->gpio_power;
683 }
684 if (gpio_is_valid(gpio_power)) {
685 ret = gpio_request(gpio_power, "mmc card power");
686 if (ret) {
687 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
688 goto out;
689 }
690 gpio_direction_output(gpio_power,
691 host->pdata->gpio_power_invert);
692 }
693 if (gpio_is_valid(gpio_ro)) {
694 ret = gpio_request(gpio_ro, "mmc card read only");
695 if (ret) {
696 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
697 goto err_gpio_ro;
698 }
699 gpio_direction_input(gpio_ro);
700 }
701 if (gpio_is_valid(gpio_cd)) {
702 ret = gpio_request(gpio_cd, "mmc card detect");
703 if (ret) {
704 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
705 goto err_gpio_cd;
706 }
707 gpio_direction_input(gpio_cd);
708
709 ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
710 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
711 "mmc card detect", mmc);
712 if (ret) {
713 dev_err(&pdev->dev, "failed to request card detect IRQ\n");
714 goto err_request_irq;
715 }
716 }
717
664 if (host->pdata && host->pdata->init) 718 if (host->pdata && host->pdata->init)
665 host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); 719 host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
666 720
721 if (gpio_is_valid(gpio_power) && host->pdata->setpower)
722 dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
723 if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
724 dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
725
667 mmc_add_host(mmc); 726 mmc_add_host(mmc);
668 727
669 return 0; 728 return 0;
670 729
730err_request_irq:
731 gpio_free(gpio_cd);
732err_gpio_cd:
733 gpio_free(gpio_ro);
734err_gpio_ro:
735 gpio_free(gpio_power);
671 out: 736 out:
672 if (host) { 737 if (host) {
673 if (host->dma >= 0) 738 if (host->dma >= 0)
@@ -688,12 +753,26 @@ static int pxamci_probe(struct platform_device *pdev)
688static int pxamci_remove(struct platform_device *pdev) 753static int pxamci_remove(struct platform_device *pdev)
689{ 754{
690 struct mmc_host *mmc = platform_get_drvdata(pdev); 755 struct mmc_host *mmc = platform_get_drvdata(pdev);
756 int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
691 757
692 platform_set_drvdata(pdev, NULL); 758 platform_set_drvdata(pdev, NULL);
693 759
694 if (mmc) { 760 if (mmc) {
695 struct pxamci_host *host = mmc_priv(mmc); 761 struct pxamci_host *host = mmc_priv(mmc);
696 762
763 if (host->pdata) {
764 gpio_cd = host->pdata->gpio_card_detect;
765 gpio_ro = host->pdata->gpio_card_ro;
766 gpio_power = host->pdata->gpio_power;
767 }
768 if (gpio_is_valid(gpio_cd)) {
769 free_irq(gpio_to_irq(gpio_cd), mmc);
770 gpio_free(gpio_cd);
771 }
772 if (gpio_is_valid(gpio_ro))
773 gpio_free(gpio_ro);
774 if (gpio_is_valid(gpio_power))
775 gpio_free(gpio_power);
697 if (host->vcc) 776 if (host->vcc)
698 regulator_put(host->vcc); 777 regulator_put(host->vcc);
699 778
@@ -725,20 +804,20 @@ static int pxamci_remove(struct platform_device *pdev)
725} 804}
726 805
727#ifdef CONFIG_PM 806#ifdef CONFIG_PM
728static int pxamci_suspend(struct platform_device *dev, pm_message_t state) 807static int pxamci_suspend(struct device *dev)
729{ 808{
730 struct mmc_host *mmc = platform_get_drvdata(dev); 809 struct mmc_host *mmc = dev_get_drvdata(dev);
731 int ret = 0; 810 int ret = 0;
732 811
733 if (mmc) 812 if (mmc)
734 ret = mmc_suspend_host(mmc, state); 813 ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
735 814
736 return ret; 815 return ret;
737} 816}
738 817
739static int pxamci_resume(struct platform_device *dev) 818static int pxamci_resume(struct device *dev)
740{ 819{
741 struct mmc_host *mmc = platform_get_drvdata(dev); 820 struct mmc_host *mmc = dev_get_drvdata(dev);
742 int ret = 0; 821 int ret = 0;
743 822
744 if (mmc) 823 if (mmc)
@@ -746,19 +825,22 @@ static int pxamci_resume(struct platform_device *dev)
746 825
747 return ret; 826 return ret;
748} 827}
749#else 828
750#define pxamci_suspend NULL 829static struct dev_pm_ops pxamci_pm_ops = {
751#define pxamci_resume NULL 830 .suspend = pxamci_suspend,
831 .resume = pxamci_resume,
832};
752#endif 833#endif
753 834
754static struct platform_driver pxamci_driver = { 835static struct platform_driver pxamci_driver = {
755 .probe = pxamci_probe, 836 .probe = pxamci_probe,
756 .remove = pxamci_remove, 837 .remove = pxamci_remove,
757 .suspend = pxamci_suspend,
758 .resume = pxamci_resume,
759 .driver = { 838 .driver = {
760 .name = DRIVER_NAME, 839 .name = DRIVER_NAME,
761 .owner = THIS_MODULE, 840 .owner = THIS_MODULE,
841#ifdef CONFIG_PM
842 .pm = &pxamci_pm_ops,
843#endif
762 }, 844 },
763}; 845};
764 846
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8c08cd7efa7f..99b74a351020 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,8 @@
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
20#include <linux/gpio.h> 22#include <linux/gpio.h>
21#include <linux/irq.h> 23#include <linux/irq.h>
22#include <linux/io.h> 24#include <linux/io.h>
@@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug;
58 dev_dbg(&host->pdev->dev, args); \ 60 dev_dbg(&host->pdev->dev, args); \
59 } while (0) 61 } while (0)
60 62
61#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
62
63static struct s3c2410_dma_client s3cmci_dma_client = { 63static struct s3c2410_dma_client s3cmci_dma_client = {
64 .name = "s3c-mci", 64 .name = "s3c-mci",
65}; 65};
@@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
164 164
165#endif /* CONFIG_MMC_DEBUG */ 165#endif /* CONFIG_MMC_DEBUG */
166 166
167/**
168 * s3cmci_host_usedma - return whether the host is using dma or pio
169 * @host: The host state
170 *
171 * Return true if the host is using DMA to transfer data, else false
172 * to use PIO mode. Will return static data depending on the driver
173 * configuration.
174 */
175static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
176{
177#ifdef CONFIG_MMC_S3C_PIO
178 return false;
179#elif defined(CONFIG_MMC_S3C_DMA)
180 return true;
181#else
182 return host->dodma;
183#endif
184}
185
186/**
187 * s3cmci_host_canpio - return true if host has pio code available
188 *
189 * Return true if the driver has been compiled with the PIO support code
190 * available.
191 */
192static inline bool s3cmci_host_canpio(void)
193{
194#ifdef CONFIG_MMC_S3C_PIO
195 return true;
196#else
197 return false;
198#endif
199}
200
167static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) 201static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
168{ 202{
169 u32 newmask; 203 u32 newmask;
@@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
190 224
191static inline void clear_imask(struct s3cmci_host *host) 225static inline void clear_imask(struct s3cmci_host *host)
192{ 226{
193 writel(0, host->base + host->sdiimsk); 227 u32 mask = readl(host->base + host->sdiimsk);
228
229 /* preserve the SDIO IRQ mask state */
230 mask &= S3C2410_SDIIMSK_SDIOIRQ;
231 writel(mask, host->base + host->sdiimsk);
232}
233
234/**
235 * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
236 * @host: The host to check.
237 *
238 * Test to see if the SDIO interrupt is being signalled in case the
239 * controller has failed to re-detect a card interrupt. Read GPE8 and
240 * see if it is low and if so, signal a SDIO interrupt.
241 *
242 * This is currently called if a request is finished (we assume that the
243 * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
244 * already being indicated.
245*/
246static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
247{
248 if (host->sdio_irqen) {
249 if (gpio_get_value(S3C2410_GPE(8)) == 0) {
250 printk(KERN_DEBUG "%s: signalling irq\n", __func__);
251 mmc_signal_sdio_irq(host->mmc);
252 }
253 }
194} 254}
195 255
196static inline int get_data_buffer(struct s3cmci_host *host, 256static inline int get_data_buffer(struct s3cmci_host *host,
@@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host)
238 return 63 - fifostat; 298 return 63 - fifostat;
239} 299}
240 300
301/**
302 * s3cmci_enable_irq - enable IRQ, after having disabled it.
303 * @host: The device state.
304 * @more: True if more IRQs are expected from transfer.
305 *
306 * Enable the main IRQ if needed after it has been disabled.
307 *
308 * The IRQ can be one of the following states:
309 * - disabled during IDLE
310 * - disabled whilst processing data
311 * - enabled during transfer
312 * - enabled whilst awaiting SDIO interrupt detection
313 */
314static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
315{
316 unsigned long flags;
317 bool enable = false;
318
319 local_irq_save(flags);
320
321 host->irq_enabled = more;
322 host->irq_disabled = false;
323
324 enable = more | host->sdio_irqen;
325
326 if (host->irq_state != enable) {
327 host->irq_state = enable;
328
329 if (enable)
330 enable_irq(host->irq);
331 else
332 disable_irq(host->irq);
333 }
334
335 local_irq_restore(flags);
336}
337
 338/**
 339 * s3cmci_disable_irq - disable the main IRQ whilst a transfer is processed
 340 */
341static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
342{
343 unsigned long flags;
344
345 local_irq_save(flags);
346
347 //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
348
349 host->irq_disabled = transfer;
350
351 if (transfer && host->irq_state) {
352 host->irq_state = false;
353 disable_irq(host->irq);
354 }
355
356 local_irq_restore(flags);
357}
358
241static void do_pio_read(struct s3cmci_host *host) 359static void do_pio_read(struct s3cmci_host *host)
242{ 360{
243 int res; 361 int res;
@@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data)
374{ 492{
375 struct s3cmci_host *host = (struct s3cmci_host *) data; 493 struct s3cmci_host *host = (struct s3cmci_host *) data;
376 494
377 495 s3cmci_disable_irq(host, true);
378 disable_irq(host->irq);
379 496
380 if (host->pio_active == XFER_WRITE) 497 if (host->pio_active == XFER_WRITE)
381 do_pio_write(host); 498 do_pio_write(host);
@@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data)
395 host->mrq->data->error = -EINVAL; 512 host->mrq->data->error = -EINVAL;
396 } 513 }
397 514
515 s3cmci_enable_irq(host, false);
398 finalize_request(host); 516 finalize_request(host);
399 } else 517 } else
400 enable_irq(host->irq); 518 s3cmci_enable_irq(host, true);
401} 519}
402 520
403/* 521/*
@@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
432 struct s3cmci_host *host = dev_id; 550 struct s3cmci_host *host = dev_id;
433 struct mmc_command *cmd; 551 struct mmc_command *cmd;
434 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; 552 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
435 u32 mci_cclear, mci_dclear; 553 u32 mci_cclear = 0, mci_dclear;
436 unsigned long iflags; 554 unsigned long iflags;
437 555
556 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
557 mci_imsk = readl(host->base + host->sdiimsk);
558
559 if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
560 if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
561 mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
562 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
563
564 mmc_signal_sdio_irq(host->mmc);
565 return IRQ_HANDLED;
566 }
567 }
568
438 spin_lock_irqsave(&host->complete_lock, iflags); 569 spin_lock_irqsave(&host->complete_lock, iflags);
439 570
440 mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 571 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
441 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
442 mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 572 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
443 mci_fsta = readl(host->base + S3C2410_SDIFSTA); 573 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
444 mci_imsk = readl(host->base + host->sdiimsk);
445 mci_cclear = 0;
446 mci_dclear = 0; 574 mci_dclear = 0;
447 575
448 if ((host->complete_what == COMPLETION_NONE) || 576 if ((host->complete_what == COMPLETION_NONE) ||
@@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
466 goto irq_out; 594 goto irq_out;
467 } 595 }
468 596
469 if (!host->dodma) { 597 if (!s3cmci_host_usedma(host)) {
470 if ((host->pio_active == XFER_WRITE) && 598 if ((host->pio_active == XFER_WRITE) &&
471 (mci_fsta & S3C2410_SDIFSTA_TFDET)) { 599 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
472 600
@@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
673 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", 801 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
674 size, mci_dsta, mci_dcnt); 802 size, mci_dsta, mci_dcnt);
675 803
804 host->dma_complete = 1;
676 host->complete_what = COMPLETION_FINALIZE; 805 host->complete_what = COMPLETION_FINALIZE;
677 806
678out: 807out:
@@ -683,9 +812,9 @@ out:
683fail_request: 812fail_request:
684 host->mrq->data->error = -EINVAL; 813 host->mrq->data->error = -EINVAL;
685 host->complete_what = COMPLETION_FINALIZE; 814 host->complete_what = COMPLETION_FINALIZE;
686 writel(0, host->base + host->sdiimsk); 815 clear_imask(host);
687 goto out;
688 816
817 goto out;
689} 818}
690 819
691static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
@@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host)
702 831
703 if (cmd->data && (cmd->error == 0) && 832 if (cmd->data && (cmd->error == 0) &&
704 (cmd->data->error == 0)) { 833 (cmd->data->error == 0)) {
705 if (host->dodma && (!host->dma_complete)) { 834 if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
706 dbg(host, dbg_dma, "DMA Missing!\n"); 835 dbg(host, dbg_dma, "DMA Missing (%d)!\n",
836 host->dma_complete);
707 return; 837 return;
708 } 838 }
709 } 839 }
@@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host)
728 writel(0, host->base + S3C2410_SDICMDARG); 858 writel(0, host->base + S3C2410_SDICMDARG);
729 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 859 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
730 writel(0, host->base + S3C2410_SDICMDCON); 860 writel(0, host->base + S3C2410_SDICMDCON);
731 writel(0, host->base + host->sdiimsk); 861 clear_imask(host);
732 862
733 if (cmd->data && cmd->error) 863 if (cmd->data && cmd->error)
734 cmd->data->error = cmd->error; 864 cmd->data->error = cmd->error;
@@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host)
 754 /* If we had an error while transferring data we flush the 884 /* If we had an error while transferring data we flush the
755 * DMA channel and the fifo to clear out any garbage. */ 885 * DMA channel and the fifo to clear out any garbage. */
756 if (mrq->data->error != 0) { 886 if (mrq->data->error != 0) {
757 if (host->dodma) 887 if (s3cmci_host_usedma(host))
758 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 888 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
759 889
760 if (host->is2440) { 890 if (host->is2440) {
@@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host)
776request_done: 906request_done:
777 host->complete_what = COMPLETION_NONE; 907 host->complete_what = COMPLETION_NONE;
778 host->mrq = NULL; 908 host->mrq = NULL;
909
910 s3cmci_check_sdio_irq(host);
779 mmc_request_done(host->mmc, mrq); 911 mmc_request_done(host->mmc, mrq);
780} 912}
781 913
@@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
872 1004
873 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; 1005 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
874 1006
875 if (host->dodma) 1007 if (s3cmci_host_usedma(host))
876 dcon |= S3C2410_SDIDCON_DMAEN; 1008 dcon |= S3C2410_SDIDCON_DMAEN;
877 1009
878 if (host->bus_width == MMC_BUS_WIDTH_4) 1010 if (host->bus_width == MMC_BUS_WIDTH_4)
@@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
950static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) 1082static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
951{ 1083{
952 int dma_len, i; 1084 int dma_len, i;
953 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 1085 int rw = data->flags & MMC_DATA_WRITE;
954 1086
955 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1087 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
956 1088
@@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
958 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1090 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
959 1091
960 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1092 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
961 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1093 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
962 1094
963 if (dma_len == 0) 1095 if (dma_len == 0)
964 return -ENOMEM; 1096 return -ENOMEM;
@@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
969 for (i = 0; i < dma_len; i++) { 1101 for (i = 0; i < dma_len; i++) {
970 int res; 1102 int res;
971 1103
972 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, 1104 dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i,
973 sg_dma_address(&data->sg[i]), 1105 sg_dma_address(&data->sg[i]),
974 sg_dma_len(&data->sg[i])); 1106 sg_dma_len(&data->sg[i]));
975 1107
976 res = s3c2410_dma_enqueue(host->dma, (void *) host, 1108 res = s3c2410_dma_enqueue(host->dma, host,
977 sg_dma_address(&data->sg[i]), 1109 sg_dma_address(&data->sg[i]),
978 sg_dma_len(&data->sg[i])); 1110 sg_dma_len(&data->sg[i]));
979 1111
@@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1018 return; 1150 return;
1019 } 1151 }
1020 1152
1021 if (host->dodma) 1153 if (s3cmci_host_usedma(host))
1022 res = s3cmci_prepare_dma(host, cmd->data); 1154 res = s3cmci_prepare_dma(host, cmd->data);
1023 else 1155 else
1024 res = s3cmci_prepare_pio(host, cmd->data); 1156 res = s3cmci_prepare_pio(host, cmd->data);
@@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1037 s3cmci_send_command(host, cmd); 1169 s3cmci_send_command(host, cmd);
1038 1170
1039 /* Enable Interrupt */ 1171 /* Enable Interrupt */
1040 enable_irq(host->irq); 1172 s3cmci_enable_irq(host, true);
1041} 1173}
1042 1174
1043static int s3cmci_card_present(struct mmc_host *mmc) 1175static int s3cmci_card_present(struct mmc_host *mmc)
@@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
1049 if (pdata->gpio_detect == 0) 1181 if (pdata->gpio_detect == 0)
1050 return -ENOSYS; 1182 return -ENOSYS;
1051 1183
1052 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; 1184 ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
1053 return ret ^ pdata->detect_invert; 1185 return ret ^ pdata->detect_invert;
1054} 1186}
1055 1187
@@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1104 switch (ios->power_mode) { 1236 switch (ios->power_mode) {
1105 case MMC_POWER_ON: 1237 case MMC_POWER_ON:
1106 case MMC_POWER_UP: 1238 case MMC_POWER_UP:
1107 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); 1239 s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
1108 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); 1240 s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
1109 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); 1241 s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
1110 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); 1242 s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
1111 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); 1243 s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
1112 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); 1244 s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
1113 1245
1114 if (host->pdata->set_power) 1246 if (host->pdata->set_power)
1115 host->pdata->set_power(ios->power_mode, ios->vdd); 1247 host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1121 1253
1122 case MMC_POWER_OFF: 1254 case MMC_POWER_OFF:
1123 default: 1255 default:
1124 s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1256 gpio_direction_output(S3C2410_GPE(5), 0);
1125 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
1126 1257
1127 if (host->is2440) 1258 if (host->is2440)
1128 mci_con |= S3C2440_SDICON_SDRESET; 1259 mci_con |= S3C2440_SDICON_SDRESET;
@@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1168 struct s3c24xx_mci_pdata *pdata = host->pdata; 1299 struct s3c24xx_mci_pdata *pdata = host->pdata;
1169 int ret; 1300 int ret;
1170 1301
1171 if (pdata->gpio_wprotect == 0) 1302 if (pdata->no_wprotect)
1172 return 0; 1303 return 0;
1173 1304
1174 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
@@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1179 return ret; 1310 return ret;
1180} 1311}
1181 1312
1313static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1314{
1315 struct s3cmci_host *host = mmc_priv(mmc);
1316 unsigned long flags;
1317 u32 con;
1318
1319 local_irq_save(flags);
1320
1321 con = readl(host->base + S3C2410_SDICON);
1322 host->sdio_irqen = enable;
1323
1324 if (enable == host->sdio_irqen)
1325 goto same_state;
1326
1327 if (enable) {
1328 con |= S3C2410_SDICON_SDIOIRQ;
1329 enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1330
1331 if (!host->irq_state && !host->irq_disabled) {
1332 host->irq_state = true;
1333 enable_irq(host->irq);
1334 }
1335 } else {
1336 disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1337 con &= ~S3C2410_SDICON_SDIOIRQ;
1338
1339 if (!host->irq_enabled && host->irq_state) {
1340 disable_irq_nosync(host->irq);
1341 host->irq_state = false;
1342 }
1343 }
1344
1345 writel(con, host->base + S3C2410_SDICON);
1346
1347 same_state:
1348 local_irq_restore(flags);
1349
1350 s3cmci_check_sdio_irq(host);
1351}
1352
1182static struct mmc_host_ops s3cmci_ops = { 1353static struct mmc_host_ops s3cmci_ops = {
1183 .request = s3cmci_request, 1354 .request = s3cmci_request,
1184 .set_ios = s3cmci_set_ios, 1355 .set_ios = s3cmci_set_ios,
1185 .get_ro = s3cmci_get_ro, 1356 .get_ro = s3cmci_get_ro,
1186 .get_cd = s3cmci_card_present, 1357 .get_cd = s3cmci_card_present,
1358 .enable_sdio_irq = s3cmci_enable_sdio_irq,
1187}; 1359};
1188 1360
1189static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1361static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
1246} 1418}
1247#endif 1419#endif
1248 1420
1249static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) 1421
1422#ifdef CONFIG_DEBUG_FS
1423
1424static int s3cmci_state_show(struct seq_file *seq, void *v)
1425{
1426 struct s3cmci_host *host = seq->private;
1427
1428 seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
1429 seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
1430 seq_printf(seq, "Prescale = %d\n", host->prescaler);
1431 seq_printf(seq, "is2440 = %d\n", host->is2440);
1432 seq_printf(seq, "IRQ = %d\n", host->irq);
1433 seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
1434 seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
1435 seq_printf(seq, "IRQ state = %d\n", host->irq_state);
1436 seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
1437 seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
1438 seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
1439 seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
1440
1441 return 0;
1442}
1443
1444static int s3cmci_state_open(struct inode *inode, struct file *file)
1445{
1446 return single_open(file, s3cmci_state_show, inode->i_private);
1447}
1448
1449static const struct file_operations s3cmci_fops_state = {
1450 .owner = THIS_MODULE,
1451 .open = s3cmci_state_open,
1452 .read = seq_read,
1453 .llseek = seq_lseek,
1454 .release = single_release,
1455};
1456
1457#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
1458
1459struct s3cmci_reg {
1460 unsigned short addr;
1461 unsigned char *name;
1462} debug_regs[] = {
1463 DBG_REG(CON),
1464 DBG_REG(PRE),
1465 DBG_REG(CMDARG),
1466 DBG_REG(CMDCON),
1467 DBG_REG(CMDSTAT),
1468 DBG_REG(RSP0),
1469 DBG_REG(RSP1),
1470 DBG_REG(RSP2),
1471 DBG_REG(RSP3),
1472 DBG_REG(TIMER),
1473 DBG_REG(BSIZE),
1474 DBG_REG(DCON),
1475 DBG_REG(DCNT),
1476 DBG_REG(DSTA),
1477 DBG_REG(FSTA),
1478 {}
1479};
1480
1481static int s3cmci_regs_show(struct seq_file *seq, void *v)
1482{
1483 struct s3cmci_host *host = seq->private;
1484 struct s3cmci_reg *rptr = debug_regs;
1485
1486 for (; rptr->name; rptr++)
1487 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
1488 readl(host->base + rptr->addr));
1489
1490 seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
1491
1492 return 0;
1493}
1494
1495static int s3cmci_regs_open(struct inode *inode, struct file *file)
1496{
1497 return single_open(file, s3cmci_regs_show, inode->i_private);
1498}
1499
1500static const struct file_operations s3cmci_fops_regs = {
1501 .owner = THIS_MODULE,
1502 .open = s3cmci_regs_open,
1503 .read = seq_read,
1504 .llseek = seq_lseek,
1505 .release = single_release,
1506};
1507
1508static void s3cmci_debugfs_attach(struct s3cmci_host *host)
1509{
1510 struct device *dev = &host->pdev->dev;
1511
1512 host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
1513 if (IS_ERR(host->debug_root)) {
1514 dev_err(dev, "failed to create debugfs root\n");
1515 return;
1516 }
1517
1518 host->debug_state = debugfs_create_file("state", 0444,
1519 host->debug_root, host,
1520 &s3cmci_fops_state);
1521
1522 if (IS_ERR(host->debug_state))
1523 dev_err(dev, "failed to create debug state file\n");
1524
1525 host->debug_regs = debugfs_create_file("regs", 0444,
1526 host->debug_root, host,
1527 &s3cmci_fops_regs);
1528
1529 if (IS_ERR(host->debug_regs))
1530 dev_err(dev, "failed to create debug regs file\n");
1531}
1532
1533static void s3cmci_debugfs_remove(struct s3cmci_host *host)
1534{
1535 debugfs_remove(host->debug_regs);
1536 debugfs_remove(host->debug_state);
1537 debugfs_remove(host->debug_root);
1538}
1539
1540#else
1541static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
1542static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
1543
1544#endif /* CONFIG_DEBUG_FS */
1545
1546static int __devinit s3cmci_probe(struct platform_device *pdev)
1250{ 1547{
1251 struct s3cmci_host *host; 1548 struct s3cmci_host *host;
1252 struct mmc_host *mmc; 1549 struct mmc_host *mmc;
1253 int ret; 1550 int ret;
1551 int is2440;
1552 int i;
1553
1554 is2440 = platform_get_device_id(pdev)->driver_data;
1254 1555
1255 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); 1556 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1256 if (!mmc) { 1557 if (!mmc) {
@@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1258 goto probe_out; 1559 goto probe_out;
1259 } 1560 }
1260 1561
1562 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
1563 ret = gpio_request(i, dev_name(&pdev->dev));
1564 if (ret) {
1565 dev_err(&pdev->dev, "failed to get gpio %d\n", i);
1566
1567 for (i--; i >= S3C2410_GPE(5); i--)
1568 gpio_free(i);
1569
1570 goto probe_free_host;
1571 }
1572 }
1573
1261 host = mmc_priv(mmc); 1574 host = mmc_priv(mmc);
1262 host->mmc = mmc; 1575 host->mmc = mmc;
1263 host->pdev = pdev; 1576 host->pdev = pdev;
@@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1282 host->clk_div = 2; 1595 host->clk_div = 2;
1283 } 1596 }
1284 1597
1285 host->dodma = 0;
1286 host->complete_what = COMPLETION_NONE; 1598 host->complete_what = COMPLETION_NONE;
1287 host->pio_active = XFER_NONE; 1599 host->pio_active = XFER_NONE;
1288 1600
1289 host->dma = S3CMCI_DMA; 1601#ifdef CONFIG_MMC_S3C_PIODMA
1602 host->dodma = host->pdata->dma;
1603#endif
1290 1604
1291 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1605 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 if (!host->mem) { 1606 if (!host->mem) {
@@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1294 "failed to get io memory region resouce.\n"); 1608 "failed to get io memory region resouce.\n");
1295 1609
1296 ret = -ENOENT; 1610 ret = -ENOENT;
1297 goto probe_free_host; 1611 goto probe_free_gpio;
1298 } 1612 }
1299 1613
1300 host->mem = request_mem_region(host->mem->start, 1614 host->mem = request_mem_region(host->mem->start,
1301 RESSIZE(host->mem), pdev->name); 1615 resource_size(host->mem), pdev->name);
1302 1616
1303 if (!host->mem) { 1617 if (!host->mem) {
1304 dev_err(&pdev->dev, "failed to request io memory region.\n"); 1618 dev_err(&pdev->dev, "failed to request io memory region.\n");
1305 ret = -ENOENT; 1619 ret = -ENOENT;
1306 goto probe_free_host; 1620 goto probe_free_gpio;
1307 } 1621 }
1308 1622
1309 host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1623 host->base = ioremap(host->mem->start, resource_size(host->mem));
1310 if (!host->base) { 1624 if (!host->base) {
1311 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1625 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1312 ret = -EINVAL; 1626 ret = -EINVAL;
@@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 * ensure we don't lock the system with un-serviceable requests. */ 1645 * ensure we don't lock the system with un-serviceable requests. */
1332 1646
1333 disable_irq(host->irq); 1647 disable_irq(host->irq);
1648 host->irq_state = false;
1334 1649
1335 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1650 if (!host->pdata->no_detect) {
1336 1651 ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
1337 if (host->irq_cd >= 0) { 1652 if (ret) {
1338 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1653 dev_err(&pdev->dev, "failed to get detect gpio\n");
1339 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1340 DRIVER_NAME, host)) {
1341 dev_err(&pdev->dev, "can't get card detect irq.\n");
1342 ret = -ENOENT;
1343 goto probe_free_irq; 1654 goto probe_free_irq;
1344 } 1655 }
1345 } else { 1656
1346 dev_warn(&pdev->dev, "host detect has no irq available\n"); 1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1347 s3c2410_gpio_cfgpin(host->pdata->gpio_detect, 1658
1348 S3C2410_GPIO_INPUT); 1659 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1661 IRQF_TRIGGER_RISING |
1662 IRQF_TRIGGER_FALLING,
1663 DRIVER_NAME, host)) {
1664 dev_err(&pdev->dev,
1665 "can't get card detect irq.\n");
1666 ret = -ENOENT;
1667 goto probe_free_gpio_cd;
1668 }
1669 } else {
1670 dev_warn(&pdev->dev,
1671 "host detect has no irq available\n");
1672 gpio_direction_input(host->pdata->gpio_detect);
1673 }
1674 } else
1675 host->irq_cd = -1;
1676
1677 if (!host->pdata->no_wprotect) {
1678 ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
1679 if (ret) {
1680 dev_err(&pdev->dev, "failed to get writeprotect\n");
1681 goto probe_free_irq_cd;
1682 }
1683
1684 gpio_direction_input(host->pdata->gpio_wprotect);
1349 } 1685 }
1350 1686
1351 if (host->pdata->gpio_wprotect) 1687 /* depending on the dma state, get a dma channel to use. */
1352 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1353 S3C2410_GPIO_INPUT);
1354 1688
1355 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { 1689 if (s3cmci_host_usedma(host)) {
1356 dev_err(&pdev->dev, "unable to get DMA channel.\n"); 1690 host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client,
1357 ret = -EBUSY; 1691 host);
1358 goto probe_free_irq_cd; 1692 if (host->dma < 0) {
1693 dev_err(&pdev->dev, "cannot get DMA channel.\n");
1694 if (!s3cmci_host_canpio()) {
1695 ret = -EBUSY;
1696 goto probe_free_gpio_wp;
1697 } else {
1698 dev_warn(&pdev->dev, "falling back to PIO.\n");
1699 host->dodma = 0;
1700 }
1701 }
1359 } 1702 }
1360 1703
1361 host->clk = clk_get(&pdev->dev, "sdi"); 1704 host->clk = clk_get(&pdev->dev, "sdi");
@@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1363 dev_err(&pdev->dev, "failed to find clock source.\n"); 1706 dev_err(&pdev->dev, "failed to find clock source.\n");
1364 ret = PTR_ERR(host->clk); 1707 ret = PTR_ERR(host->clk);
1365 host->clk = NULL; 1708 host->clk = NULL;
1366 goto probe_free_host; 1709 goto probe_free_dma;
1367 } 1710 }
1368 1711
1369 ret = clk_enable(host->clk); 1712 ret = clk_enable(host->clk);
@@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1376 1719
1377 mmc->ops = &s3cmci_ops; 1720 mmc->ops = &s3cmci_ops;
1378 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1721 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1722#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
1723 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1724#else
1379 mmc->caps = MMC_CAP_4_BIT_DATA; 1725 mmc->caps = MMC_CAP_4_BIT_DATA;
1726#endif
1380 mmc->f_min = host->clk_rate / (host->clk_div * 256); 1727 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1381 mmc->f_max = host->clk_rate / host->clk_div; 1728 mmc->f_max = host->clk_rate / host->clk_div;
1382 1729
@@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1408 goto free_cpufreq; 1755 goto free_cpufreq;
1409 } 1756 }
1410 1757
1758 s3cmci_debugfs_attach(host);
1759
1411 platform_set_drvdata(pdev, mmc); 1760 platform_set_drvdata(pdev, mmc);
1412 dev_info(&pdev->dev, "initialisation done.\n"); 1761 dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
1762 s3cmci_host_usedma(host) ? "dma" : "pio",
1763 mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
1413 1764
1414 return 0; 1765 return 0;
1415 1766
@@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1422 clk_free: 1773 clk_free:
1423 clk_put(host->clk); 1774 clk_put(host->clk);
1424 1775
1776 probe_free_dma:
1777 if (s3cmci_host_usedma(host))
1778 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1779
1780 probe_free_gpio_wp:
1781 if (!host->pdata->no_wprotect)
1782 gpio_free(host->pdata->gpio_wprotect);
1783
1784 probe_free_gpio_cd:
1785 if (!host->pdata->no_detect)
1786 gpio_free(host->pdata->gpio_detect);
1787
1425 probe_free_irq_cd: 1788 probe_free_irq_cd:
1426 if (host->irq_cd >= 0) 1789 if (host->irq_cd >= 0)
1427 free_irq(host->irq_cd, host); 1790 free_irq(host->irq_cd, host);
@@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1433 iounmap(host->base); 1796 iounmap(host->base);
1434 1797
1435 probe_free_mem_region: 1798 probe_free_mem_region:
1436 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1799 release_mem_region(host->mem->start, resource_size(host->mem));
1800
1801 probe_free_gpio:
1802 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1803 gpio_free(i);
1437 1804
1438 probe_free_host: 1805 probe_free_host:
1439 mmc_free_host(mmc); 1806 mmc_free_host(mmc);
1807
1440 probe_out: 1808 probe_out:
1441 return ret; 1809 return ret;
1442} 1810}
@@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
1449 if (host->irq_cd >= 0) 1817 if (host->irq_cd >= 0)
1450 free_irq(host->irq_cd, host); 1818 free_irq(host->irq_cd, host);
1451 1819
1820 s3cmci_debugfs_remove(host);
1452 s3cmci_cpufreq_deregister(host); 1821 s3cmci_cpufreq_deregister(host);
1453 mmc_remove_host(mmc); 1822 mmc_remove_host(mmc);
1454 clk_disable(host->clk); 1823 clk_disable(host->clk);
@@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1458{ 1827{
1459 struct mmc_host *mmc = platform_get_drvdata(pdev); 1828 struct mmc_host *mmc = platform_get_drvdata(pdev);
1460 struct s3cmci_host *host = mmc_priv(mmc); 1829 struct s3cmci_host *host = mmc_priv(mmc);
1830 struct s3c24xx_mci_pdata *pd = host->pdata;
1831 int i;
1461 1832
1462 s3cmci_shutdown(pdev); 1833 s3cmci_shutdown(pdev);
1463 1834
1464 clk_put(host->clk); 1835 clk_put(host->clk);
1465 1836
1466 tasklet_disable(&host->pio_tasklet); 1837 tasklet_disable(&host->pio_tasklet);
1467 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1838
1839 if (s3cmci_host_usedma(host))
1840 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1468 1841
1469 free_irq(host->irq, host); 1842 free_irq(host->irq, host);
1470 1843
1844 if (!pd->no_wprotect)
1845 gpio_free(pd->gpio_wprotect);
1846
1847 if (!pd->no_detect)
1848 gpio_free(pd->gpio_detect);
1849
1850 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1851 gpio_free(i);
1852
1853
1471 iounmap(host->base); 1854 iounmap(host->base);
1472 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1855 release_mem_region(host->mem->start, resource_size(host->mem));
1473 1856
1474 mmc_free_host(mmc); 1857 mmc_free_host(mmc);
1475 return 0; 1858 return 0;
1476} 1859}
1477 1860
1478static int __devinit s3cmci_2410_probe(struct platform_device *dev) 1861static struct platform_device_id s3cmci_driver_ids[] = {
1479{ 1862 {
1480 return s3cmci_probe(dev, 0); 1863 .name = "s3c2410-sdi",
1481} 1864 .driver_data = 0,
1865 }, {
1866 .name = "s3c2412-sdi",
1867 .driver_data = 1,
1868 }, {
1869 .name = "s3c2440-sdi",
1870 .driver_data = 1,
1871 },
1872 { }
1873};
1482 1874
1483static int __devinit s3cmci_2412_probe(struct platform_device *dev) 1875MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1484{
1485 return s3cmci_probe(dev, 1);
1486}
1487 1876
1488static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1489{
1490 return s3cmci_probe(dev, 1);
1491}
1492 1877
1493#ifdef CONFIG_PM 1878#ifdef CONFIG_PM
1494 1879
1495static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) 1880static int s3cmci_suspend(struct device *dev)
1496{ 1881{
1497 struct mmc_host *mmc = platform_get_drvdata(dev); 1882 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1883 struct pm_message event = { PM_EVENT_SUSPEND };
1498 1884
1499 return mmc_suspend_host(mmc, state); 1885 return mmc_suspend_host(mmc, event);
1500} 1886}
1501 1887
1502static int s3cmci_resume(struct platform_device *dev) 1888static int s3cmci_resume(struct device *dev)
1503{ 1889{
1504 struct mmc_host *mmc = platform_get_drvdata(dev); 1890 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1505 1891
1506 return mmc_resume_host(mmc); 1892 return mmc_resume_host(mmc);
1507} 1893}
1508 1894
1509#else /* CONFIG_PM */ 1895static struct dev_pm_ops s3cmci_pm = {
1510#define s3cmci_suspend NULL
1511#define s3cmci_resume NULL
1512#endif /* CONFIG_PM */
1513
1514
1515static struct platform_driver s3cmci_2410_driver = {
1516 .driver.name = "s3c2410-sdi",
1517 .driver.owner = THIS_MODULE,
1518 .probe = s3cmci_2410_probe,
1519 .remove = __devexit_p(s3cmci_remove),
1520 .shutdown = s3cmci_shutdown,
1521 .suspend = s3cmci_suspend, 1896 .suspend = s3cmci_suspend,
1522 .resume = s3cmci_resume, 1897 .resume = s3cmci_resume,
1523}; 1898};
1524 1899
1525static struct platform_driver s3cmci_2412_driver = { 1900#define s3cmci_pm_ops &s3cmci_pm
1526 .driver.name = "s3c2412-sdi", 1901#else /* CONFIG_PM */
1527 .driver.owner = THIS_MODULE, 1902#define s3cmci_pm_ops NULL
1528 .probe = s3cmci_2412_probe, 1903#endif /* CONFIG_PM */
1529 .remove = __devexit_p(s3cmci_remove),
1530 .shutdown = s3cmci_shutdown,
1531 .suspend = s3cmci_suspend,
1532 .resume = s3cmci_resume,
1533};
1534 1904
1535static struct platform_driver s3cmci_2440_driver = { 1905
1536 .driver.name = "s3c2440-sdi", 1906static struct platform_driver s3cmci_driver = {
1537 .driver.owner = THIS_MODULE, 1907 .driver = {
1538 .probe = s3cmci_2440_probe, 1908 .name = "s3c-sdi",
1909 .owner = THIS_MODULE,
1910 .pm = s3cmci_pm_ops,
1911 },
1912 .id_table = s3cmci_driver_ids,
1913 .probe = s3cmci_probe,
1539 .remove = __devexit_p(s3cmci_remove), 1914 .remove = __devexit_p(s3cmci_remove),
1540 .shutdown = s3cmci_shutdown, 1915 .shutdown = s3cmci_shutdown,
1541 .suspend = s3cmci_suspend,
1542 .resume = s3cmci_resume,
1543}; 1916};
1544 1917
1545
1546static int __init s3cmci_init(void) 1918static int __init s3cmci_init(void)
1547{ 1919{
1548 platform_driver_register(&s3cmci_2410_driver); 1920 return platform_driver_register(&s3cmci_driver);
1549 platform_driver_register(&s3cmci_2412_driver);
1550 platform_driver_register(&s3cmci_2440_driver);
1551 return 0;
1552} 1921}
1553 1922
1554static void __exit s3cmci_exit(void) 1923static void __exit s3cmci_exit(void)
1555{ 1924{
1556 platform_driver_unregister(&s3cmci_2410_driver); 1925 platform_driver_unregister(&s3cmci_driver);
1557 platform_driver_unregister(&s3cmci_2412_driver);
1558 platform_driver_unregister(&s3cmci_2440_driver);
1559} 1926}
1560 1927
1561module_init(s3cmci_init); 1928module_init(s3cmci_init);
@@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit);
1564MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1931MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1565MODULE_LICENSE("GPL v2"); 1932MODULE_LICENSE("GPL v2");
1566MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>"); 1933MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
1567MODULE_ALIAS("platform:s3c2410-sdi");
1568MODULE_ALIAS("platform:s3c2412-sdi");
1569MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index ca1ba3d58cfd..c76b53dbeb61 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -8,9 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
14enum s3cmci_waitfor { 11enum s3cmci_waitfor {
15 COMPLETION_NONE, 12 COMPLETION_NONE,
16 COMPLETION_FINALIZE, 13 COMPLETION_FINALIZE,
@@ -42,6 +39,11 @@ struct s3cmci_host {
42 int dodma; 39 int dodma;
43 int dmatogo; 40 int dmatogo;
44 41
42 bool irq_disabled;
43 bool irq_enabled;
44 bool irq_state;
45 int sdio_irqen;
46
45 struct mmc_request *mrq; 47 struct mmc_request *mrq;
46 int cmd_is_stop; 48 int cmd_is_stop;
47 49
@@ -68,6 +70,12 @@ struct s3cmci_host {
68 unsigned int ccnt, dcnt; 70 unsigned int ccnt, dcnt;
69 struct tasklet_struct pio_tasklet; 71 struct tasklet_struct pio_tasklet;
70 72
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debug_root;
75 struct dentry *debug_state;
76 struct dentry *debug_regs;
77#endif
78
71#ifdef CONFIG_CPU_FREQ 79#ifdef CONFIG_CPU_FREQ
72 struct notifier_block freq_transition; 80 struct notifier_block freq_transition;
73#endif 81#endif
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 379c316f329e..4c19269de91a 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/math64.h> 23#include <linux/math64.h>
24#include <linux/sched.h>
24 25
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c2baf3353f84..0a11721f146e 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -20,6 +20,7 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/sched.h>
23 24
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
25#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 841e085ab74a..14be0755d7cd 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -486,6 +486,7 @@ config MTD_BFIN_ASYNC
486 486
487config MTD_GPIO_ADDR 487config MTD_GPIO_ADDR
488 tristate "GPIO-assisted Flash Chip Support" 488 tristate "GPIO-assisted Flash Chip Support"
489 depends on GENERIC_GPIO || GPIOLIB
489 depends on MTD_COMPLEX_MAPPINGS 490 depends on MTD_COMPLEX_MAPPINGS
490 select MTD_PARTITIONS 491 select MTD_PARTITIONS
491 help 492 help
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 1d5cf8636723..ae2f6dbe43c3 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,4 +58,6 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
58obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o 58obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
59obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o 59obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
60obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o 60obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
61obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
62obj-$(CONFIG_MTD_VMU) += vmu-flash.o
61obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o 63obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 44ef9a49a860..1ad5caf9fe69 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -13,7 +13,9 @@
13 * Licensed under the GPL-2 or later. 13 * Licensed under the GPL-2 or later.
14 */ 14 */
15 15
16#include <linux/gpio.h>
16#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/io.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
@@ -23,9 +25,6 @@
23#include <linux/platform_device.h> 25#include <linux/platform_device.h>
24#include <linux/types.h> 26#include <linux/types.h>
25 27
26#include <asm/gpio.h>
27#include <asm/io.h>
28
29#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 28#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
30 29
31#define DRIVER_NAME "gpio-addr-flash" 30#define DRIVER_NAME "gpio-addr-flash"
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fdb97f3d30e9..d7a47574d21e 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -209,8 +209,8 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
209 } 209 }
210 subdev->mtd->owner = THIS_MODULE; 210 subdev->mtd->owner = THIS_MODULE;
211 211
212 printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %dMiB, " 212 printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
213 "%d-bit\n", phys, subdev->mtd->size >> 20, 213 phys, (unsigned)(subdev->mtd->size >> 20),
214 subdev->map.bankwidth * 8); 214 subdev->map.bankwidth * 8);
215 215
216 return 0; 216 return 0;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0acbf4f5be50..8ca17a3e96ea 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
32 spinlock_t queue_lock; 32 spinlock_t queue_lock;
33}; 33};
34 34
35static int blktrans_discard_request(struct request_queue *q,
36 struct request *req)
37{
38 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
39 req->cmd[0] = REQ_LB_OP_DISCARD;
40 return 0;
41}
42
43static int do_blktrans_request(struct mtd_blktrans_ops *tr, 35static int do_blktrans_request(struct mtd_blktrans_ops *tr,
44 struct mtd_blktrans_dev *dev, 36 struct mtd_blktrans_dev *dev,
45 struct request *req) 37 struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
52 44
53 buf = req->buffer; 45 buf = req->buffer;
54 46
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return tr->discard(dev, block, nsect);
58
59 if (!blk_fs_request(req)) 47 if (!blk_fs_request(req))
60 return -EIO; 48 return -EIO;
61 49
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
63 get_capacity(req->rq_disk)) 51 get_capacity(req->rq_disk))
64 return -EIO; 52 return -EIO;
65 53
54 if (blk_discard_rq(req))
55 return tr->discard(dev, block, nsect);
56
66 switch(rq_data_dir(req)) { 57 switch(rq_data_dir(req)) {
67 case READ: 58 case READ:
68 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 59 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
380 tr->blkcore_priv->rq->queuedata = tr; 371 tr->blkcore_priv->rq->queuedata = tr;
381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 372 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
382 if (tr->discard) 373 if (tr->discard)
383 blk_queue_set_discard(tr->blkcore_priv->rq, 374 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
384 blktrans_discard_request); 375 tr->blkcore_priv->rq);
385 376
386 tr->blkshift = ffs(tr->blksize) - 1; 377 tr->blkshift = ffs(tr->blksize) - 1;
387 378
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 22113865438b..2957cc70da3d 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -761,6 +761,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
761 * @mtd: mtd info structure 761 * @mtd: mtd info structure
762 * @chip: nand chip info structure 762 * @chip: nand chip info structure
763 * @buf: buffer to store read data 763 * @buf: buffer to store read data
764 * @page: page number to read
764 * 765 *
765 * Not for syndrome calculating ecc controllers, which use a special oob layout 766 * Not for syndrome calculating ecc controllers, which use a special oob layout
766 */ 767 */
@@ -777,6 +778,7 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
777 * @mtd: mtd info structure 778 * @mtd: mtd info structure
778 * @chip: nand chip info structure 779 * @chip: nand chip info structure
779 * @buf: buffer to store read data 780 * @buf: buffer to store read data
781 * @page: page number to read
780 * 782 *
781 * We need a special oob layout and handling even when OOB isn't used. 783 * We need a special oob layout and handling even when OOB isn't used.
782 */ 784 */
@@ -818,6 +820,7 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *c
818 * @mtd: mtd info structure 820 * @mtd: mtd info structure
819 * @chip: nand chip info structure 821 * @chip: nand chip info structure
820 * @buf: buffer to store read data 822 * @buf: buffer to store read data
823 * @page: page number to read
821 */ 824 */
822static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 825static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
823 uint8_t *buf, int page) 826 uint8_t *buf, int page)
@@ -939,6 +942,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
939 * @mtd: mtd info structure 942 * @mtd: mtd info structure
940 * @chip: nand chip info structure 943 * @chip: nand chip info structure
941 * @buf: buffer to store read data 944 * @buf: buffer to store read data
945 * @page: page number to read
942 * 946 *
943 * Not for syndrome calculating ecc controllers which need a special oob layout 947 * Not for syndrome calculating ecc controllers which need a special oob layout
944 */ 948 */
@@ -983,6 +987,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
983 * @mtd: mtd info structure 987 * @mtd: mtd info structure
984 * @chip: nand chip info structure 988 * @chip: nand chip info structure
985 * @buf: buffer to store read data 989 * @buf: buffer to store read data
990 * @page: page number to read
986 * 991 *
987 * Hardware ECC for large page chips, require OOB to be read first. 992 * Hardware ECC for large page chips, require OOB to be read first.
988 * For this ECC mode, the write_page method is re-used from ECC_HW. 993 * For this ECC mode, the write_page method is re-used from ECC_HW.
@@ -1031,6 +1036,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1031 * @mtd: mtd info structure 1036 * @mtd: mtd info structure
1032 * @chip: nand chip info structure 1037 * @chip: nand chip info structure
1033 * @buf: buffer to store read data 1038 * @buf: buffer to store read data
1039 * @page: page number to read
1034 * 1040 *
1035 * The hw generator calculates the error syndrome automatically. Therefor 1041 * The hw generator calculates the error syndrome automatically. Therefor
1036 * we need a special oob layout and handling. 1042 * we need a special oob layout and handling.
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e1f7d0a78b9d..14cec04c34f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -42,6 +42,7 @@
42#include <linux/log2.h> 42#include <linux/log2.h>
43#include <linux/kthread.h> 43#include <linux/kthread.h>
44#include <linux/reboot.h> 44#include <linux/reboot.h>
45#include <linux/kernel.h>
45#include "ubi.h" 46#include "ubi.h"
46 47
47/* Maximum length of the 'mtd=' parameter */ 48/* Maximum length of the 'mtd=' parameter */
@@ -1257,7 +1258,7 @@ static int __init bytes_str_to_int(const char *str)
1257 unsigned long result; 1258 unsigned long result;
1258 1259
1259 result = simple_strtoul(str, &endp, 0); 1260 result = simple_strtoul(str, &endp, 0);
1260 if (str == endp || result < 0) { 1261 if (str == endp || result >= INT_MAX) {
1261 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 1262 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1262 str); 1263 str);
1263 return -EINVAL; 1264 return -EINVAL;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index e7161adc419d..90af61a2c3e4 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -794,16 +794,15 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
794 * number. 794 * number.
795 */ 795 */
796 image_seq = be32_to_cpu(ech->image_seq); 796 image_seq = be32_to_cpu(ech->image_seq);
797 if (!si->image_seq_set) { 797 if (!ubi->image_seq && image_seq)
798 ubi->image_seq = image_seq; 798 ubi->image_seq = image_seq;
799 si->image_seq_set = 1; 799 if (ubi->image_seq && image_seq &&
800 } else if (ubi->image_seq && ubi->image_seq != image_seq) { 800 ubi->image_seq != image_seq) {
801 ubi_err("bad image sequence number %d in PEB %d, " 801 ubi_err("bad image sequence number %d in PEB %d, "
802 "expected %d", image_seq, pnum, ubi->image_seq); 802 "expected %d", image_seq, pnum, ubi->image_seq);
803 ubi_dbg_dump_ec_hdr(ech); 803 ubi_dbg_dump_ec_hdr(ech);
804 return -EINVAL; 804 return -EINVAL;
805 } 805 }
806
807 } 806 }
808 807
809 /* OK, we've done with the EC header, let's look at the VID header */ 808 /* OK, we've done with the EC header, let's look at the VID header */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index bab31695dace..ff179ad7ca55 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -103,7 +103,6 @@ struct ubi_scan_volume {
103 * @ec_sum: a temporary variable used when calculating @mean_ec 103 * @ec_sum: a temporary variable used when calculating @mean_ec
104 * @ec_count: a temporary variable used when calculating @mean_ec 104 * @ec_count: a temporary variable used when calculating @mean_ec
105 * @corr_count: count of corrupted PEBs 105 * @corr_count: count of corrupted PEBs
106 * @image_seq_set: indicates @ubi->image_seq is known
107 * 106 *
108 * This data structure contains the result of scanning and may be used by other 107 * This data structure contains the result of scanning and may be used by other
109 * UBI sub-systems to build final UBI data structures, further error-recovery 108 * UBI sub-systems to build final UBI data structures, further error-recovery
@@ -127,7 +126,6 @@ struct ubi_scan_info {
127 uint64_t ec_sum; 126 uint64_t ec_sum;
128 int ec_count; 127 int ec_count;
129 int corr_count; 128 int corr_count;
130 int image_seq_set;
131}; 129};
132 130
133struct ubi_device; 131struct ubi_device;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e19ca4bb7510..b2f71f79baaf 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -975,7 +975,7 @@ config ENC28J60_WRITEVERIFY
975 975
976config ETHOC 976config ETHOC
977 tristate "OpenCores 10/100 Mbps Ethernet MAC support" 977 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
978 depends on NET_ETHERNET && HAS_IOMEM 978 depends on NET_ETHERNET && HAS_IOMEM && HAS_DMA
979 select MII 979 select MII
980 select PHYLIB 980 select PHYLIB
981 select CRC32 981 select CRC32
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index ce6f1ac25df8..3f4b4300f533 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1088,7 +1088,14 @@ static struct net_device * au1000_probe(int port_num)
1088 return NULL; 1088 return NULL;
1089 } 1089 }
1090 1090
1091 if ((err = register_netdev(dev)) != 0) { 1091 dev->base_addr = base;
1092 dev->irq = irq;
1093 dev->netdev_ops = &au1000_netdev_ops;
1094 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1095 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1096
1097 err = register_netdev(dev);
1098 if (err != 0) {
1092 printk(KERN_ERR "%s: Cannot register net device, error %d\n", 1099 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1093 DRV_NAME, err); 1100 DRV_NAME, err);
1094 free_netdev(dev); 1101 free_netdev(dev);
@@ -1209,12 +1216,6 @@ static struct net_device * au1000_probe(int port_num)
1209 aup->tx_db_inuse[i] = pDB; 1216 aup->tx_db_inuse[i] = pDB;
1210 } 1217 }
1211 1218
1212 dev->base_addr = base;
1213 dev->irq = irq;
1214 dev->netdev_ops = &au1000_netdev_ops;
1215 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1216 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1217
1218 /* 1219 /*
1219 * The boot code uses the ethernet controller, so reset it to start 1220 * The boot code uses the ethernet controller, so reset it to start
1220 * fresh. au1000_init() expects that the device is in reset state. 1221 * fresh. au1000_init() expects that the device is in reset state.
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ff449de6f3c0..8762a27a2a18 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/sched.h>
25#include <linux/sysdev.h> 26#include <linux/sysdev.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27#include <linux/types.h> 28#include <linux/types.h>
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 15c0195ebd31..a24be34a3f7a 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev)
768 768
769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); 769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
770 770
771 /* Renegotiate with link partner */ 771 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
772 if (autoneg_normal) { 772 if (autoneg_normal) {
773 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); 773 /* Renegotiate with link partner */
774 data |= BMCR_ANENABLE | BMCR_ANRESTART; 774 data |= BMCR_ANENABLE | BMCR_ANRESTART;
775 } else {
776 /* Don't negotiate speed or duplex */
777 data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
778
779 /* Set speed and duplex static */
780 if (current_speed_selection == 10)
781 data &= ~BMCR_SPEED100;
782 else
783 data |= BMCR_SPEED100;
784
785 if (current_duplex != full)
786 data &= ~BMCR_FULLDPLX;
787 else
788 data |= BMCR_FULLDPLX;
775 } 789 }
776 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); 790 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
777} 791}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index db6380379478..e3478314c002 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -164,16 +164,14 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7) 164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
165 165
166/* EMAC mac_control register */ 166/* EMAC mac_control register */
167#define EMAC_MACCONTROL_TXPTYPE (0x200) 167#define EMAC_MACCONTROL_TXPTYPE BIT(9)
168#define EMAC_MACCONTROL_TXPACEEN (0x40) 168#define EMAC_MACCONTROL_TXPACEEN BIT(6)
169#define EMAC_MACCONTROL_MIIEN (0x20) 169#define EMAC_MACCONTROL_GMIIEN BIT(5)
170#define EMAC_MACCONTROL_GIGABITEN (0x80) 170#define EMAC_MACCONTROL_GIGABITEN BIT(7)
171#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7) 171#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
172#define EMAC_MACCONTROL_FULLDUPLEXEN (0x1)
173#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15) 172#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
174 173
175/* GIGABIT MODE related bits */ 174/* GIGABIT MODE related bits */
176#define EMAC_DM646X_MACCONTORL_GMIIEN BIT(5)
177#define EMAC_DM646X_MACCONTORL_GIG BIT(7) 175#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
178#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17) 176#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
179 177
@@ -192,10 +190,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
192#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF) 190#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
193 191
194/* MAC_IN_VECTOR (0x180) register bit fields */ 192/* MAC_IN_VECTOR (0x180) register bit fields */
195#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT (0x20000) 193#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
196#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT (0x10000) 194#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
197#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC (0x0100) 195#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
198#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC (0x01) 196#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
199 197
200/** NOTE:: For DM646x the IN_VECTOR has changed */ 198/** NOTE:: For DM646x the IN_VECTOR has changed */
201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) 199#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
@@ -203,7 +201,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
203#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26) 201#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
204#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27) 202#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
205 203
206
207/* CPPI bit positions */ 204/* CPPI bit positions */
208#define EMAC_CPPI_SOP_BIT BIT(31) 205#define EMAC_CPPI_SOP_BIT BIT(31)
209#define EMAC_CPPI_EOP_BIT BIT(30) 206#define EMAC_CPPI_EOP_BIT BIT(30)
@@ -750,8 +747,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
750 747
751 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) { 748 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
752 mac_control = emac_read(EMAC_MACCONTROL); 749 mac_control = emac_read(EMAC_MACCONTROL);
753 mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN | 750 mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
754 EMAC_DM646X_MACCONTORL_GIG |
755 EMAC_DM646X_MACCONTORL_GIGFORCE); 751 EMAC_DM646X_MACCONTORL_GIGFORCE);
756 } else { 752 } else {
757 /* Clear the GIG bit and GIGFORCE bit */ 753 /* Clear the GIG bit and GIGFORCE bit */
@@ -2108,7 +2104,7 @@ static int emac_hw_enable(struct emac_priv *priv)
2108 2104
2109 /* Enable MII */ 2105 /* Enable MII */
2110 val = emac_read(EMAC_MACCONTROL); 2106 val = emac_read(EMAC_MACCONTROL);
2111 val |= (EMAC_MACCONTROL_MIIEN); 2107 val |= (EMAC_MACCONTROL_GMIIEN);
2112 emac_write(EMAC_MACCONTROL, val); 2108 emac_write(EMAC_MACCONTROL, val);
2113 2109
2114 /* Enable NAPI and interrupts */ 2110 /* Enable NAPI and interrupts */
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 9686c1fa28f1..7a3bdac84abe 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -237,6 +237,7 @@
237 237
238#include <linux/module.h> 238#include <linux/module.h>
239#include <linux/kernel.h> 239#include <linux/kernel.h>
240#include <linux/sched.h>
240#include <linux/string.h> 241#include <linux/string.h>
241#include <linux/errno.h> 242#include <linux/errno.h>
242#include <linux/ioport.h> 243#include <linux/ioport.h>
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d19b0845970a..3c29a20b751e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -151,6 +151,7 @@
151#include <linux/moduleparam.h> 151#include <linux/moduleparam.h>
152#include <linux/kernel.h> 152#include <linux/kernel.h>
153#include <linux/types.h> 153#include <linux/types.h>
154#include <linux/sched.h>
154#include <linux/slab.h> 155#include <linux/slab.h>
155#include <linux/delay.h> 156#include <linux/delay.h>
156#include <linux/init.h> 157#include <linux/init.h>
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 189dfa2d6c76..3e187b0e4203 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,8 @@ struct e1000_info;
141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */ 141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
142#define HV_TNCRS_LOWER PHY_REG(778, 30) 142#define HV_TNCRS_LOWER PHY_REG(778, 30)
143 143
144#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
145
144/* BM PHY Copper Specific Status */ 146/* BM PHY Copper Specific Status */
145#define BM_CS_STATUS 17 147#define BM_CS_STATUS 17
146#define BM_CS_STATUS_LINK_UP 0x0400 148#define BM_CS_STATUS_LINK_UP 0x0400
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 1bf4d2a5d34f..e82638ecae88 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -327,10 +327,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
327 327
328 hw->fc.current_mode = hw->fc.requested_mode; 328 hw->fc.current_mode = hw->fc.requested_mode;
329 329
330 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 330 if (hw->phy.media_type == e1000_media_type_fiber) {
331 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 331 retval = hw->mac.ops.setup_link(hw);
332 /* implicit goto out */
333 } else {
334 retval = e1000e_force_mac_fc(hw);
335 if (retval)
336 goto out;
337 e1000e_set_fc_watermarks(hw);
338 }
332 } 339 }
333 340
341out:
334 clear_bit(__E1000_RESETTING, &adapter->state); 342 clear_bit(__E1000_RESETTING, &adapter->state);
335 return retval; 343 return retval;
336} 344}
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 51ddb04ab195..eff3f4783655 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1118,7 +1118,8 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1118 oem_reg |= HV_OEM_BITS_LPLU; 1118 oem_reg |= HV_OEM_BITS_LPLU;
1119 } 1119 }
1120 /* Restart auto-neg to activate the bits */ 1120 /* Restart auto-neg to activate the bits */
1121 oem_reg |= HV_OEM_BITS_RESTART_AN; 1121 if (!e1000_check_reset_block(hw))
1122 oem_reg |= HV_OEM_BITS_RESTART_AN;
1122 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); 1123 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
1123 1124
1124out: 1125out:
@@ -3558,6 +3559,7 @@ struct e1000_info e1000_pch_info = {
3558 | FLAG_HAS_AMT 3559 | FLAG_HAS_AMT
3559 | FLAG_HAS_FLASH 3560 | FLAG_HAS_FLASH
3560 | FLAG_HAS_JUMBO_FRAMES 3561 | FLAG_HAS_JUMBO_FRAMES
3562 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
3561 | FLAG_APME_IN_WUC, 3563 | FLAG_APME_IN_WUC,
3562 .pba = 26, 3564 .pba = 26,
3563 .max_hw_frame_size = 4096, 3565 .max_hw_frame_size = 4096,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0687c6aa4e46..fad8f9ea0043 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2769,25 +2769,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
2769 /* 2769 /*
2770 * flow control settings 2770 * flow control settings
2771 * 2771 *
2772 * The high water mark must be low enough to fit two full frame 2772 * The high water mark must be low enough to fit one full frame
2773 * (or the size used for early receive) above it in the Rx FIFO. 2773 * (or the size used for early receive) above it in the Rx FIFO.
2774 * Set it to the lower of: 2774 * Set it to the lower of:
2775 * - 90% of the Rx FIFO size, and 2775 * - 90% of the Rx FIFO size, and
2776 * - the full Rx FIFO size minus the early receive size (for parts 2776 * - the full Rx FIFO size minus the early receive size (for parts
2777 * with ERT support assuming ERT set to E1000_ERT_2048), or 2777 * with ERT support assuming ERT set to E1000_ERT_2048), or
2778 * - the full Rx FIFO size minus two full frames 2778 * - the full Rx FIFO size minus one full frame
2779 */ 2779 */
2780 if ((adapter->flags & FLAG_HAS_ERT) && 2780 if (hw->mac.type == e1000_pchlan) {
2781 (adapter->netdev->mtu > ETH_DATA_LEN)) 2781 /*
2782 hwm = min(((pba << 10) * 9 / 10), 2782 * Workaround PCH LOM adapter hangs with certain network
2783 ((pba << 10) - (E1000_ERT_2048 << 3))); 2783 * loads. If hangs persist, try disabling Tx flow control.
2784 else 2784 */
2785 hwm = min(((pba << 10) * 9 / 10), 2785 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2786 ((pba << 10) - (2 * adapter->max_frame_size))); 2786 fc->high_water = 0x3500;
2787 fc->low_water = 0x1500;
2788 } else {
2789 fc->high_water = 0x5000;
2790 fc->low_water = 0x3000;
2791 }
2792 } else {
2793 if ((adapter->flags & FLAG_HAS_ERT) &&
2794 (adapter->netdev->mtu > ETH_DATA_LEN))
2795 hwm = min(((pba << 10) * 9 / 10),
2796 ((pba << 10) - (E1000_ERT_2048 << 3)));
2797 else
2798 hwm = min(((pba << 10) * 9 / 10),
2799 ((pba << 10) - adapter->max_frame_size));
2787 2800
2788 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 2801 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
2789 fc->low_water = (fc->high_water - (2 * adapter->max_frame_size)); 2802 fc->low_water = fc->high_water - 8;
2790 fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */ 2803 }
2791 2804
2792 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 2805 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2793 fc->pause_time = 0xFFFF; 2806 fc->pause_time = 0xFFFF;
@@ -2813,6 +2826,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
2813 if (mac->ops.init_hw(hw)) 2826 if (mac->ops.init_hw(hw))
2814 e_err("Hardware Error\n"); 2827 e_err("Hardware Error\n");
2815 2828
2829 /* additional part of the flow-control workaround above */
2830 if (hw->mac.type == e1000_pchlan)
2831 ew32(FCRTV_PCH, 0x1000);
2832
2816 e1000_update_mng_vlan(adapter); 2833 e1000_update_mng_vlan(adapter);
2817 2834
2818 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 2835 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -3610,7 +3627,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3610 case SPEED_100: 3627 case SPEED_100:
3611 txb2b = 0; 3628 txb2b = 0;
3612 netdev->tx_queue_len = 100; 3629 netdev->tx_queue_len = 100;
3613 /* maybe add some timeout factor ? */ 3630 adapter->tx_timeout_factor = 10;
3614 break; 3631 break;
3615 } 3632 }
3616 3633
@@ -4288,8 +4305,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4288 4305
4289 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4306 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4290 msleep(1); 4307 msleep(1);
4291 /* e1000e_down has a dependency on max_frame_size */ 4308 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
4292 adapter->max_frame_size = max_frame; 4309 adapter->max_frame_size = max_frame;
4310 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4311 netdev->mtu = new_mtu;
4293 if (netif_running(netdev)) 4312 if (netif_running(netdev))
4294 e1000e_down(adapter); 4313 e1000e_down(adapter);
4295 4314
@@ -4319,9 +4338,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4319 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 4338 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
4320 + ETH_FCS_LEN; 4339 + ETH_FCS_LEN;
4321 4340
4322 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4323 netdev->mtu = new_mtu;
4324
4325 if (netif_running(netdev)) 4341 if (netif_running(netdev))
4326 e1000e_up(adapter); 4342 e1000e_up(adapter);
4327 else 4343 else
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 03175b3a2c9e..85f955f70417 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -71,7 +71,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
71#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) 71#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
72#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ 72#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
73#define I82577_CTRL_REG 23 73#define I82577_CTRL_REG 23
74#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
75 74
76/* 82577 specific PHY registers */ 75/* 82577 specific PHY registers */
77#define I82577_PHY_CTRL_2 18 76#define I82577_PHY_CTRL_2 18
@@ -660,15 +659,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
660 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 659 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
661 660
662 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); 661 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
663 if (ret_val)
664 goto out;
665
666 /* Set number of link attempts before downshift */
667 ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
668 if (ret_val)
669 goto out;
670 phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
671 ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
672 662
673out: 663out:
674 return ret_val; 664 return ret_val;
@@ -2658,19 +2648,18 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2658 page = 0; 2648 page = 0;
2659 2649
2660 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2650 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2661 if ((hw->phy.type != e1000_phy_82578) || 2651 u32 phy_addr = hw->phy.addr;
2662 ((reg != I82578_ADDR_REG) &&
2663 (reg != I82578_ADDR_REG + 1))) {
2664 u32 phy_addr = hw->phy.addr;
2665 2652
2666 hw->phy.addr = 1; 2653 hw->phy.addr = 1;
2667 2654
2668 /* Page is shifted left, PHY expects (page x 32) */ 2655 /* Page is shifted left, PHY expects (page x 32) */
2669 ret_val = e1000e_write_phy_reg_mdic(hw, 2656 ret_val = e1000e_write_phy_reg_mdic(hw,
2670 IGP01E1000_PHY_PAGE_SELECT, 2657 IGP01E1000_PHY_PAGE_SELECT,
2671 (page << IGP_PAGE_SHIFT)); 2658 (page << IGP_PAGE_SHIFT));
2672 hw->phy.addr = phy_addr; 2659 hw->phy.addr = phy_addr;
2673 } 2660
2661 if (ret_val)
2662 goto out;
2674 } 2663 }
2675 2664
2676 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2665 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2678,7 +2667,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2678out: 2667out:
2679 /* Revert to MDIO fast mode, if applicable */ 2668 /* Revert to MDIO fast mode, if applicable */
2680 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2669 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2681 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2670 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2682 2671
2683 if (!locked) 2672 if (!locked)
2684 hw->phy.ops.release_phy(hw); 2673 hw->phy.ops.release_phy(hw);
@@ -2784,19 +2773,18 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2784 } 2773 }
2785 2774
2786 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2775 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2787 if ((hw->phy.type != e1000_phy_82578) || 2776 u32 phy_addr = hw->phy.addr;
2788 ((reg != I82578_ADDR_REG) &&
2789 (reg != I82578_ADDR_REG + 1))) {
2790 u32 phy_addr = hw->phy.addr;
2791 2777
2792 hw->phy.addr = 1; 2778 hw->phy.addr = 1;
2793 2779
2794 /* Page is shifted left, PHY expects (page x 32) */ 2780 /* Page is shifted left, PHY expects (page x 32) */
2795 ret_val = e1000e_write_phy_reg_mdic(hw, 2781 ret_val = e1000e_write_phy_reg_mdic(hw,
2796 IGP01E1000_PHY_PAGE_SELECT, 2782 IGP01E1000_PHY_PAGE_SELECT,
2797 (page << IGP_PAGE_SHIFT)); 2783 (page << IGP_PAGE_SHIFT));
2798 hw->phy.addr = phy_addr; 2784 hw->phy.addr = phy_addr;
2799 } 2785
2786 if (ret_val)
2787 goto out;
2800 } 2788 }
2801 2789
2802 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2790 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2805,7 +2793,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2805out: 2793out:
2806 /* Revert to MDIO fast mode, if applicable */ 2794 /* Revert to MDIO fast mode, if applicable */
2807 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2795 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2808 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2796 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2809 2797
2810 if (!locked) 2798 if (!locked)
2811 hw->phy.ops.release_phy(hw); 2799 hw->phy.ops.release_phy(hw);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index d4d9a3eda695..f5b96cadeb25 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,7 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#include <linux/capability.h>
114#include <linux/module.h> 115#include <linux/module.h>
115#include <linux/kernel.h> 116#include <linux/kernel.h>
116#include <linux/init.h> 117#include <linux/init.h>
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 590473afb3dc..f7d9ac8324cb 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -17,6 +17,7 @@
17#include <linux/mii.h> 17#include <linux/mii.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h>
20#include <net/ethoc.h> 21#include <net/ethoc.h>
21 22
22static int buffer_size = 0x8000; /* 32 KBytes */ 23static int buffer_size = 0x8000; /* 32 KBytes */
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b2a5ec8f3721..dd4ba01fd92d 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -145,6 +145,7 @@
145 145
146#include <linux/module.h> 146#include <linux/module.h>
147#include <linux/kernel.h> 147#include <linux/kernel.h>
148#include <linux/sched.h>
148#include <linux/string.h> 149#include <linux/string.h>
149#include <linux/errno.h> 150#include <linux/errno.h>
150#include <linux/ioport.h> 151#include <linux/ioport.h>
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 73fe97777201..3116601dbfea 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -49,6 +49,7 @@
49#include <linux/netdevice.h> 49#include <linux/netdevice.h>
50#include <linux/etherdevice.h> 50#include <linux/etherdevice.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/sched.h>
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53#include <linux/ethtool.h> 54#include <linux/ethtool.h>
54#include <linux/timer.h> 55#include <linux/timer.h>
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a09aca..f7519a594945 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -145,6 +145,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
145/* Time in jiffies before concluding the transmitter is hung. */ 145/* Time in jiffies before concluding the transmitter is hung. */
146#define TX_TIMEOUT (5*HZ) 146#define TX_TIMEOUT (5*HZ)
147 147
148#include <linux/capability.h>
148#include <linux/module.h> 149#include <linux/module.h>
149#include <linux/kernel.h> 150#include <linux/kernel.h>
150#include <linux/string.h> 151#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 7bcaf7c66243..e344c84c0ef9 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -44,6 +44,7 @@
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/sched.h>
47#include <linux/string.h> 48#include <linux/string.h>
48#include <linux/workqueue.h> 49#include <linux/workqueue.h>
49#include <linux/fs.h> 50#include <linux/fs.h>
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index aa4488e871b2..ed60fd664273 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -71,6 +71,7 @@
71 71
72/*****************************************************************************/ 72/*****************************************************************************/
73 73
74#include <linux/capability.h>
74#include <linux/module.h> 75#include <linux/module.h>
75#include <linux/ioport.h> 76#include <linux/ioport.h>
76#include <linux/string.h> 77#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 88c593596020..1686f6dcbbce 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -61,6 +61,7 @@
61 61
62/*****************************************************************************/ 62/*****************************************************************************/
63 63
64#include <linux/capability.h>
64#include <linux/module.h> 65#include <linux/module.h>
65#include <linux/ioport.h> 66#include <linux/ioport.h>
66#include <linux/string.h> 67#include <linux/string.h>
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 0013c409782c..91c5790c9581 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -42,6 +42,7 @@
42 42
43/*****************************************************************************/ 43/*****************************************************************************/
44 44
45#include <linux/capability.h>
45#include <linux/module.h> 46#include <linux/module.h>
46#include <linux/types.h> 47#include <linux/types.h>
47#include <linux/net.h> 48#include <linux/net.h>
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index a9a1a99f02dd..dd8665138062 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -98,6 +98,7 @@
98 98
99#include <linux/module.h> 99#include <linux/module.h>
100#include <linux/kernel.h> 100#include <linux/kernel.h>
101#include <linux/sched.h>
101#include <linux/string.h> 102#include <linux/string.h>
102#include <linux/errno.h> 103#include <linux/errno.h>
103#include <linux/ioport.h> 104#include <linux/ioport.h>
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index aab3d971af51..b243ed3b0c36 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -34,6 +34,7 @@
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/if_ether.h> 35#include <linux/if_ether.h>
36#include <linux/ethtool.h> 36#include <linux/ethtool.h>
37#include <linux/sched.h>
37 38
38#include "igb.h" 39#include "igb.h"
39 40
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b449eea..cb90d640007a 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
66#include <linux/errno.h> 66#include <linux/errno.h>
67#include <linux/init.h> 67#include <linux/init.h>
68#include <linux/slab.h> 68#include <linux/slab.h>
69#include <linux/kref.h>
70#include <linux/usb.h> 69#include <linux/usb.h>
71#include <linux/device.h> 70#include <linux/device.h>
72#include <linux/crc32.h> 71#include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc51cbc..b54d3b48045e 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/kref.h>
122#include <linux/usb.h> 121#include <linux/usb.h>
123#include <linux/device.h> 122#include <linux/device.h>
124#include <linux/crc32.h> 123#include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d73353972..8d713ebac15b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/kref.h>
86#include <linux/usb.h> 85#include <linux/usb.h>
87#include <linux/device.h> 86#include <linux/device.h>
88#include <linux/crc32.h> 87#include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bcb88e7..c0e0bb9401d3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/kref.h>
54#include <linux/usb.h> 53#include <linux/usb.h>
55#include <linux/device.h> 54#include <linux/device.h>
56#include <linux/crc32.h> 55#include <linux/crc32.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1445e5865196..84db145d2b59 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -17,6 +17,7 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/gpio.h>
20 21
21#include <net/irda/irda.h> 22#include <net/irda/irda.h>
22#include <net/irda/irmod.h> 23#include <net/irda/irmod.h>
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
163} 164}
164 165
165/* 166/*
167 * Set the IrDA communications mode.
168 */
169static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
170{
171 if (si->pdata->transceiver_mode)
172 si->pdata->transceiver_mode(si->dev, mode);
173 else {
174 if (gpio_is_valid(si->pdata->gpio_pwdown))
175 gpio_set_value(si->pdata->gpio_pwdown,
176 !(mode & IR_OFF) ^
177 !si->pdata->gpio_pwdown_inverted);
178 pxa2xx_transceiver_mode(si->dev, mode);
179 }
180}
181
182/*
166 * Set the IrDA communications speed. 183 * Set the IrDA communications speed.
167 */ 184 */
168static int pxa_irda_set_speed(struct pxa_irda *si, int speed) 185static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
188 pxa_irda_disable_clk(si); 205 pxa_irda_disable_clk(si);
189 206
190 /* set board transceiver to SIR mode */ 207 /* set board transceiver to SIR mode */
191 si->pdata->transceiver_mode(si->dev, IR_SIRMODE); 208 pxa_irda_set_mode(si, IR_SIRMODE);
192 209
193 /* enable the STUART clock */ 210 /* enable the STUART clock */
194 pxa_irda_enable_sirclk(si); 211 pxa_irda_enable_sirclk(si);
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
222 ICCR0 = 0; 239 ICCR0 = 0;
223 240
224 /* set board transceiver to FIR mode */ 241 /* set board transceiver to FIR mode */
225 si->pdata->transceiver_mode(si->dev, IR_FIRMODE); 242 pxa_irda_set_mode(si, IR_FIRMODE);
226 243
227 /* enable the FICP clock */ 244 /* enable the FICP clock */
228 pxa_irda_enable_firclk(si); 245 pxa_irda_enable_firclk(si);
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si)
641 local_irq_restore(flags); 658 local_irq_restore(flags);
642 659
643 /* power off board transceiver */ 660 /* power off board transceiver */
644 si->pdata->transceiver_mode(si->dev, IR_OFF); 661 pxa_irda_set_mode(si, IR_OFF);
645 662
646 printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); 663 printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
647} 664}
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev)
849 if (err) 866 if (err)
850 goto err_mem_5; 867 goto err_mem_5;
851 868
852 if (si->pdata->startup) 869 if (gpio_is_valid(si->pdata->gpio_pwdown)) {
870 err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
871 if (err)
872 goto err_startup;
873 err = gpio_direction_output(si->pdata->gpio_pwdown,
874 !si->pdata->gpio_pwdown_inverted);
875 if (err) {
876 gpio_free(si->pdata->gpio_pwdown);
877 goto err_startup;
878 }
879 }
880
881 if (si->pdata->startup) {
853 err = si->pdata->startup(si->dev); 882 err = si->pdata->startup(si->dev);
854 if (err) 883 if (err)
855 goto err_startup; 884 goto err_startup;
885 }
886
887 if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
888 dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
856 889
857 dev->netdev_ops = &pxa_irda_netdev_ops; 890 dev->netdev_ops = &pxa_irda_netdev_ops;
858 891
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev)
903 if (dev) { 936 if (dev) {
904 struct pxa_irda *si = netdev_priv(dev); 937 struct pxa_irda *si = netdev_priv(dev);
905 unregister_netdev(dev); 938 unregister_netdev(dev);
939 if (gpio_is_valid(si->pdata->gpio_pwdown))
940 gpio_free(si->pdata->gpio_pwdown);
906 if (si->pdata->shutdown) 941 if (si->pdata->shutdown)
907 si->pdata->shutdown(si->dev); 942 si->pdata->shutdown(si->dev);
908 kfree(si->tx_buff.head); 943 kfree(si->tx_buff.head);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index fcf287b749db..99e1ec02a011 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -120,6 +120,7 @@
120#include <linux/module.h> 120#include <linux/module.h>
121#include <linux/delay.h> 121#include <linux/delay.h>
122#include <linux/init.h> 122#include <linux/init.h>
123#include <linux/sched.h>
123 124
124#include <net/irda/irda.h> 125#include <net/irda/irda.h>
125 126
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a5036f7c1923..a456578b8578 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -240,11 +240,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
240static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, 240static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
241 struct ixgbe_ring *tx_ring) 241 struct ixgbe_ring *tx_ring)
242{ 242{
243 int tc;
244 u32 txoff = IXGBE_TFCS_TXOFF; 243 u32 txoff = IXGBE_TFCS_TXOFF;
245 244
246#ifdef CONFIG_IXGBE_DCB 245#ifdef CONFIG_IXGBE_DCB
247 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 246 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
247 int tc;
248 int reg_idx = tx_ring->reg_idx; 248 int reg_idx = tx_ring->reg_idx;
249 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 249 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
250 250
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 0be14d702beb..c146304d8d6c 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -568,6 +568,16 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
568 iowrite16(*wptr++, ks->hw_addr); 568 iowrite16(*wptr++, ks->hw_addr);
569} 569}
570 570
571static void ks_disable_int(struct ks_net *ks)
572{
573 ks_wrreg16(ks, KS_IER, 0x0000);
574} /* ks_disable_int */
575
576static void ks_enable_int(struct ks_net *ks)
577{
578 ks_wrreg16(ks, KS_IER, ks->rc_ier);
579} /* ks_enable_int */
580
571/** 581/**
572 * ks_tx_fifo_space - return the available hardware buffer size. 582 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information 583 * @ks: The chip information
@@ -681,6 +691,47 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
681} 691}
682 692
683 693
694void ks_enable_qmu(struct ks_net *ks)
695{
696 u16 w;
697
698 w = ks_rdreg16(ks, KS_TXCR);
699 /* Enables QMU Transmit (TXCR). */
700 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
701
702 /*
703 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
704 * Enable
705 */
706
707 w = ks_rdreg16(ks, KS_RXQCR);
708 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
709
710 /* Enables QMU Receive (RXCR1). */
711 w = ks_rdreg16(ks, KS_RXCR1);
712 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
713 ks->enabled = true;
714} /* ks_enable_qmu */
715
716static void ks_disable_qmu(struct ks_net *ks)
717{
718 u16 w;
719
720 w = ks_rdreg16(ks, KS_TXCR);
721
722 /* Disables QMU Transmit (TXCR). */
723 w &= ~TXCR_TXE;
724 ks_wrreg16(ks, KS_TXCR, w);
725
726 /* Disables QMU Receive (RXCR1). */
727 w = ks_rdreg16(ks, KS_RXCR1);
728 w &= ~RXCR1_RXE ;
729 ks_wrreg16(ks, KS_RXCR1, w);
730
731 ks->enabled = false;
732
733} /* ks_disable_qmu */
734
684/** 735/**
685 * ks_read_qmu - read 1 pkt data from the QMU. 736 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information 737 * @ks: The chip information
@@ -752,7 +803,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) { 803 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2); 804 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */ 805 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4); 806 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
756 skb_put(skb, frame_hdr->len); 807 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev; 808 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev); 809 skb->protocol = eth_type_trans(skb, netdev);
@@ -861,7 +912,7 @@ static int ks_net_open(struct net_device *netdev)
861 ks_dbg(ks, "%s - entry\n", __func__); 912 ks_dbg(ks, "%s - entry\n", __func__);
862 913
863 /* reset the HW */ 914 /* reset the HW */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks); 915 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
865 916
866 if (err) { 917 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n", 918 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
@@ -869,6 +920,15 @@ static int ks_net_open(struct net_device *netdev)
869 return err; 920 return err;
870 } 921 }
871 922
923 /* wake up powermode to normal mode */
924 ks_set_powermode(ks, PMECR_PM_NORMAL);
925 mdelay(1); /* wait for normal mode to take effect */
926
927 ks_wrreg16(ks, KS_ISR, 0xffff);
928 ks_enable_int(ks);
929 ks_enable_qmu(ks);
930 netif_start_queue(ks->netdev);
931
872 if (netif_msg_ifup(ks)) 932 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name); 933 ks_dbg(ks, "network device %s up\n", netdev->name);
874 934
@@ -892,19 +952,14 @@ static int ks_net_stop(struct net_device *netdev)
892 952
893 netif_stop_queue(netdev); 953 netif_stop_queue(netdev);
894 954
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock); 955 mutex_lock(&ks->lock);
898 956
899 /* turn off the IRQs and ack any outstanding */ 957 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000); 958 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff); 959 ks_wrreg16(ks, KS_ISR, 0xffff);
902 960
903 /* shutdown RX process */ 961 /* shutdown RX/TX QMU */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000); 962 ks_disable_qmu(ks);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908 963
909 /* set powermode to soft power down to save power */ 964 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN); 965 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -929,17 +984,8 @@ static int ks_net_stop(struct net_device *netdev)
929 */ 984 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) 985static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{ 986{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937 /* reduce the tx interrupt occurrances. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */ 987 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid); 988 ks->txh.txw[0] = 0;
943 ks->txh.txw[1] = cpu_to_le16(len); 989 ks->txh.txw[1] = cpu_to_le16(len);
944 990
945 /* 1. set sudo-DMA mode */ 991 /* 1. set sudo-DMA mode */
@@ -957,16 +1003,6 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
957 ; 1003 ;
958} 1004}
959 1005
960static void ks_disable_int(struct ks_net *ks)
961{
962 ks_wrreg16(ks, KS_IER, 0x0000);
963} /* ks_disable_int */
964
965static void ks_enable_int(struct ks_net *ks)
966{
967 ks_wrreg16(ks, KS_IER, ks->rc_ier);
968} /* ks_enable_int */
969
970/** 1006/**
971 * ks_start_xmit - transmit packet 1007 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit 1008 * @skb : The buffer to transmit
@@ -1410,25 +1446,6 @@ static int ks_read_selftest(struct ks_net *ks)
1410 return ret; 1446 return ret;
1411} 1447}
1412 1448
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
1432static void ks_setup(struct ks_net *ks) 1449static void ks_setup(struct ks_net *ks)
1433{ 1450{
1434 u16 w; 1451 u16 w;
@@ -1463,7 +1480,7 @@ static void ks_setup(struct ks_net *ks)
1463 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; 1480 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1464 ks_wrreg16(ks, KS_TXCR, w); 1481 ks_wrreg16(ks, KS_TXCR, w);
1465 1482
1466 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE; 1483 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1467 1484
1468 if (ks->promiscuous) /* bPromiscuous */ 1485 if (ks->promiscuous) /* bPromiscuous */
1469 w |= (RXCR1_RXAE | RXCR1_RXINVF); 1486 w |= (RXCR1_RXAE | RXCR1_RXINVF);
@@ -1486,28 +1503,6 @@ static void ks_setup_int(struct ks_net *ks)
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI); 1503 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */ 1504} /* ks_setup_int */
1488 1505
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks) 1506static int ks_hw_init(struct ks_net *ks)
1512{ 1507{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES) 1508#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
@@ -1612,11 +1607,9 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1612 1607
1613 ks_soft_reset(ks, GRR_GSR); 1608 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks); 1609 ks_hw_init(ks);
1615 ks_disable(ks); 1610 ks_disable_qmu(ks);
1616 ks_setup(ks); 1611 ks_setup(ks);
1617 ks_setup_int(ks); 1612 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6); 1613 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621 1614
1622 data = ks_rdreg16(ks, KS_OBCR); 1615 data = ks_rdreg16(ks, KS_OBCR);
@@ -1658,6 +1651,7 @@ static int __devexit ks8851_remove(struct platform_device *pdev)
1658 struct ks_net *ks = netdev_priv(netdev); 1651 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1652 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660 1653
1654 kfree(ks->frame_head_info);
1661 unregister_netdev(netdev); 1655 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr); 1656 iounmap(ks->hw_addr);
1663 free_netdev(netdev); 1657 free_netdev(netdev);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index cee199ceba2f..3c16602172fc 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/mlx4/cmd.h> 35#include <linux/mlx4/cmd.h>
36#include <linux/cache.h>
36 37
37#include "fw.h" 38#include "fw.h"
38#include "icm.h" 39#include "icm.h"
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
698#define INIT_HCA_IN_SIZE 0x200 699#define INIT_HCA_IN_SIZE 0x200
699#define INIT_HCA_VERSION_OFFSET 0x000 700#define INIT_HCA_VERSION_OFFSET 0x000
700#define INIT_HCA_VERSION 2 701#define INIT_HCA_VERSION 2
702#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
701#define INIT_HCA_FLAGS_OFFSET 0x014 703#define INIT_HCA_FLAGS_OFFSET 0x014
702#define INIT_HCA_QPC_OFFSET 0x020 704#define INIT_HCA_QPC_OFFSET 0x020
703#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 705#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
735 737
736 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 738 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
737 739
740 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
741 (ilog2(cache_line_size()) - 4) << 5;
742
738#if defined(__LITTLE_ENDIAN) 743#if defined(__LITTLE_ENDIAN)
739 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 744 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
740#elif defined(__BIG_ENDIAN) 745#elif defined(__BIG_ENDIAN)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3dd481e77f92..291a505fd4fc 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1282,6 +1282,8 @@ static struct pci_device_id mlx4_pci_table[] = {
1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1285 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1286 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1285 { 0, } 1287 { 0, }
1286}; 1288};
1287 1289
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7384f59df615..e1237b802872 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1163,6 +1163,8 @@ struct netxen_adapter {
1163 u32 int_vec_bit; 1163 u32 int_vec_bit;
1164 u32 heartbit; 1164 u32 heartbit;
1165 1165
1166 u8 mac_addr[ETH_ALEN];
1167
1166 struct netxen_adapter_stats stats; 1168 struct netxen_adapter_stats stats;
1167 1169
1168 struct netxen_recv_context recv_ctx; 1170 struct netxen_recv_context recv_ctx;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 1c46da632125..17bb3818d84e 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -545,6 +545,8 @@ enum {
545#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) 545#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
546#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) 546#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
547#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) 547#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
548#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
549#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
548#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) 550#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
549#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) 551#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
550 552
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 3185a98b0917..52a3798d8d94 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -383,24 +383,51 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
383 383
384int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 384int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
385{ 385{
386 __u32 reg; 386 u32 mac_cfg;
387 u32 cnt = 0;
388 __u32 reg = 0x0200;
387 u32 port = adapter->physical_port; 389 u32 port = adapter->physical_port;
390 u16 board_type = adapter->ahw.board_type;
388 391
389 if (port > NETXEN_NIU_MAX_XG_PORTS) 392 if (port > NETXEN_NIU_MAX_XG_PORTS)
390 return -EINVAL; 393 return -EINVAL;
391 394
392 reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); 395 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
393 if (mode == NETXEN_NIU_PROMISC_MODE) 396 mac_cfg &= ~0x4;
394 reg = (reg | 0x2000UL); 397 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
395 else
396 reg = (reg & ~0x2000UL);
397 398
398 if (mode == NETXEN_NIU_ALLMULTI_MODE) 399 if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
399 reg = (reg | 0x1000UL); 400 (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
400 else 401 reg = (0x20 << port);
401 reg = (reg & ~0x1000UL); 402
403 NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
404
405 mdelay(10);
406
407 while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
408 mdelay(10);
409
410 if (cnt < 20) {
411
412 reg = NXRD32(adapter,
413 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
414
415 if (mode == NETXEN_NIU_PROMISC_MODE)
416 reg = (reg | 0x2000UL);
417 else
418 reg = (reg & ~0x2000UL);
419
420 if (mode == NETXEN_NIU_ALLMULTI_MODE)
421 reg = (reg | 0x1000UL);
422 else
423 reg = (reg & ~0x1000UL);
424
425 NXWR32(adapter,
426 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
427 }
402 428
403 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); 429 mac_cfg |= 0x4;
430 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
404 431
405 return 0; 432 return 0;
406} 433}
@@ -436,7 +463,7 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
436{ 463{
437 u32 val = 0; 464 u32 val = 0;
438 u16 port = adapter->physical_port; 465 u16 port = adapter->physical_port;
439 u8 *addr = adapter->netdev->dev_addr; 466 u8 *addr = adapter->mac_addr;
440 467
441 if (adapter->mc_enabled) 468 if (adapter->mc_enabled)
442 return 0; 469 return 0;
@@ -465,7 +492,7 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
465{ 492{
466 u32 val = 0; 493 u32 val = 0;
467 u16 port = adapter->physical_port; 494 u16 port = adapter->physical_port;
468 u8 *addr = adapter->netdev->dev_addr; 495 u8 *addr = adapter->mac_addr;
469 496
470 if (!adapter->mc_enabled) 497 if (!adapter->mc_enabled)
471 return 0; 498 return 0;
@@ -660,7 +687,7 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
660 687
661 list_splice_tail_init(&adapter->mac_list, &del_list); 688 list_splice_tail_init(&adapter->mac_list, &del_list);
662 689
663 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list); 690 nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
664 nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); 691 nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
665 692
666 if (netdev->flags & IFF_PROMISC) { 693 if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index e40b914d6faf..8a0904368e08 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -544,6 +544,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
544 continue; 544 continue;
545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ 545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
546 continue; 546 continue;
547 if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
548 continue;
547 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18)) 549 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
548 buf[i].data = 0x1020; 550 buf[i].data = 0x1020;
549 /* skip the function enable register */ 551 /* skip the function enable register */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 0b4a56a8c8d5..3bf78dbfbf0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -437,6 +437,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
437 netdev->dev_addr[i] = *(p + 5 - i); 437 netdev->dev_addr[i] = *(p + 5 - i);
438 438
439 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 439 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
440 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
440 441
441 /* set station address */ 442 /* set station address */
442 443
@@ -459,6 +460,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
459 netxen_napi_disable(adapter); 460 netxen_napi_disable(adapter);
460 } 461 }
461 462
463 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
462 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 464 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
463 adapter->macaddr_set(adapter, addr->sa_data); 465 adapter->macaddr_set(adapter, addr->sa_data);
464 466
@@ -956,7 +958,7 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
956 return err; 958 return err;
957 } 959 }
958 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 960 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
959 adapter->macaddr_set(adapter, netdev->dev_addr); 961 adapter->macaddr_set(adapter, adapter->mac_addr);
960 962
961 adapter->set_multi(netdev); 963 adapter->set_multi(netdev);
962 adapter->set_mtu(adapter, netdev->mtu); 964 adapter->set_mtu(adapter, netdev->mtu);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index c594e1946476..57fd483dbb1f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -111,6 +111,7 @@
111#include <linux/compiler.h> 111#include <linux/compiler.h>
112#include <linux/prefetch.h> 112#include <linux/prefetch.h>
113#include <linux/ethtool.h> 113#include <linux/ethtool.h>
114#include <linux/sched.h>
114#include <linux/timer.h> 115#include <linux/timer.h>
115#include <linux/if_vlan.h> 116#include <linux/if_vlan.h>
116#include <linux/rtnetlink.h> 117#include <linux/rtnetlink.h>
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 6d28b18e7e28..c1b3f09f452c 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -31,6 +31,7 @@ static const char *const version =
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/sched.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <linux/errno.h> 36#include <linux/errno.h>
36#include <linux/ioport.h> 37#include <linux/ioport.h>
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index ee366c5a8fa3..c9c70ab0cce0 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -36,6 +36,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
36 36
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/sched.h>
39#include <linux/string.h> 40#include <linux/string.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/errno.h> 42#include <linux/errno.h>
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index cee00ad49b57..49eb91b5f50c 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -188,7 +188,7 @@ static int sfn4111t_reset(struct efx_nic *efx)
188 efx_oword_t reg; 188 efx_oword_t reg;
189 189
190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */ 190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
191 mutex_lock(&efx->i2c_adap.bus_lock); 191 i2c_lock_adapter(&efx->i2c_adap);
192 192
193 /* Pull RST_N (GPIO 2) low then let it up again, setting the 193 /* Pull RST_N (GPIO 2) low then let it up again, setting the
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the 194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
@@ -204,7 +204,7 @@ static int sfn4111t_reset(struct efx_nic *efx)
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 204 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
205 msleep(1); 205 msleep(1);
206 206
207 mutex_unlock(&efx->i2c_adap.bus_lock); 207 i2c_unlock_adapter(&efx->i2c_adap);
208 208
209 ssleep(1); 209 ssleep(1);
210 return 0; 210 return 0;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 97949d0a699b..c072f7f36acf 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -52,6 +52,7 @@
52#include <linux/module.h> 52#include <linux/module.h>
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
55#include <linux/sched.h>
55#include <linux/string.h> 56#include <linux/string.h>
56#include <linux/timer.h> 57#include <linux/timer.h>
57#include <linux/errno.h> 58#include <linux/errno.h>
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 38a508b4aad9..b27156eaf267 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -73,6 +73,7 @@ static const char * const boot_msg =
73 73
74/* Include files */ 74/* Include files */
75 75
76#include <linux/capability.h>
76#include <linux/module.h> 77#include <linux/module.h>
77#include <linux/kernel.h> 78#include <linux/kernel.h>
78#include <linux/errno.h> 79#include <linux/errno.h>
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 01f6811f1324..8f5414348e86 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -37,6 +37,7 @@
37#include <linux/crc32.h> 37#include <linux/crc32.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/debugfs.h> 39#include <linux/debugfs.h>
40#include <linux/sched.h>
40#include <linux/seq_file.h> 41#include <linux/seq_file.h>
41#include <linux/mii.h> 42#include <linux/mii.h>
42#include <asm/irq.h> 43#include <asm/irq.h>
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index e17c535a577e..fe3cebb984de 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -67,6 +67,7 @@
67#include <asm/system.h> 67#include <asm/system.h>
68#include <asm/uaccess.h> 68#include <asm/uaccess.h>
69#include <linux/bitops.h> 69#include <linux/bitops.h>
70#include <linux/sched.h>
70#include <linux/string.h> 71#include <linux/string.h>
71#include <linux/mm.h> 72#include <linux/mm.h>
72#include <linux/interrupt.h> 73#include <linux/interrupt.h>
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index c2f14dc9ba28..9542995ba667 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -416,13 +416,8 @@ static void init_dma_desc_rings(struct net_device *dev)
416 unsigned int txsize = priv->dma_tx_size; 416 unsigned int txsize = priv->dma_tx_size;
417 unsigned int rxsize = priv->dma_rx_size; 417 unsigned int rxsize = priv->dma_rx_size;
418 unsigned int bfsize = priv->dma_buf_sz; 418 unsigned int bfsize = priv->dma_buf_sz;
419 int buff2_needed = 0; 419 int buff2_needed = 0, dis_ic = 0;
420 int dis_ic = 0;
421 420
422#ifdef CONFIG_STMMAC_TIMER
423 /* Using Timers disable interrupts on completion for the reception */
424 dis_ic = 1;
425#endif
426 /* Set the Buffer size according to the MTU; 421 /* Set the Buffer size according to the MTU;
427 * indeed, in case of jumbo we need to bump-up the buffer sizes. 422 * indeed, in case of jumbo we need to bump-up the buffer sizes.
428 */ 423 */
@@ -437,6 +432,11 @@ static void init_dma_desc_rings(struct net_device *dev)
437 else 432 else
438 bfsize = DMA_BUFFER_SIZE; 433 bfsize = DMA_BUFFER_SIZE;
439 434
435#ifdef CONFIG_STMMAC_TIMER
436 /* Disable interrupts on completion for the reception if timer is on */
437 if (likely(priv->tm->enable))
438 dis_ic = 1;
439#endif
440 /* If the MTU exceeds 8k so use the second buffer in the chain */ 440 /* If the MTU exceeds 8k so use the second buffer in the chain */
441 if (bfsize >= BUF_SIZE_8KiB) 441 if (bfsize >= BUF_SIZE_8KiB)
442 buff2_needed = 1; 442 buff2_needed = 1;
@@ -809,20 +809,22 @@ static void stmmac_tx(struct stmmac_priv *priv)
809 809
810static inline void stmmac_enable_irq(struct stmmac_priv *priv) 810static inline void stmmac_enable_irq(struct stmmac_priv *priv)
811{ 811{
812#ifndef CONFIG_STMMAC_TIMER 812#ifdef CONFIG_STMMAC_TIMER
813 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); 813 if (likely(priv->tm->enable))
814#else 814 priv->tm->timer_start(tmrate);
815 priv->tm->timer_start(tmrate); 815 else
816#endif 816#endif
817 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
817} 818}
818 819
819static inline void stmmac_disable_irq(struct stmmac_priv *priv) 820static inline void stmmac_disable_irq(struct stmmac_priv *priv)
820{ 821{
821#ifndef CONFIG_STMMAC_TIMER 822#ifdef CONFIG_STMMAC_TIMER
822 writel(0, priv->dev->base_addr + DMA_INTR_ENA); 823 if (likely(priv->tm->enable))
823#else 824 priv->tm->timer_stop();
824 priv->tm->timer_stop(); 825 else
825#endif 826#endif
827 writel(0, priv->dev->base_addr + DMA_INTR_ENA);
826} 828}
827 829
828static int stmmac_has_work(struct stmmac_priv *priv) 830static int stmmac_has_work(struct stmmac_priv *priv)
@@ -1031,22 +1033,23 @@ static int stmmac_open(struct net_device *dev)
1031 } 1033 }
1032 1034
1033#ifdef CONFIG_STMMAC_TIMER 1035#ifdef CONFIG_STMMAC_TIMER
1034 priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 1036 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
1035 if (unlikely(priv->tm == NULL)) { 1037 if (unlikely(priv->tm == NULL)) {
1036 pr_err("%s: ERROR: timer memory alloc failed \n", __func__); 1038 pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
1037 return -ENOMEM; 1039 return -ENOMEM;
1038 } 1040 }
1039 priv->tm->freq = tmrate; 1041 priv->tm->freq = tmrate;
1040 1042
1041 /* Test if the HW timer can be actually used. 1043 /* Test if the external timer can be actually used.
1042 * In case of failure continue with no timer. */ 1044 * In case of failure continue without timer. */
1043 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { 1045 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1044 pr_warning("stmmaceth: cannot attach the HW timer\n"); 1046 pr_warning("stmmaceth: cannot attach the external timer.\n");
1045 tmrate = 0; 1047 tmrate = 0;
1046 priv->tm->freq = 0; 1048 priv->tm->freq = 0;
1047 priv->tm->timer_start = stmmac_no_timer_started; 1049 priv->tm->timer_start = stmmac_no_timer_started;
1048 priv->tm->timer_stop = stmmac_no_timer_stopped; 1050 priv->tm->timer_stop = stmmac_no_timer_stopped;
1049 } 1051 } else
1052 priv->tm->enable = 1;
1050#endif 1053#endif
1051 1054
1052 /* Create and initialize the TX/RX descriptors chains. */ 1055 /* Create and initialize the TX/RX descriptors chains. */
@@ -1322,9 +1325,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1322 1325
1323 /* Interrupt on completition only for the latest segment */ 1326 /* Interrupt on completition only for the latest segment */
1324 priv->mac_type->ops->close_tx_desc(desc); 1327 priv->mac_type->ops->close_tx_desc(desc);
1328
1325#ifdef CONFIG_STMMAC_TIMER 1329#ifdef CONFIG_STMMAC_TIMER
1326 /* Clean IC while using timers */ 1330 /* Clean IC while using timer */
1327 priv->mac_type->ops->clear_tx_ic(desc); 1331 if (likely(priv->tm->enable))
1332 priv->mac_type->ops->clear_tx_ic(desc);
1328#endif 1333#endif
1329 /* To avoid raise condition */ 1334 /* To avoid raise condition */
1330 priv->mac_type->ops->set_tx_owner(first); 1335 priv->mac_type->ops->set_tx_owner(first);
@@ -2028,7 +2033,8 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2028 2033
2029#ifdef CONFIG_STMMAC_TIMER 2034#ifdef CONFIG_STMMAC_TIMER
2030 priv->tm->timer_stop(); 2035 priv->tm->timer_stop();
2031 dis_ic = 1; 2036 if (likely(priv->tm->enable))
2037 dis_ic = 1;
2032#endif 2038#endif
2033 napi_disable(&priv->napi); 2039 napi_disable(&priv->napi);
2034 2040
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index b838c6582077..679f61ffb1f8 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -63,7 +63,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
63 63
64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); 64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
65 if (stmmac_rtc == NULL) { 65 if (stmmac_rtc == NULL) {
66 pr_error("open rtc device failed\n"); 66 pr_err("open rtc device failed\n");
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
69 69
@@ -71,7 +71,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
71 71
72 /* Periodic mode is not supported */ 72 /* Periodic mode is not supported */
73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { 73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
74 pr_error("set periodic failed\n"); 74 pr_err("set periodic failed\n");
75 rtc_irq_unregister(stmmac_rtc, &stmmac_task); 75 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
76 rtc_class_close(stmmac_rtc); 76 rtc_class_close(stmmac_rtc);
77 return -1; 77 return -1;
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
index f795cae33725..6863590d184b 100644
--- a/drivers/net/stmmac/stmmac_timer.h
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -26,6 +26,7 @@ struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq); 26 void (*timer_start) (unsigned int new_freq);
27 void (*timer_stop) (void); 27 void (*timer_stop) (void);
28 unsigned int freq; 28 unsigned int freq;
29 unsigned int enable;
29}; 30};
30 31
31/* Open the HW timer device and return 0 in case of success */ 32/* Open the HW timer device and return 0 in case of success */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1927b3de9eec..61640b99b705 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -38,6 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
40#include <linux/in.h> 40#include <linux/in.h>
41#include <linux/sched.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/string.h> 43#include <linux/string.h>
43#include <linux/delay.h> 44#include <linux/delay.h>
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 6a3c7510afd9..75fa32e34fd0 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -108,6 +108,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
108#define IBMTR_DEBUG_MESSAGES 0 108#define IBMTR_DEBUG_MESSAGES 0
109 109
110#include <linux/module.h> 110#include <linux/module.h>
111#include <linux/sched.h>
111 112
112#ifdef PCMCIA /* required for ibmtr_cs.c to build */ 113#ifdef PCMCIA /* required for ibmtr_cs.c to build */
113#undef MODULE /* yes, really */ 114#undef MODULE /* yes, really */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d6d345229fe9..5921f5bdd764 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -108,6 +108,7 @@ static const int multicast_filter_limit = 32;
108 108
109#include <linux/module.h> 109#include <linux/module.h>
110#include <linux/kernel.h> 110#include <linux/kernel.h>
111#include <linux/sched.h>
111#include <linux/string.h> 112#include <linux/string.h>
112#include <linux/timer.h> 113#include <linux/timer.h>
113#include <linux/errno.h> 114#include <linux/errno.h>
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index fa4e58196c21..43bc3fcc0d85 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -378,7 +378,7 @@ static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
378} 378}
379 379
380#define DUMP(buf_, len_) \ 380#define DUMP(buf_, len_) \
381 dbg_dump(__LINE__, __func__, buf_, len_) 381 dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_)
382 382
383#define DUMP1(buf_, len_) \ 383#define DUMP1(buf_, len_) \
384 do { \ 384 do { \
@@ -1363,7 +1363,7 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1363 /* reset the rts and dtr */ 1363 /* reset the rts and dtr */
1364 /* do the actual close */ 1364 /* do the actual close */
1365 serial->open_count--; 1365 serial->open_count--;
1366 kref_put(&serial->parent->ref, hso_serial_ref_free); 1366
1367 if (serial->open_count <= 0) { 1367 if (serial->open_count <= 0) {
1368 serial->open_count = 0; 1368 serial->open_count = 0;
1369 spin_lock_irq(&serial->serial_lock); 1369 spin_lock_irq(&serial->serial_lock);
@@ -1383,6 +1383,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1383 usb_autopm_put_interface(serial->parent->interface); 1383 usb_autopm_put_interface(serial->parent->interface);
1384 1384
1385 mutex_unlock(&serial->parent->mutex); 1385 mutex_unlock(&serial->parent->mutex);
1386
1387 kref_put(&serial->parent->ref, hso_serial_ref_free);
1386} 1388}
1387 1389
1388/* close the requested serial port */ 1390/* close the requested serial port */
@@ -1527,7 +1529,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1527 dev_warn(&usb->dev, 1529 dev_warn(&usb->dev,
1528 "hso received invalid serial state notification\n"); 1530 "hso received invalid serial state notification\n");
1529 DUMP(serial_state_notification, 1531 DUMP(serial_state_notification,
1530 sizeof(hso_serial_state_notifation)) 1532 sizeof(struct hso_serial_state_notification));
1531 } else { 1533 } else {
1532 1534
1533 UART_state_bitmap = le16_to_cpu(serial_state_notification-> 1535 UART_state_bitmap = le16_to_cpu(serial_state_notification->
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 0caa8008c51c..f56dec6119c3 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -362,12 +362,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
362 retval = -EINVAL; 362 retval = -EINVAL;
363 goto halt_fail_and_release; 363 goto halt_fail_and_release;
364 } 364 }
365 dev->hard_mtu = tmp;
366 net->mtu = dev->hard_mtu - net->hard_header_len;
367 dev_warn(&intf->dev, 365 dev_warn(&intf->dev,
368 "dev can't take %u byte packets (max %u), " 366 "dev can't take %u byte packets (max %u), "
369 "adjusting MTU to %u\n", 367 "adjusting MTU to %u\n",
370 dev->hard_mtu, tmp, net->mtu); 368 dev->hard_mtu, tmp, tmp - net->hard_header_len);
369 dev->hard_mtu = tmp;
370 net->mtu = dev->hard_mtu - net->hard_header_len;
371 } 371 }
372 372
373 /* REVISIT: peripheral "alignment" request is ignored ... */ 373 /* REVISIT: peripheral "alignment" request is ignored ... */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ade5b344f75d..52af5017c46b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -210,32 +210,29 @@ rx_drop:
210static struct net_device_stats *veth_get_stats(struct net_device *dev) 210static struct net_device_stats *veth_get_stats(struct net_device *dev)
211{ 211{
212 struct veth_priv *priv; 212 struct veth_priv *priv;
213 struct net_device_stats *dev_stats;
214 int cpu; 213 int cpu;
215 struct veth_net_stats *stats; 214 struct veth_net_stats *stats, total = {0};
216 215
217 priv = netdev_priv(dev); 216 priv = netdev_priv(dev);
218 dev_stats = &dev->stats;
219
220 dev_stats->rx_packets = 0;
221 dev_stats->tx_packets = 0;
222 dev_stats->rx_bytes = 0;
223 dev_stats->tx_bytes = 0;
224 dev_stats->tx_dropped = 0;
225 dev_stats->rx_dropped = 0;
226 217
227 for_each_online_cpu(cpu) { 218 for_each_possible_cpu(cpu) {
228 stats = per_cpu_ptr(priv->stats, cpu); 219 stats = per_cpu_ptr(priv->stats, cpu);
229 220
230 dev_stats->rx_packets += stats->rx_packets; 221 total.rx_packets += stats->rx_packets;
231 dev_stats->tx_packets += stats->tx_packets; 222 total.tx_packets += stats->tx_packets;
232 dev_stats->rx_bytes += stats->rx_bytes; 223 total.rx_bytes += stats->rx_bytes;
233 dev_stats->tx_bytes += stats->tx_bytes; 224 total.tx_bytes += stats->tx_bytes;
234 dev_stats->tx_dropped += stats->tx_dropped; 225 total.tx_dropped += stats->tx_dropped;
235 dev_stats->rx_dropped += stats->rx_dropped; 226 total.rx_dropped += stats->rx_dropped;
236 } 227 }
237 228 dev->stats.rx_packets = total.rx_packets;
238 return dev_stats; 229 dev->stats.tx_packets = total.tx_packets;
230 dev->stats.rx_bytes = total.rx_bytes;
231 dev->stats.tx_bytes = total.tx_bytes;
232 dev->stats.tx_dropped = total.tx_dropped;
233 dev->stats.rx_dropped = total.rx_dropped;
234
235 return &dev->stats;
239} 236}
240 237
241static int veth_open(struct net_device *dev) 238static int veth_open(struct net_device *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb8b52d0d1ce..b9e002fccbca 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,7 +22,6 @@
22#include <linux/ethtool.h> 22#include <linux/ethtool.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/virtio.h> 24#include <linux/virtio.h>
25#include <linux/virtio_ids.h>
26#include <linux/virtio_net.h> 25#include <linux/virtio_net.h>
27#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
28#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 9693b0fd323d..0bd898c94759 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/capability.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/string.h> 22#include <linux/string.h>
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 66360a2a14c2..e2c33c06190b 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -76,6 +76,7 @@
76 76
77#include <linux/module.h> 77#include <linux/module.h>
78#include <linux/kernel.h> 78#include <linux/kernel.h>
79#include <linux/sched.h>
79#include <linux/slab.h> 80#include <linux/slab.h>
80#include <linux/poll.h> 81#include <linux/poll.h>
81#include <linux/fs.h> 82#include <linux/fs.h>
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 2573c18b6aa5..cd8cb95c5bd7 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -84,6 +84,7 @@
84#include <linux/kernel.h> /* printk(), and other useful stuff */ 84#include <linux/kernel.h> /* printk(), and other useful stuff */
85#include <linux/module.h> 85#include <linux/module.h>
86#include <linux/string.h> /* inline memset(), etc. */ 86#include <linux/string.h> /* inline memset(), etc. */
87#include <linux/sched.h>
87#include <linux/slab.h> /* kmalloc(), kfree() */ 88#include <linux/slab.h> /* kmalloc(), kfree() */
88#include <linux/stddef.h> /* offsetof(), etc. */ 89#include <linux/stddef.h> /* offsetof(), etc. */
89#include <linux/wanrouter.h> /* WAN router definitions */ 90#include <linux/wanrouter.h> /* WAN router definitions */
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 81c8aec9df92..07d00b4cf48a 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -81,6 +81,7 @@
81 */ 81 */
82 82
83#include <linux/module.h> 83#include <linux/module.h>
84#include <linux/sched.h>
84#include <linux/types.h> 85#include <linux/types.h>
85#include <linux/errno.h> 86#include <linux/errno.h>
86#include <linux/list.h> 87#include <linux/list.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3e90eb816181..beda387f2fc7 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/version.h> 20#include <linux/version.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/sched.h>
22#include <linux/ioport.h> 23#include <linux/ioport.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/if.h> 25#include <linux/if.h>
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 83da596e2052..58c66819f39b 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/capability.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/fcntl.h> 24#include <linux/fcntl.h>
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index a52f29c72c33..f1340faaf022 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/capability.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/fcntl.h> 22#include <linux/fcntl.h>
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index dbbf0d11e18e..9b9044400218 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -30,6 +30,7 @@
30#include "xmit.h" 30#include "xmit.h"
31 31
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33 34
34 35
35static u16 generate_cookie(struct b43_pio_txqueue *q, 36static u16 generate_cookie(struct b43_pio_txqueue *q,
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b3d4c4..4b60148a5e61 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -37,6 +37,7 @@
37#include <linux/firmware.h> 37#include <linux/firmware.h>
38#include <linux/wireless.h> 38#include <linux/wireless.h>
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40#include <linux/sched.h>
40#include <linux/skbuff.h> 41#include <linux/skbuff.h>
41#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
42#include <net/dst.h> 43#include <net/dst.h>
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 11319ec2d64a..aaf227203a98 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/sched.h>
34#include <linux/types.h> 35#include <linux/types.h>
35 36
36#include "b43legacy.h" 37#include "b43legacy.h"
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 6fa14a4e4b53..4dfb40a84c96 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,6 +1,7 @@
1/* Host AP driver Info Frame processing (part of hostap.o module) */ 1/* Host AP driver Info Frame processing (part of hostap.o module) */
2 2
3#include <linux/if_arp.h> 3#include <linux/if_arp.h>
4#include <linux/sched.h>
4#include "hostap_wlan.h" 5#include "hostap_wlan.h"
5#include "hostap.h" 6#include "hostap.h"
6#include "hostap_ap.h" 7#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3f2bda881a4f..9419cebca8a5 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,6 +1,7 @@
1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ 1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/sched.h>
4#include <linux/ethtool.h> 5#include <linux/ethtool.h>
5#include <linux/if_arp.h> 6#include <linux/if_arp.h>
6#include <net/lib80211.h> 7#include <net/lib80211.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0d30a9e2d3df..a6ca536e44f8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -30,6 +30,7 @@
30 30
31******************************************************************************/ 31******************************************************************************/
32 32
33#include <linux/sched.h>
33#include "ipw2200.h" 34#include "ipw2200.h"
34 35
35 36
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 231c833f6469..f059b49dc691 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/netdevice.h> 35#include <linux/netdevice.h>
35#include <linux/wireless.h> 36#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index a22a0501c190..6f703a041847 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/netdevice.h> 35#include <linux/netdevice.h>
35#include <linux/wireless.h> 36#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 524e7e4c51d1..6e6f516ba404 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,6 +29,7 @@
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/sched.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
34#include <linux/wireless.h> 35#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 877af443aac0..921dc4a26fe2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/netdevice.h> 38#include <linux/netdevice.h>
38#include <linux/wireless.h> 39#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 484d5c1a7312..2dc928755454 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h>
32#include <net/mac80211.h> 33#include <net/mac80211.h>
33 34
34#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6cd8da..a6856daf14cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/sched.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32 33
33#include "iwl-dev.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index c18907544701..fb9bcfa6d947 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -28,6 +28,7 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/sched.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
33#include "iwl-dev.h" 34#include "iwl-dev.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 837a193221cf..d00a80334095 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/netdevice.h> 38#include <linux/netdevice.h>
38#include <linux/wireless.h> 39#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0ac99a..f3c55658225b 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/sched.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
27#include <linux/wireless.h> 28#include <linux/wireless.h>
28#include <linux/ieee80211.h> 29#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa2605f..84158b6d35d8 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -40,6 +40,7 @@
40#include <linux/wireless.h> 40#include <linux/wireless.h>
41#include <linux/etherdevice.h> 41#include <linux/etherdevice.h>
42#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
43#include <linux/sched.h>
43 44
44#include "iwm.h" 45#include "iwm.h"
45#include "bus.h" 46#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index d668e4756324..222eb2cf1b30 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/sched.h>
41#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
42#include <linux/wireless.h> 43#include <linux/wireless.h>
43 44
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc16593..771a301003c9 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/sched.h>
41#include <linux/etherdevice.h> 42#include <linux/etherdevice.h>
42#include <linux/wireless.h> 43#include <linux/wireless.h>
43#include <linux/ieee80211.h> 44#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 685098148e10..0a324dcd264c 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -6,6 +6,7 @@
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <net/lib80211.h> 7#include <net/lib80211.h>
8#include <linux/kfifo.h> 8#include <linux/kfifo.h>
9#include <linux/sched.h>
9#include "host.h" 10#include "host.h"
10#include "hostcmd.h" 11#include "hostcmd.h"
11#include "decl.h" 12#include "decl.h"
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7a0a8d..8c3766a6e8e7 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -3,6 +3,7 @@
3 */ 3 */
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6#include <linux/sched.h>
6 7
7#include "hostcmd.h" 8#include "hostcmd.h"
8#include "radiotap.h" 9#include "radiotap.h"
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4c97c6ad6f5d..bc08464d8323 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -19,6 +19,7 @@
19 * 19 *
20 */ 20 */
21 21
22#include <linux/capability.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/if_arp.h> 25#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e26d7b3ceab5..2505be56ae39 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -23,6 +23,7 @@
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/ethtool.h> 24#include <linux/ethtool.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/sched.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/if_arp.h> 29#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index f7c677e2094d..69d2f882fd06 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -20,6 +20,7 @@
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/sched.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/system.h> 26#include <asm/system.h>
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88cd58eb3b9f..1c88c2ea59aa 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2879,7 +2879,7 @@ static int write_essid(struct file *file, const char __user *buffer,
2879 unsigned long count, void *data) 2879 unsigned long count, void *data)
2880{ 2880{
2881 static char proc_essid[33]; 2881 static char proc_essid[33];
2882 int len = count; 2882 unsigned int len = count;
2883 2883
2884 if (len > 32) 2884 if (len > 32)
2885 len = 32; 2885 len = 32;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7b3ee8c2eaef..68bc9bb1dbf9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/poll.h> 29#include <linux/poll.h>
30#include <linux/sched.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31 32
32#include "rt2x00.h" 33#include "rt2x00.h"
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index bacaa536fd51..4b22ba568b19 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,6 +97,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
97} 97}
98EXPORT_SYMBOL(of_mdiobus_register); 98EXPORT_SYMBOL(of_mdiobus_register);
99 99
100/* Helper function for of_phy_find_device */
101static int of_phy_match(struct device *dev, void *phy_np)
102{
103 return dev_archdata_get_node(&dev->archdata) == phy_np;
104}
105
100/** 106/**
101 * of_phy_find_device - Give a PHY node, find the phy_device 107 * of_phy_find_device - Give a PHY node, find the phy_device
102 * @phy_np: Pointer to the phy's device tree node 108 * @phy_np: Pointer to the phy's device tree node
@@ -106,15 +112,10 @@ EXPORT_SYMBOL(of_mdiobus_register);
106struct phy_device *of_phy_find_device(struct device_node *phy_np) 112struct phy_device *of_phy_find_device(struct device_node *phy_np)
107{ 113{
108 struct device *d; 114 struct device *d;
109 int match(struct device *dev, void *phy_np)
110 {
111 return dev_archdata_get_node(&dev->archdata) == phy_np;
112 }
113
114 if (!phy_np) 115 if (!phy_np)
115 return NULL; 116 return NULL;
116 117
117 d = bus_find_device(&mdio_bus_type, NULL, phy_np, match); 118 d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
118 return d ? to_phy_device(d) : NULL; 119 return d ? to_phy_device(d) : NULL;
119} 120}
120EXPORT_SYMBOL(of_phy_find_device); 121EXPORT_SYMBOL(of_phy_find_device);
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..5df60a6b6776 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
35/* atomic_t because wait_event checks it outside of buffer_mutex */ 35/* atomic_t because wait_event checks it outside of buffer_mutex */
36static atomic_t buffer_ready = ATOMIC_INIT(0); 36static atomic_t buffer_ready = ATOMIC_INIT(0);
37 37
38/* Add an entry to the event buffer. When we 38/*
39 * get near to the end we wake up the process 39 * Add an entry to the event buffer. When we get near to the end we
40 * sleeping on the read() of the file. 40 * wake up the process sleeping on the read() of the file. To protect
41 * the event_buffer this function may only be called when buffer_mutex
42 * is set.
41 */ 43 */
42void add_event_entry(unsigned long value) 44void add_event_entry(unsigned long value)
43{ 45{
46 /*
47 * This shouldn't happen since all workqueues or handlers are
48 * canceled or flushed before the event buffer is freed.
49 */
50 if (!event_buffer) {
51 WARN_ON_ONCE(1);
52 return;
53 }
54
44 if (buffer_pos == buffer_size) { 55 if (buffer_pos == buffer_size) {
45 atomic_inc(&oprofile_stats.event_lost_overflow); 56 atomic_inc(&oprofile_stats.event_lost_overflow);
46 return; 57 return;
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
69 80
70int alloc_event_buffer(void) 81int alloc_event_buffer(void)
71{ 82{
72 int err = -ENOMEM;
73 unsigned long flags; 83 unsigned long flags;
74 84
75 spin_lock_irqsave(&oprofilefs_lock, flags); 85 spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
80 if (buffer_watershed >= buffer_size) 90 if (buffer_watershed >= buffer_size)
81 return -EINVAL; 91 return -EINVAL;
82 92
93 buffer_pos = 0;
83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); 94 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
84 if (!event_buffer) 95 if (!event_buffer)
85 goto out; 96 return -ENOMEM;
86 97
87 err = 0; 98 return 0;
88out:
89 return err;
90} 99}
91 100
92 101
93void free_event_buffer(void) 102void free_event_buffer(void)
94{ 103{
104 mutex_lock(&buffer_mutex);
95 vfree(event_buffer); 105 vfree(event_buffer);
96 106 buffer_pos = 0;
97 event_buffer = NULL; 107 event_buffer = NULL;
108 mutex_unlock(&buffer_mutex);
98} 109}
99 110
100 111
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
167 178
168 mutex_lock(&buffer_mutex); 179 mutex_lock(&buffer_mutex);
169 180
181 /* May happen if the buffer is freed during pending reads. */
182 if (!event_buffer) {
183 retval = -EINTR;
184 goto out;
185 }
186
170 atomic_set(&buffer_ready, 0); 187 atomic_set(&buffer_ready, 0);
171 188
172 retval = -EFAULT; 189 retval = -EFAULT;
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 14bbaa17e2ca..e5f8fc164fd3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -175,15 +175,6 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
175 int ret = 0; 175 int ret = 0;
176 176
177 drhd = (struct acpi_dmar_hardware_unit *)header; 177 drhd = (struct acpi_dmar_hardware_unit *)header;
178 if (!drhd->address) {
179 /* Promote an attitude of violence to a BIOS engineer today */
180 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
181 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
182 dmi_get_system_info(DMI_BIOS_VENDOR),
183 dmi_get_system_info(DMI_BIOS_VERSION),
184 dmi_get_system_info(DMI_PRODUCT_VERSION));
185 return -ENODEV;
186 }
187 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 178 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
188 if (!dmaru) 179 if (!dmaru)
189 return -ENOMEM; 180 return -ENOMEM;
@@ -354,6 +345,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
354 struct acpi_dmar_hardware_unit *drhd; 345 struct acpi_dmar_hardware_unit *drhd;
355 struct acpi_dmar_reserved_memory *rmrr; 346 struct acpi_dmar_reserved_memory *rmrr;
356 struct acpi_dmar_atsr *atsr; 347 struct acpi_dmar_atsr *atsr;
348 struct acpi_dmar_rhsa *rhsa;
357 349
358 switch (header->type) { 350 switch (header->type) {
359 case ACPI_DMAR_TYPE_HARDWARE_UNIT: 351 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -375,6 +367,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
375 atsr = container_of(header, struct acpi_dmar_atsr, header); 367 atsr = container_of(header, struct acpi_dmar_atsr, header);
376 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); 368 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
377 break; 369 break;
370 case ACPI_DMAR_HARDWARE_AFFINITY:
371 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
372 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
373 (unsigned long long)rhsa->base_address,
374 rhsa->proximity_domain);
375 break;
378 } 376 }
379} 377}
380 378
@@ -459,9 +457,13 @@ parse_dmar_table(void)
459 ret = dmar_parse_one_atsr(entry_header); 457 ret = dmar_parse_one_atsr(entry_header);
460#endif 458#endif
461 break; 459 break;
460 case ACPI_DMAR_HARDWARE_AFFINITY:
461 /* We don't do anything with RHSA (yet?) */
462 break;
462 default: 463 default:
463 printk(KERN_WARNING PREFIX 464 printk(KERN_WARNING PREFIX
464 "Unknown DMAR structure type\n"); 465 "Unknown DMAR structure type %d\n",
466 entry_header->type);
465 ret = 0; /* for forward compatibility */ 467 ret = 0; /* for forward compatibility */
466 break; 468 break;
467 } 469 }
@@ -580,12 +582,50 @@ int __init dmar_table_init(void)
580 return 0; 582 return 0;
581} 583}
582 584
585int __init check_zero_address(void)
586{
587 struct acpi_table_dmar *dmar;
588 struct acpi_dmar_header *entry_header;
589 struct acpi_dmar_hardware_unit *drhd;
590
591 dmar = (struct acpi_table_dmar *)dmar_tbl;
592 entry_header = (struct acpi_dmar_header *)(dmar + 1);
593
594 while (((unsigned long)entry_header) <
595 (((unsigned long)dmar) + dmar_tbl->length)) {
596 /* Avoid looping forever on bad ACPI tables */
597 if (entry_header->length == 0) {
598 printk(KERN_WARNING PREFIX
599 "Invalid 0-length structure\n");
600 return 0;
601 }
602
603 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
604 drhd = (void *)entry_header;
605 if (!drhd->address) {
606 /* Promote an attitude of violence to a BIOS engineer today */
607 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
608 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
609 dmi_get_system_info(DMI_BIOS_VENDOR),
610 dmi_get_system_info(DMI_BIOS_VERSION),
611 dmi_get_system_info(DMI_PRODUCT_VERSION));
612 return 0;
613 }
614 break;
615 }
616
617 entry_header = ((void *)entry_header + entry_header->length);
618 }
619 return 1;
620}
621
583void __init detect_intel_iommu(void) 622void __init detect_intel_iommu(void)
584{ 623{
585 int ret; 624 int ret;
586 625
587 ret = dmar_table_detect(); 626 ret = dmar_table_detect();
588 627 if (ret)
628 ret = check_zero_address();
589 { 629 {
590#ifdef CONFIG_INTR_REMAP 630#ifdef CONFIG_INTR_REMAP
591 struct acpi_table_dmar *dmar; 631 struct acpi_table_dmar *dmar;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index a9d926b7d805..e7be66dbac21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -406,7 +406,6 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
406 __func__, status); 406 __func__, status);
407 return retval; 407 return retval;
408 } 408 }
409 info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0';
410 409
411 if (info->current_status && (info->valid & ACPI_VALID_HID) && 410 if (info->current_status && (info->valid & ACPI_VALID_HID) &&
412 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || 411 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 53836001d511..9c6a9fd26812 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -32,6 +32,7 @@
32#include <asm/io.h> /* for read? and write? functions */ 32#include <asm/io.h> /* for read? and write? functions */
33#include <linux/delay.h> /* for delays */ 33#include <linux/delay.h> /* for delays */
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/sched.h> /* for signal_pending() */
35 36
36#define MY_NAME "cpqphp" 37#define MY_NAME "cpqphp"
37 38
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 855dd7ca47f3..1840a0578a42 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -48,6 +48,7 @@
48 48
49#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 49#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
50#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 50#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
51#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
51 52
52#define IOAPIC_RANGE_START (0xfee00000) 53#define IOAPIC_RANGE_START (0xfee00000)
53#define IOAPIC_RANGE_END (0xfeefffff) 54#define IOAPIC_RANGE_END (0xfeefffff)
@@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p)
94/* global iommu list, set NULL for ignored DMAR units */ 95/* global iommu list, set NULL for ignored DMAR units */
95static struct intel_iommu **g_iommus; 96static struct intel_iommu **g_iommus;
96 97
98static void __init check_tylersburg_isoch(void);
97static int rwbf_quirk; 99static int rwbf_quirk;
98 100
99/* 101/*
@@ -1934,6 +1936,9 @@ error:
1934} 1936}
1935 1937
1936static int iommu_identity_mapping; 1938static int iommu_identity_mapping;
1939#define IDENTMAP_ALL 1
1940#define IDENTMAP_GFX 2
1941#define IDENTMAP_AZALIA 4
1937 1942
1938static int iommu_domain_identity_map(struct dmar_domain *domain, 1943static int iommu_domain_identity_map(struct dmar_domain *domain,
1939 unsigned long long start, 1944 unsigned long long start,
@@ -2151,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2151 2156
2152static int iommu_should_identity_map(struct pci_dev *pdev, int startup) 2157static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2153{ 2158{
2154 if (iommu_identity_mapping == 2) 2159 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2155 return IS_GFX_DEVICE(pdev); 2160 return 1;
2161
2162 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2163 return 1;
2164
2165 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2166 return 0;
2156 2167
2157 /* 2168 /*
2158 * We want to start off with all devices in the 1:1 domain, and 2169 * We want to start off with all devices in the 1:1 domain, and
@@ -2332,11 +2343,14 @@ int __init init_dmars(void)
2332 } 2343 }
2333 2344
2334 if (iommu_pass_through) 2345 if (iommu_pass_through)
2335 iommu_identity_mapping = 1; 2346 iommu_identity_mapping |= IDENTMAP_ALL;
2347
2336#ifdef CONFIG_DMAR_BROKEN_GFX_WA 2348#ifdef CONFIG_DMAR_BROKEN_GFX_WA
2337 else 2349 iommu_identity_mapping |= IDENTMAP_GFX;
2338 iommu_identity_mapping = 2;
2339#endif 2350#endif
2351
2352 check_tylersburg_isoch();
2353
2340 /* 2354 /*
2341 * If pass through is not set or not enabled, setup context entries for 2355 * If pass through is not set or not enabled, setup context entries for
2342 * identity mappings for rmrr, gfx, and isa and may fall back to static 2356 * identity mappings for rmrr, gfx, and isa and may fall back to static
@@ -2753,7 +2767,15 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2753 2767
2754 size = PAGE_ALIGN(size); 2768 size = PAGE_ALIGN(size);
2755 order = get_order(size); 2769 order = get_order(size);
2756 flags &= ~(GFP_DMA | GFP_DMA32); 2770
2771 if (!iommu_no_mapping(hwdev))
2772 flags &= ~(GFP_DMA | GFP_DMA32);
2773 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2774 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2775 flags |= GFP_DMA;
2776 else
2777 flags |= GFP_DMA32;
2778 }
2757 2779
2758 vaddr = (void *)__get_free_pages(flags, order); 2780 vaddr = (void *)__get_free_pages(flags, order);
2759 if (!vaddr) 2781 if (!vaddr)
@@ -3193,6 +3215,33 @@ static int __init init_iommu_sysfs(void)
3193} 3215}
3194#endif /* CONFIG_PM */ 3216#endif /* CONFIG_PM */
3195 3217
3218/*
3219 * Here we only respond to action of unbound device from driver.
3220 *
3221 * Added device is not attached to its DMAR domain here yet. That will happen
3222 * when mapping the device to iova.
3223 */
3224static int device_notifier(struct notifier_block *nb,
3225 unsigned long action, void *data)
3226{
3227 struct device *dev = data;
3228 struct pci_dev *pdev = to_pci_dev(dev);
3229 struct dmar_domain *domain;
3230
3231 domain = find_domain(pdev);
3232 if (!domain)
3233 return 0;
3234
3235 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
3236 domain_remove_one_dev_info(domain, pdev);
3237
3238 return 0;
3239}
3240
3241static struct notifier_block device_nb = {
3242 .notifier_call = device_notifier,
3243};
3244
3196int __init intel_iommu_init(void) 3245int __init intel_iommu_init(void)
3197{ 3246{
3198 int ret = 0; 3247 int ret = 0;
@@ -3245,6 +3294,8 @@ int __init intel_iommu_init(void)
3245 3294
3246 register_iommu(&intel_iommu_ops); 3295 register_iommu(&intel_iommu_ops);
3247 3296
3297 bus_register_notifier(&pci_bus_type, &device_nb);
3298
3248 return 0; 3299 return 0;
3249} 3300}
3250 3301
@@ -3670,3 +3721,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3670} 3721}
3671 3722
3672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3723DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3724
3725/* On Tylersburg chipsets, some BIOSes have been known to enable the
3726 ISOCH DMAR unit for the Azalia sound device, but not give it any
3727 TLB entries, which causes it to deadlock. Check for that. We do
3728 this in a function called from init_dmars(), instead of in a PCI
3729 quirk, because we don't want to print the obnoxious "BIOS broken"
3730 message if VT-d is actually disabled.
3731*/
3732static void __init check_tylersburg_isoch(void)
3733{
3734 struct pci_dev *pdev;
3735 uint32_t vtisochctrl;
3736
3737 /* If there's no Azalia in the system anyway, forget it. */
3738 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3739 if (!pdev)
3740 return;
3741 pci_dev_put(pdev);
3742
3743 /* System Management Registers. Might be hidden, in which case
3744 we can't do the sanity check. But that's OK, because the
3745 known-broken BIOSes _don't_ actually hide it, so far. */
3746 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3747 if (!pdev)
3748 return;
3749
3750 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3751 pci_dev_put(pdev);
3752 return;
3753 }
3754
3755 pci_dev_put(pdev);
3756
3757 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
3758 if (vtisochctrl & 1)
3759 return;
3760
3761 /* Drop all bits other than the number of TLB entries */
3762 vtisochctrl &= 0x1c;
3763
3764 /* If we have the recommended number of TLB entries (16), fine. */
3765 if (vtisochctrl == 0x10)
3766 return;
3767
3768 /* Zero TLB entries? You get to ride the short bus to school. */
3769 if (!vtisochctrl) {
3770 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
3771 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3772 dmi_get_system_info(DMI_BIOS_VENDOR),
3773 dmi_get_system_info(DMI_BIOS_VERSION),
3774 dmi_get_system_info(DMI_PRODUCT_VERSION));
3775 iommu_identity_mapping |= IDENTMAP_AZALIA;
3776 return;
3777 }
3778
3779 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
3780 vtisochctrl);
3781}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6edecff0b419..4e4c295a049f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -513,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
513 else if (state == PCI_D2 || dev->current_state == PCI_D2) 513 else if (state == PCI_D2 || dev->current_state == PCI_D2)
514 udelay(PCI_PM_D2_DELAY); 514 udelay(PCI_PM_D2_DELAY);
515 515
516 dev->current_state = state; 516 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
517 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
518 if (dev->current_state != state && printk_ratelimit())
519 dev_info(&dev->dev, "Refused to change power state, "
520 "currently in D%d\n", dev->current_state);
517 521
518 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 522 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
519 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 523 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
@@ -2542,10 +2546,10 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2542 2546
2543/** 2547/**
2544 * pci_set_vga_state - set VGA decode state on device and parents if requested 2548 * pci_set_vga_state - set VGA decode state on device and parents if requested
2545 * @dev the PCI device 2549 * @dev: the PCI device
2546 * @decode - true = enable decoding, false = disable decoding 2550 * @decode: true = enable decoding, false = disable decoding
2547 * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 2551 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
2548 * @change_bridge - traverse ancestors and change bridges 2552 * @change_bridge: traverse ancestors and change bridges
2549 */ 2553 */
2550int pci_set_vga_state(struct pci_dev *dev, bool decode, 2554int pci_set_vga_state(struct pci_dev *dev, bool decode,
2551 unsigned int command_bits, bool change_bridge) 2555 unsigned int command_bits, bool change_bridge)
@@ -2719,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
2719 return 1; 2723 return 1;
2720} 2724}
2721 2725
2722static int __devinit pci_init(void)
2723{
2724 struct pci_dev *dev = NULL;
2725
2726 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2727 pci_fixup_device(pci_fixup_final, dev);
2728 }
2729
2730 return 0;
2731}
2732
2733static int __init pci_setup(char *str) 2726static int __init pci_setup(char *str)
2734{ 2727{
2735 while (str) { 2728 while (str) {
@@ -2767,8 +2760,6 @@ static int __init pci_setup(char *str)
2767} 2760}
2768early_param("pci", pci_setup); 2761early_param("pci", pci_setup);
2769 2762
2770device_initcall(pci_init);
2771
2772EXPORT_SYMBOL(pci_reenable_device); 2763EXPORT_SYMBOL(pci_reenable_device);
2773EXPORT_SYMBOL(pci_enable_device_io); 2764EXPORT_SYMBOL(pci_enable_device_io);
2774EXPORT_SYMBOL(pci_enable_device_mem); 2765EXPORT_SYMBOL(pci_enable_device_mem);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2ce8f9ccc66e..40c3cc5d1caf 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/sched.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
22#include <linux/pm.h> 23#include <linux/pm.h>
@@ -52,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = {
52 53
53static struct pcie_port_service_driver aerdriver = { 54static struct pcie_port_service_driver aerdriver = {
54 .name = "aer", 55 .name = "aer",
55 .port_type = PCIE_ANY_PORT, 56 .port_type = PCIE_RC_PORT,
56 .service = PCIE_PORT_SERVICE_AER, 57 .service = PCIE_PORT_SERVICE_AER,
57 58
58 .probe = aer_probe, 59 .probe = aer_probe,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 745402e8e498..5b7056cec00c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -656,8 +656,10 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
656 free_link_state(link); 656 free_link_state(link);
657 657
658 /* Recheck latencies and configure upstream links */ 658 /* Recheck latencies and configure upstream links */
659 pcie_update_aspm_capable(root); 659 if (parent_link) {
660 pcie_config_aspm_path(parent_link); 660 pcie_update_aspm_capable(root);
661 pcie_config_aspm_path(parent_link);
662 }
661out: 663out:
662 mutex_unlock(&aspm_lock); 664 mutex_unlock(&aspm_lock);
663 up_read(&pci_bus_sem); 665 up_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 6df5c984a791..f635e476d632 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC);
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31 31
32/* global data */ 32/* global data */
33static const char device_name[] = "pcieport-driver";
34 33
35static int pcie_portdrv_restore_config(struct pci_dev *dev) 34static int pcie_portdrv_restore_config(struct pci_dev *dev)
36{ 35{
@@ -262,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = {
262}; 261};
263 262
264static struct pci_driver pcie_portdriver = { 263static struct pci_driver pcie_portdriver = {
265 .name = (char *)device_name, 264 .name = "pcieport",
266 .id_table = &port_pci_ids[0], 265 .id_table = &port_pci_ids[0],
267 266
268 .probe = pcie_portdrv_probe, 267 .probe = pcie_portdrv_probe,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6099facecd79..245d2cdb4765 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -670,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
670} 670}
671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); 671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
672 672
673/*
674 * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
675 * Disable fast back-to-back on the secondary bus segment
676 */
677static void __devinit quirk_xio2000a(struct pci_dev *dev)
678{
679 struct pci_dev *pdev;
680 u16 command;
681
682 dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
683 "secondary bus fast back-to-back transfers disabled\n");
684 list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
685 pci_read_config_word(pdev, PCI_COMMAND, &command);
686 if (command & PCI_COMMAND_FAST_BACK)
687 pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
688 }
689}
690DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
691 quirk_xio2000a);
673 692
674#ifdef CONFIG_X86_IO_APIC 693#ifdef CONFIG_X86_IO_APIC
675 694
@@ -990,7 +1009,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
990 1009
991static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) 1010static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
992{ 1011{
993 /* set SBX00 SATA in IDE mode to AHCI mode */ 1012 /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
994 u8 tmp; 1013 u8 tmp;
995 1014
996 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); 1015 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
@@ -1009,8 +1028,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
1009DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 1028DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1010DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1029DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1030DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); 1031DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1013DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); 1032DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1014 1033
1015/* 1034/*
1016 * Serverworks CSB5 IDE does not fully support native mode 1035 * Serverworks CSB5 IDE does not fully support native mode
@@ -2572,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
2572 } 2591 }
2573 pci_do_fixups(dev, start, end); 2592 pci_do_fixups(dev, start, end);
2574} 2593}
2594
2595static int __init pci_apply_final_quirks(void)
2596{
2597 struct pci_dev *dev = NULL;
2598
2599 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2600 pci_fixup_device(pci_fixup_final, dev);
2601 }
2602
2603 return 0;
2604}
2605
2606fs_initcall_sync(pci_apply_final_quirks);
2575#else 2607#else
2576void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} 2608void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
2577#endif 2609#endif
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 706f82d8111f..c54526b206b5 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -205,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
205 return ret; 205 return ret;
206} 206}
207 207
208#if 0
209int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
210{
211 struct pci_bus *bus = dev->bus;
212 struct resource *res = dev->resource + resno;
213 unsigned int type_mask;
214 int i, ret = -EBUSY;
215
216 type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
217
218 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
219 struct resource *r = bus->resource[i];
220 if (!r)
221 continue;
222
223 /* type_mask must match */
224 if ((res->flags ^ r->flags) & type_mask)
225 continue;
226
227 ret = request_resource(r, res);
228
229 if (ret == 0)
230 break;
231 }
232
233 if (ret) {
234 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
235 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
236 } else if (resno < PCI_BRIDGE_RESOURCES) {
237 pci_update_resource(dev, resno);
238 }
239
240 return ret;
241}
242EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
243#endif
244
245/* Sort resources by alignment */ 208/* Sort resources by alignment */
246void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) 209void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
247{ 210{
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index fbf965b31c14..17f38a781d47 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -192,6 +192,10 @@ config PCMCIA_AU1X00
192 tristate "Au1x00 pcmcia support" 192 tristate "Au1x00 pcmcia support"
193 depends on SOC_AU1X00 && PCMCIA 193 depends on SOC_AU1X00 && PCMCIA
194 194
195config PCMCIA_BCM63XX
196 tristate "bcm63xx pcmcia support"
197 depends on BCM63XX && PCMCIA
198
195config PCMCIA_SA1100 199config PCMCIA_SA1100
196 tristate "SA1100 support" 200 tristate "SA1100 support"
197 depends on ARM && ARCH_SA1100 && PCMCIA 201 depends on ARM && ARCH_SA1100 && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 047394d98ac2..a03a38acd77d 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o 28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o 29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
30obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
30obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 31obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
31obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 32obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
32obj-$(CONFIG_OMAP_CF) += omap_cf.o 33obj-$(CONFIG_OMAP_CF) += omap_cf.o
@@ -71,6 +72,7 @@ pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
71pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o 72pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
72pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o 73pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 74pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 76pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 77pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 78pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 9e1140f085fd..e1dccedc5960 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -363,7 +363,7 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
363 struct at91_cf_socket *cf = platform_get_drvdata(pdev); 363 struct at91_cf_socket *cf = platform_get_drvdata(pdev);
364 struct at91_cf_data *board = cf->board; 364 struct at91_cf_data *board = cf->board;
365 365
366 pcmcia_socket_dev_suspend(&pdev->dev, mesg); 366 pcmcia_socket_dev_suspend(&pdev->dev);
367 if (device_may_wakeup(&pdev->dev)) { 367 if (device_may_wakeup(&pdev->dev)) {
368 enable_irq_wake(board->det_pin); 368 enable_irq_wake(board->det_pin);
369 if (board->irq_pin) 369 if (board->irq_pin)
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 90013341cd5f..02088704ac2c 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -515,7 +515,7 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev, 515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev,
516 pm_message_t state) 516 pm_message_t state)
517{ 517{
518 return pcmcia_socket_dev_suspend(&dev->dev, state); 518 return pcmcia_socket_dev_suspend(&dev->dev);
519} 519}
520 520
521static int au1x00_drv_pcmcia_resume(struct platform_device *dev) 521static int au1x00_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
new file mode 100644
index 000000000000..bc88a3b19bb3
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -0,0 +1,536 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/ioport.h>
12#include <linux/timer.h>
13#include <linux/platform_device.h>
14#include <linux/delay.h>
15#include <linux/pci.h>
16#include <linux/gpio.h>
17
18#include <bcm63xx_regs.h>
19#include <bcm63xx_io.h>
20#include "bcm63xx_pcmcia.h"
21
22#define PFX "bcm63xx_pcmcia: "
23
24#ifdef CONFIG_CARDBUS
25/* if cardbus is used, platform device needs reference to actual pci
26 * device */
27static struct pci_dev *bcm63xx_cb_dev;
28#endif
29
30/*
31 * read/write helper for pcmcia regs
32 */
33static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off)
34{
35 return bcm_readl(skt->base + off);
36}
37
38static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt,
39 u32 val, u32 off)
40{
41 bcm_writel(val, skt->base + off);
42}
43
44/*
45 * This callback should (re-)initialise the socket, turn on status
46 * interrupts and PCMCIA bus, and wait for power to stabilise so that
47 * the card status signals report correctly.
48 *
49 * Hardware cannot do that.
50 */
51static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock)
52{
53 return 0;
54}
55
56/*
57 * This callback should remove power on the socket, disable IRQs from
58 * the card, turn off status interrupts, and disable the PCMCIA bus.
59 *
60 * Hardware cannot do that.
61 */
62static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock)
63{
64 return 0;
65}
66
67/*
68 * Implements the set_socket() operation for the in-kernel PCMCIA
69 * service (formerly SS_SetSocket in Card Services). We more or
70 * less punt all of this work and let the kernel handle the details
71 * of power configuration, reset, &c. We also record the value of
72 * `state' in order to regurgitate it to the PCMCIA core later.
73 */
74static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock,
75 socket_state_t *state)
76{
77 struct bcm63xx_pcmcia_socket *skt;
78 unsigned long flags;
79 u32 val;
80
81 skt = sock->driver_data;
82
83 spin_lock_irqsave(&skt->lock, flags);
84
85 /* note: hardware cannot control socket power, so we will
86 * always report SS_POWERON */
87
88 /* apply socket reset */
89 val = pcmcia_readl(skt, PCMCIA_C1_REG);
90 if (state->flags & SS_RESET)
91 val |= PCMCIA_C1_RESET_MASK;
92 else
93 val &= ~PCMCIA_C1_RESET_MASK;
94
95 /* reverse reset logic for cardbus card */
96 if (skt->card_detected && (skt->card_type & CARD_CARDBUS))
97 val ^= PCMCIA_C1_RESET_MASK;
98
99 pcmcia_writel(skt, val, PCMCIA_C1_REG);
100
101 /* keep requested state for event reporting */
102 skt->requested_state = *state;
103
104 spin_unlock_irqrestore(&skt->lock, flags);
105
106 return 0;
107}
108
109/*
110 * identity cardtype from VS[12] input, CD[12] input while only VS2 is
111 * floating, and CD[12] input while only VS1 is floating
112 */
113enum {
114 IN_VS1 = (1 << 0),
115 IN_VS2 = (1 << 1),
116 IN_CD1_VS2H = (1 << 2),
117 IN_CD2_VS2H = (1 << 3),
118 IN_CD1_VS1H = (1 << 4),
119 IN_CD2_VS1H = (1 << 5),
120};
121
122static const u8 vscd_to_cardtype[] = {
123
124 /* VS1 float, VS2 float */
125 [IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V),
126
127 /* VS1 grounded, VS2 float */
128 [IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V),
129
130 /* VS1 grounded, VS2 grounded */
131 [0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV),
132
133 /* VS1 tied to CD1, VS2 float */
134 [IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V),
135
136 /* VS1 grounded, VS2 tied to CD2 */
137 [IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV),
138
139 /* VS1 tied to CD2, VS2 grounded */
140 [IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV),
141
142 /* VS1 float, VS2 grounded */
143 [IN_VS1] = (CARD_PCCARD | CARD_XV),
144
145 /* VS1 float, VS2 tied to CD2 */
146 [IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V),
147
148 /* VS1 float, VS2 tied to CD1 */
149 [IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV),
150
151 /* VS1 tied to CD2, VS2 float */
152 [IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV),
153
154 /* VS2 grounded, VS1 is tied to CD1, CD2 is grounded */
155 [IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */
156};
157
158/*
159 * poll hardware to check card insertion status
160 */
161static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt)
162{
163 unsigned int stat;
164 u32 val;
165
166 stat = 0;
167
168 /* check CD for card presence */
169 val = pcmcia_readl(skt, PCMCIA_C1_REG);
170
171 if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK))
172 stat |= SS_DETECT;
173
174 /* if new insertion, detect cardtype */
175 if ((stat & SS_DETECT) && !skt->card_detected) {
176 unsigned int stat = 0;
177
178 /* float VS1, float VS2 */
179 val |= PCMCIA_C1_VS1OE_MASK;
180 val |= PCMCIA_C1_VS2OE_MASK;
181 pcmcia_writel(skt, val, PCMCIA_C1_REG);
182
183 /* wait for output to stabilize and read VS[12] */
184 udelay(10);
185 val = pcmcia_readl(skt, PCMCIA_C1_REG);
186 stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0;
187 stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0;
188
189 /* drive VS1 low, float VS2 */
190 val &= ~PCMCIA_C1_VS1OE_MASK;
191 val |= PCMCIA_C1_VS2OE_MASK;
192 pcmcia_writel(skt, val, PCMCIA_C1_REG);
193
194 /* wait for output to stabilize and read CD[12] */
195 udelay(10);
196 val = pcmcia_readl(skt, PCMCIA_C1_REG);
197 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0;
198 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0;
199
200 /* float VS1, drive VS2 low */
201 val |= PCMCIA_C1_VS1OE_MASK;
202 val &= ~PCMCIA_C1_VS2OE_MASK;
203 pcmcia_writel(skt, val, PCMCIA_C1_REG);
204
205 /* wait for output to stabilize and read CD[12] */
206 udelay(10);
207 val = pcmcia_readl(skt, PCMCIA_C1_REG);
208 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0;
209 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0;
210
211 /* guess cardtype from all this */
212 skt->card_type = vscd_to_cardtype[stat];
213 if (!skt->card_type)
214 dev_err(&skt->socket.dev, "unsupported card type\n");
215
216 /* drive both VS pin to 0 again */
217 val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK);
218
219 /* enable correct logic */
220 val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK);
221 if (skt->card_type & CARD_PCCARD)
222 val |= PCMCIA_C1_EN_PCMCIA_MASK;
223 else
224 val |= PCMCIA_C1_EN_CARDBUS_MASK;
225
226 pcmcia_writel(skt, val, PCMCIA_C1_REG);
227 }
228 skt->card_detected = (stat & SS_DETECT) ? 1 : 0;
229
230 /* report card type/voltage */
231 if (skt->card_type & CARD_CARDBUS)
232 stat |= SS_CARDBUS;
233 if (skt->card_type & CARD_3V)
234 stat |= SS_3VCARD;
235 if (skt->card_type & CARD_XV)
236 stat |= SS_XVCARD;
237 stat |= SS_POWERON;
238
239 if (gpio_get_value(skt->pd->ready_gpio))
240 stat |= SS_READY;
241
242 return stat;
243}
244
245/*
246 * core request to get current socket status
247 */
248static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
249 unsigned int *status)
250{
251 struct bcm63xx_pcmcia_socket *skt;
252
253 skt = sock->driver_data;
254
255 spin_lock_bh(&skt->lock);
256 *status = __get_socket_status(skt);
257 spin_unlock_bh(&skt->lock);
258
259 return 0;
260}
261
262/*
263 * socket polling timer callback
264 */
265static void bcm63xx_pcmcia_poll(unsigned long data)
266{
267 struct bcm63xx_pcmcia_socket *skt;
268 unsigned int stat, events;
269
270 skt = (struct bcm63xx_pcmcia_socket *)data;
271
272 spin_lock_bh(&skt->lock);
273
274 stat = __get_socket_status(skt);
275
276 /* keep only changed bits, and mask with required one from the
277 * core */
278 events = (stat ^ skt->old_status) & skt->requested_state.csc_mask;
279 skt->old_status = stat;
280 spin_unlock_bh(&skt->lock);
281
282 if (events)
283 pcmcia_parse_events(&skt->socket, events);
284
285 mod_timer(&skt->timer,
286 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
287}
288
289static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock,
290 struct pccard_io_map *map)
291{
292 /* this doesn't seem to be called by pcmcia layer if static
293 * mapping is used */
294 return 0;
295}
296
297static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock,
298 struct pccard_mem_map *map)
299{
300 struct bcm63xx_pcmcia_socket *skt;
301 struct resource *res;
302
303 skt = sock->driver_data;
304 if (map->flags & MAP_ATTRIB)
305 res = skt->attr_res;
306 else
307 res = skt->common_res;
308
309 map->static_start = res->start + map->card_start;
310 return 0;
311}
312
313static struct pccard_operations bcm63xx_pcmcia_operations = {
314 .init = bcm63xx_pcmcia_sock_init,
315 .suspend = bcm63xx_pcmcia_suspend,
316 .get_status = bcm63xx_pcmcia_get_status,
317 .set_socket = bcm63xx_pcmcia_set_socket,
318 .set_io_map = bcm63xx_pcmcia_set_io_map,
319 .set_mem_map = bcm63xx_pcmcia_set_mem_map,
320};
321
322/*
323 * register pcmcia socket to core
324 */
325static int __devinit bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
326{
327 struct bcm63xx_pcmcia_socket *skt;
328 struct pcmcia_socket *sock;
329 struct resource *res, *irq_res;
330 unsigned int regmem_size = 0, iomem_size = 0;
331 u32 val;
332 int ret;
333
334 skt = kzalloc(sizeof(*skt), GFP_KERNEL);
335 if (!skt)
336 return -ENOMEM;
337 spin_lock_init(&skt->lock);
338 sock = &skt->socket;
339 sock->driver_data = skt;
340
341 /* make sure we have all resources we need */
342 skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
343 skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
344 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
345 skt->pd = pdev->dev.platform_data;
346 if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
347 ret = -EINVAL;
348 goto err;
349 }
350
351 /* remap pcmcia registers */
352 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
353 regmem_size = resource_size(res);
354 if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) {
355 ret = -EINVAL;
356 goto err;
357 }
358 skt->reg_res = res;
359
360 skt->base = ioremap(res->start, regmem_size);
361 if (!skt->base) {
362 ret = -ENOMEM;
363 goto err;
364 }
365
366 /* remap io registers */
367 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
368 iomem_size = resource_size(res);
369 skt->io_base = ioremap(res->start, iomem_size);
370 if (!skt->io_base) {
371 ret = -ENOMEM;
372 goto err;
373 }
374
375 /* resources are static */
376 sock->resource_ops = &pccard_static_ops;
377 sock->ops = &bcm63xx_pcmcia_operations;
378 sock->owner = THIS_MODULE;
379 sock->dev.parent = &pdev->dev;
380 sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
381 sock->io_offset = (unsigned long)skt->io_base;
382 sock->pci_irq = irq_res->start;
383
384#ifdef CONFIG_CARDBUS
385 sock->cb_dev = bcm63xx_cb_dev;
386 if (bcm63xx_cb_dev)
387 sock->features |= SS_CAP_CARDBUS;
388#endif
389
390 /* assume common & attribute memory have the same size */
391 sock->map_size = resource_size(skt->common_res);
392
393 /* initialize polling timer */
394 setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);
395
396 /* initialize pcmcia control register, drive VS[12] to 0,
397 * leave CB IDSEL to the old value since it is set by the PCI
398 * layer */
399 val = pcmcia_readl(skt, PCMCIA_C1_REG);
400 val &= PCMCIA_C1_CBIDSEL_MASK;
401 val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK;
402 pcmcia_writel(skt, val, PCMCIA_C1_REG);
403
404 /*
405 * Hardware has only one set of timings registers, not one for
406 * each memory access type, so we configure them for the
407 * slowest one: attribute memory.
408 */
409 val = PCMCIA_C2_DATA16_MASK;
410 val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT;
411 val |= 6 << PCMCIA_C2_INACTIVE_SHIFT;
412 val |= 3 << PCMCIA_C2_SETUP_SHIFT;
413 val |= 3 << PCMCIA_C2_HOLD_SHIFT;
414 pcmcia_writel(skt, val, PCMCIA_C2_REG);
415
416 ret = pcmcia_register_socket(sock);
417 if (ret)
418 goto err;
419
420 /* start polling socket */
421 mod_timer(&skt->timer,
422 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
423
424 platform_set_drvdata(pdev, skt);
425 return 0;
426
427err:
428 if (skt->io_base)
429 iounmap(skt->io_base);
430 if (skt->base)
431 iounmap(skt->base);
432 if (skt->reg_res)
433 release_mem_region(skt->reg_res->start, regmem_size);
434 kfree(skt);
435 return ret;
436}
437
438static int __devexit bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
439{
440 struct bcm63xx_pcmcia_socket *skt;
441 struct resource *res;
442
443 skt = platform_get_drvdata(pdev);
444 del_timer_sync(&skt->timer);
445 iounmap(skt->base);
446 iounmap(skt->io_base);
447 res = skt->reg_res;
448 release_mem_region(res->start, resource_size(res));
449 kfree(skt);
450 return 0;
451}
452
453struct platform_driver bcm63xx_pcmcia_driver = {
454 .probe = bcm63xx_drv_pcmcia_probe,
455 .remove = __devexit_p(bcm63xx_drv_pcmcia_remove),
456 .driver = {
457 .name = "bcm63xx_pcmcia",
458 .owner = THIS_MODULE,
459 },
460};
461
462#ifdef CONFIG_CARDBUS
463static int __devinit bcm63xx_cb_probe(struct pci_dev *dev,
464 const struct pci_device_id *id)
465{
466 /* keep pci device */
467 bcm63xx_cb_dev = dev;
468 return platform_driver_register(&bcm63xx_pcmcia_driver);
469}
470
471static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
472{
473 platform_driver_unregister(&bcm63xx_pcmcia_driver);
474 bcm63xx_cb_dev = NULL;
475}
476
477static struct pci_device_id bcm63xx_cb_table[] = {
478 {
479 .vendor = PCI_VENDOR_ID_BROADCOM,
480 .device = BCM6348_CPU_ID,
481 .subvendor = PCI_VENDOR_ID_BROADCOM,
482 .subdevice = PCI_ANY_ID,
483 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
484 .class_mask = ~0,
485 },
486
487 {
488 .vendor = PCI_VENDOR_ID_BROADCOM,
489 .device = BCM6358_CPU_ID,
490 .subvendor = PCI_VENDOR_ID_BROADCOM,
491 .subdevice = PCI_ANY_ID,
492 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
493 .class_mask = ~0,
494 },
495
496 { },
497};
498
499MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table);
500
501static struct pci_driver bcm63xx_cardbus_driver = {
502 .name = "bcm63xx_cardbus",
503 .id_table = bcm63xx_cb_table,
504 .probe = bcm63xx_cb_probe,
505 .remove = __devexit_p(bcm63xx_cb_exit),
506};
507#endif
508
509/*
510 * if cardbus support is enabled, register our platform device after
511 * our fake cardbus bridge has been registered
512 */
513static int __init bcm63xx_pcmcia_init(void)
514{
515#ifdef CONFIG_CARDBUS
516 return pci_register_driver(&bcm63xx_cardbus_driver);
517#else
518 return platform_driver_register(&bcm63xx_pcmcia_driver);
519#endif
520}
521
522static void __exit bcm63xx_pcmcia_exit(void)
523{
524#ifdef CONFIG_CARDBUS
525 return pci_unregister_driver(&bcm63xx_cardbus_driver);
526#else
527 platform_driver_unregister(&bcm63xx_pcmcia_driver);
528#endif
529}
530
531module_init(bcm63xx_pcmcia_init);
532module_exit(bcm63xx_pcmcia_exit);
533
534MODULE_LICENSE("GPL");
535MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
536MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller");
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.h b/drivers/pcmcia/bcm63xx_pcmcia.h
new file mode 100644
index 000000000000..ed957399d863
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.h
@@ -0,0 +1,60 @@
1#ifndef BCM63XX_PCMCIA_H_
2#define BCM63XX_PCMCIA_H_
3
4#include <linux/types.h>
5#include <linux/timer.h>
6#include <pcmcia/ss.h>
7#include <bcm63xx_dev_pcmcia.h>
8
9/* socket polling rate in ms */
10#define BCM63XX_PCMCIA_POLL_RATE 500
11
12enum {
13 CARD_CARDBUS = (1 << 0),
14 CARD_PCCARD = (1 << 1),
15 CARD_5V = (1 << 2),
16 CARD_3V = (1 << 3),
17 CARD_XV = (1 << 4),
18 CARD_YV = (1 << 5),
19};
20
21struct bcm63xx_pcmcia_socket {
22 struct pcmcia_socket socket;
23
24 /* platform specific data */
25 struct bcm63xx_pcmcia_platform_data *pd;
26
27 /* all regs access are protected by this spinlock */
28 spinlock_t lock;
29
30 /* pcmcia registers resource */
31 struct resource *reg_res;
32
33 /* base remapped address of registers */
34 void __iomem *base;
35
36 /* whether a card is detected at the moment */
37 int card_detected;
38
39 /* type of detected card (mask of above enum) */
40 u8 card_type;
41
42 /* keep last socket status to implement event reporting */
43 unsigned int old_status;
44
45 /* backup of requested socket state */
46 socket_state_t requested_state;
47
48 /* timer used for socket status polling */
49 struct timer_list timer;
50
51 /* attribute/common memory resources */
52 struct resource *attr_res;
53 struct resource *common_res;
54 struct resource *io_res;
55
56 /* base address of io memory */
57 void __iomem *io_base;
58};
59
60#endif /* BCM63XX_PCMCIA_H_ */
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index b59d4115d20f..300b368605c9 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -302,7 +302,7 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev)
302 302
303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
304{ 304{
305 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 305 return pcmcia_socket_dev_suspend(&pdev->dev);
306} 306}
307 307
308static int bfin_cf_resume(struct platform_device *pdev) 308static int bfin_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 4a110b7b2673..6c4a4fc83630 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1463,7 +1463,9 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t
1463 return -ENOMEM; 1463 return -ENOMEM;
1464 } 1464 }
1465 tuple.DesiredTuple = code; 1465 tuple.DesiredTuple = code;
1466 tuple.Attributes = TUPLE_RETURN_COMMON; 1466 tuple.Attributes = 0;
1467 if (function == BIND_FN_ALL)
1468 tuple.Attributes = TUPLE_RETURN_COMMON;
1467 ret = pccard_get_first_tuple(s, function, &tuple); 1469 ret = pccard_get_first_tuple(s, function, &tuple);
1468 if (ret != 0) 1470 if (ret != 0)
1469 goto done; 1471 goto done;
@@ -1490,7 +1492,7 @@ EXPORT_SYMBOL(pccard_read_tuple);
1490 1492
1491======================================================================*/ 1493======================================================================*/
1492 1494
1493int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned int *info) 1495int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
1494{ 1496{
1495 tuple_t *tuple; 1497 tuple_t *tuple;
1496 cisparse_t *p; 1498 cisparse_t *p;
@@ -1515,30 +1517,30 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned
1515 count = reserved = 0; 1517 count = reserved = 0;
1516 tuple->DesiredTuple = RETURN_FIRST_TUPLE; 1518 tuple->DesiredTuple = RETURN_FIRST_TUPLE;
1517 tuple->Attributes = TUPLE_RETURN_COMMON; 1519 tuple->Attributes = TUPLE_RETURN_COMMON;
1518 ret = pccard_get_first_tuple(s, function, tuple); 1520 ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple);
1519 if (ret != 0) 1521 if (ret != 0)
1520 goto done; 1522 goto done;
1521 1523
1522 /* First tuple should be DEVICE; we should really have either that 1524 /* First tuple should be DEVICE; we should really have either that
1523 or a CFTABLE_ENTRY of some sort */ 1525 or a CFTABLE_ENTRY of some sort */
1524 if ((tuple->TupleCode == CISTPL_DEVICE) || 1526 if ((tuple->TupleCode == CISTPL_DEVICE) ||
1525 (pccard_read_tuple(s, function, CISTPL_CFTABLE_ENTRY, p) == 0) || 1527 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p) == 0) ||
1526 (pccard_read_tuple(s, function, CISTPL_CFTABLE_ENTRY_CB, p) == 0)) 1528 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p) == 0))
1527 dev_ok++; 1529 dev_ok++;
1528 1530
1529 /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2 1531 /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
1530 tuple, for card identification. Certain old D-Link and Linksys 1532 tuple, for card identification. Certain old D-Link and Linksys
1531 cards have only a broken VERS_2 tuple; hence the bogus test. */ 1533 cards have only a broken VERS_2 tuple; hence the bogus test. */
1532 if ((pccard_read_tuple(s, function, CISTPL_MANFID, p) == 0) || 1534 if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
1533 (pccard_read_tuple(s, function, CISTPL_VERS_1, p) == 0) || 1535 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
1534 (pccard_read_tuple(s, function, CISTPL_VERS_2, p) != -ENOSPC)) 1536 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
1535 ident_ok++; 1537 ident_ok++;
1536 1538
1537 if (!dev_ok && !ident_ok) 1539 if (!dev_ok && !ident_ok)
1538 goto done; 1540 goto done;
1539 1541
1540 for (count = 1; count < MAX_TUPLES; count++) { 1542 for (count = 1; count < MAX_TUPLES; count++) {
1541 ret = pccard_get_next_tuple(s, function, tuple); 1543 ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple);
1542 if (ret != 0) 1544 if (ret != 0)
1543 break; 1545 break;
1544 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || 1546 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 0660ad182589..698d75cda084 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -98,10 +98,13 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem);
98 * These functions check for the appropriate struct pcmcia_soket arrays, 98 * These functions check for the appropriate struct pcmcia_soket arrays,
99 * and pass them to the low-level functions pcmcia_{suspend,resume}_socket 99 * and pass them to the low-level functions pcmcia_{suspend,resume}_socket
100 */ 100 */
101static int socket_early_resume(struct pcmcia_socket *skt);
102static int socket_late_resume(struct pcmcia_socket *skt);
101static int socket_resume(struct pcmcia_socket *skt); 103static int socket_resume(struct pcmcia_socket *skt);
102static int socket_suspend(struct pcmcia_socket *skt); 104static int socket_suspend(struct pcmcia_socket *skt);
103 105
104int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state) 106static void pcmcia_socket_dev_run(struct device *dev,
107 int (*cb)(struct pcmcia_socket *))
105{ 108{
106 struct pcmcia_socket *socket; 109 struct pcmcia_socket *socket;
107 110
@@ -110,29 +113,34 @@ int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
110 if (socket->dev.parent != dev) 113 if (socket->dev.parent != dev)
111 continue; 114 continue;
112 mutex_lock(&socket->skt_mutex); 115 mutex_lock(&socket->skt_mutex);
113 socket_suspend(socket); 116 cb(socket);
114 mutex_unlock(&socket->skt_mutex); 117 mutex_unlock(&socket->skt_mutex);
115 } 118 }
116 up_read(&pcmcia_socket_list_rwsem); 119 up_read(&pcmcia_socket_list_rwsem);
120}
117 121
122int pcmcia_socket_dev_suspend(struct device *dev)
123{
124 pcmcia_socket_dev_run(dev, socket_suspend);
118 return 0; 125 return 0;
119} 126}
120EXPORT_SYMBOL(pcmcia_socket_dev_suspend); 127EXPORT_SYMBOL(pcmcia_socket_dev_suspend);
121 128
122int pcmcia_socket_dev_resume(struct device *dev) 129void pcmcia_socket_dev_early_resume(struct device *dev)
123{ 130{
124 struct pcmcia_socket *socket; 131 pcmcia_socket_dev_run(dev, socket_early_resume);
132}
133EXPORT_SYMBOL(pcmcia_socket_dev_early_resume);
125 134
126 down_read(&pcmcia_socket_list_rwsem); 135void pcmcia_socket_dev_late_resume(struct device *dev)
127 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 136{
128 if (socket->dev.parent != dev) 137 pcmcia_socket_dev_run(dev, socket_late_resume);
129 continue; 138}
130 mutex_lock(&socket->skt_mutex); 139EXPORT_SYMBOL(pcmcia_socket_dev_late_resume);
131 socket_resume(socket);
132 mutex_unlock(&socket->skt_mutex);
133 }
134 up_read(&pcmcia_socket_list_rwsem);
135 140
141int pcmcia_socket_dev_resume(struct device *dev)
142{
143 pcmcia_socket_dev_run(dev, socket_resume);
136 return 0; 144 return 0;
137} 145}
138EXPORT_SYMBOL(pcmcia_socket_dev_resume); 146EXPORT_SYMBOL(pcmcia_socket_dev_resume);
@@ -546,29 +554,24 @@ static int socket_suspend(struct pcmcia_socket *skt)
546 return 0; 554 return 0;
547} 555}
548 556
549/* 557static int socket_early_resume(struct pcmcia_socket *skt)
550 * Resume a socket. If a card is present, verify its CIS against
551 * our cached copy. If they are different, the card has been
552 * replaced, and we need to tell the drivers.
553 */
554static int socket_resume(struct pcmcia_socket *skt)
555{ 558{
556 int ret;
557
558 if (!(skt->state & SOCKET_SUSPEND))
559 return -EBUSY;
560
561 skt->socket = dead_socket; 559 skt->socket = dead_socket;
562 skt->ops->init(skt); 560 skt->ops->init(skt);
563 skt->ops->set_socket(skt, &skt->socket); 561 skt->ops->set_socket(skt, &skt->socket);
562 if (skt->state & SOCKET_PRESENT)
563 skt->resume_status = socket_setup(skt, resume_delay);
564 return 0;
565}
564 566
567static int socket_late_resume(struct pcmcia_socket *skt)
568{
565 if (!(skt->state & SOCKET_PRESENT)) { 569 if (!(skt->state & SOCKET_PRESENT)) {
566 skt->state &= ~SOCKET_SUSPEND; 570 skt->state &= ~SOCKET_SUSPEND;
567 return socket_insert(skt); 571 return socket_insert(skt);
568 } 572 }
569 573
570 ret = socket_setup(skt, resume_delay); 574 if (skt->resume_status == 0) {
571 if (ret == 0) {
572 /* 575 /*
573 * FIXME: need a better check here for cardbus cards. 576 * FIXME: need a better check here for cardbus cards.
574 */ 577 */
@@ -596,6 +599,20 @@ static int socket_resume(struct pcmcia_socket *skt)
596 return 0; 599 return 0;
597} 600}
598 601
602/*
603 * Resume a socket. If a card is present, verify its CIS against
604 * our cached copy. If they are different, the card has been
605 * replaced, and we need to tell the drivers.
606 */
607static int socket_resume(struct pcmcia_socket *skt)
608{
609 if (!(skt->state & SOCKET_SUSPEND))
610 return -EBUSY;
611
612 socket_early_resume(skt);
613 return socket_late_resume(skt);
614}
615
599static void socket_remove(struct pcmcia_socket *skt) 616static void socket_remove(struct pcmcia_socket *skt)
600{ 617{
601 dev_printk(KERN_NOTICE, &skt->dev, 618 dev_printk(KERN_NOTICE, &skt->dev,
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 79615e6d540b..1f4098f1354d 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -197,8 +197,7 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
197 cisdata_t code, void *parse); 197 cisdata_t code, void *parse);
198int pcmcia_replace_cis(struct pcmcia_socket *s, 198int pcmcia_replace_cis(struct pcmcia_socket *s,
199 const u8 *data, const size_t len); 199 const u8 *data, const size_t len);
200int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, 200int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
201 unsigned int *count);
202 201
203/* rsrc_mgr.c */ 202/* rsrc_mgr.c */
204int pcmcia_validate_mem(struct pcmcia_socket *s); 203int pcmcia_validate_mem(struct pcmcia_socket *s);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 9f300d3cb125..f5b7079f13d3 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -547,7 +547,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
547 if (!vers1) 547 if (!vers1)
548 return -ENOMEM; 548 return -ENOMEM;
549 549
550 if (!pccard_read_tuple(p_dev->socket, p_dev->func, 550 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL,
551 CISTPL_MANFID, &manf_id)) { 551 CISTPL_MANFID, &manf_id)) {
552 p_dev->manf_id = manf_id.manf; 552 p_dev->manf_id = manf_id.manf;
553 p_dev->card_id = manf_id.card; 553 p_dev->card_id = manf_id.card;
@@ -581,9 +581,9 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
581 kfree(devgeo); 581 kfree(devgeo);
582 } 582 }
583 583
584 if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_VERS_1, 584 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1,
585 vers1)) { 585 vers1)) {
586 for (i=0; i < vers1->ns; i++) { 586 for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) {
587 char *tmp; 587 char *tmp;
588 unsigned int length; 588 unsigned int length;
589 589
@@ -733,7 +733,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
733 return -EAGAIN; /* try again, but later... */ 733 return -EAGAIN; /* try again, but later... */
734 } 734 }
735 735
736 ret = pccard_validate_cis(s, BIND_FN_ALL, &no_chains); 736 ret = pccard_validate_cis(s, &no_chains);
737 if (ret || !no_chains) { 737 if (ret || !no_chains) {
738 ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n"); 738 ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n");
739 return -ENODEV; 739 return -ENODEV;
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 46561face128..a04f21c8170f 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
42#ifdef CONFIG_PM 42#ifdef CONFIG_PM
43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state) 43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
44{ 44{
45 return pcmcia_socket_dev_suspend(&dev->dev, state); 45 return pcmcia_socket_dev_suspend(&dev->dev);
46} 46}
47 47
48static int i82092aa_socket_resume (struct pci_dev *dev) 48static int i82092aa_socket_resume (struct pci_dev *dev)
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 40d4953e4b12..a4aacb830b80 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1053,8 +1053,8 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io)
1053 u_char map, ioctl; 1053 u_char map, ioctl;
1054 1054
1055 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " 1055 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
1056 "%#x-%#x)\n", sock, io->map, io->flags, 1056 "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed,
1057 io->speed, io->start, io->stop); 1057 (unsigned long long)io->start, (unsigned long long)io->stop);
1058 map = io->map; 1058 map = io->map;
1059 if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || 1059 if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
1060 (io->stop < io->start)) return -EINVAL; 1060 (io->stop < io->start)) return -EINVAL;
@@ -1241,7 +1241,7 @@ static int pcic_init(struct pcmcia_socket *s)
1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev, 1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev,
1242 pm_message_t state) 1242 pm_message_t state)
1243{ 1243{
1244 return pcmcia_socket_dev_suspend(&dev->dev, state); 1244 return pcmcia_socket_dev_suspend(&dev->dev);
1245} 1245}
1246 1246
1247static int i82365_drv_pcmcia_resume(struct platform_device *dev) 1247static int i82365_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 62b4ecc97c46..7dfbee1dcd76 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -537,8 +537,9 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
537 u_char map; 537 u_char map;
538 538
539 debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " 539 debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, "
540 "%#lx-%#lx)\n", sock, io->map, io->flags, 540 "%#llx-%#llx)\n", sock, io->map, io->flags,
541 io->speed, io->start, io->stop); 541 io->speed, (unsigned long long)io->start,
542 (unsigned long long)io->stop);
542 map = io->map; 543 map = io->map;
543 544
544 return 0; 545 return 0;
@@ -554,8 +555,9 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
554 pcc_socket_t *t = &socket[sock]; 555 pcc_socket_t *t = &socket[sock];
555 556
556 debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " 557 debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, "
557 "%#lx, %#x)\n", sock, map, mem->flags, 558 "%#llx, %#x)\n", sock, map, mem->flags,
558 mem->speed, mem->static_start, mem->card_start); 559 mem->speed, (unsigned long long)mem->static_start,
560 mem->card_start);
559 561
560 /* 562 /*
561 * sanity check 563 * sanity check
@@ -699,7 +701,7 @@ static struct pccard_operations pcc_operations = {
699static int cfc_drv_pcmcia_suspend(struct platform_device *dev, 701static int cfc_drv_pcmcia_suspend(struct platform_device *dev,
700 pm_message_t state) 702 pm_message_t state)
701{ 703{
702 return pcmcia_socket_dev_suspend(&dev->dev, state); 704 return pcmcia_socket_dev_suspend(&dev->dev);
703} 705}
704 706
705static int cfc_drv_pcmcia_resume(struct platform_device *dev) 707static int cfc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 12034b41d196..c6524f99ccc3 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -492,8 +492,9 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
492 u_char map; 492 u_char map;
493 493
494 debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " 494 debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, "
495 "%#x-%#x)\n", sock, io->map, io->flags, 495 "%#llx-%#llx)\n", sock, io->map, io->flags,
496 io->speed, io->start, io->stop); 496 io->speed, (unsigned long long)io->start,
497 (unsigned long long)io->stop);
497 map = io->map; 498 map = io->map;
498 499
499 return 0; 500 return 0;
@@ -515,8 +516,9 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
515#endif 516#endif
516 517
517 debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " 518 debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, "
518 "%#lx, %#x)\n", sock, map, mem->flags, 519 "%#llx, %#x)\n", sock, map, mem->flags,
519 mem->speed, mem->static_start, mem->card_start); 520 mem->speed, (unsigned long long)mem->static_start,
521 mem->card_start);
520 522
521 /* 523 /*
522 * sanity check 524 * sanity check
@@ -675,7 +677,7 @@ static struct pccard_operations pcc_operations = {
675static int pcc_drv_pcmcia_suspend(struct platform_device *dev, 677static int pcc_drv_pcmcia_suspend(struct platform_device *dev,
676 pm_message_t state) 678 pm_message_t state)
677{ 679{
678 return pcmcia_socket_dev_suspend(&dev->dev, state); 680 return pcmcia_socket_dev_suspend(&dev->dev);
679} 681}
680 682
681static int pcc_drv_pcmcia_resume(struct platform_device *dev) 683static int pcc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index d1ad0966392d..403559ba49dd 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -975,8 +975,9 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
975#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) 975#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
976 976
977 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " 977 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
978 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags, 978 "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags,
979 io->speed, io->start, io->stop); 979 io->speed, (unsigned long long)io->start,
980 (unsigned long long)io->stop);
980 981
981 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) 982 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
982 || (io->stop > 0xffff) || (io->stop < io->start)) 983 || (io->stop > 0xffff) || (io->stop < io->start))
@@ -1055,8 +1056,9 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1055 pcmconf8xx_t *pcmcia = s->pcmcia; 1056 pcmconf8xx_t *pcmcia = s->pcmcia;
1056 1057
1057 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1058 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1058 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1059 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1059 mem->speed, mem->static_start, mem->card_start); 1060 mem->speed, (unsigned long long)mem->static_start,
1061 mem->card_start);
1060 1062
1061 if ((mem->map >= PCMCIA_MEM_WIN_NO) 1063 if ((mem->map >= PCMCIA_MEM_WIN_NO)
1062// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) 1064// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
@@ -1107,8 +1109,9 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1107 } 1109 }
1108 1110
1109 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1111 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1110 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1112 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1111 mem->speed, mem->static_start, mem->card_start); 1113 mem->speed, (unsigned long long)mem->static_start,
1114 mem->card_start);
1112 1115
1113 /* copy the struct and modify the copy */ 1116 /* copy the struct and modify the copy */
1114 1117
@@ -1296,7 +1299,7 @@ static int m8xx_remove(struct of_device *ofdev)
1296#ifdef CONFIG_PM 1299#ifdef CONFIG_PM
1297static int m8xx_suspend(struct platform_device *pdev, pm_message_t state) 1300static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
1298{ 1301{
1299 return pcmcia_socket_dev_suspend(&pdev->dev, state); 1302 return pcmcia_socket_dev_suspend(&pdev->dev);
1300} 1303}
1301 1304
1302static int m8xx_resume(struct platform_device *pdev) 1305static int m8xx_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index f3736398900e..68570bc3ac86 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -334,7 +334,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
334 334
335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
336{ 336{
337 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 337 return pcmcia_socket_dev_suspend(&pdev->dev);
338} 338}
339 339
340static int omap_cf_resume(struct platform_device *pdev) 340static int omap_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 32c44040c1e8..30cf71d2ee23 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -881,7 +881,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
881 mutex_lock(&s->skt_mutex); 881 mutex_lock(&s->skt_mutex);
882 pcmcia_validate_mem(s); 882 pcmcia_validate_mem(s);
883 mutex_unlock(&s->skt_mutex); 883 mutex_unlock(&s->skt_mutex);
884 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo.Chains); 884 ret = pccard_validate_cis(s, &buf->cisinfo.Chains);
885 break; 885 break;
886 case DS_SUSPEND_CARD: 886 case DS_SUSPEND_CARD:
887 ret = pcmcia_suspend_card(s); 887 ret = pcmcia_suspend_card(s);
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 8bed1dab9039..70a33468bcd0 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -641,6 +641,12 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
641 if ((ret = pci_enable_device(dev))) 641 if ((ret = pci_enable_device(dev)))
642 goto err_out_free_mem; 642 goto err_out_free_mem;
643 643
644 if (!pci_resource_start(dev, 0)) {
645 printk(KERN_INFO "pd6729: refusing to load the driver "
646 "as the io_base is 0.\n");
647 goto err_out_free_mem;
648 }
649
644 printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge " 650 printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge "
645 "at 0x%llx on irq %d\n", 651 "at 0x%llx on irq %d\n",
646 (unsigned long long)pci_resource_start(dev, 0), dev->irq); 652 (unsigned long long)pci_resource_start(dev, 0), dev->irq);
@@ -758,7 +764,7 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
758#ifdef CONFIG_PM 764#ifdef CONFIG_PM
759static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state) 765static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
760{ 766{
761 return pcmcia_socket_dev_suspend(&dev->dev, state); 767 return pcmcia_socket_dev_suspend(&dev->dev);
762} 768}
763 769
764static int pd6729_socket_resume(struct pci_dev *dev) 770static int pd6729_socket_resume(struct pci_dev *dev)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index c49a7269f6d1..0e35acb1366b 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -300,25 +300,29 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
300 return soc_common_drv_pcmcia_remove(&dev->dev); 300 return soc_common_drv_pcmcia_remove(&dev->dev);
301} 301}
302 302
303static int pxa2xx_drv_pcmcia_suspend(struct platform_device *dev, pm_message_t state) 303static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
304{ 304{
305 return pcmcia_socket_dev_suspend(&dev->dev, state); 305 return pcmcia_socket_dev_suspend(dev);
306} 306}
307 307
308static int pxa2xx_drv_pcmcia_resume(struct platform_device *dev) 308static int pxa2xx_drv_pcmcia_resume(struct device *dev)
309{ 309{
310 pxa2xx_configure_sockets(&dev->dev); 310 pxa2xx_configure_sockets(dev);
311 return pcmcia_socket_dev_resume(&dev->dev); 311 return pcmcia_socket_dev_resume(dev);
312} 312}
313 313
314static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
315 .suspend = pxa2xx_drv_pcmcia_suspend,
316 .resume = pxa2xx_drv_pcmcia_resume,
317};
318
314static struct platform_driver pxa2xx_pcmcia_driver = { 319static struct platform_driver pxa2xx_pcmcia_driver = {
315 .probe = pxa2xx_drv_pcmcia_probe, 320 .probe = pxa2xx_drv_pcmcia_probe,
316 .remove = pxa2xx_drv_pcmcia_remove, 321 .remove = pxa2xx_drv_pcmcia_remove,
317 .suspend = pxa2xx_drv_pcmcia_suspend,
318 .resume = pxa2xx_drv_pcmcia_resume,
319 .driver = { 322 .driver = {
320 .name = "pxa2xx-pcmcia", 323 .name = "pxa2xx-pcmcia",
321 .owner = THIS_MODULE, 324 .owner = THIS_MODULE,
325 .pm = &pxa2xx_drv_pcmcia_pm_ops,
322 }, 326 },
323}; 327};
324 328
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
new file mode 100644
index 000000000000..3a8993ed5621
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -0,0 +1,230 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_palmtc.c
3 *
4 * Driver for Palm Tungsten|C PCMCIA
5 *
6 * Copyright (C) 2008 Alex Osborne <ato@meshy.org>
7 * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19
20#include <asm/mach-types.h>
21#include <mach/palmtc.h>
22#include "soc_common.h"
23
24static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{
26 int ret;
27
28 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1");
29 if (ret)
30 goto err1;
31 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
32 if (ret)
33 goto err2;
34
35 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2");
36 if (ret)
37 goto err2;
38 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
39 if (ret)
40 goto err3;
41
42 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3");
43 if (ret)
44 goto err3;
45 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
46 if (ret)
47 goto err4;
48
49 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST");
50 if (ret)
51 goto err4;
52 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
53 if (ret)
54 goto err5;
55
56 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY");
57 if (ret)
58 goto err5;
59 ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY);
60 if (ret)
61 goto err6;
62
63 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY");
64 if (ret)
65 goto err6;
66 ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
67 if (ret)
68 goto err7;
69
70 skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
71 return 0;
72
73err7:
74 gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
75err6:
76 gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
77err5:
78 gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
79err4:
80 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
81err3:
82 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
83err2:
84 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
85err1:
86 return ret;
87}
88
89static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
90{
91 gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
92 gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
93 gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
94 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
95 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
96 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
97}
98
99static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
100 struct pcmcia_state *state)
101{
102 state->detect = 1; /* always inserted */
103 state->ready = !!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_READY);
104 state->bvd1 = 1;
105 state->bvd2 = 1;
106 state->wrprot = 0;
107 state->vs_3v = 1;
108 state->vs_Xv = 0;
109}
110
111static int palmtc_wifi_powerdown(void)
112{
113 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
114 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
115 mdelay(40);
116 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
117 return 0;
118}
119
120static int palmtc_wifi_powerup(void)
121{
122 int timeout = 50;
123
124 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1);
125 mdelay(50);
126
127 /* Power up the card, 1.8V first, after a while 3.3V */
128 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1);
129 mdelay(100);
130 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1);
131
132 /* Wait till the card is ready */
133 while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) &&
134 timeout) {
135 mdelay(1);
136 timeout--;
137 }
138
139 /* Power down the WiFi in case of error */
140 if (!timeout) {
141 palmtc_wifi_powerdown();
142 return 1;
143 }
144
145 /* Reset the card */
146 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
147 mdelay(20);
148 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0);
149 mdelay(25);
150
151 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
152
153 return 0;
154}
155
156static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
157 const socket_state_t *state)
158{
159 int ret = 1;
160
161 if (state->Vcc == 0)
162 ret = palmtc_wifi_powerdown();
163 else if (state->Vcc == 33)
164 ret = palmtc_wifi_powerup();
165
166 return ret;
167}
168
169static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
170{
171}
172
173static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
174{
175}
176
177static struct pcmcia_low_level palmtc_pcmcia_ops = {
178 .owner = THIS_MODULE,
179
180 .first = 0,
181 .nr = 1,
182
183 .hw_init = palmtc_pcmcia_hw_init,
184 .hw_shutdown = palmtc_pcmcia_hw_shutdown,
185
186 .socket_state = palmtc_pcmcia_socket_state,
187 .configure_socket = palmtc_pcmcia_configure_socket,
188
189 .socket_init = palmtc_pcmcia_socket_init,
190 .socket_suspend = palmtc_pcmcia_socket_suspend,
191};
192
193static struct platform_device *palmtc_pcmcia_device;
194
195static int __init palmtc_pcmcia_init(void)
196{
197 int ret;
198
199 if (!machine_is_palmtc())
200 return -ENODEV;
201
202 palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
203 if (!palmtc_pcmcia_device)
204 return -ENOMEM;
205
206 ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops,
207 sizeof(palmtc_pcmcia_ops));
208
209 if (!ret)
210 ret = platform_device_add(palmtc_pcmcia_device);
211
212 if (ret)
213 platform_device_put(palmtc_pcmcia_device);
214
215 return ret;
216}
217
218static void __exit palmtc_pcmcia_exit(void)
219{
220 platform_device_unregister(palmtc_pcmcia_device);
221}
222
223module_init(palmtc_pcmcia_init);
224module_exit(palmtc_pcmcia_exit);
225
226MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
227 " Marek Vasut <marek.vasut@gmail.com>");
228MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C");
229MODULE_ALIAS("platform:pxa2xx-pcmcia");
230MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9ca22c7aafb2..7039f3cf5b77 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -206,6 +206,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
206 /* First, what does a floating port look like? */ 206 /* First, what does a floating port look like? */
207 b = kzalloc(256, GFP_KERNEL); 207 b = kzalloc(256, GFP_KERNEL);
208 if (!b) { 208 if (!b) {
209 printk("\n");
209 dev_printk(KERN_ERR, &s->dev, 210 dev_printk(KERN_ERR, &s->dev,
210 "do_io_probe: unable to kmalloc 256 bytes"); 211 "do_io_probe: unable to kmalloc 256 bytes");
211 return; 212 return;
@@ -275,7 +276,7 @@ static int readable(struct pcmcia_socket *s, struct resource *res,
275 s->cis_mem.res = res; 276 s->cis_mem.res = res;
276 s->cis_virt = ioremap(res->start, s->map_size); 277 s->cis_virt = ioremap(res->start, s->map_size);
277 if (s->cis_virt) { 278 if (s->cis_virt) {
278 ret = pccard_validate_cis(s, BIND_FN_ALL, count); 279 ret = pccard_validate_cis(s, count);
279 /* invalidate mapping and CIS cache */ 280 /* invalidate mapping and CIS cache */
280 iounmap(s->cis_virt); 281 iounmap(s->cis_virt);
281 s->cis_virt = NULL; 282 s->cis_virt = NULL;
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index f424146a2bc9..ac8aa09ba0da 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
130 .socket_suspend = assabet_pcmcia_socket_suspend, 130 .socket_suspend = assabet_pcmcia_socket_suspend,
131}; 131};
132 132
133int __init pcmcia_assabet_init(struct device *dev) 133int pcmcia_assabet_init(struct device *dev)
134{ 134{
135 int ret = -ENODEV; 135 int ret = -ENODEV;
136 136
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index d8da5ac844e9..2d0e99751530 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -89,7 +89,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, 89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
90 pm_message_t state) 90 pm_message_t state)
91{ 91{
92 return pcmcia_socket_dev_suspend(&dev->dev, state); 92 return pcmcia_socket_dev_suspend(&dev->dev);
93} 93}
94 94
95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev) 95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 4c41e86ccff9..0c76d337815b 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -123,7 +123,7 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
123 .socket_suspend = sa1111_pcmcia_socket_suspend, 123 .socket_suspend = sa1111_pcmcia_socket_suspend,
124}; 124};
125 125
126int __init pcmcia_neponset_init(struct sa1111_dev *sadev) 126int pcmcia_neponset_init(struct sa1111_dev *sadev)
127{ 127{
128 int ret = -ENODEV; 128 int ret = -ENODEV;
129 129
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 401052a21ce8..4be4e172ffa1 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -159,7 +159,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
159 159
160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state) 160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state)
161{ 161{
162 return pcmcia_socket_dev_suspend(&dev->dev, state); 162 return pcmcia_socket_dev_suspend(&dev->dev);
163} 163}
164 164
165static int pcmcia_resume(struct sa1111_dev *dev) 165static int pcmcia_resume(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 163cf98e2386..ef7e9e58782b 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -336,8 +336,9 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m
336 struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock); 336 struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
337 unsigned short speed = map->speed; 337 unsigned short speed = map->speed;
338 338
339 debug(skt, 2, "map %u speed %u start 0x%08x stop 0x%08x\n", 339 debug(skt, 2, "map %u speed %u start 0x%08llx stop 0x%08llx\n",
340 map->map, map->speed, map->start, map->stop); 340 map->map, map->speed, (unsigned long long)map->start,
341 (unsigned long long)map->stop);
341 debug(skt, 2, "flags: %s%s%s%s%s%s%s%s\n", 342 debug(skt, 2, "flags: %s%s%s%s%s%s%s%s\n",
342 (map->flags==0)?"<NONE>":"", 343 (map->flags==0)?"<NONE>":"",
343 (map->flags&MAP_ACTIVE)?"ACTIVE ":"", 344 (map->flags&MAP_ACTIVE)?"ACTIVE ":"",
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index ff9a3bb3c88d..78d5aab542f7 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -300,7 +300,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
300 300
301 if (!(s->state & SOCKET_PRESENT)) 301 if (!(s->state & SOCKET_PRESENT))
302 return -ENODEV; 302 return -ENODEV;
303 if (pccard_validate_cis(s, BIND_FN_ALL, &chains)) 303 if (pccard_validate_cis(s, &chains))
304 return -EIO; 304 return -EIO;
305 if (!chains) 305 if (!chains)
306 return -ENODATA; 306 return -ENODATA;
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 8eb04230fec7..6918849d511e 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -366,7 +366,7 @@ static int __init get_tcic_id(void)
366static int tcic_drv_pcmcia_suspend(struct platform_device *dev, 366static int tcic_drv_pcmcia_suspend(struct platform_device *dev,
367 pm_message_t state) 367 pm_message_t state)
368{ 368{
369 return pcmcia_socket_dev_suspend(&dev->dev, state); 369 return pcmcia_socket_dev_suspend(&dev->dev);
370} 370}
371 371
372static int tcic_drv_pcmcia_resume(struct platform_device *dev) 372static int tcic_drv_pcmcia_resume(struct platform_device *dev)
@@ -732,8 +732,8 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
732 u_short base, len, ioctl; 732 u_short base, len, ioctl;
733 733
734 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " 734 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
735 "%#x-%#x)\n", psock, io->map, io->flags, 735 "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed,
736 io->speed, io->start, io->stop); 736 (unsigned long long)io->start, (unsigned long long)io->stop);
737 if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || 737 if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
738 (io->stop < io->start)) return -EINVAL; 738 (io->stop < io->start)) return -EINVAL;
739 tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); 739 tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT));
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index d4ad50d737b0..c9fcbdc164ea 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -707,7 +707,7 @@ __setup("vrc4171_card=", vrc4171_card_setup);
707static int vrc4171_card_suspend(struct platform_device *dev, 707static int vrc4171_card_suspend(struct platform_device *dev,
708 pm_message_t state) 708 pm_message_t state)
709{ 709{
710 return pcmcia_socket_dev_suspend(&dev->dev, state); 710 return pcmcia_socket_dev_suspend(&dev->dev);
711} 711}
712 712
713static int vrc4171_card_resume(struct platform_device *dev) 713static int vrc4171_card_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b459e87a30ac..8be4cc447a17 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1225,60 +1225,81 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1225} 1225}
1226 1226
1227#ifdef CONFIG_PM 1227#ifdef CONFIG_PM
1228static int yenta_dev_suspend (struct pci_dev *dev, pm_message_t state) 1228static int yenta_dev_suspend_noirq(struct device *dev)
1229{ 1229{
1230 struct yenta_socket *socket = pci_get_drvdata(dev); 1230 struct pci_dev *pdev = to_pci_dev(dev);
1231 struct yenta_socket *socket = pci_get_drvdata(pdev);
1231 int ret; 1232 int ret;
1232 1233
1233 ret = pcmcia_socket_dev_suspend(&dev->dev, state); 1234 ret = pcmcia_socket_dev_suspend(dev);
1234 1235
1235 if (socket) { 1236 if (!socket)
1236 if (socket->type && socket->type->save_state) 1237 return ret;
1237 socket->type->save_state(socket);
1238 1238
1239 /* FIXME: pci_save_state needs to have a better interface */ 1239 if (socket->type && socket->type->save_state)
1240 pci_save_state(dev); 1240 socket->type->save_state(socket);
1241 pci_read_config_dword(dev, 16*4, &socket->saved_state[0]);
1242 pci_read_config_dword(dev, 17*4, &socket->saved_state[1]);
1243 pci_disable_device(dev);
1244 1241
1245 /* 1242 pci_save_state(pdev);
1246 * Some laptops (IBM T22) do not like us putting the Cardbus 1243 pci_read_config_dword(pdev, 16*4, &socket->saved_state[0]);
1247 * bridge into D3. At a guess, some other laptop will 1244 pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
1248 * probably require this, so leave it commented out for now. 1245 pci_disable_device(pdev);
1249 */ 1246
1250 /* pci_set_power_state(dev, 3); */ 1247 /*
1251 } 1248 * Some laptops (IBM T22) do not like us putting the Cardbus
1249 * bridge into D3. At a guess, some other laptop will
1250 * probably require this, so leave it commented out for now.
1251 */
1252 /* pci_set_power_state(dev, 3); */
1252 1253
1253 return ret; 1254 return ret;
1254} 1255}
1255 1256
1256 1257static int yenta_dev_resume_noirq(struct device *dev)
1257static int yenta_dev_resume (struct pci_dev *dev)
1258{ 1258{
1259 struct yenta_socket *socket = pci_get_drvdata(dev); 1259 struct pci_dev *pdev = to_pci_dev(dev);
1260 struct yenta_socket *socket = pci_get_drvdata(pdev);
1261 int ret;
1260 1262
1261 if (socket) { 1263 if (!socket)
1262 int rc; 1264 return 0;
1263 1265
1264 pci_set_power_state(dev, 0); 1266 pci_write_config_dword(pdev, 16*4, socket->saved_state[0]);
1265 /* FIXME: pci_restore_state needs to have a better interface */ 1267 pci_write_config_dword(pdev, 17*4, socket->saved_state[1]);
1266 pci_restore_state(dev);
1267 pci_write_config_dword(dev, 16*4, socket->saved_state[0]);
1268 pci_write_config_dword(dev, 17*4, socket->saved_state[1]);
1269 1268
1270 rc = pci_enable_device(dev); 1269 ret = pci_enable_device(pdev);
1271 if (rc) 1270 if (ret)
1272 return rc; 1271 return ret;
1273 1272
1274 pci_set_master(dev); 1273 pci_set_master(pdev);
1275 1274
1276 if (socket->type && socket->type->restore_state) 1275 if (socket->type && socket->type->restore_state)
1277 socket->type->restore_state(socket); 1276 socket->type->restore_state(socket);
1278 } 1277
1278 pcmcia_socket_dev_early_resume(dev);
1279 return 0;
1280}
1279 1281
1280 return pcmcia_socket_dev_resume(&dev->dev); 1282static int yenta_dev_resume(struct device *dev)
1283{
1284 pcmcia_socket_dev_late_resume(dev);
1285 return 0;
1281} 1286}
1287
1288static struct dev_pm_ops yenta_pm_ops = {
1289 .suspend_noirq = yenta_dev_suspend_noirq,
1290 .resume_noirq = yenta_dev_resume_noirq,
1291 .resume = yenta_dev_resume,
1292 .freeze_noirq = yenta_dev_suspend_noirq,
1293 .thaw_noirq = yenta_dev_resume_noirq,
1294 .thaw = yenta_dev_resume,
1295 .poweroff_noirq = yenta_dev_suspend_noirq,
1296 .restore_noirq = yenta_dev_resume_noirq,
1297 .restore = yenta_dev_resume,
1298};
1299
1300#define YENTA_PM_OPS (&yenta_pm_ops)
1301#else
1302#define YENTA_PM_OPS NULL
1282#endif 1303#endif
1283 1304
1284#define CB_ID(vend,dev,type) \ 1305#define CB_ID(vend,dev,type) \
@@ -1376,10 +1397,7 @@ static struct pci_driver yenta_cardbus_driver = {
1376 .id_table = yenta_table, 1397 .id_table = yenta_table,
1377 .probe = yenta_probe, 1398 .probe = yenta_probe,
1378 .remove = __devexit_p(yenta_close), 1399 .remove = __devexit_p(yenta_close),
1379#ifdef CONFIG_PM 1400 .driver.pm = YENTA_PM_OPS,
1380 .suspend = yenta_dev_suspend,
1381 .resume = yenta_dev_resume,
1382#endif
1383}; 1401};
1384 1402
1385 1403
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index da3c08b3dcc1..4226e5352738 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -350,13 +350,14 @@ static const struct rfkill_ops eeepc_rfkill_ops = {
350 .set_block = eeepc_rfkill_set, 350 .set_block = eeepc_rfkill_set,
351}; 351};
352 352
353static void __init eeepc_enable_camera(void) 353static void __devinit eeepc_enable_camera(void)
354{ 354{
355 /* 355 /*
356 * If the following call to set_acpi() fails, it's because there's no 356 * If the following call to set_acpi() fails, it's because there's no
357 * camera so we can ignore the error. 357 * camera so we can ignore the error.
358 */ 358 */
359 set_acpi(CM_ASL_CAMERA, 1); 359 if (get_acpi(CM_ASL_CAMERA) == 0)
360 set_acpi(CM_ASL_CAMERA, 1);
360} 361}
361 362
362/* 363/*
@@ -624,7 +625,7 @@ static int notify_brn(void)
624 struct backlight_device *bd = eeepc_backlight_device; 625 struct backlight_device *bd = eeepc_backlight_device;
625 if (bd) { 626 if (bd) {
626 int old = bd->props.brightness; 627 int old = bd->props.brightness;
627 bd->props.brightness = read_brightness(bd); 628 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
628 return old; 629 return old;
629 } 630 }
630 return -1; 631 return -1;
@@ -1189,7 +1190,7 @@ static int eeepc_input_init(struct device *dev)
1189 return 0; 1190 return 0;
1190} 1191}
1191 1192
1192static int eeepc_hotk_add(struct acpi_device *device) 1193static int __devinit eeepc_hotk_add(struct acpi_device *device)
1193{ 1194{
1194 struct device *dev; 1195 struct device *dev;
1195 int result; 1196 int result;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index f35aee5c2149..bcd4ba8be7db 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -944,7 +944,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
944 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); 944 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
945 struct input_dev *input = fujitsu_hotkey->input; 945 struct input_dev *input = fujitsu_hotkey->input;
946 946
947#ifdef CONFIG_LEDS_CLASS 947#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
948 if (fujitsu_hotkey->logolamp_registered) 948 if (fujitsu_hotkey->logolamp_registered)
949 led_classdev_unregister(&logolamp_led); 949 led_classdev_unregister(&logolamp_led);
950 950
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index afdbdaaf80cb..a2a742c8ff7e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1211,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device)
1211 } 1211 }
1212 } 1212 }
1213 1213
1214 /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1
1215 * should be respected as we already checked for the device presence above */
1216 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
1217 dprintk("Invoking _INI\n");
1218 if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
1219 NULL, NULL)))
1220 dprintk("_INI Method failed\n");
1221 }
1222
1223 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 1214 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1224 &handle))) { 1215 &handle))) {
1225 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 1216 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
@@ -1399,27 +1390,20 @@ struct sonypi_eventtypes {
1399 struct sonypi_event *events; 1390 struct sonypi_event *events;
1400}; 1391};
1401 1392
1402struct device_ctrl { 1393struct sony_pic_dev {
1394 struct acpi_device *acpi_dev;
1395 struct sony_pic_irq *cur_irq;
1396 struct sony_pic_ioport *cur_ioport;
1397 struct list_head interrupts;
1398 struct list_head ioports;
1399 struct mutex lock;
1400 struct sonypi_eventtypes *event_types;
1401 int (*handle_irq)(const u8, const u8);
1403 int model; 1402 int model;
1404 int (*handle_irq)(const u8, const u8);
1405 u16 evport_offset; 1403 u16 evport_offset;
1406 u8 has_camera; 1404 u8 camera_power;
1407 u8 has_bluetooth; 1405 u8 bluetooth_power;
1408 u8 has_wwan; 1406 u8 wwan_power;
1409 struct sonypi_eventtypes *event_types;
1410};
1411
1412struct sony_pic_dev {
1413 struct device_ctrl *control;
1414 struct acpi_device *acpi_dev;
1415 struct sony_pic_irq *cur_irq;
1416 struct sony_pic_ioport *cur_ioport;
1417 struct list_head interrupts;
1418 struct list_head ioports;
1419 struct mutex lock;
1420 u8 camera_power;
1421 u8 bluetooth_power;
1422 u8 wwan_power;
1423}; 1407};
1424 1408
1425static struct sony_pic_dev spic_dev = { 1409static struct sony_pic_dev spic_dev = {
@@ -1427,6 +1411,8 @@ static struct sony_pic_dev spic_dev = {
1427 .ioports = LIST_HEAD_INIT(spic_dev.ioports), 1411 .ioports = LIST_HEAD_INIT(spic_dev.ioports),
1428}; 1412};
1429 1413
1414static int spic_drv_registered;
1415
1430/* Event masks */ 1416/* Event masks */
1431#define SONYPI_JOGGER_MASK 0x00000001 1417#define SONYPI_JOGGER_MASK 0x00000001
1432#define SONYPI_CAPTURE_MASK 0x00000002 1418#define SONYPI_CAPTURE_MASK 0x00000002
@@ -1724,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev)
1724 return 1; 1710 return 1;
1725} 1711}
1726 1712
1727static struct device_ctrl spic_types[] = {
1728 {
1729 .model = SONYPI_DEVICE_TYPE1,
1730 .handle_irq = NULL,
1731 .evport_offset = SONYPI_TYPE1_OFFSET,
1732 .event_types = type1_events,
1733 },
1734 {
1735 .model = SONYPI_DEVICE_TYPE2,
1736 .handle_irq = NULL,
1737 .evport_offset = SONYPI_TYPE2_OFFSET,
1738 .event_types = type2_events,
1739 },
1740 {
1741 .model = SONYPI_DEVICE_TYPE3,
1742 .handle_irq = type3_handle_irq,
1743 .evport_offset = SONYPI_TYPE3_OFFSET,
1744 .event_types = type3_events,
1745 },
1746};
1747
1748static void sony_pic_detect_device_type(struct sony_pic_dev *dev) 1713static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1749{ 1714{
1750 struct pci_dev *pcidev; 1715 struct pci_dev *pcidev;
@@ -1752,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1752 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1717 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1753 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 1718 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
1754 if (pcidev) { 1719 if (pcidev) {
1755 dev->control = &spic_types[0]; 1720 dev->model = SONYPI_DEVICE_TYPE1;
1721 dev->evport_offset = SONYPI_TYPE1_OFFSET;
1722 dev->event_types = type1_events;
1756 goto out; 1723 goto out;
1757 } 1724 }
1758 1725
1759 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1726 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1760 PCI_DEVICE_ID_INTEL_ICH6_1, NULL); 1727 PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
1761 if (pcidev) { 1728 if (pcidev) {
1762 dev->control = &spic_types[2]; 1729 dev->model = SONYPI_DEVICE_TYPE2;
1730 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1731 dev->event_types = type2_events;
1763 goto out; 1732 goto out;
1764 } 1733 }
1765 1734
1766 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1735 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1767 PCI_DEVICE_ID_INTEL_ICH7_1, NULL); 1736 PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
1768 if (pcidev) { 1737 if (pcidev) {
1769 dev->control = &spic_types[2]; 1738 dev->model = SONYPI_DEVICE_TYPE3;
1739 dev->handle_irq = type3_handle_irq;
1740 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1741 dev->event_types = type3_events;
1770 goto out; 1742 goto out;
1771 } 1743 }
1772 1744
1773 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1745 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1774 PCI_DEVICE_ID_INTEL_ICH8_4, NULL); 1746 PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
1775 if (pcidev) { 1747 if (pcidev) {
1776 dev->control = &spic_types[2]; 1748 dev->model = SONYPI_DEVICE_TYPE3;
1749 dev->handle_irq = type3_handle_irq;
1750 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1751 dev->event_types = type3_events;
1777 goto out; 1752 goto out;
1778 } 1753 }
1779 1754
1780 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1755 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1781 PCI_DEVICE_ID_INTEL_ICH9_1, NULL); 1756 PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
1782 if (pcidev) { 1757 if (pcidev) {
1783 dev->control = &spic_types[2]; 1758 dev->model = SONYPI_DEVICE_TYPE3;
1759 dev->handle_irq = type3_handle_irq;
1760 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1761 dev->event_types = type3_events;
1784 goto out; 1762 goto out;
1785 } 1763 }
1786 1764
1787 /* default */ 1765 /* default */
1788 dev->control = &spic_types[1]; 1766 dev->model = SONYPI_DEVICE_TYPE2;
1767 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1768 dev->event_types = type2_events;
1789 1769
1790out: 1770out:
1791 if (pcidev) 1771 if (pcidev)
1792 pci_dev_put(pcidev); 1772 pci_dev_put(pcidev);
1793 1773
1794 printk(KERN_INFO DRV_PFX "detected Type%d model\n", 1774 printk(KERN_INFO DRV_PFX "detected Type%d model\n",
1795 dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : 1775 dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
1796 dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); 1776 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
1797} 1777}
1798 1778
1799/* camera tests and poweron/poweroff */ 1779/* camera tests and poweron/poweroff */
@@ -2566,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device,
2566 buffer.pointer = resource; 2546 buffer.pointer = resource;
2567 2547
2568 /* setup Type 1 resources */ 2548 /* setup Type 1 resources */
2569 if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { 2549 if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
2570 2550
2571 /* setup io resources */ 2551 /* setup io resources */
2572 resource->res1.type = ACPI_RESOURCE_TYPE_IO; 2552 resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2649,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2649 data_mask = inb_p(dev->cur_ioport->io2.minimum); 2629 data_mask = inb_p(dev->cur_ioport->io2.minimum);
2650 else 2630 else
2651 data_mask = inb_p(dev->cur_ioport->io1.minimum + 2631 data_mask = inb_p(dev->cur_ioport->io1.minimum +
2652 dev->control->evport_offset); 2632 dev->evport_offset);
2653 2633
2654 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2634 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2655 ev, data_mask, dev->cur_ioport->io1.minimum, 2635 ev, data_mask, dev->cur_ioport->io1.minimum,
2656 dev->control->evport_offset); 2636 dev->evport_offset);
2657 2637
2658 if (ev == 0x00 || ev == 0xff) 2638 if (ev == 0x00 || ev == 0xff)
2659 return IRQ_HANDLED; 2639 return IRQ_HANDLED;
2660 2640
2661 for (i = 0; dev->control->event_types[i].mask; i++) { 2641 for (i = 0; dev->event_types[i].mask; i++) {
2662 2642
2663 if ((data_mask & dev->control->event_types[i].data) != 2643 if ((data_mask & dev->event_types[i].data) !=
2664 dev->control->event_types[i].data) 2644 dev->event_types[i].data)
2665 continue; 2645 continue;
2666 2646
2667 if (!(mask & dev->control->event_types[i].mask)) 2647 if (!(mask & dev->event_types[i].mask))
2668 continue; 2648 continue;
2669 2649
2670 for (j = 0; dev->control->event_types[i].events[j].event; j++) { 2650 for (j = 0; dev->event_types[i].events[j].event; j++) {
2671 if (ev == dev->control->event_types[i].events[j].data) { 2651 if (ev == dev->event_types[i].events[j].data) {
2672 device_event = 2652 device_event =
2673 dev->control-> 2653 dev->event_types[i].events[j].event;
2674 event_types[i].events[j].event;
2675 goto found; 2654 goto found;
2676 } 2655 }
2677 } 2656 }
@@ -2679,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2679 /* Still not able to decode the event try to pass 2658 /* Still not able to decode the event try to pass
2680 * it over to the minidriver 2659 * it over to the minidriver
2681 */ 2660 */
2682 if (dev->control->handle_irq && 2661 if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
2683 dev->control->handle_irq(data_mask, ev) == 0)
2684 return IRQ_HANDLED; 2662 return IRQ_HANDLED;
2685 2663
2686 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2664 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2687 ev, data_mask, dev->cur_ioport->io1.minimum, 2665 ev, data_mask, dev->cur_ioport->io1.minimum,
2688 dev->control->evport_offset); 2666 dev->evport_offset);
2689 return IRQ_HANDLED; 2667 return IRQ_HANDLED;
2690 2668
2691found: 2669found:
@@ -2816,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device)
2816 /* request IRQ */ 2794 /* request IRQ */
2817 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { 2795 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
2818 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, 2796 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
2819 IRQF_SHARED, "sony-laptop", &spic_dev)) { 2797 IRQF_DISABLED, "sony-laptop", &spic_dev)) {
2820 dprintk("IRQ: %d - triggering: %d - " 2798 dprintk("IRQ: %d - triggering: %d - "
2821 "polarity: %d - shr: %d\n", 2799 "polarity: %d - shr: %d\n",
2822 irq->irq.interrupts[0], 2800 irq->irq.interrupts[0],
@@ -2949,6 +2927,7 @@ static int __init sony_laptop_init(void)
2949 "Unable to register SPIC driver."); 2927 "Unable to register SPIC driver.");
2950 goto out; 2928 goto out;
2951 } 2929 }
2930 spic_drv_registered = 1;
2952 } 2931 }
2953 2932
2954 result = acpi_bus_register_driver(&sony_nc_driver); 2933 result = acpi_bus_register_driver(&sony_nc_driver);
@@ -2960,7 +2939,7 @@ static int __init sony_laptop_init(void)
2960 return 0; 2939 return 0;
2961 2940
2962out_unregister_pic: 2941out_unregister_pic:
2963 if (!no_spic) 2942 if (spic_drv_registered)
2964 acpi_bus_unregister_driver(&sony_pic_driver); 2943 acpi_bus_unregister_driver(&sony_pic_driver);
2965out: 2944out:
2966 return result; 2945 return result;
@@ -2969,7 +2948,7 @@ out:
2969static void __exit sony_laptop_exit(void) 2948static void __exit sony_laptop_exit(void)
2970{ 2949{
2971 acpi_bus_unregister_driver(&sony_nc_driver); 2950 acpi_bus_unregister_driver(&sony_nc_driver);
2972 if (!no_spic) 2951 if (spic_drv_registered)
2973 acpi_bus_unregister_driver(&sony_pic_driver); 2952 acpi_bus_unregister_driver(&sony_pic_driver);
2974} 2953}
2975 2954
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f78d27503925..d93108d148fc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.23" 24#define TPACPI_VERSION "0.23"
25#define TPACPI_SYSFS_VERSION 0x020400 25#define TPACPI_SYSFS_VERSION 0x020500
26 26
27/* 27/*
28 * Changelog: 28 * Changelog:
@@ -145,6 +145,51 @@ enum {
145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */ 145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
146}; 146};
147 147
148/* HKEY events */
149enum tpacpi_hkey_event_t {
150 /* Hotkey-related */
151 TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
152 TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
153 TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
154 TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
155 TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
156 TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
157
158 /* Reasons for waking up from S3/S4 */
159 TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
160 TP_HKEY_EV_WKUP_S4_UNDOCK = 0x2404, /* undock requested, S4 */
161 TP_HKEY_EV_WKUP_S3_BAYEJ = 0x2305, /* bay ejection req, S3 */
162 TP_HKEY_EV_WKUP_S4_BAYEJ = 0x2405, /* bay ejection req, S4 */
163 TP_HKEY_EV_WKUP_S3_BATLOW = 0x2313, /* battery empty, S3 */
164 TP_HKEY_EV_WKUP_S4_BATLOW = 0x2413, /* battery empty, S4 */
165
166 /* Auto-sleep after eject request */
167 TP_HKEY_EV_BAYEJ_ACK = 0x3003, /* bay ejection complete */
168 TP_HKEY_EV_UNDOCK_ACK = 0x4003, /* undock complete */
169
170 /* Misc bay events */
171 TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
172
173 /* User-interface events */
174 TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
175 TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
176 TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
177 TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
178 TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
179 TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
180 TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
181
182 /* Thermal events */
183 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
184 TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
185 TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
186 TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
187 TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
188
189 /* Misc */
190 TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
191};
192
148/**************************************************************************** 193/****************************************************************************
149 * Main driver 194 * Main driver
150 */ 195 */
@@ -1848,6 +1893,27 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
1848 * Hotkey subdriver 1893 * Hotkey subdriver
1849 */ 1894 */
1850 1895
1896/*
1897 * ThinkPad firmware event model
1898 *
1899 * The ThinkPad firmware has two main event interfaces: normal ACPI
1900 * notifications (which follow the ACPI standard), and a private event
1901 * interface.
1902 *
1903 * The private event interface also issues events for the hotkeys. As
1904 * the driver gained features, the event handling code ended up being
1905 * built around the hotkey subdriver. This will need to be refactored
1906 * to a more formal event API eventually.
1907 *
1908 * Some "hotkeys" are actually supposed to be used as event reports,
1909 * such as "brightness has changed", "volume has changed", depending on
1910 * the ThinkPad model and how the firmware is operating.
1911 *
1912 * Unlike other classes, hotkey-class events have mask/unmask control on
1913 * non-ancient firmware. However, how it behaves changes a lot with the
1914 * firmware model and version.
1915 */
1916
1851enum { /* hot key scan codes (derived from ACPI DSDT) */ 1917enum { /* hot key scan codes (derived from ACPI DSDT) */
1852 TP_ACPI_HOTKEYSCAN_FNF1 = 0, 1918 TP_ACPI_HOTKEYSCAN_FNF1 = 0,
1853 TP_ACPI_HOTKEYSCAN_FNF2, 1919 TP_ACPI_HOTKEYSCAN_FNF2,
@@ -1875,7 +1941,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
1875 TP_ACPI_HOTKEYSCAN_THINKPAD, 1941 TP_ACPI_HOTKEYSCAN_THINKPAD,
1876}; 1942};
1877 1943
1878enum { /* Keys available through NVRAM polling */ 1944enum { /* Keys/events available through NVRAM polling */
1879 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U, 1945 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
1880 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U, 1946 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
1881}; 1947};
@@ -1930,8 +1996,11 @@ static struct task_struct *tpacpi_hotkey_task;
1930static struct mutex hotkey_thread_mutex; 1996static struct mutex hotkey_thread_mutex;
1931 1997
1932/* 1998/*
1933 * Acquire mutex to write poller control variables. 1999 * Acquire mutex to write poller control variables as an
1934 * Increment hotkey_config_change when changing them. 2000 * atomic block.
2001 *
2002 * Increment hotkey_config_change when changing them if you
2003 * want the kthread to forget old state.
1935 * 2004 *
1936 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END 2005 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
1937 */ 2006 */
@@ -1942,6 +2011,11 @@ static unsigned int hotkey_config_change;
1942 * hotkey poller control variables 2011 * hotkey poller control variables
1943 * 2012 *
1944 * Must be atomic or readers will also need to acquire mutex 2013 * Must be atomic or readers will also need to acquire mutex
2014 *
2015 * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
2016 * should be used only when the changes need to be taken as
2017 * a block, OR when one needs to force the kthread to forget
2018 * old state.
1945 */ 2019 */
1946static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */ 2020static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
1947static unsigned int hotkey_poll_freq = 10; /* Hz */ 2021static unsigned int hotkey_poll_freq = 10; /* Hz */
@@ -1972,10 +2046,12 @@ static enum { /* Reasons for waking up */
1972 2046
1973static int hotkey_autosleep_ack; 2047static int hotkey_autosleep_ack;
1974 2048
1975static u32 hotkey_orig_mask; 2049static u32 hotkey_orig_mask; /* events the BIOS had enabled */
1976static u32 hotkey_all_mask; 2050static u32 hotkey_all_mask; /* all events supported in fw */
1977static u32 hotkey_reserved_mask; 2051static u32 hotkey_reserved_mask; /* events better left disabled */
1978static u32 hotkey_mask; 2052static u32 hotkey_driver_mask; /* events needed by the driver */
2053static u32 hotkey_user_mask; /* events visible to userspace */
2054static u32 hotkey_acpi_mask; /* events enabled in firmware */
1979 2055
1980static unsigned int hotkey_report_mode; 2056static unsigned int hotkey_report_mode;
1981 2057
@@ -1983,6 +2059,9 @@ static u16 *hotkey_keycode_map;
1983 2059
1984static struct attribute_set *hotkey_dev_attributes; 2060static struct attribute_set *hotkey_dev_attributes;
1985 2061
2062static void tpacpi_driver_event(const unsigned int hkey_event);
2063static void hotkey_driver_event(const unsigned int scancode);
2064
1986/* HKEY.MHKG() return bits */ 2065/* HKEY.MHKG() return bits */
1987#define TP_HOTKEY_TABLET_MASK (1 << 3) 2066#define TP_HOTKEY_TABLET_MASK (1 << 3)
1988 2067
@@ -2017,24 +2096,53 @@ static int hotkey_get_tablet_mode(int *status)
2017} 2096}
2018 2097
2019/* 2098/*
2099 * Reads current event mask from firmware, and updates
2100 * hotkey_acpi_mask accordingly. Also resets any bits
2101 * from hotkey_user_mask that are unavailable to be
2102 * delivered (shadow requirement of the userspace ABI).
2103 *
2020 * Call with hotkey_mutex held 2104 * Call with hotkey_mutex held
2021 */ 2105 */
2022static int hotkey_mask_get(void) 2106static int hotkey_mask_get(void)
2023{ 2107{
2024 u32 m = 0;
2025
2026 if (tp_features.hotkey_mask) { 2108 if (tp_features.hotkey_mask) {
2109 u32 m = 0;
2110
2027 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d")) 2111 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
2028 return -EIO; 2112 return -EIO;
2113
2114 hotkey_acpi_mask = m;
2115 } else {
2116 /* no mask support doesn't mean no event support... */
2117 hotkey_acpi_mask = hotkey_all_mask;
2029 } 2118 }
2030 HOTKEY_CONFIG_CRITICAL_START 2119
2031 hotkey_mask = m | (hotkey_source_mask & hotkey_mask); 2120 /* sync userspace-visible mask */
2032 HOTKEY_CONFIG_CRITICAL_END 2121 hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
2033 2122
2034 return 0; 2123 return 0;
2035} 2124}
2036 2125
2126void static hotkey_mask_warn_incomplete_mask(void)
2127{
2128 /* log only what the user can fix... */
2129 const u32 wantedmask = hotkey_driver_mask &
2130 ~(hotkey_acpi_mask | hotkey_source_mask) &
2131 (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
2132
2133 if (wantedmask)
2134 printk(TPACPI_NOTICE
2135 "required events 0x%08x not enabled!\n",
2136 wantedmask);
2137}
2138
2037/* 2139/*
2140 * Set the firmware mask when supported
2141 *
2142 * Also calls hotkey_mask_get to update hotkey_acpi_mask.
2143 *
2144 * NOTE: does not set bits in hotkey_user_mask, but may reset them.
2145 *
2038 * Call with hotkey_mutex held 2146 * Call with hotkey_mutex held
2039 */ 2147 */
2040static int hotkey_mask_set(u32 mask) 2148static int hotkey_mask_set(u32 mask)
@@ -2042,66 +2150,100 @@ static int hotkey_mask_set(u32 mask)
2042 int i; 2150 int i;
2043 int rc = 0; 2151 int rc = 0;
2044 2152
2045 if (tp_features.hotkey_mask) { 2153 const u32 fwmask = mask & ~hotkey_source_mask;
2046 if (!tp_warned.hotkey_mask_ff &&
2047 (mask == 0xffff || mask == 0xffffff ||
2048 mask == 0xffffffff)) {
2049 tp_warned.hotkey_mask_ff = 1;
2050 printk(TPACPI_NOTICE
2051 "setting the hotkey mask to 0x%08x is likely "
2052 "not the best way to go about it\n", mask);
2053 printk(TPACPI_NOTICE
2054 "please consider using the driver defaults, "
2055 "and refer to up-to-date thinkpad-acpi "
2056 "documentation\n");
2057 }
2058 2154
2059 HOTKEY_CONFIG_CRITICAL_START 2155 if (tp_features.hotkey_mask) {
2060 for (i = 0; i < 32; i++) { 2156 for (i = 0; i < 32; i++) {
2061 u32 m = 1 << i;
2062 /* enable in firmware mask only keys not in NVRAM
2063 * mode, but enable the key in the cached hotkey_mask
2064 * regardless of mode, or the key will end up
2065 * disabled by hotkey_mask_get() */
2066 if (!acpi_evalf(hkey_handle, 2157 if (!acpi_evalf(hkey_handle,
2067 NULL, "MHKM", "vdd", i + 1, 2158 NULL, "MHKM", "vdd", i + 1,
2068 !!((mask & ~hotkey_source_mask) & m))) { 2159 !!(mask & (1 << i)))) {
2069 rc = -EIO; 2160 rc = -EIO;
2070 break; 2161 break;
2071 } else {
2072 hotkey_mask = (hotkey_mask & ~m) | (mask & m);
2073 } 2162 }
2074 } 2163 }
2075 HOTKEY_CONFIG_CRITICAL_END 2164 }
2076 2165
2077 /* hotkey_mask_get must be called unconditionally below */ 2166 /*
2078 if (!hotkey_mask_get() && !rc && 2167 * We *must* make an inconditional call to hotkey_mask_get to
2079 (hotkey_mask & ~hotkey_source_mask) != 2168 * refresh hotkey_acpi_mask and update hotkey_user_mask
2080 (mask & ~hotkey_source_mask)) { 2169 *
2081 printk(TPACPI_NOTICE 2170 * Take the opportunity to also log when we cannot _enable_
2082 "requested hot key mask 0x%08x, but " 2171 * a given event.
2083 "firmware forced it to 0x%08x\n", 2172 */
2084 mask, hotkey_mask); 2173 if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
2085 } 2174 printk(TPACPI_NOTICE
2086 } else { 2175 "asked for hotkey mask 0x%08x, but "
2087#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2176 "firmware forced it to 0x%08x\n",
2088 HOTKEY_CONFIG_CRITICAL_START 2177 fwmask, hotkey_acpi_mask);
2089 hotkey_mask = mask & hotkey_source_mask;
2090 HOTKEY_CONFIG_CRITICAL_END
2091 hotkey_mask_get();
2092 if (hotkey_mask != mask) {
2093 printk(TPACPI_NOTICE
2094 "requested hot key mask 0x%08x, "
2095 "forced to 0x%08x (NVRAM poll mask is "
2096 "0x%08x): no firmware mask support\n",
2097 mask, hotkey_mask, hotkey_source_mask);
2098 }
2099#else
2100 hotkey_mask_get();
2101 rc = -ENXIO;
2102#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
2103 } 2178 }
2104 2179
2180 hotkey_mask_warn_incomplete_mask();
2181
2182 return rc;
2183}
2184
2185/*
2186 * Sets hotkey_user_mask and tries to set the firmware mask
2187 *
2188 * Call with hotkey_mutex held
2189 */
2190static int hotkey_user_mask_set(const u32 mask)
2191{
2192 int rc;
2193
2194 /* Give people a chance to notice they are doing something that
2195 * is bound to go boom on their users sooner or later */
2196 if (!tp_warned.hotkey_mask_ff &&
2197 (mask == 0xffff || mask == 0xffffff ||
2198 mask == 0xffffffff)) {
2199 tp_warned.hotkey_mask_ff = 1;
2200 printk(TPACPI_NOTICE
2201 "setting the hotkey mask to 0x%08x is likely "
2202 "not the best way to go about it\n", mask);
2203 printk(TPACPI_NOTICE
2204 "please consider using the driver defaults, "
2205 "and refer to up-to-date thinkpad-acpi "
2206 "documentation\n");
2207 }
2208
2209 /* Try to enable what the user asked for, plus whatever we need.
2210 * this syncs everything but won't enable bits in hotkey_user_mask */
2211 rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
2212
2213 /* Enable the available bits in hotkey_user_mask */
2214 hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
2215
2216 return rc;
2217}
2218
2219/*
2220 * Sets the driver hotkey mask.
2221 *
2222 * Can be called even if the hotkey subdriver is inactive
2223 */
2224static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2225{
2226 int rc;
2227
2228 /* Do the right thing if hotkey_init has not been called yet */
2229 if (!tp_features.hotkey) {
2230 hotkey_driver_mask = mask;
2231 return 0;
2232 }
2233
2234 mutex_lock(&hotkey_mutex);
2235
2236 HOTKEY_CONFIG_CRITICAL_START
2237 hotkey_driver_mask = mask;
2238#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2239 hotkey_source_mask |= (mask & ~hotkey_all_mask);
2240#endif
2241 HOTKEY_CONFIG_CRITICAL_END
2242
2243 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
2244 ~hotkey_source_mask);
2245 mutex_unlock(&hotkey_mutex);
2246
2105 return rc; 2247 return rc;
2106} 2248}
2107 2249
@@ -2137,11 +2279,10 @@ static void tpacpi_input_send_tabletsw(void)
2137 } 2279 }
2138} 2280}
2139 2281
2140static void tpacpi_input_send_key(unsigned int scancode) 2282/* Do NOT call without validating scancode first */
2283static void tpacpi_input_send_key(const unsigned int scancode)
2141{ 2284{
2142 unsigned int keycode; 2285 const unsigned int keycode = hotkey_keycode_map[scancode];
2143
2144 keycode = hotkey_keycode_map[scancode];
2145 2286
2146 if (keycode != KEY_RESERVED) { 2287 if (keycode != KEY_RESERVED) {
2147 mutex_lock(&tpacpi_inputdev_send_mutex); 2288 mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -2162,19 +2303,28 @@ static void tpacpi_input_send_key(unsigned int scancode)
2162 } 2303 }
2163} 2304}
2164 2305
2306/* Do NOT call without validating scancode first */
2307static void tpacpi_input_send_key_masked(const unsigned int scancode)
2308{
2309 hotkey_driver_event(scancode);
2310 if (hotkey_user_mask & (1 << scancode))
2311 tpacpi_input_send_key(scancode);
2312}
2313
2165#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2314#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2166static struct tp_acpi_drv_struct ibm_hotkey_acpidriver; 2315static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
2167 2316
2317/* Do NOT call without validating scancode first */
2168static void tpacpi_hotkey_send_key(unsigned int scancode) 2318static void tpacpi_hotkey_send_key(unsigned int scancode)
2169{ 2319{
2170 tpacpi_input_send_key(scancode); 2320 tpacpi_input_send_key_masked(scancode);
2171 if (hotkey_report_mode < 2) { 2321 if (hotkey_report_mode < 2) {
2172 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device, 2322 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
2173 0x80, 0x1001 + scancode); 2323 0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
2174 } 2324 }
2175} 2325}
2176 2326
2177static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m) 2327static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
2178{ 2328{
2179 u8 d; 2329 u8 d;
2180 2330
@@ -2210,21 +2360,24 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
2210 } 2360 }
2211} 2361}
2212 2362
2363static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2364 struct tp_nvram_state *newn,
2365 const u32 event_mask)
2366{
2367
2213#define TPACPI_COMPARE_KEY(__scancode, __member) \ 2368#define TPACPI_COMPARE_KEY(__scancode, __member) \
2214 do { \ 2369 do { \
2215 if ((mask & (1 << __scancode)) && \ 2370 if ((event_mask & (1 << __scancode)) && \
2216 oldn->__member != newn->__member) \ 2371 oldn->__member != newn->__member) \
2217 tpacpi_hotkey_send_key(__scancode); \ 2372 tpacpi_hotkey_send_key(__scancode); \
2218 } while (0) 2373 } while (0)
2219 2374
2220#define TPACPI_MAY_SEND_KEY(__scancode) \ 2375#define TPACPI_MAY_SEND_KEY(__scancode) \
2221 do { if (mask & (1 << __scancode)) \ 2376 do { \
2222 tpacpi_hotkey_send_key(__scancode); } while (0) 2377 if (event_mask & (1 << __scancode)) \
2378 tpacpi_hotkey_send_key(__scancode); \
2379 } while (0)
2223 2380
2224static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2225 struct tp_nvram_state *newn,
2226 u32 mask)
2227{
2228 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); 2381 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
2229 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); 2382 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
2230 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); 2383 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2270,15 +2423,22 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2270 } 2423 }
2271 } 2424 }
2272 } 2425 }
2273}
2274 2426
2275#undef TPACPI_COMPARE_KEY 2427#undef TPACPI_COMPARE_KEY
2276#undef TPACPI_MAY_SEND_KEY 2428#undef TPACPI_MAY_SEND_KEY
2429}
2277 2430
2431/*
2432 * Polling driver
2433 *
2434 * We track all events in hotkey_source_mask all the time, since
2435 * most of them are edge-based. We only issue those requested by
2436 * hotkey_user_mask or hotkey_driver_mask, though.
2437 */
2278static int hotkey_kthread(void *data) 2438static int hotkey_kthread(void *data)
2279{ 2439{
2280 struct tp_nvram_state s[2]; 2440 struct tp_nvram_state s[2];
2281 u32 mask; 2441 u32 poll_mask, event_mask;
2282 unsigned int si, so; 2442 unsigned int si, so;
2283 unsigned long t; 2443 unsigned long t;
2284 unsigned int change_detector, must_reset; 2444 unsigned int change_detector, must_reset;
@@ -2298,10 +2458,12 @@ static int hotkey_kthread(void *data)
2298 /* Initial state for compares */ 2458 /* Initial state for compares */
2299 mutex_lock(&hotkey_thread_data_mutex); 2459 mutex_lock(&hotkey_thread_data_mutex);
2300 change_detector = hotkey_config_change; 2460 change_detector = hotkey_config_change;
2301 mask = hotkey_source_mask & hotkey_mask; 2461 poll_mask = hotkey_source_mask;
2462 event_mask = hotkey_source_mask &
2463 (hotkey_driver_mask | hotkey_user_mask);
2302 poll_freq = hotkey_poll_freq; 2464 poll_freq = hotkey_poll_freq;
2303 mutex_unlock(&hotkey_thread_data_mutex); 2465 mutex_unlock(&hotkey_thread_data_mutex);
2304 hotkey_read_nvram(&s[so], mask); 2466 hotkey_read_nvram(&s[so], poll_mask);
2305 2467
2306 while (!kthread_should_stop()) { 2468 while (!kthread_should_stop()) {
2307 if (t == 0) { 2469 if (t == 0) {
@@ -2324,15 +2486,17 @@ static int hotkey_kthread(void *data)
2324 t = 0; 2486 t = 0;
2325 change_detector = hotkey_config_change; 2487 change_detector = hotkey_config_change;
2326 } 2488 }
2327 mask = hotkey_source_mask & hotkey_mask; 2489 poll_mask = hotkey_source_mask;
2490 event_mask = hotkey_source_mask &
2491 (hotkey_driver_mask | hotkey_user_mask);
2328 poll_freq = hotkey_poll_freq; 2492 poll_freq = hotkey_poll_freq;
2329 mutex_unlock(&hotkey_thread_data_mutex); 2493 mutex_unlock(&hotkey_thread_data_mutex);
2330 2494
2331 if (likely(mask)) { 2495 if (likely(poll_mask)) {
2332 hotkey_read_nvram(&s[si], mask); 2496 hotkey_read_nvram(&s[si], poll_mask);
2333 if (likely(si != so)) { 2497 if (likely(si != so)) {
2334 hotkey_compare_and_issue_event(&s[so], &s[si], 2498 hotkey_compare_and_issue_event(&s[so], &s[si],
2335 mask); 2499 event_mask);
2336 } 2500 }
2337 } 2501 }
2338 2502
@@ -2364,10 +2528,12 @@ static void hotkey_poll_stop_sync(void)
2364/* call with hotkey_mutex held */ 2528/* call with hotkey_mutex held */
2365static void hotkey_poll_setup(bool may_warn) 2529static void hotkey_poll_setup(bool may_warn)
2366{ 2530{
2367 u32 hotkeys_to_poll = hotkey_source_mask & hotkey_mask; 2531 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
2532 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
2368 2533
2369 if (hotkeys_to_poll != 0 && hotkey_poll_freq > 0 && 2534 if (hotkey_poll_freq > 0 &&
2370 (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) { 2535 (poll_driver_mask ||
2536 (poll_user_mask && tpacpi_inputdev->users > 0))) {
2371 if (!tpacpi_hotkey_task) { 2537 if (!tpacpi_hotkey_task) {
2372 tpacpi_hotkey_task = kthread_run(hotkey_kthread, 2538 tpacpi_hotkey_task = kthread_run(hotkey_kthread,
2373 NULL, TPACPI_NVRAM_KTHREAD_NAME); 2539 NULL, TPACPI_NVRAM_KTHREAD_NAME);
@@ -2380,12 +2546,13 @@ static void hotkey_poll_setup(bool may_warn)
2380 } 2546 }
2381 } else { 2547 } else {
2382 hotkey_poll_stop_sync(); 2548 hotkey_poll_stop_sync();
2383 if (may_warn && hotkeys_to_poll != 0 && 2549 if (may_warn && (poll_driver_mask || poll_user_mask) &&
2384 hotkey_poll_freq == 0) { 2550 hotkey_poll_freq == 0) {
2385 printk(TPACPI_NOTICE 2551 printk(TPACPI_NOTICE
2386 "hot keys 0x%08x require polling, " 2552 "hot keys 0x%08x and/or events 0x%08x "
2387 "which is currently disabled\n", 2553 "require polling, which is currently "
2388 hotkeys_to_poll); 2554 "disabled\n",
2555 poll_user_mask, poll_driver_mask);
2389 } 2556 }
2390 } 2557 }
2391} 2558}
@@ -2403,9 +2570,7 @@ static void hotkey_poll_set_freq(unsigned int freq)
2403 if (!freq) 2570 if (!freq)
2404 hotkey_poll_stop_sync(); 2571 hotkey_poll_stop_sync();
2405 2572
2406 HOTKEY_CONFIG_CRITICAL_START
2407 hotkey_poll_freq = freq; 2573 hotkey_poll_freq = freq;
2408 HOTKEY_CONFIG_CRITICAL_END
2409} 2574}
2410 2575
2411#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 2576#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2440,7 +2605,8 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2440static void hotkey_inputdev_close(struct input_dev *dev) 2605static void hotkey_inputdev_close(struct input_dev *dev)
2441{ 2606{
2442 /* disable hotkey polling when possible */ 2607 /* disable hotkey polling when possible */
2443 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING) 2608 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
2609 !(hotkey_source_mask & hotkey_driver_mask))
2444 hotkey_poll_setup_safe(false); 2610 hotkey_poll_setup_safe(false);
2445} 2611}
2446 2612
@@ -2488,15 +2654,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
2488 struct device_attribute *attr, 2654 struct device_attribute *attr,
2489 char *buf) 2655 char *buf)
2490{ 2656{
2491 int res; 2657 return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
2492
2493 if (mutex_lock_killable(&hotkey_mutex))
2494 return -ERESTARTSYS;
2495 res = hotkey_mask_get();
2496 mutex_unlock(&hotkey_mutex);
2497
2498 return (res)?
2499 res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
2500} 2658}
2501 2659
2502static ssize_t hotkey_mask_store(struct device *dev, 2660static ssize_t hotkey_mask_store(struct device *dev,
@@ -2512,7 +2670,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
2512 if (mutex_lock_killable(&hotkey_mutex)) 2670 if (mutex_lock_killable(&hotkey_mutex))
2513 return -ERESTARTSYS; 2671 return -ERESTARTSYS;
2514 2672
2515 res = hotkey_mask_set(t); 2673 res = hotkey_user_mask_set(t);
2516 2674
2517#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2675#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2518 hotkey_poll_setup(true); 2676 hotkey_poll_setup(true);
@@ -2594,6 +2752,8 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2594 const char *buf, size_t count) 2752 const char *buf, size_t count)
2595{ 2753{
2596 unsigned long t; 2754 unsigned long t;
2755 u32 r_ev;
2756 int rc;
2597 2757
2598 if (parse_strtoul(buf, 0xffffffffUL, &t) || 2758 if (parse_strtoul(buf, 0xffffffffUL, &t) ||
2599 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0)) 2759 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
@@ -2606,14 +2766,28 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2606 hotkey_source_mask = t; 2766 hotkey_source_mask = t;
2607 HOTKEY_CONFIG_CRITICAL_END 2767 HOTKEY_CONFIG_CRITICAL_END
2608 2768
2769 rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
2770 ~hotkey_source_mask);
2609 hotkey_poll_setup(true); 2771 hotkey_poll_setup(true);
2610 hotkey_mask_set(hotkey_mask); 2772
2773 /* check if events needed by the driver got disabled */
2774 r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
2775 & ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
2611 2776
2612 mutex_unlock(&hotkey_mutex); 2777 mutex_unlock(&hotkey_mutex);
2613 2778
2779 if (rc < 0)
 2780 printk(TPACPI_ERR "hotkey_source_mask: failed to update the "
2781 "firmware event mask!\n");
2782
2783 if (r_ev)
2784 printk(TPACPI_NOTICE "hotkey_source_mask: "
2785 "some important events were disabled: "
2786 "0x%04x\n", r_ev);
2787
2614 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t); 2788 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
2615 2789
2616 return count; 2790 return (rc < 0) ? rc : count;
2617} 2791}
2618 2792
2619static struct device_attribute dev_attr_hotkey_source_mask = 2793static struct device_attribute dev_attr_hotkey_source_mask =
@@ -2731,9 +2905,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_reason =
2731 2905
2732static void hotkey_wakeup_reason_notify_change(void) 2906static void hotkey_wakeup_reason_notify_change(void)
2733{ 2907{
2734 if (tp_features.hotkey_mask) 2908 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2735 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2909 "wakeup_reason");
2736 "wakeup_reason");
2737} 2910}
2738 2911
2739/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */ 2912/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
@@ -2750,9 +2923,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
2750 2923
2751static void hotkey_wakeup_hotunplug_complete_notify_change(void) 2924static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2752{ 2925{
2753 if (tp_features.hotkey_mask) 2926 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2754 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2927 "wakeup_hotunplug_complete");
2755 "wakeup_hotunplug_complete");
2756} 2928}
2757 2929
2758/* --------------------------------------------------------------------- */ 2930/* --------------------------------------------------------------------- */
@@ -2760,27 +2932,19 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2760static struct attribute *hotkey_attributes[] __initdata = { 2932static struct attribute *hotkey_attributes[] __initdata = {
2761 &dev_attr_hotkey_enable.attr, 2933 &dev_attr_hotkey_enable.attr,
2762 &dev_attr_hotkey_bios_enabled.attr, 2934 &dev_attr_hotkey_bios_enabled.attr,
2935 &dev_attr_hotkey_bios_mask.attr,
2763 &dev_attr_hotkey_report_mode.attr, 2936 &dev_attr_hotkey_report_mode.attr,
2764#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2937 &dev_attr_hotkey_wakeup_reason.attr,
2938 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2765 &dev_attr_hotkey_mask.attr, 2939 &dev_attr_hotkey_mask.attr,
2766 &dev_attr_hotkey_all_mask.attr, 2940 &dev_attr_hotkey_all_mask.attr,
2767 &dev_attr_hotkey_recommended_mask.attr, 2941 &dev_attr_hotkey_recommended_mask.attr,
2942#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2768 &dev_attr_hotkey_source_mask.attr, 2943 &dev_attr_hotkey_source_mask.attr,
2769 &dev_attr_hotkey_poll_freq.attr, 2944 &dev_attr_hotkey_poll_freq.attr,
2770#endif 2945#endif
2771}; 2946};
2772 2947
2773static struct attribute *hotkey_mask_attributes[] __initdata = {
2774 &dev_attr_hotkey_bios_mask.attr,
2775#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2776 &dev_attr_hotkey_mask.attr,
2777 &dev_attr_hotkey_all_mask.attr,
2778 &dev_attr_hotkey_recommended_mask.attr,
2779#endif
2780 &dev_attr_hotkey_wakeup_reason.attr,
2781 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2782};
2783
2784/* 2948/*
2785 * Sync both the hw and sw blocking state of all switches 2949 * Sync both the hw and sw blocking state of all switches
2786 */ 2950 */
@@ -2843,16 +3007,16 @@ static void hotkey_exit(void)
2843 3007
2844 kfree(hotkey_keycode_map); 3008 kfree(hotkey_keycode_map);
2845 3009
2846 if (tp_features.hotkey) { 3010 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
2847 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY, 3011 "restoring original HKEY status and mask\n");
2848 "restoring original hot key mask\n"); 3012 /* yes, there is a bitwise or below, we want the
2849 /* no short-circuit boolean operator below! */ 3013 * functions to be called even if one of them fail */
2850 if ((hotkey_mask_set(hotkey_orig_mask) | 3014 if (((tp_features.hotkey_mask &&
2851 hotkey_status_set(false)) != 0) 3015 hotkey_mask_set(hotkey_orig_mask)) |
2852 printk(TPACPI_ERR 3016 hotkey_status_set(false)) != 0)
2853 "failed to restore hot key mask " 3017 printk(TPACPI_ERR
2854 "to BIOS defaults\n"); 3018 "failed to restore hot key mask "
2855 } 3019 "to BIOS defaults\n");
2856} 3020}
2857 3021
2858static void __init hotkey_unmap(const unsigned int scancode) 3022static void __init hotkey_unmap(const unsigned int scancode)
@@ -2864,6 +3028,35 @@ static void __init hotkey_unmap(const unsigned int scancode)
2864 } 3028 }
2865} 3029}
2866 3030
3031/*
3032 * HKEY quirks:
3033 * TPACPI_HK_Q_INIMASK: Supports FN+F3,FN+F4,FN+F12
3034 */
3035
3036#define TPACPI_HK_Q_INIMASK 0x0001
3037
3038static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3039 TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
3040 TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
3041 TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
3042 TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
3043 TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
3044 TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
3045 TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
3046 TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
3047 TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
3048 TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
3049 TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
3050 TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
3051 TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
3052 TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
3053 TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
3054 TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
3055 TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
3056 TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
3057 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3058};
3059
2867static int __init hotkey_init(struct ibm_init_struct *iibm) 3060static int __init hotkey_init(struct ibm_init_struct *iibm)
2868{ 3061{
2869 /* Requirements for changing the default keymaps: 3062 /* Requirements for changing the default keymaps:
@@ -2906,9 +3099,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2906 KEY_UNKNOWN, /* 0x0D: FN+INSERT */ 3099 KEY_UNKNOWN, /* 0x0D: FN+INSERT */
2907 KEY_UNKNOWN, /* 0x0E: FN+DELETE */ 3100 KEY_UNKNOWN, /* 0x0E: FN+DELETE */
2908 3101
2909 /* brightness: firmware always reacts to them, unless 3102 /* brightness: firmware always reacts to them */
2910 * X.org did some tricks in the radeon BIOS scratch
2911 * registers of *some* models */
2912 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */ 3103 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
2913 KEY_RESERVED, /* 0x10: FN+END (brightness down) */ 3104 KEY_RESERVED, /* 0x10: FN+END (brightness down) */
2914 3105
@@ -2983,6 +3174,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2983 int status; 3174 int status;
2984 int hkeyv; 3175 int hkeyv;
2985 3176
3177 unsigned long quirks;
3178
2986 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3179 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
2987 "initializing hotkey subdriver\n"); 3180 "initializing hotkey subdriver\n");
2988 3181
@@ -3008,9 +3201,16 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3008 if (!tp_features.hotkey) 3201 if (!tp_features.hotkey)
3009 return 1; 3202 return 1;
3010 3203
3204 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
3205 ARRAY_SIZE(tpacpi_hotkey_qtable));
3206
3011 tpacpi_disable_brightness_delay(); 3207 tpacpi_disable_brightness_delay();
3012 3208
3013 hotkey_dev_attributes = create_attr_set(13, NULL); 3209 /* MUST have enough space for all attributes to be added to
3210 * hotkey_dev_attributes */
3211 hotkey_dev_attributes = create_attr_set(
3212 ARRAY_SIZE(hotkey_attributes) + 2,
3213 NULL);
3014 if (!hotkey_dev_attributes) 3214 if (!hotkey_dev_attributes)
3015 return -ENOMEM; 3215 return -ENOMEM;
3016 res = add_many_to_attr_set(hotkey_dev_attributes, 3216 res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -3019,7 +3219,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3019 if (res) 3219 if (res)
3020 goto err_exit; 3220 goto err_exit;
3021 3221
3022 /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, 3222 /* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
3023 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking 3223 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
3024 for HKEY interface version 0x100 */ 3224 for HKEY interface version 0x100 */
3025 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3225 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
@@ -3033,10 +3233,22 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3033 * MHKV 0x100 in A31, R40, R40e, 3233 * MHKV 0x100 in A31, R40, R40e,
3034 * T4x, X31, and later 3234 * T4x, X31, and later
3035 */ 3235 */
3036 tp_features.hotkey_mask = 1;
3037 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3236 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3038 "firmware HKEY interface version: 0x%x\n", 3237 "firmware HKEY interface version: 0x%x\n",
3039 hkeyv); 3238 hkeyv);
3239
3240 /* Paranoia check AND init hotkey_all_mask */
3241 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3242 "MHKA", "qd")) {
3243 printk(TPACPI_ERR
3244 "missing MHKA handler, "
3245 "please report this to %s\n",
3246 TPACPI_MAIL);
3247 /* Fallback: pre-init for FN+F3,F4,F12 */
3248 hotkey_all_mask = 0x080cU;
3249 } else {
3250 tp_features.hotkey_mask = 1;
3251 }
3040 } 3252 }
3041 } 3253 }
3042 3254
@@ -3044,32 +3256,23 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3044 "hotkey masks are %s\n", 3256 "hotkey masks are %s\n",
3045 str_supported(tp_features.hotkey_mask)); 3257 str_supported(tp_features.hotkey_mask));
3046 3258
3047 if (tp_features.hotkey_mask) { 3259 /* Init hotkey_all_mask if not initialized yet */
3048 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3260 if (!tp_features.hotkey_mask && !hotkey_all_mask &&
3049 "MHKA", "qd")) { 3261 (quirks & TPACPI_HK_Q_INIMASK))
3050 printk(TPACPI_ERR 3262 hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
3051 "missing MHKA handler, "
3052 "please report this to %s\n",
3053 TPACPI_MAIL);
3054 /* FN+F12, FN+F4, FN+F3 */
3055 hotkey_all_mask = 0x080cU;
3056 }
3057 }
3058 3263
3059 /* hotkey_source_mask *must* be zero for 3264 /* Init hotkey_acpi_mask and hotkey_orig_mask */
3060 * the first hotkey_mask_get */
3061 if (tp_features.hotkey_mask) { 3265 if (tp_features.hotkey_mask) {
3266 /* hotkey_source_mask *must* be zero for
3267 * the first hotkey_mask_get to return hotkey_orig_mask */
3062 res = hotkey_mask_get(); 3268 res = hotkey_mask_get();
3063 if (res) 3269 if (res)
3064 goto err_exit; 3270 goto err_exit;
3065 3271
3066 hotkey_orig_mask = hotkey_mask; 3272 hotkey_orig_mask = hotkey_acpi_mask;
3067 res = add_many_to_attr_set( 3273 } else {
3068 hotkey_dev_attributes, 3274 hotkey_orig_mask = hotkey_all_mask;
3069 hotkey_mask_attributes, 3275 hotkey_acpi_mask = hotkey_all_mask;
3070 ARRAY_SIZE(hotkey_mask_attributes));
3071 if (res)
3072 goto err_exit;
3073 } 3276 }
3074 3277
3075#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 3278#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
@@ -3183,14 +3386,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3183 } 3386 }
3184 3387
3185#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 3388#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
3186 if (tp_features.hotkey_mask) { 3389 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3187 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK 3390 & ~hotkey_all_mask
3188 & ~hotkey_all_mask 3391 & ~hotkey_reserved_mask;
3189 & ~hotkey_reserved_mask;
3190 } else {
3191 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3192 & ~hotkey_reserved_mask;
3193 }
3194 3392
3195 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3393 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3196 "hotkey source mask 0x%08x, polling freq %u\n", 3394 "hotkey source mask 0x%08x, polling freq %u\n",
@@ -3204,13 +3402,18 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3204 hotkey_exit(); 3402 hotkey_exit();
3205 return res; 3403 return res;
3206 } 3404 }
3207 res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) 3405 res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
3208 & ~hotkey_reserved_mask) 3406 | hotkey_driver_mask)
3209 | hotkey_orig_mask); 3407 & ~hotkey_source_mask);
3210 if (res < 0 && res != -ENXIO) { 3408 if (res < 0 && res != -ENXIO) {
3211 hotkey_exit(); 3409 hotkey_exit();
3212 return res; 3410 return res;
3213 } 3411 }
3412 hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
3413 & ~hotkey_reserved_mask;
3414 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3415 "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
3416 hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
3214 3417
3215 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3418 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3216 "legacy ibm/hotkey event reporting over procfs %s\n", 3419 "legacy ibm/hotkey event reporting over procfs %s\n",
@@ -3245,7 +3448,7 @@ static bool hotkey_notify_hotkey(const u32 hkey,
3245 if (scancode > 0 && scancode < 0x21) { 3448 if (scancode > 0 && scancode < 0x21) {
3246 scancode--; 3449 scancode--;
3247 if (!(hotkey_source_mask & (1 << scancode))) { 3450 if (!(hotkey_source_mask & (1 << scancode))) {
3248 tpacpi_input_send_key(scancode); 3451 tpacpi_input_send_key_masked(scancode);
3249 *send_acpi_ev = false; 3452 *send_acpi_ev = false;
3250 } else { 3453 } else {
3251 *ignore_acpi_ev = true; 3454 *ignore_acpi_ev = true;
@@ -3264,20 +3467,20 @@ static bool hotkey_notify_wakeup(const u32 hkey,
3264 *ignore_acpi_ev = false; 3467 *ignore_acpi_ev = false;
3265 3468
3266 switch (hkey) { 3469 switch (hkey) {
3267 case 0x2304: /* suspend, undock */ 3470 case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
3268 case 0x2404: /* hibernation, undock */ 3471 case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
3269 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK; 3472 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
3270 *ignore_acpi_ev = true; 3473 *ignore_acpi_ev = true;
3271 break; 3474 break;
3272 3475
3273 case 0x2305: /* suspend, bay eject */ 3476 case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
3274 case 0x2405: /* hibernation, bay eject */ 3477 case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
3275 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ; 3478 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
3276 *ignore_acpi_ev = true; 3479 *ignore_acpi_ev = true;
3277 break; 3480 break;
3278 3481
3279 case 0x2313: /* Battery on critical low level (S3) */ 3482 case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
3280 case 0x2413: /* Battery on critical low level (S4) */ 3483 case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
3281 printk(TPACPI_ALERT 3484 printk(TPACPI_ALERT
3282 "EMERGENCY WAKEUP: battery almost empty\n"); 3485 "EMERGENCY WAKEUP: battery almost empty\n");
3283 /* how to auto-heal: */ 3486 /* how to auto-heal: */
@@ -3307,21 +3510,21 @@ static bool hotkey_notify_usrevent(const u32 hkey,
3307 *ignore_acpi_ev = false; 3510 *ignore_acpi_ev = false;
3308 3511
3309 switch (hkey) { 3512 switch (hkey) {
3310 case 0x5010: /* Lenovo new BIOS: brightness changed */ 3513 case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
3311 case 0x500b: /* X61t: tablet pen inserted into bay */ 3514 case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
3312 case 0x500c: /* X61t: tablet pen removed from bay */
3313 return true; 3515 return true;
3314 3516
3315 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */ 3517 case TP_HKEY_EV_TABLET_TABLET: /* X41t-X61t: tablet mode */
3316 case 0x500a: /* X41t-X61t: swivel down (normal mode) */ 3518 case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
3317 tpacpi_input_send_tabletsw(); 3519 tpacpi_input_send_tabletsw();
3318 hotkey_tablet_mode_notify_change(); 3520 hotkey_tablet_mode_notify_change();
3319 *send_acpi_ev = false; 3521 *send_acpi_ev = false;
3320 return true; 3522 return true;
3321 3523
3322 case 0x5001: 3524 case TP_HKEY_EV_LID_CLOSE: /* Lid closed */
3323 case 0x5002: 3525 case TP_HKEY_EV_LID_OPEN: /* Lid opened */
3324 /* LID switch events. Do not propagate */ 3526 case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
3527 /* do not propagate these events */
3325 *ignore_acpi_ev = true; 3528 *ignore_acpi_ev = true;
3326 return true; 3529 return true;
3327 3530
@@ -3339,30 +3542,30 @@ static bool hotkey_notify_thermal(const u32 hkey,
3339 *ignore_acpi_ev = false; 3542 *ignore_acpi_ev = false;
3340 3543
3341 switch (hkey) { 3544 switch (hkey) {
3342 case 0x6011: 3545 case TP_HKEY_EV_ALARM_BAT_HOT:
3343 printk(TPACPI_CRIT 3546 printk(TPACPI_CRIT
3344 "THERMAL ALARM: battery is too hot!\n"); 3547 "THERMAL ALARM: battery is too hot!\n");
3345 /* recommended action: warn user through gui */ 3548 /* recommended action: warn user through gui */
3346 return true; 3549 return true;
3347 case 0x6012: 3550 case TP_HKEY_EV_ALARM_BAT_XHOT:
3348 printk(TPACPI_ALERT 3551 printk(TPACPI_ALERT
3349 "THERMAL EMERGENCY: battery is extremely hot!\n"); 3552 "THERMAL EMERGENCY: battery is extremely hot!\n");
3350 /* recommended action: immediate sleep/hibernate */ 3553 /* recommended action: immediate sleep/hibernate */
3351 return true; 3554 return true;
3352 case 0x6021: 3555 case TP_HKEY_EV_ALARM_SENSOR_HOT:
3353 printk(TPACPI_CRIT 3556 printk(TPACPI_CRIT
3354 "THERMAL ALARM: " 3557 "THERMAL ALARM: "
3355 "a sensor reports something is too hot!\n"); 3558 "a sensor reports something is too hot!\n");
3356 /* recommended action: warn user through gui, that */ 3559 /* recommended action: warn user through gui, that */
3357 /* some internal component is too hot */ 3560 /* some internal component is too hot */
3358 return true; 3561 return true;
3359 case 0x6022: 3562 case TP_HKEY_EV_ALARM_SENSOR_XHOT:
3360 printk(TPACPI_ALERT 3563 printk(TPACPI_ALERT
3361 "THERMAL EMERGENCY: " 3564 "THERMAL EMERGENCY: "
3362 "a sensor reports something is extremely hot!\n"); 3565 "a sensor reports something is extremely hot!\n");
3363 /* recommended action: immediate sleep/hibernate */ 3566 /* recommended action: immediate sleep/hibernate */
3364 return true; 3567 return true;
3365 case 0x6030: 3568 case TP_HKEY_EV_THM_TABLE_CHANGED:
3366 printk(TPACPI_INFO 3569 printk(TPACPI_INFO
3367 "EC reports that Thermal Table has changed\n"); 3570 "EC reports that Thermal Table has changed\n");
3368 /* recommended action: do nothing, we don't have 3571 /* recommended action: do nothing, we don't have
@@ -3420,7 +3623,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3420 break; 3623 break;
3421 case 3: 3624 case 3:
3422 /* 0x3000-0x3FFF: bay-related wakeups */ 3625 /* 0x3000-0x3FFF: bay-related wakeups */
3423 if (hkey == 0x3003) { 3626 if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
3424 hotkey_autosleep_ack = 1; 3627 hotkey_autosleep_ack = 1;
3425 printk(TPACPI_INFO 3628 printk(TPACPI_INFO
3426 "bay ejected\n"); 3629 "bay ejected\n");
@@ -3432,7 +3635,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3432 break; 3635 break;
3433 case 4: 3636 case 4:
3434 /* 0x4000-0x4FFF: dock-related wakeups */ 3637 /* 0x4000-0x4FFF: dock-related wakeups */
3435 if (hkey == 0x4003) { 3638 if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
3436 hotkey_autosleep_ack = 1; 3639 hotkey_autosleep_ack = 1;
3437 printk(TPACPI_INFO 3640 printk(TPACPI_INFO
3438 "undocked\n"); 3641 "undocked\n");
@@ -3454,7 +3657,8 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3454 break; 3657 break;
3455 case 7: 3658 case 7:
3456 /* 0x7000-0x7FFF: misc */ 3659 /* 0x7000-0x7FFF: misc */
3457 if (tp_features.hotkey_wlsw && hkey == 0x7000) { 3660 if (tp_features.hotkey_wlsw &&
3661 hkey == TP_HKEY_EV_RFKILL_CHANGED) {
3458 tpacpi_send_radiosw_update(); 3662 tpacpi_send_radiosw_update();
3459 send_acpi_ev = 0; 3663 send_acpi_ev = 0;
3460 known_ev = true; 3664 known_ev = true;
@@ -3500,10 +3704,12 @@ static void hotkey_resume(void)
3500{ 3704{
3501 tpacpi_disable_brightness_delay(); 3705 tpacpi_disable_brightness_delay();
3502 3706
3503 if (hotkey_mask_get()) 3707 if (hotkey_status_set(true) < 0 ||
3708 hotkey_mask_set(hotkey_acpi_mask) < 0)
3504 printk(TPACPI_ERR 3709 printk(TPACPI_ERR
3505 "error while trying to read hot key mask " 3710 "error while attempting to reset the event "
3506 "from firmware\n"); 3711 "firmware interface\n");
3712
3507 tpacpi_send_radiosw_update(); 3713 tpacpi_send_radiosw_update();
3508 hotkey_tablet_mode_notify_change(); 3714 hotkey_tablet_mode_notify_change();
3509 hotkey_wakeup_reason_notify_change(); 3715 hotkey_wakeup_reason_notify_change();
@@ -3532,8 +3738,8 @@ static int hotkey_read(char *p)
3532 return res; 3738 return res;
3533 3739
3534 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0)); 3740 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
3535 if (tp_features.hotkey_mask) { 3741 if (hotkey_all_mask) {
3536 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask); 3742 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
3537 len += sprintf(p + len, 3743 len += sprintf(p + len,
3538 "commands:\tenable, disable, reset, <mask>\n"); 3744 "commands:\tenable, disable, reset, <mask>\n");
3539 } else { 3745 } else {
@@ -3570,7 +3776,7 @@ static int hotkey_write(char *buf)
3570 if (mutex_lock_killable(&hotkey_mutex)) 3776 if (mutex_lock_killable(&hotkey_mutex))
3571 return -ERESTARTSYS; 3777 return -ERESTARTSYS;
3572 3778
3573 mask = hotkey_mask; 3779 mask = hotkey_user_mask;
3574 3780
3575 res = 0; 3781 res = 0;
3576 while ((cmd = next_cmd(&buf))) { 3782 while ((cmd = next_cmd(&buf))) {
@@ -3592,12 +3798,11 @@ static int hotkey_write(char *buf)
3592 } 3798 }
3593 } 3799 }
3594 3800
3595 if (!res) 3801 if (!res) {
3596 tpacpi_disclose_usertask("procfs hotkey", 3802 tpacpi_disclose_usertask("procfs hotkey",
3597 "set mask to 0x%08x\n", mask); 3803 "set mask to 0x%08x\n", mask);
3598 3804 res = hotkey_user_mask_set(mask);
3599 if (!res && mask != hotkey_mask) 3805 }
3600 res = hotkey_mask_set(mask);
3601 3806
3602errexit: 3807errexit:
3603 mutex_unlock(&hotkey_mutex); 3808 mutex_unlock(&hotkey_mutex);
@@ -6010,8 +6215,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6010 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL, 6215 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
6011 &ibm_backlight_data); 6216 &ibm_backlight_data);
6012 if (IS_ERR(ibm_backlight_device)) { 6217 if (IS_ERR(ibm_backlight_device)) {
6218 int rc = PTR_ERR(ibm_backlight_device);
6219 ibm_backlight_device = NULL;
6013 printk(TPACPI_ERR "Could not register backlight device\n"); 6220 printk(TPACPI_ERR "Could not register backlight device\n");
6014 return PTR_ERR(ibm_backlight_device); 6221 return rc;
6015 } 6222 }
6016 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 6223 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6017 "brightness is supported\n"); 6224 "brightness is supported\n");
@@ -7499,6 +7706,21 @@ static struct ibm_struct fan_driver_data = {
7499 **************************************************************************** 7706 ****************************************************************************
7500 ****************************************************************************/ 7707 ****************************************************************************/
7501 7708
7709/*
7710 * HKEY event callout for other subdrivers go here
7711 * (yes, it is ugly, but it is quick, safe, and gets the job done
7712 */
7713static void tpacpi_driver_event(const unsigned int hkey_event)
7714{
7715}
7716
7717
7718
7719static void hotkey_driver_event(const unsigned int scancode)
7720{
7721 tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
7722}
7723
7502/* sysfs name ---------------------------------------------------------- */ 7724/* sysfs name ---------------------------------------------------------- */
7503static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, 7725static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
7504 struct device_attribute *attr, 7726 struct device_attribute *attr,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c07fdb94d665..83b8b5ac49c9 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -153,6 +153,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
153 acpi_handle temp = NULL; 153 acpi_handle temp = NULL;
154 acpi_status status; 154 acpi_status status;
155 struct pnp_dev *dev; 155 struct pnp_dev *dev;
156 struct acpi_hardware_id *id;
156 157
157 /* 158 /*
158 * If a PnPacpi device is not present , the device 159 * If a PnPacpi device is not present , the device
@@ -193,15 +194,12 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
193 if (dev->capabilities & PNP_CONFIGURABLE) 194 if (dev->capabilities & PNP_CONFIGURABLE)
194 pnpacpi_parse_resource_option_data(dev); 195 pnpacpi_parse_resource_option_data(dev);
195 196
196 if (device->flags.compatible_ids) { 197 list_for_each_entry(id, &device->pnp.ids, list) {
197 struct acpica_device_id_list *cid_list = device->pnp.cid_list; 198 if (!strcmp(id->id, acpi_device_hid(device)))
198 int i; 199 continue;
199 200 if (!ispnpidacpi(id->id))
200 for (i = 0; i < cid_list->count; i++) { 201 continue;
201 if (!ispnpidacpi(cid_list->ids[i].string)) 202 pnp_add_id(dev, id->id);
202 continue;
203 pnp_add_id(dev, cid_list->ids[i].string);
204 }
205 } 203 }
206 204
207 /* clear out the damaged flags */ 205 /* clear out the damaged flags */
@@ -232,9 +230,8 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
232 struct pnp_dev *pnp = _pnp; 230 struct pnp_dev *pnp = _pnp;
233 231
234 /* true means it matched */ 232 /* true means it matched */
235 return acpi->flags.hardware_id 233 return !acpi_get_physical_device(acpi->handle)
236 && !acpi_get_physical_device(acpi->handle) 234 && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
237 && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
238} 235}
239 236
240static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) 237static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 35a0b192d768..2d414e23d390 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -271,6 +271,7 @@ void pps_event(int source, struct pps_ktime *ts, int event, void *data)
271{ 271{
272 struct pps_device *pps; 272 struct pps_device *pps;
273 unsigned long flags; 273 unsigned long flags;
274 int captured = 0;
274 275
275 if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) { 276 if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
276 printk(KERN_ERR "pps: unknown event (%x) for source %d\n", 277 printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
@@ -293,7 +294,8 @@ void pps_event(int source, struct pps_ktime *ts, int event, void *data)
293 294
294 /* Check the event */ 295 /* Check the event */
295 pps->current_mode = pps->params.mode; 296 pps->current_mode = pps->params.mode;
296 if (event & PPS_CAPTUREASSERT) { 297 if ((event & PPS_CAPTUREASSERT) &
298 (pps->params.mode & PPS_CAPTUREASSERT)) {
297 /* We have to add an offset? */ 299 /* We have to add an offset? */
298 if (pps->params.mode & PPS_OFFSETASSERT) 300 if (pps->params.mode & PPS_OFFSETASSERT)
299 pps_add_offset(ts, &pps->params.assert_off_tu); 301 pps_add_offset(ts, &pps->params.assert_off_tu);
@@ -303,8 +305,11 @@ void pps_event(int source, struct pps_ktime *ts, int event, void *data)
303 pps->assert_sequence++; 305 pps->assert_sequence++;
304 pr_debug("capture assert seq #%u for source %d\n", 306 pr_debug("capture assert seq #%u for source %d\n",
305 pps->assert_sequence, source); 307 pps->assert_sequence, source);
308
309 captured = ~0;
306 } 310 }
307 if (event & PPS_CAPTURECLEAR) { 311 if ((event & PPS_CAPTURECLEAR) &
312 (pps->params.mode & PPS_CAPTURECLEAR)) {
308 /* We have to add an offset? */ 313 /* We have to add an offset? */
309 if (pps->params.mode & PPS_OFFSETCLEAR) 314 if (pps->params.mode & PPS_OFFSETCLEAR)
310 pps_add_offset(ts, &pps->params.clear_off_tu); 315 pps_add_offset(ts, &pps->params.clear_off_tu);
@@ -314,12 +319,17 @@ void pps_event(int source, struct pps_ktime *ts, int event, void *data)
314 pps->clear_sequence++; 319 pps->clear_sequence++;
315 pr_debug("capture clear seq #%u for source %d\n", 320 pr_debug("capture clear seq #%u for source %d\n",
316 pps->clear_sequence, source); 321 pps->clear_sequence, source);
322
323 captured = ~0;
317 } 324 }
318 325
319 pps->go = ~0; 326 /* Wake up iif captured somthing */
320 wake_up_interruptible(&pps->queue); 327 if (captured) {
328 pps->go = ~0;
329 wake_up_interruptible(&pps->queue);
321 330
322 kill_fasync(&pps->async_queue, SIGIO, POLL_IN); 331 kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
332 }
323 333
324 spin_unlock_irqrestore(&pps->lock, flags); 334 spin_unlock_irqrestore(&pps->lock, flags);
325 335
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index fea17e7805e9..ca5183bdad85 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -71,9 +71,14 @@ static long pps_cdev_ioctl(struct file *file,
71 case PPS_GETPARAMS: 71 case PPS_GETPARAMS:
72 pr_debug("PPS_GETPARAMS: source %d\n", pps->id); 72 pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
73 73
74 /* Return current parameters */ 74 spin_lock_irq(&pps->lock);
75 err = copy_to_user(uarg, &pps->params, 75
76 sizeof(struct pps_kparams)); 76 /* Get the current parameters */
77 params = pps->params;
78
79 spin_unlock_irq(&pps->lock);
80
81 err = copy_to_user(uarg, &params, sizeof(struct pps_kparams));
77 if (err) 82 if (err)
78 return -EFAULT; 83 return -EFAULT;
79 84
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 744ea1d0b59b..efe568deda12 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1283,7 +1283,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
1283 return -EIO; 1283 return -EIO;
1284 1284
1285 /* are we the last user and permitted to disable ? */ 1285 /* are we the last user and permitted to disable ? */
1286 if (rdev->use_count == 1 && !rdev->constraints->always_on) { 1286 if (rdev->use_count == 1 &&
1287 (rdev->constraints && !rdev->constraints->always_on)) {
1287 1288
1288 /* we are last user */ 1289 /* we are last user */
1289 if (_regulator_can_change_status(rdev) && 1290 if (_regulator_can_change_status(rdev) &&
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f8b295700d7d..f9f516a3028a 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -196,11 +196,10 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
196 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); 196 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
197 197
198 regulator_unregister(drvdata->dev); 198 regulator_unregister(drvdata->dev);
199 kfree(drvdata->desc.name);
200 kfree(drvdata);
201
202 if (gpio_is_valid(drvdata->gpio)) 199 if (gpio_is_valid(drvdata->gpio))
203 gpio_free(drvdata->gpio); 200 gpio_free(drvdata->gpio);
201 kfree(drvdata->desc.name);
202 kfree(drvdata);
204 203
205 return 0; 204 return 0;
206} 205}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index bb61aede4801..902db56ce099 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -175,18 +175,18 @@ static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
175 struct wm831x *wm831x = ldo->wm831x; 175 struct wm831x *wm831x = ldo->wm831x;
176 int ctrl_reg = ldo->base + WM831X_LDO_CONTROL; 176 int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
177 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; 177 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
178 unsigned int ret; 178 int ret;
179 179
180 ret = wm831x_reg_read(wm831x, on_reg); 180 ret = wm831x_reg_read(wm831x, on_reg);
181 if (ret < 0) 181 if (ret < 0)
182 return 0; 182 return ret;
183 183
184 if (!(ret & WM831X_LDO1_ON_MODE)) 184 if (!(ret & WM831X_LDO1_ON_MODE))
185 return REGULATOR_MODE_NORMAL; 185 return REGULATOR_MODE_NORMAL;
186 186
187 ret = wm831x_reg_read(wm831x, ctrl_reg); 187 ret = wm831x_reg_read(wm831x, ctrl_reg);
188 if (ret < 0) 188 if (ret < 0)
189 return 0; 189 return ret;
190 190
191 if (ret & WM831X_LDO1_LP_MODE) 191 if (ret & WM831X_LDO1_LP_MODE)
192 return REGULATOR_MODE_STANDBY; 192 return REGULATOR_MODE_STANDBY;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 4cdb31a362ca..a0c816238aa9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -12,6 +12,7 @@
12*/ 12*/
13 13
14#include <linux/rtc.h> 14#include <linux/rtc.h>
15#include <linux/sched.h>
15#include <linux/log2.h> 16#include <linux/log2.h>
16 17
17int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 18int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 7fe1fa26c52c..03ea530981d1 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -58,7 +58,16 @@ static irqreturn_t coh901331_interrupt(int irq, void *data)
58 clk_enable(rtap->clk); 58 clk_enable(rtap->clk);
59 /* Ack IRQ */ 59 /* Ack IRQ */
60 writel(1, rtap->virtbase + COH901331_IRQ_EVENT); 60 writel(1, rtap->virtbase + COH901331_IRQ_EVENT);
61 /*
62 * Disable the interrupt. This is necessary because
63 * the RTC lives on a lower-clocked line and will
64 * not release the IRQ line until after a few (slower)
65 * clock cycles. The interrupt will be re-enabled when
66 * a new alarm is set anyway.
67 */
68 writel(0, rtap->virtbase + COH901331_IRQ_MASK);
61 clk_disable(rtap->clk); 69 clk_disable(rtap->clk);
70
62 /* Set alarm flag */ 71 /* Set alarm flag */
63 rtc_update_irq(rtap->rtc, 1, RTC_AF); 72 rtc_update_irq(rtap->rtc, 1, RTC_AF);
64 73
@@ -128,6 +137,8 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
128 else 137 else
129 writel(0, rtap->virtbase + COH901331_IRQ_MASK); 138 writel(0, rtap->virtbase + COH901331_IRQ_MASK);
130 clk_disable(rtap->clk); 139 clk_disable(rtap->clk);
140
141 return 0;
131} 142}
132 143
133static struct rtc_class_ops coh901331_ops = { 144static struct rtc_class_ops coh901331_ops = {
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 8a11de9552cd..62227cd52410 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/rtc.h> 15#include <linux/rtc.h>
16#include <linux/sched.h>
16#include "rtc-core.h" 17#include "rtc-core.h"
17 18
18static dev_t rtc_devt; 19static dev_t rtc_devt;
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index f4dd87e29075..33a10c47260e 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -70,7 +70,7 @@ static void pcf2rtc_time(struct rtc_time *rtc, struct pcf50633_time *pcf)
70 rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]); 70 rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]);
71 rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]); 71 rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]);
72 rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]); 72 rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]);
73 rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]); 73 rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]) - 1;
74 rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100; 74 rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100;
75} 75}
76 76
@@ -81,7 +81,7 @@ static void rtc2pcf_time(struct pcf50633_time *pcf, struct rtc_time *rtc)
81 pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour); 81 pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour);
82 pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday); 82 pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday);
83 pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday); 83 pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday);
84 pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon); 84 pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon + 1);
85 pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100); 85 pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100);
86} 86}
87 87
@@ -245,8 +245,9 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
245 ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA, 245 ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
246 PCF50633_TI_EXTENT, &pcf_tm.time[0]); 246 PCF50633_TI_EXTENT, &pcf_tm.time[0]);
247 247
248 if (!alarm_masked) 248 if (!alarm_masked || alrm->enabled)
249 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); 249 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
250 rtc->alarm_enabled = alrm->enabled;
250 251
251 return ret; 252 return ret;
252} 253}
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index bb8cc05605ac..747ca194fad4 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -438,34 +438,37 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev)
438} 438}
439 439
440#ifdef CONFIG_PM 440#ifdef CONFIG_PM
441static int pxa_rtc_suspend(struct platform_device *pdev, pm_message_t state) 441static int pxa_rtc_suspend(struct device *dev)
442{ 442{
443 struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); 443 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
444 444
445 if (device_may_wakeup(&pdev->dev)) 445 if (device_may_wakeup(dev))
446 enable_irq_wake(pxa_rtc->irq_Alrm); 446 enable_irq_wake(pxa_rtc->irq_Alrm);
447 return 0; 447 return 0;
448} 448}
449 449
450static int pxa_rtc_resume(struct platform_device *pdev) 450static int pxa_rtc_resume(struct device *dev)
451{ 451{
452 struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); 452 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
453 453
454 if (device_may_wakeup(&pdev->dev)) 454 if (device_may_wakeup(dev))
455 disable_irq_wake(pxa_rtc->irq_Alrm); 455 disable_irq_wake(pxa_rtc->irq_Alrm);
456 return 0; 456 return 0;
457} 457}
458#else 458
459#define pxa_rtc_suspend NULL 459static struct dev_pm_ops pxa_rtc_pm_ops = {
460#define pxa_rtc_resume NULL 460 .suspend = pxa_rtc_suspend,
461 .resume = pxa_rtc_resume,
462};
461#endif 463#endif
462 464
463static struct platform_driver pxa_rtc_driver = { 465static struct platform_driver pxa_rtc_driver = {
464 .remove = __exit_p(pxa_rtc_remove), 466 .remove = __exit_p(pxa_rtc_remove),
465 .suspend = pxa_rtc_suspend,
466 .resume = pxa_rtc_resume,
467 .driver = { 467 .driver = {
468 .name = "pxa-rtc", 468 .name = "pxa-rtc",
469#ifdef CONFIG_PM
470 .pm = &pxa_rtc_pm_ops,
471#endif
469 }, 472 },
470}; 473};
471 474
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 021b2928f0b9..29f98a70586e 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -393,31 +393,34 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
393} 393}
394 394
395#ifdef CONFIG_PM 395#ifdef CONFIG_PM
396static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state) 396static int sa1100_rtc_suspend(struct device *dev)
397{ 397{
398 if (device_may_wakeup(&pdev->dev)) 398 if (device_may_wakeup(dev))
399 enable_irq_wake(IRQ_RTCAlrm); 399 enable_irq_wake(IRQ_RTCAlrm);
400 return 0; 400 return 0;
401} 401}
402 402
403static int sa1100_rtc_resume(struct platform_device *pdev) 403static int sa1100_rtc_resume(struct device *dev)
404{ 404{
405 if (device_may_wakeup(&pdev->dev)) 405 if (device_may_wakeup(dev))
406 disable_irq_wake(IRQ_RTCAlrm); 406 disable_irq_wake(IRQ_RTCAlrm);
407 return 0; 407 return 0;
408} 408}
409#else 409
410#define sa1100_rtc_suspend NULL 410static struct dev_pm_ops sa1100_rtc_pm_ops = {
411#define sa1100_rtc_resume NULL 411 .suspend = sa1100_rtc_suspend,
412 .resume = sa1100_rtc_resume,
413};
412#endif 414#endif
413 415
414static struct platform_driver sa1100_rtc_driver = { 416static struct platform_driver sa1100_rtc_driver = {
415 .probe = sa1100_rtc_probe, 417 .probe = sa1100_rtc_probe,
416 .remove = sa1100_rtc_remove, 418 .remove = sa1100_rtc_remove,
417 .suspend = sa1100_rtc_suspend,
418 .resume = sa1100_rtc_resume,
419 .driver = { 419 .driver = {
420 .name = "sa1100-rtc", 420 .name = "sa1100-rtc",
421#ifdef CONFIG_PM
422 .pm = &sa1100_rtc_pm_ops,
423#endif
421 }, 424 },
422}; 425};
423 426
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index ad164056feb6..423cd5a30b10 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -96,7 +96,7 @@ static void v3020_mmio_write_bit(struct v3020 *chip, unsigned char bit)
96 96
97static unsigned char v3020_mmio_read_bit(struct v3020 *chip) 97static unsigned char v3020_mmio_read_bit(struct v3020 *chip)
98{ 98{
99 return readl(chip->ioaddress) & (1 << chip->leftshift); 99 return !!(readl(chip->ioaddress) & (1 << chip->leftshift));
100} 100}
101 101
102static struct v3020_chip_ops v3020_mmio_ops = { 102static struct v3020_chip_ops v3020_mmio_ops = {
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 2c839d0d21bd..fadddac1e5a4 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -209,19 +209,18 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
209 209
210static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq) 210static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
211{ 211{
212 unsigned long count; 212 u64 count;
213 213
214 if (!is_power_of_2(freq)) 214 if (!is_power_of_2(freq))
215 return -EINVAL; 215 return -EINVAL;
216 count = RTC_FREQUENCY; 216 count = RTC_FREQUENCY;
217 do_div(count, freq); 217 do_div(count, freq);
218 218
219 periodic_count = count;
220
221 spin_lock_irq(&rtc_lock); 219 spin_lock_irq(&rtc_lock);
222 220
223 rtc1_write(RTCL1LREG, count); 221 periodic_count = count;
224 rtc1_write(RTCL1HREG, count >> 16); 222 rtc1_write(RTCL1LREG, periodic_count);
223 rtc1_write(RTCL1HREG, periodic_count >> 16);
225 224
226 spin_unlock_irq(&rtc_lock); 225 spin_unlock_irq(&rtc_lock);
227 226
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index dad0449475b6..aaccc8ecfa8f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2508,8 +2508,6 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
2508 device->stopped &= ~DASD_UNRESUMED_PM; 2508 device->stopped &= ~DASD_UNRESUMED_PM;
2509 2509
2510 dasd_schedule_device_bh(device); 2510 dasd_schedule_device_bh(device);
2511 if (device->block)
2512 dasd_schedule_block_bh(device->block);
2513 2511
2514 if (device->discipline->restore) 2512 if (device->discipline->restore)
2515 rc = device->discipline->restore(device); 2513 rc = device->discipline->restore(device);
@@ -2520,6 +2518,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
2520 */ 2518 */
2521 device->stopped |= DASD_UNRESUMED_PM; 2519 device->stopped |= DASD_UNRESUMED_PM;
2522 2520
2521 if (device->block)
2522 dasd_schedule_block_bh(device->block);
2523
2523 dasd_put_device(device); 2524 dasd_put_device(device);
2524 return 0; 2525 return 0;
2525} 2526}
@@ -2532,6 +2533,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2532{ 2533{
2533 struct dasd_ccw_req *cqr; 2534 struct dasd_ccw_req *cqr;
2534 struct ccw1 *ccw; 2535 struct ccw1 *ccw;
2536 unsigned long *idaw;
2535 2537
2536 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 2538 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2537 2539
@@ -2545,9 +2547,17 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2545 2547
2546 ccw = cqr->cpaddr; 2548 ccw = cqr->cpaddr;
2547 ccw->cmd_code = CCW_CMD_RDC; 2549 ccw->cmd_code = CCW_CMD_RDC;
2548 ccw->cda = (__u32)(addr_t)rdc_buffer; 2550 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2549 ccw->count = rdc_buffer_size; 2551 idaw = (unsigned long *) (cqr->data);
2552 ccw->cda = (__u32)(addr_t) idaw;
2553 ccw->flags = CCW_FLAG_IDA;
2554 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2555 } else {
2556 ccw->cda = (__u32)(addr_t) rdc_buffer;
2557 ccw->flags = 0;
2558 }
2550 2559
2560 ccw->count = rdc_buffer_size;
2551 cqr->startdev = device; 2561 cqr->startdev = device;
2552 cqr->memdev = device; 2562 cqr->memdev = device;
2553 cqr->expires = 10*HZ; 2563 cqr->expires = 10*HZ;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ab3521755588..417b97cd3f94 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2338,6 +2338,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2338 /* Calculate number of blocks/records per track. */ 2338 /* Calculate number of blocks/records per track. */
2339 blksize = block->bp_block; 2339 blksize = block->bp_block;
2340 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2340 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2341 if (blk_per_trk == 0)
2342 return ERR_PTR(-EINVAL);
2341 /* Calculate record id of first and last block. */ 2343 /* Calculate record id of first and last block. */
2342 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 2344 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2343 first_offs = sector_div(first_trk, blk_per_trk); 2345 first_offs = sector_div(first_trk, blk_per_trk);
@@ -3211,8 +3213,10 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
3211int dasd_eckd_restore_device(struct dasd_device *device) 3213int dasd_eckd_restore_device(struct dasd_device *device)
3212{ 3214{
3213 struct dasd_eckd_private *private; 3215 struct dasd_eckd_private *private;
3216 struct dasd_eckd_characteristics temp_rdc_data;
3214 int is_known, rc; 3217 int is_known, rc;
3215 struct dasd_uid temp_uid; 3218 struct dasd_uid temp_uid;
3219 unsigned long flags;
3216 3220
3217 private = (struct dasd_eckd_private *) device->private; 3221 private = (struct dasd_eckd_private *) device->private;
3218 3222
@@ -3225,7 +3229,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3225 rc = dasd_eckd_generate_uid(device, &private->uid); 3229 rc = dasd_eckd_generate_uid(device, &private->uid);
3226 dasd_get_uid(device->cdev, &temp_uid); 3230 dasd_get_uid(device->cdev, &temp_uid);
3227 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 3231 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3228 dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); 3232 dev_err(&device->cdev->dev, "The UID of the DASD has "
3233 "changed\n");
3229 if (rc) 3234 if (rc)
3230 goto out_err; 3235 goto out_err;
3231 dasd_set_uid(device->cdev, &private->uid); 3236 dasd_set_uid(device->cdev, &private->uid);
@@ -3245,15 +3250,17 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3245 dasd_eckd_read_features(device); 3250 dasd_eckd_read_features(device);
3246 3251
3247 /* Read Device Characteristics */ 3252 /* Read Device Characteristics */
3248 memset(&private->rdc_data, 0, sizeof(private->rdc_data));
3249 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 3253 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3250 &private->rdc_data, 64); 3254 &temp_rdc_data, 64);
3251 if (rc) { 3255 if (rc) {
3252 DBF_EVENT(DBF_WARNING, 3256 DBF_EVENT(DBF_WARNING,
3253 "Read device characteristics failed, rc=%d for " 3257 "Read device characteristics failed, rc=%d for "
3254 "device: %s", rc, dev_name(&device->cdev->dev)); 3258 "device: %s", rc, dev_name(&device->cdev->dev));
3255 goto out_err; 3259 goto out_err;
3256 } 3260 }
3261 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3262 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3263 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3257 3264
3258 /* add device to alias management */ 3265 /* add device to alias management */
3259 dasd_alias_add_device(device); 3266 dasd_alias_add_device(device);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 89ece1c235aa..66e21dd23154 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -357,6 +357,7 @@ static int mon_close(struct inode *inode, struct file *filp)
357 atomic_set(&monpriv->msglim_count, 0); 357 atomic_set(&monpriv->msglim_count, 0);
358 monpriv->write_index = 0; 358 monpriv->write_index = 0;
359 monpriv->read_index = 0; 359 monpriv->read_index = 0;
360 dev_set_drvdata(monreader_device, NULL);
360 361
361 for (i = 0; i < MON_MSGLIM; i++) 362 for (i = 0; i < MON_MSGLIM; i++)
362 kfree(monpriv->msg_array[i]); 363 kfree(monpriv->msg_array[i]);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index d6a022f55e92..62ddf5202b79 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1361,11 +1361,13 @@ static int raw3270_pm_start(struct ccw_device *cdev)
1361 1361
1362void raw3270_pm_unfreeze(struct raw3270_view *view) 1362void raw3270_pm_unfreeze(struct raw3270_view *view)
1363{ 1363{
1364#ifdef CONFIG_TN3270_CONSOLE
1364 struct raw3270 *rp; 1365 struct raw3270 *rp;
1365 1366
1366 rp = view->dev; 1367 rp = view->dev;
1367 if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 1368 if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
1368 ccw_device_force_console(); 1369 ccw_device_force_console();
1370#endif
1369} 1371}
1370 1372
1371static struct ccw_device_id raw3270_id[] = { 1373static struct ccw_device_id raw3270_id[] = {
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index daaec185ed36..b44462a6c6d3 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb;
26static int sclp_async_send_wait(char *message); 26static int sclp_async_send_wait(char *message);
27static struct ctl_table_header *callhome_sysctl_header; 27static struct ctl_table_header *callhome_sysctl_header;
28static DEFINE_SPINLOCK(sclp_async_lock); 28static DEFINE_SPINLOCK(sclp_async_lock);
29static char nodename[64];
30#define SCLP_NORMAL_WRITE 0x00 29#define SCLP_NORMAL_WRITE 0x00
31 30
32struct async_evbuf { 31struct async_evbuf {
@@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = {
52static int call_home_on_panic(struct notifier_block *self, 51static int call_home_on_panic(struct notifier_block *self,
53 unsigned long event, void *data) 52 unsigned long event, void *data)
54{ 53{
55 strncat(data, nodename, strlen(nodename)); 54 strncat(data, init_utsname()->nodename,
56 sclp_async_send_wait(data); 55 sizeof(init_utsname()->nodename));
57 return NOTIFY_DONE; 56 sclp_async_send_wait(data);
57 return NOTIFY_DONE;
58} 58}
59 59
60static struct notifier_block call_home_panic_nb = { 60static struct notifier_block call_home_panic_nb = {
@@ -62,21 +62,20 @@ static struct notifier_block call_home_panic_nb = {
62 .priority = INT_MAX, 62 .priority = INT_MAX,
63}; 63};
64 64
65static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp, 65static int proc_handler_callhome(struct ctl_table *ctl, int write,
66 void __user *buffer, size_t *count, 66 void __user *buffer, size_t *count,
67 loff_t *ppos) 67 loff_t *ppos)
68{ 68{
69 unsigned long val; 69 unsigned long val;
70 int len, rc; 70 int len, rc;
71 char buf[2]; 71 char buf[3];
72 72
73 if (!*count | (*ppos && !write)) { 73 if (!*count || (*ppos && !write)) {
74 *count = 0; 74 *count = 0;
75 return 0; 75 return 0;
76 } 76 }
77 if (!write) { 77 if (!write) {
78 len = sprintf(buf, "%d\n", callhome_enabled); 78 len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
79 buf[len] = '\0';
80 rc = copy_to_user(buffer, buf, sizeof(buf)); 79 rc = copy_to_user(buffer, buf, sizeof(buf));
81 if (rc != 0) 80 if (rc != 0)
82 return -EFAULT; 81 return -EFAULT;
@@ -100,7 +99,7 @@ static struct ctl_table callhome_table[] = {
100 { 99 {
101 .procname = "callhome", 100 .procname = "callhome",
102 .mode = 0644, 101 .mode = 0644,
103 .proc_handler = &proc_handler_callhome, 102 .proc_handler = proc_handler_callhome,
104 }, 103 },
105 { .ctl_name = 0 } 104 { .ctl_name = 0 }
106}; 105};
@@ -171,39 +170,29 @@ static int __init sclp_async_init(void)
171 rc = sclp_register(&sclp_async_register); 170 rc = sclp_register(&sclp_async_register);
172 if (rc) 171 if (rc)
173 return rc; 172 return rc;
174 callhome_sysctl_header = register_sysctl_table(kern_dir_table); 173 rc = -EOPNOTSUPP;
175 if (!callhome_sysctl_header) { 174 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
176 rc = -ENOMEM;
177 goto out_sclp;
178 }
179 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
180 rc = -EOPNOTSUPP;
181 goto out_sclp; 175 goto out_sclp;
182 }
183 rc = -ENOMEM; 176 rc = -ENOMEM;
177 callhome_sysctl_header = register_sysctl_table(kern_dir_table);
178 if (!callhome_sysctl_header)
179 goto out_sclp;
184 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); 180 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
185 if (!request)
186 goto out_sys;
187 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 181 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
188 if (!sccb) 182 if (!request || !sccb)
189 goto out_mem; 183 goto out_mem;
190 rc = atomic_notifier_chain_register(&panic_notifier_list, 184 rc = atomic_notifier_chain_register(&panic_notifier_list,
191 &call_home_panic_nb); 185 &call_home_panic_nb);
192 if (rc) 186 if (!rc)
193 goto out_mem; 187 goto out;
194
195 strncpy(nodename, init_utsname()->nodename, 64);
196 return 0;
197
198out_mem: 188out_mem:
199 kfree(request); 189 kfree(request);
200 free_page((unsigned long) sccb); 190 free_page((unsigned long) sccb);
201out_sys:
202 unregister_sysctl_table(callhome_sysctl_header); 191 unregister_sysctl_table(callhome_sysctl_header);
203out_sclp: 192out_sclp:
204 sclp_unregister(&sclp_async_register); 193 sclp_unregister(&sclp_async_register);
194out:
205 return rc; 195 return rc;
206
207} 196}
208module_init(sclp_async_init); 197module_init(sclp_async_init);
209 198
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 84c191c1cd62..05909a7df8b3 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -20,9 +20,12 @@
20 20
21#include "sclp.h" 21#include "sclp.h"
22 22
23static void (*old_machine_restart)(char *);
24static void (*old_machine_halt)(void);
25static void (*old_machine_power_off)(void);
26
23/* Shutdown handler. Signal completion of shutdown by loading special PSW. */ 27/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
24static void 28static void do_machine_quiesce(void)
25do_machine_quiesce(void)
26{ 29{
27 psw_t quiesce_psw; 30 psw_t quiesce_psw;
28 31
@@ -33,23 +36,48 @@ do_machine_quiesce(void)
33} 36}
34 37
35/* Handler for quiesce event. Start shutdown procedure. */ 38/* Handler for quiesce event. Start shutdown procedure. */
36static void 39static void sclp_quiesce_handler(struct evbuf_header *evbuf)
37sclp_quiesce_handler(struct evbuf_header *evbuf)
38{ 40{
39 _machine_restart = (void *) do_machine_quiesce; 41 if (_machine_restart != (void *) do_machine_quiesce) {
40 _machine_halt = do_machine_quiesce; 42 old_machine_restart = _machine_restart;
41 _machine_power_off = do_machine_quiesce; 43 old_machine_halt = _machine_halt;
44 old_machine_power_off = _machine_power_off;
45 _machine_restart = (void *) do_machine_quiesce;
46 _machine_halt = do_machine_quiesce;
47 _machine_power_off = do_machine_quiesce;
48 }
42 ctrl_alt_del(); 49 ctrl_alt_del();
43} 50}
44 51
52/* Undo machine restart/halt/power_off modification on resume */
53static void sclp_quiesce_pm_event(struct sclp_register *reg,
54 enum sclp_pm_event sclp_pm_event)
55{
56 switch (sclp_pm_event) {
57 case SCLP_PM_EVENT_RESTORE:
58 if (old_machine_restart) {
59 _machine_restart = old_machine_restart;
60 _machine_halt = old_machine_halt;
61 _machine_power_off = old_machine_power_off;
62 old_machine_restart = NULL;
63 old_machine_halt = NULL;
64 old_machine_power_off = NULL;
65 }
66 break;
67 case SCLP_PM_EVENT_FREEZE:
68 case SCLP_PM_EVENT_THAW:
69 break;
70 }
71}
72
45static struct sclp_register sclp_quiesce_event = { 73static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EVTYP_SIGQUIESCE_MASK, 74 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 75 .receiver_fn = sclp_quiesce_handler,
76 .pm_event_fn = sclp_quiesce_pm_event
48}; 77};
49 78
50/* Initialize quiesce driver. */ 79/* Initialize quiesce driver. */
51static int __init 80static int __init sclp_quiesce_init(void)
52sclp_quiesce_init(void)
53{ 81{
54 return sclp_register(&sclp_quiesce_event); 82 return sclp_register(&sclp_quiesce_event);
55} 83}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 178724f2a4c3..b9d2a007e93b 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -705,21 +705,6 @@ out_driver:
705} 705}
706__initcall(sclp_vt220_tty_init); 706__initcall(sclp_vt220_tty_init);
707 707
708#ifdef CONFIG_SCLP_VT220_CONSOLE
709
710static void
711sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
712{
713 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
714}
715
716static struct tty_driver *
717sclp_vt220_con_device(struct console *c, int *index)
718{
719 *index = 0;
720 return sclp_vt220_driver;
721}
722
723static void __sclp_vt220_flush_buffer(void) 708static void __sclp_vt220_flush_buffer(void)
724{ 709{
725 unsigned long flags; 710 unsigned long flags;
@@ -776,6 +761,21 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
776 } 761 }
777} 762}
778 763
764#ifdef CONFIG_SCLP_VT220_CONSOLE
765
766static void
767sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
768{
769 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
770}
771
772static struct tty_driver *
773sclp_vt220_con_device(struct console *c, int *index)
774{
775 *index = 0;
776 return sclp_vt220_driver;
777}
778
779static int 779static int
780sclp_vt220_notify(struct notifier_block *self, 780sclp_vt220_notify(struct notifier_block *self,
781 unsigned long event, void *data) 781 unsigned long event, void *data)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 64f57ef2763c..0c0705b91c28 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -162,9 +162,10 @@ tapeblock_requeue(struct work_struct *work) {
162 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
163 while ( 163 while (
164 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
165 (req = blk_fetch_request(queue)) && 165 blk_peek_request(queue) &&
166 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
167 ) { 167 ) {
168 req = blk_fetch_request(queue);
168 if (rq_data_dir(req) == WRITE) { 169 if (rq_data_dir(req) == WRITE) {
169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 170 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
170 spin_unlock_irq(&device->blk_data.request_queue_lock); 171 spin_unlock_irq(&device->blk_data.request_queue_lock);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 6565f027791e..7eab9ab9f406 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -265,13 +265,11 @@ struct ccwdev_iter {
265static void * 265static void *
266cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) 266cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
267{ 267{
268 struct ccwdev_iter *iter; 268 struct ccwdev_iter *iter = s->private;
269 269
270 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) 270 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
271 return NULL; 271 return NULL;
272 iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); 272 memset(iter, 0, sizeof(*iter));
273 if (!iter)
274 return ERR_PTR(-ENOMEM);
275 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); 273 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
276 iter->devno = *offset % (__MAX_SUBCHANNEL + 1); 274 iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
277 return iter; 275 return iter;
@@ -280,8 +278,6 @@ cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
280static void 278static void
281cio_ignore_proc_seq_stop(struct seq_file *s, void *it) 279cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
282{ 280{
283 if (!IS_ERR(it))
284 kfree(it);
285} 281}
286 282
287static void * 283static void *
@@ -378,14 +374,15 @@ static const struct seq_operations cio_ignore_proc_seq_ops = {
378static int 374static int
379cio_ignore_proc_open(struct inode *inode, struct file *file) 375cio_ignore_proc_open(struct inode *inode, struct file *file)
380{ 376{
381 return seq_open(file, &cio_ignore_proc_seq_ops); 377 return seq_open_private(file, &cio_ignore_proc_seq_ops,
378 sizeof(struct ccwdev_iter));
382} 379}
383 380
384static const struct file_operations cio_ignore_proc_fops = { 381static const struct file_operations cio_ignore_proc_fops = {
385 .open = cio_ignore_proc_open, 382 .open = cio_ignore_proc_open,
386 .read = seq_read, 383 .read = seq_read,
387 .llseek = seq_lseek, 384 .llseek = seq_lseek,
388 .release = seq_release, 385 .release = seq_release_private,
389 .write = cio_ignore_write, 386 .write = cio_ignore_write,
390}; 387};
391 388
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 40002830d48a..8ab51608da55 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -393,7 +393,6 @@ int chp_new(struct chp_id chpid)
393 chp->state = 1; 393 chp->state = 1;
394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device; 394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
395 chp->dev.release = chp_release; 395 chp->dev.release = chp_release;
396 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
397 396
398 /* Obtain channel path description and fill it in. */ 397 /* Obtain channel path description and fill it in. */
399 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); 398 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -411,6 +410,7 @@ int chp_new(struct chp_id chpid)
411 } else { 410 } else {
412 chp->cmg = -1; 411 chp->cmg = -1;
413 } 412 }
413 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
414 414
415 /* make it known to the system */ 415 /* make it known to the system */
416 ret = device_register(&chp->dev); 416 ret = device_register(&chp->dev);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f780bdd3a04e..2490b741e16a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1250,8 +1250,7 @@ static int io_subchannel_probe(struct subchannel *sch)
1250 unsigned long flags; 1250 unsigned long flags;
1251 struct ccw_dev_id dev_id; 1251 struct ccw_dev_id dev_id;
1252 1252
1253 cdev = sch_get_cdev(sch); 1253 if (cio_is_console(sch->schid)) {
1254 if (cdev) {
1255 rc = sysfs_create_group(&sch->dev.kobj, 1254 rc = sysfs_create_group(&sch->dev.kobj,
1256 &io_subchannel_attr_group); 1255 &io_subchannel_attr_group);
1257 if (rc) 1256 if (rc)
@@ -1260,13 +1259,13 @@ static int io_subchannel_probe(struct subchannel *sch)
1260 "0.%x.%04x (rc=%d)\n", 1259 "0.%x.%04x (rc=%d)\n",
1261 sch->schid.ssid, sch->schid.sch_no, rc); 1260 sch->schid.ssid, sch->schid.sch_no, rc);
1262 /* 1261 /*
1263 * This subchannel already has an associated ccw_device. 1262 * The console subchannel already has an associated ccw_device.
1264 * Throw the delayed uevent for the subchannel, register 1263 * Throw the delayed uevent for the subchannel, register
1265 * the ccw_device and exit. This happens for all early 1264 * the ccw_device and exit.
1266 * devices, e.g. the console.
1267 */ 1265 */
1268 dev_set_uevent_suppress(&sch->dev, 0); 1266 dev_set_uevent_suppress(&sch->dev, 0);
1269 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 1267 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1268 cdev = sch_get_cdev(sch);
1270 cdev->dev.groups = ccwdev_attr_groups; 1269 cdev->dev.groups = ccwdev_attr_groups;
1271 device_initialize(&cdev->dev); 1270 device_initialize(&cdev->dev);
1272 ccw_device_register(cdev); 1271 ccw_device_register(cdev);
@@ -1609,7 +1608,7 @@ int ccw_purge_blacklisted(void)
1609 return 0; 1608 return 0;
1610} 1609}
1611 1610
1612static void device_set_disconnected(struct ccw_device *cdev) 1611void ccw_device_set_disconnected(struct ccw_device *cdev)
1613{ 1612{
1614 if (!cdev) 1613 if (!cdev)
1615 return; 1614 return;
@@ -1705,7 +1704,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1705 ccw_device_trigger_reprobe(cdev); 1704 ccw_device_trigger_reprobe(cdev);
1706 break; 1705 break;
1707 case DISC: 1706 case DISC:
1708 device_set_disconnected(cdev); 1707 ccw_device_set_disconnected(cdev);
1709 break; 1708 break;
1710 default: 1709 default:
1711 break; 1710 break;
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index ed39a2caaf47..246c6482842c 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -125,6 +125,7 @@ int ccw_device_stlck(struct ccw_device *);
125void ccw_device_trigger_reprobe(struct ccw_device *); 125void ccw_device_trigger_reprobe(struct ccw_device *);
126void ccw_device_kill_io(struct ccw_device *); 126void ccw_device_kill_io(struct ccw_device *);
127int ccw_device_notify(struct ccw_device *, int); 127int ccw_device_notify(struct ccw_device *, int);
128void ccw_device_set_disconnected(struct ccw_device *cdev);
128void ccw_device_set_notoper(struct ccw_device *cdev); 129void ccw_device_set_notoper(struct ccw_device *cdev);
129 130
130/* qdio needs this. */ 131/* qdio needs this. */
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e728ce447f6e..b9613d7df9ef 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -387,19 +387,35 @@ ccw_device_done(struct ccw_device *cdev, int state)
387 387
388 cdev->private->state = state; 388 cdev->private->state = state;
389 389
390 if (state == DEV_STATE_BOXED) { 390 switch (state) {
391 case DEV_STATE_BOXED:
391 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 392 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
392 cdev->private->dev_id.devno, sch->schid.sch_no); 393 cdev->private->dev_id.devno, sch->schid.sch_no);
393 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 394 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
394 ccw_device_schedule_sch_unregister(cdev); 395 ccw_device_schedule_sch_unregister(cdev);
395 cdev->private->flags.donotify = 0; 396 cdev->private->flags.donotify = 0;
396 } 397 break;
397 if (state == DEV_STATE_NOT_OPER) { 398 case DEV_STATE_NOT_OPER:
398 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 399 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
399 cdev->private->dev_id.devno, sch->schid.sch_no); 400 cdev->private->dev_id.devno, sch->schid.sch_no);
400 if (!ccw_device_notify(cdev, CIO_GONE)) 401 if (!ccw_device_notify(cdev, CIO_GONE))
401 ccw_device_schedule_sch_unregister(cdev); 402 ccw_device_schedule_sch_unregister(cdev);
403 else
404 ccw_device_set_disconnected(cdev);
402 cdev->private->flags.donotify = 0; 405 cdev->private->flags.donotify = 0;
406 break;
407 case DEV_STATE_DISCONNECTED:
408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
409 "%04x\n", cdev->private->dev_id.devno,
410 sch->schid.sch_no);
411 if (!ccw_device_notify(cdev, CIO_NO_PATH))
412 ccw_device_schedule_sch_unregister(cdev);
413 else
414 ccw_device_set_disconnected(cdev);
415 cdev->private->flags.donotify = 0;
416 break;
417 default:
418 break;
403 } 419 }
404 420
405 if (cdev->private->flags.donotify) { 421 if (cdev->private->flags.donotify) {
@@ -671,6 +687,10 @@ ccw_device_offline(struct ccw_device *cdev)
671 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 687 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
672 return 0; 688 return 0;
673 } 689 }
690 if (cdev->private->state == DEV_STATE_BOXED) {
691 ccw_device_done(cdev, DEV_STATE_BOXED);
692 return 0;
693 }
674 if (ccw_device_is_orphan(cdev)) { 694 if (ccw_device_is_orphan(cdev)) {
675 ccw_device_done(cdev, DEV_STATE_OFFLINE); 695 ccw_device_done(cdev, DEV_STATE_OFFLINE);
676 return 0; 696 return 0;
@@ -730,11 +750,10 @@ ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
730static void ccw_device_generic_notoper(struct ccw_device *cdev, 750static void ccw_device_generic_notoper(struct ccw_device *cdev,
731 enum dev_event dev_event) 751 enum dev_event dev_event)
732{ 752{
733 struct subchannel *sch; 753 if (!ccw_device_notify(cdev, CIO_GONE))
734 754 ccw_device_schedule_sch_unregister(cdev);
735 ccw_device_set_notoper(cdev); 755 else
736 sch = to_subchannel(cdev->dev.parent); 756 ccw_device_set_disconnected(cdev);
737 css_schedule_eval(sch->schid);
738} 757}
739 758
740/* 759/*
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 1b78f639ead3..76769978285f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
125 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
126} 126}
127 127
128static struct file_operations debugfs_fops = { 128static const struct file_operations debugfs_fops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .open = qstat_seq_open, 130 .open = qstat_seq_open,
131 .read = seq_read, 131 .read = seq_read,
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index eff943923c6f..968e3c7c2632 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
84 return single_open(filp, qdio_perf_proc_show, NULL); 84 return single_open(filp, qdio_perf_proc_show, NULL);
85} 85}
86 86
87static struct file_operations qdio_perf_proc_fops = { 87static const struct file_operations qdio_perf_proc_fops = {
88 .owner = THIS_MODULE, 88 .owner = THIS_MODULE,
89 .open = qdio_perf_seq_open, 89 .open = qdio_perf_seq_open,
90 .read = seq_read, 90 .read = seq_read,
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index c20d4790258e..5677b40e4ac0 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -361,7 +361,7 @@ static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
361 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr), 361 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
362 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr), 362 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
363 }; 363 };
364 static struct CPRBX static_cprbx = { 364 static struct CPRBX local_cprbx = {
365 .cprb_len = 0x00dc, 365 .cprb_len = 0x00dc,
366 .cprb_ver_id = 0x02, 366 .cprb_ver_id = 0x02,
367 .func_id = {0x54, 0x32}, 367 .func_id = {0x54, 0x32},
@@ -372,7 +372,7 @@ static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
372 372
373 msg->hdr = static_type6_hdrX; 373 msg->hdr = static_type6_hdrX;
374 msg->hdr.FromCardLen2 = random_number_length, 374 msg->hdr.FromCardLen2 = random_number_length,
375 msg->cprbx = static_cprbx; 375 msg->cprbx = local_cprbx;
376 msg->cprbx.rpl_datal = random_number_length, 376 msg->cprbx.rpl_datal = random_number_length,
377 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); 377 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
378 memcpy(msg->function_code, msg->hdr.function_code, 0x02); 378 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
@@ -561,7 +561,8 @@ static int convert_response_ica(struct zcrypt_device *zdev,
561 if (msg->cprbx.cprb_ver_id == 0x02) 561 if (msg->cprbx.cprb_ver_id == 0x02)
562 return convert_type86_ica(zdev, reply, 562 return convert_type86_ica(zdev, reply,
563 outputdata, outputdatalength); 563 outputdata, outputdatalength);
564 /* no break, incorrect cprb version is an unknown response */ 564 /* Fall through, no break, incorrect cprb version is an unknown
565 * response */
565 default: /* Unknown response type, this should NEVER EVER happen */ 566 default: /* Unknown response type, this should NEVER EVER happen */
566 zdev->online = 0; 567 zdev->online = 0;
567 return -EAGAIN; /* repeat the request on a different device. */ 568 return -EAGAIN; /* repeat the request on a different device. */
@@ -587,7 +588,8 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
587 } 588 }
588 if (msg->cprbx.cprb_ver_id == 0x02) 589 if (msg->cprbx.cprb_ver_id == 0x02)
589 return convert_type86_xcrb(zdev, reply, xcRB); 590 return convert_type86_xcrb(zdev, reply, xcRB);
590 /* no break, incorrect cprb version is an unknown response */ 591 /* Fall through, no break, incorrect cprb version is an unknown
592 * response */
591 default: /* Unknown response type, this should NEVER EVER happen */ 593 default: /* Unknown response type, this should NEVER EVER happen */
592 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 594 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
593 zdev->online = 0; 595 zdev->online = 0;
@@ -610,7 +612,8 @@ static int convert_response_rng(struct zcrypt_device *zdev,
610 return -EINVAL; 612 return -EINVAL;
611 if (msg->cprbx.cprb_ver_id == 0x02) 613 if (msg->cprbx.cprb_ver_id == 0x02)
612 return convert_type86_rng(zdev, reply, data); 614 return convert_type86_rng(zdev, reply, data);
613 /* no break, incorrect cprb version is an unknown response */ 615 /* Fall through, no break, incorrect cprb version is an unknown
616 * response */
614 default: /* Unknown response type, this should NEVER EVER happen */ 617 default: /* Unknown response type, this should NEVER EVER happen */
615 zdev->online = 0; 618 zdev->online = 0;
616 return -EAGAIN; /* repeat the request on a different device. */ 619 return -EAGAIN; /* repeat the request on a different device. */
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 102000d1af6f..3012355f8304 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -158,7 +158,12 @@ static int smsg_pm_restore_thaw(struct device *dev)
158 smsg_path->flags = 0; 158 smsg_path->flags = 0;
159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
160 NULL, NULL, NULL); 160 NULL, NULL, NULL);
161 printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); 161#ifdef CONFIG_PM_DEBUG
162 if (rc)
163 printk(KERN_ERR
164 "iucv_path_connect returned with rc %i\n", rc);
165#endif
166 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
162 } 167 }
163 return 0; 168 return 0;
164} 169}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1be6bf7e8ce6..2889e5f2dfd3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -80,28 +80,35 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
80 80
81static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) 81static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
82{ 82{
83 struct ccw_device *ccwdev;
83 struct zfcp_adapter *adapter; 84 struct zfcp_adapter *adapter;
84 struct zfcp_port *port; 85 struct zfcp_port *port;
85 struct zfcp_unit *unit; 86 struct zfcp_unit *unit;
86 87
87 mutex_lock(&zfcp_data.config_mutex); 88 ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
88 read_lock_irq(&zfcp_data.config_lock); 89 if (!ccwdev)
89 adapter = zfcp_get_adapter_by_busid(busid); 90 return;
90 if (adapter) 91
91 zfcp_adapter_get(adapter); 92 if (ccw_device_set_online(ccwdev))
92 read_unlock_irq(&zfcp_data.config_lock); 93 goto out_ccwdev;
93 94
95 mutex_lock(&zfcp_data.config_mutex);
96 adapter = dev_get_drvdata(&ccwdev->dev);
94 if (!adapter) 97 if (!adapter)
95 goto out_adapter; 98 goto out_unlock;
96 port = zfcp_port_enqueue(adapter, wwpn, 0, 0); 99 zfcp_adapter_get(adapter);
97 if (IS_ERR(port)) 100
101 port = zfcp_get_port_by_wwpn(adapter, wwpn);
102 if (!port)
98 goto out_port; 103 goto out_port;
104
105 zfcp_port_get(port);
99 unit = zfcp_unit_enqueue(port, lun); 106 unit = zfcp_unit_enqueue(port, lun);
100 if (IS_ERR(unit)) 107 if (IS_ERR(unit))
101 goto out_unit; 108 goto out_unit;
102 mutex_unlock(&zfcp_data.config_mutex); 109 mutex_unlock(&zfcp_data.config_mutex);
103 ccw_device_set_online(adapter->ccw_device);
104 110
111 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
105 zfcp_erp_wait(adapter); 112 zfcp_erp_wait(adapter);
106 flush_work(&unit->scsi_work); 113 flush_work(&unit->scsi_work);
107 114
@@ -111,20 +118,23 @@ out_unit:
111 zfcp_port_put(port); 118 zfcp_port_put(port);
112out_port: 119out_port:
113 zfcp_adapter_put(adapter); 120 zfcp_adapter_put(adapter);
114out_adapter: 121out_unlock:
115 mutex_unlock(&zfcp_data.config_mutex); 122 mutex_unlock(&zfcp_data.config_mutex);
123out_ccwdev:
124 put_device(&ccwdev->dev);
116 return; 125 return;
117} 126}
118 127
119static void __init zfcp_init_device_setup(char *devstr) 128static void __init zfcp_init_device_setup(char *devstr)
120{ 129{
121 char *token; 130 char *token;
122 char *str; 131 char *str, *str_saved;
123 char busid[ZFCP_BUS_ID_SIZE]; 132 char busid[ZFCP_BUS_ID_SIZE];
124 u64 wwpn, lun; 133 u64 wwpn, lun;
125 134
126 /* duplicate devstr and keep the original for sysfs presentation*/ 135 /* duplicate devstr and keep the original for sysfs presentation*/
127 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL); 136 str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
137 str = str_saved;
128 if (!str) 138 if (!str)
129 return; 139 return;
130 140
@@ -143,12 +153,12 @@ static void __init zfcp_init_device_setup(char *devstr)
143 if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun)) 153 if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
144 goto err_out; 154 goto err_out;
145 155
146 kfree(str); 156 kfree(str_saved);
147 zfcp_init_device_configure(busid, wwpn, lun); 157 zfcp_init_device_configure(busid, wwpn, lun);
148 return; 158 return;
149 159
150 err_out: 160err_out:
151 kfree(str); 161 kfree(str_saved);
152 pr_err("%s is not a valid SCSI device\n", devstr); 162 pr_err("%s is not a valid SCSI device\n", devstr);
153} 163}
154 164
@@ -593,10 +603,8 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
593 int retval = 0; 603 int retval = 0;
594 unsigned long flags; 604 unsigned long flags;
595 605
596 cancel_work_sync(&adapter->scan_work);
597 cancel_work_sync(&adapter->stat_work); 606 cancel_work_sync(&adapter->stat_work);
598 zfcp_fc_wka_ports_force_offline(adapter->gs); 607 zfcp_fc_wka_ports_force_offline(adapter->gs);
599 zfcp_adapter_scsi_unregister(adapter);
600 sysfs_remove_group(&adapter->ccw_device->dev.kobj, 608 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
601 &zfcp_sysfs_adapter_attrs); 609 &zfcp_sysfs_adapter_attrs);
602 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 610 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0c90f8e71605..e08339428ecf 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -102,6 +102,14 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
102 adapter = dev_get_drvdata(&ccw_device->dev); 102 adapter = dev_get_drvdata(&ccw_device->dev);
103 if (!adapter) 103 if (!adapter)
104 goto out; 104 goto out;
105 mutex_unlock(&zfcp_data.config_mutex);
106
107 cancel_work_sync(&adapter->scan_work);
108
109 mutex_lock(&zfcp_data.config_mutex);
110
111 /* this also removes the scsi devices, so call it first */
112 zfcp_adapter_scsi_unregister(adapter);
105 113
106 write_lock_irq(&zfcp_data.config_lock); 114 write_lock_irq(&zfcp_data.config_lock);
107 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { 115 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
@@ -117,11 +125,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
117 write_unlock_irq(&zfcp_data.config_lock); 125 write_unlock_irq(&zfcp_data.config_lock);
118 126
119 list_for_each_entry_safe(port, p, &port_remove_lh, list) { 127 list_for_each_entry_safe(port, p, &port_remove_lh, list) {
120 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) { 128 list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
121 if (unit->device)
122 scsi_remove_device(unit->device);
123 zfcp_unit_dequeue(unit); 129 zfcp_unit_dequeue(unit);
124 }
125 zfcp_port_dequeue(port); 130 zfcp_port_dequeue(port);
126 } 131 }
127 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); 132 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
@@ -192,13 +197,9 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
192 197
193 mutex_lock(&zfcp_data.config_mutex); 198 mutex_lock(&zfcp_data.config_mutex);
194 adapter = dev_get_drvdata(&ccw_device->dev); 199 adapter = dev_get_drvdata(&ccw_device->dev);
195 if (!adapter)
196 goto out;
197
198 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 200 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
199 zfcp_erp_wait(adapter); 201 zfcp_erp_wait(adapter);
200 mutex_unlock(&zfcp_data.config_mutex); 202 mutex_unlock(&zfcp_data.config_mutex);
201out:
202 return 0; 203 return 0;
203} 204}
204 205
@@ -253,13 +254,17 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
253 254
254 mutex_lock(&zfcp_data.config_mutex); 255 mutex_lock(&zfcp_data.config_mutex);
255 adapter = dev_get_drvdata(&cdev->dev); 256 adapter = dev_get_drvdata(&cdev->dev);
257 if (!adapter)
258 goto out;
259
256 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); 260 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
257 zfcp_erp_wait(adapter); 261 zfcp_erp_wait(adapter);
258 zfcp_erp_thread_kill(adapter); 262 zfcp_erp_thread_kill(adapter);
263out:
259 mutex_unlock(&zfcp_data.config_mutex); 264 mutex_unlock(&zfcp_data.config_mutex);
260} 265}
261 266
262static struct ccw_driver zfcp_ccw_driver = { 267struct ccw_driver zfcp_ccw_driver = {
263 .owner = THIS_MODULE, 268 .owner = THIS_MODULE,
264 .name = "zfcp", 269 .name = "zfcp",
265 .ids = zfcp_ccw_device_id, 270 .ids = zfcp_ccw_device_id,
@@ -284,20 +289,3 @@ int __init zfcp_ccw_register(void)
284{ 289{
285 return ccw_driver_register(&zfcp_ccw_driver); 290 return ccw_driver_register(&zfcp_ccw_driver);
286} 291}
287
288/**
289 * zfcp_get_adapter_by_busid - find zfcp_adapter struct
290 * @busid: bus id string of zfcp adapter to find
291 */
292struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
293{
294 struct ccw_device *ccw_device;
295 struct zfcp_adapter *adapter = NULL;
296
297 ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
298 if (ccw_device) {
299 adapter = dev_get_drvdata(&ccw_device->dev);
300 put_device(&ccw_device->dev);
301 }
302 return adapter;
303}
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 8305c874e86f..ef681dfed0cc 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -86,8 +86,23 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) 86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
87{ 87{
88 char busid[9]; 88 char busid[9];
89 struct ccw_device *ccwdev;
90 struct zfcp_adapter *adapter = NULL;
91
89 snprintf(busid, sizeof(busid), "0.0.%04x", devno); 92 snprintf(busid, sizeof(busid), "0.0.%04x", devno);
90 return zfcp_get_adapter_by_busid(busid); 93 ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
94 if (!ccwdev)
95 goto out;
96
97 adapter = dev_get_drvdata(&ccwdev->dev);
98 if (!adapter)
99 goto out_put;
100
101 zfcp_adapter_get(adapter);
102out_put:
103 put_device(&ccwdev->dev);
104out:
105 return adapter;
91} 106}
92 107
93static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command) 108static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 73d366ba31e5..f73e2180f333 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -858,10 +858,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
858 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 858 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
859 return zfcp_erp_open_ptp_port(act); 859 return zfcp_erp_open_ptp_port(act);
860 if (!port->d_id) { 860 if (!port->d_id) {
861 zfcp_port_get(port); 861 zfcp_fc_trigger_did_lookup(port);
862 if (!queue_work(adapter->work_queue,
863 &port->gid_pn_work))
864 zfcp_port_put(port);
865 return ZFCP_ERP_EXIT; 862 return ZFCP_ERP_EXIT;
866 } 863 }
867 return zfcp_erp_port_strategy_open_port(act); 864 return zfcp_erp_port_strategy_open_port(act);
@@ -869,12 +866,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
869 case ZFCP_ERP_STEP_PORT_OPENING: 866 case ZFCP_ERP_STEP_PORT_OPENING:
870 /* D_ID might have changed during open */ 867 /* D_ID might have changed during open */
871 if (p_status & ZFCP_STATUS_COMMON_OPEN) { 868 if (p_status & ZFCP_STATUS_COMMON_OPEN) {
872 if (port->d_id) 869 if (!port->d_id) {
873 return ZFCP_ERP_SUCCEEDED; 870 zfcp_fc_trigger_did_lookup(port);
874 else { 871 return ZFCP_ERP_EXIT;
875 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
876 return ZFCP_ERP_CONTINUES;
877 } 872 }
873 return ZFCP_ERP_SUCCEEDED;
878 } 874 }
879 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { 875 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
880 port->d_id = 0; 876 port->d_id = 0;
@@ -889,19 +885,21 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
889static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) 885static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
890{ 886{
891 struct zfcp_port *port = erp_action->port; 887 struct zfcp_port *port = erp_action->port;
888 int p_status = atomic_read(&port->status);
892 889
893 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) 890 if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
891 !(p_status & ZFCP_STATUS_COMMON_OPEN))
894 goto close_init_done; 892 goto close_init_done;
895 893
896 switch (erp_action->step) { 894 switch (erp_action->step) {
897 case ZFCP_ERP_STEP_UNINITIALIZED: 895 case ZFCP_ERP_STEP_UNINITIALIZED:
898 zfcp_erp_port_strategy_clearstati(port); 896 zfcp_erp_port_strategy_clearstati(port);
899 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) 897 if (p_status & ZFCP_STATUS_COMMON_OPEN)
900 return zfcp_erp_port_strategy_close(erp_action); 898 return zfcp_erp_port_strategy_close(erp_action);
901 break; 899 break;
902 900
903 case ZFCP_ERP_STEP_PORT_CLOSING: 901 case ZFCP_ERP_STEP_PORT_CLOSING:
904 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) 902 if (p_status & ZFCP_STATUS_COMMON_OPEN)
905 return ZFCP_ERP_FAILED; 903 return ZFCP_ERP_FAILED;
906 break; 904 break;
907 } 905 }
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 36935bc0818f..b3f28deb4505 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -28,7 +28,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
28/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); 30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
31extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); 31extern struct ccw_driver zfcp_ccw_driver;
32 32
33/* zfcp_cfdc.c */ 33/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 34extern struct miscdevice zfcp_cfdc_misc;
@@ -96,6 +96,7 @@ extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
96extern void _zfcp_fc_scan_ports_later(struct work_struct *); 96extern void _zfcp_fc_scan_ports_later(struct work_struct *);
97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
98extern void zfcp_fc_port_did_lookup(struct work_struct *); 98extern void zfcp_fc_port_did_lookup(struct work_struct *);
99extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
99extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
100extern void zfcp_fc_test_link(struct zfcp_port *); 101extern void zfcp_fc_test_link(struct zfcp_port *);
101extern void zfcp_fc_link_test_work(struct work_struct *); 102extern void zfcp_fc_link_test_work(struct work_struct *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 722f22de8753..df23bcead23d 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -361,6 +361,17 @@ out:
361} 361}
362 362
363/** 363/**
364 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
365 * @port: The zfcp_port to lookup the d_id for.
366 */
367void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
368{
369 zfcp_port_get(port);
370 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
371 zfcp_port_put(port);
372}
373
374/**
364 * zfcp_fc_plogi_evaluate - evaluate PLOGI playload 375 * zfcp_fc_plogi_evaluate - evaluate PLOGI playload
365 * @port: zfcp_port structure 376 * @port: zfcp_port structure
366 * @plogi: plogi payload 377 * @plogi: plogi payload
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index f09c863dc6bd..4e41baa0c141 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1058,14 +1058,28 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1059 SBAL_FLAGS0_TYPE_WRITE_READ, 1059 SBAL_FLAGS0_TYPE_WRITE_READ,
1060 sg_resp, max_sbals); 1060 sg_resp, max_sbals);
1061 req->qtcb->bottom.support.resp_buf_length = bytes;
1061 if (bytes <= 0) 1062 if (bytes <= 0)
1062 return -EIO; 1063 return -EIO;
1063 1064
1065 return 0;
1066}
1067
1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 struct scatterlist *sg_req,
1070 struct scatterlist *sg_resp,
1071 int max_sbals)
1072{
1073 int ret;
1074
1075 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1076 if (ret)
1077 return ret;
1078
1064 /* common settings for ct/gs and els requests */ 1079 /* common settings for ct/gs and els requests */
1065 req->qtcb->bottom.support.resp_buf_length = bytes;
1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1080 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1067 req->qtcb->bottom.support.timeout = 2 * R_A_TOV; 1081 req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
1068 zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); 1082 zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);
1069 1083
1070 return 0; 1084 return 0;
1071} 1085}
@@ -1094,8 +1108,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1094 } 1108 }
1095 1109
1096 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1110 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1097 ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, 1111 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1098 FSF_MAX_SBALS_PER_REQ); 1112 FSF_MAX_SBALS_PER_REQ);
1099 if (ret) 1113 if (ret)
1100 goto failed_send; 1114 goto failed_send;
1101 1115
@@ -1192,7 +1206,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1192 } 1206 }
1193 1207
1194 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1195 ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); 1209 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
1196 1210
1197 if (ret) 1211 if (ret)
1198 goto failed_send; 1212 goto failed_send;
@@ -1461,9 +1475,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1461 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1475 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1462 if (req->qtcb->bottom.support.els1_length >= 1476 if (req->qtcb->bottom.support.els1_length >=
1463 FSF_PLOGI_MIN_LEN) { 1477 FSF_PLOGI_MIN_LEN) {
1464 if (plogi->serv_param.wwpn != port->wwpn) 1478 if (plogi->serv_param.wwpn != port->wwpn) {
1465 port->d_id = 0; 1479 port->d_id = 0;
1466 else { 1480 dev_warn(&port->adapter->ccw_device->dev,
1481 "A port opened with WWPN 0x%016Lx "
1482 "returned data that identifies it as "
1483 "WWPN 0x%016Lx\n",
1484 (unsigned long long) port->wwpn,
1485 (unsigned long long)
1486 plogi->serv_param.wwpn);
1487 } else {
1467 port->wwnn = plogi->serv_param.wwnn; 1488 port->wwnn = plogi->serv_param.wwnn;
1468 zfcp_fc_plogi_evaluate(port, plogi); 1489 zfcp_fc_plogi_evaluate(port, plogi);
1469 } 1490 }
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 079a8cf518a3..d31000886ca8 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -224,6 +224,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
224 224
225 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 225 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
226 zfcp_erp_wait(unit->port->adapter); 226 zfcp_erp_wait(unit->port->adapter);
227 flush_work(&unit->scsi_work);
227 zfcp_unit_put(unit); 228 zfcp_unit_put(unit);
228out: 229out:
229 mutex_unlock(&zfcp_data.config_mutex); 230 mutex_unlock(&zfcp_data.config_mutex);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 82bb3b2d207a..e11cca4c784c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -366,6 +366,7 @@ config ISCSI_TCP
366 366
367source "drivers/scsi/cxgb3i/Kconfig" 367source "drivers/scsi/cxgb3i/Kconfig"
368source "drivers/scsi/bnx2i/Kconfig" 368source "drivers/scsi/bnx2i/Kconfig"
369source "drivers/scsi/be2iscsi/Kconfig"
369 370
370config SGIWD93_SCSI 371config SGIWD93_SCSI
371 tristate "SGI WD93C93 SCSI Driver" 372 tristate "SGI WD93C93 SCSI Driver"
@@ -1827,6 +1828,16 @@ config SCSI_SRP
1827 To compile this driver as a module, choose M here: the 1828 To compile this driver as a module, choose M here: the
1828 module will be called libsrp. 1829 module will be called libsrp.
1829 1830
1831config SCSI_BFA_FC
1832 tristate "Brocade BFA Fibre Channel Support"
1833 depends on PCI && SCSI
1834 select SCSI_FC_ATTRS
1835 help
1836 This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
1837
1838 To compile this driver as a module, choose M here. The module will
1839 be called bfa.
1840
1830endif # SCSI_LOWLEVEL 1841endif # SCSI_LOWLEVEL
1831 1842
1832source "drivers/scsi/pcmcia/Kconfig" 1843source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 61a94af3cee7..3ad61db5e3fa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
86obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ 86obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
87obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ 87obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
88obj-$(CONFIG_SCSI_LPFC) += lpfc/ 88obj-$(CONFIG_SCSI_LPFC) += lpfc/
89obj-$(CONFIG_SCSI_BFA_FC) += bfa/
89obj-$(CONFIG_SCSI_PAS16) += pas16.o 90obj-$(CONFIG_SCSI_PAS16) += pas16.o
90obj-$(CONFIG_SCSI_T128) += t128.o 91obj-$(CONFIG_SCSI_T128) += t128.o
91obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 92obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
130obj-$(CONFIG_PS3_ROM) += ps3rom.o 131obj-$(CONFIG_PS3_ROM) += ps3rom.o
131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 132obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 133obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
134obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
133obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o 135obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
134 136
135obj-$(CONFIG_ARM) += arm/ 137obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
new file mode 100644
index 000000000000..2952fcd008ea
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -0,0 +1,8 @@
1config BE2ISCSI
2 tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
3 depends on PCI && SCSI
4 select SCSI_ISCSI_ATTRS
5
6 help
7 This driver implements the iSCSI functionality for ServerEngines'
8 10Gbps Storage adapter - BladeEngine 2.
diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile
new file mode 100644
index 000000000000..c11f443e3f83
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile to build the iSCSI driver for ServerEngine's BladeEngine.
3#
4#
5
6obj-$(CONFIG_BE2ISCSI) += be2iscsi.o
7
8be2iscsi-y := be_iscsi.o be_main.o be_mgmt.o be_cmds.o
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
new file mode 100644
index 000000000000..b36020dcf012
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be.h
@@ -0,0 +1,183 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#ifndef BEISCSI_H
19#define BEISCSI_H
20
21#include <linux/pci.h>
22#include <linux/if_vlan.h>
23
24#define FW_VER_LEN 32
25
26struct be_dma_mem {
27 void *va;
28 dma_addr_t dma;
29 u32 size;
30};
31
32struct be_queue_info {
33 struct be_dma_mem dma_mem;
34 u16 len;
35 u16 entry_size; /* Size of an element in the queue */
36 u16 id;
37 u16 tail, head;
38 bool created;
39 atomic_t used; /* Number of valid elements in the queue */
40};
41
42static inline u32 MODULO(u16 val, u16 limit)
43{
44 WARN_ON(limit & (limit - 1));
45 return val & (limit - 1);
46}
47
48static inline void index_inc(u16 *index, u16 limit)
49{
50 *index = MODULO((*index + 1), limit);
51}
52
53static inline void *queue_head_node(struct be_queue_info *q)
54{
55 return q->dma_mem.va + q->head * q->entry_size;
56}
57
58static inline void *queue_tail_node(struct be_queue_info *q)
59{
60 return q->dma_mem.va + q->tail * q->entry_size;
61}
62
63static inline void queue_head_inc(struct be_queue_info *q)
64{
65 index_inc(&q->head, q->len);
66}
67
68static inline void queue_tail_inc(struct be_queue_info *q)
69{
70 index_inc(&q->tail, q->len);
71}
72
73/*ISCSI */
74
75struct be_eq_obj {
76 struct be_queue_info q;
77 char desc[32];
78
79 /* Adaptive interrupt coalescing (AIC) info */
80 bool enable_aic;
81 u16 min_eqd; /* in usecs */
82 u16 max_eqd; /* in usecs */
83 u16 cur_eqd; /* in usecs */
84};
85
86struct be_mcc_obj {
87 struct be_queue_info *q;
88 struct be_queue_info *cq;
89};
90
91struct be_ctrl_info {
92 u8 __iomem *csr;
93 u8 __iomem *db; /* Door Bell */
94 u8 __iomem *pcicfg; /* PCI config space */
95 struct pci_dev *pdev;
96
97 /* Mbox used for cmd request/response */
98 spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
99 struct be_dma_mem mbox_mem;
100 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
101 * is stored for freeing purpose */
102 struct be_dma_mem mbox_mem_alloced;
103
104 /* MCC Rings */
105 struct be_mcc_obj mcc_obj;
106 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
107 spinlock_t mcc_cq_lock;
108
109 /* MCC Async callback */
110 void (*async_cb) (void *adapter, bool link_up);
111 void *adapter_ctxt;
112};
113
114#include "be_cmds.h"
115
116#define PAGE_SHIFT_4K 12
117#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
118
119/* Returns number of pages spanned by the data starting at the given addr */
120#define PAGES_4K_SPANNED(_address, size) \
121 ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
122 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
123
124/* Byte offset into the page corresponding to given address */
125#define OFFSET_IN_PAGE(addr) \
126 ((size_t)(addr) & (PAGE_SIZE_4K-1))
127
128/* Returns bit offset within a DWORD of a bitfield */
129#define AMAP_BIT_OFFSET(_struct, field) \
130 (((size_t)&(((_struct *)0)->field))%32)
131
132/* Returns the bit mask of the field that is NOT shifted into location. */
133static inline u32 amap_mask(u32 bitsize)
134{
135 return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
136}
137
138static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
139 u32 offset, u32 value)
140{
141 u32 *dw = (u32 *) ptr + dw_offset;
142 *dw &= ~(mask << offset);
143 *dw |= (mask & value) << offset;
144}
145
146#define AMAP_SET_BITS(_struct, field, ptr, val) \
147 amap_set(ptr, \
148 offsetof(_struct, field)/32, \
149 amap_mask(sizeof(((_struct *)0)->field)), \
150 AMAP_BIT_OFFSET(_struct, field), \
151 val)
152
153static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
154{
155 u32 *dw = ptr;
156 return mask & (*(dw + dw_offset) >> offset);
157}
158
159#define AMAP_GET_BITS(_struct, field, ptr) \
160 amap_get(ptr, \
161 offsetof(_struct, field)/32, \
162 amap_mask(sizeof(((_struct *)0)->field)), \
163 AMAP_BIT_OFFSET(_struct, field))
164
165#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
166#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
167static inline void swap_dws(void *wrb, int len)
168{
169#ifdef __BIG_ENDIAN
170 u32 *dw = wrb;
171 WARN_ON(len % 4);
172 do {
173 *dw = cpu_to_le32(*dw);
174 dw++;
175 len -= 4;
176 } while (len);
177#endif /* __BIG_ENDIAN */
178}
179
180extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
181 u16 num_popped);
182
183#endif /* BEISCSI_H */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
new file mode 100644
index 000000000000..08007b6e42df
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -0,0 +1,523 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
19#include "be_mgmt.h"
20#include "be_main.h"
21
22static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
23{
24 if (compl->flags != 0) {
25 compl->flags = le32_to_cpu(compl->flags);
26 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
27 return true;
28 } else
29 return false;
30}
31
32static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
33{
34 compl->flags = 0;
35}
36
37static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
38 struct be_mcc_compl *compl)
39{
40 u16 compl_status, extd_status;
41
42 be_dws_le_to_cpu(compl, 4);
43
44 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
45 CQE_STATUS_COMPL_MASK;
46 if (compl_status != MCC_STATUS_SUCCESS) {
47 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
48 CQE_STATUS_EXTD_MASK;
49 dev_err(&ctrl->pdev->dev,
50 "error in cmd completion: status(compl/extd)=%d/%d\n",
51 compl_status, extd_status);
52 return -1;
53 }
54 return 0;
55}
56
57static inline bool is_link_state_evt(u32 trailer)
58{
59 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
60 ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
61}
62
63void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
64 u16 num_popped)
65{
66 u32 val = 0;
67 val |= qid & DB_CQ_RING_ID_MASK;
68 if (arm)
69 val |= 1 << DB_CQ_REARM_SHIFT;
70 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
71 iowrite32(val, ctrl->db + DB_CQ_OFFSET);
72}
73
74static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
75{
76#define long_delay 2000
77 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
78 int cnt = 0, wait = 5; /* in usecs */
79 u32 ready;
80
81 do {
82 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
83 if (ready)
84 break;
85
86 if (cnt > 6000000) {
87 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
88 return -1;
89 }
90
91 if (cnt > 50) {
92 wait = long_delay;
93 mdelay(long_delay / 1000);
94 } else
95 udelay(wait);
96 cnt += wait;
97 } while (true);
98 return 0;
99}
100
101int be_mbox_notify(struct be_ctrl_info *ctrl)
102{
103 int status;
104 u32 val = 0;
105 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
106 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
107 struct be_mcc_mailbox *mbox = mbox_mem->va;
108 struct be_mcc_compl *compl = &mbox->compl;
109
110 val &= ~MPU_MAILBOX_DB_RDY_MASK;
111 val |= MPU_MAILBOX_DB_HI_MASK;
112 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
113 iowrite32(val, db);
114
115 status = be_mbox_db_ready_wait(ctrl);
116 if (status != 0) {
117 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
118 return status;
119 }
120 val = 0;
121 val &= ~MPU_MAILBOX_DB_RDY_MASK;
122 val &= ~MPU_MAILBOX_DB_HI_MASK;
123 val |= (u32) (mbox_mem->dma >> 4) << 2;
124 iowrite32(val, db);
125
126 status = be_mbox_db_ready_wait(ctrl);
127 if (status != 0) {
128 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
129 return status;
130 }
131 if (be_mcc_compl_is_new(compl)) {
132 status = be_mcc_compl_process(ctrl, &mbox->compl);
133 be_mcc_compl_use(compl);
134 if (status) {
135 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
136 return status;
137 }
138 } else {
139 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
140 return -1;
141 }
142 return 0;
143}
144
145void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
146 bool embedded, u8 sge_cnt)
147{
148 if (embedded)
149 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
150 else
151 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
152 MCC_WRB_SGE_CNT_SHIFT;
153 wrb->payload_length = payload_len;
154 be_dws_cpu_to_le(wrb, 8);
155}
156
157void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
158 u8 subsystem, u8 opcode, int cmd_len)
159{
160 req_hdr->opcode = opcode;
161 req_hdr->subsystem = subsystem;
162 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
163}
164
165static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
166 struct be_dma_mem *mem)
167{
168 int i, buf_pages;
169 u64 dma = (u64) mem->dma;
170
171 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
172 for (i = 0; i < buf_pages; i++) {
173 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
174 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
175 dma += PAGE_SIZE_4K;
176 }
177}
178
179static u32 eq_delay_to_mult(u32 usec_delay)
180{
181#define MAX_INTR_RATE 651042
182 const u32 round = 10;
183 u32 multiplier;
184
185 if (usec_delay == 0)
186 multiplier = 0;
187 else {
188 u32 interrupt_rate = 1000000 / usec_delay;
189 if (interrupt_rate == 0)
190 multiplier = 1023;
191 else {
192 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
193 multiplier /= interrupt_rate;
194 multiplier = (multiplier + round / 2) / round;
195 multiplier = min(multiplier, (u32) 1023);
196 }
197 }
198 return multiplier;
199}
200
201struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
202{
203 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
204}
205
206int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
207 struct be_queue_info *eq, int eq_delay)
208{
209 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
210 struct be_cmd_req_eq_create *req = embedded_payload(wrb);
211 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
212 struct be_dma_mem *q_mem = &eq->dma_mem;
213 int status;
214
215 spin_lock(&ctrl->mbox_lock);
216 memset(wrb, 0, sizeof(*wrb));
217
218 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
219
220 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
221 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
222
223 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
224
225 AMAP_SET_BITS(struct amap_eq_context, func, req->context,
226 PCI_FUNC(ctrl->pdev->devfn));
227 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
228 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
229 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
230 __ilog2_u32(eq->len / 256));
231 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
232 eq_delay_to_mult(eq_delay));
233 be_dws_cpu_to_le(req->context, sizeof(req->context));
234
235 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
236
237 status = be_mbox_notify(ctrl);
238 if (!status) {
239 eq->id = le16_to_cpu(resp->eq_id);
240 eq->created = true;
241 }
242 spin_unlock(&ctrl->mbox_lock);
243 return status;
244}
245
246int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
247{
248 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
249 int status;
250 u8 *endian_check;
251
252 spin_lock(&ctrl->mbox_lock);
253 memset(wrb, 0, sizeof(*wrb));
254
255 endian_check = (u8 *) wrb;
256 *endian_check++ = 0xFF;
257 *endian_check++ = 0x12;
258 *endian_check++ = 0x34;
259 *endian_check++ = 0xFF;
260 *endian_check++ = 0xFF;
261 *endian_check++ = 0x56;
262 *endian_check++ = 0x78;
263 *endian_check++ = 0xFF;
264 be_dws_cpu_to_le(wrb, sizeof(*wrb));
265
266 status = be_mbox_notify(ctrl);
267 if (status)
268 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");
269
270 spin_unlock(&ctrl->mbox_lock);
271 return status;
272}
273
274int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
275 struct be_queue_info *cq, struct be_queue_info *eq,
276 bool sol_evts, bool no_delay, int coalesce_wm)
277{
278 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
279 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
280 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
281 struct be_dma_mem *q_mem = &cq->dma_mem;
282 void *ctxt = &req->context;
283 int status;
284
285 spin_lock(&ctrl->mbox_lock);
286 memset(wrb, 0, sizeof(*wrb));
287
288 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
289
290 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
291 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
292
293 if (!q_mem->va)
294 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
295
296 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
297
298 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
299 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
300 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
301 __ilog2_u32(cq->len / 256));
302 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
303 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
304 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
305 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
306 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
307 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
308 PCI_FUNC(ctrl->pdev->devfn));
309 be_dws_cpu_to_le(ctxt, sizeof(req->context));
310
311 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
312
313 status = be_mbox_notify(ctrl);
314 if (!status) {
315 cq->id = le16_to_cpu(resp->cq_id);
316 cq->created = true;
317 } else
318 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
319 status);
320 spin_unlock(&ctrl->mbox_lock);
321
322 return status;
323}
324
325static u32 be_encoded_q_len(int q_len)
326{
327 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
328 if (len_encoded == 16)
329 len_encoded = 0;
330 return len_encoded;
331}
332int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
333 int queue_type)
334{
335 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
336 struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
337 u8 subsys = 0, opcode = 0;
338 int status;
339
340 spin_lock(&ctrl->mbox_lock);
341 memset(wrb, 0, sizeof(*wrb));
342 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
343
344 switch (queue_type) {
345 case QTYPE_EQ:
346 subsys = CMD_SUBSYSTEM_COMMON;
347 opcode = OPCODE_COMMON_EQ_DESTROY;
348 break;
349 case QTYPE_CQ:
350 subsys = CMD_SUBSYSTEM_COMMON;
351 opcode = OPCODE_COMMON_CQ_DESTROY;
352 break;
353 case QTYPE_WRBQ:
354 subsys = CMD_SUBSYSTEM_ISCSI;
355 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
356 break;
357 case QTYPE_DPDUQ:
358 subsys = CMD_SUBSYSTEM_ISCSI;
359 opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
360 break;
361 case QTYPE_SGL:
362 subsys = CMD_SUBSYSTEM_ISCSI;
363 opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
364 break;
365 default:
366 spin_unlock(&ctrl->mbox_lock);
367 BUG();
368 return -1;
369 }
370 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
371 if (queue_type != QTYPE_SGL)
372 req->id = cpu_to_le16(q->id);
373
374 status = be_mbox_notify(ctrl);
375
376 spin_unlock(&ctrl->mbox_lock);
377 return status;
378}
379
380int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
381{
382 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
383 struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
384 int status;
385
386 spin_lock(&ctrl->mbox_lock);
387 memset(wrb, 0, sizeof(*wrb));
388 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
389 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
390 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
391 sizeof(*req));
392
393 status = be_mbox_notify(ctrl);
394 if (!status) {
395 struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
396
397 memcpy(mac_addr, resp->mac_address, ETH_ALEN);
398 }
399
400 spin_unlock(&ctrl->mbox_lock);
401 return status;
402}
403
404int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
405 struct be_queue_info *cq,
406 struct be_queue_info *dq, int length,
407 int entry_size)
408{
409 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
410 struct be_defq_create_req *req = embedded_payload(wrb);
411 struct be_dma_mem *q_mem = &dq->dma_mem;
412 void *ctxt = &req->context;
413 int status;
414
415 spin_lock(&ctrl->mbox_lock);
416 memset(wrb, 0, sizeof(*wrb));
417
418 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
419
420 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
421 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
422
423 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
424 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
425 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
426 1);
427 AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
428 PCI_FUNC(ctrl->pdev->devfn));
429 AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
430 be_encoded_q_len(length / sizeof(struct phys_addr)));
431 AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
432 ctxt, entry_size);
433 AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
434 cq->id);
435
436 be_dws_cpu_to_le(ctxt, sizeof(req->context));
437
438 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
439
440 status = be_mbox_notify(ctrl);
441 if (!status) {
442 struct be_defq_create_resp *resp = embedded_payload(wrb);
443
444 dq->id = le16_to_cpu(resp->id);
445 dq->created = true;
446 }
447 spin_unlock(&ctrl->mbox_lock);
448
449 return status;
450}
451
452int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
453 struct be_queue_info *wrbq)
454{
455 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
456 struct be_wrbq_create_req *req = embedded_payload(wrb);
457 struct be_wrbq_create_resp *resp = embedded_payload(wrb);
458 int status;
459
460 spin_lock(&ctrl->mbox_lock);
461 memset(wrb, 0, sizeof(*wrb));
462
463 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
464
465 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
466 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
467 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
468 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
469
470 status = be_mbox_notify(ctrl);
471 if (!status)
472 wrbq->id = le16_to_cpu(resp->cid);
473 spin_unlock(&ctrl->mbox_lock);
474 return status;
475}
476
477int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
478 struct be_dma_mem *q_mem,
479 u32 page_offset, u32 num_pages)
480{
481 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
482 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
483 int status;
484 unsigned int curr_pages;
485 u32 internal_page_offset = 0;
486 u32 temp_num_pages = num_pages;
487
488 if (num_pages == 0xff)
489 num_pages = 1;
490
491 spin_lock(&ctrl->mbox_lock);
492 do {
493 memset(wrb, 0, sizeof(*wrb));
494 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
495 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
496 OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
497 sizeof(*req));
498 curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
499 pages);
500 req->num_pages = min(num_pages, curr_pages);
501 req->page_offset = page_offset;
502 be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
503 q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
504 internal_page_offset += req->num_pages;
505 page_offset += req->num_pages;
506 num_pages -= req->num_pages;
507
508 if (temp_num_pages == 0xff)
509 req->num_pages = temp_num_pages;
510
511 status = be_mbox_notify(ctrl);
512 if (status) {
513 SE_DEBUG(DBG_LVL_1,
514 "FW CMD to map iscsi frags failed.\n");
515 goto error;
516 }
517 } while (num_pages > 0);
518error:
519 spin_unlock(&ctrl->mbox_lock);
520 if (status != 0)
521 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
522 return status;
523}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
new file mode 100644
index 000000000000..c20d686cbb43
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -0,0 +1,877 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#ifndef BEISCSI_CMDS_H
19#define BEISCSI_CMDS_H
20
21/**
 22 * The driver sends configuration and management command requests to the
23 * firmware in the BE. These requests are communicated to the processor
24 * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
25 * WRB inside a MAILBOX.
26 * The commands are serviced by the ARM processor in the BladeEngine's MPU.
27 */
28struct be_sge {
29 u32 pa_lo;
30 u32 pa_hi;
31 u32 len;
32};
33
34#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
35#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
36struct be_mcc_wrb {
37 u32 embedded; /* dword 0 */
38 u32 payload_length; /* dword 1 */
39 u32 tag0; /* dword 2 */
40 u32 tag1; /* dword 3 */
41 u32 rsvd; /* dword 4 */
42 union {
43 u8 embedded_payload[236]; /* used by embedded cmds */
44 struct be_sge sgl[19]; /* used by non-embedded cmds */
45 } payload;
46};
47
48#define CQE_FLAGS_VALID_MASK (1 << 31)
49#define CQE_FLAGS_ASYNC_MASK (1 << 30)
50
51/* Completion Status */
52#define MCC_STATUS_SUCCESS 0x0
53
54#define CQE_STATUS_COMPL_MASK 0xFFFF
55#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
56#define CQE_STATUS_EXTD_MASK 0xFFFF
57#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */
58
59struct be_mcc_compl {
60 u32 status; /* dword 0 */
61 u32 tag0; /* dword 1 */
62 u32 tag1; /* dword 2 */
63 u32 flags; /* dword 3 */
64};
65
66/********* Mailbox door bell *************/
67/**
68 * Used for driver communication with the FW.
69 * The software must write this register twice to post any command. First,
70 * it writes the register with hi=1 and the upper bits of the physical address
71 * for the MAILBOX structure. Software must poll the ready bit until this
 72 * is acknowledged. Then, software writes the register with hi=0 with the lower
73 * bits in the address. It must poll the ready bit until the command is
74 * complete. Upon completion, the MAILBOX will contain a valid completion
75 * queue entry.
76 */
77#define MPU_MAILBOX_DB_OFFSET 0x160
78#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
79#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
80
 81/********** MPU semaphore ******************/
82#define MPU_EP_SEMAPHORE_OFFSET 0xac
83#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
84#define EP_SEMAPHORE_POST_ERR_MASK 0x1
85#define EP_SEMAPHORE_POST_ERR_SHIFT 31
86
87/********** MCC door bell ************/
88#define DB_MCCQ_OFFSET 0x140
89#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
90/* Number of entries posted */
91#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
92
 93/* MPU semaphore POST stage values */
94#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
95
96/**
97 * When the async bit of mcc_compl is set, the last 4 bytes of
98 * mcc_compl is interpreted as follows:
99 */
100#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
101#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
102#define ASYNC_EVENT_CODE_LINK_STATE 0x1
103struct be_async_event_trailer {
104 u32 code;
105};
106
107enum {
108 ASYNC_EVENT_LINK_DOWN = 0x0,
109 ASYNC_EVENT_LINK_UP = 0x1
110};
111
112/**
113 * When the event code of an async trailer is link-state, the mcc_compl
114 * must be interpreted as follows
115 */
116struct be_async_event_link_state {
117 u8 physical_port;
118 u8 port_link_status;
119 u8 port_duplex;
120 u8 port_speed;
121 u8 port_fault;
122 u8 rsvd0[7];
123 struct be_async_event_trailer trailer;
124} __packed;
125
126struct be_mcc_mailbox {
127 struct be_mcc_wrb wrb;
128 struct be_mcc_compl compl;
129};
130
131/* Type of subsystems supported by FW */
132#define CMD_SUBSYSTEM_COMMON 0x1
133#define CMD_SUBSYSTEM_ISCSI 0x2
134#define CMD_SUBSYSTEM_ETH 0x3
135#define CMD_SUBSYSTEM_ISCSI_INI 0x6
136#define CMD_COMMON_TCP_UPLOAD 0x1
137
138/**
139 * List of common opcodes subsystem CMD_SUBSYSTEM_COMMON
140 * These opcodes are unique for each subsystem defined above
141 */
142#define OPCODE_COMMON_CQ_CREATE 12
143#define OPCODE_COMMON_EQ_CREATE 13
144#define OPCODE_COMMON_MCC_CREATE 21
145#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
146#define OPCODE_COMMON_GET_FW_VERSION 35
147#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
148#define OPCODE_COMMON_FIRMWARE_CONFIG 42
149#define OPCODE_COMMON_MCC_DESTROY 53
150#define OPCODE_COMMON_CQ_DESTROY 54
151#define OPCODE_COMMON_EQ_DESTROY 55
152#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
153#define OPCODE_COMMON_FUNCTION_RESET 61
154
155/**
156 * LIST of opcodes that are common between Initiator and Target
157 * used by CMD_SUBSYSTEM_ISCSI
158 * These opcodes are unique for each subsystem defined above
159 */
160#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
161#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
162#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
163#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
164#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
165#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
166#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
167#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
168
/* Header at the start of every command request sent to FW */
struct be_cmd_req_hdr {
	u8 opcode;		/* dword 0 */
	u8 subsystem;		/* dword 0 */
	u8 port_number;		/* dword 0 */
	u8 domain;		/* dword 0 */
	u32 timeout;		/* dword 1 */
	u32 request_length;	/* dword 2 */
	u32 rsvd;		/* dword 3 */
};

/* Header at the start of every command response returned by FW */
struct be_cmd_resp_hdr {
	u32 info;		/* dword 0 */
	u32 status;		/* dword 1 */
	u32 response_length;	/* dword 2 */
	u32 actual_resp_len;	/* dword 3 */
};

/* 64-bit DMA address split into two 32-bit halves, as FW expects */
struct phys_addr {
	u32 lo;
	u32 hi;
};
190
/**************************
 * BE Command definitions *
 **************************/

/*
 * Pseudo amap definition in which each bit of the actual structure is
 * defined as a byte - used to calculate offset/shift/mask of each field
 */
struct amap_eq_context {
	u8 cidx[13];		/* dword 0 */
	u8 rsvd0[3];		/* dword 0 */
	u8 epidx[13];		/* dword 0 */
	u8 valid;		/* dword 0 */
	u8 rsvd1;		/* dword 0 */
	u8 size;		/* dword 0 */
	u8 pidx[13];		/* dword 1 */
	u8 rsvd2[3];		/* dword 1 */
	u8 pd[10];		/* dword 1 */
	u8 count[3];		/* dword 1 */
	u8 solevent;		/* dword 1 */
	u8 stalled;		/* dword 1 */
	u8 armed;		/* dword 1 */
	u8 rsvd3[4];		/* dword 2 */
	u8 func[8];		/* dword 2 */
	u8 rsvd4;		/* dword 2 */
	u8 delaymult[10];	/* dword 2 */
	u8 rsvd5[2];		/* dword 2 */
	u8 phase[2];		/* dword 2 */
	u8 nodelay;		/* dword 2 */
	u8 rsvd6[4];		/* dword 2 */
	u8 rsvd7[32];		/* dword 3 */
} __packed;

struct be_cmd_req_eq_create {
	struct be_cmd_req_hdr hdr;	/* dw[4] */
	u16 num_pages;		/* sword */
	u16 rsvd0;		/* sword */
	u8 context[sizeof(struct amap_eq_context) / 8];	/* dw[4] */
	struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_eq_create {
	struct be_cmd_resp_hdr resp_hdr;
	u16 eq_id;		/* sword */
	u16 rsvd0;		/* sword */
} __packed;

/* MAC address prefixed with its length, as FW returns it */
struct mac_addr {
	u16 size_of_struct;
	u8 addr[ETH_ALEN];
} __packed;
242
struct be_cmd_req_mac_query {
	struct be_cmd_req_hdr hdr;
	u8 type;
	u8 permanent;		/* nonzero: query the factory MAC */
	u16 if_id;
} __packed;

struct be_cmd_resp_mac_query {
	struct be_cmd_resp_hdr hdr;
	struct mac_addr mac;
};
254
/******************** Create CQ ***************************/
/*
 * Pseudo amap definition in which each bit of the actual structure is
 * defined as a byte - used to calculate offset/shift/mask of each field
 */
struct amap_cq_context {
	u8 cidx[11];		/* dword 0 */
	u8 rsvd0;		/* dword 0 */
	u8 coalescwm[2];	/* dword 0 */
	u8 nodelay;		/* dword 0 */
	u8 epidx[11];		/* dword 0 */
	u8 rsvd1;		/* dword 0 */
	u8 count[2];		/* dword 0 */
	u8 valid;		/* dword 0 */
	u8 solevent;		/* dword 0 */
	u8 eventable;		/* dword 0 */
	u8 pidx[11];		/* dword 1 */
	u8 rsvd2;		/* dword 1 */
	u8 pd[10];		/* dword 1 */
	u8 eqid[8];		/* dword 1 */
	u8 stalled;		/* dword 1 */
	u8 armed;		/* dword 1 */
	u8 rsvd3[4];		/* dword 2 */
	u8 func[8];		/* dword 2 */
	u8 rsvd4[20];		/* dword 2 */
	u8 rsvd5[32];		/* dword 3 */
} __packed;

struct be_cmd_req_cq_create {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u16 rsvd0;
	u8 context[sizeof(struct amap_cq_context) / 8];
	struct phys_addr pages[4];
} __packed;

struct be_cmd_resp_cq_create {
	struct be_cmd_resp_hdr hdr;
	u16 cq_id;
	u16 rsvd0;
} __packed;
296
/******************** Create MCCQ ***************************/
/*
 * Pseudo amap definition in which each bit of the actual structure is
 * defined as a byte - used to calculate offset/shift/mask of each field
 */
struct amap_mcc_context {
	u8 con_index[14];
	u8 rsvd0[2];
	u8 ring_size[4];
	u8 fetch_wrb;
	u8 fetch_r2t;
	u8 cq_id[10];
	u8 prod_index[14];
	u8 fid[8];
	u8 pdid[9];
	u8 valid;
	u8 rsvd1[32];
	u8 rsvd2[32];
} __packed;

struct be_cmd_req_mcc_create {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u16 rsvd0;
	u8 context[sizeof(struct amap_mcc_context) / 8];
	struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_mcc_create {
	struct be_cmd_resp_hdr hdr;
	u16 id;
	u16 rsvd0;
} __packed;
330
/******************** Q Destroy ***************************/
/* Type of Queue to be destroyed */
enum {
	QTYPE_EQ = 1,
	QTYPE_CQ,
	QTYPE_MCCQ,
	QTYPE_WRBQ,
	QTYPE_DPDUQ,
	QTYPE_SGL
};

struct be_cmd_req_q_destroy {
	struct be_cmd_req_hdr hdr;
	u16 id;
	u16 bypass_flush;	/* valid only for rx q destroy */
} __packed;

/* Bare MAC address, no length prefix (contrast with struct mac_addr) */
struct macaddr {
	u8 byte[ETH_ALEN];
};

struct be_cmd_req_mcast_mac_config {
	struct be_cmd_req_hdr hdr;
	u16 num_mac;		/* number of valid entries in mac[] */
	u8 promiscuous;
	u8 interface_id;
	struct macaddr mac[32];
} __packed;
359
360static inline void *embedded_payload(struct be_mcc_wrb *wrb)
361{
362 return wrb->payload.embedded_payload;
363}
364
365static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
366{
367 return &wrb->payload.sgl[0];
368}
369
/******************** Modify EQ Delay *******************/
struct be_cmd_req_modify_eq_delay {
	struct be_cmd_req_hdr hdr;
	u32 num_eq;		/* number of valid entries in delay[] */
	struct {
		u32 eq_id;
		u32 phase;
		u32 delay_multiplier;
	} delay[8];
} __packed;
380
/******************** Get MAC ADDR *******************/

/*
 * NOTE(review): ETH_ALEN is normally provided by <linux/if_ether.h>;
 * redefining it here risks a macro clash - confirm this is intended.
 */
#define ETH_ALEN	6


struct be_cmd_req_get_mac_addr {
	struct be_cmd_req_hdr hdr;
	u32 nic_port_count;
	u32 speed;
	u32 max_speed;
	u32 link_state;
	u32 max_frame_size;
	u16 size_of_structure;
	u8 mac_address[ETH_ALEN];
	u32 rsvd[23];
};
397
398struct be_cmd_resp_get_mac_addr {
399 struct be_cmd_resp_hdr hdr;
400 u32 nic_port_count;
401 u32 speed;
402 u32 max_speed;
403 u32 link_state;
404 u32 max_frame_size;
405 u16 size_of_structure;
406 u8 mac_address[6];
407 u32 rsvd[23];
408};
409
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay);

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay,
			  int num_cqe_dma_coalesce);

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int type);
int be_poll_mcc(struct be_ctrl_info *ctrl);
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl);
int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr);

/* iSCSI functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);

int be_mbox_notify(struct be_ctrl_info *ctrl);

int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size);

int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem, u32 page_offset,
				u32 num_pages);

int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq);
442
/* Default PDU ring context, opaque form: 4 raw dwords */
struct be_default_pdu_context {
	u32 dw[4];
} __packed;

/* Bit layout of be_default_pdu_context (one byte per bit, for AMAP_*) */
struct amap_be_default_pdu_context {
	u8 dbuf_cindex[13];	/* dword 0 */
	u8 rsvd0[3];		/* dword 0 */
	u8 ring_size[4];	/* dword 0 */
	u8 ring_state[4];	/* dword 0 */
	u8 rsvd1[8];		/* dword 0 */
	u8 dbuf_pindex[13];	/* dword 1 */
	u8 rsvd2;		/* dword 1 */
	u8 pci_func_id[8];	/* dword 1 */
	u8 rx_pdid[9];		/* dword 1 */
	u8 rx_pdid_valid;	/* dword 1 */
	u8 default_buffer_size[16];	/* dword 2 */
	u8 cq_id_recv[10];	/* dword 2 */
	u8 rx_pdid_not_valid;	/* dword 2 */
	u8 rsvd3[5];		/* dword 2 */
	u8 rsvd4[32];		/* dword 3 */
} __packed;
464
struct be_defq_create_req {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u8 ulp_num;
	u8 rsvd0;
	struct be_default_pdu_context context;
	struct phys_addr pages[8];
} __packed;

struct be_defq_create_resp {
	/*
	 * NOTE(review): carries be_cmd_req_hdr rather than
	 * be_cmd_resp_hdr - confirm against the FW interface spec.
	 */
	struct be_cmd_req_hdr hdr;
	u16 id;
	u16 rsvd0;
} __packed;

struct be_post_sgl_pages_req {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u16 page_offset;
	u32 rsvd0;
	struct phys_addr pages[26];
	u32 rsvd1;
} __packed;

struct be_wrbq_create_req {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u8 ulp_num;
	u8 rsvd0;
	struct phys_addr pages[8];
} __packed;

struct be_wrbq_create_resp {
	struct be_cmd_resp_hdr resp_hdr;
	u16 cid;		/* connection id of the created WRB queue */
	u16 rsvd0;
} __packed;
502
/* Field masks for decoding the dwords of a solicited CQE (sol_cqe) */
#define SOL_CID_MASK		0x0000FFC0
#define SOL_CODE_MASK		0x0000003F
#define SOL_WRB_INDEX_MASK	0x00FF0000
#define SOL_CMD_WND_MASK	0xFF000000
#define SOL_RES_CNT_MASK	0x7FFFFFFF
#define SOL_EXP_CMD_SN_MASK	0xFFFFFFFF
#define SOL_HW_STS_MASK		0x000000FF
#define SOL_STS_MASK		0x0000FF00
#define SOL_RESP_MASK		0x00FF0000
#define SOL_FLAGS_MASK		0x7F000000
#define SOL_S_MASK		0x80000000

/* Solicited completion queue entry, opaque form: 4 raw dwords */
struct sol_cqe {
	u32 dw[4];
};

/* Bit layout of sol_cqe (one byte per bit, for the AMAP_* accessors) */
struct amap_sol_cqe {
	u8 hw_sts[8];		/* dword 0 */
	u8 i_sts[8];		/* dword 0 */
	u8 i_resp[8];		/* dword 0 */
	u8 i_flags[7];		/* dword 0 */
	u8 s;			/* dword 0 */
	u8 i_exp_cmd_sn[32];	/* dword 1 */
	u8 code[6];		/* dword 2 */
	u8 cid[10];		/* dword 2 */
	u8 wrb_index[8];	/* dword 2 */
	u8 i_cmd_wnd[8];	/* dword 2 */
	u8 i_res_cnt[31];	/* dword 3 */
	u8 valid;		/* dword 3 */
} __packed;
533
534
/*
 * Post WRB Queue Doorbell Register used by the host storage stack to
 * notify the controller of a posted Work Request Block.
 */
#define DB_WRB_POST_CID_MASK		0x3FF	/* bits 0 - 9 */
#define DB_DEF_PDU_WRB_INDEX_MASK	0xFF	/* bits 0 - 7 */

#define DB_DEF_PDU_WRB_INDEX_SHIFT	16
#define DB_DEF_PDU_NUM_POSTED_SHIFT	24
545
struct fragnum_bits_for_sgl_cra_in {
	struct be_cmd_req_hdr hdr;
	u32 num_bits;
} __packed;

struct iscsi_cleanup_req {
	struct be_cmd_req_hdr hdr;
	u16 chute;		/* one of CMD_CONNECTION_CHUTE_* */
	u8 hdr_ring_id;
	u8 data_ring_id;

} __packed;

/* Per-EQ interrupt delay entry for OPCODE_COMMON_MODIFY_EQ_DELAY */
struct eq_delay {
	u32 eq_id;
	u32 phase;
	u32 delay_multiplier;
} __packed;

struct be_eq_delay_params_in {
	struct be_cmd_req_hdr hdr;
	u32 num_eq;		/* number of valid entries in delay[] */
	struct eq_delay delay[8];
} __packed;
570
struct ip_address_format {
	u16 size_of_structure;
	u8 reserved;
	u8 ip_type;		/* address family selector (see BE2_IPV4) */
	u8 ip_address[16];
	u32 rsvd0;
} __packed;

struct tcp_connect_and_offload_in {
	struct be_cmd_req_hdr hdr;
	struct ip_address_format ip_address;
	u16 tcp_port;
	u16 cid;
	u16 cq_id;
	u16 defq_id;
	struct phys_addr dataout_template_pa;
	u16 hdr_ring_id;
	u16 data_ring_id;
	u8 do_offload;
	u8 rsvd0[3];
} __packed;

struct tcp_connect_and_offload_out {
	struct be_cmd_resp_hdr hdr;
	u32 connection_handle;
	u16 cid;
	u16 rsvd0;

} __packed;

/* Host-side bookkeeping for an outstanding MCC WRB */
struct be_mcc_wrb_context {
	struct MCC_WRB *wrb;
	int *users_final_status;
} __packed;
605
#define DB_DEF_PDU_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 13 */
#define DB_DEF_PDU_REARM_SHIFT		14
#define DB_DEF_PDU_EVENT_SHIFT		15
#define DB_DEF_PDU_CQPROC_SHIFT		16

/* Driver-message completion queue entry: 4 raw dwords */
struct dmsg_cqe {
	u32 dw[4];
} __packed;
615
/* Request a FW upload (teardown) of an offloaded TCP connection */
struct tcp_upload_params_in {
	struct be_cmd_req_hdr hdr;
	u16 id;			/* connection id to upload */
	u16 upload_type;	/* one of CONNECTION_UPLOAD_* */
	u32 reset_seq;
} __packed;

struct tcp_upload_params_out {
	u32 dw[32];
} __packed;

union tcp_upload_params {
	struct tcp_upload_params_in request;
	struct tcp_upload_params_out response;
} __packed;
631
/* Per-ULP resource ranges reported by OPCODE_COMMON_QUERY_FIRMWARE_CONFIG */
struct be_ulp_fw_cfg {
	u32 ulp_mode;
	u32 etx_base;
	u32 etx_count;
	u32 sq_base;
	u32 sq_count;
	u32 rq_base;
	u32 rq_count;
	u32 dq_base;
	u32 dq_count;
	u32 lro_base;
	u32 lro_count;
	u32 icd_base;
	u32 icd_count;
};

struct be_fw_cfg {
	struct be_cmd_req_hdr hdr;
	u32 be_config_number;
	u32 asic_revision;
	u32 phys_port;
	u32 function_mode;
	struct be_ulp_fw_cfg ulp[2];
	u32 function_caps;
} __packed;
657
#define CMD_ISCSI_COMMAND_INVALIDATE		1
#define ISCSI_OPCODE_SCSI_DATA_OUT		5
#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION	41
/* NOTE(review): duplicates the earlier definition (same value, 41) */
#define OPCODE_COMMON_MODIFY_EQ_DELAY		41
#define OPCODE_COMMON_ISCSI_CLEANUP		59
#define OPCODE_COMMON_TCP_UPLOAD		56
#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
#define CMD_ISCSI_CONNECTION_INVALIDATE		1
#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST	2
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
670
#define INI_WR_CMD		1	/* initiator write command */
#define INI_TMF_CMD		2	/* initiator TMF command */
#define INI_NOPOUT_CMD		3	/* initiator: send a NOP-OUT */
#define INI_RD_CMD		5	/* initiator requesting to send
					 * a read command
					 */
#define TGT_CTX_UPDT_CMD	7	/* target context update */
#define TGT_STS_CMD		8	/* target R2T and other BHS
					 * where only the status number
					 * needs to be updated
					 */
#define TGT_DATAIN_CMD		9	/* target Data-Ins in response
					 * to a read command
					 */
#define TGT_SOS_PDU		10	/* target: standalone status
					 * response
					 */
#define TGT_DM_CMD		11	/* the BHS prepared by the
					 * driver should not be touched
					 */
/* --- CMD_CHUTE_TYPE --- */
#define CMD_CONNECTION_CHUTE_0	1
#define CMD_CONNECTION_CHUTE_1	2
#define CMD_CONNECTION_CHUTE_2	3
696
#define EQ_MAJOR_CODE_COMPLETION	0

#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH	0
#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH	1

/* --- CONNECTION_UPLOAD_PARAMS --- */
/* These parameters define the type of connection upload desired. */
#define CONNECTION_UPLOAD_GRACEFUL	1	/* graceful upload */
#define CONNECTION_UPLOAD_ABORT_RESET	2	/* abortive upload with
						 * reset
						 */
#define CONNECTION_UPLOAD_ABORT		3	/* abortive upload without
						 * reset
						 */
#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4	/* abortive upload with reset,
						 * sequence number by driver */
713
/* FIELD_SIZEOF() yields the byte size of a field within a structure. */

/* Returns the number of items in the array field of a structure. */
#define BE_NUMBER_OF_FIELD(_type_, _field_)	\
	(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
719
/*
 * Completion codes the chip posts to the host driver, for both
 * initiator and target mode of operation.
 */
#define SOL_CMD_COMPLETE		1  /* solicited cmd completed
					    * normally */
#define SOL_CMD_KILLED_DATA_DIGEST_ERR	2  /* solicited cmd invalidated:
					    * data digest error */
#define CXN_KILLED_PDU_SIZE_EXCEEDS_DSL 3  /* cxn invalidated: received
					    * PDU size > DSL */
#define CXN_KILLED_BURST_LEN_MISMATCH	4  /* cxn invalidated: received PDU
					    * sequence size > FBL/MBL */
#define CXN_KILLED_AHS_RCVD		5  /* cxn invalidated: received PDU
					    * hdr has AHS */
#define CXN_KILLED_HDR_DIGEST_ERR	6  /* cxn invalidated: header
					    * digest error */
#define CXN_KILLED_UNKNOWN_HDR		7  /* cxn invalidated: bad opcode
					    * in PDU hdr */
#define CXN_KILLED_STALE_ITT_TTT_RCVD	8  /* cxn invalidated: received
					    * ITT/TTT does not belong to
					    * this connection */
#define CXN_KILLED_INVALID_ITT_TTT_RCVD 9  /* cxn invalidated: received
					    * ITT/TTT > max supported */
#define CXN_KILLED_RST_RCVD		10 /* cxn invalidated: incoming
					    * TCP RST */
#define CXN_KILLED_TIMED_OUT		11 /* cxn invalidated: 12 TCP
					    * retransmit attempts failed */
#define CXN_KILLED_RST_SENT		12 /* cxn invalidated: TCP RST
					    * sent by the Tx side */
#define CXN_KILLED_FIN_RCVD		13 /* cxn invalidated: incoming
					    * TCP FIN */
#define CXN_KILLED_BAD_UNSOL_PDU_RCVD	14 /* cxn invalidated: bad
					    * unsolicited PDU (unsolicited
					    * PDUs have ITT=0xffffffff) */
#define CXN_KILLED_BAD_WRB_INDEX_ERROR	15 /* cxn invalidated: bad WRB
					    * index */
#define CXN_KILLED_OVER_RUN_RESIDUAL	16 /* cmd invalidated: residual
					    * over-run bytes */
#define CXN_KILLED_UNDER_RUN_RESIDUAL	17 /* cmd invalidated: residual
					    * under-run bytes */
#define CMD_KILLED_INVALID_STATSN_RCVD	18 /* cmd invalidated: received PDU
					    * has an invalid StatSN */
#define CMD_KILLED_INVALID_R2T_RCVD	19 /* cmd invalidated: received R2T
					    * with invalid fields */
#define CMD_CXN_KILLED_LUN_INVALID	20 /* cmd invalidated: received PDU
					    * has an invalid LUN */
#define CMD_CXN_KILLED_ICD_INVALID	21 /* cmd invalidated: corresponding
					    * ICD not in a valid state */
#define CMD_CXN_KILLED_ITT_INVALID	22 /* cmd invalidated: received PDU
					    * has an invalid ITT */
#define CMD_CXN_KILLED_SEQ_OUTOFORDER	23 /* cmd invalidated: received
					    * sequence buffer offset is
					    * out of order */
#define CMD_CXN_KILLED_INVALID_DATASN_RCVD 24 /* cmd invalidated: received
					       * PDU has an invalid DataSN */
#define CXN_INVALIDATE_NOTIFY		25 /* connection invalidation
					    * completion notify */
#define CXN_INVALIDATE_INDEX_NOTIFY	26 /* connection invalidation
					    * completion, with data PDU
					    * index */
#define CMD_INVALIDATED_NOTIFY		27 /* command invalidation
					    * completion notify */
#define UNSOL_HDR_NOTIFY		28 /* unsolicited header notify */
#define UNSOL_DATA_NOTIFY		29 /* unsolicited data notify */
#define UNSOL_DATA_DIGEST_ERROR_NOTIFY	30 /* unsolicited data digest
					    * error notify */
#define DRIVERMSG_NOTIFY		31 /* TCP-acknowledge based
					    * notification */
#define CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN 32 /* cxn invalidated: command
						  * and data are not on the
						  * same connection */
#define SOL_CMD_KILLED_DIF_ERR		33 /* solicited cmd invalidated:
					    * DIF error */
#define CXN_KILLED_SYN_RCVD		34 /* cxn invalidated: incoming
					    * TCP SYN */
#define CXN_KILLED_IMM_DATA_RCVD	35 /* cxn invalidated: incoming
					    * unsolicited PDU with
					    * immediate data on the cxn */
870
/* Build the WRB header (embedded vs. non-embedded payload, SGE count) */
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt);

/* Fill the common request header that precedes every FW command */
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len);

#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
new file mode 100644
index 000000000000..2fd25442cfaf
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -0,0 +1,638 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20
21#include <scsi/libiscsi.h>
22#include <scsi/scsi_transport_iscsi.h>
23#include <scsi/scsi_transport.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi.h>
28
29#include "be_iscsi.h"
30
31extern struct iscsi_transport beiscsi_iscsi_transport;
32
33/**
34 * beiscsi_session_create - creates a new iscsi session
35 * @cmds_max: max commands supported
36 * @qdepth: max queue depth supported
37 * @initial_cmdsn: initial iscsi CMDSN
38 */
39struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
40 u16 cmds_max,
41 u16 qdepth,
42 u32 initial_cmdsn)
43{
44 struct Scsi_Host *shost;
45 struct beiscsi_endpoint *beiscsi_ep;
46 struct iscsi_cls_session *cls_session;
47 struct beiscsi_hba *phba;
48 struct iscsi_session *sess;
49 struct beiscsi_session *beiscsi_sess;
50 struct beiscsi_io_task *io_task;
51
52 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
53
54 if (!ep) {
55 SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep \n");
56 return NULL;
57 }
58 beiscsi_ep = ep->dd_data;
59 phba = beiscsi_ep->phba;
60 shost = phba->shost;
61 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
62 shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
63 "Max cmds per session supported is %d. Using %d. "
64 "\n", cmds_max,
65 beiscsi_ep->phba->params.wrbs_per_cxn,
66 beiscsi_ep->phba->params.wrbs_per_cxn);
67 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
68 }
69
70 cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
71 shost, cmds_max,
72 sizeof(*beiscsi_sess),
73 sizeof(*io_task),
74 initial_cmdsn, ISCSI_MAX_TARGET);
75 if (!cls_session)
76 return NULL;
77 sess = cls_session->dd_data;
78 beiscsi_sess = sess->dd_data;
79 beiscsi_sess->bhs_pool = pci_pool_create("beiscsi_bhs_pool",
80 phba->pcidev,
81 sizeof(struct be_cmd_bhs),
82 64, 0);
83 if (!beiscsi_sess->bhs_pool)
84 goto destroy_sess;
85
86 return cls_session;
87destroy_sess:
88 iscsi_session_teardown(cls_session);
89 return NULL;
90}
91
92/**
93 * beiscsi_session_destroy - destroys iscsi session
94 * @cls_session: pointer to iscsi cls session
95 *
96 * Destroys iSCSI session instance and releases
97 * resources allocated for it.
98 */
99void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
100{
101 struct iscsi_session *sess = cls_session->dd_data;
102 struct beiscsi_session *beiscsi_sess = sess->dd_data;
103
104 pci_pool_destroy(beiscsi_sess->bhs_pool);
105 iscsi_session_teardown(cls_session);
106}
107
108/**
109 * beiscsi_conn_create - create an instance of iscsi connection
110 * @cls_session: ptr to iscsi_cls_session
111 * @cid: iscsi cid
112 */
113struct iscsi_cls_conn *
114beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
115{
116 struct beiscsi_hba *phba;
117 struct Scsi_Host *shost;
118 struct iscsi_cls_conn *cls_conn;
119 struct beiscsi_conn *beiscsi_conn;
120 struct iscsi_conn *conn;
121 struct iscsi_session *sess;
122 struct beiscsi_session *beiscsi_sess;
123
124 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
125 "from iscsi layer=%d\n", cid);
126 shost = iscsi_session_to_shost(cls_session);
127 phba = iscsi_host_priv(shost);
128
129 cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
130 if (!cls_conn)
131 return NULL;
132
133 conn = cls_conn->dd_data;
134 beiscsi_conn = conn->dd_data;
135 beiscsi_conn->ep = NULL;
136 beiscsi_conn->phba = phba;
137 beiscsi_conn->conn = conn;
138 sess = cls_session->dd_data;
139 beiscsi_sess = sess->dd_data;
140 beiscsi_conn->beiscsi_sess = beiscsi_sess;
141 return cls_conn;
142}
143
144/**
145 * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
146 * @beiscsi_conn: The pointer to beiscsi_conn structure
147 * @phba: The phba instance
148 * @cid: The cid to free
149 */
150static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
151 struct beiscsi_conn *beiscsi_conn,
152 unsigned int cid)
153{
154 if (phba->conn_table[cid]) {
155 SE_DEBUG(DBG_LVL_1,
156 "Connection table already occupied. Detected clash\n");
157 return -EINVAL;
158 } else {
159 SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn) \n",
160 cid, beiscsi_conn);
161 phba->conn_table[cid] = beiscsi_conn;
162 }
163 return 0;
164}
165
/**
 * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: EP handle(64 bit)
 * @is_leading: nonzero if this is the leading connection of the session
 *
 * This function binds the TCP Conn with iSCSI Connection and Session.
 * Returns 0 on success, -EINVAL/-EEXIST on failure.
 */
int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
		      struct iscsi_cls_conn *cls_conn,
		      u64 transport_fd, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct Scsi_Host *shost =
		(struct Scsi_Host *)iscsi_session_to_shost(cls_session);
	struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
	/* transport_fd is the handle handed out earlier at ep_connect time */
	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;

	beiscsi_ep = ep->dd_data;

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	/*
	 * The endpoint must belong to the same adapter as the session.
	 * NOTE(review): the libiscsi bind above is not undone on this
	 * error path - confirm whether that is intentional.
	 */
	if (beiscsi_ep->phba != phba) {
		SE_DEBUG(DBG_LVL_8,
			 "beiscsi_ep->hba=%p not equal to phba=%p \n",
			 beiscsi_ep->phba, phba);
		return -EEXIST;
	}

	/* Cross-link driver conn and endpoint, then publish in conn_table */
	beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
	beiscsi_conn->ep = beiscsi_ep;
	beiscsi_ep->conn = beiscsi_conn;
	SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d \n",
		 beiscsi_conn, conn, beiscsi_ep->ep_cid);
	return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
210
/**
 * beiscsi_conn_get_param - get the iscsi parameter
 * @cls_conn: pointer to iscsi cls conn
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * Returns the number of characters written to @buf, or a negative
 * value when the connection has no endpoint.  Parameters not handled
 * here are delegated to libiscsi.
 */
int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
			   enum iscsi_param param, char *buf)
{
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	int len = 0;

	beiscsi_ep = beiscsi_conn->ep;
	if (!beiscsi_ep) {
		SE_DEBUG(DBG_LVL_1,
			 "In beiscsi_conn_get_param , no beiscsi_ep\n");
		/* NOTE(review): returns -1 rather than a -Exxx errno code */
		return -1;
	}

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		/* choose v4/v6 destination field by endpoint address family */
		if (beiscsi_ep->ip_type == BE2_IPV4)
			len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
		else
			len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}
	return len;
}
249
250int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
251 enum iscsi_param param, char *buf, int buflen)
252{
253 struct iscsi_conn *conn = cls_conn->dd_data;
254 struct iscsi_session *session = conn->session;
255 int ret;
256
257 ret = iscsi_set_param(cls_conn, param, buf, buflen);
258 if (ret)
259 return ret;
260 /*
261 * If userspace tried to set the value to higher than we can
262 * support override here.
263 */
264 switch (param) {
265 case ISCSI_PARAM_FIRST_BURST:
266 if (session->first_burst > 8192)
267 session->first_burst = 8192;
268 break;
269 case ISCSI_PARAM_MAX_RECV_DLENGTH:
270 if (conn->max_recv_dlength > 65536)
271 conn->max_recv_dlength = 65536;
272 break;
273 case ISCSI_PARAM_MAX_BURST:
274 if (session->first_burst > 262144)
275 session->first_burst = 262144;
276 break;
277 default:
278 return 0;
279 }
280
281 return 0;
282}
283
/**
 * beiscsi_get_host_param - get the iscsi host parameter
 * @shost: pointer to scsi_host structure
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * Returns the number of characters written to @buf.  Parameters not
 * handled here are delegated to libiscsi.
 */
int beiscsi_get_host_param(struct Scsi_Host *shost,
			   enum iscsi_host_param param, char *buf)
{
	struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
	int len = 0;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		/*
		 * NOTE(review): the return value of be_cmd_get_mac_addr()
		 * is ignored; on FW failure a stale or zeroed MAC would be
		 * formatted below - confirm whether that is acceptable.
		 */
		be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address);
		len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
		break;
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}
308
309/**
310 * beiscsi_conn_get_stats - get the iscsi stats
311 * @cls_conn: pointer to iscsi cls conn
312 * @stats: pointer to iscsi_stats structure
313 *
314 * returns iscsi stats
315 */
316void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
317 struct iscsi_stats *stats)
318{
319 struct iscsi_conn *conn = cls_conn->dd_data;
320
321 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
322 stats->txdata_octets = conn->txdata_octets;
323 stats->rxdata_octets = conn->rxdata_octets;
324 stats->dataout_pdus = conn->dataout_pdus_cnt;
325 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
326 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
327 stats->datain_pdus = conn->datain_pdus_cnt;
328 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
329 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
330 stats->r2t_pdus = conn->r2t_pdus_cnt;
331 stats->digest_err = 0;
332 stats->timeout_err = 0;
333 stats->custom_length = 0;
334 strcpy(stats->custom[0].desc, "eh_abort_cnt");
335 stats->custom[0].value = conn->eh_abort_cnt;
336}
337
/**
 * beiscsi_set_params_for_offld - set the parameters for offload
 * @beiscsi_conn: pointer to beiscsi_conn
 * @params: pointer to offload_params structure
 *
 * Packs the negotiated iSCSI login parameters into the bit-packed
 * @params block that is handed to FW when the connection is offloaded.
 */
static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_offload_params *params)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length,
		      params, session->max_burst);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length, params,
		      conn->max_xmit_dlength);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length,
		      params, session->first_burst);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params,
		      session->erl);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params,
		      conn->datadgst_en);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params,
		      conn->hdrdgst_en);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params,
		      session->initial_r2t_en);
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
		      session->imm_data_en);
	/* exp_statsn - 1: presumably FW wants the last-seen StatSN - confirm */
	AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
		      (conn->exp_statsn - 1));
}
369
/**
 * beiscsi_conn_start - offload of session to chip
 * @cls_conn: pointer to beiscsi_conn
 *
 * Hands the fully logged-in connection to FW for offload, then lets
 * libiscsi mark the connection running.  Always returns 0.
 */
int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_endpoint *beiscsi_ep;
	struct beiscsi_offload_params params;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct beiscsi_hba *phba = iscsi_host_priv(shost);

	memset(&params, 0, sizeof(struct beiscsi_offload_params));
	beiscsi_ep = beiscsi_conn->ep;
	if (!beiscsi_ep)
		/*
		 * NOTE(review): a missing endpoint is only logged and
		 * execution continues into the offload - confirm this
		 * cannot happen or that continuing is safe.
		 */
		SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");

	/* The login-phase SGL handle is no longer needed once offloaded */
	free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle);
	beiscsi_conn->login_in_progress = 0;
	beiscsi_set_params_for_offld(beiscsi_conn, &params);
	beiscsi_offload_connection(beiscsi_conn, &params);
	iscsi_conn_start(cls_conn);
	return 0;
}
396
397/**
398 * beiscsi_get_cid - Allocate a cid
399 * @phba: The phba instance
400 */
401static int beiscsi_get_cid(struct beiscsi_hba *phba)
402{
403 unsigned short cid = 0xFFFF;
404
405 if (!phba->avlbl_cids)
406 return cid;
407
408 cid = phba->cid_array[phba->cid_alloc++];
409 if (phba->cid_alloc == phba->params.cxns_per_ctrl)
410 phba->cid_alloc = 0;
411 phba->avlbl_cids--;
412 return cid;
413}
414
415/**
416 * beiscsi_open_conn - Ask FW to open a TCP connection
417 * @ep: endpoint to be used
418 * @src_addr: The source IP address
419 * @dst_addr: The Destination IP address
420 *
421 * Asks the FW to open a TCP connection
422 */
423static int beiscsi_open_conn(struct iscsi_endpoint *ep,
424 struct sockaddr *src_addr,
425 struct sockaddr *dst_addr, int non_blocking)
426{
427 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
428 struct beiscsi_hba *phba = beiscsi_ep->phba;
429 int ret = -1;
430
431 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
432 if (beiscsi_ep->ep_cid == 0xFFFF) {
433 SE_DEBUG(DBG_LVL_1, "No free cid available\n");
434 return ret;
435 }
436 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
437 beiscsi_ep->ep_cid);
438 phba->ep_array[beiscsi_ep->ep_cid] = ep;
439 if (beiscsi_ep->ep_cid >
440 (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
441 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
442 return ret;
443 }
444
445 beiscsi_ep->cid_vld = 0;
446 return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
447}
448
449/**
450 * beiscsi_put_cid - Free the cid
451 * @phba: The phba for which the cid is being freed
452 * @cid: The cid to free
453 */
454static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
455{
456 phba->avlbl_cids++;
457 phba->cid_array[phba->cid_free++] = cid;
458 if (phba->cid_free == phba->params.cxns_per_ctrl)
459 phba->cid_free = 0;
460}
461
462/**
463 * beiscsi_free_ep - free endpoint
464 * @ep: pointer to iscsi endpoint structure
465 */
466static void beiscsi_free_ep(struct iscsi_endpoint *ep)
467{
468 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
469 struct beiscsi_hba *phba = beiscsi_ep->phba;
470
471 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
472 beiscsi_ep->phba = NULL;
473 iscsi_destroy_endpoint(ep);
474}
475
476/**
477 * beiscsi_ep_connect - Ask chip to create TCP Conn
478 * @scsi_host: Pointer to scsi_host structure
479 * @dst_addr: The IP address of Target
480 * @non_blocking: blocking or non-blocking call
481 *
482 * This routines first asks chip to create a connection and then allocates an EP
483 */
484struct iscsi_endpoint *
485beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
486 int non_blocking)
487{
488 struct beiscsi_hba *phba;
489 struct beiscsi_endpoint *beiscsi_ep;
490 struct iscsi_endpoint *ep;
491 int ret;
492
493 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect \n");
494 if (shost)
495 phba = iscsi_host_priv(shost);
496 else {
497 ret = -ENXIO;
498 SE_DEBUG(DBG_LVL_1, "shost is NULL \n");
499 return ERR_PTR(ret);
500 }
501 ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
502 if (!ep) {
503 ret = -ENOMEM;
504 return ERR_PTR(ret);
505 }
506
507 beiscsi_ep = ep->dd_data;
508 beiscsi_ep->phba = phba;
509
510 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
511 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
512 ret = -ENOMEM;
513 goto free_ep;
514 }
515
516 return ep;
517
518free_ep:
519 beiscsi_free_ep(ep);
520 return ERR_PTR(ret);
521}
522
523/**
524 * beiscsi_ep_poll - Poll to see if connection is established
525 * @ep: endpoint to be used
526 * @timeout_ms: timeout specified in millisecs
527 *
528 * Poll to see if TCP connection established
529 */
530int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
531{
532 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
533
534 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n");
535 if (beiscsi_ep->cid_vld == 1)
536 return 1;
537 else
538 return 0;
539}
540
541/**
542 * beiscsi_close_conn - Upload the connection
543 * @ep: The iscsi endpoint
544 * @flag: The type of connection closure
545 */
546static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
547{
548 int ret = 0;
549 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
550 struct beiscsi_hba *phba = beiscsi_ep->phba;
551
552 if (MGMT_STATUS_SUCCESS !=
553 mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
554 CONNECTION_UPLOAD_GRACEFUL)) {
555 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
556 beiscsi_ep->ep_cid);
557 ret = -1;
558 }
559
560 return ret;
561}
562
563/**
564 * beiscsi_ep_disconnect - Tears down the TCP connection
565 * @ep: endpoint to be used
566 *
567 * Tears down the TCP connection
568 */
569void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
570{
571 struct beiscsi_conn *beiscsi_conn;
572 struct beiscsi_endpoint *beiscsi_ep;
573 struct beiscsi_hba *phba;
574 int flag = 0;
575
576 beiscsi_ep = ep->dd_data;
577 phba = beiscsi_ep->phba;
578 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
579
580 if (beiscsi_ep->conn) {
581 beiscsi_conn = beiscsi_ep->conn;
582 iscsi_suspend_queue(beiscsi_conn->conn);
583 beiscsi_close_conn(ep, flag);
584 }
585
586 beiscsi_free_ep(ep);
587}
588
589/**
590 * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
591 * @phba: The phba instance
592 * @cid: The cid to free
593 */
594static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
595 unsigned int cid)
596{
597 if (phba->conn_table[cid])
598 phba->conn_table[cid] = NULL;
599 else {
600 SE_DEBUG(DBG_LVL_8, "Connection table Not occupied. \n");
601 return -EINVAL;
602 }
603 return 0;
604}
605
606/**
607 * beiscsi_conn_stop - Invalidate and stop the connection
608 * @cls_conn: pointer to get iscsi_conn
609 * @flag: The type of connection closure
610 */
611void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
612{
613 struct iscsi_conn *conn = cls_conn->dd_data;
614 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
615 struct beiscsi_endpoint *beiscsi_ep;
616 struct iscsi_session *session = conn->session;
617 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
618 struct beiscsi_hba *phba = iscsi_host_priv(shost);
619 unsigned int status;
620 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
621
622 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
623 beiscsi_ep = beiscsi_conn->ep;
624 if (!beiscsi_ep) {
625 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
626 return;
627 }
628 status = mgmt_invalidate_connection(phba, beiscsi_ep,
629 beiscsi_ep->ep_cid, 1,
630 savecfg_flag);
631 if (status != MGMT_STATUS_SUCCESS) {
632 SE_DEBUG(DBG_LVL_1,
633 "mgmt_invalidate_connection Failed for cid=%d \n",
634 beiscsi_ep->ep_cid);
635 }
636 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
637 iscsi_conn_stop(cls_conn, flag);
638}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
new file mode 100644
index 000000000000..f92ffc5349fb
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -0,0 +1,75 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20
21#ifndef _BE_ISCSI_
22#define _BE_ISCSI_
23
24#include "be_main.h"
25#include "be_mgmt.h"
26
27#define BE2_IPV4 0x1
28#define BE2_IPV6 0x10
29
30void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
31 struct beiscsi_offload_params *params);
32
33void beiscsi_offload_iscsi(struct beiscsi_hba *phba, struct iscsi_conn *conn,
34 struct beiscsi_conn *beiscsi_conn,
35 unsigned int fw_handle);
36
37struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
38 uint16_t cmds_max,
39 uint16_t qdepth,
40 uint32_t initial_cmdsn);
41
42void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
43
44struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
45 *cls_session, uint32_t cid);
46
47int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
48 struct iscsi_cls_conn *cls_conn,
49 uint64_t transport_fd, int is_leading);
50
51int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
52 enum iscsi_param param, char *buf);
53
54int beiscsi_get_host_param(struct Scsi_Host *shost,
55 enum iscsi_host_param param, char *buf);
56
57int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
58 enum iscsi_param param, char *buf, int buflen);
59
60int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
61
62void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag);
63
64struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
65 struct sockaddr *dst_addr,
66 int non_blocking);
67
68int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
69
70void beiscsi_ep_disconnect(struct iscsi_endpoint *ep);
71
72void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
73 struct iscsi_stats *stats);
74
75#endif
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
new file mode 100644
index 000000000000..4f1aca346e38
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -0,0 +1,3390 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/blkdev.h>
24#include <linux/pci.h>
25#include <linux/string.h>
26#include <linux/kernel.h>
27#include <linux/semaphore.h>
28
29#include <scsi/libiscsi.h>
30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi.h>
36#include "be_main.h"
37#include "be_iscsi.h"
38#include "be_mgmt.h"
39
40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64;
42static unsigned int enable_msix;
43
44MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
46MODULE_AUTHOR("ServerEngines Corporation");
47MODULE_LICENSE("GPL");
48module_param(be_iopoll_budget, int, 0);
49module_param(enable_msix, int, 0);
50module_param(be_max_phys_size, uint, S_IRUGO);
51MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
52 "contiguous memory that can be allocated."
53 "Range is 16 - 128");
54
55static int beiscsi_slave_configure(struct scsi_device *sdev)
56{
57 blk_queue_max_segment_size(sdev->request_queue, 65536);
58 return 0;
59}
60
61static struct scsi_host_template beiscsi_sht = {
62 .module = THIS_MODULE,
63 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
64 .proc_name = DRV_NAME,
65 .queuecommand = iscsi_queuecommand,
66 .eh_abort_handler = iscsi_eh_abort,
67 .change_queue_depth = iscsi_change_queue_depth,
68 .slave_configure = beiscsi_slave_configure,
69 .target_alloc = iscsi_target_alloc,
70 .eh_device_reset_handler = iscsi_eh_device_reset,
71 .eh_target_reset_handler = iscsi_eh_target_reset,
72 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
73 .can_queue = BE2_IO_DEPTH,
74 .this_id = -1,
75 .max_sectors = BEISCSI_MAX_SECTORS,
76 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
77 .use_clustering = ENABLE_CLUSTERING,
78};
79static struct scsi_transport_template *beiscsi_scsi_transport;
80
/*------------------- PCI Driver operations and data ----------------- */
/* PCI IDs claimed by this driver; see be_main.h for the ID values. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
89
90static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
91{
92 struct beiscsi_hba *phba;
93 struct Scsi_Host *shost;
94
95 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
96 if (!shost) {
97 dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
98 "iscsi_host_alloc failed \n");
99 return NULL;
100 }
101 shost->dma_boundary = pcidev->dma_mask;
102 shost->max_id = BE2_MAX_SESSIONS;
103 shost->max_channel = 0;
104 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
105 shost->max_lun = BEISCSI_NUM_MAX_LUN;
106 shost->transportt = beiscsi_scsi_transport;
107
108 phba = iscsi_host_priv(shost);
109 memset(phba, 0, sizeof(*phba));
110 phba->shost = shost;
111 phba->pcidev = pci_dev_get(pcidev);
112
113 if (iscsi_host_add(shost, &phba->pcidev->dev))
114 goto free_devices;
115 return phba;
116
117free_devices:
118 pci_dev_put(phba->pcidev);
119 iscsi_host_free(phba->shost);
120 return NULL;
121}
122
123static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
124{
125 if (phba->csr_va) {
126 iounmap(phba->csr_va);
127 phba->csr_va = NULL;
128 }
129 if (phba->db_va) {
130 iounmap(phba->db_va);
131 phba->db_va = NULL;
132 }
133 if (phba->pci_va) {
134 iounmap(phba->pci_va);
135 phba->pci_va = NULL;
136 }
137}
138
139static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
140 struct pci_dev *pcidev)
141{
142 u8 __iomem *addr;
143
144 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
145 pci_resource_len(pcidev, 2));
146 if (addr == NULL)
147 return -ENOMEM;
148 phba->ctrl.csr = addr;
149 phba->csr_va = addr;
150 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
151
152 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
153 if (addr == NULL)
154 goto pci_map_err;
155 phba->ctrl.db = addr;
156 phba->db_va = addr;
157 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
158
159 addr = ioremap_nocache(pci_resource_start(pcidev, 1),
160 pci_resource_len(pcidev, 1));
161 if (addr == NULL)
162 goto pci_map_err;
163 phba->ctrl.pcicfg = addr;
164 phba->pci_va = addr;
165 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
166 return 0;
167
168pci_map_err:
169 beiscsi_unmap_pci_function(phba);
170 return -ENOMEM;
171}
172
173static int beiscsi_enable_pci(struct pci_dev *pcidev)
174{
175 int ret;
176
177 ret = pci_enable_device(pcidev);
178 if (ret) {
179 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
180 "failed. Returning -ENODEV\n");
181 return ret;
182 }
183
184 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
185 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
186 if (ret) {
187 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
188 pci_disable_device(pcidev);
189 return ret;
190 }
191 }
192 return 0;
193}
194
195static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
196{
197 struct be_ctrl_info *ctrl = &phba->ctrl;
198 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
199 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
200 int status = 0;
201
202 ctrl->pdev = pdev;
203 status = beiscsi_map_pci_bars(phba, pdev);
204 if (status)
205 return status;
206
207 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
208 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
209 mbox_mem_alloc->size,
210 &mbox_mem_alloc->dma);
211 if (!mbox_mem_alloc->va) {
212 beiscsi_unmap_pci_function(phba);
213 status = -ENOMEM;
214 return status;
215 }
216
217 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
218 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
219 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
220 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
221 spin_lock_init(&ctrl->mbox_lock);
222 return status;
223}
224
225static void beiscsi_get_params(struct beiscsi_hba *phba)
226{
227 phba->params.ios_per_ctrl = BE2_IO_DEPTH;
228 phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
229 phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
230 phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
231 phba->params.num_sge_per_io = BE2_SGE;
232 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
233 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
234 phba->params.eq_timer = 64;
235 phba->params.num_eq_entries =
236 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
237 512) + 1) * 512;
238 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
239 ? 1024 : phba->params.num_eq_entries;
240 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
241 phba->params.num_eq_entries);
242 phba->params.num_cq_entries =
243 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
244 512) + 1) * 512;
245 SE_DEBUG(DBG_LVL_8,
246 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
247 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
248 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
249 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
250 phba->params.wrbs_per_cxn = 256;
251}
252
253static void hwi_ring_eq_db(struct beiscsi_hba *phba,
254 unsigned int id, unsigned int clr_interrupt,
255 unsigned int num_processed,
256 unsigned char rearm, unsigned char event)
257{
258 u32 val = 0;
259 val |= id & DB_EQ_RING_ID_MASK;
260 if (rearm)
261 val |= 1 << DB_EQ_REARM_SHIFT;
262 if (clr_interrupt)
263 val |= 1 << DB_EQ_CLR_SHIFT;
264 if (event)
265 val |= 1 << DB_EQ_EVNT_SHIFT;
266 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
267 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
268}
269
270/**
271 * be_isr - The isr routine of the driver.
272 * @irq: Not used
273 * @dev_id: Pointer to host adapter structure
274 */
275static irqreturn_t be_isr(int irq, void *dev_id)
276{
277 struct beiscsi_hba *phba;
278 struct hwi_controller *phwi_ctrlr;
279 struct hwi_context_memory *phwi_context;
280 struct be_eq_entry *eqe = NULL;
281 struct be_queue_info *eq;
282 struct be_queue_info *cq;
283 unsigned long flags, index;
284 unsigned int num_eq_processed;
285 struct be_ctrl_info *ctrl;
286 int isr;
287
288 phba = dev_id;
289 if (!enable_msix) {
290 ctrl = &phba->ctrl;;
291 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
292 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
293 if (!isr)
294 return IRQ_NONE;
295 }
296
297 phwi_ctrlr = phba->phwi_ctrlr;
298 phwi_context = phwi_ctrlr->phwi_ctxt;
299 eq = &phwi_context->be_eq.q;
300 cq = &phwi_context->be_cq;
301 index = 0;
302 eqe = queue_tail_node(eq);
303 if (!eqe)
304 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
305
306 num_eq_processed = 0;
307 if (blk_iopoll_enabled) {
308 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
309 & EQE_VALID_MASK) {
310 if (!blk_iopoll_sched_prep(&phba->iopoll))
311 blk_iopoll_sched(&phba->iopoll);
312
313 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
314 queue_tail_inc(eq);
315 eqe = queue_tail_node(eq);
316 num_eq_processed++;
317 SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
318 }
319 if (num_eq_processed) {
320 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1);
321 return IRQ_HANDLED;
322 } else
323 return IRQ_NONE;
324 } else {
325 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
326 & EQE_VALID_MASK) {
327
328 if (((eqe->dw[offsetof(struct amap_eq_entry,
329 resource_id) / 32] &
330 EQE_RESID_MASK) >> 16) != cq->id) {
331 spin_lock_irqsave(&phba->isr_lock, flags);
332 phba->todo_mcc_cq = 1;
333 spin_unlock_irqrestore(&phba->isr_lock, flags);
334 } else {
335 spin_lock_irqsave(&phba->isr_lock, flags);
336 phba->todo_cq = 1;
337 spin_unlock_irqrestore(&phba->isr_lock, flags);
338 }
339 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
340 queue_tail_inc(eq);
341 eqe = queue_tail_node(eq);
342 num_eq_processed++;
343 }
344 if (phba->todo_cq || phba->todo_mcc_cq)
345 queue_work(phba->wq, &phba->work_cqs);
346
347 if (num_eq_processed) {
348 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1);
349 return IRQ_HANDLED;
350 } else
351 return IRQ_NONE;
352 }
353}
354
355static int beiscsi_init_irqs(struct beiscsi_hba *phba)
356{
357 struct pci_dev *pcidev = phba->pcidev;
358 int ret;
359
360 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba);
361 if (ret) {
362 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
363 "Failed to register irq\\n");
364 return ret;
365 }
366 return 0;
367}
368
369static void hwi_ring_cq_db(struct beiscsi_hba *phba,
370 unsigned int id, unsigned int num_processed,
371 unsigned char rearm, unsigned char event)
372{
373 u32 val = 0;
374 val |= id & DB_CQ_RING_ID_MASK;
375 if (rearm)
376 val |= 1 << DB_CQ_REARM_SHIFT;
377 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
378 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
379}
380
/*
 * async pdus include
 * a. unsolicited NOP-In (target initiated NOP-In)
 * b. Async Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware and iSCSI layer
 * process them
 *
 * Returns 0 when the PDU was handed to the iSCSI layer, 1 on an
 * unrecognized opcode.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	/* dispatch on the iSCSI opcode packed into the PDU header dword */
	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* NOP-In carries no data segment for the iSCSI layer */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* a Reject PDU must carry the 48-byte rejected header */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg \n",
			     (ppdu->
			      dw[offsetof(struct amap_pdu_base, opcode) / 32]
			      & PDUBASE_OPCODE_MASK));
		return 1;
	}

	/* completion must run under the session lock */
	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
430
431static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
432{
433 struct sgl_handle *psgl_handle;
434
435 if (phba->io_sgl_hndl_avbl) {
436 SE_DEBUG(DBG_LVL_8,
437 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
438 phba->io_sgl_alloc_index);
439 psgl_handle = phba->io_sgl_hndl_base[phba->
440 io_sgl_alloc_index];
441 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
442 phba->io_sgl_hndl_avbl--;
443 if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1))
444 phba->io_sgl_alloc_index = 0;
445 else
446 phba->io_sgl_alloc_index++;
447 } else
448 psgl_handle = NULL;
449 return psgl_handle;
450}
451
452static void
453free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
454{
455 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
456 phba->io_sgl_free_index);
457 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
458 /*
459 * this can happen if clean_task is called on a task that
460 * failed in xmit_task or alloc_pdu.
461 */
462 SE_DEBUG(DBG_LVL_8,
463 "Double Free in IO SGL io_sgl_free_index=%d,"
464 "value there=%p \n", phba->io_sgl_free_index,
465 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
466 return;
467 }
468 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
469 phba->io_sgl_hndl_avbl++;
470 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
471 phba->io_sgl_free_index = 0;
472 else
473 phba->io_sgl_free_index++;
474}
475
476/**
477 * alloc_wrb_handle - To allocate a wrb handle
478 * @phba: The hba pointer
479 * @cid: The cid to use for allocation
480 * @index: index allocation and wrb index
481 *
482 * This happens under session_lock until submission to chip
483 */
484struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
485 int index)
486{
487 struct hwi_wrb_context *pwrb_context;
488 struct hwi_controller *phwi_ctrlr;
489 struct wrb_handle *pwrb_handle;
490
491 phwi_ctrlr = phba->phwi_ctrlr;
492 pwrb_context = &phwi_ctrlr->wrb_context[cid];
493 pwrb_handle = pwrb_context->pwrb_handle_base[index];
494 pwrb_handle->wrb_index = index;
495 pwrb_handle->nxt_wrb_index = index;
496 return pwrb_handle;
497}
498
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 *
 * NOTE(review): the body only logs the free; nothing here returns the
 * handle to @pwrb_context or increments wrb_handles_available — confirm
 * that the pool bookkeeping is intentional or handled elsewhere.
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x"
		 "wrb_handles_available=%d \n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->free_index, pwrb_context->wrb_handles_available);
}
517
518static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
519{
520 struct sgl_handle *psgl_handle;
521
522 if (phba->eh_sgl_hndl_avbl) {
523 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
524 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
525 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
526 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
527 phba->eh_sgl_hndl_avbl--;
528 if (phba->eh_sgl_alloc_index ==
529 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
530 1))
531 phba->eh_sgl_alloc_index = 0;
532 else
533 phba->eh_sgl_alloc_index++;
534 } else
535 psgl_handle = NULL;
536 return psgl_handle;
537}
538
539void
540free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
541{
542
543 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
544 /*
545 * this can happen if clean_task is called on a task that
546 * failed in xmit_task or alloc_pdu.
547 */
548 SE_DEBUG(DBG_LVL_8,
549 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
550 phba->eh_sgl_free_index);
551 return;
552 }
553 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
554 phba->eh_sgl_hndl_avbl++;
555 if (phba->eh_sgl_free_index ==
556 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
557 phba->eh_sgl_free_index = 0;
558 else
559 phba->eh_sgl_free_index++;
560}
561
/*
 * be_complete_io - translate a solicited CQE into a SCSI command result
 * @beiscsi_conn: connection the completion belongs to
 * @task: the iscsi task being completed
 * @psol: solicited completion entry from the CQ
 *
 * Extracts response/status/flags and the CmdSN window from the CQE,
 * applies under/overflow residuals and sense data, then unmaps the DMA
 * buffers and completes the SCSI task.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
	    (struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* CmdSN window: max = exp + (cmd_wnd - 1) */
	exp_cmdsn = be32_to_cpu(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
				& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
				& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
				& SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			/* short read below the midlayer's underflow limit */
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		/* sense data is prefixed by a 2-byte length field */
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		/* NOTE(review): cpu_to_be16 on a host-order length before
		 * min_t looks suspicious — confirm the wire byte order of
		 * sense_info[0]. */
		sense_len =
		    cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		/* account received data bytes for connection statistics */
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
		    & SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
630
/*
 * be_complete_logout - build a logout response PDU from a solicited CQE
 * @beiscsi_conn: connection the completion belongs to
 * @task: the iscsi logout task
 * @psol: solicited completion entry from the CQ
 *
 * Fills the task's iscsi_logout_rsp header from the CQE fields and
 * hands it to the iSCSI layer.
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	/* NOTE(review): fixed Time2Wait=5 / Time2Retain=0 — confirm these
	 * hard-coded values are intended rather than FW-reported. */
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		      & SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
		      32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
		      dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
		      & SOL_EXP_CMD_SN_MASK);
	/* max = exp + (cmd_wnd - 1) */
	hdr->max_cmdsn = be32_to_cpu((psol->
		      dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
		      & SOL_EXP_CMD_SN_MASK) +
		      ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
		      / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->hlength = 0;

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
657
/*
 * be_complete_tmf - build a TMF response PDU from a solicited CQE
 * @beiscsi_conn: connection the completion belongs to
 * @task: the iscsi task-management task
 * @psol: solicited completion entry from the CQ
 *
 * Fills the task's iscsi_tm_rsp header from the CQE fields and hands
 * it to the iSCSI layer.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		      & SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
		      32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
		      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* max = exp + (cmd_wnd - 1) */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
		      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
		      ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
		      / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
678
/*
 * hwi_complete_drvr_msgs - retire a driver-internal message completion
 * @beiscsi_conn: connection the completion belongs to
 * @phba: adapter instance
 * @psol: solicited completion entry from the CQ
 *
 * Looks up the WRB context/handle addressed by the CQE's cid and wrb
 * index and frees the handle under the session lock; no PDU is passed
 * up to the iSCSI layer.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* cid occupies the CQE field shifted by 6 */
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6)];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
700
/*
 * be_complete_nopin_resp - build a NOP-In response PDU from a CQE
 * @beiscsi_conn: connection the completion belongs to
 * @task: the iscsi NOP task
 * @psol: solicited completion entry from the CQ
 *
 * Fills the task's iscsi_nopin header from the CQE fields and hands
 * it to the iSCSI layer.
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* max = exp + (cmd_wnd - 1) */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
720
721static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
722 struct beiscsi_hba *phba, struct sol_cqe *psol)
723{
724 struct hwi_wrb_context *pwrb_context;
725 struct wrb_handle *pwrb_handle;
726 struct iscsi_wrb *pwrb = NULL;
727 struct hwi_controller *phwi_ctrlr;
728 struct iscsi_task *task;
729 struct beiscsi_io_task *io_task;
730 struct iscsi_conn *conn = beiscsi_conn->conn;
731 struct iscsi_session *session = conn->session;
732
733 phwi_ctrlr = phba->phwi_ctrlr;
734
735 pwrb_context = &phwi_ctrlr->
736 wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
737 & SOL_CID_MASK) >> 6)];
738 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
739 dw[offsetof(struct amap_sol_cqe, wrb_index) /
740 32] & SOL_WRB_INDEX_MASK) >> 16)];
741
742 task = pwrb_handle->pio_handle;
743 io_task = task->dd_data;
744 spin_lock_bh(&session->lock);
745 pwrb = pwrb_handle->pwrb;
746 switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
747 WRB_TYPE_MASK) >> 28) {
748 case HWH_TYPE_IO:
749 case HWH_TYPE_IO_RD:
750 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
751 ISCSI_OP_NOOP_OUT) {
752 be_complete_nopin_resp(beiscsi_conn, task, psol);
753 } else
754 be_complete_io(beiscsi_conn, task, psol);
755 break;
756
757 case HWH_TYPE_LOGOUT:
758 be_complete_logout(beiscsi_conn, task, psol);
759 break;
760
761 case HWH_TYPE_LOGIN:
762 SE_DEBUG(DBG_LVL_1,
763 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
764 "- Solicited path \n");
765 break;
766
767 case HWH_TYPE_TMF:
768 be_complete_tmf(beiscsi_conn, task, psol);
769 break;
770
771 case HWH_TYPE_NOP:
772 be_complete_nopin_resp(beiscsi_conn, task, psol);
773 break;
774
775 default:
776 shost_printk(KERN_WARNING, phba->shost,
777 "wrb_index 0x%x CID 0x%x\n",
778 ((psol->dw[offsetof(struct amap_iscsi_wrb, type) /
779 32] & SOL_WRB_INDEX_MASK) >> 16),
780 ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
781 & SOL_CID_MASK) >> 6));
782 break;
783 }
784
785 spin_unlock_bh(&session->lock);
786}
787
788static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
789 *pasync_ctx, unsigned int is_header,
790 unsigned int host_write_ptr)
791{
792 if (is_header)
793 return &pasync_ctx->async_entry[host_write_ptr].
794 header_busy_list;
795 else
796 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
797}
798
799static struct async_pdu_handle *
800hwi_get_async_handle(struct beiscsi_hba *phba,
801 struct beiscsi_conn *beiscsi_conn,
802 struct hwi_async_pdu_context *pasync_ctx,
803 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
804{
805 struct be_bus_address phys_addr;
806 struct list_head *pbusy_list;
807 struct async_pdu_handle *pasync_handle = NULL;
808 int buffer_len = 0;
809 unsigned char buffer_index = -1;
810 unsigned char is_header = 0;
811
812 phys_addr.u.a32.address_lo =
813 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
814 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
815 & PDUCQE_DPL_MASK) >> 16);
816 phys_addr.u.a32.address_hi =
817 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
818
819 phys_addr.u.a64.address =
820 *((unsigned long long *)(&phys_addr.u.a64.address));
821
822 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
823 & PDUCQE_CODE_MASK) {
824 case UNSOL_HDR_NOTIFY:
825 is_header = 1;
826
827 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
828 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
829 index) / 32] & PDUCQE_INDEX_MASK));
830
831 buffer_len = (unsigned int)(phys_addr.u.a64.address -
832 pasync_ctx->async_header.pa_base.u.a64.address);
833
834 buffer_index = buffer_len /
835 pasync_ctx->async_header.buffer_size;
836
837 break;
838 case UNSOL_DATA_NOTIFY:
839 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
840 dw[offsetof(struct amap_i_t_dpdu_cqe,
841 index) / 32] & PDUCQE_INDEX_MASK));
842 buffer_len = (unsigned long)(phys_addr.u.a64.address -
843 pasync_ctx->async_data.pa_base.u.
844 a64.address);
845 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
846 break;
847 default:
848 pbusy_list = NULL;
849 shost_printk(KERN_WARNING, phba->shost,
850 "Unexpected code=%d \n",
851 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
852 code) / 32] & PDUCQE_CODE_MASK);
853 return NULL;
854 }
855
856 WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
857 WARN_ON(list_empty(pbusy_list));
858 list_for_each_entry(pasync_handle, pbusy_list, link) {
859 WARN_ON(pasync_handle->consumed);
860 if (pasync_handle->index == buffer_index)
861 break;
862 }
863
864 WARN_ON(!pasync_handle);
865
866 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
867 pasync_handle->is_header = is_header;
868 pasync_handle->buffer_len = ((pdpdu_cqe->
869 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
870 & PDUCQE_DPL_MASK) >> 16);
871
872 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
873 index) / 32] & PDUCQE_INDEX_MASK);
874 return pasync_handle;
875}
876
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	/*
	 * Advance the endpoint read pointer of the header or data ring up
	 * to cq_index, marking the first handle on each busy list passed
	 * along the way as consumed and counting how many slots became
	 * writable again.  Always returns 0.
	 */
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* Select the bookkeeping for the ring being updated. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	/* Walk forward (with wraparound) until we reach the CQE's index. */
	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		/* The very first slot we pass is expected to be busy. */
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			/* The hardware has handed this buffer back to us. */
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	/* Zero advance means the same index was signalled twice. */
	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
927
928static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
929 unsigned int cri)
930{
931 struct hwi_controller *phwi_ctrlr;
932 struct hwi_async_pdu_context *pasync_ctx;
933 struct async_pdu_handle *pasync_handle, *tmp_handle;
934 struct list_head *plist;
935 unsigned int i = 0;
936
937 phwi_ctrlr = phba->phwi_ctrlr;
938 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
939
940 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
941
942 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
943 list_del(&pasync_handle->link);
944
945 if (i == 0) {
946 list_add_tail(&pasync_handle->link,
947 &pasync_ctx->async_header.free_list);
948 pasync_ctx->async_header.free_entries++;
949 i++;
950 } else {
951 list_add_tail(&pasync_handle->link,
952 &pasync_ctx->async_data.free_list);
953 pasync_ctx->async_data.free_entries++;
954 i++;
955 }
956 }
957
958 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
959 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
960 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
961 return 0;
962}
963
964static struct phys_addr *
965hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
966 unsigned int is_header, unsigned int host_write_ptr)
967{
968 struct phys_addr *pasync_sge = NULL;
969
970 if (is_header)
971 pasync_sge = pasync_ctx->async_header.ring_base;
972 else
973 pasync_sge = pasync_ctx->async_data.ring_base;
974
975 return pasync_sge + host_write_ptr;
976}
977
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	/*
	 * Repost free buffers to the default-PDU header or data ring.
	 * Moves up to min(writables, free_entries) handles (rounded down
	 * to a multiple of 8) from the free list to the per-slot busy
	 * lists, writes their addresses into the ring SGEs, updates the
	 * pool accounting, and rings the RXULP doorbell.
	 */
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Snapshot the state of whichever ring we are refilling. */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Post only whole groups of 8 buffers at a time. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): lo is stored into ->hi and hi into
			 * ->lo here — presumably matches the ring SGE's
			 * field layout; confirm against struct phys_addr. */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			/* Advance the ring write pointer with wraparound. */
			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* Commit the new write pointer and pool accounting. */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Tell the hardware how many buffers were posted. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1059
1060static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1061 struct beiscsi_conn *beiscsi_conn,
1062 struct i_t_dpdu_cqe *pdpdu_cqe)
1063{
1064 struct hwi_controller *phwi_ctrlr;
1065 struct hwi_async_pdu_context *pasync_ctx;
1066 struct async_pdu_handle *pasync_handle = NULL;
1067 unsigned int cq_index = -1;
1068
1069 phwi_ctrlr = phba->phwi_ctrlr;
1070 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1071
1072 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1073 pdpdu_cqe, &cq_index);
1074 BUG_ON(pasync_handle->is_header != 0);
1075 if (pasync_handle->consumed == 0)
1076 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1077 cq_index);
1078
1079 hwi_free_async_msg(phba, pasync_handle->cri);
1080 hwi_post_async_buffers(phba, pasync_handle->is_header);
1081}
1082
1083static unsigned int
1084hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1085 struct beiscsi_hba *phba,
1086 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1087{
1088 struct list_head *plist;
1089 struct async_pdu_handle *pasync_handle;
1090 void *phdr = NULL;
1091 unsigned int hdr_len = 0, buf_len = 0;
1092 unsigned int status, index = 0, offset = 0;
1093 void *pfirst_buffer = NULL;
1094 unsigned int num_buf = 0;
1095
1096 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1097
1098 list_for_each_entry(pasync_handle, plist, link) {
1099 if (index == 0) {
1100 phdr = pasync_handle->pbuffer;
1101 hdr_len = pasync_handle->buffer_len;
1102 } else {
1103 buf_len = pasync_handle->buffer_len;
1104 if (!num_buf) {
1105 pfirst_buffer = pasync_handle->pbuffer;
1106 num_buf++;
1107 }
1108 memcpy(pfirst_buffer + offset,
1109 pasync_handle->pbuffer, buf_len);
1110 offset = buf_len;
1111 }
1112 index++;
1113 }
1114
1115 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1116 beiscsi_conn->beiscsi_conn_cid,
1117 phdr, hdr_len, pfirst_buffer,
1118 buf_len);
1119
1120 if (status == 0)
1121 hwi_free_async_msg(phba, cri);
1122 return 0;
1123}
1124
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	/*
	 * Accumulate one unsolicited-PDU fragment on the connection's wait
	 * queue.  A header fragment starts a new message and records how
	 * many data bytes to expect; data fragments are appended until
	 * bytes_received reaches bytes_needed, at which point the whole
	 * message is forwarded via hwi_fwd_async_msg().
	 */
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Remove from the busy list; re-queued on the wait queue below. */
	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* A second header before the message completed is a
		 * protocol violation: free the stale message and crash. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Decode DataSegmentLength from the BHS: high byte and low
		 * 16 bits live in separate dwords.  NOTE(review): the mix
		 * of shift and be16_to_cpu here presumably matches the
		 * post-swap CQE layout — confirm against amap_pdu_base. */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* Header-only PDU: forward immediately. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a preceding header is silently dropped
		 * from the wait queue (handle already unlinked above). */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			/* Message complete: forward it. */
			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1189
1190static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1191 struct beiscsi_hba *phba,
1192 struct i_t_dpdu_cqe *pdpdu_cqe)
1193{
1194 struct hwi_controller *phwi_ctrlr;
1195 struct hwi_async_pdu_context *pasync_ctx;
1196 struct async_pdu_handle *pasync_handle = NULL;
1197 unsigned int cq_index = -1;
1198
1199 phwi_ctrlr = phba->phwi_ctrlr;
1200 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1201 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1202 pdpdu_cqe, &cq_index);
1203
1204 if (pasync_handle->consumed == 0)
1205 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1206 cq_index);
1207 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1208 hwi_post_async_buffers(phba, pasync_handle->is_header);
1209}
1210
static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
{
	/*
	 * Drain the completion queue: consume every valid CQE, dispatch it
	 * by code, and acknowledge processed entries via the CQ doorbell
	 * (in batches of 32, with a final rearming ring).  Returns the
	 * total number of CQEs processed.
	 */
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	cq = &phwi_context->be_cq;
	sol = queue_tail_node(cq);

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		/* CQEs arrive little-endian; swap in place before use. */
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		beiscsi_conn = phba->conn_table[(u32) (sol->
				 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				 SOL_CID_MASK) >> 6];

		/* An unknown CID aborts the whole drain; note that any
		 * CQEs already handled in this call are not acked here. */
		if (!beiscsi_conn || !beiscsi_conn->ep) {
			shost_printk(KERN_WARNING, phba->shost,
				     "Connection table empty for cid = %d\n",
				     (u32)(sol->dw[offsetof(struct amap_sol_cqe,
				     cid) / 32] & SOL_CID_MASK) >> 6);
			return 0;
		}

		/* Ack in batches of 32 without rearming, so the hardware
		 * can reuse the slots while we keep draining. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, phwi_context->be_cq.id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
			/* NOTE(review): dmsg is assigned but sol is passed
			 * on; hwi_complete_drvr_msgs() only reads sol_cqe
			 * fields, so this appears intentional. */
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		/* Per-command errors: log only, the command path handles
		 * the failure. */
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		/* Connection-fatal errors: tear the connection down. */
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent "
				 "on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK);
			break;
		}

		/* Invalidate the CQE and step to the next ring entry. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	/* Ack the remainder and rearm the CQ. */
	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed,
			       1, 0);
	}
	return tot_nump;
}
1352
static void beiscsi_process_all_cqs(struct work_struct *work)
{
	/*
	 * Deferred-work handler scheduled from the ISR: clear the pending
	 * flags under the ISR lock, then do the actual processing outside
	 * it.  MCC notifications are not expected on this path and are
	 * only logged.
	 */
	unsigned long flags;
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba, work_cqs);

	if (phba->todo_mcc_cq) {
		/* Flag writes are protected against the ISR's updates. */
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n");
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		/* Drain the I/O completion queue. */
		beiscsi_process_cq(phba);
	}
}
1373
1374static int be_iopoll(struct blk_iopoll *iop, int budget)
1375{
1376 static unsigned int ret;
1377 struct beiscsi_hba *phba;
1378
1379 phba = container_of(iop, struct beiscsi_hba, iopoll);
1380
1381 ret = beiscsi_process_cq(phba);
1382 if (ret < budget) {
1383 struct hwi_controller *phwi_ctrlr;
1384 struct hwi_context_memory *phwi_context;
1385
1386 phwi_ctrlr = phba->phwi_ctrlr;
1387 phwi_context = phwi_ctrlr->phwi_ctxt;
1388 blk_iopoll_complete(iop);
1389 hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0,
1390 0, 1, 1);
1391 }
1392 return ret;
1393}
1394
1395static void
1396hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1397 unsigned int num_sg, struct beiscsi_io_task *io_task)
1398{
1399 struct iscsi_sge *psgl;
1400 unsigned short sg_len, index;
1401 unsigned int sge_len = 0;
1402 unsigned long long addr;
1403 struct scatterlist *l_sg;
1404 unsigned int offset;
1405
1406 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1407 io_task->bhs_pa.u.a32.address_lo);
1408 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1409 io_task->bhs_pa.u.a32.address_hi);
1410
1411 l_sg = sg;
1412 for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
1413 if (index == 0) {
1414 sg_len = sg_dma_len(sg);
1415 addr = (u64) sg_dma_address(sg);
1416 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1417 (addr & 0xFFFFFFFF));
1418 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1419 (addr >> 32));
1420 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1421 sg_len);
1422 sge_len = sg_len;
1423 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1424 1);
1425 } else {
1426 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1427 0);
1428 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1429 pwrb, sge_len);
1430 sg_len = sg_dma_len(sg);
1431 addr = (u64) sg_dma_address(sg);
1432 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1433 (addr & 0xFFFFFFFF));
1434 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1435 (addr >> 32));
1436 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1437 sg_len);
1438 }
1439 }
1440 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1441 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1442
1443 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1444
1445 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1446 io_task->bhs_pa.u.a32.address_hi);
1447 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1448 io_task->bhs_pa.u.a32.address_lo);
1449
1450 if (num_sg == 2)
1451 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1452 sg = l_sg;
1453 psgl++;
1454 psgl++;
1455 offset = 0;
1456 for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
1457 sg_len = sg_dma_len(sg);
1458 addr = (u64) sg_dma_address(sg);
1459 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1460 (addr & 0xFFFFFFFF));
1461 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1462 (addr >> 32));
1463 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1464 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1465 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1466 offset += sg_len;
1467 }
1468 psgl--;
1469 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1470}
1471
1472static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1473{
1474 struct iscsi_sge *psgl;
1475 unsigned long long addr;
1476 struct beiscsi_io_task *io_task = task->dd_data;
1477 struct beiscsi_conn *beiscsi_conn = io_task->conn;
1478 struct beiscsi_hba *phba = beiscsi_conn->phba;
1479
1480 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1481 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1482 io_task->bhs_pa.u.a32.address_lo);
1483 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1484 io_task->bhs_pa.u.a32.address_hi);
1485
1486 if (task->data) {
1487 if (task->data_count) {
1488 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1489 addr = (u64) pci_map_single(phba->pcidev,
1490 task->data,
1491 task->data_count, 1);
1492 } else {
1493 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1494 addr = 0;
1495 }
1496 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1497 (addr & 0xFFFFFFFF));
1498 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1499 (addr >> 32));
1500 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1501 task->data_count);
1502
1503 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1504 } else {
1505 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1506 addr = 0;
1507 }
1508
1509 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1510
1511 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1512
1513 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1514 io_task->bhs_pa.u.a32.address_hi);
1515 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1516 io_task->bhs_pa.u.a32.address_lo);
1517 if (task->data) {
1518 psgl++;
1519 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1520 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1521 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1522 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1523 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1524 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1525
1526 psgl++;
1527 if (task->data) {
1528 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1529 (addr & 0xFFFFFFFF));
1530 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1531 (addr >> 32));
1532 }
1533 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1534 }
1535 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1536}
1537
1538static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1539{
1540 unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages;
1541 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1542 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1543
1544 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1545 sizeof(struct sol_cqe));
1546 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
1547 sizeof(struct be_eq_entry));
1548 num_async_pdu_buf_pages =
1549 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1550 phba->params.defpdu_hdr_sz);
1551 num_async_pdu_buf_sgl_pages =
1552 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1553 sizeof(struct phys_addr));
1554 num_async_pdu_data_pages =
1555 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1556 phba->params.defpdu_data_sz);
1557 num_async_pdu_data_sgl_pages =
1558 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1559 sizeof(struct phys_addr));
1560
1561 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1562
1563 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1564 BE_ISCSI_PDU_HEADER_SIZE;
1565 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1566 sizeof(struct hwi_context_memory);
1567
1568 phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
1569 phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
1570
1571 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1572 * (phba->params.wrbs_per_cxn)
1573 * phba->params.cxns_per_ctrl;
1574 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
1575 (phba->params.wrbs_per_cxn);
1576 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1577 phba->params.cxns_per_ctrl);
1578
1579 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1580 phba->params.icds_per_ctrl;
1581 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1582 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1583
1584 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1585 num_async_pdu_buf_pages * PAGE_SIZE;
1586 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1587 num_async_pdu_data_pages * PAGE_SIZE;
1588 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1589 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1590 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1591 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1592 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1593 phba->params.asyncpdus_per_ctrl *
1594 sizeof(struct async_pdu_handle);
1595 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1596 phba->params.asyncpdus_per_ctrl *
1597 sizeof(struct async_pdu_handle);
1598 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1599 sizeof(struct hwi_async_pdu_context) +
1600 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1601}
1602
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	/*
	 * Allocate every region sized by beiscsi_find_mem_req().  Each
	 * region may be satisfied by several DMA-coherent fragments (up to
	 * BEISCSI_MAX_FRAGS_INIT), halving the request size on failure
	 * down to BE_MIN_MEM_SIZE.  On any terminal failure, everything
	 * allocated so far is unwound.  Returns 0 or -ENOMEM.
	 */
	struct be_mem_descriptor *mem_descr;
	dma_addr_t bus_add;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	/* Scratch fragment table, reused for each region. */
	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		/* Start with the largest fragment the hardware allows. */
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				/* Shrink the request: next power of two
				 * down, or halve if already one. */
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				/* Record this fragment and move on to the
				 * remainder of the region. */
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		/* Persist the fragment table for this region. */
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	/* Unwind: free the partial region (its fragments are still in
	 * mem_descr->mem_array from the failed iteration), then walk back
	 * through every fully-allocated region. */
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    mem_descr->mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
1697
/*
 * beiscsi_get_memory - size and allocate all driver memory regions
 * @phba: adapter instance
 *
 * First computes the per-region requirements, then performs the actual
 * allocations.  Returns 0 on success or the error from beiscsi_alloc_mem().
 */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);
	return status;
}
1703
1704static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1705{
1706 struct pdu_data_out *pdata_out;
1707 struct pdu_nop_out *pnop_out;
1708 struct be_mem_descriptor *mem_descr;
1709
1710 mem_descr = phba->init_mem;
1711 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1712 pdata_out =
1713 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1714 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1715
1716 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1717 IIOC_SCSI_DATA);
1718
1719 pnop_out =
1720 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1721 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1722
1723 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1724 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1725 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1726 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1727}
1728
1729static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1730{
1731 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1732 struct wrb_handle *pwrb_handle;
1733 struct hwi_controller *phwi_ctrlr;
1734 struct hwi_wrb_context *pwrb_context;
1735 struct iscsi_wrb *pwrb;
1736 unsigned int num_cxn_wrbh;
1737 unsigned int num_cxn_wrb, j, idx, index;
1738
1739 mem_descr_wrbh = phba->init_mem;
1740 mem_descr_wrbh += HWI_MEM_WRBH;
1741
1742 mem_descr_wrb = phba->init_mem;
1743 mem_descr_wrb += HWI_MEM_WRB;
1744
1745 idx = 0;
1746 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1747 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1748 ((sizeof(struct wrb_handle)) *
1749 phba->params.wrbs_per_cxn));
1750 phwi_ctrlr = phba->phwi_ctrlr;
1751
1752 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1753 pwrb_context = &phwi_ctrlr->wrb_context[index];
1754 SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
1755 pwrb_context);
1756 pwrb_context->pwrb_handle_base =
1757 kzalloc(sizeof(struct wrb_handle *) *
1758 phba->params.wrbs_per_cxn, GFP_KERNEL);
1759 pwrb_context->pwrb_handle_basestd =
1760 kzalloc(sizeof(struct wrb_handle *) *
1761 phba->params.wrbs_per_cxn, GFP_KERNEL);
1762 if (num_cxn_wrbh) {
1763 pwrb_context->alloc_index = 0;
1764 pwrb_context->wrb_handles_available = 0;
1765 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1766 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
1767 pwrb_context->pwrb_handle_basestd[j] =
1768 pwrb_handle;
1769 pwrb_context->wrb_handles_available++;
1770 pwrb_handle++;
1771 }
1772 pwrb_context->free_index = 0;
1773 num_cxn_wrbh--;
1774 } else {
1775 idx++;
1776 pwrb_handle =
1777 mem_descr_wrbh->mem_array[idx].virtual_address;
1778 num_cxn_wrbh =
1779 ((mem_descr_wrbh->mem_array[idx].size) /
1780 ((sizeof(struct wrb_handle)) *
1781 phba->params.wrbs_per_cxn));
1782 pwrb_context->alloc_index = 0;
1783 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1784 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
1785 pwrb_context->pwrb_handle_basestd[j] =
1786 pwrb_handle;
1787 pwrb_context->wrb_handles_available++;
1788 pwrb_handle++;
1789 }
1790 pwrb_context->free_index = 0;
1791 num_cxn_wrbh--;
1792 }
1793 }
1794 idx = 0;
1795 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1796 num_cxn_wrb =
1797 ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
1798 phba->params.wrbs_per_cxn);
1799
1800 for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
1801 pwrb_context = &phwi_ctrlr->wrb_context[index];
1802 if (num_cxn_wrb) {
1803 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1804 pwrb_handle = pwrb_context->pwrb_handle_base[j];
1805 pwrb_handle->pwrb = pwrb;
1806 pwrb++;
1807 }
1808 num_cxn_wrb--;
1809 } else {
1810 idx++;
1811 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1812 num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
1813 (sizeof(struct iscsi_wrb)) *
1814 phba->params.wrbs_per_cxn);
1815 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1816 pwrb_handle = pwrb_context->pwrb_handle_base[j];
1817 pwrb_handle->pwrb = pwrb;
1818 pwrb++;
1819 }
1820 num_cxn_wrb--;
1821 }
1822 }
1823}
1824
/*
 * hwi_init_async_pdu_ctx - lay out the async (default/unsolicited) PDU context
 * @phba: adapter instance
 *
 * Carves the pre-allocated HWI_MEM_ASYNC_* regions into the
 * hwi_async_pdu_context: the default PDU header and data buffer areas,
 * their rings, and the per-buffer handle arrays.  Each handle is linked
 * onto the matching free_list and left pointing at its slice of the
 * header/data buffer area, so the context is ready for
 * hwi_post_async_buffers().
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	/* The context structure itself lives in HWI_MEM_ASYNC_PDU_CONTEXT. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
		mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	/* Header and data sides both carry one entry per async PDU. */
	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Default PDU header buffer area (virtual + bus address). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.va_base =
		mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
		mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Default PDU header ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_header.ring_base =
		mem_descr->mem_array[0].virtual_address;

	/* Header handle array; handles are threaded onto free_list below. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.handle_base =
		mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Default PDU data buffer area (virtual + bus address). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_data.va_base =
		mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
		mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Default PDU data ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.ring_base =
		mem_descr->mem_array[0].virtual_address;

	/* Data handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.handle_base =
		mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/*
	 * Initialize one header handle and one data handle per async PDU:
	 * point each at its buffer slice, record the bus address and put
	 * the handle on the free list.  cri == -1 means "not bound to any
	 * connection yet".
	 */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
			pasync_ctx->async_data.pa_base.u.a64.address +
			(p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	/* Producer/consumer cursors start at "nothing posted yet". */
	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
1982
1983static int
1984be_sgl_create_contiguous(void *virtual_address,
1985 u64 physical_address, u32 length,
1986 struct be_dma_mem *sgl)
1987{
1988 WARN_ON(!virtual_address);
1989 WARN_ON(!physical_address);
1990 WARN_ON(!length > 0);
1991 WARN_ON(!sgl);
1992
1993 sgl->va = virtual_address;
1994 sgl->dma = physical_address;
1995 sgl->size = length;
1996
1997 return 0;
1998}
1999
/*
 * be_sgl_destroy_contiguous - reset an sgl descriptor.
 * Only the descriptor is cleared; the memory it described is NOT freed
 * (ownership stays with the init_mem arrays).
 */
static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}
2004
/*
 * hwi_build_be_sgl_arr - describe one mem_array fragment in @sgl.
 * Clears any previous descriptor contents first (sgl->va non-NULL means
 * @sgl was used before).  @phba is unused here but kept for signature
 * symmetry with the other sgl builders.
 */
static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
2016
2017static void
2018hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2019 struct mem_array *pmem, struct be_dma_mem *sgl)
2020{
2021 if (sgl->va)
2022 be_sgl_destroy_contiguous(sgl);
2023
2024 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2025 pmem->bus_address.u.a64.address,
2026 pmem->size, sgl);
2027}
2028
2029static int be_fill_queue(struct be_queue_info *q,
2030 u16 len, u16 entry_size, void *vaddress)
2031{
2032 struct be_dma_mem *mem = &q->dma_mem;
2033
2034 memset(q, 0, sizeof(*q));
2035 q->len = len;
2036 q->entry_size = entry_size;
2037 mem->size = len * entry_size;
2038 mem->va = vaddress;
2039 if (!mem->va)
2040 return -ENOMEM;
2041 memset(mem->va, 0, mem->size);
2042 return 0;
2043}
2044
2045static int beiscsi_create_eq(struct beiscsi_hba *phba,
2046 struct hwi_context_memory *phwi_context)
2047{
2048 unsigned int idx;
2049 int ret;
2050 struct be_queue_info *eq;
2051 struct be_dma_mem *mem;
2052 struct be_mem_descriptor *mem_descr;
2053 void *eq_vaddress;
2054
2055 idx = 0;
2056 eq = &phwi_context->be_eq.q;
2057 mem = &eq->dma_mem;
2058 mem_descr = phba->init_mem;
2059 mem_descr += HWI_MEM_EQ;
2060 eq_vaddress = mem_descr->mem_array[idx].virtual_address;
2061
2062 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2063 sizeof(struct be_eq_entry), eq_vaddress);
2064 if (ret) {
2065 shost_printk(KERN_ERR, phba->shost,
2066 "be_fill_queue Failed for EQ \n");
2067 return ret;
2068 }
2069
2070 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2071
2072 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2073 phwi_context->be_eq.cur_eqd);
2074 if (ret) {
2075 shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create"
2076 "Failedfor EQ \n");
2077 return ret;
2078 }
2079 SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
2080 return 0;
2081}
2082
2083static int beiscsi_create_cq(struct beiscsi_hba *phba,
2084 struct hwi_context_memory *phwi_context)
2085{
2086 unsigned int idx;
2087 int ret;
2088 struct be_queue_info *cq, *eq;
2089 struct be_dma_mem *mem;
2090 struct be_mem_descriptor *mem_descr;
2091 void *cq_vaddress;
2092
2093 idx = 0;
2094 cq = &phwi_context->be_cq;
2095 eq = &phwi_context->be_eq.q;
2096 mem = &cq->dma_mem;
2097 mem_descr = phba->init_mem;
2098 mem_descr += HWI_MEM_CQ;
2099 cq_vaddress = mem_descr->mem_array[idx].virtual_address;
2100 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2101 sizeof(struct sol_cqe), cq_vaddress);
2102 if (ret) {
2103 shost_printk(KERN_ERR, phba->shost,
2104 "be_fill_queue Failed for ISCSI CQ \n");
2105 return ret;
2106 }
2107
2108 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2109 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0);
2110 if (ret) {
2111 shost_printk(KERN_ERR, phba->shost,
2112 "beiscsi_cmd_eq_create Failed for ISCSI CQ \n");
2113 return ret;
2114 }
2115 SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
2116 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2117 return 0;
2118}
2119
2120static int
2121beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2122 struct hwi_context_memory *phwi_context,
2123 struct hwi_controller *phwi_ctrlr,
2124 unsigned int def_pdu_ring_sz)
2125{
2126 unsigned int idx;
2127 int ret;
2128 struct be_queue_info *dq, *cq;
2129 struct be_dma_mem *mem;
2130 struct be_mem_descriptor *mem_descr;
2131 void *dq_vaddress;
2132
2133 idx = 0;
2134 dq = &phwi_context->be_def_hdrq;
2135 cq = &phwi_context->be_cq;
2136 mem = &dq->dma_mem;
2137 mem_descr = phba->init_mem;
2138 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2139 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2140 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2141 sizeof(struct phys_addr),
2142 sizeof(struct phys_addr), dq_vaddress);
2143 if (ret) {
2144 shost_printk(KERN_ERR, phba->shost,
2145 "be_fill_queue Failed for DEF PDU HDR\n");
2146 return ret;
2147 }
2148 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2149 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2150 def_pdu_ring_sz,
2151 phba->params.defpdu_hdr_sz);
2152 if (ret) {
2153 shost_printk(KERN_ERR, phba->shost,
2154 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2155 return ret;
2156 }
2157 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2158 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2159 phwi_context->be_def_hdrq.id);
2160 hwi_post_async_buffers(phba, 1);
2161 return 0;
2162}
2163
2164static int
2165beiscsi_create_def_data(struct beiscsi_hba *phba,
2166 struct hwi_context_memory *phwi_context,
2167 struct hwi_controller *phwi_ctrlr,
2168 unsigned int def_pdu_ring_sz)
2169{
2170 unsigned int idx;
2171 int ret;
2172 struct be_queue_info *dataq, *cq;
2173 struct be_dma_mem *mem;
2174 struct be_mem_descriptor *mem_descr;
2175 void *dq_vaddress;
2176
2177 idx = 0;
2178 dataq = &phwi_context->be_def_dataq;
2179 cq = &phwi_context->be_cq;
2180 mem = &dataq->dma_mem;
2181 mem_descr = phba->init_mem;
2182 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2183 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2184 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2185 sizeof(struct phys_addr),
2186 sizeof(struct phys_addr), dq_vaddress);
2187 if (ret) {
2188 shost_printk(KERN_ERR, phba->shost,
2189 "be_fill_queue Failed for DEF PDU DATA\n");
2190 return ret;
2191 }
2192 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2193 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2194 def_pdu_ring_sz,
2195 phba->params.defpdu_data_sz);
2196 if (ret) {
2197 shost_printk(KERN_ERR, phba->shost,
2198 "be_cmd_create_default_pdu_queue Failed"
2199 " for DEF PDU DATA\n");
2200 return ret;
2201 }
2202 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2203 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2204 phwi_context->be_def_dataq.id);
2205 hwi_post_async_buffers(phba, 0);
2206 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2207 return 0;
2208}
2209
2210static int
2211beiscsi_post_pages(struct beiscsi_hba *phba)
2212{
2213 struct be_mem_descriptor *mem_descr;
2214 struct mem_array *pm_arr;
2215 unsigned int page_offset, i;
2216 struct be_dma_mem sgl;
2217 int status;
2218
2219 mem_descr = phba->init_mem;
2220 mem_descr += HWI_MEM_SGE;
2221 pm_arr = mem_descr->mem_array;
2222
2223 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2224 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2225 for (i = 0; i < mem_descr->num_elements; i++) {
2226 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2227 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2228 page_offset,
2229 (pm_arr->size / PAGE_SIZE));
2230 page_offset += pm_arr->size / PAGE_SIZE;
2231 if (status != 0) {
2232 shost_printk(KERN_ERR, phba->shost,
2233 "post sgl failed.\n");
2234 return status;
2235 }
2236 pm_arr++;
2237 }
2238 SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2239 return 0;
2240}
2241
2242static int
2243beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2244 struct hwi_context_memory *phwi_context,
2245 struct hwi_controller *phwi_ctrlr)
2246{
2247 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2248 u64 pa_addr_lo;
2249 unsigned int idx, num, i;
2250 struct mem_array *pwrb_arr;
2251 void *wrb_vaddr;
2252 struct be_dma_mem sgl;
2253 struct be_mem_descriptor *mem_descr;
2254 int status;
2255
2256 idx = 0;
2257 mem_descr = phba->init_mem;
2258 mem_descr += HWI_MEM_WRB;
2259 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2260 GFP_KERNEL);
2261 if (!pwrb_arr) {
2262 shost_printk(KERN_ERR, phba->shost,
2263 "Memory alloc failed in create wrb ring.\n");
2264 return -ENOMEM;
2265 }
2266 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2267 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2268 num_wrb_rings = mem_descr->mem_array[idx].size /
2269 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2270
2271 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2272 if (num_wrb_rings) {
2273 pwrb_arr[num].virtual_address = wrb_vaddr;
2274 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2275 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2276 sizeof(struct iscsi_wrb);
2277 wrb_vaddr += pwrb_arr[num].size;
2278 pa_addr_lo += pwrb_arr[num].size;
2279 num_wrb_rings--;
2280 } else {
2281 idx++;
2282 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2283 pa_addr_lo = mem_descr->mem_array[idx].\
2284 bus_address.u.a64.address;
2285 num_wrb_rings = mem_descr->mem_array[idx].size /
2286 (phba->params.wrbs_per_cxn *
2287 sizeof(struct iscsi_wrb));
2288 pwrb_arr[num].virtual_address = wrb_vaddr;
2289 pwrb_arr[num].bus_address.u.a64.address\
2290 = pa_addr_lo;
2291 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2292 sizeof(struct iscsi_wrb);
2293 wrb_vaddr += pwrb_arr[num].size;
2294 pa_addr_lo += pwrb_arr[num].size;
2295 num_wrb_rings--;
2296 }
2297 }
2298 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2299 wrb_mem_index = 0;
2300 offset = 0;
2301 size = 0;
2302
2303 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2304 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2305 &phwi_context->be_wrbq[i]);
2306 if (status != 0) {
2307 shost_printk(KERN_ERR, phba->shost,
2308 "wrbq create failed.");
2309 return status;
2310 }
2311 phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
2312 }
2313 kfree(pwrb_arr);
2314 return 0;
2315}
2316
2317static void free_wrb_handles(struct beiscsi_hba *phba)
2318{
2319 unsigned int index;
2320 struct hwi_controller *phwi_ctrlr;
2321 struct hwi_wrb_context *pwrb_context;
2322
2323 phwi_ctrlr = phba->phwi_ctrlr;
2324 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2325 pwrb_context = &phwi_ctrlr->wrb_context[index];
2326 kfree(pwrb_context->pwrb_handle_base);
2327 kfree(pwrb_context->pwrb_handle_basestd);
2328 }
2329}
2330
/*
 * hwi_cleanup - destroy every hardware queue created by hwi_init_port()
 * @phba: adapter instance
 *
 * Teardown runs in reverse creation order: per-connection WRB queues,
 * WRB handle arrays, default PDU header/data queues, posted SGL pages,
 * the completion queue and finally the event queue.  Each queue is only
 * destroyed when its ->created flag is set, so this is safe to call
 * from partial-initialization error paths.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* One WRB queue per connection. */
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}

	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* SGL pages carry no be_queue_info; NULL selects them by type. */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	q = &phwi_context->be_cq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);

	q = &phwi_context->be_eq.q;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
2367
2368static int hwi_init_port(struct beiscsi_hba *phba)
2369{
2370 struct hwi_controller *phwi_ctrlr;
2371 struct hwi_context_memory *phwi_context;
2372 unsigned int def_pdu_ring_sz;
2373 struct be_ctrl_info *ctrl = &phba->ctrl;
2374 int status;
2375
2376 def_pdu_ring_sz =
2377 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2378 phwi_ctrlr = phba->phwi_ctrlr;
2379
2380 phwi_context = phwi_ctrlr->phwi_ctxt;
2381 phwi_context->be_eq.max_eqd = 0;
2382 phwi_context->be_eq.min_eqd = 0;
2383 phwi_context->be_eq.cur_eqd = 64;
2384 phwi_context->be_eq.enable_aic = false;
2385 be_cmd_fw_initialize(&phba->ctrl);
2386 status = beiscsi_create_eq(phba, phwi_context);
2387 if (status != 0) {
2388 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2389 goto error;
2390 }
2391
2392 status = mgmt_check_supported_fw(ctrl);
2393 if (status != 0) {
2394 shost_printk(KERN_ERR, phba->shost,
2395 "Unsupported fw version \n");
2396 goto error;
2397 }
2398
2399 status = mgmt_get_fw_config(ctrl, phba);
2400 if (status != 0) {
2401 shost_printk(KERN_ERR, phba->shost,
2402 "Error getting fw config\n");
2403 goto error;
2404 }
2405
2406 status = beiscsi_create_cq(phba, phwi_context);
2407 if (status != 0) {
2408 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2409 goto error;
2410 }
2411
2412 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2413 def_pdu_ring_sz);
2414 if (status != 0) {
2415 shost_printk(KERN_ERR, phba->shost,
2416 "Default Header not created\n");
2417 goto error;
2418 }
2419
2420 status = beiscsi_create_def_data(phba, phwi_context,
2421 phwi_ctrlr, def_pdu_ring_sz);
2422 if (status != 0) {
2423 shost_printk(KERN_ERR, phba->shost,
2424 "Default Data not created\n");
2425 goto error;
2426 }
2427
2428 status = beiscsi_post_pages(phba);
2429 if (status != 0) {
2430 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2431 goto error;
2432 }
2433
2434 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2435 if (status != 0) {
2436 shost_printk(KERN_ERR, phba->shost,
2437 "WRB Rings not created\n");
2438 goto error;
2439 }
2440
2441 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2442 return 0;
2443
2444error:
2445 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
2446 hwi_cleanup(phba);
2447 return -ENOMEM;
2448}
2449
2450
2451static int hwi_init_controller(struct beiscsi_hba *phba)
2452{
2453 struct hwi_controller *phwi_ctrlr;
2454
2455 phwi_ctrlr = phba->phwi_ctrlr;
2456 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2457 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2458 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2459 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2460 phwi_ctrlr->phwi_ctxt);
2461 } else {
2462 shost_printk(KERN_ERR, phba->shost,
2463 "HWI_MEM_ADDN_CONTEXT is more than one element."
2464 "Failing to load\n");
2465 return -ENOMEM;
2466 }
2467
2468 iscsi_init_global_templates(phba);
2469 beiscsi_init_wrb_handle(phba);
2470 hwi_init_async_pdu_ctx(phba);
2471 if (hwi_init_port(phba) != 0) {
2472 shost_printk(KERN_ERR, phba->shost,
2473 "hwi_init_controller failed\n");
2474 return -ENOMEM;
2475 }
2476 return 0;
2477}
2478
2479static void beiscsi_free_mem(struct beiscsi_hba *phba)
2480{
2481 struct be_mem_descriptor *mem_descr;
2482 int i, j;
2483
2484 mem_descr = phba->init_mem;
2485 i = 0;
2486 j = 0;
2487 for (i = 0; i < SE_MEM_MAX; i++) {
2488 for (j = mem_descr->num_elements; j > 0; j--) {
2489 pci_free_consistent(phba->pcidev,
2490 mem_descr->mem_array[j - 1].size,
2491 mem_descr->mem_array[j - 1].virtual_address,
2492 mem_descr->mem_array[j - 1].bus_address.
2493 u.a64.address);
2494 }
2495 kfree(mem_descr->mem_array);
2496 mem_descr++;
2497 }
2498 kfree(phba->init_mem);
2499 kfree(phba->phwi_ctrlr);
2500}
2501
2502static int beiscsi_init_controller(struct beiscsi_hba *phba)
2503{
2504 int ret = -ENOMEM;
2505
2506 ret = beiscsi_get_memory(phba);
2507 if (ret < 0) {
2508 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
2509 "Failed in beiscsi_alloc_memory \n");
2510 return ret;
2511 }
2512
2513 ret = hwi_init_controller(phba);
2514 if (ret)
2515 goto free_init;
2516 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
2517 return 0;
2518
2519free_init:
2520 beiscsi_free_mem(phba);
2521 return -ENOMEM;
2522}
2523
/*
 * beiscsi_init_sgl_handle - build the I/O and error-handling SGL pools
 * @phba: adapter instance
 *
 * Splits the HWI_MEM_SGLH handle region between two pointer arrays:
 * the first ios_per_ctrl handles serve normal I/O, the remaining
 * (icds_per_ctrl - ios_per_ctrl) serve error-handling commands.  Each
 * handle is then bound to its iscsi_sge fragment group from the
 * HWI_MEM_SGE region and given an absolute sgl_index based at
 * fw_config.iscsi_cid_start.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or when the
 * handle region is fragmented (more than one element).
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;
	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	/* The handle region must be one contiguous fragment. */
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* First walk: distribute raw handles between the two pools. */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
					psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d \n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	/* Second walk: attach each handle's SGE fragment group. */
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
		 mem_descr_sg->num_elements);
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			/* Same pool split as above, in the same order. */
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_cid_start + arr_index++;
		}
		idx++;
	}
	/* Both pools start with allocation and free cursors at zero. */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
2621
2622static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
2623{
2624 int i, new_cid;
2625
2626 phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
2627 GFP_KERNEL);
2628 if (!phba->cid_array) {
2629 shost_printk(KERN_ERR, phba->shost,
2630 "Failed to allocate memory in "
2631 "hba_setup_cid_tbls\n");
2632 return -ENOMEM;
2633 }
2634 phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
2635 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
2636 if (!phba->ep_array) {
2637 shost_printk(KERN_ERR, phba->shost,
2638 "Failed to allocate memory in "
2639 "hba_setup_cid_tbls \n");
2640 kfree(phba->cid_array);
2641 return -ENOMEM;
2642 }
2643 new_cid = phba->fw_config.iscsi_icd_start;
2644 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2645 phba->cid_array[i] = new_cid;
2646 new_cid += 2;
2647 }
2648 phba->avlbl_cids = phba->params.cxns_per_ctrl;
2649 return 0;
2650}
2651
2652static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2653{
2654 struct be_ctrl_info *ctrl = &phba->ctrl;
2655 struct hwi_controller *phwi_ctrlr;
2656 struct hwi_context_memory *phwi_context;
2657 struct be_queue_info *eq;
2658 u8 __iomem *addr;
2659 u32 reg;
2660 u32 enabled;
2661
2662 phwi_ctrlr = phba->phwi_ctrlr;
2663 phwi_context = phwi_ctrlr->phwi_ctxt;
2664
2665 eq = &phwi_context->be_eq.q;
2666 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
2667 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
2668 reg = ioread32(addr);
2669 SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
2670
2671 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2672 if (!enabled) {
2673 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2674 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
2675 iowrite32(reg, addr);
2676 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
2677
2678 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
2679 } else
2680 shost_printk(KERN_WARNING, phba->shost,
2681 "In hwi_enable_intr, Not Enabled \n");
2682 return true;
2683}
2684
2685static void hwi_disable_intr(struct beiscsi_hba *phba)
2686{
2687 struct be_ctrl_info *ctrl = &phba->ctrl;
2688
2689 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
2690 u32 reg = ioread32(addr);
2691
2692 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2693 if (enabled) {
2694 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2695 iowrite32(reg, addr);
2696 } else
2697 shost_printk(KERN_WARNING, phba->shost,
2698 "In hwi_disable_intr, Already Disabled \n");
2699}
2700
2701static int beiscsi_init_port(struct beiscsi_hba *phba)
2702{
2703 int ret;
2704
2705 ret = beiscsi_init_controller(phba);
2706 if (ret < 0) {
2707 shost_printk(KERN_ERR, phba->shost,
2708 "beiscsi_dev_probe - Failed in"
2709 "beiscsi_init_controller \n");
2710 return ret;
2711 }
2712 ret = beiscsi_init_sgl_handle(phba);
2713 if (ret < 0) {
2714 shost_printk(KERN_ERR, phba->shost,
2715 "beiscsi_dev_probe - Failed in"
2716 "beiscsi_init_sgl_handle \n");
2717 goto do_cleanup_ctrlr;
2718 }
2719
2720 if (hba_setup_cid_tbls(phba)) {
2721 shost_printk(KERN_ERR, phba->shost,
2722 "Failed in hba_setup_cid_tbls\n");
2723 kfree(phba->io_sgl_hndl_base);
2724 kfree(phba->eh_sgl_hndl_base);
2725 goto do_cleanup_ctrlr;
2726 }
2727
2728 return ret;
2729
2730do_cleanup_ctrlr:
2731 hwi_cleanup(phba);
2732 return ret;
2733}
2734
2735static void hwi_purge_eq(struct beiscsi_hba *phba)
2736{
2737 struct hwi_controller *phwi_ctrlr;
2738 struct hwi_context_memory *phwi_context;
2739 struct be_queue_info *eq;
2740 struct be_eq_entry *eqe = NULL;
2741
2742 phwi_ctrlr = phba->phwi_ctrlr;
2743 phwi_context = phwi_ctrlr->phwi_ctxt;
2744 eq = &phwi_context->be_eq.q;
2745 eqe = queue_tail_node(eq);
2746
2747 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
2748 & EQE_VALID_MASK) {
2749 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2750 queue_tail_inc(eq);
2751 eqe = queue_tail_node(eq);
2752 }
2753}
2754
2755static void beiscsi_clean_port(struct beiscsi_hba *phba)
2756{
2757 unsigned char mgmt_status;
2758
2759 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
2760 if (mgmt_status)
2761 shost_printk(KERN_WARNING, phba->shost,
2762 "mgmt_epfw_cleanup FAILED \n");
2763 hwi_cleanup(phba);
2764 hwi_purge_eq(phba);
2765 kfree(phba->io_sgl_hndl_base);
2766 kfree(phba->eh_sgl_hndl_base);
2767 kfree(phba->cid_array);
2768 kfree(phba->ep_array);
2769}
2770
/*
 * beiscsi_offload_connection - hand a logged-in connection to the firmware
 * @beiscsi_conn: driver connection being moved to offload mode
 * @params: negotiated iSCSI parameters, packed in firmware dword layout
 *
 * Builds a target-context-update WRB from the negotiated parameters and
 * rings the TXULP0 doorbell so the adapter takes over PDU processing for
 * this connection.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* Copy the negotiated session limits out of the packed dword array */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	/* Single-bit flags share dwords; mask then shift down to bit 0 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	/* Firmware continues numbering from the next expected StatSN */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* Pad buffer comes from the global-header memory region */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	/* Hardware expects the WRB in little-endian layout */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* Doorbell: connection id, WRB index, one WRB posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
			 DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
2855
/*
 * beiscsi_parse_pdu - recover the libiscsi task index from a wire itt
 * @conn: iscsi connection the PDU arrived on
 * @itt: initiator task tag as encoded by beiscsi_alloc_pdu
 * @index: out, libiscsi task index
 * @age: out (optional), current session age
 *
 * beiscsi_alloc_pdu packs the task index into the upper 16 bits of the
 * big-endian itt (low 16 bits carry the SGL index), so shifting right
 * by 16 yields the task index.
 */
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = be32_to_cpu(itt) >> 16;
	if (age)
		*age = conn->session->age;
}
2863
2864/**
2865 * beiscsi_alloc_pdu - allocates pdu and related resources
2866 * @task: libiscsi task
2867 * @opcode: opcode of pdu for task
2868 *
2869 * This is called with the session lock held. It will allocate
2870 * the wrb and sgl if needed for the command. And it will prep
2871 * the pdu's itt. beiscsi_parse_pdu will later translate
2872 * the pdu itt to the libiscsi task itt.
2873 */
2874static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2875{
2876 struct beiscsi_io_task *io_task = task->dd_data;
2877 struct iscsi_conn *conn = task->conn;
2878 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
2879 struct beiscsi_hba *phba = beiscsi_conn->phba;
2880 struct hwi_wrb_context *pwrb_context;
2881 struct hwi_controller *phwi_ctrlr;
2882 itt_t itt;
2883 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
2884 dma_addr_t paddr;
2885
2886 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
2887 GFP_KERNEL, &paddr);
2888
2889 if (!io_task->cmd_bhs)
2890 return -ENOMEM;
2891
2892 io_task->bhs_pa.u.a64.address = paddr;
2893 io_task->pwrb_handle = alloc_wrb_handle(phba,
2894 beiscsi_conn->beiscsi_conn_cid,
2895 task->itt);
2896 io_task->pwrb_handle->pio_handle = task;
2897 io_task->conn = beiscsi_conn;
2898
2899 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
2900 task->hdr_max = sizeof(struct be_cmd_bhs);
2901
2902 if (task->sc) {
2903 spin_lock(&phba->io_sgl_lock);
2904 io_task->psgl_handle = alloc_io_sgl_handle(phba);
2905 spin_unlock(&phba->io_sgl_lock);
2906 if (!io_task->psgl_handle)
2907 goto free_hndls;
2908
2909 } else {
2910 io_task->scsi_cmnd = NULL;
2911 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
2912 if (!beiscsi_conn->login_in_progress) {
2913 spin_lock(&phba->mgmt_sgl_lock);
2914 io_task->psgl_handle = (struct sgl_handle *)
2915 alloc_mgmt_sgl_handle(phba);
2916 spin_unlock(&phba->mgmt_sgl_lock);
2917 if (!io_task->psgl_handle)
2918 goto free_hndls;
2919
2920 beiscsi_conn->login_in_progress = 1;
2921 beiscsi_conn->plogin_sgl_handle =
2922 io_task->psgl_handle;
2923 } else {
2924 io_task->psgl_handle =
2925 beiscsi_conn->plogin_sgl_handle;
2926 }
2927 } else {
2928 spin_lock(&phba->mgmt_sgl_lock);
2929 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
2930 spin_unlock(&phba->mgmt_sgl_lock);
2931 if (!io_task->psgl_handle)
2932 goto free_hndls;
2933 }
2934 }
2935 itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) |
2936 (unsigned int)(io_task->psgl_handle->sgl_index));
2937 io_task->cmd_bhs->iscsi_hdr.itt = itt;
2938 return 0;
2939
2940free_hndls:
2941 phwi_ctrlr = phba->phwi_ctrlr;
2942 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
2943 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2944 io_task->pwrb_handle = NULL;
2945 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
2946 io_task->bhs_pa.u.a64.address);
2947 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
2948 return -ENOMEM;
2949}
2950
/*
 * beiscsi_cleanup_task - release per-task resources after completion
 * @task: libiscsi task being cleaned up
 *
 * Frees the task's WRB handle and BHS buffer, then returns the SGL
 * handle to its pool.  Login tasks are special-cased: their mgmt SGL
 * handle is the connection-wide plogin_sgl_handle shared by all login
 * PDUs (see beiscsi_alloc_pdu), so it is deliberately NOT freed here.
 */
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
	}

	if (task->sc) {
		/* SCSI command: return the I/O SGL handle */
		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		/* Login tasks share plogin_sgl_handle; keep it alive */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
			return;
		if (io_task->psgl_handle) {
			spin_lock(&phba->mgmt_sgl_lock);
			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	}
}
2991
/*
 * beiscsi_iotask - post a SCSI read/write command WRB to the adapter
 * @task: libiscsi task carrying the SCSI command
 * @sg: DMA-mapped scatterlist for the data buffer
 * @num_sg: number of entries in @sg
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (data-out) command
 *
 * Fills in the task's pre-allocated WRB (and, for writes, the embedded
 * Data-Out PDU template), writes the SGL, and rings the TXULP0 doorbell.
 *
 * Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* Prepare the Data-Out PDU template the hardware will use */
		SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		SE_DEBUG(DBG_LVL_4, "READ Command \t");
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Mirror the LUN from the command BHS into the Data-Out template */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	/* NOTE(review): only lun[0] is carried into the WRB — confirm the
	 * hardware needs just the first-level LUN word here. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* Hardware expects the WRB in little-endian layout */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: connection id, WRB index, one WRB posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3055
/*
 * beiscsi_mtask - post a management PDU (login, nop, text, TMF, logout)
 * @task: libiscsi task carrying the management PDU
 *
 * Sets the WRB type per opcode, copies the PDU payload, and rings the
 * TXULP0 doorbell.  For a TMF, the affected task's ICDs are invalidated
 * with the firmware first.
 *
 * Returns 0 on success (including a TMF whose target task is already
 * gone), -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	struct iscsi_task *aborted_task;

	pwrb = io_task->pwrb_handle->pwrb;
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* NOTE(review): NOP-Out is typed INI_RD_CMD — confirm this
		 * is the intended WRB type for ping PDUs. */
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		/* Find the task being aborted; nothing to do if it is gone */
		aborted_task = iscsi_itt_to_task(conn,
					((struct iscsi_tm *)task->hdr)->rtt);
		if (!aborted_task)
			return 0;
		aborted_io_task = aborted_task->dd_data;
		if (!aborted_io_task->scsi_cmnd)
			return 0;

		/* Tell the firmware to drop the aborted task's ICDs first */
		mgmt_invalidate_icds(phba,
				     aborted_io_task->psgl_handle->sgl_index,
				     beiscsi_conn->beiscsi_conn_cid);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      be32_to_cpu(task->data_count));
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* Hardware expects the WRB in little-endian layout */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: connection id, WRB index, one WRB posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3132
3133static int beiscsi_task_xmit(struct iscsi_task *task)
3134{
3135 struct iscsi_conn *conn = task->conn;
3136 struct beiscsi_io_task *io_task = task->dd_data;
3137 struct scsi_cmnd *sc = task->sc;
3138 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3139 struct scatterlist *sg;
3140 int num_sg;
3141 unsigned int writedir = 0, xferlen = 0;
3142
3143 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3144 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3145 task, conn, beiscsi_conn);
3146 if (!sc)
3147 return beiscsi_mtask(task);
3148
3149 io_task->scsi_cmnd = sc;
3150 num_sg = scsi_dma_map(sc);
3151 if (num_sg < 0) {
3152 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
3153 return num_sg;
3154 }
3155 SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3156 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3157 xferlen = scsi_bufflen(sc);
3158 sg = scsi_sglist(sc);
3159 if (sc->sc_data_direction == DMA_TO_DEVICE) {
3160 writedir = 1;
3161 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3162 task->imm_count);
3163 } else
3164 writedir = 0;
3165 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3166}
3167
/*
 * beiscsi_remove - PCI remove callback; tear everything down
 * @pcidev: PCI device being removed
 *
 * Reverses beiscsi_dev_probe in order: mask interrupts, free the IRQ,
 * stop the workqueue and iopoll, clean the port/firmware state, release
 * memory and BARs, and finally unwind the iSCSI host registration.
 */
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
		return;
	}

	/* Stop interrupt sources before freeing the handlers that use them */
	hwi_disable_intr(phba);
	if (phba->pcidev->irq)
		free_irq(phba->pcidev->irq, phba);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		blk_iopoll_disable(&phba->iopoll);

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}
3196
3197static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3198 const struct pci_device_id *id)
3199{
3200 struct beiscsi_hba *phba = NULL;
3201 int ret;
3202
3203 ret = beiscsi_enable_pci(pcidev);
3204 if (ret < 0) {
3205 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3206 "Failed to enable pci device \n");
3207 return ret;
3208 }
3209
3210 phba = beiscsi_hba_alloc(pcidev);
3211 if (!phba) {
3212 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3213 " Failed in beiscsi_hba_alloc \n");
3214 goto disable_pci;
3215 }
3216
3217 pci_set_drvdata(pcidev, phba);
3218 ret = be_ctrl_init(phba, pcidev);
3219 if (ret) {
3220 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3221 "Failed in be_ctrl_init\n");
3222 goto hba_free;
3223 }
3224
3225 spin_lock_init(&phba->io_sgl_lock);
3226 spin_lock_init(&phba->mgmt_sgl_lock);
3227 spin_lock_init(&phba->isr_lock);
3228 beiscsi_get_params(phba);
3229 ret = beiscsi_init_port(phba);
3230 if (ret < 0) {
3231 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3232 "Failed in beiscsi_init_port\n");
3233 goto free_port;
3234 }
3235
3236 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3237 phba->shost->host_no);
3238 phba->wq = create_singlethread_workqueue(phba->wq_name);
3239 if (!phba->wq) {
3240 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3241 "Failed to allocate work queue\n");
3242 goto free_twq;
3243 }
3244
3245 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3246
3247 if (blk_iopoll_enabled) {
3248 blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll);
3249 blk_iopoll_enable(&phba->iopoll);
3250 }
3251
3252 ret = beiscsi_init_irqs(phba);
3253 if (ret < 0) {
3254 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3255 "Failed to beiscsi_init_irqs\n");
3256 goto free_blkenbld;
3257 }
3258 ret = hwi_enable_intr(phba);
3259 if (ret < 0) {
3260 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3261 "Failed to hwi_enable_intr\n");
3262 goto free_ctrlr;
3263 }
3264
3265 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3266 return 0;
3267
3268free_ctrlr:
3269 if (phba->pcidev->irq)
3270 free_irq(phba->pcidev->irq, phba);
3271free_blkenbld:
3272 destroy_workqueue(phba->wq);
3273 if (blk_iopoll_enabled)
3274 blk_iopoll_disable(&phba->iopoll);
3275free_twq:
3276 beiscsi_clean_port(phba);
3277 beiscsi_free_mem(phba);
3278free_port:
3279 pci_free_consistent(phba->pcidev,
3280 phba->ctrl.mbox_mem_alloced.size,
3281 phba->ctrl.mbox_mem_alloced.va,
3282 phba->ctrl.mbox_mem_alloced.dma);
3283 beiscsi_unmap_pci_function(phba);
3284hba_free:
3285 iscsi_host_remove(phba->shost);
3286 pci_dev_put(phba->pcidev);
3287 iscsi_host_free(phba->shost);
3288disable_pci:
3289 pci_disable_device(pcidev);
3290 return ret;
3291}
3292
/*
 * beiscsi_iscsi_transport - open-iscsi transport template for this driver.
 * Registered in beiscsi_module_init(); ops mix driver-specific handlers
 * with generic libiscsi helpers (teardown, session params, send_pdu).
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
3346
/* PCI driver template; id_table matches the ServerEngines/Emulex devices */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
3353
3354static int __init beiscsi_module_init(void)
3355{
3356 int ret;
3357
3358 beiscsi_scsi_transport =
3359 iscsi_register_transport(&beiscsi_iscsi_transport);
3360 if (!beiscsi_scsi_transport) {
3361 SE_DEBUG(DBG_LVL_1,
3362 "beiscsi_module_init - Unable to register beiscsi"
3363 "transport.\n");
3364 ret = -ENOMEM;
3365 }
3366 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3367 &beiscsi_iscsi_transport);
3368
3369 ret = pci_register_driver(&beiscsi_pci_driver);
3370 if (ret) {
3371 SE_DEBUG(DBG_LVL_1,
3372 "beiscsi_module_init - Unable to register"
3373 "beiscsi pci driver.\n");
3374 goto unregister_iscsi_transport;
3375 }
3376 return 0;
3377
3378unregister_iscsi_transport:
3379 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3380 return ret;
3381}
3382
/*
 * beiscsi_module_exit - module exit point.
 * Unregister the PCI driver (tearing down all adapters) before the
 * transport, reversing beiscsi_module_init's registration order.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
3388
3389module_init(beiscsi_module_init);
3390module_exit(beiscsi_module_exit);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
new file mode 100644
index 000000000000..53c9b70ac7ac
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -0,0 +1,837 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20
21#ifndef _BEISCSI_MAIN_
22#define _BEISCSI_MAIN_
23
24
25#include <linux/kernel.h>
26#include <linux/pci.h>
27#include <linux/in.h>
28#include <linux/blk-iopoll.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h>
33#include <scsi/iscsi_proto.h>
34#include <scsi/libiscsi.h>
35#include <scsi/scsi_transport_iscsi.h>
36
37#include "be.h"
38
39
40
#define DRV_NAME		"be2iscsi"
#define BUILD_STR		"2.0.527.0"

/*
 * BUG FIX: the adjacent string literals lacked separating spaces, producing
 * "BladeEngine2Linux iSCSI Driver version2.0.527.0" in the driver banner.
 */
#define BE_NAME			"ServerEngines BladeEngine2 " \
				"Linux iSCSI Driver version " BUILD_STR
#define DRV_DESC		BE_NAME " " "Driver"
47
48#define BE_VENDOR_ID 0x19A2
49#define BE_DEVICE_ID1 0x212
50#define OC_DEVICE_ID1 0x702
51#define OC_DEVICE_ID2 0x703
52
53#define BE2_MAX_SESSIONS 64
54#define BE2_CMDS_PER_CXN 128
55#define BE2_LOGOUTS BE2_MAX_SESSIONS
56#define BE2_TMFS 16
57#define BE2_NOPOUT_REQ 16
58#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
59#define BE2_MAX_ICDS 2048
60#define BE2_SGE 32
61#define BE2_DEFPDU_HDR_SZ 64
62#define BE2_DEFPDU_DATA_SZ 8192
63#define BE2_IO_DEPTH \
64 (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
65
66#define BEISCSI_SGLIST_ELEMENTS BE2_SGE
67
68#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
69#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
70#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
71
72#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
73#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
74#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
75#define BEISCSI_MAX_FRAGS_INIT 192
76#define BE_NUM_MSIX_ENTRIES 1
77#define MPU_EP_SEMAPHORE 0xac
78
79#define BE_SENSE_INFO_SIZE 258
80#define BE_ISCSI_PDU_HEADER_SIZE 64
81#define BE_MIN_MEM_SIZE 16384
82
83#define IIOC_SCSI_DATA 0x05 /* Write Operation */
84
85#define DBG_LVL 0x00000001
86#define DBG_LVL_1 0x00000001
87#define DBG_LVL_2 0x00000002
88#define DBG_LVL_3 0x00000004
89#define DBG_LVL_4 0x00000008
90#define DBG_LVL_5 0x00000010
91#define DBG_LVL_6 0x00000020
92#define DBG_LVL_7 0x00000040
93#define DBG_LVL_8 0x00000080
94
/*
 * SE_DEBUG - conditional debug print, gated by the compile-time DBG_LVL mask.
 * Prefixes each message with the calling function and line number.
 *
 * NOTE(review): the trailing ';' after "while (0)" defeats the do/while(0)
 * idiom (an SE_DEBUG(...) followed by its own ';' becomes two statements,
 * which breaks unbraced if/else).  At least one call site appears to rely
 * on this stray semicolon, so fix the macro and all call sites together.
 */
#define SE_DEBUG(debug_mask, fmt, args...) \
do { \
	if (debug_mask & DBG_LVL) { \
		printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
		printk(fmt, ##args); \
	} \
} while (0);
102
103/**
104 * hardware needs the async PDU buffers to be posted in multiples of 8
105 * So have atleast 8 of them by default
106 */
107
108#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
109
110/********* Memory BAR register ************/
111#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
112/**
113 * Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
114 * Disable" may still globally block interrupts in addition to individual
115 * interrupt masks; a mechanism for the device driver to block all interrupts
116 * atomically without having to arbitrate for the PCI Interrupt Disable bit
117 * with the OS.
118 */
119#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
120
121/********* ISR0 Register offset **********/
122#define CEV_ISR0_OFFSET 0xC18
123#define CEV_ISR_SIZE 4
124
125/**
126 * Macros for reading/writing a protection domain or CSR registers
127 * in BladeEngine.
128 */
129
130#define DB_TXULP0_OFFSET 0x40
131#define DB_RXULP0_OFFSET 0xA0
132/********* Event Q door bell *************/
133#define DB_EQ_OFFSET DB_CQ_OFFSET
134#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
135/* Clear the interrupt for this eq */
136#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
137/* Must be 1 */
138#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
139/* Number of event entries processed */
140#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
141/* Rearm bit */
142#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
143
144/********* Compl Q door bell *************/
145#define DB_CQ_OFFSET 0x120
146#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
147/* Number of event entries processed */
148#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
149/* Rearm bit */
150#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
151
152#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
153#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
154 (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
155#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
156 (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
157
158#define PAGES_REQUIRED(x) \
159 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
160
/* Indices into the init_mem descriptor array; one entry per memory region
 * the driver pre-allocates for the hardware interface. */
enum be_mem_enum {
	HWI_MEM_ADDN_CONTEXT,
	HWI_MEM_CQ,
	HWI_MEM_EQ,
	HWI_MEM_WRB,
	HWI_MEM_WRBH,
	HWI_MEM_SGLH,	/* 5 */
	HWI_MEM_SGE,
	HWI_MEM_ASYNC_HEADER_BUF,
	HWI_MEM_ASYNC_DATA_BUF,
	HWI_MEM_ASYNC_HEADER_RING,
	HWI_MEM_ASYNC_DATA_RING,	/* 10 */
	HWI_MEM_ASYNC_HEADER_HANDLE,
	HWI_MEM_ASYNC_DATA_HANDLE,
	HWI_MEM_ASYNC_PDU_CONTEXT,
	ISCSI_MEM_GLOBAL_HEADER,
	SE_MEM_MAX	/* 15 */
};
179
/* 64-bit bus address split into two 32-bit halves (low word first) */
struct be_bus_address32 {
	unsigned int address_lo;
	unsigned int address_hi;
};
184
/* 64-bit bus address as a single integer */
struct be_bus_address64 {
	unsigned long long address;
};
188
/* Bus address viewable either as two 32-bit words or one 64-bit value */
struct be_bus_address {
	union {
		struct be_bus_address32 a32;
		struct be_bus_address64 a64;
	} u;
};
195
/* One contiguous DMA-able chunk within a memory region */
struct mem_array {
	struct be_bus_address bus_address;	/* Bus address of location */
	void *virtual_address;		/* virtual address to the location */
	unsigned int size;		/* Size required by memory block */
};
201
/* Describes one logical memory region (indexed by enum be_mem_enum),
 * possibly split across several mem_array chunks. */
struct be_mem_descriptor {
	unsigned int index;	/* Index of this memory parameter */
	unsigned int category;	/* type indicates cached/non-cached */
	unsigned int num_elements;	/* number of elements in this
					 * descriptor
					 */
	unsigned int alignment_mask;	/* Alignment mask for this block */
	unsigned int size_in_bytes;	/* Size required by memory block */
	struct mem_array *mem_array;
};
212
/* Host-side handle pairing an SGL slot index with its first SGE fragment */
struct sgl_handle {
	unsigned int sgl_index;
	struct iscsi_sge *pfrag;
};
217
/* Tunables and sizing parameters for one HBA; the trailing page counts
 * are derived from the others and kept mainly for debugging. */
struct hba_parameters {
	unsigned int ios_per_ctrl;
	unsigned int cxns_per_ctrl;
	unsigned int asyncpdus_per_ctrl;
	unsigned int icds_per_ctrl;
	unsigned int num_sge_per_io;
	unsigned int defpdu_hdr_sz;
	unsigned int defpdu_data_sz;
	unsigned int num_cq_entries;
	unsigned int num_eq_entries;
	unsigned int wrbs_per_cxn;
	unsigned int crashmode;
	unsigned int hba_num;

	unsigned int mgmt_ws_sz;
	unsigned int hwi_ws_sz;

	unsigned int eto;
	unsigned int ldto;

	unsigned int dbg_flags;
	unsigned int num_cxn;

	unsigned int eq_timer;
	/**
	 * These are calculated from other params. They're here
	 * for debug purposes
	 */
	unsigned int num_mcc_pages;
	unsigned int num_mcc_cq_pages;
	unsigned int num_cq_pages;
	unsigned int num_eq_pages;

	unsigned int num_async_pdu_buf_pages;
	unsigned int num_async_pdu_buf_sgl_pages;
	unsigned int num_async_pdu_buf_cq_pages;

	unsigned int num_async_pdu_hdr_pages;
	unsigned int num_async_pdu_hdr_sgl_pages;
	unsigned int num_async_pdu_hdr_cq_pages;

	unsigned int num_sge;
};
261
/* Per-adapter state: BAR mappings, SGL handle pools, CID allocator,
 * firmware configuration, and deferred-work plumbing. */
struct beiscsi_hba {
	struct hba_parameters params;
	struct hwi_controller *phwi_ctrlr;
	unsigned int mem_req[SE_MEM_MAX];
	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI Config */
	struct be_bus_address csr_pa;	/* CSR */
	struct be_bus_address db_pa;	/* Door Bell */
	struct be_bus_address pci_pa;	/* PCI Config */
	/* PCI representation of our HBA */
	struct pci_dev *pcidev;
	unsigned int state;
	unsigned short asic_revision;
	struct blk_iopoll	iopoll;
	struct be_mem_descriptor *init_mem;

	/* I/O SGL handle pool (guarded by io_sgl_lock) */
	unsigned short io_sgl_alloc_index;
	unsigned short io_sgl_free_index;
	unsigned short io_sgl_hndl_avbl;
	struct sgl_handle **io_sgl_hndl_base;

	/* Management/eh SGL handle pool (guarded by mgmt_sgl_lock) */
	unsigned short eh_sgl_alloc_index;
	unsigned short eh_sgl_free_index;
	unsigned short eh_sgl_hndl_avbl;
	struct sgl_handle **eh_sgl_hndl_base;
	spinlock_t io_sgl_lock;
	spinlock_t mgmt_sgl_lock;
	spinlock_t isr_lock;
	unsigned int age;
	unsigned short avlbl_cids;
	unsigned short cid_alloc;
	unsigned short cid_free;
	struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
	struct list_head hba_queue;
	unsigned short *cid_array;
	struct iscsi_endpoint **ep_array;
	struct Scsi_Host *shost;
	struct {
		/**
		 * group together since they are used most frequently
		 * for cid to cri conversion
		 */
		unsigned int iscsi_cid_start;
		unsigned int phys_port;

		unsigned int isr_offset;
		unsigned int iscsi_icd_start;
		unsigned int iscsi_cid_count;
		unsigned int iscsi_icd_count;
		unsigned int pci_function;

		unsigned short cid_alloc;
		unsigned short cid_free;
		unsigned short avlbl_cids;
		spinlock_t cid_lock;
	} fw_config;

	u8 mac_address[ETH_ALEN];
	unsigned short todo_cq;
	unsigned short todo_mcc_cq;
	char wq_name[20];
	struct workqueue_struct *wq;	/* The actual work queue */
	struct work_struct work_cqs;	/* The work being queued */
	struct be_ctrl_info ctrl;
};
329
/* Per-session driver state: currently only the DMA pool for BHS buffers. */
330struct beiscsi_session {
331 struct pci_pool *bhs_pool;
332};
333
334/**
335 * struct beiscsi_conn - iscsi connection structure
336 */
337struct beiscsi_conn {
338 struct iscsi_conn *conn; /* libiscsi connection */
339 struct beiscsi_hba *phba; /* owning adapter */
340 u32 exp_statsn;
341 u32 beiscsi_conn_cid;
342 struct beiscsi_endpoint *ep;
343 unsigned short login_in_progress;
344 struct sgl_handle *plogin_sgl_handle; /* SGL reserved for login PDUs */
345 struct beiscsi_session *beiscsi_sess;
346};
347
/* iSCSI Data-Out BHS as 12 raw dwords; bit layout described by the amap
 * pseudo-struct that follows. */
348/* This structure is used by the chip */
349struct pdu_data_out {
350 u32 dw[12];
351};
352/**
353 * Pseudo amap definition in which each bit of the actual structure is defined
354 * as a byte: used to calculate offset/shift/mask of each field
355 */
356struct amap_pdu_data_out {
357 u8 opcode[6]; /* opcode */
358 u8 rsvd0[2]; /* should be 0 */
359 u8 rsvd1[7];
360 u8 final_bit; /* F bit */
361 u8 rsvd2[16];
362 u8 ahs_length[8]; /* no AHS */
363 u8 data_len_hi[8];
364 u8 data_len_lo[16]; /* DataSegmentLength */
365 u8 lun[64];
366 u8 itt[32]; /* ITT; initiator task tag */
367 u8 ttt[32]; /* TTT; valid for R2T or 0xffffffff */
368 u8 rsvd3[32];
369 u8 exp_stat_sn[32];
370 u8 rsvd4[32];
371 u8 data_sn[32];
372 u8 buffer_offset[32];
373 u8 rsvd5[32];
374};
375
/* BHS buffer DMA'ed to the chip for SCSI commands: iSCSI CMD header,
 * padding, a Data-Out PDU template, then pad up to the sense area. */
376struct be_cmd_bhs {
377 struct iscsi_cmd iscsi_hdr;
378 unsigned char pad1[16];
379 struct pdu_data_out iscsi_data_pdu;
380 unsigned char pad2[BE_SENSE_INFO_SIZE -
381 sizeof(struct pdu_data_out)];
382};
383
/* Per-task driver context: the WRB and SGL handles in flight for this
 * command plus the DMA-able BHS buffer. */
384struct beiscsi_io_task {
385 struct wrb_handle *pwrb_handle; /* WRB slot carrying this task */
386 struct sgl_handle *psgl_handle; /* SGL slot for the data buffer */
387 struct beiscsi_conn *conn;
388 struct scsi_cmnd *scsi_cmnd;
389 unsigned int cmd_sn;
390 unsigned int flags;
391 unsigned short cid;
392 unsigned short header_len;
393
394 struct be_cmd_bhs *cmd_bhs; /* virtual address of BHS buffer */
395 struct be_bus_address bhs_pa; /* bus address of cmd_bhs */
396 unsigned short bhs_len;
397};
398
/* Same layout as be_cmd_bhs but with a generic iscsi_hdr for non-I/O PDUs. */
399struct be_nonio_bhs {
400 struct iscsi_hdr iscsi_hdr;
401 unsigned char pad1[16];
402 struct pdu_data_out iscsi_data_pdu;
403 unsigned char pad2[BE_SENSE_INFO_SIZE -
404 sizeof(struct pdu_data_out)];
405};
406
/* Response BHS: iSCSI header plus the sense area filled by the hardware. */
407struct be_status_bhs {
408 struct iscsi_cmd iscsi_hdr;
409 unsigned char pad1[16];
410 /**
411 * The plus 2 below is to hold the sense info length that gets
412 * DMA'ed by RxULP
413 */
414 unsigned char sense_info[BE_SENSE_INFO_SIZE];
415 /* NOTE(review): the comment above mentions a "+2" that does not appear
416 * in the declaration -- either the comment is stale or
417 * BE_SENSE_INFO_SIZE already accounts for the length prefix; confirm. */
};
416
/* Hardware SGE (4 dwords) and TCP-offload parameter dwords, each with its
 * amap bit-layout description and flag masks. */
417struct iscsi_sge {
418 u32 dw[4];
419};
420
421/**
422 * Pseudo amap definition in which each bit of the actual structure is defined
423 * as a byte: used to calculate offset/shift/mask of each field
424 */
425struct amap_iscsi_sge {
426 u8 addr_hi[32];
427 u8 addr_lo[32];
428 u8 sge_offset[22]; /* DWORD 2 */
429 u8 rsvd0[9]; /* DWORD 2 */
430 u8 last_sge; /* DWORD 2 */
431 u8 len[17]; /* DWORD 3 */
432 u8 rsvd1[15]; /* DWORD 3 */
433};
434
435struct beiscsi_offload_params {
436 u32 dw[5];
437};
438
439#define OFFLD_PARAMS_ERL 0x00000003
440#define OFFLD_PARAMS_DDE 0x00000004
441#define OFFLD_PARAMS_HDE 0x00000008
442#define OFFLD_PARAMS_IR2T 0x00000010
443#define OFFLD_PARAMS_IMD 0x00000020
444
445/**
446 * Pseudo amap definition in which each bit of the actual structure is defined
447 * as a byte: used to calculate offset/shift/mask of each field
448 */
449struct amap_beiscsi_offload_params {
450 u8 max_burst_length[32];
451 u8 max_send_data_segment_length[32];
452 u8 first_burst_length[32];
453 u8 erl[2];
454 u8 dde[1];
455 u8 hde[1];
456 u8 ir2t[1];
457 u8 imd[1];
458 u8 pad[26];
459 u8 exp_statsn[32];
460};
461
462/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
463 struct beiscsi_hba *phba, struct sol_cqe *psol);*/
464
/* Tracks one buffer of the default (async) PDU rings: its bus/virtual
 * addresses and reassembly progress for the owning connection (cri). */
465struct async_pdu_handle {
466 struct list_head link;
467 struct be_bus_address pa;
468 void *pbuffer;
469 unsigned int consumed;
470 unsigned char index; /* position in the ring */
471 unsigned char is_header; /* header ring vs data ring entry */
472 unsigned short cri;
473 unsigned long buffer_len;
474};
475
/* Per-connection async-PDU reassembly state: a wait queue describing the
 * partially received PDU plus the busy header/data buffers. */
476struct hwi_async_entry {
477 struct {
478 unsigned char hdr_received;
479 unsigned char hdr_len;
480 unsigned short bytes_received;
481 unsigned int bytes_needed;
482 struct list_head list;
483 } wait_queue;
484
485 struct list_head header_busy_list;
486 struct list_head data_busy_list;
487};
488
489#define BE_MIN_ASYNC_ENTRIES 128
490
/* Context for the two default PDU rings (header and data): ring memory,
 * handle tables, producer/consumer state, and per-connection entries. */
491struct hwi_async_pdu_context {
492 struct {
493 struct be_bus_address pa_base;
494 void *va_base;
495 void *ring_base;
496 struct async_pdu_handle *handle_base;
497
498 unsigned int host_write_ptr;
499 unsigned int ep_read_ptr;
500 unsigned int writables;
501
502 unsigned int free_entries;
503 unsigned int busy_entries;
504 unsigned int buffer_size;
505 unsigned int num_entries;
506
507 struct list_head free_list;
508 } async_header;
509
510 struct {
511 struct be_bus_address pa_base;
512 void *va_base;
513 void *ring_base;
514 struct async_pdu_handle *handle_base;
515
516 unsigned int host_write_ptr;
517 unsigned int ep_read_ptr;
518 unsigned int writables;
519
520 unsigned int free_entries;
521 unsigned int busy_entries;
522 unsigned int buffer_size;
523 struct list_head free_list;
524 unsigned int num_entries;
525 } async_data;
526
527 /**
528 * This is a varying size list! Do not add anything
529 * after this entry!!
530 */
531 struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
532};
533
/* Default-PDU CQE (4 dwords) with its field masks and amap bit layout. */
534#define PDUCQE_CODE_MASK 0x0000003F
535#define PDUCQE_DPL_MASK 0xFFFF0000
536#define PDUCQE_INDEX_MASK 0x0000FFFF
537
538struct i_t_dpdu_cqe {
539 u32 dw[4];
540} __packed;
541
542/**
543 * Pseudo amap definition in which each bit of the actual structure is defined
544 * as a byte: used to calculate offset/shift/mask of each field
545 */
546struct amap_i_t_dpdu_cqe {
547 u8 db_addr_hi[32];
548 u8 db_addr_lo[32];
549 u8 code[6];
550 u8 cid[10];
551 u8 dpl[16];
552 u8 index[16];
553 u8 num_cons[10];
554 u8 rsvd0[4];
555 u8 final;
556 u8 valid;
557} __packed;
558
/* CQE/EQE field masks, the one-dword EQ entry and CQ doorbell formats,
 * each with its amap bit layout. */
559#define CQE_VALID_MASK 0x80000000
560#define CQE_CODE_MASK 0x0000003F
561#define CQE_CID_MASK 0x0000FFC0
562
563#define EQE_VALID_MASK 0x00000001
564#define EQE_MAJORCODE_MASK 0x0000000E
565#define EQE_RESID_MASK 0xFFFF0000
566
567struct be_eq_entry {
568 u32 dw[1];
569} __packed;
570
571/**
572 * Pseudo amap definition in which each bit of the actual structure is defined
573 * as a byte: used to calculate offset/shift/mask of each field
574 */
575struct amap_eq_entry {
576 u8 valid; /* DWORD 0 */
577 u8 major_code[3]; /* DWORD 0 */
578 u8 minor_code[12]; /* DWORD 0 */
579 u8 resource_id[16]; /* DWORD 0 */
580
581} __packed;
582
583struct cq_db {
584 u32 dw[1];
585} __packed;
586
587/**
588 * Pseudo amap definition in which each bit of the actual structure is defined
589 * as a byte: used to calculate offset/shift/mask of each field
590 */
591struct amap_cq_db {
592 u8 qid[10];
593 u8 event[1];
594 u8 rsvd0[5];
595 u8 num_popped[13];
596 u8 rearm[1];
597 u8 rsvd1[2];
598} __packed;
599
/* The 16-dword iSCSI work request block posted to the chip, with its amap
 * bit layout, plus related allocation/free prototypes. */
600void beiscsi_process_eq(struct beiscsi_hba *phba);
601
602
603struct iscsi_wrb {
604 u32 dw[16];
605} __packed;
606
607#define WRB_TYPE_MASK 0xF0000000
608
609/**
610 * Pseudo amap definition in which each bit of the actual structure is defined
611 * as a byte: used to calculate offset/shift/mask of each field
612 */
613struct amap_iscsi_wrb {
614 u8 lun[14]; /* DWORD 0 */
615 u8 lt; /* DWORD 0 */
616 u8 invld; /* DWORD 0 */
617 u8 wrb_idx[8]; /* DWORD 0 */
618 u8 dsp; /* DWORD 0 */
619 u8 dmsg; /* DWORD 0 */
620 u8 undr_run; /* DWORD 0 */
621 u8 over_run; /* DWORD 0 */
622 u8 type[4]; /* DWORD 0 */
623 u8 ptr2nextwrb[8]; /* DWORD 1 */
624 u8 r2t_exp_dtl[24]; /* DWORD 1 */
625 u8 sgl_icd_idx[12]; /* DWORD 2 */
626 u8 rsvd0[20]; /* DWORD 2 */
627 u8 exp_data_sn[32]; /* DWORD 3 */
628 u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
629 u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
630 u8 cmdsn_itt[32]; /* DWORD 6 */
631 u8 dif_ref_tag[32]; /* DWORD 7 */
632 u8 sge0_addr_hi[32]; /* DWORD 8 */
633 u8 sge0_addr_lo[32]; /* DWORD 9 */
634 u8 sge0_offset[22]; /* DWORD 10 */
635 u8 pbs; /* DWORD 10 */
636 u8 dif_mode[2]; /* DWORD 10 */
637 u8 rsvd1[6]; /* DWORD 10 */
638 u8 sge0_last; /* DWORD 10 */
639 u8 sge0_len[17]; /* DWORD 11 */
640 u8 dif_meta_tag[14]; /* DWORD 11 */
641 u8 sge0_in_ddr; /* DWORD 11 */
642 u8 sge1_addr_hi[32]; /* DWORD 12 */
643 u8 sge1_addr_lo[32]; /* DWORD 13 */
644 u8 sge1_r2t_offset[22]; /* DWORD 14 */
645 u8 rsvd2[9]; /* DWORD 14 */
646 u8 sge1_last; /* DWORD 14 */
647 u8 sge1_len[17]; /* DWORD 15 */
648 u8 ref_sgl_icd_idx[12]; /* DWORD 15 */
649 u8 rsvd3[2]; /* DWORD 15 */
650 u8 sge1_in_ddr; /* DWORD 15 */
651
652} __packed;
653
654struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
655 int index);
656void
657free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
658
/* NOP-Out PDU and the generic PDU base header, each with amap bit layout
 * and field masks for the common opcode/length dword. */
659struct pdu_nop_out {
660 u32 dw[12];
661};
662
663/**
664 * Pseudo amap definition in which each bit of the actual structure is defined
665 * as a byte: used to calculate offset/shift/mask of each field
666 */
667struct amap_pdu_nop_out {
668 u8 opcode[6]; /* opcode 0x00 */
669 u8 i_bit; /* I Bit */
670 u8 x_bit; /* reserved; should be 0 */
671 u8 fp_bit_filler1[7];
672 u8 f_bit; /* always 1 */
673 u8 reserved1[16];
674 u8 ahs_length[8]; /* no AHS */
675 u8 data_len_hi[8];
676 u8 data_len_lo[16]; /* DataSegmentLength */
677 u8 lun[64];
678 u8 itt[32]; /* initiator id for ping or 0xffffffff */
679 u8 ttt[32]; /* target id for ping or 0xffffffff */
680 u8 cmd_sn[32];
681 u8 exp_stat_sn[32];
682 u8 reserved5[128];
683};
684
685#define PDUBASE_OPCODE_MASK 0x0000003F
686#define PDUBASE_DATALENHI_MASK 0x0000FF00
687#define PDUBASE_DATALENLO_MASK 0xFFFF0000
688
689struct pdu_base {
690 u32 dw[16];
691} __packed;
692
693/**
694 * Pseudo amap definition in which each bit of the actual structure is defined
695 * as a byte: used to calculate offset/shift/mask of each field
696 */
697struct amap_pdu_base {
698 u8 opcode[6];
699 u8 i_bit; /* immediate bit */
700 u8 x_bit; /* reserved, always 0 */
701 u8 reserved1[24]; /* opcode-specific fields */
702 u8 ahs_length[8]; /* length units is 4 byte words */
703 u8 data_len_hi[8];
704 u8 data_len_lo[16]; /* DatasegmentLength */
705 u8 lun[64]; /* lun or opcode-specific fields */
706 u8 itt[32]; /* initiator task tag */
707 u8 reserved4[224];
709
/* WRB variant used to push updated connection/session offload context
 * (burst lengths, ERL, digest enables, etc.) to the chip. */
710struct iscsi_target_context_update_wrb {
711 u32 dw[16];
712} __packed;
713
714/**
715 * Pseudo amap definition in which each bit of the actual structure is defined
716 * as a byte: used to calculate offset/shift/mask of each field
717 */
718struct amap_iscsi_target_context_update_wrb {
719 u8 lun[14]; /* DWORD 0 */
720 u8 lt; /* DWORD 0 */
721 u8 invld; /* DWORD 0 */
722 u8 wrb_idx[8]; /* DWORD 0 */
723 u8 dsp; /* DWORD 0 */
724 u8 dmsg; /* DWORD 0 */
725 u8 undr_run; /* DWORD 0 */
726 u8 over_run; /* DWORD 0 */
727 u8 type[4]; /* DWORD 0 */
728 u8 ptr2nextwrb[8]; /* DWORD 1 */
729 u8 max_burst_length[19]; /* DWORD 1 */
730 u8 rsvd0[5]; /* DWORD 1 */
731 u8 rsvd1[15]; /* DWORD 2 */
732 u8 max_send_data_segment_length[17]; /* DWORD 2 */
733 u8 first_burst_length[14]; /* DWORD 3 */
734 u8 rsvd2[2]; /* DWORD 3 */
735 u8 tx_wrbindex_drv_msg[8]; /* DWORD 3 */
736 u8 rsvd3[5]; /* DWORD 3 */
737 u8 session_state[3]; /* DWORD 3 */
738 u8 rsvd4[16]; /* DWORD 4 */
739 u8 tx_jumbo; /* DWORD 4 */
740 u8 hde; /* DWORD 4 */
741 u8 dde; /* DWORD 4 */
742 u8 erl[2]; /* DWORD 4 */
743 u8 domain_id[5]; /* DWORD 4 */
744 u8 mode; /* DWORD 4 */
745 u8 imd; /* DWORD 4 */
746 u8 ir2t; /* DWORD 4 */
747 u8 notpredblq[2]; /* DWORD 4 */
748 u8 compltonack; /* DWORD 4 */
749 u8 stat_sn[32]; /* DWORD 5 */
750 u8 pad_buffer_addr_hi[32]; /* DWORD 6 */
751 u8 pad_buffer_addr_lo[32]; /* DWORD 7 */
752 u8 pad_addr_hi[32]; /* DWORD 8 */
753 u8 pad_addr_lo[32]; /* DWORD 9 */
754 u8 rsvd5[32]; /* DWORD 10 */
755 u8 rsvd6[32]; /* DWORD 11 */
756 u8 rsvd7[32]; /* DWORD 12 */
757 u8 rsvd8[32]; /* DWORD 13 */
758 u8 rsvd9[32]; /* DWORD 14 */
759 u8 rsvd10[32]; /* DWORD 15 */
760
761} __packed;
762
/* Generic descriptor-ring bookkeeping shared by the default PDU rings. */
763struct be_ring {
764 u32 pages; /* queue size in pages */
765 u32 id; /* queue id assigned by beklib */
766 u32 num; /* number of elements in queue */
767 u32 cidx; /* consumer index */
768 u32 pidx; /* producer index -- not used by most rings */
769 u32 item_size; /* size in bytes of one object */
770
771 void *va; /* The virtual address of the ring. This
772 * should be last to allow 32 & 64 bit debugger
773 * extensions to work.
774 */
775};
776
/* Per-connection WRB allocator: free/driver lists plus the handle tables
 * and circular alloc/free indices. */
777struct hwi_wrb_context {
778 struct list_head wrb_handle_list;
779 struct list_head wrb_handle_drvr_list;
780 struct wrb_handle **pwrb_handle_base;
781 struct wrb_handle **pwrb_handle_basestd;
782 struct iscsi_wrb *plast_wrb;
783 unsigned short alloc_index;
784 unsigned short free_index;
785 unsigned short wrb_handles_available;
786 unsigned short cid;
788
/* Top-level hardware-interface state: SGL free lists, per-connection WRB
 * contexts, default PDU rings and CQ error counters. */
789struct hwi_controller {
790 struct list_head io_sgl_list;
791 struct list_head eh_sgl_list;
792 struct sgl_handle *psgl_handle_base;
793 unsigned int wrb_mem_index;
794
795 struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
796 struct mcc_wrb *pmcc_wrb_base;
797 struct be_ring default_pdu_hdr;
798 struct be_ring default_pdu_data;
799 struct hwi_context_memory *phwi_ctxt;
 /* indexed by CQE error code; sized by the highest code + 1 */
800 unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
801};
802
/* Kind of operation a wrb_handle is carrying; drives completion handling. */
803enum hwh_type_enum {
804 HWH_TYPE_IO = 1,
805 HWH_TYPE_LOGOUT = 2,
806 HWH_TYPE_TMF = 3,
807 HWH_TYPE_NOP = 4,
808 HWH_TYPE_IO_RD = 5,
809 HWH_TYPE_LOGIN = 11,
810 HWH_TYPE_INVALID = 0xFFFFFFFF
811};
812
/* Driver-side handle for one WRB slot: its type, ring linkage and the
 * iscsi task it belongs to. */
813struct wrb_handle {
814 enum hwh_type_enum type;
815 unsigned short wrb_index;
816 unsigned short nxt_wrb_index;
817
818 struct iscsi_task *pio_handle;
819 struct iscsi_wrb *pwrb;
820};
821
/* All hardware queue objects owned by the driver: EQ, CQs, MCC, default
 * PDU queues, per-session WRB queues and the async PDU context. */
822struct hwi_context_memory {
823 struct be_eq_obj be_eq;
824 struct be_queue_info be_cq;
825 struct be_queue_info be_mcc_cq;
826 struct be_queue_info be_mcc;
827
828 struct be_queue_info be_def_hdrq;
829 struct be_queue_info be_def_dataq;
830
831 struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
832 struct be_mcc_wrb_context *pbe_mcc_context;
833
834 struct hwi_async_pdu_context *pasync_ctx;
835};
836
837#endif
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
new file mode 100644
index 000000000000..12e644fc746e
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -0,0 +1,321 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20
21#include "be_mgmt.h"
22#include "be_iscsi.h"
23
/*
 * Query the firmware configuration (phys port, ICD and CID ranges for
 * ULP0) over the embedded mailbox and cache it in phba->fw_config.
 * Returns the be_mbox_notify() status (0 on success).
 * NOTE(review): status is an int but the return type is unsigned char,
 * so a negative error code is truncated -- confirm callers only test
 * for non-zero.
 */
24unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
25 struct beiscsi_hba *phba)
26{
27 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
28 struct be_fw_cfg *req = embedded_payload(wrb);
29 int status = 0;
30
31 spin_lock(&ctrl->mbox_lock);
32 memset(wrb, 0, sizeof(*wrb));
33
34 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
35
36 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
37 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
38
39 status = be_mbox_notify(ctrl);
40 if (!status) {
41 struct be_fw_cfg *pfw_cfg;
42 pfw_cfg = req; /* response overlays the request payload */
43 phba->fw_config.phys_port = pfw_cfg->phys_port;
44 phba->fw_config.iscsi_icd_start =
45 pfw_cfg->ulp[0].icd_base;
46 phba->fw_config.iscsi_icd_count =
47 pfw_cfg->ulp[0].icd_count;
48 phba->fw_config.iscsi_cid_start =
49 pfw_cfg->ulp[0].sq_base;
50 phba->fw_config.iscsi_cid_count =
51 pfw_cfg->ulp[0].sq_count;
52 } else {
53 shost_printk(KERN_WARNING, phba->shost,
54 "Failed in mgmt_get_fw_config \n");
55 }
56
57 spin_unlock(&ctrl->mbox_lock);
58 return status;
59}
60
/*
 * Read the controller attributes (firmware/flash versions) via a
 * non-embedded mailbox command and log them; no version check is
 * actually enforced yet.
 * Returns 0 on success; -1 (truncated to 255 by the unsigned char
 * return type -- NOTE(review)) on allocation failure, else the
 * mailbox status.
 * NOTE(review): pci_free_consistent() is called while ctrl->mbox_lock
 * is held; DMA frees in atomic context are generally unsafe -- confirm
 * this should move after spin_unlock().
 */
61unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
62{
63 struct be_dma_mem nonemb_cmd;
64 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
65 struct be_mgmt_controller_attributes *req;
66 struct be_sge *sge = nonembedded_sgl(wrb);
67 int status = 0;
68
69 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
70 sizeof(struct be_mgmt_controller_attributes),
71 &nonemb_cmd.dma);
72 if (nonemb_cmd.va == NULL) {
73 SE_DEBUG(DBG_LVL_1,
74 "Failed to allocate memory for mgmt_check_supported_fw"
75 "\n");
76 return -1;
77 }
78 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
79 req = nonemb_cmd.va;
80 spin_lock(&ctrl->mbox_lock);
81 memset(wrb, 0, sizeof(*wrb));
82 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
83 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
84 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
 /* point the WRB's single SGE at the DMA buffer */
85 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
86 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
87 sge->len = cpu_to_le32(nonemb_cmd.size);
88
89 status = be_mbox_notify(ctrl);
90 if (!status) {
91 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
92 SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
93 resp->params.hba_attribs.flashrom_version_string);
94 SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
95 resp->params.hba_attribs.firmware_version_string);
96 SE_DEBUG(DBG_LVL_8,
97 "Developer Build, not performing version check...\n");
98
99 } else
100 SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
 /* nonemb_cmd.va is known non-NULL here; the guard is redundant */
101 if (nonemb_cmd.va)
102 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
103 nonemb_cmd.va, nonemb_cmd.dma);
104
105 spin_unlock(&ctrl->mbox_lock);
106 return status;
107}
108
/*
 * Ask the firmware to clean up iSCSI state for the given chute.
 * hdr/data ring ids are sent as 0 (firmware-defined default rings).
 * Returns the mailbox status (0 on success); negative values are
 * truncated by the unsigned char return type -- NOTE(review).
 */
109unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
110{
111 struct be_ctrl_info *ctrl = &phba->ctrl;
112 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
113 struct iscsi_cleanup_req *req = embedded_payload(wrb);
114 int status = 0;
115
116 spin_lock(&ctrl->mbox_lock);
117 memset(wrb, 0, sizeof(*wrb));
118
119 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
120 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
121 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
122
123 req->chute = chute;
124 req->hdr_ring_id = 0;
125 req->data_ring_id = 0;
126
127 status = be_mbox_notify(ctrl);
128 if (status)
129 shost_printk(KERN_WARNING, phba->shost,
130 " mgmt_epfw_cleanup , FAILED\n");
131 spin_unlock(&ctrl->mbox_lock);
132 return status;
133}
134
/*
 * Ask the firmware to invalidate one outstanding command (icd) on
 * connection cid, via a non-embedded mailbox command.
 * Returns 0 on success; -1 (truncated to 255 by the unsigned char
 * return type -- NOTE(review)) on allocation failure, else the
 * mailbox status.
 * NOTE(review): req->icd_count is left at 0 even though table[0] is
 * filled in -- the firmware may see an empty invalidation list;
 * confirm whether the count should be 1 (i.e. incremented after the
 * entry is written).
 */
135unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
136 unsigned int icd, unsigned int cid)
137{
138 struct be_dma_mem nonemb_cmd;
139 struct be_ctrl_info *ctrl = &phba->ctrl;
140 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
141 struct be_sge *sge = nonembedded_sgl(wrb);
142 struct invalidate_commands_params_in *req;
143 int status = 0;
144
145 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
146 sizeof(struct invalidate_commands_params_in),
147 &nonemb_cmd.dma);
148 if (nonemb_cmd.va == NULL) {
149 SE_DEBUG(DBG_LVL_1,
150 "Failed to allocate memory for"
151 "mgmt_invalidate_icds \n");
152 return -1;
153 }
154 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
155 req = nonemb_cmd.va;
156 spin_lock(&ctrl->mbox_lock);
157 memset(wrb, 0, sizeof(*wrb));
158
159 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
160 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
161 OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
162 sizeof(*req));
163 req->ref_handle = 0;
164 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
165 req->icd_count = 0;
166 req->table[req->icd_count].icd = icd;
167 req->table[req->icd_count].cid = cid;
 /* point the WRB's single SGE at the DMA buffer */
168 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
169 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
170 sge->len = cpu_to_le32(nonemb_cmd.size);
171
172 status = be_mbox_notify(ctrl);
173 if (status)
174 SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
175 spin_unlock(&ctrl->mbox_lock);
 /* nonemb_cmd.va is known non-NULL here; the guard is redundant */
176 if (nonemb_cmd.va)
177 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
178 nonemb_cmd.va, nonemb_cmd.dma);
179 return status;
180}
181
/*
 * Ask the firmware to invalidate connection cid, optionally issuing a
 * TCP RST, and optionally asking firmware to save its config
 * (savecfg_flag). Returns the mailbox status (0 on success); negative
 * values are truncated by the unsigned char return type -- NOTE(review).
 */
182unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
183 struct beiscsi_endpoint *beiscsi_ep,
184 unsigned short cid,
185 unsigned short issue_reset,
186 unsigned short savecfg_flag)
187{
188 struct be_ctrl_info *ctrl = &phba->ctrl;
189 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
190 struct iscsi_invalidate_connection_params_in *req =
191 embedded_payload(wrb);
192 int status = 0;
193
194 spin_lock(&ctrl->mbox_lock);
195 memset(wrb, 0, sizeof(*wrb));
196
197 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
198 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
199 OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
200 sizeof(*req));
201 req->session_handle = beiscsi_ep->fw_handle;
202 req->cid = cid;
203 if (issue_reset)
204 req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
205 else
206 req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
207 req->save_cfg = savecfg_flag;
208 status = be_mbox_notify(ctrl);
209 if (status)
210 SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
211
212 spin_unlock(&ctrl->mbox_lock);
213 return status;
214}
215
/*
 * Request a TCP "upload" (teardown/handover of the offloaded connection)
 * for the given cid. upload_flag selects the upload type.
 * Returns the mailbox status (0 on success); negative values are
 * truncated by the unsigned char return type -- NOTE(review).
 */
216unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
217 unsigned short cid, unsigned int upload_flag)
218{
219 struct be_ctrl_info *ctrl = &phba->ctrl;
220 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
221 struct tcp_upload_params_in *req = embedded_payload(wrb);
222 int status = 0;
223
224 spin_lock(&ctrl->mbox_lock);
225 memset(wrb, 0, sizeof(*wrb));
226
227 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
228 be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
229 OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
230 req->id = (unsigned short)cid;
231 req->upload_type = (unsigned char)upload_flag;
232 status = be_mbox_notify(ctrl);
233 if (status)
234 SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
235 spin_unlock(&ctrl->mbox_lock);
236 return status;
237}
238
/*
 * Build and issue the TCP_CONNECT_AND_OFFLOAD mailbox command: fill in
 * the destination IPv4/IPv6 address and port, the CQ/default-queue ids
 * and the Data-Out PDU template address, then mark the endpoint valid
 * on success. Returns 0 on success, -EINVAL for an unknown address
 * family, else the mailbox status.
 */
239int mgmt_open_connection(struct beiscsi_hba *phba,
240 struct sockaddr *dst_addr,
241 struct beiscsi_endpoint *beiscsi_ep)
242{
243 struct hwi_controller *phwi_ctrlr;
244 struct hwi_context_memory *phwi_context;
245 struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
246 struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
247 struct be_ctrl_info *ctrl = &phba->ctrl;
248 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
249 struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
250 unsigned short def_hdr_id;
251 unsigned short def_data_id;
252 struct phys_addr template_address = { 0, 0 };
253 struct phys_addr *ptemplate_address;
254 int status = 0;
255 unsigned short cid = beiscsi_ep->ep_cid;
256
257 phwi_ctrlr = phba->phwi_ctrlr;
258 phwi_context = phwi_ctrlr->phwi_ctxt;
259 def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
260 def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
261
262 ptemplate_address = &template_address;
263 ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
264 spin_lock(&ctrl->mbox_lock);
265 memset(wrb, 0, sizeof(*wrb));
266
267 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
268 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
269 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
270 sizeof(*req));
271 if (dst_addr->sa_family == PF_INET) {
272 __be32 s_addr = daddr_in->sin_addr.s_addr;
273 req->ip_address.ip_type = BE2_IPV4;
 /* split the big-endian address into bytes, lowest byte first */
274 req->ip_address.ip_address[0] = s_addr & 0x000000ff;
275 req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
276 req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
277 req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
278 req->tcp_port = ntohs(daddr_in->sin_port);
279 beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
280 beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
281 beiscsi_ep->ip_type = BE2_IPV4;
282 } else if (dst_addr->sa_family == PF_INET6) {
283 req->ip_address.ip_type = BE2_IPV6;
284 memcpy(&req->ip_address.ip_address,
285 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
286 req->tcp_port = ntohs(daddr_in6->sin6_port);
287 beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
288 memcpy(&beiscsi_ep->dst6_addr,
289 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
290 beiscsi_ep->ip_type = BE2_IPV6;
291 } else{
292 shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
293 dst_addr->sa_family);
294 spin_unlock(&ctrl->mbox_lock);
295 return -EINVAL;
296
297 }
298 req->cid = cid;
299 req->cq_id = phwi_context->be_cq.id;
300 req->defq_id = def_hdr_id;
301 req->hdr_ring_id = def_hdr_id;
302 req->data_ring_id = def_data_id;
303 req->do_offload = 1;
304 req->dataout_template_pa.lo = ptemplate_address->lo;
305 req->dataout_template_pa.hi = ptemplate_address->hi;
306 status = be_mbox_notify(ctrl);
307 if (!status) {
308 struct iscsi_endpoint *ep;
 /* response overlays the request payload in the mailbox */
309 struct tcp_connect_and_offload_out *ptcpcnct_out =
310 embedded_payload(wrb);
311
312 ep = phba->ep_array[ptcpcnct_out->cid];
313 beiscsi_ep = ep->dd_data;
 /* NOTE(review): fw_handle is hard-coded to 0 instead of being taken
 * from the firmware response -- confirm the response's connection
 * handle should be stored here for later invalidate/upload calls. */
314 beiscsi_ep->fw_handle = 0;
315 beiscsi_ep->cid_vld = 1;
316 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
317 } else
318 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
319 spin_unlock(&ctrl->mbox_lock);
320 return status;
321}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
new file mode 100644
index 000000000000..00e816ee8070
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -0,0 +1,249 @@
1/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20
21#ifndef _BEISCSI_MGMT_
22#define _BEISCSI_MGMT_
23
24#include <linux/types.h>
25#include <linux/list.h>
26#include "be_iscsi.h"
27#include "be_main.h"
28
/* MCC WRB wire formats: SGE, payload union (SG list or embedded data)
 * and the WRB itself, each preceded by its amap bit-layout description. */
29/**
30 * Pseudo amap definition in which each bit of the actual structure is defined
31 * as a byte: used to calculate offset/shift/mask of each field
32 */
33struct amap_mcc_sge {
34 u8 pa_lo[32]; /* dword 0 */
35 u8 pa_hi[32]; /* dword 1 */
36 u8 length[32]; /* DWORD 2 */
37} __packed;
38
39/**
40 * Pseudo amap definition in which each bit of the actual structure is defined
41 * as a byte: used to calculate offset/shift/mask of each field
42 */
43struct amap_mcc_wrb_payload {
44 union {
45 struct amap_mcc_sge sgl[19];
46 u8 embedded[59 * 32]; /* DWORDS 57 to 115 */
47 } u;
48} __packed;
49
50/**
51 * Pseudo amap definition in which each bit of the actual structure is defined
52 * as a byte: used to calculate offset/shift/mask of each field
53 */
54struct amap_mcc_wrb {
55 u8 embedded; /* DWORD 0 */
56 u8 rsvd0[2]; /* DWORD 0 */
57 u8 sge_count[5]; /* DWORD 0 */
58 u8 rsvd1[16]; /* DWORD 0 */
59 u8 special[8]; /* DWORD 0 */
60 u8 payload_length[32];
61 u8 tag[64]; /* DWORD 2 */
62 u8 rsvd2[32]; /* DWORD 4 */
63 struct amap_mcc_wrb_payload payload;
64};
65
66struct mcc_sge {
67 u32 pa_lo; /* dword 0 */
68 u32 pa_hi; /* dword 1 */
69 u32 length; /* DWORD 2 */
70} __packed;
71
72struct mcc_wrb_payload {
73 union {
74 struct mcc_sge sgl[19];
75 u32 embedded[59]; /* DWORDS 57 to 115 */
76 } u;
77} __packed;
78
79#define MCC_WRB_EMBEDDED_MASK 0x00000001
80
81struct mcc_wrb {
 /* NOTE(review): dw is a zero-length array, so it occupies no storage and
 * payload_length sits at offset 0 -- confirm whether a flags dword
 * (embedded/sge_count per amap_mcc_wrb) is missing from this layout. */
82 u32 dw[0]; /* DWORD 0 */
83 u32 payload_length;
84 u32 tag[2]; /* DWORD 2 */
85 u32 rsvd2[1]; /* DWORD 4 */
86 struct mcc_wrb_payload payload;
87};
88
89unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
90int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr,
91 struct beiscsi_endpoint *beiscsi_ep);
92
93unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
94 unsigned short cid,
95 unsigned int upload_flag);
96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
97 unsigned int icd, unsigned int cid);
98
/* Request/response wire formats for the connection-invalidate and
 * command-invalidate (ICD) firmware operations. */
99struct iscsi_invalidate_connection_params_in {
100 struct be_cmd_req_hdr hdr;
101 unsigned int session_handle;
102 unsigned short cid;
103 unsigned short unused;
104 unsigned short cleanup_type;
105 unsigned short save_cfg;
106} __packed;
107
108struct iscsi_invalidate_connection_params_out {
109 unsigned int session_handle;
110 unsigned short cid;
111 unsigned short unused;
112} __packed;
113
114union iscsi_invalidate_connection_params {
115 struct iscsi_invalidate_connection_params_in request;
116 struct iscsi_invalidate_connection_params_out response;
117} __packed;
118
119struct invalidate_command_table {
120 unsigned short icd;
121 unsigned short cid;
122} __packed;
123
124struct invalidate_commands_params_in {
125 struct be_cmd_req_hdr hdr;
126 unsigned int ref_handle;
127 unsigned int icd_count; /* number of valid entries in table[] */
128 struct invalidate_command_table table[128];
129 unsigned short cleanup_type;
130 unsigned short unused;
131} __packed;
132
133struct invalidate_commands_params_out {
134 unsigned int ref_handle;
135 unsigned int icd_count;
136 unsigned int icd_status[128];
137} __packed;
138
139union invalidate_commands_params {
140 struct invalidate_commands_params_in request;
141 struct invalidate_commands_params_out response;
142} __packed;
143
/* Controller attributes returned by OPCODE_COMMON_GET_CNTL_ATTRIBUTES:
 * version strings, PCI identity and capability info. */
144struct mgmt_hba_attributes {
145 u8 flashrom_version_string[32];
146 u8 manufacturer_name[32];
147 u32 supported_modes;
148 u8 seeprom_version_lo;
149 u8 seeprom_version_hi;
150 u8 rsvd0[2];
151 u32 fw_cmd_data_struct_version;
152 u32 ep_fw_data_struct_version;
153 u32 future_reserved[12];
154 u32 default_extended_timeout;
155 u8 controller_model_number[32];
156 u8 controller_description[64];
157 u8 controller_serial_number[32];
158 u8 ip_version_string[32];
159 u8 firmware_version_string[32];
160 u8 bios_version_string[32];
161 u8 redboot_version_string[32];
162 u8 driver_version_string[32];
163 u8 fw_on_flash_version_string[32];
164 u32 functionalities_supported;
165 u16 max_cdblength;
166 u8 asic_revision;
167 u8 generational_guid[16];
168 u8 hba_port_count;
169 u16 default_link_down_timeout;
170 u8 iscsi_ver_min_max;
171 u8 multifunction_device;
172 u8 cache_valid;
173 u8 hba_status;
174 u8 max_domains_supported;
175 u8 phy_port;
176 u32 firmware_post_status;
177 u32 hba_mtu[8];
178 u32 future_u32[4];
179} __packed;
180
181struct mgmt_controller_attributes {
182 struct mgmt_hba_attributes hba_attribs;
183 u16 pci_vendor_id;
184 u16 pci_device_id;
185 u16 pci_sub_vendor_id;
186 u16 pci_sub_system_id;
187 u8 pci_bus_number;
188 u8 pci_device_number;
189 u8 pci_function_number;
190 u8 interface_type;
191 u64 unique_identifier;
192 u8 netfilters;
193 u8 rsvd0[3];
194 u8 future_u32[4];
195} __packed;
196
197struct be_mgmt_controller_attributes {
198 struct be_cmd_req_hdr hdr;
199 struct mgmt_controller_attributes params;
200} __packed;
201
202struct be_mgmt_controller_attributes_resp {
203 struct be_cmd_resp_hdr hdr;
204 struct mgmt_controller_attributes params;
205} __packed;
206
207/* configuration management */
208
209#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
210
211/* MGMT CMD flags */
212
213#define MGMT_CMDH_FREE (1<<0)
214
215/* --- MGMT_ERROR_CODES --- */
216/* Error Codes returned in the status field of the CMD response header */
217#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */
218#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */
219 /* the CMD_RESPONSE_HEADER */
220
/*
 * Fetch the 32-bit halves of the global Data-Out PDU template's bus address
 * (first mem_array entry of the ISCSI_MEM_GLOBAL_HEADER region) into *(pa).
 * Fix: the macro body referenced the caller's variable 'phba' instead of
 * its own 'pc' parameter -- it only worked because every call site happens
 * to pass a variable named phba. Use the parameter and parenthesize both
 * arguments for macro hygiene; behavior at existing call sites is unchanged.
 */
221#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
222 (pa)->lo = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
223 bus_address.u.a32.address_lo; \
224 (pa)->hi = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
225 bus_address.u.a32.address_hi; \
226}
227
/* Driver endpoint state for one offloaded TCP connection: destination
 * address, the firmware CID/handle and validity flag. */
228struct beiscsi_endpoint {
229 struct beiscsi_hba *phba;
230 struct beiscsi_sess *sess;
231 struct beiscsi_conn *conn;
232 unsigned short ip_type; /* BE2_IPV4 or BE2_IPV6 */
233 char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
234 unsigned long dst_addr; /* IPv4 destination (network order) */
235 unsigned short ep_cid;
236 unsigned int fw_handle; /* firmware connection handle */
237 u16 dst_tcpport;
238 u16 cid_vld; /* non-zero once the CID is offloaded */
239};
240
241unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
242 struct beiscsi_hba *phba);
243
244unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
245 struct beiscsi_endpoint *beiscsi_ep,
246 unsigned short cid,
247 unsigned short issue_reset,
248 unsigned short savecfg_flag);
249#endif
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
new file mode 100644
index 000000000000..1d6009490d1c
--- /dev/null
+++ b/drivers/scsi/bfa/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
4
5bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o
6bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
9bfa-y += bfa_csdebug.o bfa_sm.o plog.o
10
11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
14
15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna
diff --git a/drivers/scsi/bfa/bfa_callback_priv.h b/drivers/scsi/bfa/bfa_callback_priv.h
new file mode 100644
index 000000000000..1e3265c9f7d4
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_callback_priv.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CALLBACK_PRIV_H__
19#define __BFA_CALLBACK_PRIV_H__
20
21#include <cs/bfa_q.h>
22
23typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
24
25/**
26 * Generic BFA callback element.
27 */
28struct bfa_cb_qe_s {
29 struct list_head qe;
30 bfa_cb_cbfn_t cbfn;
31 bfa_boolean_t once;
32 u32 rsvd;
33 void *cbarg;
34};
35
/* Queue a callback element on the BFA completion queue. */
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
	(__hcb_qe)->cbfn = (__cbfn);      \
	(__hcb_qe)->cbarg = (__cbarg);      \
	list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
} while (0)

/* Remove a callback element from whichever queue it is linked on. */
#define bfa_cb_dequeue(__hcb_qe)	list_del(&(__hcb_qe)->qe)

/*
 * Queue a callback element at most once until bfa_cb_queue_done() clears
 * the "once" latch.
 *
 * BUG FIX: the original passed (__hcb_qe) -- a struct bfa_cb_qe_s * --
 * to list_add_tail(), which takes a struct list_head *. It only appeared
 * to work because "qe" is the first member. Link the embedded list node,
 * matching bfa_cb_queue() above.
 */
#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
	(__hcb_qe)->cbfn = (__cbfn);      \
	(__hcb_qe)->cbarg = (__cbarg);      \
	if (!(__hcb_qe)->once) {      \
		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
		(__hcb_qe)->once = BFA_TRUE;		\
	}		\
} while (0)

/* Re-arm a "queue once" element so it may be queued again. */
#define bfa_cb_queue_done(__hcb_qe) do {	\
	(__hcb_qe)->once = BFA_FALSE;		\
} while (0)
56
57#endif /* __BFA_CALLBACK_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
new file mode 100644
index 000000000000..0050c838c358
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -0,0 +1,205 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cb_ioim_macros.h BFA IOIM driver interface macros.
20 */
21
22#ifndef __BFA_HCB_IOIM_MACROS_H__
23#define __BFA_HCB_IOIM_MACROS_H__
24
25#include <bfa_os_inc.h>
26/*
27 * #include <linux/dma-mapping.h>
28 *
29 * #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
30 * <scsi/scsi_device.h> #include <scsi/scsi_host.h>
31 */
32#include "bfad_im_compat.h"
33
34/*
35 * task attribute values in FCP-2 FCP_CMND IU
36 */
37#define SIMPLE_Q 0
38#define HEAD_OF_Q 1
39#define ORDERED_Q 2
40#define ACA_Q 4
41#define UNTAGGED 5
42
43static inline lun_t
44bfad_int_to_lun(u32 luno)
45{
46 union {
47 u16 scsi_lun[4];
48 lun_t bfa_lun;
49 } lun;
50
51 lun.bfa_lun = 0;
52 lun.scsi_lun[0] = bfa_os_htons(luno);
53
54 return (lun.bfa_lun);
55}
56
57/**
58 * Get LUN for the I/O request
59 */
60#define bfa_cb_ioim_get_lun(__dio) \
61 bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
62
63/**
64 * Get CDB for the I/O request
65 */
66static inline u8 *
67bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
68{
69 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
70
71 return ((u8 *) cmnd->cmnd);
72}
73
74/**
75 * Get I/O direction (read/write) for the I/O request
76 */
77static inline enum fcp_iodir
78bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
79{
80 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
81 enum dma_data_direction dmadir;
82
83 dmadir = cmnd->sc_data_direction;
84 if (dmadir == DMA_TO_DEVICE)
85 return FCP_IODIR_WRITE;
86 else if (dmadir == DMA_FROM_DEVICE)
87 return FCP_IODIR_READ;
88 else
89 return FCP_IODIR_NONE;
90}
91
92/**
93 * Get IO size in bytes for the I/O request
94 */
95static inline u32
96bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
97{
98 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
99
100 return (scsi_bufflen(cmnd));
101}
102
103/**
104 * Get timeout for the I/O request
105 */
106static inline u8
107bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
108{
109 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
110 /*
111 * TBD: need a timeout for scsi passthru
112 */
113 if (cmnd->device->host == NULL)
114 return 4;
115
116 return 0;
117}
118
119/**
120 * Get SG element for the I/O request given the SG element index
121 */
122static inline union bfi_addr_u
123bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
124{
125 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
126 struct scatterlist *sge;
127 u64 addr;
128
129 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
130 addr = (u64) sg_dma_address(sge);
131
132 return (*(union bfi_addr_u *) &addr);
133}
134
135static inline u32
136bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
137{
138 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
139 struct scatterlist *sge;
140 u32 len;
141
142 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
143 len = sg_dma_len(sge);
144
145 return len;
146}
147
148/**
149 * Get Command Reference Number for the I/O request. 0 if none.
150 */
151static inline u8
152bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
153{
154 return 0;
155}
156
157/**
158 * Get SAM-3 priority for the I/O request. 0 is default.
159 */
160static inline u8
161bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
162{
163 return 0;
164}
165
166/**
167 * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
168 */
169static inline u8
170bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
171{
172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
173 u8 task_attr = UNTAGGED;
174
175 if (cmnd->device->tagged_supported) {
176 switch (cmnd->tag) {
177 case HEAD_OF_QUEUE_TAG:
178 task_attr = HEAD_OF_Q;
179 break;
180 case ORDERED_QUEUE_TAG:
181 task_attr = ORDERED_Q;
182 break;
183 default:
184 task_attr = SIMPLE_Q;
185 break;
186 }
187 }
188
189 return task_attr;
190}
191
192/**
193 * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
194 */
195static inline u8
196bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
197{
198 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
199
200 return (cmnd->cmd_len);
201}
202
203
204
205#endif /* __BFA_HCB_IOIM_MACROS_H__ */
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
new file mode 100644
index 000000000000..7a959c34e789
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cee.c
@@ -0,0 +1,492 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <defs/bfa_defs_cee.h>
19#include <cs/bfa_trc.h>
20#include <cs/bfa_log.h>
21#include <cs/bfa_debug.h>
22#include <cee/bfa_cee.h>
23#include <bfi/bfi_cee.h>
24#include <bfi/bfi.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27
28BFA_TRC_FILE(CNA, CEE);
29
30#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
31#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32
33static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
34static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
35 *dcbcx_stats);
36static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
37 *lldp_stats);
38static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
39static void bfa_cee_format_cee_cfg(void *buffer);
40static void bfa_cee_format_cee_stats(void *buffer);
41
42static void
43bfa_cee_format_cee_stats(void *buffer)
44{
45 struct bfa_cee_stats_s *cee_stats = buffer;
46 bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
47 bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
48 bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
49}
50
51static void
52bfa_cee_format_cee_cfg(void *buffer)
53{
54 struct bfa_cee_attr_s *cee_cfg = buffer;
55 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
56}
57
58static void
59bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
60{
61 dcbcx_stats->subtlvs_unrecognized =
62 bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
63 dcbcx_stats->negotiation_failed =
64 bfa_os_ntohl(dcbcx_stats->negotiation_failed);
65 dcbcx_stats->remote_cfg_changed =
66 bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
67 dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
68 dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
69 dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
70 dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
71 dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
72 dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
73}
74
75static void
76bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
77{
78 lldp_stats->frames_transmitted =
79 bfa_os_ntohl(lldp_stats->frames_transmitted);
80 lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
81 lldp_stats->frames_discarded =
82 bfa_os_ntohl(lldp_stats->frames_discarded);
83 lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
84 lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
85 lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
86 lldp_stats->tlvs_unrecognized =
87 bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
88}
89
90static void
91bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
92{
93 cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
94 cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
95 cfg_stats->cee_hw_cfg_changed =
96 bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
97 cfg_stats->recvd_invalid_cfg =
98 bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
99}
100
101static void
102bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
103{
104 lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
105 lldp_cfg->enabled_system_cap =
106 bfa_os_ntohs(lldp_cfg->enabled_system_cap);
107}
108
109/**
110 * bfa_cee_attr_meminfo()
111 *
112 *
113 * @param[in] void
114 *
115 * @return Size of DMA region
116 */
117static u32
118bfa_cee_attr_meminfo(void)
119{
120 return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
121}
122
123/**
124 * bfa_cee_stats_meminfo()
125 *
126 *
127 * @param[in] void
128 *
129 * @return Size of DMA region
130 */
131static u32
132bfa_cee_stats_meminfo(void)
133{
134 return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
135}
136
137/**
138 * bfa_cee_get_attr_isr()
139 *
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
148{
149 cee->get_attr_status = status;
150 bfa_trc(cee, 0);
151 if (status == BFA_STATUS_OK) {
152 bfa_trc(cee, 0);
153 /*
154 * The requested data has been copied to the DMA area, *process
155 * it.
156 */
157 memcpy(cee->attr, cee->attr_dma.kva,
158 sizeof(struct bfa_cee_attr_s));
159 bfa_cee_format_cee_cfg(cee->attr);
160 }
161 cee->get_attr_pending = BFA_FALSE;
162 if (cee->cbfn.get_attr_cbfn) {
163 bfa_trc(cee, 0);
164 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
165 }
166 bfa_trc(cee, 0);
167}
168
169/**
170 * bfa_cee_get_attr_isr()
171 *
172 *
173 * @param[in] cee - Pointer to the CEE module
174 * status - Return status from the f/w
175 *
176 * @return void
177 */
178static void
179bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
180{
181 cee->get_stats_status = status;
182 bfa_trc(cee, 0);
183 if (status == BFA_STATUS_OK) {
184 bfa_trc(cee, 0);
185 /*
186 * The requested data has been copied to the DMA area, process
187 * it.
188 */
189 memcpy(cee->stats, cee->stats_dma.kva,
190 sizeof(struct bfa_cee_stats_s));
191 bfa_cee_format_cee_stats(cee->stats);
192 }
193 cee->get_stats_pending = BFA_FALSE;
194 bfa_trc(cee, 0);
195 if (cee->cbfn.get_stats_cbfn) {
196 bfa_trc(cee, 0);
197 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
198 }
199 bfa_trc(cee, 0);
200}
201
202/**
203 * bfa_cee_get_attr_isr()
204 *
205 *
206 * @param[in] cee - Pointer to the CEE module
207 * status - Return status from the f/w
208 *
209 * @return void
210 */
211static void
212bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
213{
214 cee->reset_stats_status = status;
215 cee->reset_stats_pending = BFA_FALSE;
216 if (cee->cbfn.reset_stats_cbfn)
217 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
218}
219
220/**
221 * bfa_cee_meminfo()
222 *
223 *
224 * @param[in] void
225 *
226 * @return Size of DMA region
227 */
228u32
229bfa_cee_meminfo(void)
230{
231 return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo());
232}
233
234/**
235 * bfa_cee_mem_claim()
236 *
237 *
238 * @param[in] cee CEE module pointer
239 * dma_kva Kernel Virtual Address of CEE DMA Memory
240 * dma_pa Physical Address of CEE DMA Memory
241 *
242 * @return void
243 */
244void
245bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
246{
247 cee->attr_dma.kva = dma_kva;
248 cee->attr_dma.pa = dma_pa;
249 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
250 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
251 cee->attr = (struct bfa_cee_attr_s *)dma_kva;
252 cee->stats =
253 (struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
254}
255
256/**
257 * bfa_cee_get_attr()
258 *
259 * Send the request to the f/w to fetch CEE attributes.
260 *
261 * @param[in] Pointer to the CEE module data structure.
262 *
263 * @return Status
264 */
265
266bfa_status_t
267bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
268 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
269{
270 struct bfi_cee_get_req_s *cmd;
271
272 bfa_assert((cee != NULL) && (cee->ioc != NULL));
273 bfa_trc(cee, 0);
274 if (!bfa_ioc_is_operational(cee->ioc)) {
275 bfa_trc(cee, 0);
276 return BFA_STATUS_IOC_FAILURE;
277 }
278 if (cee->get_attr_pending == BFA_TRUE) {
279 bfa_trc(cee, 0);
280 return BFA_STATUS_DEVBUSY;
281 }
282 cee->get_attr_pending = BFA_TRUE;
283 cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
284 cee->attr = attr;
285 cee->cbfn.get_attr_cbfn = cbfn;
286 cee->cbfn.get_attr_cbarg = cbarg;
287 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
288 bfa_ioc_portid(cee->ioc));
289 bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
290 bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
291 bfa_trc(cee, 0);
292
293 return BFA_STATUS_OK;
294}
295
296/**
297 * bfa_cee_get_stats()
298 *
299 * Send the request to the f/w to fetch CEE statistics.
300 *
301 * @param[in] Pointer to the CEE module data structure.
302 *
303 * @return Status
304 */
305
306bfa_status_t
307bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
308 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
309{
310 struct bfi_cee_get_req_s *cmd;
311
312 bfa_assert((cee != NULL) && (cee->ioc != NULL));
313
314 if (!bfa_ioc_is_operational(cee->ioc)) {
315 bfa_trc(cee, 0);
316 return BFA_STATUS_IOC_FAILURE;
317 }
318 if (cee->get_stats_pending == BFA_TRUE) {
319 bfa_trc(cee, 0);
320 return BFA_STATUS_DEVBUSY;
321 }
322 cee->get_stats_pending = BFA_TRUE;
323 cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
324 cee->stats = stats;
325 cee->cbfn.get_stats_cbfn = cbfn;
326 cee->cbfn.get_stats_cbarg = cbarg;
327 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
328 bfa_ioc_portid(cee->ioc));
329 bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
330 bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
331 bfa_trc(cee, 0);
332
333 return BFA_STATUS_OK;
334}
335
336/**
337 * bfa_cee_reset_stats()
338 *
339 *
340 * @param[in] Pointer to the CEE module data structure.
341 *
342 * @return Status
343 */
344
345bfa_status_t
346bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
347 void *cbarg)
348{
349 struct bfi_cee_reset_stats_s *cmd;
350
351 bfa_assert((cee != NULL) && (cee->ioc != NULL));
352 if (!bfa_ioc_is_operational(cee->ioc)) {
353 bfa_trc(cee, 0);
354 return BFA_STATUS_IOC_FAILURE;
355 }
356 if (cee->reset_stats_pending == BFA_TRUE) {
357 bfa_trc(cee, 0);
358 return BFA_STATUS_DEVBUSY;
359 }
360 cee->reset_stats_pending = BFA_TRUE;
361 cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
362 cee->cbfn.reset_stats_cbfn = cbfn;
363 cee->cbfn.reset_stats_cbarg = cbarg;
364 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
365 bfa_ioc_portid(cee->ioc));
366 bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
367 bfa_trc(cee, 0);
368 return BFA_STATUS_OK;
369}
370
371/**
372 * bfa_cee_isrs()
373 *
374 *
375 * @param[in] Pointer to the CEE module data structure.
376 *
377 * @return void
378 */
379
380void
381bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
382{
383 union bfi_cee_i2h_msg_u *msg;
384 struct bfi_cee_get_rsp_s *get_rsp;
385 struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
386 msg = (union bfi_cee_i2h_msg_u *)m;
387 get_rsp = (struct bfi_cee_get_rsp_s *)m;
388 bfa_trc(cee, msg->mh.msg_id);
389 switch (msg->mh.msg_id) {
390 case BFI_CEE_I2H_GET_CFG_RSP:
391 bfa_trc(cee, get_rsp->cmd_status);
392 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
393 break;
394 case BFI_CEE_I2H_GET_STATS_RSP:
395 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
396 break;
397 case BFI_CEE_I2H_RESET_STATS_RSP:
398 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
399 break;
400 default:
401 bfa_assert(0);
402 }
403}
404
405/**
406 * bfa_cee_hbfail()
407 *
408 *
409 * @param[in] Pointer to the CEE module data structure.
410 *
411 * @return void
412 */
413
414void
415bfa_cee_hbfail(void *arg)
416{
417 struct bfa_cee_s *cee;
418 cee = (struct bfa_cee_s *)arg;
419
420 if (cee->get_attr_pending == BFA_TRUE) {
421 cee->get_attr_status = BFA_STATUS_FAILED;
422 cee->get_attr_pending = BFA_FALSE;
423 if (cee->cbfn.get_attr_cbfn) {
424 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
425 BFA_STATUS_FAILED);
426 }
427 }
428 if (cee->get_stats_pending == BFA_TRUE) {
429 cee->get_stats_status = BFA_STATUS_FAILED;
430 cee->get_stats_pending = BFA_FALSE;
431 if (cee->cbfn.get_stats_cbfn) {
432 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
433 BFA_STATUS_FAILED);
434 }
435 }
436 if (cee->reset_stats_pending == BFA_TRUE) {
437 cee->reset_stats_status = BFA_STATUS_FAILED;
438 cee->reset_stats_pending = BFA_FALSE;
439 if (cee->cbfn.reset_stats_cbfn) {
440 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
441 BFA_STATUS_FAILED);
442 }
443 }
444}
445
446/**
447 * bfa_cee_attach()
448 *
449 *
450 * @param[in] cee - Pointer to the CEE module data structure
451 * ioc - Pointer to the ioc module data structure
452 * dev - Pointer to the device driver module data structure
453 * The device driver specific mbox ISR functions have
454 * this pointer as one of the parameters.
455 * trcmod -
456 * logmod -
457 *
458 * @return void
459 */
460void
461bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
462 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
463{
464 bfa_assert(cee != NULL);
465 cee->dev = dev;
466 cee->trcmod = trcmod;
467 cee->logmod = logmod;
468 cee->ioc = ioc;
469
470 bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
471 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
472 bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
473 bfa_trc(cee, 0);
474}
475
476/**
477 * bfa_cee_detach()
478 *
479 *
480 * @param[in] cee - Pointer to the CEE module data structure
481 *
482 * @return void
483 */
484void
485bfa_cee_detach(struct bfa_cee_s *cee)
486{
487 /*
488 * For now, just check if there is some ioctl pending and mark that as
489 * failed?
490 */
491 /* bfa_cee_hbfail(cee); */
492}
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
new file mode 100644
index 000000000000..44e2d1155c51
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -0,0 +1,402 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <defs/bfa_defs_pci.h>
20#include <cs/bfa_debug.h>
21#include <bfa_iocfc.h>
22
23#define DEF_CFG_NUM_FABRICS 1
24#define DEF_CFG_NUM_LPORTS 256
25#define DEF_CFG_NUM_CQS 4
26#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
27#define DEF_CFG_NUM_TSKIM_REQS 128
28#define DEF_CFG_NUM_FCXP_REQS 64
29#define DEF_CFG_NUM_UF_BUFS 64
30#define DEF_CFG_NUM_RPORTS 1024
31#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
32#define DEF_CFG_NUM_TINS 256
33
34#define DEF_CFG_NUM_SGPGS 2048
35#define DEF_CFG_NUM_REQQ_ELEMS 256
36#define DEF_CFG_NUM_RSPQ_ELEMS 64
37#define DEF_CFG_NUM_SBOOT_TGTS 16
38#define DEF_CFG_NUM_SBOOT_LUNS 16
39
40/**
41 * Use this function query the memory requirement of the BFA library.
42 * This function needs to be called before bfa_attach() to get the
43 * memory required of the BFA layer for a given driver configuration.
44 *
45 * This call will fail, if the cap is out of range compared to pre-defined
46 * values within the BFA library
47 *
48 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
49 * its configuration in this structure.
50 * The default values for struct bfa_iocfc_cfg_s can be
51 * fetched using bfa_cfg_get_default() API.
52 *
53 * If cap's boundary check fails, the library will use
54 * the default bfa_cap_t values (and log a warning msg).
55 *
56 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
57 * indicates the memory type (see bfa_mem_type_t) and
58 * amount of memory required.
59 *
60 * Driver should allocate the memory, populate the
61 * starting address for each block and provide the same
62 * structure as input parameter to bfa_attach() call.
63 *
64 * @return void
65 *
66 * Special Considerations: @note
67 */
68void
69bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
70{
71 int i;
72 u32 km_len = 0, dm_len = 0;
73
74 bfa_assert((cfg != NULL) && (meminfo != NULL));
75
76 bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
77 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
78 BFA_MEM_TYPE_KVA;
79 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
80 BFA_MEM_TYPE_DMA;
81
82 bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
83
84 for (i = 0; hal_mods[i]; i++)
85 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
86
87
88 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
89 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
90}
91
92/**
93 * Use this function to do attach the driver instance with the BFA
94 * library. This function will not trigger any HW initialization
95 * process (which will be done in bfa_init() call)
96 *
97 * This call will fail, if the cap is out of range compared to
98 * pre-defined values within the BFA library
99 *
100 * @param[out] bfa Pointer to bfa_t.
101 * @param[in] bfad Opaque handle back to the driver's IOC structure
102 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
103 * that was used in bfa_cfg_get_meminfo().
104 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
105 * use the bfa_cfg_get_meminfo() call to
106 * find the memory blocks required, allocate the
107 * required memory and provide the starting addresses.
108 * @param[in] pcidev pointer to struct bfa_pcidev_s
109 *
110 * @return
111 * void
112 *
113 * Special Considerations:
114 *
115 * @note
116 *
117 */
118void
119bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
120 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
121{
122 int i;
123 struct bfa_mem_elem_s *melem;
124
125 bfa->fcs = BFA_FALSE;
126
127 bfa_assert((cfg != NULL) && (meminfo != NULL));
128
129 /**
130 * initialize all memory pointers for iterative allocation
131 */
132 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
133 melem = meminfo->meminfo + i;
134 melem->kva_curp = melem->kva;
135 melem->dma_curp = melem->dma;
136 }
137
138 bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
139
140 for (i = 0; hal_mods[i]; i++)
141 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
142
143}
144
145/**
146 * Use this function to delete a BFA IOC. IOC should be stopped (by
147 * calling bfa_stop()) before this function call.
148 *
149 * @param[in] bfa - pointer to bfa_t.
150 *
151 * @return
152 * void
153 *
154 * Special Considerations:
155 *
156 * @note
157 */
158void
159bfa_detach(struct bfa_s *bfa)
160{
161 int i;
162
163 for (i = 0; hal_mods[i]; i++)
164 hal_mods[i]->detach(bfa);
165
166 bfa_iocfc_detach(bfa);
167}
168
169
170void
171bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
172{
173 bfa->trcmod = trcmod;
174}
175
176
177void
178bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod)
179{
180 bfa->logm = logmod;
181}
182
183
184void
185bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen)
186{
187 bfa->aen = aen;
188}
189
190void
191bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
192{
193 bfa->plog = plog;
194}
195
/**
 * Initialize the IOC.
 *
 * Returns immediately; bfa_cb_init() is invoked once initialization
 * completes. The driver must register its interrupt service routine(s)
 * and enable device interrupts after this call, or bfa_cb_init() will
 * never fire.
 */
void
bfa_init(struct bfa_s *bfa)
{
	bfa_iocfc_init(bfa);
}

/**
 * Initiate the IOC configuration setup. Returns immediately.
 */
void
bfa_start(struct bfa_s *bfa)
{
	bfa_iocfc_start(bfa);
}

/**
 * Quiesce the IOC. Returns immediately; bfa_cb_stop() is invoked -- possibly
 * before this call returns -- once the IOC has actually stopped. Failures
 * are handled internally by resetting and then completing the stop.
 */
void
bfa_stop(struct bfa_s *bfa)
{
	bfa_iocfc_stop(bfa);
}
253
254void
255bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
256{
257 INIT_LIST_HEAD(comp_q);
258 list_splice_tail_init(&bfa->comp_q, comp_q);
259}
260
261void
262bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
263{
264 struct list_head *qe;
265 struct list_head *qen;
266 struct bfa_cb_qe_s *hcb_qe;
267
268 list_for_each_safe(qe, qen, comp_q) {
269 hcb_qe = (struct bfa_cb_qe_s *) qe;
270 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
271 }
272}
273
274void
275bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
276{
277 struct list_head *qe;
278 struct bfa_cb_qe_s *hcb_qe;
279
280 while (!list_empty(comp_q)) {
281 bfa_q_deq(comp_q, &qe);
282 hcb_qe = (struct bfa_cb_qe_s *) qe;
283 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
284 }
285}
286
287void
288bfa_attach_fcs(struct bfa_s *bfa)
289{
290 bfa->fcs = BFA_TRUE;
291}
292
293/**
294 * Periodic timer heart beat from driver
295 */
296void
297bfa_timer_tick(struct bfa_s *bfa)
298{
299 bfa_timer_beat(&bfa->timer_mod);
300}
301
302#ifndef BFA_BIOS_BUILD
303/**
304 * Return the list of PCI vendor/device id lists supported by this
305 * BFA instance.
306 */
307void
308bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
309{
310 static struct bfa_pciid_s __pciids[] = {
311 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
312 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
313 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
314 };
315
316 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
317 *pciids = __pciids;
318}
319
320/**
321 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
322 * into BFA layer). The OS driver can then turn back and overwrite entries that
323 * have been configured by the user.
324 *
325 * @param[in] cfg - pointer to bfa_ioc_cfg_t
326 *
327 * @return
328 * void
329 *
330 * Special Considerations:
331 * note
332 */
333void
334bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
335{
336 cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
337 cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
338 cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
339 cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
340 cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
341 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
342 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
343 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
344
345 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
346 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
347 cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
348 cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
349 cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
350 cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
351 cfg->drvcfg.ioc_recover = BFA_FALSE;
352 cfg->drvcfg.delay_comp = BFA_FALSE;
353
354}
355
356void
357bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
358{
359 bfa_cfg_get_default(cfg);
360 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
361 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
362 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
363 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
364 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
365
366 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
367 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
368 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
369 cfg->drvcfg.min_cfg = BFA_TRUE;
370}
371
372void
373bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
374{
375 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
376}
377
378/**
379 * Retrieve firmware trace information on IOC failure.
380 */
381bfa_status_t
382bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
383{
384 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
385}
386
387/**
388 * Fetch firmware trace data.
389 *
390 * @param[in] bfa BFA instance
391 * @param[out] trcdata Firmware trace buffer
392 * @param[in,out] trclen Firmware trace buffer len
393 *
394 * @retval BFA_STATUS_OK Firmware trace is fetched.
395 * @retval BFA_STATUS_INPROGRESS Firmware trace fetch is in progress.
396 */
397bfa_status_t
398bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
399{
400 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
401}
402#endif
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
new file mode 100644
index 000000000000..1b71d349451a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_csdebug.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_os_inc.h>
20#include <cs/bfa_q.h>
21#include <log/bfa_log_hal.h>
22
23/**
24 * cs_debug_api
25 */
26
27
/**
 * Log a fatal assertion failure and halt via the OS panic hook.
 *
 * @param[in] line     source line of the failed assertion
 * @param[in] file     source file name
 * @param[in] panicstr message describing the failure
 */
void
bfa_panic(int line, char *file, char *panicstr)
{
	bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
	bfa_os_panic();	/* does not return */
}
34
/**
 * Log an unexpected state-machine event and halt via the OS panic hook.
 *
 * @param[in] logm  log module to record against (may carry instance info)
 * @param[in] line  source line where the event was rejected
 * @param[in] file  source file name
 * @param[in] event offending state-machine event code
 */
void
bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
{
	bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
	bfa_os_panic();	/* does not return */
}
41
42int
43bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
44{
45 struct list_head *tqe;
46
47 tqe = bfa_q_next(q);
48 while (tqe != q) {
49 if (tqe == qe)
50 return (1);
51 tqe = bfa_q_next(tqe);
52 if (tqe == NULL)
53 break;
54 }
55 return (0);
56}
57
58
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
new file mode 100644
index 000000000000..401babe3494e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <log/bfa_log_hal.h>
20
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
24/**
25 * hal_fcpim_mod BFA FCP Initiator Mode module
26 */
27
28/**
29 * Compute and return memory needed by FCP(im) module.
30 */
31static void
32bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
33 u32 *dm_len)
34{
35 bfa_itnim_meminfo(cfg, km_len, dm_len);
36
37 /**
38 * IO memory
39 */
40 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
41 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
42 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
43 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
44
45 *km_len += cfg->fwcfg.num_ioim_reqs *
46 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
47
48 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
49
50 /**
51 * task management command memory
52 */
53 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
54 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
55 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
56}
57
58
/**
 * Attach the FCP(im) module: record the configured limits in the
 * module state and claim memory for the itnim, tskim and ioim
 * sub-modules.
 */
static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa = bfa;
	/* one i-t-n nexus per remote port */
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;

	/* sub-modules carve their memory out of @meminfo in this order */
	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}
81
/* IOC-init-done hook: FCP(im) has no post-init work. */
static void
bfa_fcpim_initdone(struct bfa_s *bfa)
{
}
86
/* Detach the FCP(im) module: release IO and task-management resources. */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_ioim_detach(fcpim);
	bfa_tskim_detach(fcpim);
	/* itnim has no detach counterpart here */
}
95
/* Module-start hook: nothing to do for FCP(im). */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
100
/* Module-stop hook: nothing to do for FCP(im). */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
105
/**
 * IOC failure notification: propagate the failure to every active
 * i-t-n nexus.
 */
static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* _safe iteration: the handler may unlink the element — verify */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}
118
119void
120bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
121{
122 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
123
124 fcpim->path_tov = path_tov * 1000;
125 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
126 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
127}
128
129u16
130bfa_fcpim_path_tov_get(struct bfa_s *bfa)
131{
132 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
133
134 return (fcpim->path_tov / 1000);
135}
136
137bfa_status_t
138bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats)
139{
140 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
141
142 *modstats = fcpim->stats;
143
144 return BFA_STATUS_OK;
145}
146
147bfa_status_t
148bfa_fcpim_clr_modstats(struct bfa_s *bfa)
149{
150 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
151
152 memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s));
153
154 return BFA_STATUS_OK;
155}
156
/**
 * Set the per-module IO queue depth.  Must not exceed
 * BFA_IOCFC_QDEPTH_MAX (asserted).
 */
void
bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);

	fcpim->q_depth = q_depth;
}
166
167u16
168bfa_fcpim_qdepth_get(struct bfa_s *bfa)
169{
170 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
171
172 return (fcpim->q_depth);
173}
174
175
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
new file mode 100644
index 000000000000..153206cfb37a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim_priv.h
@@ -0,0 +1,188 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_PRIV_H__
19#define __BFA_FCPIM_PRIV_H__
20
21#include <bfa_fcpim.h>
22#include <defs/bfa_defs_fcpim.h>
23#include <cs/bfa_wc.h>
24#include "bfa_sgpg_priv.h"
25
26#define BFA_ITNIM_MIN 32
27#define BFA_ITNIM_MAX 1024
28
29#define BFA_IOIM_MIN 8
30#define BFA_IOIM_MAX 2000
31
32#define BFA_TSKIM_MIN 4
33#define BFA_TSKIM_MAX 512
34#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
35#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
36
37#define bfa_fcpim_stats(__fcpim, __stats) \
38 (__fcpim)->stats.__stats ++
39
40struct bfa_fcpim_mod_s {
41 struct bfa_s *bfa;
42 struct bfa_itnim_s *itnim_arr;
43 struct bfa_ioim_s *ioim_arr;
44 struct bfa_ioim_sp_s *ioim_sp_arr;
45 struct bfa_tskim_s *tskim_arr;
46 struct bfa_dma_s snsbase;
47 int num_itnims;
48 int num_ioim_reqs;
49 int num_tskim_reqs;
50 u32 path_tov;
51 u16 q_depth;
52 u16 rsvd;
53 struct list_head itnim_q; /* queue of active itnim */
54 struct list_head ioim_free_q; /* free IO resources */
55 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
56 struct list_head ioim_comp_q; /* IO global comp Q */
57 struct list_head tskim_free_q;
58 u32 ios_active; /* current active IOs */
59 u32 delay_comp;
60 struct bfa_fcpim_stats_s stats;
61};
62
63struct bfa_ioim_s;
64struct bfa_tskim_s;
65
66/**
67 * BFA IO (initiator mode)
68 */
69struct bfa_ioim_s {
70 struct list_head qe; /* queue elememt */
71 bfa_sm_t sm; /* BFA ioim state machine */
72 struct bfa_s *bfa; /* BFA module */
73 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
74 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
75 struct bfad_ioim_s *dio; /* driver IO handle */
76 u16 iotag; /* FWI IO tag */
77 u16 abort_tag; /* unqiue abort request tag */
78 u16 nsges; /* number of SG elements */
79 u16 nsgpgs; /* number of SG pages */
80 struct bfa_sgpg_s *sgpg; /* first SG page */
81 struct list_head sgpg_q; /* allocated SG pages */
82 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
83 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
84 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
85};
86
87struct bfa_ioim_sp_s {
88 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
89 u8 *snsinfo; /* sense info for this IO */
90 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
91 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
92 bfa_boolean_t abort_explicit; /* aborted by OS */
93 struct bfa_tskim_s *tskim; /* Relevant TM cmd */
94};
95
96/**
97 * BFA Task management command (initiator mode)
98 */
99struct bfa_tskim_s {
100 struct list_head qe;
101 bfa_sm_t sm;
102 struct bfa_s *bfa; /* BFA module */
103 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
104 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
105 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
106 bfa_boolean_t notify; /* notify itnim on TM comp */
107 lun_t lun; /* lun if applicable */
108 enum fcp_tm_cmnd tm_cmnd; /* task management command */
109 u16 tsk_tag; /* FWI IO tag */
110 u8 tsecs; /* timeout in seconds */
111 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
112 struct list_head io_q; /* queue of affected IOs */
113 struct bfa_wc_s wc; /* waiting counter */
114 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
115 enum bfi_tskim_status tsk_status; /* TM status */
116};
117
118/**
119 * BFA i-t-n (initiator mode)
120 */
121struct bfa_itnim_s {
122 struct list_head qe; /* queue element */
123 bfa_sm_t sm; /* i-t-n im BFA state machine */
124 struct bfa_s *bfa; /* bfa instance */
125 struct bfa_rport_s *rport; /* bfa rport */
126 void *ditn; /* driver i-t-n structure */
127 struct bfi_mhdr_s mhdr; /* pre-built mhdr */
128 u8 msg_no; /* itnim/rport firmware handle */
129 u8 reqq; /* CQ for requests */
130 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
131 struct list_head pending_q; /* queue of pending IO requests*/
132 struct list_head io_q; /* queue of active IO requests */
133 struct list_head io_cleanup_q; /* IO being cleaned up */
134 struct list_head tsk_q; /* queue of active TM commands */
135 struct list_head delay_comp_q;/* queue of failed inflight cmds */
136 bfa_boolean_t seq_rec; /* SQER supported */
137 bfa_boolean_t is_online; /* itnim is ONLINE for IO */
138 bfa_boolean_t iotov_active; /* IO TOV timer is active */
139 struct bfa_wc_s wc; /* waiting counter */
140 struct bfa_timer_s timer; /* pending IO TOV */
141 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
142 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
143 struct bfa_itnim_hal_stats_s stats;
144};
145
146#define bfa_itnim_is_online(_itnim) (_itnim)->is_online
147#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
148#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
149 (&fcpim->ioim_arr[_iotag])
150#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
151 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
152
153/*
154 * function prototypes
155 */
156void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
157 struct bfa_meminfo_s *minfo);
158void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
159void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
160void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
161 struct bfi_msg_s *msg);
162void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
163void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
164 struct bfa_tskim_s *tskim);
165void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
166void bfa_ioim_tov(struct bfa_ioim_s *ioim);
167
168void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
169 struct bfa_meminfo_s *minfo);
170void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
171void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
172void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
173void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
174void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
175
176void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
177 u32 *dm_len);
178void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
179 struct bfa_meminfo_s *minfo);
180void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
181void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
182void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
183void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
184void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
185bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
186
187#endif /* __BFA_FCPIM_PRIV_H__ */
188
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
new file mode 100644
index 000000000000..992435987deb
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -0,0 +1,1671 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <bfi/bfi_pport.h>
21#include <cs/bfa_debug.h>
22#include <aen/bfa_aen.h>
23#include <cs/bfa_plog.h>
24#include <aen/bfa_aen_port.h>
25
26BFA_TRC_FILE(HAL, PPORT);
27BFA_MODULE(pport);
28
29#define bfa_pport_callback(__pport, __event) do { \
30 if ((__pport)->bfa->fcs) { \
31 (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
32 } else { \
33 (__pport)->hcb_event = (__event); \
34 bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \
35 __bfa_cb_port_event, (__pport)); \
36 } \
37} while (0)
38
39/*
40 * The port is considered disabled if corresponding physical port or IOC are
41 * disabled explicitly
42 */
43#define BFA_PORT_IS_DISABLED(bfa) \
44 ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
45 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
46
47/*
48 * forward declarations
49 */
50static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
51static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
52static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
53static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
54static void bfa_pport_set_wwns(struct bfa_pport_s *port);
55static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
56static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
57static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
58static void bfa_port_stats_timeout(void *cbarg);
59static void bfa_port_stats_clr_timeout(void *cbarg);
60
61/**
62 * bfa_pport_private
63 */
64
65/**
66 * BFA port state machine events
67 */
68enum bfa_pport_sm_event {
69 BFA_PPORT_SM_START = 1, /* start port state machine */
70 BFA_PPORT_SM_STOP = 2, /* stop port state machine */
71 BFA_PPORT_SM_ENABLE = 3, /* enable port */
72 BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */
73 BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
74 BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */
75 BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkup down */
76 BFA_PPORT_SM_QRESUME = 8, /* CQ space available */
77 BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */
78};
79
80static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
81 enum bfa_pport_sm_event event);
82static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
83 enum bfa_pport_sm_event event);
84static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
85 enum bfa_pport_sm_event event);
86static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
87 enum bfa_pport_sm_event event);
88static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
89 enum bfa_pport_sm_event event);
90static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
91 enum bfa_pport_sm_event event);
92static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
93 enum bfa_pport_sm_event event);
94static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
95 enum bfa_pport_sm_event event);
96static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
97 enum bfa_pport_sm_event event);
98static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
99 enum bfa_pport_sm_event event);
100static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
101 enum bfa_pport_sm_event event);
102
/* state-handler -> reportable-state mapping, for querying current state */
static struct bfa_sm_table_s hal_pport_sm_table[] = {
	{BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
	{BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
	{BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
	{BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
	{BFA_SM(bfa_pport_sm_disabling_qwait),
	 BFA_PPORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
	{BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
	{BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
	{BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
	{BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
117
/**
 * Log a port asynchronous event against the HAL log module.
 *
 * NOTE(review): aen_data is populated at the end but never handed off
 * from this function — presumably the AEN delivery call was stripped
 * or lives elsewhere; confirm before relying on those stores.
 */
static void
bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = pport->bfa->logm;
	wwn_t           pwwn = pport->pwwn;
	char            pwwn_ptr[BFA_STRING_32];
	struct bfa_ioc_attr_s ioc_attr;

	wwn2str(pwwn_ptr, pwwn);
	/* map the AEN event to its log message */
	switch (event) {
	case BFA_PORT_AEN_ONLINE:
		bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_OFFLINE:
		bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_ENABLE:
		bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISABLE:
		bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISCONNECT:
		bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
		break;
	case BFA_PORT_AEN_QOS_NEG:
		bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
		break;
	default:
		break;
	}

	bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
	aen_data.port.ioc_type = ioc_attr.ioc_type;
	aen_data.port.pwwn = pwwn;
}
155
/**
 * Port state: uninitialized.  Entered at attach; leaves on START
 * (enable the port), DISABLE (persistently-disabled config) or
 * HWFAIL (IOC failed before start).
 */
static void
bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/**
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
196
/**
 * Port state: enable requested but the request queue was full; waiting
 * for queue space (QRESUME) before sending the enable to firmware.
 */
static void
bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
			    enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		bfa_pport_send_enable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
249
/**
 * Port state: enable sent to firmware; waiting for the response
 * (FWRSP) or a link event.
 */
static void
bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		break;

	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);

		bfa_assert(pport->event_cbfn);
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
298
/**
 * Port state: enabled, link down.  Waits for LINKUP; DISABLE/STOP/
 * HWFAIL transition out.
 */
static void
bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
		bfa_assert(pport->event_cbfn);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
		/**
		 * If QoS is enabled and it is not online,
		 * Send a separate event.
		 */
		if ((pport->cfg.qos_enabled)
		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);

		break;

	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
358
/**
 * Port state: enabled, link up (operational).  Any exit from this
 * state resets link info and notifies the upper layer of link down.
 */
static void
bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		/* OFFLINE if administratively down, DISCONNECT otherwise */
		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		} else {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_pport_reset_linkinfo(pport);
		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		} else {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		} else {
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
423
/**
 * Port state: disable requested but the request queue was full; waiting
 * for queue space (QRESUME) before sending the disable to firmware.
 */
static void
bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
			     enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		bfa_pport_send_disable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
464
/**
 * Port state: disable sent to firmware; waiting for the response
 * (FWRSP).  An ENABLE here re-enables the port.
 */
static void
bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
512
/**
 * Port state: administratively disabled.  Only ENABLE (or HWFAIL/STOP)
 * moves it out.
 */
static void
bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
554
/**
 * Port state: stopped (IOC being torn down).  Only a START restarts
 * the enable sequence; everything else is ignored.
 */
static void
bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all other events.
		 */
		;
	}
}
575
/**
 * Port is enabled. IOC is down/failed.
 *
 * A START (after IOC recovery) re-runs the enable sequence; all other
 * events are ignored while the IOC is down.
 */
static void
bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
599
/**
 * Port is disabled. IOC is down/failed.
 *
 * On START the port stays disabled; an ENABLE records the intent by
 * moving to iocdown (enabled-while-IOC-down).
 */
static void
bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_ENABLE:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
624
625
626
627/**
628 * bfa_pport_private
629 */
630
/* Deferred-callback trampoline: deliver the saved port event unless
 * the callback was cancelled (complete == BFA_FALSE). */
static void
__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *pport = cbarg;

	if (complete)
		pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
}
639
640#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
641 BFA_CACHELINE_SZ))
642
/* Report DMA memory needed by the port module (stats block only). */
static void
bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		  u32 *dm_len)
{
	*dm_len += PPORT_STATS_DMA_SZ;
}
649
/* Request-queue space became available: resume the pending SM action. */
static void
bfa_pport_qresume(void *cbarg)
{
	struct bfa_pport_s *port = cbarg;

	bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
}
657
/**
 * Carve the port-stats DMA block out of @meminfo and advance the
 * meminfo cursors past it.
 */
static void
bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
{
	u8        *dm_kva;
	u64        dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	pport->stats_kva = dm_kva;
	pport->stats_pa = dm_pa;
	pport->stats = (union bfa_pport_stats_u *)dm_kva;

	/* consume the stats block from the DMA region */
	dm_kva += PPORT_STATS_DMA_SZ;
	dm_pa += PPORT_STATS_DMA_SZ;

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
677
/**
 * Memory initialization: zero the port module, claim its DMA memory,
 * put the state machine in uninit and install default port config.
 */
static void
bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	struct bfa_pport_cfg_s *port_cfg = &pport->cfg;

	bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
	pport->bfa = bfa;

	bfa_pport_mem_claim(pport, meminfo);

	bfa_sm_set_state(pport, bfa_pport_sm_uninit);

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* 0 means "take the IOC's maximum" — filled in at initdone */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;

	bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
}
707
/**
 * Called once IOC attributes are available; completes port setup.
 */
static void
bfa_pport_initdone(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	/**
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_pport_set_wwns(pport);
	/* maxfrsize == 0 means the user did not configure one; use HW max. */
	if (pport->cfg.maxfrsize == 0)
		pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* All three must be non-zero after IOC init. */
	bfa_assert(pport->cfg.maxfrsize);
	bfa_assert(pport->cfg.rx_bbcredit);
	bfa_assert(pport->speed_sup);
}
726
/**
 * Module detach hook. The port module has nothing to tear down; the
 * empty body keeps the module-ops table uniform.
 */
static void
bfa_pport_detach(struct bfa_s *bfa)
{
}
731
732/**
733 * Called when IOC is ready.
734 */
735static void
736bfa_pport_start(struct bfa_s *bfa)
737{
738 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
739}
740
741/**
742 * Called before IOC is stopped.
743 */
744static void
745bfa_pport_stop(struct bfa_s *bfa)
746{
747 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
748}
749
750/**
751 * Called when IOC failure is detected.
752 */
753static void
754bfa_pport_iocdisable(struct bfa_s *bfa)
755{
756 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
757}
758
/**
 * Snapshot link attributes from the most recent firmware link-state event
 * (saved in event_arg by bfa_pport_isr()).
 */
static void
bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
{
	struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;

	pport->speed = pevent->link_state.speed;
	pport->topology = pevent->link_state.topology;

	/* ALPA is only meaningful in loop topology. */
	if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		pport->myalpa = pevent->link_state.tl.loop_info.myalpa;

	/*
	 * QoS Details
	 */
	bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);

	bfa_trc(pport->bfa, pport->speed);
	bfa_trc(pport->bfa, pport->topology);
}
779
780static void
781bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
782{
783 pport->speed = BFA_PPORT_SPEED_UNKNOWN;
784 pport->topology = BFA_PPORT_TOPOLOGY_NONE;
785}
786
787/**
788 * Send port enable message to firmware.
789 */
790static bfa_boolean_t
791bfa_pport_send_enable(struct bfa_pport_s *port)
792{
793 struct bfi_pport_enable_req_s *m;
794
795 /**
796 * Increment message tag before queue check, so that responses to old
797 * requests are discarded.
798 */
799 port->msgtag++;
800
801 /**
802 * check for room in queue to send request now
803 */
804 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
805 if (!m) {
806 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
807 return BFA_FALSE;
808 }
809
810 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
811 bfa_lpuid(port->bfa));
812 m->nwwn = port->nwwn;
813 m->pwwn = port->pwwn;
814 m->port_cfg = port->cfg;
815 m->msgtag = port->msgtag;
816 m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
817 bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
818 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
819 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
820
821 /**
822 * queue I/O message to firmware
823 */
824 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
825 return BFA_TRUE;
826}
827
828/**
829 * Send port disable message to firmware.
830 */
831static bfa_boolean_t
832bfa_pport_send_disable(struct bfa_pport_s *port)
833{
834 bfi_pport_disable_req_t *m;
835
836 /**
837 * Increment message tag before queue check, so that responses to old
838 * requests are discarded.
839 */
840 port->msgtag++;
841
842 /**
843 * check for room in queue to send request now
844 */
845 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
846 if (!m) {
847 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
848 return BFA_FALSE;
849 }
850
851 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
852 bfa_lpuid(port->bfa));
853 m->msgtag = port->msgtag;
854
855 /**
856 * queue I/O message to firmware
857 */
858 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
859
860 return BFA_TRUE;
861}
862
863static void
864bfa_pport_set_wwns(struct bfa_pport_s *port)
865{
866 port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
867 port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
868
869 bfa_trc(port->bfa, port->pwwn);
870 bfa_trc(port->bfa, port->nwwn);
871}
872
/**
 * Push the configured TX BB-credit to firmware.
 *
 * Best effort: if the request queue is full the update is dropped (only
 * traced), no retry is armed.
 */
static void
bfa_port_send_txcredit(void *port_cbarg)
{

	struct bfa_pport_s *port = port_cbarg;
	struct bfi_pport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(port->bfa, port->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(port->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
898
899
900
901/**
902 * bfa_pport_public
903 */
904
905/**
906 * Firmware message handler.
907 */
908void
909bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
910{
911 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
912 union bfi_pport_i2h_msg_u i2hmsg;
913
914 i2hmsg.msg = msg;
915 pport->event_arg.i2hmsg = i2hmsg;
916
917 switch (msg->mhdr.msg_id) {
918 case BFI_PPORT_I2H_ENABLE_RSP:
919 if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
920 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
921 break;
922
923 case BFI_PPORT_I2H_DISABLE_RSP:
924 if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
925 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
926 break;
927
928 case BFI_PPORT_I2H_EVENT:
929 switch (i2hmsg.event->link_state.linkstate) {
930 case BFA_PPORT_LINKUP:
931 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
932 break;
933 case BFA_PPORT_LINKDOWN:
934 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
935 break;
936 case BFA_PPORT_TRUNK_LINKDOWN:
937 /** todo: event notification */
938 break;
939 }
940 break;
941
942 case BFI_PPORT_I2H_GET_STATS_RSP:
943 case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
944 /*
945 * check for timer pop before processing the rsp
946 */
947 if (pport->stats_busy == BFA_FALSE
948 || pport->stats_status == BFA_STATUS_ETIMER)
949 break;
950
951 bfa_timer_stop(&pport->timer);
952 pport->stats_status = i2hmsg.getstats_rsp->status;
953 bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
954 pport);
955 break;
956 case BFI_PPORT_I2H_CLEAR_STATS_RSP:
957 case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
958 /*
959 * check for timer pop before processing the rsp
960 */
961 if (pport->stats_busy == BFA_FALSE
962 || pport->stats_status == BFA_STATUS_ETIMER)
963 break;
964
965 bfa_timer_stop(&pport->timer);
966 pport->stats_status = BFA_STATUS_OK;
967 bfa_cb_queue(pport->bfa, &pport->hcb_qe,
968 __bfa_cb_port_stats_clr, pport);
969 break;
970
971 default:
972 bfa_assert(0);
973 }
974}
975
976
977
978/**
979 * bfa_pport_api
980 */
981
982/**
983 * Registered callback for port events.
984 */
985void
986bfa_pport_event_register(struct bfa_s *bfa,
987 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
988 void *cbarg)
989{
990 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
991
992 pport->event_cbfn = cbfn;
993 pport->event_cbarg = cbarg;
994}
995
996bfa_status_t
997bfa_pport_enable(struct bfa_s *bfa)
998{
999 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1000
1001 if (pport->diag_busy)
1002 return (BFA_STATUS_DIAG_BUSY);
1003 else if (bfa_sm_cmp_state
1004 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
1005 return (BFA_STATUS_DEVBUSY);
1006
1007 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
1008 return BFA_STATUS_OK;
1009}
1010
1011bfa_status_t
1012bfa_pport_disable(struct bfa_s *bfa)
1013{
1014 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
1015 return BFA_STATUS_OK;
1016}
1017
1018/**
1019 * Configure port speed.
1020 */
1021bfa_status_t
1022bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1023{
1024 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1025
1026 bfa_trc(bfa, speed);
1027
1028 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
1029 bfa_trc(bfa, pport->speed_sup);
1030 return BFA_STATUS_UNSUPP_SPEED;
1031 }
1032
1033 pport->cfg.speed = speed;
1034
1035 return (BFA_STATUS_OK);
1036}
1037
1038/**
1039 * Get current speed.
1040 */
1041enum bfa_pport_speed
1042bfa_pport_get_speed(struct bfa_s *bfa)
1043{
1044 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1045
1046 return port->speed;
1047}
1048
1049/**
1050 * Configure port topology.
1051 */
1052bfa_status_t
1053bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1054{
1055 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1056
1057 bfa_trc(bfa, topology);
1058 bfa_trc(bfa, pport->cfg.topology);
1059
1060 switch (topology) {
1061 case BFA_PPORT_TOPOLOGY_P2P:
1062 case BFA_PPORT_TOPOLOGY_LOOP:
1063 case BFA_PPORT_TOPOLOGY_AUTO:
1064 break;
1065
1066 default:
1067 return BFA_STATUS_EINVAL;
1068 }
1069
1070 pport->cfg.topology = topology;
1071 return (BFA_STATUS_OK);
1072}
1073
1074/**
1075 * Get current topology.
1076 */
1077enum bfa_pport_topology
1078bfa_pport_get_topology(struct bfa_s *bfa)
1079{
1080 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1081
1082 return port->topology;
1083}
1084
1085bfa_status_t
1086bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1087{
1088 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1089
1090 bfa_trc(bfa, alpa);
1091 bfa_trc(bfa, pport->cfg.cfg_hardalpa);
1092 bfa_trc(bfa, pport->cfg.hardalpa);
1093
1094 pport->cfg.cfg_hardalpa = BFA_TRUE;
1095 pport->cfg.hardalpa = alpa;
1096
1097 return (BFA_STATUS_OK);
1098}
1099
1100bfa_status_t
1101bfa_pport_clr_hardalpa(struct bfa_s *bfa)
1102{
1103 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1104
1105 bfa_trc(bfa, pport->cfg.cfg_hardalpa);
1106 bfa_trc(bfa, pport->cfg.hardalpa);
1107
1108 pport->cfg.cfg_hardalpa = BFA_FALSE;
1109 return (BFA_STATUS_OK);
1110}
1111
1112bfa_boolean_t
1113bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1114{
1115 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1116
1117 *alpa = port->cfg.hardalpa;
1118 return port->cfg.cfg_hardalpa;
1119}
1120
1121u8
1122bfa_pport_get_myalpa(struct bfa_s *bfa)
1123{
1124 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1125
1126 return port->myalpa;
1127}
1128
1129bfa_status_t
1130bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1131{
1132 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1133
1134 bfa_trc(bfa, maxfrsize);
1135 bfa_trc(bfa, pport->cfg.maxfrsize);
1136
1137 /*
1138 * with in range
1139 */
1140 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1141 return (BFA_STATUS_INVLD_DFSZ);
1142
1143 /*
1144 * power of 2, if not the max frame size of 2112
1145 */
1146 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1147 return (BFA_STATUS_INVLD_DFSZ);
1148
1149 pport->cfg.maxfrsize = maxfrsize;
1150 return (BFA_STATUS_OK);
1151}
1152
1153u16
1154bfa_pport_get_maxfrsize(struct bfa_s *bfa)
1155{
1156 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1157
1158 return port->cfg.maxfrsize;
1159}
1160
1161u32
1162bfa_pport_mypid(struct bfa_s *bfa)
1163{
1164 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1165
1166 return port->mypid;
1167}
1168
1169u8
1170bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
1171{
1172 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1173
1174 return port->cfg.rx_bbcredit;
1175}
1176
1177void
1178bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1179{
1180 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1181
1182 port->cfg.tx_bbcredit = (u8) tx_bbcredit;
1183 bfa_port_send_txcredit(port);
1184}
1185
1186/**
1187 * Get port attributes.
1188 */
1189
1190wwn_t
1191bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1192{
1193 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1194 if (node)
1195 return pport->nwwn;
1196 else
1197 return pport->pwwn;
1198}
1199
/**
 * Fill in a caller-supplied attribute structure with the port's current
 * configuration and negotiated state.
 */
void
bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));

	attr->nwwn = pport->nwwn;
	attr->pwwn = pport->pwwn;

	/* Start from a copy of the full configuration... */
	bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
		      sizeof(struct bfa_pport_cfg_s));
	/*
	 * speed attributes
	 */
	attr->pport_cfg.speed = pport->cfg.speed;
	attr->speed_supported = pport->speed_sup;
	attr->speed = pport->speed;
	attr->cos_supported = FC_CLASS_3;

	/*
	 * topology attributes
	 */
	attr->pport_cfg.topology = pport->cfg.topology;
	attr->topology = pport->topology;

	/*
	 * beacon attributes
	 */
	attr->beacon = pport->beacon;
	attr->link_e2e_beacon = pport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* SM state first; IOC-level conditions override it. */
	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
	if (bfa_ioc_is_disabled(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_FWMISMATCH;
}
1241
1242static void
1243bfa_port_stats_query(void *cbarg)
1244{
1245 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1246 bfi_pport_get_stats_req_t *msg;
1247
1248 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1249
1250 if (!msg) {
1251 port->stats_qfull = BFA_TRUE;
1252 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
1253 port);
1254 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1255 return;
1256 }
1257 port->stats_qfull = BFA_FALSE;
1258
1259 bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
1260 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
1261 bfa_lpuid(port->bfa));
1262 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1263
1264 return;
1265}
1266
1267static void
1268bfa_port_stats_clear(void *cbarg)
1269{
1270 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1271 bfi_pport_clear_stats_req_t *msg;
1272
1273 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1274
1275 if (!msg) {
1276 port->stats_qfull = BFA_TRUE;
1277 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
1278 port);
1279 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1280 return;
1281 }
1282 port->stats_qfull = BFA_FALSE;
1283
1284 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
1285 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
1286 bfa_lpuid(port->bfa));
1287 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1288 return;
1289}
1290
1291static void
1292bfa_port_qos_stats_clear(void *cbarg)
1293{
1294 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1295 bfi_pport_clear_qos_stats_req_t *msg;
1296
1297 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1298
1299 if (!msg) {
1300 port->stats_qfull = BFA_TRUE;
1301 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
1302 port);
1303 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1304 return;
1305 }
1306 port->stats_qfull = BFA_FALSE;
1307
1308 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
1309 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
1310 bfa_lpuid(port->bfa));
1311 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1312 return;
1313}
1314
/**
 * Byte-swap a firmware stats buffer (s) into host order (d).
 *
 * The union starts with 64-bit counters (everything except the trailing
 * bfa_qos_stats_s), which need the two 32-bit halves exchanged on
 * little-endian hosts in addition to the per-word ntohl. The remaining
 * 32-bit fields only need a plain ntohl.
 */
static void
bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	/*
	 * Do 64 bit fields swap first
	 */
	for (i = 0;
	     i <
	     ((sizeof(union bfa_pport_stats_u) -
	       sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		/* Little-endian: also exchange the high/low 32-bit words. */
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}

	/*
	 * Now swap the 32 bit fields
	 */
	for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
		dip[i] = bfa_os_ntohl(sip[i]);
}
1344
1345static void
1346__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
1347{
1348 struct bfa_pport_s *port = cbarg;
1349
1350 if (complete) {
1351 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1352 } else {
1353 port->stats_busy = BFA_FALSE;
1354 port->stats_status = BFA_STATUS_OK;
1355 }
1356}
1357
1358static void
1359bfa_port_stats_clr_timeout(void *cbarg)
1360{
1361 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1362
1363 bfa_trc(port->bfa, port->stats_qfull);
1364
1365 if (port->stats_qfull) {
1366 bfa_reqq_wcancel(&port->stats_reqq_wait);
1367 port->stats_qfull = BFA_FALSE;
1368 }
1369
1370 port->stats_status = BFA_STATUS_ETIMER;
1371 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
1372}
1373
1374static void
1375__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
1376{
1377 struct bfa_pport_s *port = cbarg;
1378
1379 if (complete) {
1380 if (port->stats_status == BFA_STATUS_OK)
1381 bfa_pport_stats_swap(port->stats_ret, port->stats);
1382 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1383 } else {
1384 port->stats_busy = BFA_FALSE;
1385 port->stats_status = BFA_STATUS_OK;
1386 }
1387}
1388
1389static void
1390bfa_port_stats_timeout(void *cbarg)
1391{
1392 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1393
1394 bfa_trc(port->bfa, port->stats_qfull);
1395
1396 if (port->stats_qfull) {
1397 bfa_reqq_wcancel(&port->stats_reqq_wait);
1398 port->stats_qfull = BFA_FALSE;
1399 }
1400
1401 port->stats_status = BFA_STATUS_ETIMER;
1402 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
1403}
1404
1405#define BFA_PORT_STATS_TOV 1000
1406
1407/**
1408 * Fetch port attributes.
1409 */
1410bfa_status_t
1411bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1412 bfa_cb_pport_t cbfn, void *cbarg)
1413{
1414 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1415
1416 if (port->stats_busy) {
1417 bfa_trc(bfa, port->stats_busy);
1418 return (BFA_STATUS_DEVBUSY);
1419 }
1420
1421 port->stats_busy = BFA_TRUE;
1422 port->stats_ret = stats;
1423 port->stats_cbfn = cbfn;
1424 port->stats_cbarg = cbarg;
1425
1426 bfa_port_stats_query(port);
1427
1428 bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
1429 BFA_PORT_STATS_TOV);
1430 return (BFA_STATUS_OK);
1431}
1432
1433bfa_status_t
1434bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1435{
1436 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1437
1438 if (port->stats_busy) {
1439 bfa_trc(bfa, port->stats_busy);
1440 return (BFA_STATUS_DEVBUSY);
1441 }
1442
1443 port->stats_busy = BFA_TRUE;
1444 port->stats_cbfn = cbfn;
1445 port->stats_cbarg = cbarg;
1446
1447 bfa_port_stats_clear(port);
1448
1449 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1450 BFA_PORT_STATS_TOV);
1451 return (BFA_STATUS_OK);
1452}
1453
/**
 * Enable trunking on the given port bitmap.
 *
 * Exactly one bit must be set: !bitmap rejects the empty selection and
 * (bitmap & (bitmap - 1)) is non-zero whenever more than one bit is set.
 */
bfa_status_t
bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, bitmap);
	bfa_trc(bfa, pport->cfg.trunked);
	bfa_trc(bfa, pport->cfg.trunk_ports);

	if (!bitmap || (bitmap & (bitmap - 1)))
		return BFA_STATUS_EINVAL;

	pport->cfg.trunked = BFA_TRUE;
	pport->cfg.trunk_ports = bitmap;

	return BFA_STATUS_OK;
}
1471
1472void
1473bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1474{
1475 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1476
1477 qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
1478 qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
1479}
1480
1481void
1482bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
1483 struct bfa_qos_vc_attr_s *qos_vc_attr)
1484{
1485 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1486 struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
1487 u32 i = 0;
1488
1489 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
1490 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
1491 qos_vc_attr->elp_opmode_flags =
1492 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
1493
1494 /*
1495 * Individual VC info
1496 */
1497 while (i < qos_vc_attr->total_vc_count) {
1498 qos_vc_attr->vc_info[i].vc_credit =
1499 bfa_vc_attr->vc_info[i].vc_credit;
1500 qos_vc_attr->vc_info[i].borrow_credit =
1501 bfa_vc_attr->vc_info[i].borrow_credit;
1502 qos_vc_attr->vc_info[i].priority =
1503 bfa_vc_attr->vc_info[i].priority;
1504 ++i;
1505 }
1506}
1507
1508/**
1509 * Fetch QoS Stats.
1510 */
1511bfa_status_t
1512bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1513 bfa_cb_pport_t cbfn, void *cbarg)
1514{
1515 /*
1516 * QoS stats is embedded in port stats
1517 */
1518 return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg));
1519}
1520
1521bfa_status_t
1522bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1523{
1524 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1525
1526 if (port->stats_busy) {
1527 bfa_trc(bfa, port->stats_busy);
1528 return (BFA_STATUS_DEVBUSY);
1529 }
1530
1531 port->stats_busy = BFA_TRUE;
1532 port->stats_cbfn = cbfn;
1533 port->stats_cbarg = cbarg;
1534
1535 bfa_port_qos_stats_clear(port);
1536
1537 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1538 BFA_PORT_STATS_TOV);
1539 return (BFA_STATUS_OK);
1540}
1541
1542/**
1543 * Fetch port attributes.
1544 */
1545bfa_status_t
1546bfa_pport_trunk_disable(struct bfa_s *bfa)
1547{
1548 return (BFA_STATUS_OK);
1549}
1550
1551bfa_boolean_t
1552bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1553{
1554 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1555
1556 *bitmap = port->cfg.trunk_ports;
1557 return port->cfg.trunked;
1558}
1559
1560bfa_boolean_t
1561bfa_pport_is_disabled(struct bfa_s *bfa)
1562{
1563 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1564
1565 return (bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
1566 BFA_PPORT_ST_DISABLED);
1567
1568}
1569
1570bfa_boolean_t
1571bfa_pport_is_ratelim(struct bfa_s *bfa)
1572{
1573 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1574
1575return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE);
1576
1577}
1578
1579void
1580bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1581{
1582 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1583
1584 bfa_trc(bfa, on_off);
1585 bfa_trc(bfa, pport->cfg.qos_enabled);
1586
1587 pport->cfg.qos_enabled = on_off;
1588}
1589
1590void
1591bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1592{
1593 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1594
1595 bfa_trc(bfa, on_off);
1596 bfa_trc(bfa, pport->cfg.ratelimit);
1597
1598 pport->cfg.ratelimit = on_off;
1599 if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1600 pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1601}
1602
1603/**
1604 * Configure default minimum ratelim speed
1605 */
1606bfa_status_t
1607bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1608{
1609 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1610
1611 bfa_trc(bfa, speed);
1612
1613 /*
1614 * Auto and speeds greater than the supported speed, are invalid
1615 */
1616 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
1617 bfa_trc(bfa, pport->speed_sup);
1618 return BFA_STATUS_UNSUPP_SPEED;
1619 }
1620
1621 pport->cfg.trl_def_speed = speed;
1622
1623 return (BFA_STATUS_OK);
1624}
1625
1626/**
1627 * Get default minimum ratelim speed
1628 */
1629enum bfa_pport_speed
1630bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
1631{
1632 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1633
1634 bfa_trc(bfa, pport->cfg.trl_def_speed);
1635 return (pport->cfg.trl_def_speed);
1636
1637}
1638
1639void
1640bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1641{
1642 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1643
1644 bfa_trc(bfa, status);
1645 bfa_trc(bfa, pport->diag_busy);
1646
1647 pport->diag_busy = status;
1648}
1649
1650void
1651bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1652 bfa_boolean_t link_e2e_beacon)
1653{
1654 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1655
1656 bfa_trc(bfa, beacon);
1657 bfa_trc(bfa, link_e2e_beacon);
1658 bfa_trc(bfa, pport->beacon);
1659 bfa_trc(bfa, pport->link_e2e_beacon);
1660
1661 pport->beacon = beacon;
1662 pport->link_e2e_beacon = link_e2e_beacon;
1663}
1664
1665bfa_boolean_t
1666bfa_pport_is_linkup(struct bfa_s *bfa)
1667{
1668 return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
1669}
1670
1671
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
new file mode 100644
index 000000000000..7cb39a306ea9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs.c BFA FCS main
20 */
21
22#include <fcs/bfa_fcs.h>
23#include "fcs_port.h"
24#include "fcs_uf.h"
25#include "fcs_vport.h"
26#include "fcs_rport.h"
27#include "fcs_fabric.h"
28#include "fcs_fcpim.h"
29#include "fcs_fcptm.h"
30#include "fcbuild.h"
31#include "fcs.h"
32#include "bfad_drv.h"
33#include <fcb/bfa_fcb.h>
34
35/**
36 * FCS sub-modules
37 */
38struct bfa_fcs_mod_s {
39 void (*modinit) (struct bfa_fcs_s *fcs);
40 void (*modexit) (struct bfa_fcs_s *fcs);
41};
42
43#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
44
45static struct bfa_fcs_mod_s fcs_modules[] = {
46 BFA_FCS_MODULE(bfa_fcs_pport),
47 BFA_FCS_MODULE(bfa_fcs_uf),
48 BFA_FCS_MODULE(bfa_fcs_fabric),
49 BFA_FCS_MODULE(bfa_fcs_vport),
50 BFA_FCS_MODULE(bfa_fcs_rport),
51 BFA_FCS_MODULE(bfa_fcs_fcpim),
52};
53
54/**
55 * fcs_api BFA FCS API
56 */
57
58static void
59bfa_fcs_exit_comp(void *fcs_cbarg)
60{
61 struct bfa_fcs_s *fcs = fcs_cbarg;
62 struct bfad_s *bfad = fcs->bfad;
63
64 complete(&bfad->comp);
65}
66
67
68
69/**
70 * fcs_api BFA FCS API
71 */
72
73/**
74 * FCS instance initialization.
75 *
76 * param[in] fcs FCS instance
77 * param[in] bfa BFA instance
78 * param[in] bfad BFA driver instance
79 *
80 * return None
81 */
82void
83bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
84 bfa_boolean_t min_cfg)
85{
86 int i;
87 struct bfa_fcs_mod_s *mod;
88
89 fcs->bfa = bfa;
90 fcs->bfad = bfad;
91 fcs->min_cfg = min_cfg;
92
93 bfa_attach_fcs(bfa);
94 fcbuild_init();
95
96 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
97 mod = &fcs_modules[i];
98 mod->modinit(fcs);
99 }
100}
101
102/**
103 * Start FCS operations.
104 */
105void
106bfa_fcs_start(struct bfa_fcs_s *fcs)
107{
108 bfa_fcs_fabric_modstart(fcs);
109}
110
111/**
112 * FCS driver details initialization.
113 *
114 * param[in] fcs FCS instance
115 * param[in] driver_info Driver Details
116 *
117 * return None
118 */
119void
120bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
121 struct bfa_fcs_driver_info_s *driver_info)
122{
123
124 fcs->driver_info = *driver_info;
125
126 bfa_fcs_fabric_psymb_init(&fcs->fabric);
127}
128
129/**
130 * FCS instance cleanup and exit.
131 *
132 * param[in] fcs FCS instance
133 * return None
134 */
135void
136bfa_fcs_exit(struct bfa_fcs_s *fcs)
137{
138 struct bfa_fcs_mod_s *mod;
139 int nmods, i;
140
141 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
142
143 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
144
145 for (i = 0; i < nmods; i++) {
146 bfa_wc_up(&fcs->wc);
147
148 mod = &fcs_modules[i];
149 mod->modexit(fcs);
150 }
151
152 bfa_wc_wait(&fcs->wc);
153}
154
155
/**
 * Attach a trace module to the FCS instance.
 */
void
bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
{
	fcs->trcmod = trcmod;
}
161
162
/**
 * Attach a log module to the FCS instance.
 */
void
bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod)
{
	fcs->logm = logmod;
}
168
169
/**
 * Attach an asynchronous-event-notification module to the FCS instance.
 */
void
bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen)
{
	fcs->aen = aen;
}
175
/**
 * Called by each sub-module when its exit completes; drains the wait
 * counter armed in bfa_fcs_exit().
 */
void
bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
{
	bfa_wc_down(&fcs->wc);
}
181
182
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
new file mode 100644
index 000000000000..8975ed041dc0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -0,0 +1,940 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_port.c BFA FCS port
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <fcs/bfa_fcs_lport.h>
24#include <fcs/bfa_fcs_rport.h>
25#include <fcb/bfa_fcb_port.h>
26#include <bfa_svc.h>
27#include <log/bfa_log_fcs.h>
28#include "fcs.h"
29#include "fcs_lport.h"
30#include "fcs_vport.h"
31#include "fcs_rport.h"
32#include "fcs_fcxp.h"
33#include "fcs_trcmod.h"
34#include "lport_priv.h"
35#include <aen/bfa_aen_lport.h>
36
37BFA_TRC_FILE(FCS, PORT);
38
39/**
40 * Forward declarations
41 */
42
43static void bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
44 enum bfa_lport_aen_event event);
45static void bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port,
46 struct fchs_s *rx_fchs, u8 reason_code,
47 u8 reason_code_expl);
48static void bfa_fcs_port_plogi(struct bfa_fcs_port_s *port,
49 struct fchs_s *rx_fchs,
50 struct fc_logi_s *plogi);
51static void bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port);
52static void bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port);
53static void bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port);
54static void bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port);
55static void bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port);
56static void bfa_fcs_port_deleted(struct bfa_fcs_port_s *port);
57static void bfa_fcs_port_echo(struct bfa_fcs_port_s *port,
58 struct fchs_s *rx_fchs,
59 struct fc_echo_s *echo, u16 len);
60static void bfa_fcs_port_rnid(struct bfa_fcs_port_s *port,
61 struct fchs_s *rx_fchs,
62 struct fc_rnid_cmd_s *rnid, u16 len);
63static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
64 struct fc_rnid_general_topology_data_s *gen_topo_data);
65
66static struct {
67 void (*init) (struct bfa_fcs_port_s *port);
68 void (*online) (struct bfa_fcs_port_s *port);
69 void (*offline) (struct bfa_fcs_port_s *port);
70} __port_action[] = {
71 {
72 bfa_fcs_port_unknown_init, bfa_fcs_port_unknown_online,
73 bfa_fcs_port_unknown_offline}, {
74 bfa_fcs_port_fab_init, bfa_fcs_port_fab_online,
75 bfa_fcs_port_fab_offline}, {
76 bfa_fcs_port_loop_init, bfa_fcs_port_loop_online,
77 bfa_fcs_port_loop_offline}, {
78bfa_fcs_port_n2n_init, bfa_fcs_port_n2n_online,
79 bfa_fcs_port_n2n_offline},};
80
81/**
82 * fcs_port_sm FCS logical port state machine
83 */
84
/* Events driving the logical port state machine. */
enum bfa_fcs_port_event {
	BFA_FCS_PORT_SM_CREATE = 1,	/* port created */
	BFA_FCS_PORT_SM_ONLINE = 2,	/* fabric login / link up */
	BFA_FCS_PORT_SM_OFFLINE = 3,	/* fabric logout / link down */
	BFA_FCS_PORT_SM_DELETE = 4,	/* port delete requested */
	BFA_FCS_PORT_SM_DELRPORT = 5,	/* an rport of this port was deleted */
};
92
93static void bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
94 enum bfa_fcs_port_event event);
95static void bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port,
96 enum bfa_fcs_port_event event);
97static void bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
98 enum bfa_fcs_port_event event);
99static void bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
100 enum bfa_fcs_port_event event);
101static void bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
102 enum bfa_fcs_port_event event);
103
104static void
105bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
106 enum bfa_fcs_port_event event)
107{
108 bfa_trc(port->fcs, port->port_cfg.pwwn);
109 bfa_trc(port->fcs, event);
110
111 switch (event) {
112 case BFA_FCS_PORT_SM_CREATE:
113 bfa_sm_set_state(port, bfa_fcs_port_sm_init);
114 break;
115
116 default:
117 bfa_assert(0);
118 }
119}
120
121static void
122bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
123{
124 bfa_trc(port->fcs, port->port_cfg.pwwn);
125 bfa_trc(port->fcs, event);
126
127 switch (event) {
128 case BFA_FCS_PORT_SM_ONLINE:
129 bfa_sm_set_state(port, bfa_fcs_port_sm_online);
130 bfa_fcs_port_online_actions(port);
131 break;
132
133 case BFA_FCS_PORT_SM_DELETE:
134 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
135 bfa_fcs_port_deleted(port);
136 break;
137
138 default:
139 bfa_assert(0);
140 }
141}
142
/*
 * Online state: the port is up and may hold rports. Delete is
 * two-phase: if rports exist, wait in the deleting state until every
 * rport deletion completes (BFA_FCS_PORT_SM_DELRPORT events).
 */
static void
bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
			enum bfa_fcs_port_event event)
{
	struct bfa_fcs_rport_s *rport;
	struct list_head *qe, *qen;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case BFA_FCS_PORT_SM_OFFLINE:
		bfa_sm_set_state(port, bfa_fcs_port_sm_offline);
		bfa_fcs_port_offline_actions(port);
		break;

	case BFA_FCS_PORT_SM_DELETE:

		/* topology-specific teardown before the delete */
		__port_action[port->fabric->fab_type].offline(port);

		if (port->num_rports == 0) {
			/* nothing to wait for: delete completes now */
			bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
			bfa_fcs_port_deleted(port);
		} else {
			/* delete every rport, then wait for DELRPORT events */
			bfa_sm_set_state(port, bfa_fcs_port_sm_deleting);
			list_for_each_safe(qe, qen, &port->rport_q) {
				rport = (struct bfa_fcs_rport_s *)qe;
				bfa_fcs_rport_delete(rport);
			}
		}
		break;

	case BFA_FCS_PORT_SM_DELRPORT:
		/* rport went away while online: no state change needed */
		break;

	default:
		bfa_assert(0);
	}
}
182
/*
 * Offline state: link/fabric is down but the port still exists.
 * Mirrors the online-state delete handling; a repeated OFFLINE or a
 * DELRPORT here requires no action.
 */
static void
bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
			enum bfa_fcs_port_event event)
{
	struct bfa_fcs_rport_s *rport;
	struct list_head *qe, *qen;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case BFA_FCS_PORT_SM_ONLINE:
		bfa_sm_set_state(port, bfa_fcs_port_sm_online);
		bfa_fcs_port_online_actions(port);
		break;

	case BFA_FCS_PORT_SM_DELETE:
		if (port->num_rports == 0) {
			/* no rports outstanding: delete completes now */
			bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
			bfa_fcs_port_deleted(port);
		} else {
			/* delete every rport, then wait for DELRPORT events */
			bfa_sm_set_state(port, bfa_fcs_port_sm_deleting);
			list_for_each_safe(qe, qen, &port->rport_q) {
				rport = (struct bfa_fcs_rport_s *)qe;
				bfa_fcs_rport_delete(rport);
			}
		}
		break;

	case BFA_FCS_PORT_SM_DELRPORT:
	case BFA_FCS_PORT_SM_OFFLINE:
		/* harmless while already offline */
		break;

	default:
		bfa_assert(0);
	}
}
220
221static void
222bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
223 enum bfa_fcs_port_event event)
224{
225 bfa_trc(port->fcs, port->port_cfg.pwwn);
226 bfa_trc(port->fcs, event);
227
228 switch (event) {
229 case BFA_FCS_PORT_SM_DELRPORT:
230 if (port->num_rports == 0) {
231 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
232 bfa_fcs_port_deleted(port);
233 }
234 break;
235
236 default:
237 bfa_assert(0);
238 }
239}
240
241
242
243/**
244 * fcs_port_pvt
245 */
246
247/**
248 * Send AEN notification
249 */
/**
 * Log an AEN message for a logical-port event and build the AEN
 * payload describing the port.
 *
 * NOTE(review): aen_data is fully populated at the end of this
 * function but is never handed to any AEN delivery call in the code
 * visible here — verify whether a bfa_aen_post()-style call is
 * missing or happens elsewhere.
 */
static void
bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
			enum bfa_lport_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = port->fcs->logm;
	enum bfa_port_role role = port->port_cfg.roles;
	wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
	char lpwwn_ptr[BFA_STRING_32];
	/*
	 * Role values are even-spaced flags, so role/2 indexes this
	 * table; size BFA_PORT_ROLE_FCP_MAX/2 + 1 matches the assert
	 * below (role <= BFA_PORT_ROLE_FCP_MAX).
	 */
	char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
		{ "Initiator", "Target", "IPFC" };

	wwn2str(lpwwn_ptr, lpwwn);

	bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);

	switch (event) {
	case BFA_LPORT_AEN_ONLINE:
		bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_OFFLINE:
		bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_NEW:
		bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_DELETE:
		bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_DISCONNECT:
		bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
			role_str[role / 2]);
		break;
	default:
		break;
	}

	aen_data.lport.vf_id = port->fabric->vf_id;
	aen_data.lport.roles = role;
	aen_data.lport.ppwwn =
		bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
	aen_data.lport.lpwwn = lpwwn;
}
297
298/*
299 * Send a LS reject
300 */
301static void
302bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
303 u8 reason_code, u8 reason_code_expl)
304{
305 struct fchs_s fchs;
306 struct bfa_fcxp_s *fcxp;
307 struct bfa_rport_s *bfa_rport = NULL;
308 int len;
309
310 bfa_trc(port->fcs, rx_fchs->s_id);
311
312 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
313 if (!fcxp)
314 return;
315
316 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
317 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
318 reason_code, reason_code_expl);
319
320 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
321 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
322 FC_MAX_PDUSZ, 0);
323}
324
325/**
326 * Process incoming plogi from a remote port.
327 */
/**
 * Process an incoming PLOGI from a remote port.
 *
 * Handles: min-cfg drop, malformed PLOGI reject, direct-attach P2P
 * address assignment, and dispatch to a new or existing rport.
 */
static void
bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
			struct fc_logi_s *plogi)
{
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, rx_fchs->d_id);
	bfa_trc(port->fcs, rx_fchs->s_id);

	/*
	 * If min cfg mode is enabled, drop any incoming PLOGIs
	 */
	if (__fcs_min_cfg(port->fcs)) {
		bfa_trc(port->fcs, rx_fchs->s_id);
		return;
	}

	if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) {
		bfa_trc(port->fcs, rx_fchs->s_id);
		/*
		 * Malformed service parameters: send an LS reject.
		 */
		bfa_fcs_port_send_ls_rjt(port, rx_fchs,
					FC_LS_RJT_RSN_PROTOCOL_ERROR,
					FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
		return;
	}

	/**
	 * Direct Attach P2P mode: verify the address assigned by the
	 * r-port. The port with the lower PWWN accepts the address
	 * chosen by the peer (hence the memcmp < 0 check).
	 */
	if ((!bfa_fcs_fabric_is_switched(port->fabric))
	    &&
	    (memcmp
	     ((void *)&bfa_fcs_port_get_pwwn(port), (void *)&plogi->port_name,
	      sizeof(wwn_t)) < 0)) {
		if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
			/*
			 * Address assigned to us cannot be a well-known
			 * address.
			 */
			bfa_fcs_port_send_ls_rjt(port, rx_fchs,
					FC_LS_RJT_RSN_PROTOCOL_ERROR,
					FC_LS_RJT_EXP_INVALID_NPORT_ID);
			return;
		}
		port->pid = rx_fchs->d_id;
	}

	/**
	 * First, check if we know the device by pwwn.
	 */
	rport = bfa_fcs_port_get_rport_by_pwwn(port, plogi->port_name);
	if (rport) {
		/**
		 * Direct Attach P2P mode: adopt the addresses assigned
		 * by the rport for both ends of the link.
		 */
		if ((!bfa_fcs_fabric_is_switched(port->fabric))
		    &&
		    (memcmp
		     ((void *)&bfa_fcs_port_get_pwwn(port),
		      (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
			port->pid = rx_fchs->d_id;
			rport->pid = rx_fchs->s_id;
		}
		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
		return;
	}

	/**
	 * Next, lookup rport by PID.
	 */
	rport = bfa_fcs_port_get_rport_by_pid(port, rx_fchs->s_id);
	if (!rport) {
		/**
		 * Inbound PLOGI from a new device.
		 */
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;
	}

	/**
	 * Rport is known only by PID.
	 */
	if (rport->pwwn) {
		/**
		 * This is a different device with the same pid. Old device
		 * disappeared. Send implicit LOGO to old device.
		 */
		bfa_assert(rport->pwwn != plogi->port_name);
		bfa_fcs_rport_logo_imp(rport);

		/**
		 * Inbound PLOGI from a new device (with old PID).
		 */
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;
	}

	/**
	 * PLOGIs crossing each other: hand the frame to the existing
	 * (PWWN-less) rport.
	 */
	bfa_assert(rport->pwwn == WWN_NULL);
	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
}
432
433/*
434 * Process incoming ECHO.
435 * Since it does not require a login, it is processed here.
436 */
/*
 * Process an incoming ECHO ELS.
 * ECHO does not require a login, so it is handled here: reply with an
 * LS_ACC mirroring back the sender's payload.
 */
static void
bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
			struct fc_echo_s *echo, u16 rx_len)
{
	struct fchs_s fchs;
	struct bfa_fcxp_s *fcxp;
	struct bfa_rport_s *bfa_rport = NULL;	/* pre-login: no rport */
	int len, pyld_len;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);
	bfa_trc(port->fcs, rx_len);

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;	/* out of exchange resources: drop the ECHO */

	len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
				bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);

	/*
	 * Copy the payload (if any) from the echo frame.
	 * pyld_len is everything after the FC header.
	 */
	pyld_len = rx_len - sizeof(struct fchs_s);
	bfa_trc(port->fcs, pyld_len);

	/*
	 * NOTE(review): the copy assumes pyld_len >= sizeof(struct
	 * fc_echo_s) whenever pyld_len > len — confirm a minimal ECHO
	 * frame cannot make the subtraction underflow.
	 */
	if (pyld_len > len)
		memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
			sizeof(struct fc_echo_s), (echo + 1),
			(pyld_len - sizeof(struct fc_echo_s)));

	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
			BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
			FC_MAX_PDUSZ, 0);
}
472
473/*
474 * Process incoming RNID.
475 * Since it does not require a login, it is processed here.
476 */
/*
 * Process an incoming RNID ELS.
 * RNID does not require a login, so it is handled here. Only the
 * General Topology Discovery data format is fully supported; other
 * requested formats get Common Node Id data only, per FC-LS.
 */
static void
bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
			struct fc_rnid_cmd_s *rnid, u16 rx_len)
{
	struct fc_rnid_common_id_data_s common_id_data;
	struct fc_rnid_general_topology_data_s gen_topo_data;
	struct fchs_s fchs;
	struct bfa_fcxp_s *fcxp;
	struct bfa_rport_s *bfa_rport = NULL;	/* pre-login: no rport */
	u16 len;
	u32 data_format;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);
	bfa_trc(port->fcs, rx_len);

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;	/* out of exchange resources: drop the RNID */

	/*
	 * Check Node Identification Data Format.
	 * We only support General Topology Discovery Format.
	 * For any other requested Data Formats, we return Common Node Id Data
	 * only, as per FC-LS.
	 */
	bfa_trc(port->fcs, rnid->node_id_data_format);
	if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
		data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY;
		/*
		 * Get General topology data for this port
		 */
		bfa_fs_port_get_gen_topo_data(port, &gen_topo_data);
	} else {
		data_format = RNID_NODEID_DATA_FORMAT_COMMON;
	}

	/*
	 * Copy the Node Id Info.
	 * NOTE(review): gen_topo_data is left uninitialized in the COMMON
	 * branch yet is still passed below — confirm fc_rnid_acc_build()
	 * ignores it for non-DISCOVERY formats.
	 */
	common_id_data.port_name = bfa_fcs_port_get_pwwn(port);
	common_id_data.node_name = bfa_fcs_port_get_nwwn(port);

	len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
				bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
				data_format, &common_id_data, &gen_topo_data);

	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
			BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
			FC_MAX_PDUSZ, 0);

	return;
}
530
531/*
532 * Fill out General Topolpgy Discovery Data for RNID ELS.
533 */
534static void
535bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
536 struct fc_rnid_general_topology_data_s *gen_topo_data)
537{
538
539 bfa_os_memset(gen_topo_data, 0,
540 sizeof(struct fc_rnid_general_topology_data_s));
541
542 gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST);
543 gen_topo_data->phy_port_num = 0; /* @todo */
544 gen_topo_data->num_attached_nodes = bfa_os_htonl(1);
545}
546
547static void
548bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port)
549{
550 bfa_trc(port->fcs, port->fabric->oper_type);
551
552 __port_action[port->fabric->fab_type].init(port);
553 __port_action[port->fabric->fab_type].online(port);
554
555 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_ONLINE);
556 bfa_fcb_port_online(port->fcs->bfad, port->port_cfg.roles,
557 port->fabric->vf_drv, (port->vport == NULL) ?
558 NULL : port->vport->vport_drv);
559}
560
561static void
562bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port)
563{
564 struct list_head *qe, *qen;
565 struct bfa_fcs_rport_s *rport;
566
567 bfa_trc(port->fcs, port->fabric->oper_type);
568
569 __port_action[port->fabric->fab_type].offline(port);
570
571 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) {
572 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
573 } else {
574 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE);
575 }
576 bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles,
577 port->fabric->vf_drv,
578 (port->vport == NULL) ? NULL : port->vport->vport_drv);
579
580 list_for_each_safe(qe, qen, &port->rport_q) {
581 rport = (struct bfa_fcs_rport_s *)qe;
582 bfa_fcs_rport_offline(rport);
583 }
584}
585
/* Init handler for the unknown topology: must never be invoked. */
static void
bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port)
{
	bfa_assert(0);
}
591
/* Online handler for the unknown topology: must never be invoked. */
static void
bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port)
{
	bfa_assert(0);
}
597
/* Offline handler for the unknown topology: must never be invoked. */
static void
bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port)
{
	bfa_assert(0);
}
603
604static void
605bfa_fcs_port_deleted(struct bfa_fcs_port_s *port)
606{
607 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DELETE);
608
609 /*
610 * Base port will be deleted by the OS driver
611 */
612 if (port->vport) {
613 bfa_fcb_port_delete(port->fcs->bfad, port->port_cfg.roles,
614 port->fabric->vf_drv,
615 port->vport ? port->vport->vport_drv : NULL);
616 bfa_fcs_vport_delete_comp(port->vport);
617 } else {
618 bfa_fcs_fabric_port_delete_comp(port->fabric);
619 }
620}
621
622
623
624/**
625 * fcs_lport_api BFA FCS port API
626 */
627/**
628 * Module initialization
629 */
/**
 * Module initialization — the lport module needs no per-instance
 * setup at this time.
 */
void
bfa_fcs_port_modinit(struct bfa_fcs_s *fcs)
{
}
635
636/**
637 * Module cleanup
638 */
/**
 * Module cleanup — nothing to tear down; immediately acknowledge the
 * exit so the FCS wait-counter can proceed.
 */
void
bfa_fcs_port_modexit(struct bfa_fcs_s *fcs)
{
	bfa_fcs_modexit_comp(fcs);
}
644
645/**
646 * Unsolicited frame receive handling.
647 */
/**
 * Unsolicited frame receive handling for a logical port.
 *
 * Dispatch order: drop if offline; handle login-free ELSs (PLOGI,
 * ECHO, RNID) directly; otherwise route to the matching rport by
 * source PID; remaining ELSs are handled inline or rejected.
 */
void
bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
			u16 len)
{
	u32 pid = fchs->s_id;
	struct bfa_fcs_rport_s *rport = NULL;
	/* ELS payload immediately follows the FC header */
	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);

	bfa_stats(lport, uf_recvs);

	if (!bfa_fcs_port_is_online(lport)) {
		bfa_stats(lport, uf_recv_drops);
		return;
	}

	/**
	 * First, handle ELSs that do not require a login.
	 */
	/*
	 * Handle PLOGI first
	 */
	if ((fchs->type == FC_TYPE_ELS) &&
		(els_cmd->els_code == FC_ELS_PLOGI)) {
		bfa_fcs_port_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
		return;
	}

	/*
	 * Handle ECHO separately.
	 */
	if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
		bfa_fcs_port_echo(lport, fchs,
			(struct fc_echo_s *) els_cmd, len);
		return;
	}

	/*
	 * Handle RNID separately.
	 */
	if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
		bfa_fcs_port_rnid(lport, fchs,
			(struct fc_rnid_cmd_s *) els_cmd, len);
		return;
	}

	/**
	 * look for a matching remote port ID
	 */
	rport = bfa_fcs_port_get_rport_by_pid(lport, pid);
	if (rport) {
		bfa_trc(rport->fcs, fchs->s_id);
		bfa_trc(rport->fcs, fchs->d_id);
		bfa_trc(rport->fcs, fchs->type);

		bfa_fcs_rport_uf_recv(rport, fchs, len);
		return;
	}

	/**
	 * Only ELS frames are handled for unknown senders.
	 */
	if (fchs->type != FC_TYPE_ELS) {
		bfa_trc(lport->fcs, fchs->type);
		bfa_assert(0);
		return;
	}

	bfa_trc(lport->fcs, els_cmd->els_code);
	if (els_cmd->els_code == FC_ELS_RSCN) {
		bfa_fcs_port_scn_process_rscn(lport, fchs, len);
		return;
	}

	if (els_cmd->els_code == FC_ELS_LOGO) {
		/**
		 * @todo Handle LOGO frames received.
		 */
		bfa_trc(lport->fcs, els_cmd->els_code);
		return;
	}

	if (els_cmd->els_code == FC_ELS_PRLI) {
		/**
		 * @todo Handle PRLI frames received.
		 */
		bfa_trc(lport->fcs, els_cmd->els_code);
		return;
	}

	/**
	 * Unhandled ELS frames. Send a LS_RJT.
	 */
	bfa_fcs_port_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
				FC_LS_RJT_EXP_NO_ADDL_INFO);

}
744
745/**
746 * PID based Lookup for a R-Port in the Port R-Port Queue
747 */
748struct bfa_fcs_rport_s *
749bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid)
750{
751 struct bfa_fcs_rport_s *rport;
752 struct list_head *qe;
753
754 list_for_each(qe, &port->rport_q) {
755 rport = (struct bfa_fcs_rport_s *)qe;
756 if (rport->pid == pid)
757 return rport;
758 }
759
760 bfa_trc(port->fcs, pid);
761 return NULL;
762}
763
764/**
765 * PWWN based Lookup for a R-Port in the Port R-Port Queue
766 */
767struct bfa_fcs_rport_s *
768bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
769{
770 struct bfa_fcs_rport_s *rport;
771 struct list_head *qe;
772
773 list_for_each(qe, &port->rport_q) {
774 rport = (struct bfa_fcs_rport_s *)qe;
775 if (wwn_is_equal(rport->pwwn, pwwn))
776 return rport;
777 }
778
779 bfa_trc(port->fcs, pwwn);
780 return (NULL);
781}
782
783/**
784 * NWWN based Lookup for a R-Port in the Port R-Port Queue
785 */
786struct bfa_fcs_rport_s *
787bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
788{
789 struct bfa_fcs_rport_s *rport;
790 struct list_head *qe;
791
792 list_for_each(qe, &port->rport_q) {
793 rport = (struct bfa_fcs_rport_s *)qe;
794 if (wwn_is_equal(rport->nwwn, nwwn))
795 return rport;
796 }
797
798 bfa_trc(port->fcs, nwwn);
799 return (NULL);
800}
801
802/**
803 * Called by rport module when new rports are discovered.
804 */
805void
806bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
807 struct bfa_fcs_rport_s *rport)
808{
809 list_add_tail(&rport->qe, &port->rport_q);
810 port->num_rports++;
811}
812
813/**
814 * Called by rport module to when rports are deleted.
815 */
816void
817bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
818 struct bfa_fcs_rport_s *rport)
819{
820 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
821 list_del(&rport->qe);
822 port->num_rports--;
823
824 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
825}
826
827/**
828 * Called by fabric for base port when fabric login is complete.
829 * Called by vport for virtual ports when FDISC is complete.
830 */
831void
832bfa_fcs_port_online(struct bfa_fcs_port_s *port)
833{
834 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
835}
836
837/**
838 * Called by fabric for base port when fabric goes offline.
839 * Called by vport for virtual ports when virtual port becomes offline.
840 */
841void
842bfa_fcs_port_offline(struct bfa_fcs_port_s *port)
843{
844 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
845}
846
847/**
848 * Called by fabric to delete base lport and associated resources.
849 *
850 * Called by vport to delete lport and associated resources. Should call
851 * bfa_fcs_vport_delete_comp() for vports on completion.
852 */
853void
854bfa_fcs_port_delete(struct bfa_fcs_port_s *port)
855{
856 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
857}
858
859/**
860 * Called by fabric in private loop topology to process LIP event.
861 */
/**
 * Called by fabric in private loop topology to process a LIP event.
 * No handling is required at the lport level.
 */
void
bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
{
}
866
867/**
868 * Return TRUE if port is online, else return FALSE
869 */
870bfa_boolean_t
871bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
872{
873 return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online));
874}
875
876/**
877 * Logical port initialization of base or virtual port.
878 * Called by fabric for base port or by vport for virtual ports.
879 */
/**
 * Logical port initialization of a base or virtual port.
 * Called by fabric for the base port or by vport for virtual ports.
 *
 * @param[in] lport	logical port to initialize
 * @param[in] fcs	owning FCS instance
 * @param[in] vf_id	virtual fabric id used to look up the fabric
 * @param[in] port_cfg	port configuration (copied by value)
 * @param[in] vport	owning vport, or NULL for the base port
 */
void
bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_port_cfg_s *port_cfg,
		struct bfa_fcs_vport_s *vport)
{
	lport->fcs = fcs;
	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
	bfa_os_assign(lport->port_cfg, *port_cfg);
	lport->vport = vport;
	/* vports use their own LPS tag; the base port uses the fabric's */
	lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
			bfa_lps_get_tag(lport->fabric->lps);

	INIT_LIST_HEAD(&lport->rport_q);
	lport->num_rports = 0;

	/* register the port with the OS driver and post the NEW AEN */
	lport->bfad_port =
		bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles,
				lport->fabric->vf_drv,
				vport ? vport->vport_drv : NULL);
	bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);

	bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
904
905
906
907/**
908 * fcs_lport_api
909 */
910
/**
 * Fill in the attributes of a logical port for external consumers.
 *
 * NOTE(review): port_attr->state is assigned only in the no-fabric
 * branch; when a fabric exists the field is left untouched — confirm
 * callers pre-initialize port_attr or set state elsewhere.
 */
void
bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
			struct bfa_port_attr_s *port_attr)
{
	/* PID is only meaningful while the port is online */
	if (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online))
		port_attr->pid = port->pid;
	else
		port_attr->pid = 0;

	port_attr->port_cfg = port->port_cfg;

	if (port->fabric) {
		port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
		port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
		port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
		memcpy(port_attr->fabric_ip_addr,
			bfa_fcs_port_get_fabric_ipaddr(port),
			BFA_FCS_FABRIC_IPADDR_SZ);

		/* virtual ports always report as VPORT regardless of fabric */
		if (port->vport != NULL)
			port_attr->port_type = BFA_PPORT_TYPE_VPORT;

	} else {
		port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
		port_attr->state = BFA_PORT_UNINIT;
	}

}
939
940
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
new file mode 100644
index 000000000000..9c4b24e62de1
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_pport.c BFA FCS PPORT ( physical port)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs_trcmod.h"
26#include "fcs.h"
27#include "fcs_fabric.h"
28#include "fcs_port.h"
29
30BFA_TRC_FILE(FCS, PPORT);
31
32static void
33bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
34{
35 struct bfa_fcs_s *fcs = cbarg;
36
37 bfa_trc(fcs, event);
38
39 switch (event) {
40 case BFA_PPORT_LINKUP:
41 bfa_fcs_fabric_link_up(&fcs->fabric);
42 break;
43
44 case BFA_PPORT_LINKDOWN:
45 bfa_fcs_fabric_link_down(&fcs->fabric);
46 break;
47
48 case BFA_PPORT_TRUNK_LINKDOWN:
49 bfa_assert(0);
50 break;
51
52 default:
53 bfa_assert(0);
54 }
55}
56
57void
58bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs)
59{
60 bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler,
61 fcs);
62}
63
/**
 * Physical-port module cleanup — nothing to tear down; immediately
 * acknowledge the exit to the FCS wait-counter.
 */
void
bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
{
	bfa_fcs_modexit_comp(fcs);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
new file mode 100644
index 000000000000..ad01db6444b2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs.h"
26#include "fcs_trcmod.h"
27#include "fcs_fabric.h"
28#include "fcs_uf.h"
29
30BFA_TRC_FILE(FCS, UF);
31
32/**
33 * BFA callback for unsolicited frame receive handler.
34 *
35 * @param[in] cbarg callback arg for receive handler
36 * @param[in] uf unsolicited frame descriptor
37 *
38 * @return None
39 */
/**
 * BFA callback for unsolicited frame receive handling.
 *
 * Strips an optional VFT extended header to select the target fabric
 * (dropping frames with an unknown VF id), traces the FC header, and
 * hands the frame to the fabric. The uf buffer is always freed here.
 *
 * @param[in] cbarg	callback arg (the FCS instance)
 * @param[in] uf	unsolicited frame descriptor
 *
 * @return None
 */
static void
bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
	struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
	u16 len = bfa_uf_get_frmlen(uf);
	struct fc_vft_s *vft;
	struct bfa_fcs_fabric_s *fabric;

	/**
	 * check for VFT header
	 */
	if (fchs->routing == FC_RTG_EXT_HDR &&
		fchs->cat_info == FC_CAT_VFT_HDR) {
		bfa_stats(fcs, uf.tagged);
		vft = bfa_uf_get_frmbuf(uf);
		if (fcs->port_vfid == vft->vf_id)
			fabric = &fcs->fabric;
		else
			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);

		/**
		 * drop frame if vfid is unknown
		 */
		if (!fabric) {
			bfa_assert(0);
			bfa_stats(fcs, uf.vfid_unknown);
			bfa_uf_free(uf);
			return;
		}

		/**
		 * skip vft header so fchs points at the real FC header
		 */
		fchs = (struct fchs_s *) (vft + 1);
		len -= sizeof(struct fc_vft_s);

		bfa_trc(fcs, vft->vf_id);
	} else {
		bfa_stats(fcs, uf.untagged);
		fabric = &fcs->fabric;
	}

	/* trace the first six words of the FC header */
	bfa_trc(fcs, ((u32 *) fchs)[0]);
	bfa_trc(fcs, ((u32 *) fchs)[1]);
	bfa_trc(fcs, ((u32 *) fchs)[2]);
	bfa_trc(fcs, ((u32 *) fchs)[3]);
	bfa_trc(fcs, ((u32 *) fchs)[4]);
	bfa_trc(fcs, ((u32 *) fchs)[5]);
	bfa_trc(fcs, len);

	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
	bfa_uf_free(uf);
}
94
95void
96bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs)
97{
98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
99}
100
/**
 * UF module cleanup — nothing to tear down; immediately acknowledge
 * the exit to the FCS wait-counter.
 */
void
bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
{
	bfa_fcs_modexit_comp(fcs);
}
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
new file mode 100644
index 000000000000..4754a0e9006a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcxp.c
@@ -0,0 +1,782 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_uf.h>
20#include <cs/bfa_debug.h>
21
22BFA_TRC_FILE(HAL, FCXP);
23BFA_MODULE(fcxp);
24
25/**
26 * forward declarations
27 */
28static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
29static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
30 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
31static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
32 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
33static void bfa_fcxp_qresume(void *cbarg);
34static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
35 struct bfi_fcxp_send_req_s *send_req);
36
37/**
38 * fcxp_pvt BFA FCXP private functions
39 */
40
41static void
42claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
43{
44 u8 *dm_kva = NULL;
45 u64 dm_pa;
46 u32 buf_pool_sz;
47
48 dm_kva = bfa_meminfo_dma_virt(mi);
49 dm_pa = bfa_meminfo_dma_phys(mi);
50
51 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
52
53 /*
54 * Initialize the fcxp req payload list
55 */
56 mod->req_pld_list_kva = dm_kva;
57 mod->req_pld_list_pa = dm_pa;
58 dm_kva += buf_pool_sz;
59 dm_pa += buf_pool_sz;
60 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
61
62 /*
63 * Initialize the fcxp rsp payload list
64 */
65 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
66 mod->rsp_pld_list_kva = dm_kva;
67 mod->rsp_pld_list_pa = dm_pa;
68 dm_kva += buf_pool_sz;
69 dm_pa += buf_pool_sz;
70 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
71
72 bfa_meminfo_dma_virt(mi) = dm_kva;
73 bfa_meminfo_dma_phys(mi) = dm_pa;
74}
75
76static void
77claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
78{
79 u16 i;
80 struct bfa_fcxp_s *fcxp;
81
82 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
83 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
84
85 INIT_LIST_HEAD(&mod->fcxp_free_q);
86 INIT_LIST_HEAD(&mod->fcxp_active_q);
87
88 mod->fcxp_list = fcxp;
89
90 for (i = 0; i < mod->num_fcxps; i++) {
91 fcxp->fcxp_mod = mod;
92 fcxp->fcxp_tag = i;
93
94 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
95 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
96 fcxp->reqq_waiting = BFA_FALSE;
97
98 fcxp = fcxp + 1;
99 }
100
101 bfa_meminfo_kva(mi) = (void *)fcxp;
102}
103
104static void
105bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
106 u32 *dm_len)
107{
108 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
109
110 if (num_fcxp_reqs == 0)
111 return;
112
113 /*
114 * Account for req/rsp payload
115 */
116 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
117 if (cfg->drvcfg.min_cfg)
118 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
119 else
120 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
121
122 /*
123 * Account for fcxp structs
124 */
125 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
126}
127
128static void
129bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
130 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
131{
132 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
133
134 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
135 mod->bfa = bfa;
136 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
137
138 /**
139 * Initialize FCXP request and response payload sizes.
140 */
141 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
142 if (!cfg->drvcfg.min_cfg)
143 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
144
145 INIT_LIST_HEAD(&mod->wait_q);
146
147 claim_fcxp_req_rsp_mem(mod, meminfo);
148 claim_fcxps_mem(mod, meminfo);
149}
150
/* Module hook: FCXP needs no work after IOC initialization. */
static void
bfa_fcxp_initdone(struct bfa_s *bfa)
{
}
155
/* Module hook: FCXP needs no teardown on detach. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
160
/* Module hook: FCXP needs no work on start. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
165
/* Module hook: FCXP needs no work on stop. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
170
171static void
172bfa_fcxp_iocdisable(struct bfa_s *bfa)
173{
174 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
175 struct bfa_fcxp_s *fcxp;
176 struct list_head *qe, *qen;
177
178 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
179 fcxp = (struct bfa_fcxp_s *) qe;
180 if (fcxp->caller == NULL) {
181 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
182 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
183 bfa_fcxp_free(fcxp);
184 } else {
185 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
186 bfa_cb_queue(bfa, &fcxp->hcb_qe,
187 __bfa_fcxp_send_cbfn, fcxp);
188 }
189 }
190}
191
192static struct bfa_fcxp_s *
193bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
194{
195 struct bfa_fcxp_s *fcxp;
196
197 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
198
199 if (fcxp)
200 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
201
202 return (fcxp);
203}
204
205static void
206bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
207{
208 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
209 struct bfa_fcxp_wqe_s *wqe;
210
211 bfa_q_deq(&mod->wait_q, &wqe);
212 if (wqe) {
213 bfa_trc(mod->bfa, fcxp->fcxp_tag);
214 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
215 return;
216 }
217
218 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
219 list_del(&fcxp->qe);
220 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
221}
222
/*
 * No-op completion callback installed for discarded FCXPs: any late
 * firmware response is silently dropped here.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len,
		u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
230
231static void
232__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
233{
234 struct bfa_fcxp_s *fcxp = cbarg;
235
236 if (complete) {
237 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
238 fcxp->rsp_status, fcxp->rsp_len,
239 fcxp->residue_len, &fcxp->rsp_fchs);
240 } else {
241 bfa_fcxp_free(fcxp);
242 }
243}
244
/*
 * Firmware completion for an FCXP send: byte-swap the response
 * fields, locate the FCXP by tag, log the RX, and deliver the result
 * to the requester (inline for BFA-internal requests, deferred via
 * the callback queue for driver/FCS requests).
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	/* Response fields arrive in network byte order. */
	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);

	/**
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	/* NOTE(review): redundant with the assert above; kept as-is. */
	if (fcxp->send_cbfn != NULL) {
		if (fcxp->caller == NULL) {
			bfa_trc(mod->bfa, fcxp->fcxp_tag);

			/* BFA-internal request: complete inline. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * The callback does not free the FCXP itself;
			 * reclaim it here.
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/*
			 * Driver/FCS request: stash the results on the
			 * FCXP and defer the callback via the hcb queue.
			 */
			bfa_trc(mod->bfa, fcxp->fcxp_tag);
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, fcxp_tag);
	}
}
296
297static void
298hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
299{
300 union bfi_addr_u sga_zero = { {0} };
301
302 sge->sg_len = reqlen;
303 sge->flags = BFI_SGE_DATA_LAST;
304 bfa_dma_addr_set(sge[0].sga, req_pa);
305 bfa_sge_to_be(sge);
306 sge++;
307
308 sge->sga = sga_zero;
309 sge->sg_len = reqlen;
310 sge->flags = BFI_SGE_PGDLEN;
311 bfa_sge_to_be(sge);
312}
313
314static void
315hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
316 struct fchs_s *fchs)
317{
318 /*
319 * TODO: TX ox_id
320 */
321 if (reqlen > 0) {
322 if (fcxp->use_ireqbuf) {
323 u32 pld_w0 =
324 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
325
326 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
327 BFA_PL_EID_TX,
328 reqlen + sizeof(struct fchs_s), fchs, pld_w0);
329 } else {
330 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
331 BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
332 fchs);
333 }
334 } else {
335 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
336 reqlen + sizeof(struct fchs_s), fchs);
337 }
338}
339
340static void
341hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
342 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
343{
344 if (fcxp_rsp->rsp_len > 0) {
345 if (fcxp->use_irspbuf) {
346 u32 pld_w0 =
347 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
348
349 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
350 BFA_PL_EID_RX,
351 (u16) fcxp_rsp->rsp_len,
352 &fcxp_rsp->fchs, pld_w0);
353 } else {
354 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
355 BFA_PL_EID_RX,
356 (u16) fcxp_rsp->rsp_len,
357 &fcxp_rsp->fchs);
358 }
359 } else {
360 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
361 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
362 }
363}
364
365/**
366 * Handler to resume sending fcxp when space in available in cpe queue.
367 */
368static void
369bfa_fcxp_qresume(void *cbarg)
370{
371 struct bfa_fcxp_s *fcxp = cbarg;
372 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
373 struct bfi_fcxp_send_req_s *send_req;
374
375 fcxp->reqq_waiting = BFA_FALSE;
376 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
377 bfa_fcxp_queue(fcxp, send_req);
378}
379
380/**
381 * Queue fcxp send request to foimrware.
382 */
383static void
384bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
385{
386 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
387 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
388 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
389 struct bfa_rport_s *rport = reqi->bfa_rport;
390
391 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
392 bfa_lpuid(bfa));
393
394 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
395 if (rport) {
396 send_req->rport_fw_hndl = rport->fw_handle;
397 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
398 if (send_req->max_frmsz == 0)
399 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
400 } else {
401 send_req->rport_fw_hndl = 0;
402 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
403 }
404
405 send_req->vf_id = bfa_os_htons(reqi->vf_id);
406 send_req->lp_tag = reqi->lp_tag;
407 send_req->class = reqi->class;
408 send_req->rsp_timeout = rspi->rsp_timeout;
409 send_req->cts = reqi->cts;
410 send_req->fchs = reqi->fchs;
411
412 send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
413 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
414
415 /*
416 * setup req sgles
417 */
418 if (fcxp->use_ireqbuf == 1) {
419 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
420 BFA_FCXP_REQ_PLD_PA(fcxp));
421 } else {
422 if (fcxp->nreq_sgles > 0) {
423 bfa_assert(fcxp->nreq_sgles == 1);
424 hal_fcxp_set_local_sges(send_req->req_sge,
425 reqi->req_tot_len,
426 fcxp->req_sga_cbfn(fcxp->caller,
427 0));
428 } else {
429 bfa_assert(reqi->req_tot_len == 0);
430 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
431 }
432 }
433
434 /*
435 * setup rsp sgles
436 */
437 if (fcxp->use_irspbuf == 1) {
438 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
439
440 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
441 BFA_FCXP_RSP_PLD_PA(fcxp));
442
443 } else {
444 if (fcxp->nrsp_sgles > 0) {
445 bfa_assert(fcxp->nrsp_sgles == 1);
446 hal_fcxp_set_local_sges(send_req->rsp_sge,
447 rspi->rsp_maxlen,
448 fcxp->rsp_sga_cbfn(fcxp->caller,
449 0));
450 } else {
451 bfa_assert(rspi->rsp_maxlen == 0);
452 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
453 }
454 }
455
456 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
457
458 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
459
460 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
461 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
462}
463
464
465/**
466 * hal_fcxp_api BFA FCXP API
467 */
468
469/**
470 * Allocate an FCXP instance to send a response or to send a request
471 * that has a response. Request/response buffers are allocated by caller.
472 *
473 * @param[in] bfa BFA bfa instance
474 * @param[in] nreq_sgles Number of SG elements required for request
475 * buffer. 0, if fcxp internal buffers are used.
476 * Use bfa_fcxp_get_reqbuf() to get the
477 * internal req buffer.
478 * @param[in] req_sgles SG elements describing request buffer. Will be
479 * copied in by BFA and hence can be freed on
480 * return from this function.
481 * @param[in] get_req_sga function ptr to be called to get a request SG
482 * Address (given the sge index).
483 * @param[in] get_req_sglen function ptr to be called to get a request SG
484 * len (given the sge index).
485 * @param[in] get_rsp_sga function ptr to be called to get a response SG
486 * Address (given the sge index).
487 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
488 * len (given the sge index).
489 *
490 * @return FCXP instance. NULL on failure.
491 */
492struct bfa_fcxp_s *
493bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
494 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
495 bfa_fcxp_get_sglen_t req_sglen_cbfn,
496 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
497 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
498{
499 struct bfa_fcxp_s *fcxp = NULL;
500 u32 nreq_sgpg, nrsp_sgpg;
501
502 bfa_assert(bfa != NULL);
503
504 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
505 if (fcxp == NULL)
506 return (NULL);
507
508 bfa_trc(bfa, fcxp->fcxp_tag);
509
510 fcxp->caller = caller;
511
512 if (nreq_sgles == 0) {
513 fcxp->use_ireqbuf = 1;
514 } else {
515 bfa_assert(req_sga_cbfn != NULL);
516 bfa_assert(req_sglen_cbfn != NULL);
517
518 fcxp->use_ireqbuf = 0;
519 fcxp->req_sga_cbfn = req_sga_cbfn;
520 fcxp->req_sglen_cbfn = req_sglen_cbfn;
521
522 fcxp->nreq_sgles = nreq_sgles;
523
524 /*
525 * alloc required sgpgs
526 */
527 if (nreq_sgles > BFI_SGE_INLINE) {
528 nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
529
530 if (bfa_sgpg_malloc
531 (bfa, &fcxp->req_sgpg_q, nreq_sgpg)
532 != BFA_STATUS_OK) {
533 /* bfa_sgpg_wait(bfa, &fcxp->req_sgpg_wqe,
534 nreq_sgpg); */
535 /*
536 * TODO
537 */
538 }
539 }
540 }
541
542 if (nrsp_sgles == 0) {
543 fcxp->use_irspbuf = 1;
544 } else {
545 bfa_assert(rsp_sga_cbfn != NULL);
546 bfa_assert(rsp_sglen_cbfn != NULL);
547
548 fcxp->use_irspbuf = 0;
549 fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
550 fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
551
552 fcxp->nrsp_sgles = nrsp_sgles;
553 /*
554 * alloc required sgpgs
555 */
556 if (nrsp_sgles > BFI_SGE_INLINE) {
557 nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
558
559 if (bfa_sgpg_malloc
560 (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
561 != BFA_STATUS_OK) {
562 /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
563 nrsp_sgpg); */
564 /*
565 * TODO
566 */
567 }
568 }
569 }
570
571 return (fcxp);
572}
573
574/**
575 * Get the internal request buffer pointer
576 *
577 * @param[in] fcxp BFA fcxp pointer
578 *
579 * @return pointer to the internal request buffer
580 */
581void *
582bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
583{
584 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
585 void *reqbuf;
586
587 bfa_assert(fcxp->use_ireqbuf == 1);
588 reqbuf = ((u8 *)mod->req_pld_list_kva) +
589 fcxp->fcxp_tag * mod->req_pld_sz;
590 return reqbuf;
591}
592
593u32
594bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
595{
596 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
597
598 return mod->req_pld_sz;
599}
600
601/**
602 * Get the internal response buffer pointer
603 *
604 * @param[in] fcxp BFA fcxp pointer
605 *
606 * @return pointer to the internal request buffer
607 */
608void *
609bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
610{
611 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
612 void *rspbuf;
613
614 bfa_assert(fcxp->use_irspbuf == 1);
615
616 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
617 fcxp->fcxp_tag * mod->rsp_pld_sz;
618 return rspbuf;
619}
620
621/**
622 * Free the BFA FCXP
623 *
624 * @param[in] fcxp BFA fcxp pointer
625 *
626 * @return void
627 */
628void
629bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
630{
631 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
632
633 bfa_assert(fcxp != NULL);
634 bfa_trc(mod->bfa, fcxp->fcxp_tag);
635 bfa_fcxp_put(fcxp);
636}
637
638/**
639 * Send a FCXP request
640 *
641 * @param[in] fcxp BFA fcxp pointer
642 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
643 * @param[in] vf_id virtual Fabric ID
644 * @param[in] lp_tag lport tag
645 * @param[in] cts use Continous sequence
646 * @param[in] cos fc Class of Service
647 * @param[in] reqlen request length, does not include FCHS length
648 * @param[in] fchs fc Header Pointer. The header content will be copied
649 * in by BFA.
650 *
651 * @param[in] cbfn call back function to be called on receiving
652 * the response
653 * @param[in] cbarg arg for cbfn
654 * @param[in] rsp_timeout
655 * response timeout
656 *
657 * @return bfa_status_t
658 */
659void
660bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
661 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
662 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
663 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
664{
665 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
666 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
667 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
668 struct bfi_fcxp_send_req_s *send_req;
669
670 bfa_trc(bfa, fcxp->fcxp_tag);
671
672 /**
673 * setup request/response info
674 */
675 reqi->bfa_rport = rport;
676 reqi->vf_id = vf_id;
677 reqi->lp_tag = lp_tag;
678 reqi->class = cos;
679 rspi->rsp_timeout = rsp_timeout;
680 reqi->cts = cts;
681 reqi->fchs = *fchs;
682 reqi->req_tot_len = reqlen;
683 rspi->rsp_maxlen = rsp_maxlen;
684 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
685 fcxp->send_cbarg = cbarg;
686
687 /**
688 * If no room in CPE queue, wait for
689 */
690 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
691 if (!send_req) {
692 bfa_trc(bfa, fcxp->fcxp_tag);
693 fcxp->reqq_waiting = BFA_TRUE;
694 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
695 return;
696 }
697
698 bfa_fcxp_queue(fcxp, send_req);
699}
700
/**
 * Abort a BFA FCXP — NOT IMPLEMENTED: unconditionally asserts.
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return BFA_STATUS_OK (only reached when asserts are compiled out)
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_assert(0);
	return (BFA_STATUS_OK);
}
714
715void
716bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
717 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
718{
719 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
720
721 bfa_assert(list_empty(&mod->fcxp_free_q));
722
723 wqe->alloc_cbfn = alloc_cbfn;
724 wqe->alloc_cbarg = alloc_cbarg;
725 list_add_tail(&wqe->qe, &mod->wait_q);
726}
727
728void
729bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
730{
731 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
732
733 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
734 list_del(&wqe->qe);
735}
736
737void
738bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
739{
740 /**
741 * If waiting for room in request queue, cancel reqq wait
742 * and free fcxp.
743 */
744 if (fcxp->reqq_waiting) {
745 fcxp->reqq_waiting = BFA_FALSE;
746 bfa_reqq_wcancel(&fcxp->reqq_wqe);
747 bfa_fcxp_free(fcxp);
748 return;
749 }
750
751 fcxp->send_cbfn = bfa_fcxp_null_comp;
752}
753
754
755
756/**
757 * hal_fcxp_public BFA FCXP public functions
758 */
759
760void
761bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
762{
763 switch (msg->mhdr.msg_id) {
764 case BFI_FCXP_I2H_SEND_RSP:
765 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
766 break;
767
768 default:
769 bfa_trc(bfa, msg->mhdr.msg_id);
770 bfa_assert(0);
771 }
772}
773
774u32
775bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
776{
777 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
778
779 return mod->rsp_pld_sz;
780}
781
782
diff --git a/drivers/scsi/bfa/bfa_fcxp_priv.h b/drivers/scsi/bfa/bfa_fcxp_priv.h
new file mode 100644
index 000000000000..4cda49397da0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcxp_priv.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCXP_PRIV_H__
19#define __BFA_FCXP_PRIV_H__
20
21#include <cs/bfa_sm.h>
22#include <protocol/fc.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_fcxp.h>
25
26#define BFA_FCXP_MIN (1)
27#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
28#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
29
/*
 * FCXP module state: the FCXP array, its free/active lists, and the
 * contiguous DMA pools backing the internal request/response buffers.
 */
struct bfa_fcxp_mod_s {
	struct bfa_s      *bfa;            /* backpointer to BFA */
	struct bfa_fcxp_s *fcxp_list;      /* array of FCXPs */
	u16               num_fcxps;       /* max num FCXP requests */
	struct list_head  fcxp_free_q;     /* free FCXPs */
	struct list_head  fcxp_active_q;   /* active FCXPs */
	void              *req_pld_list_kva; /* list of FCXP req pld */
	u64               req_pld_list_pa; /* list of FCXP req pld */
	void              *rsp_pld_list_kva; /* list of FCXP resp pld */
	u64               rsp_pld_list_pa; /* list of FCXP resp pld */
	struct list_head  wait_q;          /* wait queue for free fcxp */
	u32               req_pld_sz;      /* per-FCXP request buffer size */
	u32               rsp_pld_sz;      /* per-FCXP response buffer size */
};
44
45#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
46#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
47
48typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
49 void *cb_arg, bfa_status_t req_status,
50 u32 rsp_len, u32 resid_len,
51 struct fchs_s *rsp_fchs);
52
/**
 * Information needed for a FCXP request
 */
struct bfa_fcxp_req_info_s {
	struct bfa_rport_s *bfa_rport;	/* Pointer to the bfa rport that was
					 * returned from bfa_rport_create().
					 * This could be left NULL for WKA or
					 * for FCXP interactions before the
					 * rport nexus is established.
					 */
	struct fchs_s	fchs;	/* request FC header structure */
	u8		cts;	/* continuous sequence */
	u8		class;	/* FC class for the request/response */
	u16	max_frmsz;	/* max send frame size */
	u16	vf_id;	/* vsan tag if applicable */
	u8	lp_tag;	/* lport tag */
	u32	req_tot_len;	/* request payload total length */
};
71
/*
 * Information needed for (and filled in from) a FCXP response.
 */
struct bfa_fcxp_rsp_info_s {
	struct fchs_s	rsp_fchs;	/* response frame's FC header is
					 * sent back in this field */
	u8	rsp_timeout;	/* timeout in seconds, 0 - no response */
	u8	rsvd2[3];	/* reserved/padding */
	u32	rsp_maxlen;	/* max response length expected */
};
80
/*
 * One FC exchange (send request + optional response), tracked from
 * allocation through firmware completion.
 */
struct bfa_fcxp_s {
	struct list_head 	qe;		/* fcxp queue element */
	bfa_sm_t	sm;             /* state machine */
	void           	*caller;	/* driver or fcs */
	struct bfa_fcxp_mod_s *fcxp_mod;
					/* back pointer to fcxp mod */
	u16        fcxp_tag;	/* internal tag */
	struct bfa_fcxp_req_info_s req_info;
					/* request info */
	struct bfa_fcxp_rsp_info_s rsp_info;
					/* response info */
	u8 	use_ireqbuf;	/* use internal req buf */
	u8		use_irspbuf;	/* use internal rsp buf */
	u32        nreq_sgles;	/* num request SGLEs */
	u32        nrsp_sgles;	/* num response SGLEs */
	struct list_head req_sgpg_q;	/* SG pages for request buf */
	struct list_head req_sgpg_wqe;	/* wait queue for req SG page */
	struct list_head rsp_sgpg_q;	/* SG pages for response buf */
	struct list_head rsp_sgpg_wqe;	/* wait queue for rsp SG page */

	bfa_fcxp_get_sgaddr_t req_sga_cbfn;
					/* SG elem addr user function */
	bfa_fcxp_get_sglen_t req_sglen_cbfn;
					/* SG elem len user function */
	bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
					/* SG elem addr user function */
	bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
					/* SG elem len user function */
	bfa_cb_fcxp_send_t send_cbfn;   /* send completion callback */
	void		*send_cbarg;	/* callback arg */
	struct bfa_sge_s   req_sge[BFA_FCXP_MAX_SGES];
					/* req SG elems */
	struct bfa_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];
					/* rsp SG elems */
	u8		rsp_status;	/* comp: rsp status */
	u32        rsp_len;	/* comp: actual response len */
	u32        residue_len;	/* comp: residual rsp length */
	struct fchs_s		rsp_fchs;	/* comp: response fchs */
	struct bfa_cb_qe_s    hcb_qe;	/* comp: callback qelem */
	struct bfa_reqq_wait_s	reqq_wqe;	/* CPE-queue wait element */
	bfa_boolean_t	reqq_waiting;	/* waiting for CPE queue space */
};
123
124#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
125
126#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
127#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
128
129#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
130 ((_fcxp)->fcxp_mod->req_pld_list_pa + \
131 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
132
133#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
134 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
135 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
136
137void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
138#endif /* __BFA_FCXP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h
new file mode 100644
index 000000000000..1ec1355924d9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fwimg_priv.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FWIMG_PRIV_H__
19#define __BFA_FWIMG_PRIV_H__
20
21#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
22#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
23
24extern u32 *bfi_image_ct_get_chunk(u32 off);
25extern u32 bfi_image_ct_size;
26extern u32 *bfi_image_cb_get_chunk(u32 off);
27extern u32 bfi_image_cb_size;
28extern u32 *bfi_image_cb;
29extern u32 *bfi_image_ct;
30
31#endif /* __BFA_FWIMG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
new file mode 100644
index 000000000000..ede1438619e2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -0,0 +1,142 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_priv.h>
19#include <bfi/bfi_cbreg.h>
20
21void
22bfa_hwcb_reginit(struct bfa_s *bfa)
23{
24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
27
28 if (fn == 0) {
29 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
30 bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
31 } else {
32 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
33 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
34 }
35
36 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
37 /*
38 * CPE registers
39 */
40 q = CPE_Q_NUM(fn, i);
41 bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
42 bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
43 bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
44
45 /*
46 * RME registers
47 */
48 q = CPE_Q_NUM(fn, i);
49 bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
50 bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
51 bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
52 }
53}
54
/* Default RSP-queue acknowledge: nothing to do on Crossbow. */
void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}
59
/*
 * MSI-X RSP-queue acknowledge: write this function's RME bit for the
 * given queue to the interrupt status register (presumably
 * write-1-to-clear — confirm against bfi_cbreg.h).
 */
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
		__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
}
66
/*
 * Report the MSI-X vector bitmap for this PCI function: its CPE/RME
 * queue vectors and LPU mailbox vector, plus the error vectors shared
 * by both functions.
 */
void
bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
			 u32 *num_vecs, u32 *max_vec_bit)
{
/* NOTE: preprocessor macro is file-scope; also used by bfa_hwcb_msix_init. */
#define __HFN_NUMINTS	13
	if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
		/* Function 0 owns queues 0-3 and LPU0's mailbox. */
		*msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				   __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				   __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				   __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				   __HFN_INT_MBOX_LPU0);
		*max_vec_bit = __HFN_INT_MBOX_LPU0;
	} else {
		/* Function 1 owns queues 4-7 and LPU1's mailbox. */
		*msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				   __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				   __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				   __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				   __HFN_INT_MBOX_LPU1);
		*max_vec_bit = __HFN_INT_MBOX_LPU1;
	}

	/* Error sources are visible to both functions. */
	*msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
			   __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
	*num_vecs = __HFN_NUMINTS;
}
92
/**
 * No special setup required for crossbow -- vector assignments are implicit.
 * Install per-vector handlers: either one shared handler (nvecs == 1)
 * or per-queue request/response handlers plus the LPU/error handler.
 */
void
bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
{
	int i;

	/* Only the single-vector or full-complement configurations. */
	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));

	bfa->msix.nvecs = nvecs;
	if (nvecs == 1) {
		/* One vector services everything. */
		for (i = 0; i < BFA_MSIX_CB_MAX; i++)
			bfa->msix.handler[i] = bfa_msix_all;
		return;
	}

	for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_reqq;

	for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_rspq;

	/* Remaining vectors go to the LPU mailbox / error handler. */
	for (; i < BFA_MSIX_CB_MAX; i++)
		bfa->msix.handler[i] = bfa_msix_lpu_err;
}
119
/**
 * Crossbow -- dummy, interrupts are masked
 */
void
bfa_hwcb_msix_install(struct bfa_s *bfa)
{
}
127
/* Crossbow -- dummy, nothing to uninstall. */
void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}
132
/**
 * No special enable/disable -- vector assignments are implicit.
 */
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	/*
	 * NOTE(review): the 'msix' argument is ignored and the MSI-X ack
	 * routine is installed unconditionally — confirm this is intended
	 * for Crossbow (INTX-mode ack via bfa_hwcb_rspq_ack() is a no-op).
	 */
	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
141
142
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
new file mode 100644
index 000000000000..51ae5740e6e9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_priv.h>
19#include <bfi/bfi_ctreg.h>
20#include <bfa_ioc.h>
21
22BFA_TRC_FILE(HAL, IOCFC_CT);
23
24static u32 __ct_msix_err_vec_reg[] = {
25 HOST_MSIX_ERR_INDEX_FN0,
26 HOST_MSIX_ERR_INDEX_FN1,
27 HOST_MSIX_ERR_INDEX_FN2,
28 HOST_MSIX_ERR_INDEX_FN3,
29};
30
31static void
32bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
33{
34 int fn = bfa_ioc_pcifn(&bfa->ioc);
35 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
36
37 if (msix)
38 bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
39 else
40 bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
41}
42
43/**
44 * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
45 */
static void
bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec)
{
	/* Intentionally empty: sink for spurious vectors during chip-reinit. */
}
50
/*
 * Compute and cache the kva addresses of this PCI function's interrupt
 * and queue pointer registers.
 */
void
bfa_hwct_reginit(struct bfa_s *bfa)
{
	struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
	bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
	int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);

	/* Per-function interrupt status/mask registers. */
	if (fn == 0) {
		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
		bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
	} else {
		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
		bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
	}

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		/*
		 * CPE registers
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5));
		bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5));
		bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5));
		bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));

		/*
		 * RME registers
		 * NOTE(review): q is recomputed with CPE_Q_NUM here rather
		 * than an RME-specific macro -- looks like a copy/paste.
		 * Confirm the CPE and RME queue numbering really coincide.
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
		bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
		bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
		bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
	}
}
86
87void
88bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
89{
90 u32 r32;
91
92 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
93 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
94}
95
96void
97bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
98 u32 *num_vecs, u32 *max_vec_bit)
99{
100 *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1;
101 *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1));
102 *num_vecs = BFA_MSIX_CT_MAX;
103}
104
105/**
106 * Setup MSI-X vector for catapult
107 */
void
bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
{
	/* Either one shared vector or the full catapult complement. */
	bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX));
	bfa_trc(bfa, nvecs);

	bfa->msix.nvecs = nvecs;
	/* Park all vectors on the dummy handler until install time. */
	bfa_hwct_msix_uninstall(bfa);
}
117
118void
119bfa_hwct_msix_install(struct bfa_s *bfa)
120{
121 int i;
122
123 if (bfa->msix.nvecs == 0)
124 return;
125
126 if (bfa->msix.nvecs == 1) {
127 for (i = 0; i < BFA_MSIX_CT_MAX; i++)
128 bfa->msix.handler[i] = bfa_msix_all;
129 return;
130 }
131
132 for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++)
133 bfa->msix.handler[i] = bfa_msix_reqq;
134
135 for (; i <= BFA_MSIX_RME_Q3; i++)
136 bfa->msix.handler[i] = bfa_msix_rspq;
137
138 bfa_assert(i == BFA_MSIX_LPU_ERR);
139 bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
140}
141
142void
143bfa_hwct_msix_uninstall(struct bfa_s *bfa)
144{
145 int i;
146
147 for (i = 0; i < BFA_MSIX_CT_MAX; i++)
148 bfa->msix.handler[i] = bfa_hwct_msix_dummy;
149}
150
151/**
152 * Enable MSI-X vectors
153 */
void
bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	bfa_trc(bfa, 0);
	/* Route the LPU error interrupt to its vector (or 0 for INTx). */
	bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
	bfa_ioc_isr_mode_set(&bfa->ioc, msix);
}
161
162
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
new file mode 100644
index 000000000000..0ca125712a04
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -0,0 +1,218 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfi/bfi_cbreg.h>
19#include <bfa_port_priv.h>
20#include <bfa_intr_priv.h>
21#include <cs/bfa_debug.h>
22
23BFA_TRC_FILE(HAL, INTR);
24
/*
 * Hardware error interrupt: forward to the IOC error handler.
 */
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
	bfa_ioc_error_isr(&bfa->ioc);
}

/*
 * LPU mailbox interrupt: forward to the IOC mailbox handler.
 */
static void
bfa_msix_lpu(struct bfa_s *bfa)
{
	bfa_ioc_mbox_isr(&bfa->ioc);
}

/*
 * Single-vector (shared) MSI-X handler: poll all interrupt sources.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
42
43/**
44 * hal_intr_api
45 */
46bfa_boolean_t
47bfa_intx(struct bfa_s *bfa)
48{
49 u32 intr, qintr;
50 int queue;
51
52 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
53 if (!intr)
54 return BFA_FALSE;
55
56 /**
57 * RME completion queue interrupt
58 */
59 qintr = intr & __HFN_INT_RME_MASK;
60 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
61
62 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) {
63 if (intr & (__HFN_INT_RME_Q0 << queue))
64 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
65 }
66 intr &= ~qintr;
67 if (!intr)
68 return BFA_TRUE;
69
70 /**
71 * CPE completion queue interrupt
72 */
73 qintr = intr & __HFN_INT_CPE_MASK;
74 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
75
76 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
77 if (intr & (__HFN_INT_CPE_Q0 << queue))
78 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
79 }
80 intr &= ~qintr;
81 if (!intr)
82 return BFA_TRUE;
83
84 bfa_msix_lpu_err(bfa, intr);
85
86 return BFA_TRUE;
87}
88
89void
90bfa_isr_enable(struct bfa_s *bfa)
91{
92 u32 intr_unmask;
93 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
94
95 bfa_trc(bfa, pci_func);
96
97 bfa_msix_install(bfa);
98 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
99 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
100
101 if (pci_func == 0)
102 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
103 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
104 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
105 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
106 __HFN_INT_MBOX_LPU0);
107 else
108 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
109 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
110 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
111 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
112 __HFN_INT_MBOX_LPU1);
113
114 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
115 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
116 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
117}
118
void
bfa_isr_disable(struct bfa_s *bfa)
{
	/* Back to INTx mode, mask every source, detach vector handlers. */
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
	bfa_msix_uninstall(bfa);
}
126
127void
128bfa_msix_reqq(struct bfa_s *bfa, int qid)
129{
130 struct list_head *waitq, *qe, *qen;
131 struct bfa_reqq_wait_s *wqe;
132
133 qid &= (BFI_IOC_MAX_CQS - 1);
134
135 waitq = bfa_reqq(bfa, qid);
136 list_for_each_safe(qe, qen, waitq) {
137 /**
138 * Callback only as long as there is room in request queue
139 */
140 if (bfa_reqq_full(bfa, qid))
141 break;
142
143 list_del(qe);
144 wqe = (struct bfa_reqq_wait_s *) qe;
145 wqe->qresume(wqe->cbarg);
146 }
147}
148
/*
 * Catch-all for messages with no registered handler: trace the message
 * header, assert, and freeze the trace buffer for postmortem analysis.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}
158
/*
 * Response queue interrupt: ack the queue, then dispatch every message
 * between the consumer (CI) and producer (PI) indices to its class
 * handler, and finally publish the new CI to hardware.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;

	bfa_trc_fp(bfa, rsp_qid);

	rsp_qid &= (BFI_IOC_MAX_CQS - 1);

	/* Hardware-specific interrupt acknowledge. */
	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);

	ci = bfa_rspq_ci(bfa, rsp_qid);
	pi = bfa_rspq_pi(bfa, rsp_qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, rsp_qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 * NOTE(review): CI is advanced to PI even when rme_process is off,
	 * discarding any pending messages -- presumably a deliberate flush;
	 * confirm.
	 */
	bfa_rspq_ci(bfa, rsp_qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
	bfa_os_mmiowb();
}
195
196void
197bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
198{
199 u32 intr;
200
201 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
202
203 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
204 bfa_msix_lpu(bfa);
205
206 if (intr & (__HFN_INT_ERR_EMC |
207 __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 |
208 __HFN_INT_ERR_PSS))
209 bfa_msix_errint(bfa, intr);
210}
211
/*
 * Register isr_func as the dispatch handler for message class mc.
 */
void
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
{
	bfa_isrs[mc] = isr_func;
}
217
218
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
new file mode 100644
index 000000000000..8ce6e6b105c8
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_intr_priv.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_INTR_PRIV_H__
19#define __BFA_INTR_PRIV_H__
20
21/**
22 * Message handler
23 */
24typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
25void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
26void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
27
28
/* Request queue producer index (driver-side copy). */
#define bfa_reqq_pi(__bfa, __reqq) (__bfa)->iocfc.req_cq_pi[__reqq]
/* Request queue consumer index, read from the shadow copy in host memory. */
#define bfa_reqq_ci(__bfa, __reqq) \
	*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)

/* True when advancing PI would catch CI (queue sizes are powers of two). */
#define bfa_reqq_full(__bfa, __reqq) \
	(((bfa_reqq_pi(__bfa, __reqq) + 1) & \
	((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
	bfa_reqq_ci(__bfa, __reqq))

/* Next free request element, or NULL if the queue is full. */
#define bfa_reqq_next(__bfa, __reqq) \
	(bfa_reqq_full(__bfa, __reqq) ? NULL : \
	((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
	+ bfa_reqq_pi((__bfa), (__reqq)))))

/* Advance PI with wrap and notify hardware of the new request element. */
#define bfa_reqq_produce(__bfa, __reqq) do { \
	(__bfa)->iocfc.req_cq_pi[__reqq]++; \
	(__bfa)->iocfc.req_cq_pi[__reqq] &= \
		((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
	bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
		(__bfa)->iocfc.req_cq_pi[__reqq]); \
	bfa_os_mmiowb(); \
} while (0)

/* Response queue producer index, read from the shadow copy in host memory. */
#define bfa_rspq_pi(__bfa, __rspq) \
	*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)

/* Response queue consumer index (driver-side copy). */
#define bfa_rspq_ci(__bfa, __rspq) (__bfa)->iocfc.rsp_cq_ci[__rspq]
/* Address of response element __ci in response queue __rspq. */
#define bfa_rspq_elem(__bfa, __rspq, __ci) \
	&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]
58
/*
 * Advance a circular-queue index with wrap (__size must be a power of 2).
 * Wrapped in do { } while (0) so the two statements stay together when
 * the macro is used in an unbraced if/else or loop body.
 */
#define CQ_INCR(__index, __size) do {			\
	(__index)++;					\
	(__index) &= ((__size) - 1);			\
} while (0)
61
62/**
63 * Queue element to wait for room in request queue. FIFO order is
64 * maintained when fullfilling requests.
65 */
66struct bfa_reqq_wait_s {
67 struct list_head qe;
68 void (*qresume) (void *cbarg);
69 void *cbarg;
70};
71
72/**
73 * Circular queue usage assignments
74 */
75enum {
76 BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
77 BFA_REQQ_FCXP = 0, /* all FCXP messages */
78 BFA_REQQ_LPS = 0, /* all lport service msgs */
79 BFA_REQQ_PORT = 0, /* all port messages */
80 BFA_REQQ_FLASH = 0, /* for flash module */
81 BFA_REQQ_DIAG = 0, /* for diag module */
82 BFA_REQQ_RPORT = 0, /* all port messages */
83 BFA_REQQ_SBOOT = 0, /* all san boot messages */
84 BFA_REQQ_QOS_LO = 1, /* all low priority IO */
85 BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
86 BFA_REQQ_QOS_HI = 3, /* all high priority IO */
87};
88
/*
 * Initialize a request-queue wait element with its resume callback and
 * callback argument; linkage is set up when the element is queued.
 */
static inline void
bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
		void *cbarg)
{
	wqe->qresume = qresume;
	wqe->cbarg = cbarg;
}
96
/* Head of the wait list for request queue __reqq. */
#define bfa_reqq(__bfa, __reqq) &(__bfa)->reqq_waitq[__reqq]

/**
 * static inline void
 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
 *
 * Queue a wait element; waiters are resumed in FIFO order when the
 * request queue drains (see bfa_msix_reqq()).
 */
#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
	\
	struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
	\
	bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
	bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
	\
	list_add_tail(&(__wqe)->qe, waitq); \
} while (0)

/* Remove a wait element from its queue (e.g. on cancel/cleanup). */
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
114
115#endif /* __BFA_INTR_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
new file mode 100644
index 000000000000..149348934ce3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -0,0 +1,2382 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h>
21#include <bfa_trcmod_priv.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <aen/bfa_aen_ioc.h>
26#include <aen/bfa_aen.h>
27#include <log/bfa_log_hal.h>
28#include <defs/bfa_defs_pci.h>
29
30BFA_TRC_FILE(HAL, IOC);
31
32/**
33 * IOC local definitions
34 */
35#define BFA_IOC_TOV 2000 /* msecs */
36#define BFA_IOC_HB_TOV 1000 /* msecs */
37#define BFA_IOC_HB_FAIL_MAX 4
38#define BFA_IOC_HWINIT_MAX 2
39#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
40#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
41 + BFA_IOC_TOV)
42
43#define bfa_ioc_timer_start(__ioc) \
44 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
45 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
46#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
47
48#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
49#define BFA_DBG_FWTRC_LEN \
50 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
51 (sizeof(struct bfa_trc_mod_s) - \
52 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
53#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
54#define bfa_ioc_stats(_ioc, _stats) (_ioc)->stats._stats ++
55
56#define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
57#define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
58#define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
59bfa_boolean_t bfa_auto_recover = BFA_FALSE;
60
61/*
62 * forward declarations
63 */
64static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
65 enum bfa_ioc_aen_event event);
66static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
67static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
68static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
69static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
70static void bfa_ioc_timeout(void *ioc);
71static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
72static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
73static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
74static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
75static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
76static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
78static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
79static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
80static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
81static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
82static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
83static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
84
85/**
86 * bfa_ioc_sm
87 */
88
89/**
90 * IOC state machine events
91 */
/* Events driving the IOC state machine (bfa_ioc_sm_*). */
enum ioc_event {
	IOC_E_ENABLE = 1, /* IOC enable request */
	IOC_E_DISABLE = 2, /* IOC disable request */
	IOC_E_TIMEOUT = 3, /* f/w response timeout */
	IOC_E_FWREADY = 4, /* f/w initialization done */
	IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */
	IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */
	IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */
	IOC_E_HBFAIL = 8, /* heartbeat failure */
	IOC_E_HWERROR = 9, /* hardware error interrupt */
	IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */
	IOC_E_DETACH = 11, /* driver detach cleanup */
};
105
106bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
107bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
108bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
109bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
110bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
111bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
112bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
113bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
114bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
115bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
116bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
117bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
118
/*
 * Maps each state-machine handler to its externally visible IOC state
 * (used when reporting the current state to upper layers).
 */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
133
134/**
135 * Reset entry actions -- initialize state machine
136 */
137static void
138bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
139{
140 ioc->retry_count = 0;
141 ioc->auto_recover = bfa_auto_recover;
142}
143
144/**
145 * Beginning state. IOC is in reset state.
146 */
147static void
148bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
149{
150 bfa_trc(ioc, event);
151
152 switch (event) {
153 case IOC_E_ENABLE:
154 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
155 break;
156
157 case IOC_E_DISABLE:
158 bfa_ioc_disable_comp(ioc);
159 break;
160
161 case IOC_E_DETACH:
162 break;
163
164 default:
165 bfa_sm_fault(ioc, event);
166 }
167}
168
169/**
170 * Semaphore should be acquired for version check.
171 */
172static void
173bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
174{
175 bfa_ioc_hw_sem_get(ioc);
176}
177
178/**
179 * Awaiting h/w semaphore to continue with version check.
180 */
181static void
182bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
183{
184 bfa_trc(ioc, event);
185
186 switch (event) {
187 case IOC_E_SEMLOCKED:
188 if (bfa_ioc_firmware_lock(ioc)) {
189 ioc->retry_count = 0;
190 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
191 } else {
192 bfa_ioc_hw_sem_release(ioc);
193 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
194 }
195 break;
196
197 case IOC_E_DISABLE:
198 bfa_ioc_disable_comp(ioc);
199 /*
200 * fall through
201 */
202
203 case IOC_E_DETACH:
204 bfa_ioc_hw_sem_get_cancel(ioc);
205 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
206 break;
207
208 case IOC_E_FWREADY:
209 break;
210
211 default:
212 bfa_sm_fault(ioc, event);
213 }
214}
215
216/**
217 * Notify enable completion callback and generate mismatch AEN.
218 */
219static void
220bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
221{
222 /**
223 * Provide enable completion callback and AEN notification only once.
224 */
225 if (ioc->retry_count == 0) {
226 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
227 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
228 }
229 ioc->retry_count++;
230 bfa_ioc_timer_start(ioc);
231}
232
233/**
234 * Awaiting firmware version match.
235 */
236static void
237bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
238{
239 bfa_trc(ioc, event);
240
241 switch (event) {
242 case IOC_E_TIMEOUT:
243 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
244 break;
245
246 case IOC_E_DISABLE:
247 bfa_ioc_disable_comp(ioc);
248 /*
249 * fall through
250 */
251
252 case IOC_E_DETACH:
253 bfa_ioc_timer_stop(ioc);
254 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
255 break;
256
257 case IOC_E_FWREADY:
258 break;
259
260 default:
261 bfa_sm_fault(ioc, event);
262 }
263}
264
265/**
266 * Request for semaphore.
267 */
268static void
269bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
270{
271 bfa_ioc_hw_sem_get(ioc);
272}
273
274/**
275 * Awaiting semaphore for h/w initialzation.
276 */
277static void
278bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
279{
280 bfa_trc(ioc, event);
281
282 switch (event) {
283 case IOC_E_SEMLOCKED:
284 ioc->retry_count = 0;
285 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
286 break;
287
288 case IOC_E_DISABLE:
289 bfa_ioc_hw_sem_get_cancel(ioc);
290 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
291 break;
292
293 default:
294 bfa_sm_fault(ioc, event);
295 }
296}
297
298
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		/* Retry h/w init up to BFA_IOC_HWINIT_MAX times (forced). */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
349
350
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		/* Re-run h/w init (from UNINIT) up to the retry budget. */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		/* Firmware restarted underneath us: re-send the enable. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
407
408
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
449
450
/* Operational: notify success, start heartbeat monitoring, post AEN. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled -- nothing to do. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
492
493
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_HWERROR:
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		/* Timeout is treated as disable complete. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
526
527/**
528 * IOC disable completion entry.
529 */
530static void
531bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
532{
533 bfa_ioc_disable_comp(ioc);
534}
535
536static void
537bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
538{
539 bfa_trc(ioc, event);
540
541 switch (event) {
542 case IOC_E_ENABLE:
543 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
544 break;
545
546 case IOC_E_DISABLE:
547 ioc->cbfn->disable_cbfn(ioc->bfa);
548 break;
549
550 case IOC_E_FWREADY:
551 break;
552
553 case IOC_E_DETACH:
554 bfa_ioc_firmware_unlock(ioc);
555 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
556 break;
557
558 default:
559 bfa_sm_fault(ioc, event);
560 }
561}
562
563
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);	/* delay before the next init attempt */
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		/* Retry the whole init sequence from semaphore acquisition. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
599
600
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
		/*
		 * Wait for halt to take effect
		 */
		bfa_reg_read(ioc->ioc_regs.ll_halt);
	}

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		/* The recovery timer only runs when auto_recover is set. */
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		/* Auto-recovery delay expired: restart initialization. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
679
680
681
682/**
683 * bfa_ioc_pvt BFA IOC private functions
684 */
685
686static void
687bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
688{
689 struct list_head *qe;
690 struct bfa_ioc_hbfail_notify_s *notify;
691
692 ioc->cbfn->disable_cbfn(ioc->bfa);
693
694 /**
695 * Notify common modules registered for notification.
696 */
697 list_for_each(qe, &ioc->hb_notify_q) {
698 notify = (struct bfa_ioc_hbfail_notify_s *)qe;
699 notify->cbfn(notify->cbarg);
700 }
701}
702
/*
 * Hardware semaphore poll timer expired: try to acquire it again.
 */
static void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

/*
 * Busy-wait for the IOC usage semaphore (bounded spin).
 */
static void
bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT 1000

	do {
		r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
		cnt++;
		if (cnt > BFA_SEM_SPINCNT)
			break;
	} while (r32 != 0);
	/*
	 * NOTE(review): if the semaphore is won exactly on the last
	 * iteration (cnt == BFA_SEM_SPINCNT), this assert fires even
	 * though acquisition succeeded -- confirm whether <= was meant.
	 */
	bfa_assert(cnt < BFA_SEM_SPINCNT);
}

/*
 * Release the usage semaphore by writing 1 to its register.
 */
static void
bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
}
732
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. NOTE(review): the original comment said release is a
	 * write of 0, but bfa_ioc_hw_sem_release() below writes 1 -- confirm
	 * the register protocol against the hardware spec.
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	/* Semaphore busy: retry after BFA_IOC_TOV msecs. */
	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_TOV);
}

static void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	/* Stop the retry timer armed by bfa_ioc_hw_sem_get(). */
	bfa_timer_stop(&ioc->sem_timer);
}
763
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 *
 * Enables the h/w LMEM initializer via the PSS control register, spins
 * until the h/w reports completion, then clears the init bits.
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME 10000

	/* Take LMEM out of reset and kick off initialization. */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Clear the init enable/done bits now that init has finished. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
799
800static void
801bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
802{
803 u32 pss_ctl;
804
805 /**
806 * Take processor out of reset.
807 */
808 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
809 pss_ctl &= ~__PSS_LPU0_RESET;
810
811 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
812}
813
814static void
815bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
816{
817 u32 pss_ctl;
818
819 /**
820 * Put processors in reset.
821 */
822 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
823 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
824
825 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
826}
827
/**
 * Get driver and firmware versions.
 *
 * Reads the firmware image header out of shared memory (offset 0)
 * into *fwhdr, one 32-bit word at a time.
 */
static void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	/* Select the host page that maps smem offset 0. */
	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);	/* NOTE(review): unused */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	/* Copy the header out of shared memory word by word. */
	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
849
850static u32 *
851bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
852{
853 if (ioc->ctdev)
854 return bfi_image_ct_get_chunk(off);
855 return bfi_image_cb_get_chunk(off);
856}
857
858static u32
859bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
860{
861return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
862}
863
864/**
865 * Returns TRUE if same.
866 */
867static bfa_boolean_t
868bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
869{
870 struct bfi_ioc_image_hdr_s *drv_fwhdr;
871 int i;
872
873 drv_fwhdr =
874 (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);
875
876 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
877 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
878 bfa_trc(ioc, i);
879 bfa_trc(ioc, fwhdr->md5sum[i]);
880 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
881 return BFA_FALSE;
882 }
883 }
884
885 bfa_trc(ioc, fwhdr->md5sum[0]);
886 return BFA_TRUE;
887}
888
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	/* Compare the running header against the driver-bundled one. */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	/* Signature mismatch: different firmware entirely. */
	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* Execution context (driver vs bios) must match too. */
	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	/* Finally the md5 signatures must agree byte for byte. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
922
/**
 * Return true if firmware of current driver matches the running firmware.
 *
 * On success a reference is taken on the shared firmware usage counter;
 * it must be dropped via bfa_ioc_firmware_unlock().
 */
static bfa_boolean_t
bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	/* The usage counter is shared between functions; serialize. */
	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		/* First user: claim the firmware with a count of 1. */
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/**
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		/* Mismatch: back off without taking a reference. */
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_ioc_usage_sem_release(ioc);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
985
/**
 * Drop the reference on the shared firmware usage counter taken by
 * bfa_ioc_firmware_lock().
 */
static void
bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
		return;

	/**
	 * decrement usage count
	 */
	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
	/* Unlock without a matching lock would underflow the counter. */
	bfa_assert(usecnt > 0);

	usecnt--;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_trc(ioc, usecnt);

	bfa_ioc_usage_sem_release(ioc);
}
1011
1012/**
1013 * Conditionally flush any pending message from firmware at start.
1014 */
1015static void
1016bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1017{
1018 u32 r32;
1019
1020 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
1021 if (r32)
1022 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
1023}
1024
1025
/**
 * Bring the IOC hardware to an initialized state, reusing already
 * running firmware when it is valid and rebooting it otherwise.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	/* Forcing pretends the chip is uninitialized -> full boot path. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
1082
1083static void
1084bfa_ioc_timeout(void *ioc_arg)
1085{
1086 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
1087
1088 bfa_trc(ioc, 0);
1089 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1090}
1091
/**
 * Copy a message into the h/w mailbox and ring the LPU doorbell.
 * Caller must ensure the mailbox is free (see bfa_ioc_mbox_queue()).
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	/* Zero-fill the rest of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	/* Read-back, result discarded -- presumably flushes the posted
	 * write; confirm against bus semantics. */
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
1119
1120static void
1121bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1122{
1123 struct bfi_ioc_ctrl_req_s enable_req;
1124
1125 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1126 bfa_ioc_portid(ioc));
1127 enable_req.ioc_class = ioc->ioc_mc;
1128 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1129}
1130
1131static void
1132bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1133{
1134 struct bfi_ioc_ctrl_req_s disable_req;
1135
1136 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1137 bfa_ioc_portid(ioc));
1138 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1139}
1140
1141static void
1142bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1143{
1144 struct bfi_ioc_getattr_req_s attr_req;
1145
1146 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1147 bfa_ioc_portid(ioc));
1148 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1149 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1150}
1151
1152static void
1153bfa_ioc_hb_check(void *cbarg)
1154{
1155 struct bfa_ioc_s *ioc = cbarg;
1156 u32 hb_count;
1157
1158 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1159 if (ioc->hb_count == hb_count) {
1160 ioc->hb_fail++;
1161 } else {
1162 ioc->hb_count = hb_count;
1163 ioc->hb_fail = 0;
1164 }
1165
1166 if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
1167 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
1168 ioc->hb_fail = 0;
1169 bfa_ioc_recover(ioc);
1170 return;
1171 }
1172
1173 bfa_ioc_mbox_poll(ioc);
1174 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
1175 BFA_IOC_HB_TOV);
1176}
1177
1178static void
1179bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1180{
1181 ioc->hb_fail = 0;
1182 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1183 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
1184 BFA_IOC_HB_TOV);
1185}
1186
1187static void
1188bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1189{
1190 bfa_timer_stop(&ioc->ioc_timer);
1191}
1192
1193/**
1194 * Host to LPU mailbox message addresses
1195 */
1196static struct {
1197 u32 hfn_mbox, lpu_mbox, hfn_pgn;
1198} iocreg_fnreg[] = {
1199 {
1200 HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, {
1201 HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, {
1202 HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, {
1203 HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
1204};
1205
1206/**
1207 * Host <-> LPU mailbox command/status registers - port 0
1208 */
1209static struct {
1210 u32 hfn, lpu;
1211} iocreg_mbcmd_p0[] = {
1212 {
1213 HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, {
1214 HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, {
1215 HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, {
1216 HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
1217};
1218
1219/**
1220 * Host <-> LPU mailbox command/status registers - port 1
1221 */
1222static struct {
1223 u32 hfn, lpu;
1224} iocreg_mbcmd_p1[] = {
1225 {
1226 HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, {
1227 HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, {
1228 HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, {
1229 HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
1230};
1231
1232/**
1233 * Shared IRQ handling in INTX mode
1234 */
1235static struct {
1236 u32 isr, msk;
1237} iocreg_shirq_next[] = {
1238 {
1239 HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, {
1240 HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, {
1241 HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, {
1242HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},};
1243
/**
 * Compute this IOC's register addresses from BAR0, the PCI function
 * number, and the port id (must be set -- see bfa_ioc_map_port()).
 */
static void
bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	/* Per-PCI-function mailbox and page-number registers. */
	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	/* Per-port heartbeat, fwstate, mailbox cmd and halt registers. */
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
	}

	/**
	 * Shared IRQ handling in INTX mode
	 */
	ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
	ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

	/**
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
		ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
}
1298
/**
 * Initiate a full firmware download.
 *
 * Copies the driver-bundled firmware image into adapter shared memory
 * word by word, patching the boot type/parameter words, and handling
 * image-chunk and smem-page boundaries along the way.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
	/* Patch boot type/param into the image before transfer. */
	fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
	fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
		bfa_os_swap32(boot_param);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {

		/* Fetch the next image chunk when crossing its boundary. */
		if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_FLASH_CHUNK_NO(i);
			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
					BFA_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	/* Restore the page register to the page that maps smem offset 0. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));
}
1362
1363static void
1364bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1365{
1366 bfa_ioc_hwinit(ioc, force);
1367}
1368
1369/**
1370 * Update BFA configuration from firmware configuration.
1371 */
1372static void
1373bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1374{
1375 struct bfi_ioc_attr_s *attr = ioc->attr;
1376
1377 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
1378 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
1379
1380 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1381}
1382
1383/**
1384 * Attach time initialization of mbox logic.
1385 */
1386static void
1387bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1388{
1389 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1390 int mc;
1391
1392 INIT_LIST_HEAD(&mod->cmd_q);
1393 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1394 mod->mbhdlr[mc].cbfn = NULL;
1395 mod->mbhdlr[mc].cbarg = ioc->bfa;
1396 }
1397}
1398
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
1428
1429/**
1430 * Cleanup any pending requests.
1431 */
1432static void
1433bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1434{
1435 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1436 struct bfa_mbox_cmd_s *cmd;
1437
1438 while (!list_empty(&mod->cmd_q))
1439 bfa_q_deq(&mod->cmd_q, &cmd);
1440}
1441
/**
 * Initialize IOC to port mapping.
 */

/* Each PCI function's personality occupies 8 bits of FNC_PERS_REG. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_map_port(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/**
	 * For crossbow, port id is same as pci function.
	 */
	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
		ioc->port_id = bfa_ioc_pcifn(ioc);
		return;
	}

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
1471
1472
1473
1474/**
1475 * bfa_ioc_public
1476 */
1477
/**
 * Set interrupt mode for a function: INTX or MSIX
 */
void
bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	/* Extract this function's interrupt-mode field. */
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything.
	 * NOTE(review): this early-out only covers the INTX case
	 * (!msix && mode != 0); an already-MSIX function is rewritten
	 * anyway -- confirm whether that asymmetry is intentional.
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* Replace this function's mode field, leaving others untouched. */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	bfa_reg_write(rb + FNC_PERS_REG, r32);
}
1510
/**
 * Bring up the application PLLs (312 MHz "slow" and 425 MHz "fast").
 * The register sequence below is strictly ordered; do not reorder.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 pll_sclk, pll_fclk, r32;

	/* PLL configuration differs between Catapult (CT) and Crossbow. */
	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
			__APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(1U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(1U);

		/**
		 * For catapult, choose operational mode FC/FCoE
		 */
		if (ioc->fcmode) {
			bfa_reg_write((rb + OP_MODE), 0);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
				      | __APP_EMS_CHANNEL_SEL);
		} else {
			ioc->pllinit = BFA_TRUE;
			bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_REFCKBUFEN1);
		}
	} else {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(3U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(3U);
	}

	/* Mark both IOCs uninitialized before touching the PLLs. */
	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);

	/* Mask and clear all host-function interrupts. */
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);

	/* Soft-reset both PLLs, briefly bypassing them. */
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_os_udelay(2);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);

	/* Program the PLL configuration while still in soft reset. */
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);

	/**
	 * Wait for PLLs to lock.
	 */
	bfa_os_udelay(2000);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);

	/* Release soft reset: final PLL configuration. */
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);

	/* On Catapult, kick off the memory BIST and trace its status. */
	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
		bfa_os_udelay(1000);
		r32 = bfa_reg_read((rb + MBIST_STAT_REG));
		bfa_trc(ioc, r32);
	}

	return BFA_STATUS_OK;
}
1603
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t rb;

	bfa_ioc_stats(ioc, ioc_boots);

	/* PLLs must be up before firmware can be downloaded. */
	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 * NOTE(review): boot_param is compared against a boot-*type*
	 * constant here (BFI_BOOT_TYPE_MEMTEST) -- confirm whether the
	 * memtest indication really travels in boot_param.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
1638
1639/**
1640 * Enable/disable IOC failure auto recovery.
1641 */
1642void
1643bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1644{
1645 bfa_auto_recover = BFA_FALSE;
1646}
1647
1648
1649bfa_boolean_t
1650bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1651{
1652 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1653}
1654
/**
 * Copy one message out of the LPU mailbox into *mbmsg and ack it.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	/* Read-back, result discarded -- presumably flushes the posted
	 * write; confirm against bus semantics. */
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
1678
/**
 * Dispatch a firmware-to-host message of the IOC message class to the
 * IOC state machine / attribute handling.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeats are counted via the heartbeat register,
		 * not this message -- nothing to do here. */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* Unknown message id from firmware is a protocol error. */
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
1713
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	IOC event callback functions
 * @param[in]	timer_mod	timer module
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	/* FC mode / PLL state default off; set later where applicable. */
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Start the IOC state machine in its reset state. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
1743
1744/**
1745 * Driver detach time IOC cleanup.
1746 */
1747void
1748bfa_ioc_detach(struct bfa_ioc_s *ioc)
1749{
1750 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1751}
1752
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 * @param[in]	mc	message class serviced by this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	/* CT == Catapult ASIC; it is a CNA unless forced into FC mode. */
	ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/* Port mapping must be established before register init. */
	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
1770
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	/* The attribute struct is overlaid directly on the dma buffer. */
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}
1787
1788/**
1789 * Return size of dma memory required.
1790 */
1791u32
1792bfa_ioc_meminfo(void)
1793{
1794 return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
1795}
1796
1797void
1798bfa_ioc_enable(struct bfa_ioc_s *ioc)
1799{
1800 bfa_ioc_stats(ioc, ioc_enables);
1801 ioc->dbg_fwsave_once = BFA_TRUE;
1802
1803 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1804}
1805
1806void
1807bfa_ioc_disable(struct bfa_ioc_s *ioc)
1808{
1809 bfa_ioc_stats(ioc, ioc_disables);
1810 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1811}
1812
1813/**
1814 * Returns memory required for saving firmware trace in case of crash.
1815 * Driver must call this interface to allocate memory required for
1816 * automatic saving of firmware trace. Driver should call
1817 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
1818 * trace memory.
1819 */
1820int
1821bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
1822{
1823return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1824}
1825
/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	/* Trace save is only meaningful with auto recovery enabled. */
	bfa_assert(ioc->auto_recover);
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
1837
1838u32
1839bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
1840{
1841 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1842}
1843
1844u32
1845bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
1846{
1847 return PSS_SMEM_PGOFF(fmaddr);
1848}
1849
1850/**
1851 * Register mailbox message handler functions
1852 *
1853 * @param[in] ioc IOC instance
1854 * @param[in] mcfuncs message class handler functions
1855 */
1856void
1857bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
1858{
1859 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1860 int mc;
1861
1862 for (mc = 0; mc < BFI_MC_MAX; mc++)
1863 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
1864}
1865
/**
 * Register mailbox message handler function, to be called by common modules
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	mc	message class the handler serves
 * @param[in]	cbfn	handler function
 * @param[in]	cbarg	argument passed back to the handler
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
1878
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
1914
1915/**
1916 * Handle mailbox interrupts
1917 */
1918void
1919bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
1920{
1921 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1922 struct bfi_mbmsg_s m;
1923 int mc;
1924
1925 bfa_ioc_msgget(ioc, &m);
1926
1927 /**
1928 * Treat IOC message class as special.
1929 */
1930 mc = m.mh.msg_class;
1931 if (mc == BFI_MC_IOC) {
1932 bfa_ioc_isr(ioc, &m);
1933 return;
1934 }
1935
1936 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1937 return;
1938
1939 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
1940}
1941
/**
 * Forward a hardware-error interrupt into the IOC state machine as
 * an IOC_E_HWERROR event.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
1947
1948#ifndef BFA_BIOS_BUILD
1949
1950/**
1951 * return true if IOC is disabled
1952 */
1953bfa_boolean_t
1954bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1955{
1956 return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
1957 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled));
1958}
1959
1960/**
1961 * return true if IOC firmware is different.
1962 */
1963bfa_boolean_t
1964bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1965{
1966 return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
1967 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
1968 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch));
1969}
1970
/**
 * True if the given BFI firmware state value counts as "disabled" for
 * the purpose of adapter-level disable checks.
 */
#define bfa_ioc_state_disabled(__sm) \
	(((__sm) == BFI_IOC_UNINIT) || \
	 ((__sm) == BFI_IOC_INITING) || \
	 ((__sm) == BFI_IOC_HWINIT) || \
	 ((__sm) == BFI_IOC_DISABLED) || \
	 ((__sm) == BFI_IOC_HBFAIL) || \
	 ((__sm) == BFI_IOC_CFG_DISABLED))
1978
1979/**
1980 * Check if adapter is disabled -- both IOCs should be in a disabled
1981 * state.
1982 */
1983bfa_boolean_t
1984bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1985{
1986 u32 ioc_state;
1987 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1988
1989 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
1990 return BFA_FALSE;
1991
1992 ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
1993 if (!bfa_ioc_state_disabled(ioc_state))
1994 return BFA_FALSE;
1995
1996 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
1997 if (!bfa_ioc_state_disabled(ioc_state))
1998 return BFA_FALSE;
1999
2000 return BFA_TRUE;
2001}
2002
2003/**
2004 * Add to IOC heartbeat failure notification queue. To be used by common
2005 * modules such as
2006 */
2007void
2008bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
2009 struct bfa_ioc_hbfail_notify_s *notify)
2010{
2011 list_add_tail(&notify->qe, &ioc->hb_notify_q);
2012}
2013
2014#define BFA_MFG_NAME "Brocade"
2015void
2016bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2017 struct bfa_adapter_attr_s *ad_attr)
2018{
2019 struct bfi_ioc_attr_s *ioc_attr;
2020 char model[BFA_ADAPTER_MODEL_NAME_LEN];
2021
2022 ioc_attr = ioc->attr;
2023 bfa_os_memcpy((void *)&ad_attr->serial_num,
2024 (void *)ioc_attr->brcd_serialnum,
2025 BFA_ADAPTER_SERIAL_NUM_LEN);
2026
2027 bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
2028 bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
2029 BFA_VERSION_LEN);
2030 bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
2031 BFA_ADAPTER_MFG_NAME_LEN);
2032 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2033 sizeof(struct bfa_mfg_vpd_s));
2034
2035 ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
2036 ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2037
2038 /**
2039 * model name
2040 */
2041 if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
2042 strcpy(model, "BR-10?0");
2043 model[5] = '0' + ad_attr->nports;
2044 } else {
2045 strcpy(model, "Brocade-??5");
2046 model[8] =
2047 '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2048 model[9] = '0' + ad_attr->nports;
2049 }
2050
2051 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2052 ad_attr->prototype = 1;
2053 else
2054 ad_attr->prototype = 0;
2055
2056 bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
2057 bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
2058 BFA_ADAPTER_MODEL_NAME_LEN);
2059
2060 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2061 ad_attr->mac = bfa_ioc_get_mac(ioc);
2062
2063 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2064 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2065 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2066 ad_attr->asic_rev = ioc_attr->asic_rev;
2067 ad_attr->hw_ver[0] = 'R';
2068 ad_attr->hw_ver[1] = 'e';
2069 ad_attr->hw_ver[2] = 'v';
2070 ad_attr->hw_ver[3] = '-';
2071 ad_attr->hw_ver[4] = ioc_attr->asic_rev;
2072 ad_attr->hw_ver[5] = '\0';
2073
2074 ad_attr->cna_capable = ioc->cna;
2075}
2076
2077void
2078bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2079{
2080 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2081
2082 ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2083 ioc_attr->port_id = ioc->port_id;
2084
2085 if (!ioc->ctdev)
2086 ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
2087 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2088 ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
2089 else if (ioc->ioc_mc == BFI_MC_LL)
2090 ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
2091
2092 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2093
2094 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2095 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2096 ioc_attr->pci_attr.chip_rev[0] = 'R';
2097 ioc_attr->pci_attr.chip_rev[1] = 'e';
2098 ioc_attr->pci_attr.chip_rev[2] = 'v';
2099 ioc_attr->pci_attr.chip_rev[3] = '-';
2100 ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
2101 ioc_attr->pci_attr.chip_rev[5] = '\0';
2102}
2103
2104/**
2105 * hal_wwn_public
2106 */
2107wwn_t
2108bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
2109{
2110 union {
2111 wwn_t wwn;
2112 u8 byte[sizeof(wwn_t)];
2113 }
2114 w;
2115
2116 w.wwn = ioc->attr->mfg_wwn;
2117
2118 if (bfa_ioc_portid(ioc) == 1)
2119 w.byte[7]++;
2120
2121 return w.wwn;
2122}
2123
2124wwn_t
2125bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
2126{
2127 union {
2128 wwn_t wwn;
2129 u8 byte[sizeof(wwn_t)];
2130 }
2131 w;
2132
2133 w.wwn = ioc->attr->mfg_wwn;
2134
2135 if (bfa_ioc_portid(ioc) == 1)
2136 w.byte[7]++;
2137
2138 w.byte[0] = 0x20;
2139
2140 return w.wwn;
2141}
2142
/**
 * Build an NAA-5 format WWN from the manufacturing WWN and an instance
 * number: the result is 0x5, followed by the low 52 bits of the mfg
 * WWN shifted left by a nibble, with the bottom 12 bits replaced by
 * @inst.
 *
 * @param[in] ioc  IOC instance
 * @param[in] inst instance number (only low 12 bits are used)
 */
wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
	union {
		wwn_t wwn;
		u8 byte[sizeof(wwn_t)];
	}
	w , w5;

	bfa_trc(ioc, inst);

	w.wwn = ioc->attr->mfg_wwn;
	/* 0x5 NAA prefix nibble, then mfg-WWN nibbles shifted up by one */
	w5.byte[0] = 0x50 | w.byte[2] >> 4;
	w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
	w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
	w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
	w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
	w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
	/* last 12 bits come from the instance number */
	w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
	w5.byte[7] = (inst & 0xff);

	return w5.wwn;
}
2166
/**
 * Return the manufacturing WWN as a 64-bit adapter identifier.
 */
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_wwn;
}
2172
/**
 * Return the MAC address for this PCI function: the manufacturing MAC
 * with the last byte offset by the function number.
 */
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	mac_t mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}
2183
/**
 * Put the IOC into FC mode and derive its port id from the PCI
 * function number.
 */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
2190
/**
 * Return true when operating in FC mode: either explicitly configured,
 * or implied by any device other than the CT (Catapult) part.
 */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}
2196
2197/**
2198 * Return true if interrupt should be claimed.
2199 */
2200bfa_boolean_t
2201bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
2202{
2203 u32 isr, msk;
2204
2205 /**
2206 * Always claim if not catapult.
2207 */
2208 if (!ioc->ctdev)
2209 return BFA_TRUE;
2210
2211 /**
2212 * FALSE if next device is claiming interrupt.
2213 * TRUE if next device is not interrupting or not present.
2214 */
2215 msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
2216 isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
2217 return !(isr & ~msk);
2218}
2219
2220/**
2221 * Send AEN notification
2222 */
2223static void
2224bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2225{
2226 union bfa_aen_data_u aen_data;
2227 struct bfa_log_mod_s *logmod = ioc->logm;
2228 s32 inst_num = 0;
2229 struct bfa_ioc_attr_s ioc_attr;
2230
2231 switch (event) {
2232 case BFA_IOC_AEN_HBGOOD:
2233 bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
2234 break;
2235 case BFA_IOC_AEN_HBFAIL:
2236 bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
2237 break;
2238 case BFA_IOC_AEN_ENABLE:
2239 bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
2240 break;
2241 case BFA_IOC_AEN_DISABLE:
2242 bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
2243 break;
2244 case BFA_IOC_AEN_FWMISMATCH:
2245 bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
2246 break;
2247 default:
2248 break;
2249 }
2250
2251 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
2252 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
2253 bfa_ioc_get_attr(ioc, &ioc_attr);
2254 switch (ioc_attr.ioc_type) {
2255 case BFA_IOC_TYPE_FC:
2256 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2257 break;
2258 case BFA_IOC_TYPE_FCoE:
2259 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2260 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2261 break;
2262 case BFA_IOC_TYPE_LL:
2263 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2264 break;
2265 default:
2266 bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
2267 break;
2268 }
2269 aen_data.ioc.ioc_type = ioc_attr.ioc_type;
2270}
2271
2272/**
2273 * Retrieve saved firmware trace from a prior IOC failure.
2274 */
2275bfa_status_t
2276bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2277{
2278 int tlen;
2279
2280 if (ioc->dbg_fwsave_len == 0)
2281 return BFA_STATUS_ENOFSAVE;
2282
2283 tlen = *trclen;
2284 if (tlen > ioc->dbg_fwsave_len)
2285 tlen = ioc->dbg_fwsave_len;
2286
2287 bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
2288 *trclen = tlen;
2289 return BFA_STATUS_OK;
2290}
2291
2292/**
2293 * Retrieve saved firmware trace from a prior IOC failure.
2294 */
2295bfa_status_t
2296bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2297{
2298 u32 pgnum;
2299 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2300 int i, tlen;
2301 u32 *tbuf = trcdata, r32;
2302
2303 bfa_trc(ioc, *trclen);
2304
2305 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2306 loff = bfa_ioc_smem_pgoff(ioc, loff);
2307 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2308
2309 tlen = *trclen;
2310 if (tlen > BFA_DBG_FWTRC_LEN)
2311 tlen = BFA_DBG_FWTRC_LEN;
2312 tlen /= sizeof(u32);
2313
2314 bfa_trc(ioc, tlen);
2315
2316 for (i = 0; i < tlen; i++) {
2317 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2318 tbuf[i] = bfa_os_ntohl(r32);
2319 loff += sizeof(u32);
2320
2321 /**
2322 * handle page offset wrap around
2323 */
2324 loff = PSS_SMEM_PGOFF(loff);
2325 if (loff == 0) {
2326 pgnum++;
2327 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2328 }
2329 }
2330 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2331 bfa_ioc_smem_pgnum(ioc, 0));
2332 bfa_trc(ioc, pgnum);
2333
2334 *trclen = tlen * sizeof(u32);
2335 return BFA_STATUS_OK;
2336}
2337
2338/**
2339 * Save firmware trace if configured.
2340 */
2341static void
2342bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2343{
2344 int tlen;
2345
2346 if (ioc->dbg_fwsave_len) {
2347 tlen = ioc->dbg_fwsave_len;
2348 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2349 }
2350}
2351
2352/**
2353 * Firmware failure detected. Start recovery actions.
2354 */
2355static void
2356bfa_ioc_recover(struct bfa_ioc_s *ioc)
2357{
2358 if (ioc->dbg_fwsave_once) {
2359 ioc->dbg_fwsave_once = BFA_FALSE;
2360 bfa_ioc_debug_save(ioc);
2361 }
2362
2363 bfa_ioc_stats(ioc, ioc_hbfails);
2364 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2365}
2366
2367#else
2368
/* No-op AEN stub for the BIOS build. */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}
2373
/* Recovery is not expected in the BIOS build; assert if reached. */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_assert(0);
}
2379
2380#endif
2381
2382
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
new file mode 100644
index 000000000000..58efd4b13143
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -0,0 +1,259 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_IOC_H__
19#define __BFA_IOC_H__
20
21#include <cs/bfa_sm.h>
22#include <bfi/bfi.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_boot.h>
25#include <bfa_timer.h>
26
27/**
28 * PCI device information required by IOC
29 */
struct bfa_pcidev_s {
	int pci_slot;			/* PCI slot number */
	u8 pci_func;			/* PCI function number */
	u16 device_id;			/* PCI device id */
	bfa_os_addr_t pci_bar_kva;	/* mapped BAR base address */
};
36
37/**
38 * Structure used to remember the DMA-able memory block's KVA and Physical
39 * Address
40 */
41struct bfa_dma_s {
42 void *kva; /*! Kernel virtual address */
43 u64 pa; /*! Physical address */
44};
45
46#define BFA_DMA_ALIGN_SZ 256
47#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
48
49
50
51#define bfa_dma_addr_set(dma_addr, pa) \
52 __bfa_dma_addr_set(&dma_addr, (u64)pa)
53
/*
 * Split a 64-bit DMA address into the lo/hi halves of a BFI address.
 * bfa_os_u32() presumably yields the upper 32 bits of pa -- confirm
 * against the OS abstraction layer.
 */
static inline void
__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	dma_addr->a32.addr_lo = (u32) pa;
	dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa));
}
60
61
62#define bfa_dma_be_addr_set(dma_addr, pa) \
63 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
/*
 * Same split as __bfa_dma_addr_set(), but each 32-bit half is stored
 * big-endian for direct consumption by firmware.
 */
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
	dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
}
70
/* Pre-computed addresses of the IOC hardware registers. */
struct bfa_ioc_regs_s {
	bfa_os_addr_t hfn_mbox_cmd;	/* host->fw mailbox command */
	bfa_os_addr_t hfn_mbox;		/* host->fw mailbox */
	bfa_os_addr_t lpu_mbox_cmd;	/* fw->host mailbox command */
	bfa_os_addr_t lpu_mbox;		/* fw->host mailbox */
	bfa_os_addr_t pss_ctl_reg;
	bfa_os_addr_t app_pll_fast_ctl_reg;
	bfa_os_addr_t app_pll_slow_ctl_reg;
	bfa_os_addr_t ioc_sem_reg;
	bfa_os_addr_t ioc_usage_sem_reg;
	bfa_os_addr_t ioc_usage_reg;
	bfa_os_addr_t host_page_num_fn;	/* SMEM page select */
	bfa_os_addr_t heartbeat;
	bfa_os_addr_t ioc_fwstate;
	bfa_os_addr_t ll_halt;
	bfa_os_addr_t shirq_isr_next;	/* shared-INTx: next fn ISR */
	bfa_os_addr_t shirq_msk_next;	/* shared-INTx: next fn mask */
	bfa_os_addr_t smem_page_start;
	u32 smem_pg0;
};
91
92#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr)
93#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val)
94#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off)
95#define bfa_mem_write(_raddr, _off, _val) \
96 bfa_os_mem_write(_raddr, _off, _val)
97/**
98 * IOC Mailbox structures
99 */
/* A queueable mailbox command: list linkage plus the raw message. */
struct bfa_mbox_cmd_s {
	struct list_head qe;		/* linkage on mbox_mod cmd_q */
	u32 msg[BFI_IOC_MSGSZ];		/* message payload */
};
104
105/**
106 * IOC mailbox module
107 */
108typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
109struct bfa_ioc_mbox_mod_s {
110 struct list_head cmd_q; /* pending mbox queue */
111 int nmclass; /* number of handlers */
112 struct {
113 bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
114 void *cbarg;
115 } mbhdlr[BFI_MC_MAX];
116};
117
118/**
119 * IOC callback function interfaces
120 */
121typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
122typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
123typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
124typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
/* Callbacks the IOC invokes on its owner for lifecycle events. */
struct bfa_ioc_cbfn_s {
	bfa_ioc_enable_cbfn_t enable_cbfn;	/* enable completed */
	bfa_ioc_disable_cbfn_t disable_cbfn;	/* disable completed */
	bfa_ioc_hbfail_cbfn_t hbfail_cbfn;	/* heartbeat failure */
	bfa_ioc_reset_cbfn_t reset_cbfn;	/* IOC reset */
};
131
132/**
133 * Heartbeat failure notification queue element.
134 */
/*
 * Heartbeat failure notification queue element.  Caller-owned; queued
 * on ioc->hb_notify_q via bfa_ioc_hbfail_register().
 */
struct bfa_ioc_hbfail_notify_s {
	struct list_head qe;		/* linkage on hb_notify_q */
	bfa_ioc_hbfail_cbfn_t cbfn;	/* callback on failure */
	void *cbarg;			/* argument for cbfn */
};
140
141/**
142 * Initialize a heartbeat failure notification structure
143 */
144#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
145 (__notify)->cbfn = (__cbfn); \
146 (__notify)->cbarg = (__cbarg); \
147} while (0)
148
/* Per-function IOC instance state. */
struct bfa_ioc_s {
	bfa_fsm_t fsm;			/* IOC state machine */
	struct bfa_s *bfa;
	struct bfa_pcidev_s pcidev;
	struct bfa_timer_mod_s *timer_mod;
	struct bfa_timer_s ioc_timer;
	struct bfa_timer_s sem_timer;
	u32 hb_count;			/* last observed heartbeat */
	u32 hb_fail;			/* consecutive missed beats */
	u32 retry_count;
	struct list_head hb_notify_q;	/* hb-failure subscribers */
	void *dbg_fwsave;		/* saved fw trace buffer */
	int dbg_fwsave_len;
	bfa_boolean_t dbg_fwsave_once;	/* save trace only once */
	enum bfi_mclass ioc_mc;		/* this IOC's message class */
	struct bfa_ioc_regs_s ioc_regs;
	struct bfa_trc_mod_s *trcmod;
	struct bfa_aen_s *aen;
	struct bfa_log_mod_s *logm;
	struct bfa_ioc_drv_stats_s stats;
	bfa_boolean_t auto_recover;
	bfa_boolean_t fcmode;		/* forced FC mode */
	bfa_boolean_t ctdev;		/* Catapult device */
	bfa_boolean_t cna;		/* CNA-capable */
	bfa_boolean_t pllinit;
	u8 port_id;

	struct bfa_dma_s attr_dma;	/* DMA block backing 'attr' */
	struct bfi_ioc_attr_s *attr;	/* fw-written attribute page */
	struct bfa_ioc_cbfn_s *cbfn;	/* owner callbacks */
	struct bfa_ioc_mbox_mod_s mbox_mod;
};
181
182#define bfa_ioc_pcifn(__ioc) (__ioc)->pcidev.pci_func
183#define bfa_ioc_devid(__ioc) (__ioc)->pcidev.device_id
184#define bfa_ioc_bar0(__ioc) (__ioc)->pcidev.pci_bar_kva
185#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
186#define bfa_ioc_fetch_stats(__ioc, __stats) \
187 ((__stats)->drv_stats) = (__ioc)->stats
188#define bfa_ioc_clr_stats(__ioc) \
189 bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
190#define bfa_ioc_maxfrsize(__ioc) (__ioc)->attr->maxfrsize
191#define bfa_ioc_rx_bbcredit(__ioc) (__ioc)->attr->rx_bbcredit
192#define bfa_ioc_speed_sup(__ioc) \
193 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
194
195/**
196 * IOC mailbox interface
197 */
198void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
199void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
200 bfa_ioc_mbox_mcfunc_t *mcfuncs);
201void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
202void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
203void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
204void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
205 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
206
207/**
208 * IOC interfaces
209 */
210void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
211 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
212 struct bfa_trc_mod_s *trcmod,
213 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
214void bfa_ioc_detach(struct bfa_ioc_s *ioc);
215void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
216 enum bfi_mclass mc);
217u32 bfa_ioc_meminfo(void);
218void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
219void bfa_ioc_enable(struct bfa_ioc_s *ioc);
220void bfa_ioc_disable(struct bfa_ioc_s *ioc);
221bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
222
223void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
224void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
225void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
226void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
227bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
228bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
229bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
230bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
231bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
232void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
233void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
234void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
235 struct bfa_adapter_attr_s *ad_attr);
236int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
237void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
238bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
239 int *trclen);
240bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
241 int *trclen);
242u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
243u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
244void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
245bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
246void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
247 struct bfa_ioc_hbfail_notify_s *notify);
248
249/*
250 * bfa mfg wwn API functions
251 */
252wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
253wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
254wwn_t bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst);
255mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
256u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
257
258#endif /* __BFA_IOC_H__ */
259
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
new file mode 100644
index 000000000000..12350b022d63
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -0,0 +1,872 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_priv.h>
20#include <log/bfa_log_hal.h>
21#include <bfi/bfi_boot.h>
22#include <bfi/bfi_cbreg.h>
23#include <aen/bfa_aen_ioc.h>
24#include <defs/bfa_defs_iocfc.h>
25#include <defs/bfa_defs_pci.h>
26#include "bfa_callback_priv.h"
27#include "bfad_drv.h"
28
29BFA_TRC_FILE(HAL, IOCFC);
30
31/**
32 * IOC local definitions
33 */
34#define BFA_IOCFC_TOV 5000 /* msecs */
35
36enum {
37 BFA_IOCFC_ACT_NONE = 0,
38 BFA_IOCFC_ACT_INIT = 1,
39 BFA_IOCFC_ACT_STOP = 2,
40 BFA_IOCFC_ACT_DISABLE = 3,
41};
42
43/*
44 * forward declarations
45 */
46static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
47static void bfa_iocfc_disable_cbfn(void *bfa_arg);
48static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
49static void bfa_iocfc_reset_cbfn(void *bfa_arg);
50static void bfa_iocfc_stats_clear(void *bfa_arg);
51static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
52 struct bfa_fw_stats_s *s);
53static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
54static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
55static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
56static void bfa_iocfc_stats_timeout(void *bfa_arg);
57
58static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
59
60/**
61 * bfa_ioc_pvt BFA IOC private functions
62 */
63
64static void
65bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
66{
67 int i, per_reqq_sz, per_rspq_sz;
68
69 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
70 BFA_DMA_ALIGN_SZ);
71 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
72 BFA_DMA_ALIGN_SZ);
73
74 /*
75 * Calculate CQ size
76 */
77 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
78 *dm_len = *dm_len + per_reqq_sz;
79 *dm_len = *dm_len + per_rspq_sz;
80 }
81
82 /*
83 * Calculate Shadow CI/PI size
84 */
85 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
86 *dm_len += (2 * BFA_CACHELINE_SZ);
87}
88
89static void
90bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
91{
92 *dm_len +=
93 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
94 *dm_len +=
95 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
96 BFA_CACHELINE_SZ);
97 *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
98}
99
100/**
101 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
102 */
/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 *
 * Builds the IOC configuration page (queue DMA addresses, shadow
 * pointers, element counts) and mails its DMA address to firmware.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	/* cfgdone flips back to TRUE when the cfg response arrives */
	iocfc->cfgdone = BFA_FALSE;
	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
157
/**
 * Initialize the IOCFC module state and select the chip-specific
 * hardware interface (Catapult vs. Crossbow) based on PCI device id.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	/* keep a private copy of the driver configuration */
	bfa_os_assign(iocfc->cfg, *cfg);

	/**
	 * Initialize chip specific handlers.
	 */
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
194
/**
 * Carve the pre-allocated DMA block into the regions IOCFC needs.
 *
 * The carve order must match the size accounting in
 * bfa_iocfc_cqs_sz()/bfa_iocfc_fw_cfg_sz(): IOC attribute memory,
 * req/rsp queues, shadow CI/PI, config info, config response,
 * firmware stats.  The meminfo cursors are advanced past what is
 * claimed here.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += bfa_ioc_meminfo();
	dm_pa += bfa_ioc_meminfo();

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	/* one cacheline per shadow pointer, matching bfa_iocfc_cqs_sz() */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for iocfc stats
	 */
	bfa->iocfc.stats_kva = dm_kva;
	bfa->iocfc.stats_pa = dm_pa;
	bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);

	/* hand the remaining DMA space back */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	/* optional non-DMA debug trace buffer */
	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
290
291/**
292 * BFA submodules initialization completion notification.
293 */
294static void
295bfa_iocfc_initdone_submod(struct bfa_s *bfa)
296{
297 int i;
298
299 for (i = 0; hal_mods[i]; i++)
300 hal_mods[i]->initdone(bfa);
301}
302
303/**
304 * Start BFA submodules.
305 */
306static void
307bfa_iocfc_start_submod(struct bfa_s *bfa)
308{
309 int i;
310
311 bfa->rme_process = BFA_TRUE;
312
313 for (i = 0; hal_mods[i]; i++)
314 hal_mods[i]->start(bfa);
315}
316
317/**
318 * Disable BFA submodules.
319 */
320static void
321bfa_iocfc_disable_submod(struct bfa_s *bfa)
322{
323 int i;
324
325 for (i = 0; hal_mods[i]; i++)
326 hal_mods[i]->iocdisable(bfa);
327}
328
/**
 * Completion callback for init: reports OK/FAILED to the driver based
 * on whether firmware configuration finished; on the cancel path
 * (complete == FALSE) just clears the pending action.
 */
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
342
343static void
344bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
345{
346 struct bfa_s *bfa = bfa_arg;
347 struct bfad_s *bfad = bfa->bfad;
348
349 if (compl)
350 complete(&bfad->comp);
351
352 else
353 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
354}
355
/**
 * Completion callback for disable: wake the waiting driver thread;
 * nothing to do on the cancel path.
 */
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}
365
366/**
367 * Update BFA configuration from firmware configuration.
368 */
369static void
370bfa_iocfc_cfgrsp(struct bfa_s *bfa)
371{
372 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
373 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
374 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
375 struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo;
376
377 fwcfg->num_cqs = fwcfg->num_cqs;
378 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
379 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
380 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
381 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
382 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
383
384 cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce;
385 cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay);
386 cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency);
387
388 iocfc->cfgdone = BFA_TRUE;
389
390 /**
391 * Configuration is complete - initialize/start submodules
392 */
393 if (iocfc->action == BFA_IOCFC_ACT_INIT)
394 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
395 else
396 bfa_iocfc_start_submod(bfa);
397}
398
/**
 * Ask firmware to clear its statistics; arms a timeout timer in case
 * the mailbox response never arrives.
 */
static void
bfa_iocfc_stats_clear(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_stats_req_s stats_req;

	/* guard against a lost response */
	bfa_timer_start(bfa, &iocfc->stats_timer,
			bfa_iocfc_stats_clr_timeout, bfa,
			BFA_IOCFC_TOV);

	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
		    bfa_lpuid(bfa));
	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
			  sizeof(struct bfi_iocfc_stats_req_s));
}
415
416static void
417bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
418{
419 u32 *dip = (u32 *) d;
420 u32 *sip = (u32 *) s;
421 int i;
422
423 for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
424 dip[i] = bfa_os_ntohl(sip[i]);
425}
426
/**
 * Host-callback to complete a clear-statistics request.
 *
 * @param[in] bfa_arg	BFA instance
 * @param[in] complete	BFA_TRUE when the callback fires normally,
 *			BFA_FALSE when the queued callback is cancelled.
 */
static void
bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (complete) {
		/* also clear IOC-level counters, then notify the requester */
		bfa_ioc_clr_stats(&bfa->ioc);
		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
		/*
		 * NOTE(review): stats_busy is not reset on this path --
		 * presumably something downstream clears it; confirm,
		 * otherwise a later request would see DEVBUSY forever.
		 */
	} else {
		/* cancelled: drop the in-progress request state */
		iocfc->stats_busy = BFA_FALSE;
		iocfc->stats_status = BFA_STATUS_OK;
	}
}
441
442static void
443bfa_iocfc_stats_clr_timeout(void *bfa_arg)
444{
445 struct bfa_s *bfa = bfa_arg;
446 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
447
448 bfa_trc(bfa, 0);
449
450 iocfc->stats_status = BFA_STATUS_ETIMER;
451 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
452}
453
/**
 * Host-callback to complete a get-statistics request.
 *
 * On success, copies the firmware statistics (byte-swapped to host
 * order) into the caller-supplied buffer and invokes the caller's
 * completion function.
 *
 * @param[in] bfa_arg	BFA instance
 * @param[in] complete	BFA_TRUE when the callback fires normally,
 *			BFA_FALSE when the queued callback is cancelled.
 */
static void
bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (complete) {
		if (iocfc->stats_status == BFA_STATUS_OK) {
			/* zero the result first, then fill fw_stats only */
			bfa_os_memset(iocfc->stats_ret, 0,
				sizeof(*iocfc->stats_ret));
			bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
				iocfc->fw_stats);
		}
		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
		/*
		 * NOTE(review): stats_busy is not reset on this path --
		 * presumably something downstream clears it; confirm,
		 * otherwise a later request would see DEVBUSY forever.
		 */
	} else {
		/* cancelled: drop the in-progress request state */
		iocfc->stats_busy = BFA_FALSE;
		iocfc->stats_status = BFA_STATUS_OK;
	}
}
473
474static void
475bfa_iocfc_stats_timeout(void *bfa_arg)
476{
477 struct bfa_s *bfa = bfa_arg;
478 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
479
480 bfa_trc(bfa, 0);
481
482 iocfc->stats_status = BFA_STATUS_ETIMER;
483 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
484}
485
486static void
487bfa_iocfc_stats_query(struct bfa_s *bfa)
488{
489 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
490 struct bfi_iocfc_stats_req_s stats_req;
491
492 bfa_timer_start(bfa, &iocfc->stats_timer,
493 bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
494
495 bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
496 bfa_lpuid(bfa));
497 bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
498 sizeof(struct bfi_iocfc_stats_req_s));
499}
500
/**
 * Rewind the producer and consumer indices of every request and
 * response circular queue; called on chip reset before interrupts are
 * re-enabled (see bfa_iocfc_reset_cbfn()).
 */
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
513
514/**
515 * IOC enable request is complete
516 */
517static void
518bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
519{
520 struct bfa_s *bfa = bfa_arg;
521
522 if (status != BFA_STATUS_OK) {
523 bfa_isr_disable(bfa);
524 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
525 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
526 bfa_iocfc_init_cb, bfa);
527 return;
528 }
529
530 bfa_iocfc_initdone_submod(bfa);
531 bfa_iocfc_send_cfg(bfa);
532}
533
534/**
535 * IOC disable request is complete
536 */
537static void
538bfa_iocfc_disable_cbfn(void *bfa_arg)
539{
540 struct bfa_s *bfa = bfa_arg;
541
542 bfa_isr_disable(bfa);
543 bfa_iocfc_disable_submod(bfa);
544
545 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
546 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
547 bfa);
548 else {
549 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
550 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
551 bfa);
552 }
553}
554
555/**
556 * Notify sub-modules of hardware failure.
557 */
558static void
559bfa_iocfc_hbfail_cbfn(void *bfa_arg)
560{
561 struct bfa_s *bfa = bfa_arg;
562
563 bfa->rme_process = BFA_FALSE;
564
565 bfa_isr_disable(bfa);
566 bfa_iocfc_disable_submod(bfa);
567
568 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
569 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
570 bfa);
571}
572
573/**
574 * Actions on chip-reset completion.
575 */
576static void
577bfa_iocfc_reset_cbfn(void *bfa_arg)
578{
579 struct bfa_s *bfa = bfa_arg;
580
581 bfa_iocfc_reset_queues(bfa);
582 bfa_isr_enable(bfa);
583}
584
585
586
587/**
588 * bfa_ioc_public
589 */
590
591/**
592 * Query IOC memory requirement information.
593 */
594void
595bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
596 u32 *dm_len)
597{
598 /* dma memory for IOC */
599 *dm_len += bfa_ioc_meminfo();
600
601 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
602 bfa_iocfc_cqs_sz(cfg, dm_len);
603 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
604}
605
606/**
607 * Query IOC memory requirement information.
608 */
609void
610bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
611 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
612{
613 int i;
614
615 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
616 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
617 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
618 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
619
620 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
621 bfa->trcmod, bfa->aen, bfa->logm);
622 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
623 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
624
625 /**
626 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
627 */
628 if (0)
629 bfa_ioc_set_fcmode(&bfa->ioc);
630
631 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
632 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
633 bfa_timer_init(&bfa->timer_mod);
634
635 INIT_LIST_HEAD(&bfa->comp_q);
636 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
637 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
638}
639
640/**
641 * Query IOC memory requirement information.
642 */
643void
644bfa_iocfc_detach(struct bfa_s *bfa)
645{
646 bfa_ioc_detach(&bfa->ioc);
647}
648
649/**
650 * Query IOC memory requirement information.
651 */
652void
653bfa_iocfc_init(struct bfa_s *bfa)
654{
655 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
656 bfa_ioc_enable(&bfa->ioc);
657 bfa_msix_install(bfa);
658}
659
660/**
661 * IOC start called from bfa_start(). Called to start IOC operations
662 * at driver instantiation for this instance.
663 */
664void
665bfa_iocfc_start(struct bfa_s *bfa)
666{
667 if (bfa->iocfc.cfgdone)
668 bfa_iocfc_start_submod(bfa);
669}
670
671/**
672 * IOC stop called from bfa_stop(). Called only when driver is unloaded
673 * for this instance.
674 */
675void
676bfa_iocfc_stop(struct bfa_s *bfa)
677{
678 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
679
680 bfa->rme_process = BFA_FALSE;
681 bfa_ioc_disable(&bfa->ioc);
682}
683
/**
 * IOCFC mailbox ISR -- dispatch firmware-to-host IOCFC replies.
 *
 * @param[in] bfaarg	BFA instance
 * @param[in] m		received mailbox message
 */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		/* firmware accepted the configuration */
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;

	case BFI_IOCFC_I2H_GET_STATS_RSP:
		/* ignore stale replies: no request pending, or timer popped */
		if (iocfc->stats_busy == BFA_FALSE
			|| iocfc->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&iocfc->stats_timer);
		iocfc->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
			bfa);
		break;
	case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (iocfc->stats_busy == BFA_FALSE
			|| iocfc->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&iocfc->stats_timer);
		iocfc->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
			bfa_iocfc_stats_clr_cb, bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		/* queue update acknowledged -- notify the requester */
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		bfa_assert(0);
	}
}
730
731#ifndef BFA_BIOS_BUILD
/**
 * Retrieve adapter attributes; thin wrapper over the IOC layer.
 */
void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}
737
/**
 * Return the adapter identifier; thin wrapper over the IOC layer.
 */
u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
	return bfa_ioc_get_adid(&bfa->ioc);
}
743
/**
 * Return the cached IOCFC attributes: current interrupt attributes and
 * the configuration supplied at attach time.
 */
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr = iocfc->cfginfo->intr_attr;
	attr->config = iocfc->cfg;
}
752
/**
 * Set interrupt coalescing attributes.
 *
 * The attributes are always cached in cfginfo; if the IOC is
 * operational they are additionally pushed to firmware via the IOC
 * request queue.
 *
 * @return BFA_STATUS_OK on success (or when only cached because the
 *	   IOC is not operational); BFA_STATUS_DEVBUSY when no request
 *	   queue element is available.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	/* cache first, so the value is kept even if we cannot send now */
	iocfc->cfginfo->intr_attr = *attr;
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
			bfa_lpuid(bfa));
	/* delay/latency go out in wire (network) byte order */
	m->coalesce = attr->coalesce;
	m->delay = bfa_os_htons(attr->delay);
	m->latency = bfa_os_htons(attr->latency);

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
779
780void
781bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
782{
783 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
784
785 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
786 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
787}
788
789bfa_status_t
790bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
791 bfa_cb_ioc_t cbfn, void *cbarg)
792{
793 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
794
795 if (iocfc->stats_busy) {
796 bfa_trc(bfa, iocfc->stats_busy);
797 return (BFA_STATUS_DEVBUSY);
798 }
799
800 iocfc->stats_busy = BFA_TRUE;
801 iocfc->stats_ret = stats;
802 iocfc->stats_cbfn = cbfn;
803 iocfc->stats_cbarg = cbarg;
804
805 bfa_iocfc_stats_query(bfa);
806
807 return (BFA_STATUS_OK);
808}
809
810bfa_status_t
811bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
812{
813 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
814
815 if (iocfc->stats_busy) {
816 bfa_trc(bfa, iocfc->stats_busy);
817 return (BFA_STATUS_DEVBUSY);
818 }
819
820 iocfc->stats_busy = BFA_TRUE;
821 iocfc->stats_cbfn = cbfn;
822 iocfc->stats_cbarg = cbarg;
823
824 bfa_iocfc_stats_clear(bfa);
825 return (BFA_STATUS_OK);
826}
827
828/**
829 * Enable IOC after it is disabled.
830 */
831void
832bfa_iocfc_enable(struct bfa_s *bfa)
833{
834 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
835 "IOC Enable");
836 bfa_ioc_enable(&bfa->ioc);
837}
838
/**
 * Explicitly disable the IOC.
 *
 * Logs the event, records the pending DISABLE action (consumed by
 * bfa_iocfc_disable_cbfn()), halts response-queue processing, and
 * disables the IOC.
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		"IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
849
850
851bfa_boolean_t
852bfa_iocfc_is_operational(struct bfa_s *bfa)
853{
854 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
855}
856
857/**
858 * Return boot target port wwns -- read from boot information in flash.
859 */
860void
861bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns)
862{
863 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
864 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
865
866 *nwwns = cfgrsp->bootwwns.nwwns;
867 *wwns = cfgrsp->bootwwns.wwn;
868}
869
870#endif
871
872
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
new file mode 100644
index 000000000000..7ad177ed4cfc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_IOCFC_H__
19#define __BFA_IOCFC_H__
20
21#include <bfa_ioc.h>
22#include <bfa.h>
23#include <bfi/bfi_iocfc.h>
24#include <bfa_callback_priv.h>
25
26#define BFA_REQQ_NELEMS_MIN (4)
27#define BFA_RSPQ_NELEMS_MIN (4)
28
/**
 * Mapped device register addresses used by IOCFC: interrupt control
 * plus per-queue producer/consumer/depth/control registers for the
 * CPE (request) and RME (response) queues.
 */
struct bfa_iocfc_regs_s {
	bfa_os_addr_t   intr_status;
	bfa_os_addr_t   intr_mask;
	bfa_os_addr_t   cpe_q_pi[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   cpe_q_ci[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   cpe_q_depth[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   cpe_q_ctrl[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   rme_q_ci[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   rme_q_pi[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   rme_q_depth[BFI_IOC_MAX_CQS];
	bfa_os_addr_t   rme_q_ctrl[BFI_IOC_MAX_CQS];
};
41
42/**
43 * MSIX vector handlers
44 */
45#define BFA_MSIX_MAX_VECTORS 22
46typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
47struct bfa_msix_s {
48 int nvecs;
49 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
50};
51
52/**
53 * Chip specific interfaces
54 */
55struct bfa_hwif_s {
56 void (*hw_reginit)(struct bfa_s *bfa);
57 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
58 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
59 void (*hw_msix_install)(struct bfa_s *bfa);
60 void (*hw_msix_uninstall)(struct bfa_s *bfa);
61 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
62 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
63 u32 *nvecs, u32 *maxvec);
64};
65typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
66
/* Per-instance IOCFC state: config, queue bookkeeping, DMA areas,
 * statistics request state and chip-specific hooks. */
struct bfa_iocfc_s {
	struct bfa_s 		*bfa;		/* owning BFA instance */
	struct bfa_iocfc_cfg_s 	cfg;		/* config from attach time */
	int			action;		/* pending BFA_IOCFC_ACT_* */

	u32		req_cq_pi[BFI_IOC_MAX_CQS];	/* request queue producer */
	u32		rsp_cq_ci[BFI_IOC_MAX_CQS];	/* response queue consumer */

	struct bfa_cb_qe_s	init_hcb_qe;	/* init completion callback */
	struct bfa_cb_qe_s	stop_hcb_qe;	/* stop completion callback */
	struct bfa_cb_qe_s	dis_hcb_qe;	/* disable completion callback */
	struct bfa_cb_qe_s	stats_hcb_qe;	/* stats completion callback */
	bfa_boolean_t		cfgdone;	/* fw config handshake done */

	struct bfa_dma_s	cfg_info;	/* DMA area for config */
	struct bfi_iocfc_cfg_s *cfginfo;	/* config sent to firmware */
	struct bfa_dma_s	cfgrsp_dma;	/* DMA area for config reply */
	struct bfi_iocfc_cfgrsp_s *cfgrsp;	/* config reply from firmware */
	struct bfi_iocfc_cfg_reply_s *cfg_reply;

	u8			*stats_kva;
	u64		stats_pa;
	struct bfa_fw_stats_s 	*fw_stats;
	struct bfa_timer_s 	stats_timer;	/*  timer */
	struct bfa_iocfc_stats_s *stats_ret;	/*  driver stats location */
	bfa_status_t		stats_status;	/*  stats/statsclr status */
	bfa_boolean_t   	stats_busy;	/*  outstanding stats */
	bfa_cb_ioc_t		stats_cbfn;	/*  driver callback function */
	void           		*stats_cbarg;	/*  user callback arg */

	struct bfa_dma_s   	req_cq_ba[BFI_IOC_MAX_CQS];
	struct bfa_dma_s   	req_cq_shadow_ci[BFI_IOC_MAX_CQS];
	struct bfa_dma_s   	rsp_cq_ba[BFI_IOC_MAX_CQS];
	struct bfa_dma_s   	rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
	struct bfa_iocfc_regs_s	bfa_regs;	/*  BFA device registers */
	struct bfa_hwif_s	hwif;		/* chip-specific hooks */

	bfa_cb_iocfc_t		updateq_cbfn; /*  bios callback function */
	void			*updateq_cbarg;	/*  bios callback arg */
};
107
/* LPU id used in host-to-firmware message headers */
#define bfa_lpuid(__bfa)	bfa_ioc_portid(&(__bfa)->ioc)
/* convenience wrappers that dispatch through the chip-specific hwif */
#define bfa_msix_init(__bfa, __nvecs)	\
	(__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)
#define bfa_msix_install(__bfa)	\
	(__bfa)->iocfc.hwif.hw_msix_install(__bfa)
#define bfa_msix_uninstall(__bfa)	\
	(__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)
#define bfa_isr_mode_set(__bfa, __msix)	\
	(__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)
#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)	\
	(__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)
119
120/*
121 * FC specific IOC functions.
122 */
123void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
124 u32 *dm_len);
125void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
126 struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
127 struct bfa_pcidev_s *pcidev);
128void bfa_iocfc_detach(struct bfa_s *bfa);
129void bfa_iocfc_init(struct bfa_s *bfa);
130void bfa_iocfc_start(struct bfa_s *bfa);
131void bfa_iocfc_stop(struct bfa_s *bfa);
132void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
133void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
134bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
135void bfa_iocfc_reset_queues(struct bfa_s *bfa);
136void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
137 u32 reqq_sci, u32 rspq_spi,
138 bfa_cb_iocfc_t cbfn, void *cbarg);
139
140void bfa_msix_all(struct bfa_s *bfa, int vec);
141void bfa_msix_reqq(struct bfa_s *bfa, int vec);
142void bfa_msix_rspq(struct bfa_s *bfa, int vec);
143void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
144
145void bfa_hwcb_reginit(struct bfa_s *bfa);
146void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
147void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
148void bfa_hwcb_msix_install(struct bfa_s *bfa);
149void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
150void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
151void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
152 u32 *nvecs, u32 *maxvec);
153void bfa_hwct_reginit(struct bfa_s *bfa);
154void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
155void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
156void bfa_hwct_msix_install(struct bfa_s *bfa);
157void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
158void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
159void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
160 u32 *nvecs, u32 *maxvec);
161
162void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
163void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
164 bfa_boolean_t mincfg);
165void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns);
166
167#endif /* __BFA_IOCFC_H__ */
168
diff --git a/drivers/scsi/bfa/bfa_iocfc_q.c b/drivers/scsi/bfa/bfa_iocfc_q.c
new file mode 100644
index 000000000000..500a17df40b2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc_q.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include "bfa_intr_priv.h"
20
21BFA_TRC_FILE(HAL, IOCFC_Q);
22
23void
24bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
25 u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
26 void *cbarg)
27{
28 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
29 struct bfi_iocfc_updateq_req_s updateq_req;
30
31 iocfc->updateq_cbfn = cbfn;
32 iocfc->updateq_cbarg = cbarg;
33
34 bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
35 bfa_lpuid(bfa));
36
37 updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
38 updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
39 updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
40 updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
41
42 bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
43 sizeof(struct bfi_iocfc_updateq_req_s));
44}
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
new file mode 100644
index 000000000000..7ae2552e1e14
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -0,0 +1,1311 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <cs/bfa_debug.h>
20#include <bfa_cb_ioim_macros.h>
21
22BFA_TRC_FILE(HAL, IOIM);
23
24/*
25 * forward declarations.
26 */
27static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
29static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
30static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
31static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
32static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
33static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
34static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
35static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
36static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
37
38/**
39 * bfa_ioim_sm
40 */
41
42/**
43 * IO state machine events
44 */
45enum bfa_ioim_event {
46 BFA_IOIM_SM_START = 1, /* io start request from host */
47 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
48 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
49 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
50 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
51 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
52 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
53 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
54 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
55 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
56 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
57 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
58 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
59 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
60 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
61 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
62 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
63 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
64};
65
66/*
67 * forward declaration of IO state machine
68 */
69static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
70 enum bfa_ioim_event event);
71static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
72 enum bfa_ioim_event event);
73static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
74 enum bfa_ioim_event event);
75static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
76 enum bfa_ioim_event event);
77static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
78 enum bfa_ioim_event event);
79static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
80 enum bfa_ioim_event event);
81static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
82 enum bfa_ioim_event event);
83static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
84 enum bfa_ioim_event event);
85static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
86 enum bfa_ioim_event event);
87static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
88 enum bfa_ioim_event event);
89static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
90 enum bfa_ioim_event event);
91
92/**
93 * IO is not started (unallocated).
94 */
95static void
96bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
97{
98 bfa_trc_fp(ioim->bfa, ioim->iotag);
99 bfa_trc_fp(ioim->bfa, event);
100
101 switch (event) {
102 case BFA_IOIM_SM_START:
103 if (!bfa_itnim_is_online(ioim->itnim)) {
104 if (!bfa_itnim_hold_io(ioim->itnim)) {
105 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
106 list_del(&ioim->qe);
107 list_add_tail(&ioim->qe,
108 &ioim->fcpim->ioim_comp_q);
109 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
110 __bfa_cb_ioim_pathtov, ioim);
111 } else {
112 list_del(&ioim->qe);
113 list_add_tail(&ioim->qe,
114 &ioim->itnim->pending_q);
115 }
116 break;
117 }
118
119 if (ioim->nsges > BFI_SGE_INLINE) {
120 if (!bfa_ioim_sge_setup(ioim)) {
121 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
122 return;
123 }
124 }
125
126 if (!bfa_ioim_send_ioreq(ioim)) {
127 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
128 break;
129 }
130
131 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
132 break;
133
134 case BFA_IOIM_SM_IOTOV:
135 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
136 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
137 __bfa_cb_ioim_pathtov, ioim);
138 break;
139
140 case BFA_IOIM_SM_ABORT:
141 /**
142 * IO in pending queue can get abort requests. Complete abort
143 * requests immediately.
144 */
145 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
146 bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
147 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
148 ioim);
149 break;
150
151 default:
152 bfa_assert(0);
153 }
154}
155
156/**
157 * IO is waiting for SG pages.
158 */
159static void
160bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
161{
162 bfa_trc(ioim->bfa, ioim->iotag);
163 bfa_trc(ioim->bfa, event);
164
165 switch (event) {
166 case BFA_IOIM_SM_SGALLOCED:
167 if (!bfa_ioim_send_ioreq(ioim)) {
168 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
169 break;
170 }
171 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
172 break;
173
174 case BFA_IOIM_SM_CLEANUP:
175 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
176 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
177 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
178 ioim);
179 bfa_ioim_notify_cleanup(ioim);
180 break;
181
182 case BFA_IOIM_SM_ABORT:
183 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
184 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
185 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
186 ioim);
187 break;
188
189 case BFA_IOIM_SM_HWFAIL:
190 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
191 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
192 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
193 ioim);
194 break;
195
196 default:
197 bfa_assert(0);
198 }
199}
200
201/**
202 * IO is active.
203 */
204static void
205bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
206{
207 bfa_trc_fp(ioim->bfa, ioim->iotag);
208 bfa_trc_fp(ioim->bfa, event);
209
210 switch (event) {
211 case BFA_IOIM_SM_COMP_GOOD:
212 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
213 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
214 __bfa_cb_ioim_good_comp, ioim);
215 break;
216
217 case BFA_IOIM_SM_COMP:
218 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
219 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
220 ioim);
221 break;
222
223 case BFA_IOIM_SM_DONE:
224 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
225 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
226 ioim);
227 break;
228
229 case BFA_IOIM_SM_ABORT:
230 ioim->iosp->abort_explicit = BFA_TRUE;
231 ioim->io_cbfn = __bfa_cb_ioim_abort;
232
233 if (bfa_ioim_send_abort(ioim))
234 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
235 else {
236 bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
237 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
238 &ioim->iosp->reqq_wait);
239 }
240 break;
241
242 case BFA_IOIM_SM_CLEANUP:
243 ioim->iosp->abort_explicit = BFA_FALSE;
244 ioim->io_cbfn = __bfa_cb_ioim_failed;
245
246 if (bfa_ioim_send_abort(ioim))
247 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
248 else {
249 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
250 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
251 &ioim->iosp->reqq_wait);
252 }
253 break;
254
255 case BFA_IOIM_SM_HWFAIL:
256 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
257 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
258 ioim);
259 break;
260
261 default:
262 bfa_assert(0);
263 }
264}
265
266/**
267 * IO is being aborted, waiting for completion from firmware.
268 */
269static void
270bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
271{
272 bfa_trc(ioim->bfa, ioim->iotag);
273 bfa_trc(ioim->bfa, event);
274
275 switch (event) {
276 case BFA_IOIM_SM_COMP_GOOD:
277 case BFA_IOIM_SM_COMP:
278 case BFA_IOIM_SM_DONE:
279 case BFA_IOIM_SM_FREE:
280 break;
281
282 case BFA_IOIM_SM_ABORT_DONE:
283 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
284 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
285 ioim);
286 break;
287
288 case BFA_IOIM_SM_ABORT_COMP:
289 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
290 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
291 ioim);
292 break;
293
294 case BFA_IOIM_SM_COMP_UTAG:
295 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
296 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
297 ioim);
298 break;
299
300 case BFA_IOIM_SM_CLEANUP:
301 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
302 ioim->iosp->abort_explicit = BFA_FALSE;
303
304 if (bfa_ioim_send_abort(ioim))
305 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
306 else {
307 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
308 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
309 &ioim->iosp->reqq_wait);
310 }
311 break;
312
313 case BFA_IOIM_SM_HWFAIL:
314 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
315 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
316 ioim);
317 break;
318
319 default:
320 bfa_assert(0);
321 }
322}
323
324/**
325 * IO is being cleaned up (implicit abort), waiting for completion from
326 * firmware.
327 */
328static void
329bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
330{
331 bfa_trc(ioim->bfa, ioim->iotag);
332 bfa_trc(ioim->bfa, event);
333
334 switch (event) {
335 case BFA_IOIM_SM_COMP_GOOD:
336 case BFA_IOIM_SM_COMP:
337 case BFA_IOIM_SM_DONE:
338 case BFA_IOIM_SM_FREE:
339 break;
340
341 case BFA_IOIM_SM_ABORT:
342 /**
343 * IO is already being aborted implicitly
344 */
345 ioim->io_cbfn = __bfa_cb_ioim_abort;
346 break;
347
348 case BFA_IOIM_SM_ABORT_DONE:
349 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
350 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
351 bfa_ioim_notify_cleanup(ioim);
352 break;
353
354 case BFA_IOIM_SM_ABORT_COMP:
355 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
356 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
357 bfa_ioim_notify_cleanup(ioim);
358 break;
359
360 case BFA_IOIM_SM_COMP_UTAG:
361 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
362 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
363 bfa_ioim_notify_cleanup(ioim);
364 break;
365
366 case BFA_IOIM_SM_HWFAIL:
367 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
368 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
369 ioim);
370 break;
371
372 case BFA_IOIM_SM_CLEANUP:
373 /**
374 * IO can be in cleanup state already due to TM command. 2nd cleanup
375 * request comes from ITN offline event.
376 */
377 break;
378
379 default:
380 bfa_assert(0);
381 }
382}
383
384/**
385 * IO is waiting for room in request CQ
386 */
387static void
388bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
389{
390 bfa_trc(ioim->bfa, ioim->iotag);
391 bfa_trc(ioim->bfa, event);
392
393 switch (event) {
394 case BFA_IOIM_SM_QRESUME:
395 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
396 bfa_ioim_send_ioreq(ioim);
397 break;
398
399 case BFA_IOIM_SM_ABORT:
400 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
401 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
402 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
403 ioim);
404 break;
405
406 case BFA_IOIM_SM_CLEANUP:
407 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
408 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
409 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
410 ioim);
411 bfa_ioim_notify_cleanup(ioim);
412 break;
413
414 case BFA_IOIM_SM_HWFAIL:
415 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
416 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
417 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
418 ioim);
419 break;
420
421 default:
422 bfa_assert(0);
423 }
424}
425
426/**
427 * Active IO is being aborted, waiting for room in request CQ.
428 */
429static void
430bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
431{
432 bfa_trc(ioim->bfa, ioim->iotag);
433 bfa_trc(ioim->bfa, event);
434
435 switch (event) {
436 case BFA_IOIM_SM_QRESUME:
 /* queue space available - send the pending abort to firmware */
437 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
438 bfa_ioim_send_abort(ioim);
439 break;
440
441 case BFA_IOIM_SM_CLEANUP:
 /*
  * Demote the driver-requested (explicit) abort to an implicit
  * cleanup; bfa_ioim_send_abort() picks the cleanup opcode from
  * abort_explicit when the request is finally sent.
  */
442 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
443 ioim->iosp->abort_explicit = BFA_FALSE;
444 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
445 break;
446
447 case BFA_IOIM_SM_COMP_GOOD:
448 case BFA_IOIM_SM_COMP:
 /* IO completed before the abort could be queued */
449 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
450 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
451 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
452 ioim);
453 break;
454
455 case BFA_IOIM_SM_DONE:
 /* IO tag cannot be reused yet - wait for firmware resource free */
456 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
457 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
458 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
459 ioim);
460 break;
461
462 case BFA_IOIM_SM_HWFAIL:
463 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
464 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
465 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
466 ioim);
467 break;
468
469 default:
470 bfa_assert(0);
471 }
472}
473
474/**
475 * Active IO is being cleaned up, waiting for room in request CQ.
476 */
477static void
478bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
479{
480 bfa_trc(ioim->bfa, ioim->iotag);
481 bfa_trc(ioim->bfa, event);
482
483 switch (event) {
484 case BFA_IOIM_SM_QRESUME:
 /* queue space available - send the pending cleanup request */
485 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
486 bfa_ioim_send_abort(ioim);
487 break;
488
489 case BFA_IOIM_SM_ABORT:
490 /**
491 * IO is already being cleaned up implicitly; just switch the
 * completion callback so the IO is reported as aborted.
492 */
493 ioim->io_cbfn = __bfa_cb_ioim_abort;
494 break;
495
496 case BFA_IOIM_SM_COMP_GOOD:
497 case BFA_IOIM_SM_COMP:
 /* IO completed before the cleanup could be queued */
498 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
499 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
500 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
501 bfa_ioim_notify_cleanup(ioim);
502 break;
503
504 case BFA_IOIM_SM_DONE:
505 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
506 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
507 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
508 bfa_ioim_notify_cleanup(ioim);
509 break;
510
511 case BFA_IOIM_SM_HWFAIL:
512 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
513 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
514 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
515 ioim);
516 break;
517
518 default:
519 bfa_assert(0);
520 }
521}
522
523/**
524 * IO bfa callback is pending.
525 */
526static void
527bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
528{
 /* fast-path state: uses _fp trace variants */
529 bfa_trc_fp(ioim->bfa, ioim->iotag);
530 bfa_trc_fp(ioim->bfa, event);
531
532 switch (event) {
533 case BFA_IOIM_SM_HCB:
 /* host callback delivered - recycle the IO and its tag */
534 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
535 bfa_ioim_free(ioim);
536 bfa_cb_ioim_resfree(ioim->bfa->bfad);
537 break;
538
539 case BFA_IOIM_SM_CLEANUP:
 /* callback already queued; just account the IO for cleanup */
540 bfa_ioim_notify_cleanup(ioim);
541 break;
542
543 case BFA_IOIM_SM_HWFAIL:
 /* nothing more to do - completion is already on its way */
544 break;
545
546 default:
547 bfa_assert(0);
548 }
549}
550
551/**
552 * IO bfa callback is pending. IO resource cannot be freed.
553 */
554static void
555bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
556{
557 bfa_trc(ioim->bfa, ioim->iotag);
558 bfa_trc(ioim->bfa, event);
559
560 switch (event) {
561 case BFA_IOIM_SM_HCB:
 /*
  * Host callback done first - park the IO on the resfree queue
  * until firmware releases the IO tag.
  */
562 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
563 list_del(&ioim->qe);
564 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
565 break;
566
567 case BFA_IOIM_SM_FREE:
 /* firmware freed the resource first - only the callback remains */
568 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
569 break;
570
571 case BFA_IOIM_SM_CLEANUP:
572 bfa_ioim_notify_cleanup(ioim);
573 break;
574
575 case BFA_IOIM_SM_HWFAIL:
 /* IOC died - no resource-free will come; wait for callback only */
576 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
577 break;
578
579 default:
580 bfa_assert(0);
581 }
582}
583
584/**
585 * IO is completed, waiting resource free from firmware.
586 */
587static void
588bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
589{
590 bfa_trc(ioim->bfa, ioim->iotag);
591 bfa_trc(ioim->bfa, event);
592
593 switch (event) {
594 case BFA_IOIM_SM_FREE:
 /* firmware released the IO tag - recycle the IO */
595 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
596 bfa_ioim_free(ioim);
597 bfa_cb_ioim_resfree(ioim->bfa->bfad);
598 break;
599
600 case BFA_IOIM_SM_CLEANUP:
601 bfa_ioim_notify_cleanup(ioim);
602 break;
603
604 case BFA_IOIM_SM_HWFAIL:
 /* resource free will never arrive; IO is reclaimed elsewhere */
605 break;
606
607 default:
608 bfa_assert(0);
609 }
610}
611
612
613
614/**
615 * bfa_ioim_private
616 */
617
 /*
  * Deferred host-callback: report good (no-error) IO completion.
  * With complete == BFA_FALSE the callback was cancelled/re-queued,
  * so only feed the HCB event back into the state machine.
  */
618static void
619__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
620{
621 struct bfa_ioim_s *ioim = cbarg;
622
623 if (!complete) {
624 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
625 return;
626 }
627
628 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
629}
630
 /*
  * Deferred host-callback: report IO completion with full status,
  * extracting sense data and the data-transfer residue from the saved
  * firmware response (iosp->comp_rspmsg).
  */
631static void
632__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
633{
634 struct bfa_ioim_s *ioim = cbarg;
635 struct bfi_ioim_rsp_s *m;
636 u8 *snsinfo = NULL;
637 u8 sns_len = 0;
638 s32 residue = 0;
639
640 if (!complete) {
641 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
642 return;
643 }
644
645 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
646 if (m->io_status == BFI_IOIM_STS_OK) {
647 /**
648 * setup sense information, if present
649 */
650 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
651 && m->sns_len) {
652 sns_len = m->sns_len;
653 snsinfo = ioim->iosp->snsinfo;
654 }
655
656 /**
657 * setup residue value correctly for normal completions
 * (underrun reported positive, overrun reported negative)
658 */
659 if (m->resid_flags == FCP_RESID_UNDER)
660 residue = bfa_os_ntohl(m->residue);
661 if (m->resid_flags == FCP_RESID_OVER) {
662 residue = bfa_os_ntohl(m->residue);
663 residue = -residue;
664 }
665 }
666
667 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
668 m->scsi_status, sns_len, snsinfo, residue);
669}
670
 /*
  * Deferred host-callback: fail the IO with ABORTED status so the
  * IO stack can retry it (used for cleanup/HW-failure paths).
  */
671static void
672__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
673{
674 struct bfa_ioim_s *ioim = cbarg;
675
676 if (!complete) {
677 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
678 return;
679 }
680
681 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
682 0, 0, NULL, 0);
683}
684
 /*
  * Deferred host-callback: fail the IO with PATHTOV status after the
  * path timeout expired (see bfa_ioim_delayed_comp()).
  */
685static void
686__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
687{
688 struct bfa_ioim_s *ioim = cbarg;
689
690 if (!complete) {
691 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
692 return;
693 }
694
695 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
696 0, 0, NULL, 0);
697}
698
 /*
  * Deferred host-callback: report a driver-requested abort completion.
  */
699static void
700__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
701{
702 struct bfa_ioim_s *ioim = cbarg;
703
704 if (!complete) {
705 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
706 return;
707 }
708
709 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
710}
711
 /*
  * Callback fired when the SG pages this IO was waiting on become
  * available: claim them, program the SG entries and resume the
  * state machine.
  */
712static void
713bfa_ioim_sgpg_alloced(void *cbarg)
714{
715 struct bfa_ioim_s *ioim = cbarg;
716
717 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
718 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
719 bfa_ioim_sgpg_setup(ioim);
720 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
721}
722
723/**
724 * Send I/O request to firmware.
725 */
726static bfa_boolean_t
727bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
728{
729 struct bfa_itnim_s *itnim = ioim->itnim;
730 struct bfi_ioim_req_s *m;
731 static struct fcp_cmnd_s cmnd_z0 = { 0 };
732 struct bfi_sge_s *sge;
733 u32 pgdlen = 0;
734
735 /**
736 * check for room in queue to send request now; if full, park on
 * the request-queue wait list and report BFA_FALSE to the caller
737 */
738 m = bfa_reqq_next(ioim->bfa, itnim->reqq);
739 if (!m) {
740 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
741 &ioim->iosp->reqq_wait);
742 return BFA_FALSE;
743 }
744
745 /**
746 * build i/o request message next
747 */
748 m->io_tag = bfa_os_htons(ioim->iotag);
749 m->rport_hdl = ioim->itnim->rport->fw_handle;
750 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
751
752 /**
753 * build inline IO SG element here
754 */
755 sge = &m->sges[0];
756 if (ioim->nsges) {
757 sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0);
758 pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0);
759 sge->sg_len = pgdlen;
760 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
761 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
762 bfa_sge_to_be(sge);
763 sge++;
764 }
765
 /*
  * Second inline element: either a link to the external SG pages
  * (when more than BFI_SGE_INLINE entries) or a zeroed terminator.
  */
766 if (ioim->nsges > BFI_SGE_INLINE) {
767 sge->sga = ioim->sgpg->sgpg_pa;
768 } else {
769 sge->sga.a32.addr_lo = 0;
770 sge->sga.a32.addr_hi = 0;
771 }
772 sge->sg_len = pgdlen;
773 sge->flags = BFI_SGE_PGDLEN;
774 bfa_sge_to_be(sge);
775
776 /**
777 * set up I/O command parameters (cmnd_z0 zero-fills the FCP_CMND
 * before the individual fields are assigned)
778 */
779 bfa_os_assign(m->cmnd, cmnd_z0);
780 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
781 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
782 bfa_os_assign(m->cmnd.cdb,
783 *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
784 m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
785
786 /**
787 * set up I/O message header
788 */
789 switch (m->cmnd.iodir) {
790 case FCP_IODIR_READ:
791 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
792 bfa_stats(itnim, input_reqs);
793 break;
794 case FCP_IODIR_WRITE:
795 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
796 bfa_stats(itnim, output_reqs);
797 break;
798 case FCP_IODIR_RW:
799 bfa_stats(itnim, input_reqs);
800 bfa_stats(itnim, output_reqs);
 /* fall through - bidirectional IO uses the generic opcode */
801 default:
802 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
803 }
 /*
  * Force the generic (slower) opcode when sequence-level error
  * recovery is enabled or the transfer size is not 32-bit aligned.
  */
804 if (itnim->seq_rec ||
805 (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
806 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
807
808#ifdef IOIM_ADVANCED
809 m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
810 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
811 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
812
813 /**
814 * Handle large CDB (>16 bytes).
815 */
816 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
817 FCP_CMND_CDB_LEN) / sizeof(u32);
818 if (m->cmnd.addl_cdb_len) {
819 bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
820 bfa_cb_ioim_get_cdb(ioim->dio) + 1,
821 m->cmnd.addl_cdb_len * sizeof(u32));
822 fcp_cmnd_fcpdl(&m->cmnd) =
823 bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
824 }
825#endif
826
827 /**
828 * queue I/O message to firmware
829 */
830 bfa_reqq_produce(ioim->bfa, itnim->reqq);
831 return BFA_TRUE;
832}
833
834/**
835 * Setup any additional SG pages needed.Inline SG element is setup
836 * at queuing time.
837 */
838static bfa_boolean_t
839bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
840{
841 u16 nsgpgs;
842
843 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
844
845 /**
846 * allocate SG pages needed
847 */
848 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
849 if (!nsgpgs)
850 return BFA_TRUE;
851
 /*
  * On shortage, queue on the SG-page wait list; returning BFA_FALSE
  * tells the caller to wait for bfa_ioim_sgpg_alloced().
  */
852 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
853 != BFA_STATUS_OK) {
854 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
855 return BFA_FALSE;
856 }
857
858 ioim->nsgpgs = nsgpgs;
859 bfa_ioim_sgpg_setup(ioim);
860
861 return BFA_TRUE;
862}
863
 /*
  * Fill the allocated SG pages with the IO's scatter-gather entries
  * beyond the inline ones, chaining pages together with LINK elements
  * and terminating the last page with a PGDLEN element.
  */
864static void
865bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
866{
867 int sgeid, nsges, i;
868 struct bfi_sge_s *sge;
869 struct bfa_sgpg_s *sgpg;
870 u32 pgcumsz;
871
 /* inline entries were consumed by bfa_ioim_send_ioreq() */
872 sgeid = BFI_SGE_INLINE;
873 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
874
875 do {
876 sge = sgpg->sgpg->sges;
877 nsges = ioim->nsges - sgeid;
878 if (nsges > BFI_SGPG_DATA_SGES)
879 nsges = BFI_SGPG_DATA_SGES;
880
881 pgcumsz = 0;
882 for (i = 0; i < nsges; i++, sge++, sgeid++) {
883 sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
884 sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
885 pgcumsz += sge->sg_len;
886
887 /**
888 * set flags: DATA within a page, DATA_CPL at a page
 * boundary, DATA_LAST on the final entry of the IO
889 */
890 if (i < (nsges - 1))
891 sge->flags = BFI_SGE_DATA;
892 else if (sgeid < (ioim->nsges - 1))
893 sge->flags = BFI_SGE_DATA_CPL;
894 else
895 sge->flags = BFI_SGE_DATA_LAST;
896 }
897
898 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
899
900 /**
901 * set the link element of each page
902 */
903 if (sgeid == ioim->nsges) {
904 sge->flags = BFI_SGE_PGDLEN;
905 sge->sga.a32.addr_lo = 0;
906 sge->sga.a32.addr_hi = 0;
907 } else {
908 sge->flags = BFI_SGE_LINK;
909 sge->sga = sgpg->sgpg_pa;
910 }
911 sge->sg_len = pgcumsz;
912 } while (sgeid < ioim->nsges);
913}
914
915/**
916 * Send I/O abort request to firmware.
917 */
918static bfa_boolean_t
919bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
920{
921 struct bfa_itnim_s *itnim = ioim->itnim;
922 struct bfi_ioim_abort_req_s *m;
923 enum bfi_ioim_h2i msgop;
924
925 /**
926 * check for room in queue to send request now; caller handles the
 * qfull (BFA_FALSE) case - no reqq wait is registered here
927 */
928 m = bfa_reqq_next(ioim->bfa, itnim->reqq);
929 if (!m)
930 return BFA_FALSE;
931
932 /**
933 * build i/o request message next: explicit driver abort vs.
 * implicit cleanup (see abort_explicit handling in the SM)
934 */
935 if (ioim->iosp->abort_explicit)
936 msgop = BFI_IOIM_H2I_IOABORT_REQ;
937 else
938 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
939
940 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
941 m->io_tag = bfa_os_htons(ioim->iotag);
 /* bump abort_tag so stale firmware responses can be detected */
942 m->abort_tag = ++ioim->abort_tag;
943
944 /**
945 * queue I/O message to firmware
946 */
947 bfa_reqq_produce(ioim->bfa, itnim->reqq);
948 return BFA_TRUE;
949}
950
951/**
952 * Call to resume any I/O requests waiting for room in request queue.
953 */
954static void
955bfa_ioim_qresume(void *cbarg)
956{
957 struct bfa_ioim_s *ioim = cbarg;
958
 /* request queue has space again - resume the waiting IO */
959 bfa_fcpim_stats(ioim->fcpim, qresumes);
960 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
961}
962
963
 /*
  * Account one IO towards cleanup completion: notify either the TM
  * command driving the cleanup or the itnim going offline.
  */
964static void
965bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
966{
967 /**
968 * Move IO from itnim queue to fcpim global queue since itnim will be
969 * freed.
970 */
971 list_del(&ioim->qe);
972 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
973
974 if (!ioim->iosp->tskim) {
 /* with delayed completion active, hold the IO on the
  * itnim's delay queue instead of completing it now */
975 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
976 bfa_cb_dequeue(&ioim->hcb_qe);
977 list_del(&ioim->qe);
978 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
979 }
980 bfa_itnim_iodone(ioim->itnim);
981 } else
982 bfa_tskim_iodone(ioim->iosp->tskim);
983}
984
985/**
986 * Fail back an IO whose completion was delayed while the itnim was
987 * offline - either because the path TOV expired or after the link
 * comes back.
 */
988void
989bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
990{
991 /**
992 * If path tov timer expired, failback with PATHTOV status - these
993 * IO requests are not normally retried by IO stack.
994 *
995 * Otherwise device cameback online and fail it with normal failed
996 * status so that IO stack retries these failed IO requests.
997 */
998 if (iotov)
999 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
1000 else
1001 ioim->io_cbfn = __bfa_cb_ioim_failed;
1002
1003 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1004
1005 /**
1006 * Move IO to fcpim global queue since itnim will be
1007 * freed.
1008 */
1009 list_del(&ioim->qe);
1010 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1011}
1012
1013
1014
1015/**
1016 * bfa_ioim_friend
1017 */
1018
1019/**
1020 * Memory allocation and initialization.
1021 */
1022void
1023bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1024{
1025 struct bfa_ioim_s *ioim;
1026 struct bfa_ioim_sp_s *iosp;
1027 u16 i;
1028 u8 *snsinfo;
1029 u32 snsbufsz;
1030
1031 /**
1032 * claim memory first: one bfa_ioim_s and one bfa_ioim_sp_s per
 * IO request, carved out of the pre-sized meminfo KVA region
1033 */
1034 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
1035 fcpim->ioim_arr = ioim;
1036 bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
1037
1038 iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
1039 fcpim->ioim_sp_arr = iosp;
1040 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
1041
1042 /**
1043 * Claim DMA memory for per IO sense data (BFI_IOIM_SNSLEN bytes
 * per IO), and hand the base address to the firmware.
1044 */
1045 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
1046 fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
1047 bfa_meminfo_dma_phys(minfo) += snsbufsz;
1048
1049 fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
1050 bfa_meminfo_dma_virt(minfo) += snsbufsz;
1051 snsinfo = fcpim->snsbase.kva;
1052 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
1053
1054 /**
1055 * Initialize ioim free queues
1056 */
1057 INIT_LIST_HEAD(&fcpim->ioim_free_q);
1058 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
1059 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
1060
1061 for (i = 0; i < fcpim->num_ioim_reqs;
1062 i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
1063 /*
1064 * initialize IOIM: iotag equals array index, wait-queue
 * callbacks wired up, initial state uninit
1065 */
1066 bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
1067 ioim->iotag = i;
1068 ioim->bfa = fcpim->bfa;
1069 ioim->fcpim = fcpim;
1070 ioim->iosp = iosp;
1071 iosp->snsinfo = snsinfo;
1072 INIT_LIST_HEAD(&ioim->sgpg_q);
1073 bfa_reqq_winit(&ioim->iosp->reqq_wait,
1074 bfa_ioim_qresume, ioim);
1075 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
1076 bfa_ioim_sgpg_alloced, ioim);
1077 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1078
1079 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
1080 }
1081}
1082
1083/**
1084 * Driver detach time call.
1085 */
1086void
1087bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
1088{
 /* nothing to release - all IOIM memory is meminfo-owned */
1089}
1090
 /*
  * IO completion interrupt handler: map the firmware response status
  * to a state-machine event and dispatch it to the owning IO.
  */
1091void
1092bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1093{
1094 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1095 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
1096 struct bfa_ioim_s *ioim;
1097 u16 iotag;
1098 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
1099
1100 iotag = bfa_os_ntohs(rsp->io_tag);
1101
1102 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
1103 bfa_assert(ioim->iotag == iotag);
1104
1105 bfa_trc(ioim->bfa, ioim->iotag);
1106 bfa_trc(ioim->bfa, rsp->io_status);
1107 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
1108
 /* save the response for __bfa_cb_ioim_comp() to decode later */
1109 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
1110 bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
1111
1112 switch (rsp->io_status) {
1113 case BFI_IOIM_STS_OK:
1114 bfa_fcpim_stats(fcpim, iocomp_ok);
 /* reuse_io_tag == 0 means firmware still holds the tag
  * (DONE waits for a separate resource-free) */
1115 if (rsp->reuse_io_tag == 0)
1116 evt = BFA_IOIM_SM_DONE;
1117 else
1118 evt = BFA_IOIM_SM_COMP;
1119 break;
1120
1121 case BFI_IOIM_STS_TIMEDOUT:
1122 case BFI_IOIM_STS_ABORTED:
1123 rsp->io_status = BFI_IOIM_STS_ABORTED;
1124 bfa_fcpim_stats(fcpim, iocomp_aborted);
1125 if (rsp->reuse_io_tag == 0)
1126 evt = BFA_IOIM_SM_DONE;
1127 else
1128 evt = BFA_IOIM_SM_COMP;
1129 break;
1130
1131 case BFI_IOIM_STS_PROTO_ERR:
1132 bfa_fcpim_stats(fcpim, iocom_proto_err);
1133 bfa_assert(rsp->reuse_io_tag);
1134 evt = BFA_IOIM_SM_COMP;
1135 break;
1136
1137 case BFI_IOIM_STS_SQER_NEEDED:
1138 bfa_fcpim_stats(fcpim, iocom_sqer_needed);
1139 bfa_assert(rsp->reuse_io_tag == 0);
1140 evt = BFA_IOIM_SM_SQRETRY;
1141 break;
1142
1143 case BFI_IOIM_STS_RES_FREE:
1144 bfa_fcpim_stats(fcpim, iocom_res_free);
1145 evt = BFA_IOIM_SM_FREE;
1146 break;
1147
1148 case BFI_IOIM_STS_HOST_ABORTED:
1149 bfa_fcpim_stats(fcpim, iocom_hostabrts);
 /* drop stale abort responses (tag bumped on each abort) */
1150 if (rsp->abort_tag != ioim->abort_tag) {
1151 bfa_trc(ioim->bfa, rsp->abort_tag);
1152 bfa_trc(ioim->bfa, ioim->abort_tag);
1153 return;
1154 }
1155
1156 if (rsp->reuse_io_tag)
1157 evt = BFA_IOIM_SM_ABORT_COMP;
1158 else
1159 evt = BFA_IOIM_SM_ABORT_DONE;
1160 break;
1161
1162 case BFI_IOIM_STS_UTAG:
1163 bfa_fcpim_stats(fcpim, iocom_utags);
1164 evt = BFA_IOIM_SM_COMP_UTAG;
1165 break;
1166
1167 default:
1168 bfa_assert(0);
1169 }
1170
1171 bfa_sm_send_event(ioim, evt);
1172}
1173
 /*
  * Fast-path interrupt handler for good (no-error) IO completions:
  * no response decoding needed, just deliver COMP_GOOD.
  */
1174void
1175bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1176{
1177 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1178 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
1179 struct bfa_ioim_s *ioim;
1180 u16 iotag;
1181
1182 iotag = bfa_os_ntohs(rsp->io_tag);
1183
1184 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
1185 bfa_assert(ioim->iotag == iotag);
1186
1187 bfa_trc_fp(ioim->bfa, ioim->iotag);
1188 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
1189}
1190
1191/**
1192 * Called by itnim to clean up IO while going offline.
1193 */
1194void
1195bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
1196{
1197 bfa_trc(ioim->bfa, ioim->iotag);
1198 bfa_fcpim_stats(ioim->fcpim, io_cleanups);
1199
 /* no TM command involved - completion notifies the itnim */
1200 ioim->iosp->tskim = NULL;
1201 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
1202}
1203
 /*
  * Clean up an IO on behalf of a task-management command; cleanup
  * completion will notify the given tskim instead of the itnim.
  */
1204void
1205bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
1206{
1207 bfa_trc(ioim->bfa, ioim->iotag);
1208 bfa_fcpim_stats(ioim->fcpim, io_tmaborts);
1209
1210 ioim->iosp->tskim = tskim;
1211 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
1212}
1213
1214/**
1215 * IOC failure handling.
1216 */
1217void
1218bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
1219{
 /* propagate IOC hardware failure into the IO state machine */
1220 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
1221}
1222
1223/**
1224 * IO offline TOV popped. Fail the pending IO.
1225 */
1226void
1227bfa_ioim_tov(struct bfa_ioim_s *ioim)
1228{
1229 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
1230}
1231
1232
1233
1234/**
1235 * bfa_ioim_api
1236 */
1237
1238/**
1239 * Allocate IOIM resource for initiator mode I/O request.
1240 */
1241struct bfa_ioim_s *
1242bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
1243 struct bfa_itnim_s *itnim, u16 nsges)
1244{
1245 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1246 struct bfa_ioim_s *ioim;
1247
1248 /**
1249 * allocate IOIM resource from the free queue; NULL when all IO
 * tags are in use (caller must handle)
1250 */
1251 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
1252 if (!ioim) {
1253 bfa_fcpim_stats(fcpim, no_iotags);
1254 return NULL;
1255 }
1256
1257 ioim->dio = dio;
1258 ioim->itnim = itnim;
1259 ioim->nsges = nsges;
1260 ioim->nsgpgs = 0;
1261
1262 bfa_stats(fcpim, total_ios);
1263 bfa_stats(itnim, ios);
1264 fcpim->ios_active++;
1265
1266 list_add_tail(&ioim->qe, &itnim->io_q);
1267 bfa_trc_fp(ioim->bfa, ioim->iotag);
1268
1269 return ioim;
1270}
1271
 /*
  * Return an IO to the free queue, releasing any external SG pages
  * it still holds. Must only be called from the uninit state.
  */
1272void
1273bfa_ioim_free(struct bfa_ioim_s *ioim)
1274{
1275 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
1276
1277 bfa_trc_fp(ioim->bfa, ioim->iotag);
1278 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
1279
1280 bfa_assert_fp(list_empty(&ioim->sgpg_q)
1281 || (ioim->nsges > BFI_SGE_INLINE));
1282
1283 if (ioim->nsgpgs > 0)
1284 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
1285
1286 bfa_stats(ioim->itnim, io_comps);
1287 fcpim->ios_active--;
1288
1289 list_del(&ioim->qe);
1290 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
1291}
1292
1293void
1294bfa_ioim_start(struct bfa_ioim_s *ioim)
1295{
 /* kick off an IO previously set up by bfa_ioim_alloc() */
1296 bfa_trc_fp(ioim->bfa, ioim->iotag);
1297 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
1298}
1299
1300/**
1301 * Driver I/O abort request.
1302 */
1303void
1304bfa_ioim_abort(struct bfa_ioim_s *ioim)
1305{
1306 bfa_trc(ioim->bfa, ioim->iotag);
1307 bfa_fcpim_stats(ioim->fcpim, io_aborts);
1308 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
1309}
1310
1311
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
new file mode 100644
index 000000000000..4d5c61a4f85c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_fcpim.h>
20#include "bfa_fcpim_priv.h"
21
22BFA_TRC_FILE(HAL, ITNIM);
23
 /* Map a firmware itnim tag back to its bfa_itnim_s (power-of-two mask) */
24#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
25 ((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))
26
 /* Add/remove an itnim on the fcpim module's itnim list; removal
  * asserts the itnim no longer owns any IO. */
27#define bfa_fcpim_additn(__itnim) \
28 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
29#define bfa_fcpim_delitn(__itnim) do { \
30 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
31 list_del(&(__itnim)->qe); \
32 bfa_assert(list_empty(&(__itnim)->io_q)); \
33 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
34 bfa_assert(list_empty(&(__itnim)->pending_q)); \
35} while (0)
36
 /* Notify online/offline/sler: synchronously when FCS drives the bfa,
  * otherwise deferred through the callback queue. */
37#define bfa_itnim_online_cb(__itnim) do { \
38 if ((__itnim)->bfa->fcs) \
39 bfa_cb_itnim_online((__itnim)->ditn); \
40 else { \
41 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
42 __bfa_cb_itnim_online, (__itnim)); \
43 } \
44} while (0)
45
46#define bfa_itnim_offline_cb(__itnim) do { \
47 if ((__itnim)->bfa->fcs) \
48 bfa_cb_itnim_offline((__itnim)->ditn); \
49 else { \
50 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
51 __bfa_cb_itnim_offline, (__itnim)); \
52 } \
53} while (0)
54
55#define bfa_itnim_sler_cb(__itnim) do { \
56 if ((__itnim)->bfa->fcs) \
57 bfa_cb_itnim_sler((__itnim)->ditn); \
58 else { \
59 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
60 __bfa_cb_itnim_sler, (__itnim)); \
61 } \
62} while (0)
63
64/*
65 * forward declarations
66 */
67static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
68static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
69static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
70static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
71static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
72static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
73static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
74static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
75static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
76static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
77static void bfa_itnim_iotov(void *itnim_arg);
78static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
79static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
80static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
81
82/**
83 * bfa_itnim_sm BFA itnim state machine
84 */
85
86
 /* Events driving the itnim state machine below */
87enum bfa_itnim_event {
88 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
89 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
90 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
91 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
92 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
93 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
94 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
95 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
96 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
97};
98
99static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
100 enum bfa_itnim_event event);
101static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
102 enum bfa_itnim_event event);
103static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
104 enum bfa_itnim_event event);
105static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
106 enum bfa_itnim_event event);
107static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
108 enum bfa_itnim_event event);
109static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
110 enum bfa_itnim_event event);
111static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
112 enum bfa_itnim_event event);
113static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
114 enum bfa_itnim_event event);
115static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
116 enum bfa_itnim_event event);
117static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
118 enum bfa_itnim_event event);
119static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
120 enum bfa_itnim_event event);
121static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
122 enum bfa_itnim_event event);
123static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
124 enum bfa_itnim_event event);
125static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
126 enum bfa_itnim_event event);
127static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
128 enum bfa_itnim_event event);
129
130/**
131 * Beginning/unallocated state - no events expected.
132 */
133static void
134bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
135{
136 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
137 bfa_trc(itnim->bfa, event);
138
139 switch (event) {
140 case BFA_ITNIM_SM_CREATE:
 /* register the itnim with the fcpim module */
141 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
142 itnim->is_online = BFA_FALSE;
143 bfa_fcpim_additn(itnim);
144 break;
145
146 default:
147 bfa_assert(0);
148 }
149}
150
151/**
152 * Beginning state, only online event expected.
153 */
154static void
155bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
156{
157 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
158 bfa_trc(itnim->bfa, event);
159
160 switch (event) {
161 case BFA_ITNIM_SM_ONLINE:
 /* fwcreate_qfull when the request queue has no room yet */
162 if (bfa_itnim_send_fwcreate(itnim))
163 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
164 else
165 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
166 break;
167
168 case BFA_ITNIM_SM_DELETE:
 /* never reached firmware - unregister immediately */
169 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
170 bfa_fcpim_delitn(itnim);
171 break;
172
173 case BFA_ITNIM_SM_HWFAIL:
174 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
175 break;
176
177 default:
178 bfa_assert(0);
179 }
180}
181
182/**
183 * Waiting for itnim create response from firmware.
184 */
185static void
186bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
187{
188 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
189 bfa_trc(itnim->bfa, event);
190
191 switch (event) {
192 case BFA_ITNIM_SM_FWRSP:
 /* firmware acknowledged the create - itnim is now online */
193 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
194 itnim->is_online = BFA_TRUE;
195 bfa_itnim_iotov_online(itnim);
196 bfa_itnim_online_cb(itnim);
197 break;
198
199 case BFA_ITNIM_SM_DELETE:
 /* must wait for the create response before deleting */
200 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
201 break;
202
203 case BFA_ITNIM_SM_OFFLINE:
204 if (bfa_itnim_send_fwdelete(itnim))
205 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
206 else
207 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
208 break;
209
210 case BFA_ITNIM_SM_HWFAIL:
211 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
212 break;
213
214 default:
215 bfa_assert(0);
216 }
217}
218
 /*
  * Waiting for request-queue space to send the firmware create; all
  * exits except QRESUME must cancel the reqq wait.
  */
219static void
220bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
221 enum bfa_itnim_event event)
222{
223 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
224 bfa_trc(itnim->bfa, event);
225
226 switch (event) {
227 case BFA_ITNIM_SM_QRESUME:
228 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
229 bfa_itnim_send_fwcreate(itnim);
230 break;
231
232 case BFA_ITNIM_SM_DELETE:
233 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
234 bfa_reqq_wcancel(&itnim->reqq_wait);
235 bfa_fcpim_delitn(itnim);
236 break;
237
238 case BFA_ITNIM_SM_OFFLINE:
239 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
240 bfa_reqq_wcancel(&itnim->reqq_wait);
241 bfa_itnim_offline_cb(itnim);
242 break;
243
244 case BFA_ITNIM_SM_HWFAIL:
245 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
246 bfa_reqq_wcancel(&itnim->reqq_wait);
247 break;
248
249 default:
250 bfa_assert(0);
251 }
252}
253
254/**
255 * Waiting for itnim create response from firmware, a delete is pending.
256 */
257static void
258bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
259 enum bfa_itnim_event event)
260{
261 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
262 bfa_trc(itnim->bfa, event);
263
264 switch (event) {
265 case BFA_ITNIM_SM_FWRSP:
 /* create acknowledged - now issue the pending delete */
266 if (bfa_itnim_send_fwdelete(itnim))
267 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
268 else
269 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
270 break;
271
272 case BFA_ITNIM_SM_HWFAIL:
 /* no firmware response will come - finish the delete now */
273 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
274 bfa_fcpim_delitn(itnim);
275 break;
276
277 default:
278 bfa_assert(0);
279 }
280}
281
282/**
283 * Online state - normal parking state.
284 */
285static void
286bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
287{
288 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
289 bfa_trc(itnim->bfa, event);
290
291 switch (event) {
292 case BFA_ITNIM_SM_OFFLINE:
 /* clean up active IO before telling firmware to delete */
293 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
294 itnim->is_online = BFA_FALSE;
295 bfa_itnim_iotov_start(itnim);
296 bfa_itnim_cleanup(itnim);
297 break;
298
299 case BFA_ITNIM_SM_DELETE:
 /* delete path skips the IO TOV timer */
300 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
301 itnim->is_online = BFA_FALSE;
302 bfa_itnim_cleanup(itnim);
303 break;
304
305 case BFA_ITNIM_SM_SLER:
 /* second-level error recovery requested by firmware */
306 bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
307 itnim->is_online = BFA_FALSE;
308 bfa_itnim_iotov_start(itnim);
309 bfa_itnim_sler_cb(itnim);
310 break;
311
312 case BFA_ITNIM_SM_HWFAIL:
313 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
314 itnim->is_online = BFA_FALSE;
315 bfa_itnim_iotov_start(itnim);
316 bfa_itnim_iocdisable_cleanup(itnim);
317 break;
318
319 default:
320 bfa_assert(0);
321 }
322}
323
/**
 * Second level error recovery need.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Deleting: also flush the path-TOV pending queue. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
354
/**
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done; ask firmware to delete its itnim. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Already going offline; SLER is a no-op here. */
		break;

	default:
		bfa_assert(0);
	}
}
391
/**
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done; request firmware delete. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
419
/**
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Delete request arrives while the f/w delete is in flight. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
448
/**
 * Firmware delete could not be sent (request queue full); waiting for
 * queue space to retry.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available: retry the firmware delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Cancel the queue wait before notifying offline. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
476
/**
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		/* Flush pending-IO queue, then free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_assert(0);
	}
}
508
/**
 * IOC h/w failed state.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* Already cleaned up; just complete the offline callback. */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* IOC recovered: re-create the firmware itnim. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Repeated HWFAIL while disabled is ignored. */
		break;

	default:
		bfa_assert(0);
	}
}
544
/**
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Either firmware confirmed or IOC died: free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
565
/**
 * Itnim is deleted but the firmware delete could not be sent (request
 * queue full); waiting for queue space to retry.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available: retry the firmware delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Cancel the queue wait and free the itnim directly. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
589
590
591
592/**
593 * bfa_itnim_private
594 */
595
/**
 * Initiate cleanup of all IOs on an IOC failure.
 *
 * Fails task management commands and active/cleanup-queue IOs with an
 * IOC-disable disposition; IOs still on the pending queue are completed
 * as if the path TOV timer had fired early.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/**
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
629
/**
 * IO cleanup completion.
 *
 * Worker-counter callback fired when all IOs/TMs queued by
 * bfa_itnim_cleanup() have completed; advances the state machine.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
641
/**
 * Initiate cleanup of all IOs.
 *
 * Uses a worker counter: each outstanding IO/TM cleanup bumps the
 * counter, and bfa_itnim_cleanp_comp() runs once they all drain.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/**
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; may fire the completion inline. */
	bfa_wc_wait(&itnim->wc);
}
676
677static void
678__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
679{
680 struct bfa_itnim_s *itnim = cbarg;
681
682 if (complete)
683 bfa_cb_itnim_online(itnim->ditn);
684}
685
686static void
687__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
688{
689 struct bfa_itnim_s *itnim = cbarg;
690
691 if (complete)
692 bfa_cb_itnim_offline(itnim->ditn);
693}
694
695static void
696__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
697{
698 struct bfa_itnim_s *itnim = cbarg;
699
700 if (complete)
701 bfa_cb_itnim_sler(itnim->ditn);
702}
703
/**
 * Call to resume any I/O requests waiting for room in request queue.
 *
 * Registered via bfa_reqq_winit(); delivers a QRESUME event so the
 * state machine can retry the deferred firmware create/delete.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
714
715
716
717
718/**
719 * bfa_itnim_public
720 */
721
/**
 * An IO cleanup completed; drop one reference on the cleanup
 * worker counter started by bfa_itnim_cleanup().
 */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
727
/**
 * A task management cleanup completed; drop one reference on the
 * cleanup worker counter started by bfa_itnim_cleanup().
 */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
733
/**
 * Report kernel-virtual memory needed by this module: one itnim
 * structure per configured rport. DMA memory (dm_len) is not used.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/**
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
743
744void
745bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
746{
747 struct bfa_s *bfa = fcpim->bfa;
748 struct bfa_itnim_s *itnim;
749 int i;
750
751 INIT_LIST_HEAD(&fcpim->itnim_q);
752
753 itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
754 fcpim->itnim_arr = itnim;
755
756 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
757 bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
758 itnim->bfa = bfa;
759 itnim->fcpim = fcpim;
760 itnim->reqq = BFA_REQQ_QOS_LO;
761 itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
762 itnim->iotov_active = BFA_FALSE;
763 bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
764
765 INIT_LIST_HEAD(&itnim->io_q);
766 INIT_LIST_HEAD(&itnim->io_cleanup_q);
767 INIT_LIST_HEAD(&itnim->pending_q);
768 INIT_LIST_HEAD(&itnim->tsk_q);
769 INIT_LIST_HEAD(&itnim->delay_comp_q);
770 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
771 }
772
773 bfa_meminfo_kva(minfo) = (u8 *) itnim;
774}
775
/**
 * IOC went down: drive the itnim state machine into its
 * IOC-disabled handling via a HWFAIL event.
 */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
782
783static bfa_boolean_t
784bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
785{
786 struct bfi_itnim_create_req_s *m;
787
788 itnim->msg_no++;
789
790 /**
791 * check for room in queue to send request now
792 */
793 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
794 if (!m) {
795 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
796 return BFA_FALSE;
797 }
798
799 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
800 bfa_lpuid(itnim->bfa));
801 m->fw_handle = itnim->rport->fw_handle;
802 m->class = FC_CLASS_3;
803 m->seq_rec = itnim->seq_rec;
804 m->msg_no = itnim->msg_no;
805
806 /**
807 * queue I/O message to firmware
808 */
809 bfa_reqq_produce(itnim->bfa, itnim->reqq);
810 return BFA_TRUE;
811}
812
813static bfa_boolean_t
814bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
815{
816 struct bfi_itnim_delete_req_s *m;
817
818 /**
819 * check for room in queue to send request now
820 */
821 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
822 if (!m) {
823 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
824 return BFA_FALSE;
825 }
826
827 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
828 bfa_lpuid(itnim->bfa));
829 m->fw_handle = itnim->rport->fw_handle;
830
831 /**
832 * queue I/O message to firmware
833 */
834 bfa_reqq_produce(itnim->bfa, itnim->reqq);
835 return BFA_TRUE;
836}
837
838/**
839 * Cleanup all pending failed inflight requests.
840 */
841static void
842bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
843{
844 struct bfa_ioim_s *ioim;
845 struct list_head *qe, *qen;
846
847 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
848 ioim = (struct bfa_ioim_s *)qe;
849 bfa_ioim_delayed_comp(ioim, iotov);
850 }
851}
852
/**
 * Start all pending IO requests.
 *
 * Called when the itnim comes back online: stops the path TOV timer,
 * retries delayed completions, then moves every pending IO to the
 * active queue and starts it.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/**
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/**
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
877
/**
 * Fail all pending IO requests.
 *
 * Used when the path TOV expires or the itnim is deleted: delayed
 * completions are failed, and pending IOs are moved to the completion
 * queue and completed with a timeout disposition.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/**
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/**
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
900
/**
 * IO TOV timer callback. Fail any pending IO requests.
 *
 * Clears the timer-active flag before invoking the begin/end TOV
 * callbacks around the pending-IO cleanup.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
915
/**
 * Start IO TOV timer for failing back pending IO requests in offline state.
 *
 * A zero path_tov disables the timer entirely; the assert checks that
 * the state machine is in a state where IO is actually being held.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
930
931/**
932 * Stop IO TOV timer.
933 */
934static void
935bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
936{
937 if (itnim->iotov_active) {
938 itnim->iotov_active = BFA_FALSE;
939 bfa_timer_stop(&itnim->timer);
940 }
941}
942
943/**
944 * Stop IO TOV timer.
945 */
946static void
947bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
948{
949 bfa_boolean_t pathtov_active = BFA_FALSE;
950
951 if (itnim->iotov_active)
952 pathtov_active = BFA_TRUE;
953
954 bfa_itnim_iotov_stop(itnim);
955 if (pathtov_active)
956 bfa_cb_itnim_tov_begin(itnim->ditn);
957 bfa_itnim_iotov_cleanup(itnim);
958 if (pathtov_active)
959 bfa_cb_itnim_tov(itnim->ditn);
960}
961
962
963
964/**
965 * bfa_itnim_public
966 */
967
/**
 * Itnim interrupt processing.
 *
 * Dispatches firmware-to-host itnim messages (create/delete responses
 * and SLER events) to the owning itnim's state machine. The bfa_handle
 * in each message identifies the itnim.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* Unknown message id from firmware. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1011
1012
1013
1014/**
1015 * bfa_itnim_api
1016 */
1017
1018struct bfa_itnim_s *
1019bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1020{
1021 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1022 struct bfa_itnim_s *itnim;
1023
1024 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1025 bfa_assert(itnim->rport == rport);
1026
1027 itnim->ditn = ditn;
1028
1029 bfa_stats(itnim, creates);
1030 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1031
1032 return (itnim);
1033}
1034
/**
 * Delete the itnim; actual teardown is driven by the state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1041
/**
 * Bring the itnim online; seq_rec selects sequence-level error
 * recovery for the firmware create request.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1049
/**
 * Take the itnim offline; IO cleanup is driven by the state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1056
1057/**
1058 * Return true if itnim is considered offline for holding off IO request.
1059 * IO is not held if itnim is being deleted.
1060 */
1061bfa_boolean_t
1062bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1063{
1064 return (
1065 itnim->fcpim->path_tov && itnim->iotov_active &&
1066 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1067 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1068 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1069 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1070 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1071 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
1072);
1073}
1074
/**
 * Copy the itnim's HAL statistics into the caller-supplied buffer.
 */
void
bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
	struct bfa_itnim_hal_stats_s *stats)
{
	*stats = itnim->stats;
}
1081
/**
 * Reset all itnim HAL statistics counters to zero.
 */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
}
1087
1088
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c
new file mode 100644
index 000000000000..c2735e55cf03
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_log.c
@@ -0,0 +1,346 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.c BFA log library
20 */
21
22#include <bfa_os_inc.h>
23#include <cs/bfa_log.h>
24
/*
 * Global per-module log info, indexed by module id. start_idx/total_count
 * describe each module's slice of bfa_log_msg_array; they are computed
 * once by bfa_log_init().
 */
struct bfa_log_info_s {
	u32 start_idx;	/*  start index for a module */
	u32 total_count;	/*  total count for a module */
	enum bfa_log_severity level;	/*  global log level */
	bfa_log_cb_t cbfn;	/*  callback function */
};

static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1];
static u32 bfa_log_msg_total_count;	/* entries in bfa_log_msg_array */
static int bfa_log_initialized;	/* one-shot init guard */

/* Severity prefixes, indexed by enum bfa_log_severity. */
static char *bfa_log_severity[] =
	{ "[none]", "[critical]", "[error]", "[warn]", "[info]", "" };
41
42/**
43 * BFA log library initialization
44 *
45 * The log library initialization includes the following,
46 * - set log instance name and callback function
47 * - read the message array generated from xml files
48 * - calculate start index for each module
49 * - calculate message count for each module
50 * - perform error checking
51 *
52 * @param[in] log_mod - log module info
53 * @param[in] instance_name - instance name
54 * @param[in] cbfn - callback function
55 *
56 * It return 0 on success, or -1 on failure
57 */
58int
59bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name,
60 bfa_log_cb_t cbfn)
61{
62 struct bfa_log_msgdef_s *msg;
63 u32 pre_mod_id = 0;
64 u32 cur_mod_id = 0;
65 u32 i, pre_idx, idx, msg_id;
66
67 /*
68 * set instance name
69 */
70 if (log_mod) {
71 strncpy(log_mod->instance_info, instance_name,
72 sizeof(log_mod->instance_info));
73 log_mod->cbfn = cbfn;
74 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++)
75 log_mod->log_level[i] = BFA_LOG_WARNING;
76 }
77
78 if (bfa_log_initialized)
79 return 0;
80
81 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) {
82 bfa_log_info[i].start_idx = 0;
83 bfa_log_info[i].total_count = 0;
84 bfa_log_info[i].level = BFA_LOG_WARNING;
85 bfa_log_info[i].cbfn = cbfn;
86 }
87
88 pre_idx = 0;
89 idx = 0;
90 msg = bfa_log_msg_array;
91 msg_id = BFA_LOG_GET_MSG_ID(msg);
92 pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
93 while (msg_id != 0) {
94 cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
95
96 if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) {
97 cbfn(log_mod, msg_id,
98 "%s%s log: module id %u out of range\n",
99 BFA_LOG_CAT_NAME,
100 bfa_log_severity[BFA_LOG_ERROR],
101 cur_mod_id);
102 return -1;
103 }
104
105 if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) {
106 cbfn(log_mod, msg_id,
107 "%s%s log: module id %u out of range\n",
108 BFA_LOG_CAT_NAME,
109 bfa_log_severity[BFA_LOG_ERROR],
110 pre_mod_id);
111 return -1;
112 }
113
114 if (cur_mod_id != pre_mod_id) {
115 bfa_log_info[pre_mod_id].start_idx = pre_idx;
116 bfa_log_info[pre_mod_id].total_count = idx - pre_idx;
117 pre_mod_id = cur_mod_id;
118 pre_idx = idx;
119 }
120
121 idx++;
122 msg++;
123 msg_id = BFA_LOG_GET_MSG_ID(msg);
124 }
125
126 bfa_log_info[cur_mod_id].start_idx = pre_idx;
127 bfa_log_info[cur_mod_id].total_count = idx - pre_idx;
128 bfa_log_msg_total_count = idx;
129
130 cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n",
131 BFA_LOG_CAT_NAME,
132 bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count);
133
134 bfa_log_initialized = 1;
135
136 return 0;
137}
138
139/**
140 * BFA log set log level for a module
141 *
142 * @param[in] log_mod - log module info
143 * @param[in] mod_id - module id
144 * @param[in] log_level - log severity level
145 *
146 * It return BFA_STATUS_OK on success, or > 0 on failure
147 */
148bfa_status_t
149bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id,
150 enum bfa_log_severity log_level)
151{
152 if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
153 return BFA_STATUS_EINVAL;
154
155 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
156 return BFA_STATUS_EINVAL;
157
158 if (log_mod)
159 log_mod->log_level[mod_id] = log_level;
160 else
161 bfa_log_info[mod_id].level = log_level;
162
163 return BFA_STATUS_OK;
164}
165
166/**
167 * BFA log set log level for all modules
168 *
169 * @param[in] log_mod - log module info
170 * @param[in] log_level - log severity level
171 *
172 * It return BFA_STATUS_OK on success, or > 0 on failure
173 */
174bfa_status_t
175bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
176 enum bfa_log_severity log_level)
177{
178 int mod_id = BFA_LOG_UNUSED_ID + 1;
179
180 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
181 return BFA_STATUS_EINVAL;
182
183 if (log_mod) {
184 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
185 log_mod->log_level[mod_id] = log_level;
186 } else {
187 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
188 bfa_log_info[mod_id].level = log_level;
189 }
190
191 return BFA_STATUS_OK;
192}
193
194/**
195 * BFA log set log level for all aen sub-modules
196 *
197 * @param[in] log_mod - log module info
198 * @param[in] log_level - log severity level
199 *
200 * It return BFA_STATUS_OK on success, or > 0 on failure
201 */
202bfa_status_t
203bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
204 enum bfa_log_severity log_level)
205{
206 int mod_id = BFA_LOG_AEN_MIN + 1;
207
208 if (log_mod) {
209 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
210 log_mod->log_level[mod_id] = log_level;
211 } else {
212 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
213 bfa_log_info[mod_id].level = log_level;
214 }
215
216 return BFA_STATUS_OK;
217}
218
219/**
220 * BFA log get log level for a module
221 *
222 * @param[in] log_mod - log module info
223 * @param[in] mod_id - module id
224 *
225 * It returns log level or -1 on error
226 */
227enum bfa_log_severity
228bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
229{
230 if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
231 return BFA_LOG_INVALID;
232
233 if (log_mod)
234 return (log_mod->log_level[mod_id]);
235 else
236 return (bfa_log_info[mod_id].level);
237}
238
/**
 * Look up the severity of a message by its id.
 *
 * Validates the id against the module tables built by bfa_log_init()
 * and cross-checks the array entry; returns BFA_LOG_INVALID (and logs
 * via the module callback) on any inconsistency.
 */
enum bfa_log_severity
bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id)
{
	struct bfa_log_msgdef_s *msg;
	u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
	u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;

	if (!bfa_log_initialized)
		return BFA_LOG_INVALID;

	if (mod > BFA_LOG_MODULE_ID_MAX)
		return BFA_LOG_INVALID;

	if (idx >= bfa_log_info[mod].total_count) {
		bfa_log_info[mod].cbfn(log_mod, msg_id,
			"%s%s log: inconsistent idx %u vs. total count %u\n",
			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
			bfa_log_info[mod].total_count);
		return BFA_LOG_INVALID;
	}

	/* The entry's own id must match the id used to find it. */
	msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
	if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
		bfa_log_info[mod].cbfn(log_mod, msg_id,
			"%s%s log: inconsistent msg id %u array msg id %u\n",
			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
			msg_id, BFA_LOG_GET_MSG_ID(msg));
		return BFA_LOG_INVALID;
	}

	return BFA_LOG_GET_SEVERITY(msg);
}
271
272/**
273 * BFA log message handling
274 *
275 * BFA log message handling finds the message based on message id and prints
276 * out the message based on its format and arguments. It also does prefix
277 * the severity etc.
278 *
279 * @param[in] log_mod - log module info
280 * @param[in] msg_id - message id
281 * @param[in] ... - message arguments
282 *
283 * It return 0 on success, or -1 on errors
284 */
285int
286bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...)
287{
288 va_list ap;
289 char buf[256];
290 struct bfa_log_msgdef_s *msg;
291 int log_level;
292 u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
293 u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
294
295 if (!bfa_log_initialized)
296 return -1;
297
298 if (mod > BFA_LOG_MODULE_ID_MAX)
299 return -1;
300
301 if (idx >= bfa_log_info[mod].total_count) {
302 bfa_log_info[mod].
303 cbfn
304 (log_mod, msg_id,
305 "%s%s log: inconsistent idx %u vs. total count %u\n",
306 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
307 bfa_log_info[mod].total_count);
308 return -1;
309 }
310
311 msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
312 if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
313 bfa_log_info[mod].
314 cbfn
315 (log_mod, msg_id,
316 "%s%s log: inconsistent msg id %u array msg id %u\n",
317 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
318 msg_id, BFA_LOG_GET_MSG_ID(msg));
319 return -1;
320 }
321
322 log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level;
323 if ((BFA_LOG_GET_SEVERITY(msg) > log_level) &&
324 (msg->attributes != BFA_LOG_ATTR_NONE))
325 return 0;
326
327 va_start(ap, msg_id);
328 bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap);
329 va_end(ap);
330
331 if (log_mod)
332 log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n",
333 BFA_LOG_CAT_NAME, log_mod->instance_info,
334 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
335 (msg->attributes & BFA_LOG_ATTR_AUDIT)
336 ? " (audit) " : "", msg->msg_value, buf);
337 else
338 bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n",
339 BFA_LOG_CAT_NAME,
340 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
341 (msg->attributes & BFA_LOG_ATTR_AUDIT) ?
342 " (audit) " : "", msg->msg_value, buf);
343
344 return 0;
345}
346
diff --git a/drivers/scsi/bfa/bfa_log_module.c b/drivers/scsi/bfa/bfa_log_module.c
new file mode 100644
index 000000000000..5c154d341d69
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_log_module.c
@@ -0,0 +1,451 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_log.h>
19#include <aen/bfa_aen_adapter.h>
20#include <aen/bfa_aen_audit.h>
21#include <aen/bfa_aen_ethport.h>
22#include <aen/bfa_aen_ioc.h>
23#include <aen/bfa_aen_itnim.h>
24#include <aen/bfa_aen_lport.h>
25#include <aen/bfa_aen_port.h>
26#include <aen/bfa_aen_rport.h>
27#include <log/bfa_log_fcs.h>
28#include <log/bfa_log_hal.h>
29#include <log/bfa_log_linux.h>
30#include <log/bfa_log_wdrv.h>
31
/*
 * Message catalog consumed by bfa_log().
 *
 * Each bfa_log_msgdef_s initializer lists, in order: the encoded
 * message id, attribute flags, severity, symbolic name, printf-style
 * format string, an encoded argument-type mask built from
 * (BFA_LOG_<type> << BFA_LOG_ARGn) terms, and the argument count.
 * The table ends with an all-zero sentinel entry.
 */
struct bfa_log_msgdef_s bfa_log_msg_array[] = {


/* messages define for BFA_AEN_CAT_ADAPTER Module */
{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ADAPTER_ADD",
 "New adapter found: SN = %s, base port WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE",
 "Adapter removed: SN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},




/* messages define for BFA_AEN_CAT_AUDIT Module */
{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE",
 "Authentication enabled for base port: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE",
 "Authentication disabled for base port: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},




/* messages define for BFA_AEN_CAT_ETHPORT Module */
{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ETHPORT_LINKUP",
 "Base port ethernet linkup: mac = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ETHPORT_LINKDOWN",
 "Base port ethernet linkdown: mac = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ETHPORT_ENABLE",
 "Base port ethernet interface enabled: mac = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ETHPORT_DISABLE",
 "Base port ethernet interface disabled: mac = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},




/* messages define for BFA_AEN_CAT_IOC Module */
{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_IOC_HBGOOD",
 "Heart Beat of IOC %d is good.",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL,
 "BFA_AEN_IOC_HBFAIL",
 "Heart Beat of IOC %d has failed.",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_IOC_ENABLE",
 "IOC %d is enabled.",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_IOC_DISABLE",
 "IOC %d is disabled.",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH",
 "Running firmware version is incompatible with the driver version.",
 (0), 0},




/* messages define for BFA_AEN_CAT_ITNIM Module */
{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ITNIM_ONLINE",
 "Target (WWN = %s) is online for initiator (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_ITNIM_OFFLINE",
 "Target (WWN = %s) offlined by initiator (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT",
 "Target (WWN = %s) connectivity lost for initiator (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},




/* messages define for BFA_AEN_CAT_LPORT Module */
{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_LPORT_NEW",
 "New logical port created: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_LPORT_DELETE",
 "Logical port deleted: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_LPORT_ONLINE",
 "Logical port online: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_LPORT_OFFLINE",
 "Logical port taken offline: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT",
 "Logical port lost fabric connectivity: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_LPORT_NEW_PROP",
 "New virtual port created using proprietary interface: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP",
 "Virtual port deleted using proprietary interface: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD",
 "New virtual port created using standard interface: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD",
 "Virtual port deleted using standard interface: WWN = %s, Role = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN",
 "Virtual port login failed. Duplicate WWN = %s reported by fabric.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX",
 "Virtual port (WWN = %s) login failed. Max NPIV ports already exist in"
 " fabric/fport.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN",
 "Virtual port (WWN = %s) login failed.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},




/* messages define for BFA_AEN_CAT_PORT Module */
/*
 * NOTE(review): unlike every other entry in this table (including
 * BFA_AEN_PORT_OFFLINE below), this entry lacks BFA_LOG_ATTR_LOG, so
 * port-online events are never written to the log -- confirm this
 * asymmetry is intentional.
 */
{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE",
 "Base port online: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
 "BFA_AEN_PORT_OFFLINE",
 "Base port offline: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_PORT_RLIR",
 "RLIR event not supported.",
 (0), 0},

{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_PORT_SFP_INSERT",
 "New SFP found: WWN/MAC = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE",
 "SFP removed: WWN/MAC = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
 "BFA_AEN_PORT_SFP_POM",
 "SFP POM level to %s: WWN/MAC = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_PORT_ENABLE",
 "Base port enabled: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_PORT_DISABLE",
 "Base port disabled: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_PORT_AUTH_ON",
 "Authentication successful for base port: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
 "BFA_AEN_PORT_AUTH_OFF",
 "Authentication unsuccessful for base port: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
 "BFA_AEN_PORT_DISCONNECT",
 "Base port (WWN = %s) lost fabric connectivity.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
 "BFA_AEN_PORT_QOS_NEG",
 "QOS negotiation failed for base port: WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE",
 "Base port WWN = %s, Fabric WWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR",
 "SFP access error: WWN/MAC = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT",
 "Unsupported SFP found: WWN/MAC = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},




/* messages define for BFA_AEN_CAT_RPORT Module */
{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_RPORT_ONLINE",
 "Remote port (WWN = %s) online for logical port (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_RPORT_OFFLINE",
 "Remote port (WWN = %s) offlined by logical port (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT",
 "Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},

{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_RPORT_QOS_PRIO",
 "QOS priority changed to %s: RPWWN = %s and LPWWN = %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},

{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "BFA_AEN_RPORT_QOS_FLOWID",
 "QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},




/* messages define for FCS Module */
{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH",
 "No switched fabric presence is detected.",
 (0), 0},

{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "FCS_FABRIC_ISOLATED",
 "Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and"
 " switch port VF_ID: %04x.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) |
  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},




/* messages define for HAL Module */
{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
 "HAL_ASSERT",
 "Assertion failure: %s:%d: %s",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},

{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE",
 "Firmware heartbeat failure at %d",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID",
 "Driver configuration %s value %d is invalid. Value should be within"
 " %d and %d.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
  (BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4},

{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
 "HAL_SM_ASSERT",
 "SM Assertion failure: %s:%d: event = %d",
 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
  (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},




/* messages define for LINUX Module */
{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED",
 "bfa device at %s claimed.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED",
 "Hash table initialization failure for the port %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_SYSFS_FAILED",
 "sysfs file creation failure for the port %s.",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED",
 "Memory allocation failed: %s. ",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED,
 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "LINUX_DRIVER_REGISTRATION_FAILED",
 "%s. ",
 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "LINUX_ITNIM_FREE",
 "scsi%d: FCID: %s WWPN: %s",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},

{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_ITNIM_ONLINE",
 "Target: %d:0:%d FCID: %s WWPN: %s",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},

{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE",
 "Target: %d:0:%d FCID: %s WWPN: %s",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
  (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},

{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE",
 "Free scsi%d",
 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},

{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
 "LINUX_SCSI_ABORT",
 "scsi%d: abort cmnd %p, iotag %x",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},

{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP",
 "scsi%d: complete abort 0x%p, iotag 0x%x",
 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},




/* messages define for WDRV Module */
{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR",
 "IOC initialization has failed.",
 (0), 0},

{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR",
 "IOC internal error. ",
 (0), 0},

{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_IOC_START_ERROR",
 "IOC could not be started. ",
 (0), 0},

{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR",
 "IOC could not be stopped. ",
 (0), 0},

{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES",
 "Insufficient memory. ",
 (0), 0},

{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
 BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR",
 "Unable to map the IOC onto the system address space. ",
 (0), 0},


/* all-zero sentinel terminating the catalog */
{0, 0, 0, "", "", 0, 0},
};
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
new file mode 100644
index 000000000000..9844b45412b6
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -0,0 +1,782 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_lps.h>
20#include <cs/bfa_debug.h>
21
22BFA_TRC_FILE(HAL, LPS);
23BFA_MODULE(lps);
24
25#define BFA_LPS_MIN_LPORTS (1)
26#define BFA_LPS_MAX_LPORTS (256)
27
28/**
29 * forward declarations
30 */
31static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
32 u32 *dm_len);
33static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
34 struct bfa_iocfc_cfg_s *cfg,
35 struct bfa_meminfo_s *meminfo,
36 struct bfa_pcidev_s *pcidev);
37static void bfa_lps_initdone(struct bfa_s *bfa);
38static void bfa_lps_detach(struct bfa_s *bfa);
39static void bfa_lps_start(struct bfa_s *bfa);
40static void bfa_lps_stop(struct bfa_s *bfa);
41static void bfa_lps_iocdisable(struct bfa_s *bfa);
42static void bfa_lps_login_rsp(struct bfa_s *bfa,
43 struct bfi_lps_login_rsp_s *rsp);
44static void bfa_lps_logout_rsp(struct bfa_s *bfa,
45 struct bfi_lps_logout_rsp_s *rsp);
46static void bfa_lps_reqq_resume(void *lps_arg);
47static void bfa_lps_free(struct bfa_lps_s *lps);
48static void bfa_lps_send_login(struct bfa_lps_s *lps);
49static void bfa_lps_send_logout(struct bfa_lps_s *lps);
50static void bfa_lps_login_comp(struct bfa_lps_s *lps);
51static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
52
53
54/**
55 * lps_pvt BFA LPS private functions
56 */
57
/* Events driving the lport-service (LPS) login/logout state machine. */
enum bfa_lps_event {
	BFA_LPS_SM_LOGIN = 1,	/* login request from user */
	BFA_LPS_SM_LOGOUT = 2,	/* logout request from user */
	BFA_LPS_SM_FWRSP = 3,	/* f/w response to login/logout */
	BFA_LPS_SM_RESUME = 4,	/* space present in reqq queue */
	BFA_LPS_SM_DELETE = 5,	/* lps delete from user */
	BFA_LPS_SM_OFFLINE = 6,	/* Link is offline */
};
66
67static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
68static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
69static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
70 enum bfa_lps_event event);
71static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
72static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
73static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
74 enum bfa_lps_event event);
75
76/**
77 * Init state -- no login
78 */
79static void
80bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
81{
82 bfa_trc(lps->bfa, lps->lp_tag);
83 bfa_trc(lps->bfa, event);
84
85 switch (event) {
86 case BFA_LPS_SM_LOGIN:
87 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
88 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
89 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
90 } else {
91 bfa_sm_set_state(lps, bfa_lps_sm_login);
92 bfa_lps_send_login(lps);
93 }
94 break;
95
96 case BFA_LPS_SM_LOGOUT:
97 bfa_lps_logout_comp(lps);
98 break;
99
100 case BFA_LPS_SM_DELETE:
101 bfa_lps_free(lps);
102 break;
103
104 case BFA_LPS_SM_OFFLINE:
105 break;
106
107 case BFA_LPS_SM_FWRSP:
108 /* Could happen when fabric detects loopback and discards
109 * the lps request. Fw will eventually sent out the timeout
110 * Just ignore
111 */
112 break;
113
114 default:
115 bfa_assert(0);
116 }
117}
118
119/**
120 * login is in progress -- awaiting response from firmware
121 */
122static void
123bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
124{
125 bfa_trc(lps->bfa, lps->lp_tag);
126 bfa_trc(lps->bfa, event);
127
128 switch (event) {
129 case BFA_LPS_SM_FWRSP:
130 if (lps->status == BFA_STATUS_OK)
131 bfa_sm_set_state(lps, bfa_lps_sm_online);
132 else
133 bfa_sm_set_state(lps, bfa_lps_sm_init);
134 bfa_lps_login_comp(lps);
135 break;
136
137 case BFA_LPS_SM_OFFLINE:
138 bfa_sm_set_state(lps, bfa_lps_sm_init);
139 break;
140
141 default:
142 bfa_assert(0);
143 }
144}
145
146/**
147 * login pending - awaiting space in request queue
148 */
149static void
150bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
151{
152 bfa_trc(lps->bfa, lps->lp_tag);
153 bfa_trc(lps->bfa, event);
154
155 switch (event) {
156 case BFA_LPS_SM_RESUME:
157 bfa_sm_set_state(lps, bfa_lps_sm_login);
158 break;
159
160 case BFA_LPS_SM_OFFLINE:
161 bfa_sm_set_state(lps, bfa_lps_sm_init);
162 bfa_reqq_wcancel(&lps->wqe);
163 break;
164
165 default:
166 bfa_assert(0);
167 }
168}
169
170/**
171 * login complete
172 */
173static void
174bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
175{
176 bfa_trc(lps->bfa, lps->lp_tag);
177 bfa_trc(lps->bfa, event);
178
179 switch (event) {
180 case BFA_LPS_SM_LOGOUT:
181 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
182 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
183 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
184 } else {
185 bfa_sm_set_state(lps, bfa_lps_sm_logout);
186 bfa_lps_send_logout(lps);
187 }
188 break;
189
190 case BFA_LPS_SM_OFFLINE:
191 case BFA_LPS_SM_DELETE:
192 bfa_sm_set_state(lps, bfa_lps_sm_init);
193 break;
194
195 default:
196 bfa_assert(0);
197 }
198}
199
200/**
201 * logout in progress - awaiting firmware response
202 */
203static void
204bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
205{
206 bfa_trc(lps->bfa, lps->lp_tag);
207 bfa_trc(lps->bfa, event);
208
209 switch (event) {
210 case BFA_LPS_SM_FWRSP:
211 bfa_sm_set_state(lps, bfa_lps_sm_init);
212 bfa_lps_logout_comp(lps);
213 break;
214
215 case BFA_LPS_SM_OFFLINE:
216 bfa_sm_set_state(lps, bfa_lps_sm_init);
217 break;
218
219 default:
220 bfa_assert(0);
221 }
222}
223
224/**
225 * logout pending -- awaiting space in request queue
226 */
227static void
228bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
229{
230 bfa_trc(lps->bfa, lps->lp_tag);
231 bfa_trc(lps->bfa, event);
232
233 switch (event) {
234 case BFA_LPS_SM_RESUME:
235 bfa_sm_set_state(lps, bfa_lps_sm_logout);
236 bfa_lps_send_logout(lps);
237 break;
238
239 case BFA_LPS_SM_OFFLINE:
240 bfa_sm_set_state(lps, bfa_lps_sm_init);
241 bfa_reqq_wcancel(&lps->wqe);
242 break;
243
244 default:
245 bfa_assert(0);
246 }
247}
248
249
250
251/**
252 * lps_pvt BFA LPS private functions
253 */
254
255/**
256 * return memory requirement
257 */
258static void
259bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
260{
261 if (cfg->drvcfg.min_cfg)
262 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
263 else
264 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
265}
266
267/**
268 * bfa module attach at initialization time
269 */
270static void
271bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
272 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
273{
274 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
275 struct bfa_lps_s *lps;
276 int i;
277
278 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
279 mod->num_lps = BFA_LPS_MAX_LPORTS;
280 if (cfg->drvcfg.min_cfg)
281 mod->num_lps = BFA_LPS_MIN_LPORTS;
282 else
283 mod->num_lps = BFA_LPS_MAX_LPORTS;
284 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
285
286 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
287
288 INIT_LIST_HEAD(&mod->lps_free_q);
289 INIT_LIST_HEAD(&mod->lps_active_q);
290
291 for (i = 0; i < mod->num_lps; i++, lps++) {
292 lps->bfa = bfa;
293 lps->lp_tag = (u8) i;
294 lps->reqq = BFA_REQQ_LPS;
295 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
296 list_add_tail(&lps->qe, &mod->lps_free_q);
297 }
298}
299
/* Nothing LPS-specific to do at init-done; empty hook for the module
 * interface (presumably wired via BFA_MODULE(lps) -- confirm). */
static void
bfa_lps_initdone(struct bfa_s *bfa)
{
}
304
/* Nothing LPS-specific to do at detach; empty hook for the module
 * interface (presumably wired via BFA_MODULE(lps) -- confirm). */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
309
/* Nothing LPS-specific to do at start; empty hook for the module
 * interface (presumably wired via BFA_MODULE(lps) -- confirm). */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
314
/* Nothing LPS-specific to do at stop; empty hook for the module
 * interface (presumably wired via BFA_MODULE(lps) -- confirm). */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
319
320/**
321 * IOC in disabled state -- consider all lps offline
322 */
323static void
324bfa_lps_iocdisable(struct bfa_s *bfa)
325{
326 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
327 struct bfa_lps_s *lps;
328 struct list_head *qe, *qen;
329
330 list_for_each_safe(qe, qen, &mod->lps_active_q) {
331 lps = (struct bfa_lps_s *) qe;
332 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
333 }
334}
335
/**
 * Firmware login response handler.
 *
 * Looks up the lps by the tag echoed in the response, caches the
 * status-specific response fields on it, and advances the state
 * machine with BFA_LPS_SM_FWRSP.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* login succeeded: record the negotiated fabric parameters */
		lps->fport = rsp->f_port;
		lps->npiv_en = rsp->npiv_en;
		lps->lp_pid = rsp->lp_pid;
		lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
		lps->pr_pwwn = rsp->port_name;
		lps->pr_nwwn = rsp->node_name;
		lps->auth_req = rsp->auth_req;
		lps->lp_mac = rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac = rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* fabric rejected the login: keep reason/explanation codes */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		/* protocol error: keep the extended status for diagnosis */
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
382
383/**
384 * Firmware logout response
385 */
386static void
387bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
388{
389 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
390 struct bfa_lps_s *lps;
391
392 bfa_assert(rsp->lp_tag < mod->num_lps);
393 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
394
395 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
396}
397
398/**
399 * Space is available in request queue, resume queueing request to firmware.
400 */
401static void
402bfa_lps_reqq_resume(void *lps_arg)
403{
404 struct bfa_lps_s *lps = lps_arg;
405
406 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
407}
408
409/**
410 * lps is freed -- triggered by vport delete
411 */
412static void
413bfa_lps_free(struct bfa_lps_s *lps)
414{
415 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
416
417 list_del(&lps->qe);
418 list_add_tail(&lps->qe, &mod->lps_free_q);
419}
420
/**
 * Build and queue the login request message to firmware.
 *
 * The state machine only calls this after checking bfa_reqq_full(), so
 * a queue element is guaranteed -- hence the bfa_assert() rather than
 * an error path.
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->alpa = lps->alpa;
	m->pdu_size = bfa_os_htons(lps->pdusz);	/* wire format byte order */
	m->pwwn = lps->pwwn;
	m->nwwn = lps->nwwn;
	m->fdisc = lps->fdisc;
	m->auth_en = lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
445
/**
 * Build and queue the logout request message to firmware.
 *
 * The state machine only calls this after checking bfa_reqq_full(), so
 * a queue element is guaranteed -- hence the bfa_assert() rather than
 * an error path.
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
464
465/**
466 * Indirect login completion handler for non-fcs
467 */
468static void
469bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
470{
471 struct bfa_lps_s *lps = arg;
472
473 if (!complete)
474 return;
475
476 if (lps->fdisc)
477 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
478 else
479 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
480}
481
482/**
483 * Login completion handler -- direct call for fcs, queue for others
484 */
485static void
486bfa_lps_login_comp(struct bfa_lps_s *lps)
487{
488 if (!lps->bfa->fcs) {
489 bfa_cb_queue(lps->bfa, &lps->hcb_qe,
490 bfa_lps_login_comp_cb, lps);
491 return;
492 }
493
494 if (lps->fdisc)
495 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
496 else
497 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
498}
499
500/**
501 * Indirect logout completion handler for non-fcs
502 */
503static void
504bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
505{
506 struct bfa_lps_s *lps = arg;
507
508 if (!complete)
509 return;
510
511 if (lps->fdisc)
512 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
513 else
514 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
515}
516
517/**
518 * Logout completion handler -- direct call for fcs, queue for others
519 */
520static void
521bfa_lps_logout_comp(struct bfa_lps_s *lps)
522{
523 if (!lps->bfa->fcs) {
524 bfa_cb_queue(lps->bfa, &lps->hcb_qe,
525 bfa_lps_logout_comp_cb, lps);
526 return;
527 }
528 if (lps->fdisc)
529 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
530 else
531 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
532}
533
534
535
536/**
537 * lps_public BFA LPS public functions
538 */
539
540/**
541 * Allocate a lport srvice tag.
542 */
543struct bfa_lps_s *
544bfa_lps_alloc(struct bfa_s *bfa)
545{
546 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
547 struct bfa_lps_s *lps = NULL;
548
549 bfa_q_deq(&mod->lps_free_q, &lps);
550
551 if (lps == NULL)
552 return NULL;
553
554 list_add_tail(&lps->qe, &mod->lps_active_q);
555
556 bfa_sm_set_state(lps, bfa_lps_sm_init);
557 return lps;
558}
559
/**
 * Free an lport service tag. This can be called anytime after an
 * alloc; no need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
569
/**
 * Initiate a base port login (FLOGI).
 *
 * @param[in] lps      lps instance obtained from bfa_lps_alloc()
 * @param[in] uarg     caller cookie returned in the completion callback
 * @param[in] alpa     loop address to request
 * @param[in] pdusz    maximum receive data field size
 * @param[in] pwwn     port WWN to log in with
 * @param[in] nwwn     node WWN to log in with
 * @param[in] auth_en  enable authentication for this login
 *
 * Completion is reported asynchronously via bfa_cb_lps_flogi_comp().
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg = uarg;
	lps->alpa = alpa;
	lps->pdusz = pdusz;
	lps->pwwn = pwwn;
	lps->nwwn = nwwn;
	lps->fdisc = BFA_FALSE;
	lps->auth_en = auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
586
/**
 * Initiate a lport fdisc login.
 *
 * Same flow as bfa_lps_flogi() but marks the session FDISC; ALPA and
 * authentication are not applicable and are forced to 0/FALSE.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg = uarg;
	lps->alpa = 0;
	lps->pdusz = pdusz;
	lps->pwwn = pwwn;
	lps->nwwn = nwwn;
	lps->fdisc = BFA_TRUE;
	lps->auth_en = BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
603
/**
 * Initiate a lport logout of a FLOGI session.
 */
void
bfa_lps_flogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
612
/**
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
621
/**
 * Discard a pending login request -- should be called only for
 * link down handling.
 */
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
631
/**
 * Return lport services tag assigned to this lps instance.
 */
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
	return lps->lp_tag;
}
640
641/**
642 * Return lport services tag given the pid
643 */
644u8
645bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
646{
647 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
648 struct bfa_lps_s *lps;
649 int i;
650
651 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
652 if (lps->lp_pid == pid)
653 return lps->lp_tag;
654 }
655
656 /* Return base port tag anyway */
657 return 0;
658}
659
/**
 * Return TRUE if the fabric login response indicated NPIV support.
 */
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
	return lps->npiv_en;
}
668
/**
 * Return TRUE if attached to F-Port, else return FALSE
 */
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
	return lps->fport;
}
677
/**
 * Return TRUE if attached to a Brocade Fabric
 */
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
	return lps->brcd_switch;
}
/**
 * Return TRUE if authentication is required for this login session.
 */
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
	return lps->auth_req;
}
694
/**
 * Return the extended (ELS protocol) status of the last login.
 */
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
	return lps->ext_status;
}
700
/**
 * Return port id assigned to the lport.
 */
u32
bfa_lps_get_pid(struct bfa_lps_s *lps)
{
	return lps->lp_pid;
}
709
/**
 * Return bb_credit assigned in FLOGI response
 */
u16
bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
{
	return lps->pr_bbcred;
}
718
/**
 * Return peer port name
 */
wwn_t
bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
{
	return lps->pr_pwwn;
}
727
/**
 * Return peer node name
 */
wwn_t
bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
{
	return lps->pr_nwwn;
}
736
/**
 * Return LS_RJT reason code if the login request was rejected.
 */
u8
bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
{
	return lps->lsrjt_rsn;
}
745
/**
 * Return LS_RJT explanation code if the login request was rejected.
 */
u8
bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
{
	return lps->lsrjt_expl;
}
754
755
/**
 * LPS firmware message class handler.
 *
 * Dispatches incoming firmware messages to the login/logout
 * response handlers.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/*
	 * NOTE(review): this handles firmware-to-host (I2H) messages but
	 * switches on BFI_LPS_H2I_* constants -- presumably the H2I/I2H
	 * enum values alias; confirm against the bfi_lps message-id enums.
	 */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	default:
		/* Unexpected message id: trace again and assert */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
781
782
diff --git a/drivers/scsi/bfa/bfa_lps_priv.h b/drivers/scsi/bfa/bfa_lps_priv.h
new file mode 100644
index 000000000000..d16c6ce995df
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_lps_priv.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_LPS_PRIV_H__
19#define __BFA_LPS_PRIV_H__
20
21#include <bfa_svc.h>
22
/* LPS module state: pool of lport service (login) instances */
struct bfa_lps_mod_s {
	struct list_head	lps_free_q;	/* free lps instances */
	struct list_head	lps_active_q;	/* allocated lps instances */
	struct bfa_lps_s	*lps_arr;	/* array of all lps instances */
	int			num_lps;	/* entries in lps_arr */
};

#define BFA_LPS_MOD(__bfa)		(&(__bfa)->modules.lps_mod)
#define BFA_LPS_FROM_TAG(__mod, __tag)	(&(__mod)->lps_arr[__tag])

/*
 * external functions
 */
void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
37
38#endif /* __BFA_LPS_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c
new file mode 100644
index 000000000000..32eda8e1ec65
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_module.c
@@ -0,0 +1,90 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <defs/bfa_defs_pci.h>
19#include <cs/bfa_debug.h>
20#include <bfa_iocfc.h>
21
/**
 * BFA module list terminated by NULL.
 * NOTE(review): ordering presumably determines module init/teardown
 * order -- confirm against the module framework in bfa_iocfc.
 */
struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_pport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
#ifdef BFA_CFG_PBIND
	&hal_mod_pbind,
#endif
	NULL
};
38
/**
 * Message handlers for various modules, indexed by BFI message class.
 * NOTE(review): two slots are annotated BFI_MC_PORT (indexes 5 and 21);
 * verify the slot comments against the BFI_MC_* enum ordering.
 */
bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_pport_isr,		/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITNIM */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
76
/**
 * Message handlers for mailbox command classes, indexed by BFI message
 * class; remaining entries up to BFI_MC_MAX are implicitly NULL.
 */
bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
90
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
new file mode 100644
index 000000000000..96f70534593c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_modules_priv.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_MODULES_PRIV_H__
19#define __BFA_MODULES_PRIV_H__
20
21#include "bfa_uf_priv.h"
22#include "bfa_port_priv.h"
23#include "bfa_rport_priv.h"
24#include "bfa_fcxp_priv.h"
25#include "bfa_lps_priv.h"
26#include "bfa_fcpim_priv.h"
27#include <cee/bfa_cee.h>
28#include <port/bfa_port.h>
29
30
/* Aggregate of all per-instance BFA module state */
struct bfa_modules_s {
	struct bfa_pport_s	pport;		/*  physical port module */
	struct bfa_fcxp_mod_s	fcxp_mod;	/*  fcxp module */
	struct bfa_lps_mod_s	lps_mod;	/*  lport services module */
	struct bfa_uf_mod_s	uf_mod;		/*  unsolicited frame module */
	struct bfa_rport_mod_s	rport_mod;	/*  remote port module */
	struct bfa_fcpim_mod_s	fcpim_mod;	/*  FCP initiator module */
	struct bfa_sgpg_mod_s	sgpg_mod;	/*  SG page module */
	struct bfa_cee_s	cee;		/*  CEE Module */
	struct bfa_port_s	port;		/*  Physical port module */
};
42
43#endif /* __BFA_MODULES_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
new file mode 100644
index 000000000000..10a89f75fa94
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -0,0 +1,222 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * Contains declarations all OS Specific files needed for BFA layer
20 */
21
22#ifndef __BFA_OS_INC_H__
23#define __BFA_OS_INC_H__
24
25#ifndef __KERNEL__
26#include <stdint.h>
27#else
28#include <linux/types.h>
29
30#include <linux/version.h>
31#include <linux/pci.h>
32
33#include <linux/dma-mapping.h>
34#define SET_MODULE_VERSION(VER)
35
36#include <linux/idr.h>
37
38#include <linux/interrupt.h>
39#include <linux/cdev.h>
40#include <linux/fs.h>
41#include <linux/delay.h>
42#include <linux/vmalloc.h>
43
44#include <linux/workqueue.h>
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_host.h>
48
49#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_transport_fc.h>
51#include <scsi/scsi_transport.h>
52
/* printk severity aliases used by the BFA logging macros below */
#define BFA_ERR			KERN_ERR
#define BFA_WARNING		KERN_WARNING
#define BFA_NOTICE		KERN_NOTICE
#define BFA_INFO		KERN_INFO
#define BFA_DEBUG		KERN_DEBUG

/* log-mask bits tested by BFA_LOG() against cfg_data[cfg_log_mask] */
#define LOG_BFAD_INIT		0x00000001
#define LOG_FCP_IO		0x00000002

/*
 * Trace variants of the log macros: compiled out unless DEBUG.
 */
#ifdef DEBUG
#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...)		\
		BFA_LOG(bfad, level, mask, fmt, ## arg)
#define BFA_DEV_TRACE(bfad, level, fmt, arg...)			\
		BFA_DEV_PRINTF(bfad, level, fmt, ## arg)
#define BFA_TRACE(level, fmt, arg...)				\
		BFA_PRINTF(level, fmt, ## arg)
#else
#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...)
#define BFA_DEV_TRACE(bfad, level, fmt, arg...)
#define BFA_TRACE(level, fmt, arg...)
#endif

/* Assert that BUG()s the kernel; enabled in all builds (not DEBUG-only) */
#define BFA_ASSERT(p) do {						\
	if (!(p)) {							\
		printk(KERN_ERR "assert(%s) failed at %s:%d\n",		\
		#p, __FILE__, __LINE__);				\
		BUG();							\
	}								\
} while (0)


/*
 * Log when the message's mask bit is enabled, or unconditionally for
 * severities at KERN_ERR ('3') and above (level[1] is the digit in the
 * KERN_* prefix string).
 */
#define BFA_LOG(bfad, level, mask, fmt, arg...)				\
do {									\
	if (((mask) & (((struct bfad_s *)(bfad))->			\
		cfg_data[cfg_log_mask])) || (level[1] <= '3'))		\
		dev_printk(level, &(((struct bfad_s *)			\
				(bfad))->pcidev->dev), fmt, ##arg);	\
} while (0)
91
/*
 * NOTE: no trailing semicolon in the expansions -- callers supply it.
 * A trailing semicolon inside the macro would terminate the statement
 * early and break "if (x) BFA_PRINTF(...); else ..." constructs.
 */
#ifndef BFA_DEV_PRINTF
#define BFA_DEV_PRINTF(bfad, level, fmt, arg...)		\
	dev_printk(level, &(((struct bfad_s *)			\
			(bfad))->pcidev->dev), fmt, ##arg)
#endif

#define BFA_PRINTF(level, fmt, arg...)				\
	printk(level fmt, ##arg)
100
int bfa_os_MWB(void *);

/* memory-mapped I/O write barrier */
#define bfa_os_mmiowb()		mmiowb()

/* byte-swap a 24-bit value (FC port ids) */
#define bfa_swap_3b(_x)				\
	((((_x) & 0xff) << 16) |		\
	((_x) & 0x00ff00) |			\
	(((_x) & 0xff0000) >> 16))

/* byte-swap a 64-bit value (WWNs, 64-bit counters) */
#define bfa_swap_8b(_x)					\
	((((_x) & 0xff00000000000000ull) >> 56)		\
	| (((_x) & 0x00ff000000000000ull) >> 40)	\
	| (((_x) & 0x0000ff0000000000ull) >> 24)	\
	| (((_x) & 0x000000ff00000000ull) >> 8)		\
	| (((_x) & 0x00000000ff000000ull) << 8)		\
	| (((_x) & 0x0000000000ff0000ull) << 24)	\
	| (((_x) & 0x000000000000ff00ull) << 40)	\
	| (((_x) & 0x00000000000000ffull) << 56))

/* byte-swap a 32-bit value */
#define bfa_os_swap32(_x)			\
	((((_x) & 0xff) << 24)		|	\
	(((_x) & 0x0000ff00) << 8)	|	\
	(((_x) & 0x00ff0000) >> 8)	|	\
	(((_x) & 0xff000000) >> 24))
125
126
/*
 * Host <-> network (big-endian) order helpers.  On little-endian hosts
 * these swap bytes and wtole is identity; on big-endian hosts they are
 * identity and wtole swaps (registers are little-endian).
 */
#ifndef __BIGENDIAN
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
				(((_x) & 0x00ff) << 8)))

#define bfa_os_htonl(_x)	bfa_os_swap32(_x)
#define bfa_os_htonll(_x)	bfa_swap_8b(_x)
#define bfa_os_hton3b(_x)	bfa_swap_3b(_x)

#define bfa_os_wtole(_x)	(_x)

#else

#define bfa_os_htons(_x)	(_x)
#define bfa_os_htonl(_x)	(_x)
#define bfa_os_hton3b(_x)	(_x)
#define bfa_os_htonll(_x)	(_x)
#define bfa_os_wtole(_x)	bfa_os_swap32(_x)

#endif

/* network-to-host conversion is its own inverse */
#define bfa_os_ntohs(_x)	bfa_os_htons(_x)
#define bfa_os_ntohl(_x)	bfa_os_htonl(_x)
#define bfa_os_ntohll(_x)	bfa_os_htonll(_x)
#define bfa_os_ntoh3b(_x)	bfa_os_hton3b(_x)
151
/* upper 32 bits of a 64-bit physical address */
#define bfa_os_u32(__pa64) ((__pa64) >> 32)

/* thin OS-abstraction aliases onto the kernel primitives */
#define bfa_os_memset	memset
#define bfa_os_memcpy	memcpy
#define bfa_os_udelay	udelay
#define bfa_os_vsprintf vsprintf

#define bfa_os_assign(__t, __s) __t = __s

#define bfa_os_addr_t char __iomem *
#define bfa_os_panic()

/*
 * Register accesses are little-endian (wtole); shared-memory accesses
 * are big-endian (hton/ntoh).
 */
#define bfa_os_reg_read(_raddr) bfa_os_wtole(readl(_raddr))
#define bfa_os_reg_write(_raddr, _val) writel(bfa_os_wtole((_val)), (_raddr))
#define bfa_os_mem_read(_raddr, _off)					\
	bfa_os_ntohl(readl(((_raddr) + (_off))))
#define bfa_os_mem_write(_raddr, _off, _val)				\
	writel(bfa_os_htonl((_val)), ((_raddr) + (_off)))

/* trace timestamp in microseconds since the epoch second boundary */
#define BFA_TRC_TS(_trcm)						\
	({								\
		struct timeval tv;					\
									\
		do_gettimeofday(&tv);					\
		(tv.tv_sec*1000000+tv.tv_usec);				\
	})

struct bfa_log_mod_s;
void bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
			const char *fmt, ...);
182#endif
183
#define boolean_t int

/**
 * Current time stamp; the OS-specific layer fills it in via
 * bfa_os_gettimeofday().
 */
struct bfa_timeval_s {
	u32	tv_sec;		/*  seconds       */
	u32	tv_usec;	/*  microseconds  */
};

void bfa_os_gettimeofday(struct bfa_timeval_s *tv);
195
196static inline void
197wwn2str(char *wwn_str, u64 wwn)
198{
199 union {
200 u64 wwn;
201 u8 byte[8];
202 } w;
203
204 w.wwn = wwn;
205 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
206 w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
207 w.byte[6], w.byte[7]);
208}
209
210static inline void
211fcid2str(char *fcid_str, u32 fcid)
212{
213 union {
214 u32 fcid;
215 u8 byte[4];
216 } f;
217
218 f.fcid = fcid;
219 sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
220}
221
222#endif /* __BFA_OS_INC_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
new file mode 100644
index 000000000000..cab19028361a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -0,0 +1,460 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <defs/bfa_defs_port.h>
19#include <cs/bfa_trc.h>
20#include <cs/bfa_log.h>
21#include <cs/bfa_debug.h>
22#include <port/bfa_port.h>
23#include <bfi/bfi.h>
24#include <bfi/bfi_port.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27
BFA_TRC_FILE(CNA, PORT);

/* firmware port id of the IOC this module is bound to */
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32
/*
 * Byte-swap the DMA'ed port statistics in place.  Firmware reports
 * stats as big-endian 64-bit counters; convert each 64-bit quantity
 * (handled as two 32-bit words) into host order.
 */
static void
bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
{
	u32	*dip = (u32 *) stats;
	u32	t0, t1;
	int	i;

	for (i = 0; i < sizeof(union bfa_pport_stats_u) / sizeof(u32);
		i += 2) {
		t0 = dip[i];
		t1 = dip[i + 1];
#ifdef __BIGENDIAN
		/* words already ordered; only swap bytes within each word */
		dip[i] = bfa_os_ntohl(t0);
		dip[i + 1] = bfa_os_ntohl(t1);
#else
		/* swap bytes within each word AND exchange the word pair */
		dip[i] = bfa_os_ntohl(t1);
		dip[i + 1] = bfa_os_ntohl(t0);
#endif
	}

	/** todo
	 * QoS stats are also swapped as 64bit; that structure also
	 * has to use 64 bit counters
	 */
}
58
/**
 * bfa_port_enable_isr()
 *
 * Firmware response handler for a port enable request.
 * Not implemented yet; bfa_port_enable() also asserts, so this
 * path should currently be unreachable.
 *
 * @param[in] port   - Pointer to the port module
 *            status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
{
	bfa_assert(0);
}
73
/**
 * bfa_port_disable_isr()
 *
 * Firmware response handler for a port disable request.
 * Not implemented yet; bfa_port_disable() also asserts, so this
 * path should currently be unreachable.
 *
 * @param[in] port   - Pointer to the port module
 *            status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
{
	bfa_assert(0);
}
88
/**
 * bfa_port_get_stats_isr()
 *
 * Firmware response for a get-stats request: copy the DMA'ed stats to
 * the caller's buffer, fix endianness, and complete the callback once.
 *
 * @param[in] port   - Pointer to the Port module
 *            status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
	port->stats_status = status;
	port->stats_busy = BFA_FALSE;

	if (status == BFA_STATUS_OK) {
		memcpy(port->stats, port->stats_dma.kva,
			sizeof(union bfa_pport_stats_u));
		bfa_port_stats_swap(port, port->stats);
	}

	if (port->stats_cbfn) {
		port->stats_cbfn(port->stats_cbarg, status);
		port->stats_cbfn = NULL;	/* one-shot callback */
	}
}
115
/**
 * bfa_port_clear_stats_isr()
 *
 * Firmware response for a clear-stats request; records the status and
 * completes the callback once.
 *
 * @param[in] port   - Pointer to the Port module
 *            status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
	port->stats_status = status;
	port->stats_busy = BFA_FALSE;

	if (port->stats_cbfn) {
		port->stats_cbfn(port->stats_cbarg, status);
		port->stats_cbfn = NULL;	/* one-shot callback */
	}
}
136
/**
 * bfa_port_isr()
 *
 * Mailbox message handler for the port module; dispatches firmware
 * responses, ignoring replies whose request is no longer outstanding
 * (e.g. the command timed out).
 *
 * @param[in] cbarg - Pointer to the Port module data structure.
 *
 * @return void
 */
static void
bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
	struct bfa_port_s *port = (struct bfa_port_s *)cbarg;
	union bfi_port_i2h_msg_u *i2hmsg;

	i2hmsg = (union bfi_port_i2h_msg_u *)m;
	bfa_trc(port, m->mh.msg_id);

	switch (m->mh.msg_id) {
	case BFI_PORT_I2H_ENABLE_RSP:
		/* Stale reply -- no enable/disable outstanding */
		if (port->endis_pending == BFA_FALSE)
			break;
		bfa_port_enable_isr(port, i2hmsg->enable_rsp.status);
		break;

	case BFI_PORT_I2H_DISABLE_RSP:
		if (port->endis_pending == BFA_FALSE)
			break;
		bfa_port_disable_isr(port, i2hmsg->disable_rsp.status);
		break;

	case BFI_PORT_I2H_GET_STATS_RSP:
		/*
		 * Stats busy flag is still set? (may be cmd timed out)
		 */
		if (port->stats_busy == BFA_FALSE)
			break;
		bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
		break;

	case BFI_PORT_I2H_CLEAR_STATS_RSP:
		if (port->stats_busy == BFA_FALSE)
			break;
		bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status);
		break;

	default:
		bfa_assert(0);
	}
}
186
/**
 * bfa_port_meminfo()
 *
 * @param[in] void
 *
 * @return Size of the DMA region needed for the port stats buffer,
 *         rounded up for DMA alignment.
 */
u32
bfa_port_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), BFA_DMA_ALIGN_SZ);
}
200
/**
 * bfa_port_mem_claim()
 *
 * Record the DMA region (sized by bfa_port_meminfo()) to use for the
 * firmware stats buffer.
 *
 * @param[in] port    Port module pointer
 *            dma_kva Kernel Virtual Address of Port DMA Memory
 *            dma_pa  Physical Address of Port DMA Memory
 *
 * @return void
 */
void
bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
{
	port->stats_dma.kva = dma_kva;
	port->stats_dma.pa = dma_pa;
}
217
/**
 * bfa_port_enable()
 *
 * Send the Port enable request to the f/w.  NOTE: currently asserts
 * unconditionally (marked "Not implemented").
 *
 * @param[in] port - Pointer to the Port module data structure.
 *
 * @return BFA_STATUS_OK when queued, BFA_STATUS_IOC_FAILURE if the IOC
 *         is down, BFA_STATUS_DEVBUSY if a request is already pending.
 */
bfa_status_t
bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		 void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/** todo Not implemented */
	bfa_assert(0);

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* Only one enable/disable may be outstanding at a time */
	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn = cbfn;
	port->endis_cbarg = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}
259
/**
 * bfa_port_disable()
 *
 * Send the Port disable request to the f/w.  NOTE: currently asserts
 * unconditionally (marked "Not implemented").
 *
 * @param[in] port - Pointer to the Port module data structure.
 *
 * @return BFA_STATUS_OK when queued, BFA_STATUS_IOC_FAILURE if the IOC
 *         is down, BFA_STATUS_DEVBUSY if a request is already pending.
 */
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		  void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/** todo Not implemented */
	bfa_assert(0);

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* Only one enable/disable may be outstanding at a time */
	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn = cbfn;
	port->endis_cbarg = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}
301
/**
 * bfa_port_get_stats()
 *
 * Send the request to the f/w to fetch Port statistics.  Completion
 * (copy into *stats and the cbfn call) happens in
 * bfa_port_get_stats_isr().
 *
 * @param[in] port - Pointer to the Port module data structure.
 *
 * @return BFA_STATUS_OK when queued, BFA_STATUS_IOC_FAILURE if the IOC
 *         is down, BFA_STATUS_DEVBUSY if a stats request is pending.
 */
bfa_status_t
bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
		    bfa_port_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_port_get_stats_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* Only one stats operation may be outstanding at a time */
	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg;

	port->stats = stats;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy = BFA_TRUE;
	/* Tell firmware where to DMA the stats */
	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}
341
/**
 * bfa_port_clear_stats()
 *
 * Ask the f/w to clear the Port statistics; completion is reported
 * through bfa_port_clear_stats_isr().
 *
 * @param[in] port - Pointer to the Port module data structure.
 *
 * @return BFA_STATUS_OK when queued, BFA_STATUS_IOC_FAILURE if the IOC
 *         is down, BFA_STATUS_DEVBUSY if a stats request is pending.
 */
bfa_status_t
bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
		      void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* Only one stats operation may be outstanding at a time */
	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *)port->stats_mb.msg;

	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}
378
379/**
380 * bfa_port_hbfail()
381 *
382 *
383 * @param[in] Pointer to the Port module data structure.
384 *
385 * @return void
386 */
387void
388bfa_port_hbfail(void *arg)
389{
390 struct bfa_port_s *port = (struct bfa_port_s *)arg;
391
392 /*
393 * Fail any pending get_stats/clear_stats requests
394 */
395 if (port->stats_busy) {
396 if (port->stats_cbfn)
397 port->stats_cbfn(port->dev, BFA_STATUS_FAILED);
398 port->stats_cbfn = NULL;
399 port->stats_busy = BFA_FALSE;
400 }
401
402 /*
403 * Clear any enable/disable is pending
404 */
405 if (port->endis_pending) {
406 if (port->endis_cbfn)
407 port->endis_cbfn(port->dev, BFA_STATUS_FAILED);
408 port->endis_cbfn = NULL;
409 port->endis_pending = BFA_FALSE;
410 }
411}
412
/**
 * bfa_port_attach()
 *
 * Attach the port module: save handles and register the mailbox ISR
 * and the heartbeat-failure handler with the IOC.
 *
 * @param[in] port   - Pointer to the Port module data structure
 *            ioc    - Pointer to the ioc module data structure
 *            dev    - Pointer to the device driver module data structure
 *                     The device driver specific mbox ISR functions have
 *                     this pointer as one of the parameters.
 *            trcmod - trace module handle
 *            logmod - log module handle
 *
 * @return void
 */
void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
		 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
{
	bfa_assert(port);

	port->dev = dev;
	port->ioc = ioc;
	port->trcmod = trcmod;
	port->logmod = logmod;

	/* Nothing outstanding at attach time */
	port->stats_busy = port->endis_pending = BFA_FALSE;
	port->stats_cbfn = port->endis_cbfn = NULL;

	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);

	bfa_trc(port, 0);
}
447
/**
 * bfa_port_detach()
 *
 * Detach hook; currently only traces (no resources to release).
 *
 * @param[in] port - Pointer to the Port module data structure
 *
 * @return void
 */
void
bfa_port_detach(struct bfa_port_s *port)
{
	bfa_trc(port, 0);
}
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
new file mode 100644
index 000000000000..4b97e2759908
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_PRIV_H__
19#define __BFA_PORT_PRIV_H__
20
21#include <defs/bfa_defs_pport.h>
22#include <bfi/bfi_pport.h>
23#include "bfa_intr_priv.h"
24
/**
 * BFA physical port data structure
 */
struct bfa_pport_s {
	struct bfa_s 	*bfa;	/*  parent BFA instance */
	bfa_sm_t	sm;	/*  port state machine */
	wwn_t		nwwn;	/*  node wwn of physical port */
	wwn_t		pwwn;	/*  port wwn of physical port */
	enum bfa_pport_speed speed_sup;
				/*  supported speeds */
	enum bfa_pport_speed speed;	/*  current speed */
	enum bfa_pport_topology topology;	/*  current topology */
	u8		myalpa;	/*  my ALPA in LOOP topology */
	u8		rsvd[3];
	struct bfa_pport_cfg_s cfg;	/*  current port configuration */
	struct bfa_qos_attr_s qos_attr;	/*  QoS Attributes */
	struct bfa_qos_vc_attr_s qos_vc_attr;	/*  VC info from ELP */
	struct bfa_reqq_wait_s reqq_wait;
				/*  to wait for room in reqq */
	struct bfa_reqq_wait_s svcreq_wait;
				/*  to wait for room in reqq */
	struct bfa_reqq_wait_s stats_reqq_wait;
				/*  to wait for room in reqq (stats) */
	void		*event_cbarg;
	void		(*event_cbfn) (void *cbarg,
				bfa_pport_event_t event);
	union {
		union bfi_pport_i2h_msg_u i2hmsg;
	} event_arg;
	void		*bfad;	/*  BFA driver handle */
	struct bfa_cb_qe_s hcb_qe;	/*  BFA callback queue elem */
	enum bfa_pport_linkstate hcb_event;
				/*  link event for callback */
	u32		msgtag;	/*  firmware msg tag for reply */
	u8		*stats_kva;
	u64	stats_pa;
	union bfa_pport_stats_u *stats;	/*  pport stats */
	u32		mypid : 24;
	u32		rsvd_b : 8;
	struct bfa_timer_s timer;	/*  timer */
	union bfa_pport_stats_u *stats_ret;
				/*  driver stats location */
	bfa_status_t	stats_status;
				/*  stats/statsclr status */
	bfa_boolean_t	stats_busy;
				/*  outstanding stats/statsclr */
	bfa_boolean_t	stats_qfull;
	bfa_boolean_t	diag_busy;
				/*  diag busy status */
	bfa_boolean_t	beacon;
				/*  port beacon status */
	bfa_boolean_t	link_e2e_beacon;
				/*  link beacon status */
	bfa_cb_pport_t	stats_cbfn;
				/*  driver callback function */
	void		*stats_cbarg;
				/*  user callback arg */
};

#define BFA_PORT_MOD(__bfa)	(&(__bfa)->modules.pport)

/*
 * public functions
 */
void	bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
90#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
new file mode 100644
index 000000000000..0747a6b26f7b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PRIV_H__
19#define __BFA_PRIV_H__
20
21#include "bfa_iocfc.h"
22#include "bfa_intr_priv.h"
23#include "bfa_trcmod_priv.h"
24#include "bfa_modules_priv.h"
25#include "bfa_fwimg_priv.h"
26#include <cs/bfa_log.h>
27#include <bfa_timer.h>
28
/**
 * BFA_MODULE - declare one BFA sub-module in a single place.
 *
 * Expands to the static prototypes for the seven per-module entry
 * points (meminfo/attach/initdone/detach/start/stop/iocdisable) and
 * defines the module descriptor hal_mod_<mod> wired to them.  The
 * function-pointer order must match struct bfa_module_s below.
 */
#define BFA_MODULE(__mod) \
	static void bfa_ ## __mod ## _meminfo( \
			struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
			u32 *dm_len); \
	static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
			void *bfad, struct bfa_iocfc_cfg_s *cfg, \
			struct bfa_meminfo_s *meminfo, \
			struct bfa_pcidev_s *pcidev); \
	static void bfa_ ## __mod ## _initdone(struct bfa_s *bfa); \
	static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
	static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
	static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
	static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \
	\
	extern struct bfa_module_s hal_mod_ ## __mod; \
	struct bfa_module_s hal_mod_ ## __mod = { \
		bfa_ ## __mod ## _meminfo, \
		bfa_ ## __mod ## _attach, \
		bfa_ ## __mod ## _initdone, \
		bfa_ ## __mod ## _detach, \
		bfa_ ## __mod ## _start, \
		bfa_ ## __mod ## _stop, \
		bfa_ ## __mod ## _iocdisable, \
	}
56
57#define BFA_CACHELINE_SZ (256)
58
/**
 * Structure used to interact between different BFA sub modules
 *
 * Each sub module needs to implement only the entry points relevant to it (and
 * can leave entry points as NULL).  The order of members must match the
 * initializer emitted by the BFA_MODULE() macro above.
 */
struct bfa_module_s {
	/* report KVA/DMA memory needs for the given IOC configuration */
	void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
			u32 *dm_len);
	/* claim memory and initialize module state */
	void (*attach) (struct bfa_s *bfa, void *bfad,
			struct bfa_iocfc_cfg_s *cfg,
			struct bfa_meminfo_s *meminfo,
			struct bfa_pcidev_s *pcidev);
	void (*initdone) (struct bfa_s *bfa);
	void (*detach) (struct bfa_s *bfa);
	void (*start) (struct bfa_s *bfa);
	void (*stop) (struct bfa_s *bfa);
	/* IOC h/w failure notification */
	void (*iocdisable) (struct bfa_s *bfa);
};
78
79extern struct bfa_module_s *hal_mods[];
80
/* Top-level BFA instance: aggregates IOC, IOCFC, timers and sub-modules. */
struct bfa_s {
	void			*bfad;		/* BFA driver instance */
	struct bfa_aen_s	*aen;		/* AEN module */
	struct bfa_plog_s	*plog;		/* portlog buffer */
	struct bfa_log_mod_s	*logm;		/* driver logging module */
	struct bfa_trc_mod_s	*trcmod;	/* driver tracing */
	struct bfa_ioc_s	ioc;		/* IOC module */
	struct bfa_iocfc_s	iocfc;		/* IOCFC module */
	struct bfa_timer_mod_s	timer_mod;	/* timer module */
	struct bfa_modules_s	modules;	/* BFA modules */
	struct list_head	comp_q;		/* pending completions */
	bfa_boolean_t		rme_process;	/* RME processing enabled */
	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
	bfa_boolean_t		fcs;		/* FCS is attached to BFA */
	struct bfa_msix_s	msix;
};
97
98extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
99extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[];
100extern bfa_boolean_t bfa_auto_recover;
101extern struct bfa_module_s hal_mod_flash;
102extern struct bfa_module_s hal_mod_fcdiag;
103extern struct bfa_module_s hal_mod_sgpg;
104extern struct bfa_module_s hal_mod_pport;
105extern struct bfa_module_s hal_mod_fcxp;
106extern struct bfa_module_s hal_mod_lps;
107extern struct bfa_module_s hal_mod_uf;
108extern struct bfa_module_s hal_mod_rport;
109extern struct bfa_module_s hal_mod_fcpim;
110extern struct bfa_module_s hal_mod_pbind;
111
112#endif /* __BFA_PRIV_H__ */
113
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
new file mode 100644
index 000000000000..16da77a8db28
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -0,0 +1,911 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <cs/bfa_debug.h>
21#include <bfi/bfi_rport.h>
22#include "bfa_intr_priv.h"
23
24BFA_TRC_FILE(HAL, RPORT);
25BFA_MODULE(rport);
26
27#define bfa_rport_offline_cb(__rp) do { \
28 if ((__rp)->bfa->fcs) \
29 bfa_cb_rport_offline((__rp)->rport_drv); \
30 else { \
31 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
32 __bfa_cb_rport_offline, (__rp)); \
33 } \
34} while (0)
35
36#define bfa_rport_online_cb(__rp) do { \
37 if ((__rp)->bfa->fcs) \
38 bfa_cb_rport_online((__rp)->rport_drv); \
39 else { \
40 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
41 __bfa_cb_rport_online, (__rp)); \
42 } \
43} while (0)
44
45/*
46 * forward declarations
47 */
48static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
49static void bfa_rport_free(struct bfa_rport_s *rport);
50static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
51static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
52static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
53static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
54static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
55
56/**
57 * bfa_rport_sm BFA rport state machine
58 */
59
60
/* Events posted to the BFA rport state machine. */
enum bfa_rport_event {
	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event */
	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport */
	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online */
	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline */
	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response */
	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure */
	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware */
	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed */
	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue */
};
72
73static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
74 enum bfa_rport_event event);
75static void bfa_rport_sm_created(struct bfa_rport_s *rp,
76 enum bfa_rport_event event);
77static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
78 enum bfa_rport_event event);
79static void bfa_rport_sm_online(struct bfa_rport_s *rp,
80 enum bfa_rport_event event);
81static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
82 enum bfa_rport_event event);
83static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
84 enum bfa_rport_event event);
85static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
86 enum bfa_rport_event event);
87static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
88 enum bfa_rport_event event);
89static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
90 enum bfa_rport_event event);
91static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
92 enum bfa_rport_event event);
93static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
94 enum bfa_rport_event event);
95static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
96 enum bfa_rport_event event);
97static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
98 enum bfa_rport_event event);
99
/**
 * Beginning state, only create event expected.  Freed rports are also
 * parked here (see bfa_rport_free callers).
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_assert(0);
	}
}
120
/**
 * Created but not yet online; no firmware rport exists at this point.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* fall back to the qfull state if the request queue is full */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_assert(0);
	}
}
152
/**
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* must wait for the pending f/w response before deleting */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* must wait for the pending f/w response before offlining */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_assert(0);
	}
}
189
/**
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create never reached firmware: cancel the wait and free */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_assert(0);
	}
}
230
/**
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		/* traced values are still in network byte order here */
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* convert in place before comparing / reporting upward */
		qos_scn->old_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
		qos_scn->old_qos_attr.qos_priority =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
		qos_scn->new_qos_attr.qos_priority =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);

		/* notify driver only for the attribute(s) that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						   qos_scn->old_qos_attr,
						   qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						 qos_scn->old_qos_attr,
						 qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_assert(0);
	}
}
302
/**
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* no f/w response will arrive; complete the offline now */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_assert(0);
	}
}
335
/**
 * Request queue full while offlining - waiting to send the f/w delete.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_assert(0);
	}
}
365
/**
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_assert(0);
	}
}
400
/**
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC is gone; no response will arrive - free immediately */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_assert(0);
	}
}
427
/**
 * Rport is deleted, but the f/w delete request is still queued behind a
 * full request queue.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_assert(0);
	}
}
452
/**
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_assert(0);
	}
}
483
/**
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred f/w delete */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_assert(0);
	}
}
518
/**
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* stay in this state; just complete the offline callback */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC may have recovered; try to recreate the f/w rport */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already disabled; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_assert(0);
	}
}
556
557
558
559/**
560 * bfa_rport_private BFA rport private functions
561 */
562
563static void
564__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
565{
566 struct bfa_rport_s *rp = cbarg;
567
568 if (complete)
569 bfa_cb_rport_online(rp->rport_drv);
570}
571
572static void
573__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
574{
575 struct bfa_rport_s *rp = cbarg;
576
577 if (complete)
578 bfa_cb_rport_offline(rp->rport_drv);
579}
580
581static void
582bfa_rport_qresume(void *cbarg)
583{
584 struct bfa_rport_s *rp = cbarg;
585
586 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
587}
588
589static void
590bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
591 u32 *dm_len)
592{
593 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
594 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
595
596 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
597}
598
/**
 * Module attach: carve the rport array out of the pre-allocated KVA
 * arena and initialize every rport's state machine and wait element.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of 2: BFA_RPORT_FROM_TAG
	 * relies on (num_rports - 1) being a valid tag mask */
	bfa_assert(mod->num_rports
			&& !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/**
		 * rport tag 0 is reserved (unused); keep it off the free list
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/**
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
637
/* No init-done work for rports; stub required by BFA_MODULE(). */
static void
bfa_rport_initdone(struct bfa_s *bfa)
{
}
642
/* No detach work for rports; stub required by BFA_MODULE(). */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
647
/* No start work for rports; stub required by BFA_MODULE(). */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
652
/* No stop work for rports; stub required by BFA_MODULE(). */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
657
/**
 * IOC went down: post HWFAIL to every active rport state machine.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* safe iterator: HWFAIL can free the rport, unlinking it from
	 * rp_active_q (see e.g. bfa_rport_sm_deleting) */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
670
671static struct bfa_rport_s *
672bfa_rport_alloc(struct bfa_rport_mod_s *mod)
673{
674 struct bfa_rport_s *rport;
675
676 bfa_q_deq(&mod->rp_free_q, &rport);
677 if (rport)
678 list_add_tail(&rport->qe, &mod->rp_active_q);
679
680 return (rport);
681}
682
683static void
684bfa_rport_free(struct bfa_rport_s *rport)
685{
686 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
687
688 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
689 list_del(&rport->qe);
690 list_add_tail(&rport->qe, &mod->rp_free_q);
691}
692
/**
 * Send an rport-create request to firmware.
 *
 * @return BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (a QRESUME event will be posted via rp->reqq_wait).
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* build the request in place in the queue slot */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
725
/**
 * Send an rport-delete request to firmware.
 *
 * @return BFA_TRUE if queued, BFA_FALSE if the request queue was full
 * (a QRESUME event will be posted via rp->reqq_wait).
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
750
/**
 * Send a set-speed request to firmware.
 *
 * Unlike create/delete, a full request queue is NOT retried: the event
 * is simply dropped (only a trace is left).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
776
777
778
779/**
780 * bfa_rport_public
781 */
782
/**
 * Rport interrupt processing: dispatch firmware i2h messages to the
 * per-rport state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* latch fw handle + QoS attrs before advancing the SM */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* SM reads the raw event from event_arg.fw_msg */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
822
823
824
825/**
826 * bfa_rport_api
827 */
828
829struct bfa_rport_s *
830bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
831{
832 struct bfa_rport_s *rp;
833
834 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
835
836 if (rp == NULL)
837 return (NULL);
838
839 rp->bfa = bfa;
840 rp->rport_drv = rport_drv;
841 bfa_rport_clear_stats(rp);
842
843 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
844 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
845
846 return (rp);
847}
848
/* Delete an rport; the state machine frees it once firmware is done. */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
854
855void
856bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
857{
858 bfa_assert(rport_info->max_frmsz != 0);
859
860 /**
861 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
862 * responses. Default to minimum size.
863 */
864 if (rport_info->max_frmsz == 0) {
865 bfa_trc(rport->bfa, rport->rport_tag);
866 rport_info->max_frmsz = FC_MIN_PDUSZ;
867 }
868
869 bfa_os_assign(rport->rport_info, *rport_info);
870 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
871}
872
/* Take an rport offline; completion is reported via the offline callback. */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
878
/**
 * Set the rport's operating speed.  Must be a fixed speed - AUTO is
 * rejected here (asserted).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PPORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
888
/* Copy the rport's HAL statistics into the caller-supplied buffer. */
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
	struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}
895
896void
897bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
898 struct bfa_rport_qos_attr_s *qos_attr)
899{
900 qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
901 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
902
903}
904
/* Reset the rport's HAL statistics counters to zero. */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}
910
911
diff --git a/drivers/scsi/bfa/bfa_rport_priv.h b/drivers/scsi/bfa/bfa_rport_priv.h
new file mode 100644
index 000000000000..6490ce2e990d
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_rport_priv.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_RPORT_PRIV_H__
19#define __BFA_RPORT_PRIV_H__
20
21#include <bfa_svc.h>
22
23#define BFA_RPORT_MIN 4
24
/* Per-BFA rport module state: the rport array and its free/active pools. */
struct bfa_rport_mod_s {
	struct bfa_rport_s *rps_list;	/*  list of rports */
	struct list_head 	rp_free_q;	/*  free bfa_rports */
	struct list_head 	rp_active_q;	/*  active (allocated) bfa_rports */
	u16	num_rports;	/*  number of rports */
};
31
32#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
33
/**
 * Convert rport tag to RPORT.
 *
 * NOTE: num_rports must be a power of two for the mask below to be a
 * valid bounds reduction (asserted in bfa_rport_attach).
 */
#define BFA_RPORT_FROM_TAG(__bfa, _tag)				\
	(BFA_RPORT_MOD(__bfa)->rps_list +				\
	 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
40
41/*
42 * external functions
43 */
44void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
45#endif /* __BFA_RPORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sgpg.c b/drivers/scsi/bfa/bfa_sgpg.c
new file mode 100644
index 000000000000..279d8f9b8907
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_sgpg.c
@@ -0,0 +1,231 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19
20BFA_TRC_FILE(HAL, SGPG);
21BFA_MODULE(sgpg);
22
23/**
24 * bfa_sgpg_mod BFA SGPG Mode module
25 */
26
27/**
28 * Compute KVA and DMA memory needed by the SGPG module, enforcing the
 * BFA_SGPG_MIN floor on the configured page count. One extra entry is
 * reserved in each array (presumably slack for DMA alignment -- see
 * bfa_sgpg_attach()).
29 */
30static void
31bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
32		u32 *dm_len)
33{
34	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
35		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
36
37	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
38	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
39}
40
41
/**
 * Module attach: carve the SG page arrays out of the pre-allocated
 * meminfo regions, align the DMA array to the SG page size, and link
 * every page onto the free queue.
 */
42static void
43bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
44		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
45{
46	struct bfa_sgpg_mod_s	*mod = BFA_SGPG_MOD(bfa);
47	int i;
48	struct bfa_sgpg_s *hsgpg;
49	struct bfi_sgpg_s *sgpg;
50	u64 align_len;
51
	/* view the DMA address both as a raw u64 and as the f/w type */
52	union {
53		u64 pa;
54		union bfi_addr_u addr;
55	} sgpg_pa;
56
57	INIT_LIST_HEAD(&mod->sgpg_q);
58	INIT_LIST_HEAD(&mod->sgpg_wait_q);
59
60	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
61
62	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/* round the DMA base up to an SG-page boundary; the same offset is
	 * applied to the KVA and DMA-virtual arrays to keep them in step */
63	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
64	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
65	mod->sgpg_arr_pa += align_len;
66	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
67						align_len);
68	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
69					       align_len);
70
71	hsgpg = mod->hsgpg_arr;
72	sgpg = mod->sgpg_arr;
73	sgpg_pa.pa = mod->sgpg_arr_pa;
74	mod->free_sgpgs = mod->num_sgpgs;
75
76	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
77
	/* initialize each host SG page descriptor and queue it free */
78	for (i = 0; i < mod->num_sgpgs; i++) {
79		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
80		bfa_os_memset(sgpg, 0, sizeof(*sgpg));
81
82		hsgpg->sgpg = sgpg;
83		hsgpg->sgpg_pa = sgpg_pa.addr;
84		list_add_tail(&hsgpg->qe, &mod->sgpg_q);
85
86		hsgpg++;
87		sgpg++;
88		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
89	}
90
	/* advance the meminfo cursors past the space just claimed */
91	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
92	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
93	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
94}
95
/*
 * Remaining BFA module hooks are no-ops for the SGPG module: it has no
 * per-IOC state to initialize, start, stop or tear down beyond attach.
 */
96static void
97bfa_sgpg_initdone(struct bfa_s *bfa)
98{
99}
100
101static void
102bfa_sgpg_detach(struct bfa_s *bfa)
103{
104}
105
106static void
107bfa_sgpg_start(struct bfa_s *bfa)
108{
109}
110
111static void
112bfa_sgpg_stop(struct bfa_s *bfa)
113{
114}
115
116static void
117bfa_sgpg_iocdisable(struct bfa_s *bfa)
118{
119}
120
121
122
123/**
124 * bfa_sgpg_public BFA SGPG public functions
125 */
126
/**
 * Allocate nsgpgs SG pages, moving them from the module free queue onto
 * the caller's sgpg_q. All-or-nothing: returns BFA_STATUS_ENOMEM
 * without allocating anything if fewer than nsgpgs pages are free.
 */
127bfa_status_t
128bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
129{
130	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
131	struct bfa_sgpg_s *hsgpg;
132	int i;
133
134	bfa_trc_fp(bfa, nsgpgs);
135
136	if (mod->free_sgpgs < nsgpgs)
137		return BFA_STATUS_ENOMEM;
138
139	for (i = 0; i < nsgpgs; i++) {
140		bfa_q_deq(&mod->sgpg_q, &hsgpg);
141		bfa_assert(hsgpg);
142		list_add_tail(&hsgpg->qe, sgpg_q);
143	}
144
145	mod->free_sgpgs -= nsgpgs;
146	return BFA_STATUS_OK;
147}
148
/**
 * Return nsgpg SG pages to the module free queue, then hand out the
 * reclaimed pages to waiters in FIFO order. A waiter may be satisfied
 * partially; its callback fires only once its full request is met.
 */
149void
150bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
151{
152	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
153	struct bfa_sgpg_wqe_s *wqe;
154
155	bfa_trc_fp(bfa, nsgpg);
156
157	mod->free_sgpgs += nsgpg;
158	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
159
160	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
161
162	if (list_empty(&mod->sgpg_wait_q))
163		return;
164
165	/**
166	 * satisfy as many waiting requests as possible
167	 */
168	do {
169		wqe = bfa_q_first(&mod->sgpg_wait_q);
170		if (mod->free_sgpgs < wqe->nsgpg)
171			nsgpg = mod->free_sgpgs;
172		else
173			nsgpg = wqe->nsgpg;
174		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
175		wqe->nsgpg -= nsgpg;
176		if (wqe->nsgpg == 0) {
177			list_del(&wqe->qe);
178			wqe->cbfn(wqe->cbarg);
179		}
180	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
181}
182
/**
 * Queue a wait element for nsgpg SG pages. Caller must already know the
 * request cannot be fully satisfied (asserted below); any pages that are
 * free right now are handed over immediately and the wqe waits for the
 * remainder.
 */
183void
184bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
185{
186	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
187
188	bfa_assert(nsgpg > 0);
189	bfa_assert(nsgpg > mod->free_sgpgs);
190
191	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
192
193	/**
194	 * allocate any left to this one first
195	 */
196	if (mod->free_sgpgs) {
197		/**
198		 * no one else is waiting for SGPG
199		 */
200		bfa_assert(list_empty(&mod->sgpg_wait_q));
201		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
202		wqe->nsgpg -= mod->free_sgpgs;
203		mod->free_sgpgs = 0;
204	}
205
206	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
207}
208
/**
 * Cancel a pending SG page wait request; any pages already granted to
 * the wqe (nsgpg_total - nsgpg) are returned to the free pool.
 */
209void
210bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
211{
212	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
213
214	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
215	list_del(&wqe->qe);
216
217	if (wqe->nsgpg_total != wqe->nsgpg)
218		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
219			wqe->nsgpg_total - wqe->nsgpg);
220}
221
/**
 * Initialize a wait element before use with bfa_sgpg_wait(): empty page
 * queue plus the callback invoked when the request is fully satisfied.
 */
222void
223bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
224		   void *cbarg)
225{
226	INIT_LIST_HEAD(&wqe->sgpg_q);
227	wqe->cbfn = cbfn;
228	wqe->cbarg = cbarg;
229}
230
231
diff --git a/drivers/scsi/bfa/bfa_sgpg_priv.h b/drivers/scsi/bfa/bfa_sgpg_priv.h
new file mode 100644
index 000000000000..9c2a8cbe7522
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_sgpg_priv.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_sgpg.h BFA SG page module
20 */
21
22#ifndef __BFA_SGPG_PRIV_H__
23#define __BFA_SGPG_PRIV_H__
24
25#include <cs/bfa_q.h>
26
27#define BFA_SGPG_MIN	(16)
28
29/**
30 * Alignment macro for SG page allocation
31 */
32#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1))	\
33			      & ~(sizeof(struct bfi_sgpg_s) - 1))
34
/* Wait queue element used when an SG page request cannot be met at once. */
35struct bfa_sgpg_wqe_s {
36	struct list_head qe;	/*  queue sg page element	*/
37	int	nsgpg;		/*  pages to be allocated	*/
38	int	nsgpg_total;	/*  total pages required	*/
39	void	(*cbfn) (void *cbarg);
40				/*  callback function		*/
41	void	*cbarg;		/*  callback arg		*/
42	struct list_head sgpg_q;	/*  queue of alloced sgpgs	*/
43};
44
/* Host-side descriptor for one firmware SG page. */
45struct bfa_sgpg_s {
46	struct list_head  qe;	/*  queue sg page element	*/
47	struct bfi_sgpg_s *sgpg; /*  va of SG page		*/
48	union bfi_addr_u sgpg_pa;/*  pa of SG page		*/
49};
50
51/**
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required.
54 */
55#define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56
/* Per-IOC SG page pool state. */
57struct bfa_sgpg_mod_s {
58	struct bfa_s *bfa;
59	int		num_sgpgs;	/*  number of SG pages		*/
60	int		free_sgpgs;	/*  number of free SG pages	*/
61	struct bfa_sgpg_s	*hsgpg_arr;	/*  BFA SG page array	*/
62	struct bfi_sgpg_s *sgpg_arr;	/*  actual SG page array	*/
63	u64	sgpg_arr_pa;	/*  SG page array DMA addr	*/
64	struct list_head sgpg_q;	/*  queue of free SG pages */
65	struct list_head sgpg_wait_q; /*  wait queue for SG pages */
66};
67#define BFA_SGPG_MOD(__bfa)	(&(__bfa)->modules.sgpg_mod)
68
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs);
71void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q,
72 int nsgpgs);
73void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
74 void (*cbfn) (void *cbarg), void *cbarg);
75void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
76 int nsgpgs);
77void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
78
79#endif /* __BFA_SGPG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sm.c b/drivers/scsi/bfa/bfa_sm.c
new file mode 100644
index 000000000000..5420f4f45e58
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_sm.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfasm.c BFA State machine utility functions
20 */
21
22#include <cs/bfa_sm.h>
23
24/**
25 * cs_sm_api
26 */
27
/**
 * Map a state-machine function pointer to its numeric state via the
 * lookup table. If sm is not found, the terminating entry's state
 * (the one with a NULL .sm) is returned.
 */
28int
29bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
30{
31	int             i = 0;
32
33	while (smt[i].sm && smt[i].sm != sm)
34		i++;
35	return smt[i].state;
36}
37
38
diff --git a/drivers/scsi/bfa/bfa_timer.c b/drivers/scsi/bfa/bfa_timer.c
new file mode 100644
index 000000000000..cb76481f5cb1
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_timer.c
@@ -0,0 +1,90 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_timer.h>
19#include <cs/bfa_debug.h>
20
/**
 * Initialize the timer module's (empty) list of active timers.
 */
21void
22bfa_timer_init(struct bfa_timer_mod_s *mod)
23{
24	INIT_LIST_HEAD(&mod->timer_q);
25}
26
/**
 * Periodic tick: decrement every queued timer by BFA_TIMER_FREQ, move
 * the expired ones to a local list, then fire their callbacks. Expired
 * timers are collected first so callbacks can safely re-arm timers.
 */
27void
28bfa_timer_beat(struct bfa_timer_mod_s *mod)
29{
30	struct list_head *qh = &mod->timer_q;
31	struct list_head *qe, *qe_next;
32	struct bfa_timer_s *elem;
33	struct list_head timedout_q;
34
35	INIT_LIST_HEAD(&timedout_q);
36
37	qe = bfa_q_next(qh);
38
39	while (qe != qh) {
40		qe_next = bfa_q_next(qe);
41
42		elem = (struct bfa_timer_s *) qe;
43		if (elem->timeout <= BFA_TIMER_FREQ) {
44			elem->timeout = 0;
45			list_del(&elem->qe);
46			list_add_tail(&elem->qe, &timedout_q);
47		} else {
48			elem->timeout -= BFA_TIMER_FREQ;
49		}
50
51		qe = qe_next;	/* go to next elem */
52	}
53
54	/*
55	 * Pop all the timeout entries
56	 */
57	while (!list_empty(&timedout_q)) {
58		bfa_q_deq(&timedout_q, &elem);
59		elem->timercb(elem->arg);
60	}
61}
62
63/**
64 * Should be called with lock protection
 *
 * Arm a one-shot timer: after ~timeout (in BFA_TIMER_FREQ units, see
 * bfa_timer_beat) timercb(arg) is invoked. The timer must not already
 * be queued (asserted).
65 */
66void
67bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
68		    void (*timercb) (void *), void *arg, unsigned int timeout)
69{
70
71	bfa_assert(timercb != NULL);
72	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
73
74	timer->timeout = timeout;
75	timer->timercb = timercb;
76	timer->arg = arg;
77
78	list_add_tail(&timer->qe, &mod->timer_q);
79}
80
81/**
82 * Should be called with lock protection
 *
 * Cancel a pending timer; it must currently be queued (asserted).
83 */
84void
85bfa_timer_stop(struct bfa_timer_s *timer)
86{
87	bfa_assert(!list_empty(&timer->qe));
88
89	list_del(&timer->qe);
90}
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
new file mode 100644
index 000000000000..b3562dce7e9f
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_trcmod_priv.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_trcmod.h BFA trace modules
20 */
21
22#ifndef __BFA_TRCMOD_PRIV_H__
23#define __BFA_TRCMOD_PRIV_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
 *
 * These ids tag trace entries per module (see BFA_TRC_FILE users), so
 * their numeric values are effectively an external ABI shared with the
 * trace decoding utility -- never renumber or reorder.
30 */
31enum {
32	BFA_TRC_HAL_IOC		= 1,
33	BFA_TRC_HAL_INTR	= 2,
34	BFA_TRC_HAL_FCXP	= 3,
35	BFA_TRC_HAL_UF		= 4,
36	BFA_TRC_HAL_DIAG	= 5,
37	BFA_TRC_HAL_RPORT	= 6,
38	BFA_TRC_HAL_FCPIM	= 7,
39	BFA_TRC_HAL_IOIM	= 8,
40	BFA_TRC_HAL_TSKIM	= 9,
41	BFA_TRC_HAL_ITNIM	= 10,
42	BFA_TRC_HAL_PPORT	= 11,
43	BFA_TRC_HAL_SGPG	= 12,
44	BFA_TRC_HAL_FLASH	= 13,
45	BFA_TRC_HAL_DEBUG	= 14,
46	BFA_TRC_HAL_WWN		= 15,
47	BFA_TRC_HAL_FLASH_RAW	= 16,
48	BFA_TRC_HAL_SBOOT	= 17,
49	BFA_TRC_HAL_SBOOT_IO	= 18,
50	BFA_TRC_HAL_SBOOT_INTR	= 19,
51	BFA_TRC_HAL_SBTEST	= 20,
52	BFA_TRC_HAL_IPFC	= 21,
53	BFA_TRC_HAL_IOCFC	= 22,
54	BFA_TRC_HAL_FCPTM	= 23,
55	BFA_TRC_HAL_IOTM	= 24,
56	BFA_TRC_HAL_TSKTM	= 25,
57	BFA_TRC_HAL_TIN		= 26,
58	BFA_TRC_HAL_LPS		= 27,
59	BFA_TRC_HAL_FCDIAG	= 28,
60	BFA_TRC_HAL_PBIND	= 29,
61	BFA_TRC_HAL_IOCFC_CT	= 30,
62	BFA_TRC_HAL_IOCFC_CB	= 31,
63	BFA_TRC_HAL_IOCFC_Q	= 32,
64};
65
66#endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
new file mode 100644
index 000000000000..010d40d1e5d3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -0,0 +1,689 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_cb_ioim_macros.h>
20
21BFA_TRC_FILE(HAL, TSKIM);
22
23/**
24 * task management completion handling
 *
 * bfa_tskim_qcomp: queue the host completion callback (__cbfn) for this
 * TM and notify the owning itnim if a cleanup notification is pending.
25 */
26#define bfa_tskim_qcomp(__tskim, __cbfn) do {			 \
27	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
28	bfa_tskim_notify_comp(__tskim);      \
29} while (0)
30
/* Notify the itnim of TM completion only when ->notify was set (cleanup). */
31#define bfa_tskim_notify_comp(__tskim) do {			 \
32	if ((__tskim)->notify)					 \
33		bfa_itnim_tskdone((__tskim)->itnim);      \
34} while (0)
35
36/*
37 * forward declarations
38 */
39static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
40static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
41static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
42 lun_t lun);
43static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
44static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
45static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
46static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
47static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
48static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
49
50/**
51 * bfa_tskim_sm
52 */
53
54enum bfa_tskim_event {
55 BFA_TSKIM_SM_START = 1, /* TM command start */
56 BFA_TSKIM_SM_DONE = 2, /* TM completion */
57 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
58 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
59 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
60 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
61 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
62 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
63};
64
65static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
66 enum bfa_tskim_event event);
67static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
68 enum bfa_tskim_event event);
69static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
70 enum bfa_tskim_event event);
71static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
72 enum bfa_tskim_event event);
73static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
74 enum bfa_tskim_event event);
75static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
76 enum bfa_tskim_event event);
77static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
78 enum bfa_tskim_event event);
79
80/**
81 * Task management command beginning state.
 *
 * On START: gather the IO requests in TM scope, then either clean up
 * immediately (device offline) or send the TM to firmware, parking in
 * the qfull state if the request queue has no room.
82 */
83static void
84bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
85{
86	bfa_trc(tskim->bfa, event);
87
88	switch (event) {
89	case BFA_TSKIM_SM_START:
90		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
91		bfa_tskim_gather_ios(tskim);
92
93		/**
94		 * If device is offline, do not send TM on wire. Just cleanup
95		 * any pending IO requests and complete TM request.
96		 */
97		if (!bfa_itnim_is_online(tskim->itnim)) {
98			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
99			tskim->tsk_status = BFI_TSKIM_STS_OK;
100			bfa_tskim_cleanup_ios(tskim);
101			return;
102		}
103
104		if (!bfa_tskim_send(tskim)) {
105			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
106			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
107				&tskim->reqq_wait);
108		}
109		break;
110
111	default:
112		bfa_assert(0);
113	}
114}
115
116/**
117 * brief
118 * 	TM command is active, awaiting completion from firmware to
119 * 	cleanup IO requests in TM scope.
120 */
121static void
122bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
123{
124	bfa_trc(tskim->bfa, event);
125
126	switch (event) {
	/* firmware completed the TM: clean up IOs in scope */
127	case BFA_TSKIM_SM_DONE:
128		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
129		bfa_tskim_cleanup_ios(tskim);
130		break;
131
	/* ITN going offline: abort the in-flight TM (qfull-wait if needed) */
132	case BFA_TSKIM_SM_CLEANUP:
133		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
134		if (!bfa_tskim_send_abort(tskim)) {
135			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
136			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
137				&tskim->reqq_wait);
138		}
139		break;
140
141	case BFA_TSKIM_SM_HWFAIL:
142		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
143		bfa_tskim_iocdisable_ios(tskim);
144		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
145		break;
146
147	default:
148		bfa_assert(0);
149	}
150}
151
152/**
153 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
154 * completion event from firmware.
155 */
156static void
157bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
158{
159	bfa_trc(tskim->bfa, event);
160
161	switch (event) {
162	case BFA_TSKIM_SM_DONE:
163		/**
164		 * Ignore and wait for ABORT completion from firmware.
165		 */
166		break;
167
168	case BFA_TSKIM_SM_CLEANUP_DONE:
169		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
170		bfa_tskim_cleanup_ios(tskim);
171		break;
172
173	case BFA_TSKIM_SM_HWFAIL:
174		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
175		bfa_tskim_iocdisable_ios(tskim);
176		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
177		break;
178
179	default:
180		bfa_assert(0);
181	}
182}
183
/**
 * TM completed on wire; waiting for the IOs in TM scope to finish
 * cleanup before queuing the host completion callback.
 */
184static void
185bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
186{
187	bfa_trc(tskim->bfa, event);
188
189	switch (event) {
190	case BFA_TSKIM_SM_IOS_DONE:
191		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
192		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
193		break;
194
195	case BFA_TSKIM_SM_CLEANUP:
196		/**
197		 * Ignore, TM command completed on wire.
198		 * Notify TM completion on IO cleanup completion.
199		 */
200		break;
201
202	case BFA_TSKIM_SM_HWFAIL:
203		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
204		bfa_tskim_iocdisable_ios(tskim);
205		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
206		break;
207
208	default:
209		bfa_assert(0);
210	}
211}
212
213/**
214 * Task management command is waiting for room in request CQ
215 */
216static void
217bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
218{
219	bfa_trc(tskim->bfa, event);
220
221	switch (event) {
	/* queue space available again: send the TM now */
222	case BFA_TSKIM_SM_QRESUME:
223		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
224		bfa_tskim_send(tskim);
225		break;
226
227	case BFA_TSKIM_SM_CLEANUP:
228		/**
229		 * No need to send TM on wire since ITN is offline.
230		 */
231		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
232		bfa_reqq_wcancel(&tskim->reqq_wait);
233		bfa_tskim_cleanup_ios(tskim);
234		break;
235
236	case BFA_TSKIM_SM_HWFAIL:
237		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
238		bfa_reqq_wcancel(&tskim->reqq_wait);
239		bfa_tskim_iocdisable_ios(tskim);
240		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
241		break;
242
243	default:
244		bfa_assert(0);
245	}
246}
247
248/**
249 * Task management command is active, awaiting for room in request CQ
250 * to send clean up request.
251 */
252static void
253bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
254		enum bfa_tskim_event event)
255{
256	bfa_trc(tskim->bfa, event);
257
258	switch (event) {
259	case BFA_TSKIM_SM_DONE:
260		bfa_reqq_wcancel(&tskim->reqq_wait);
261		/**
262		 *
263		 * Fall through !!!
264		 */
265
266	case BFA_TSKIM_SM_QRESUME:
267		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
268		bfa_tskim_send_abort(tskim);
269		break;
270
271	case BFA_TSKIM_SM_HWFAIL:
272		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
273		bfa_reqq_wcancel(&tskim->reqq_wait);
274		bfa_tskim_iocdisable_ios(tskim);
275		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
276		break;
277
278	default:
279		bfa_assert(0);
280	}
281}
282
283/**
284 * BFA callback is pending
 *
 * Terminal state: the host completion callback has been queued; on HCB
 * the tskim is freed. A late CLEANUP only re-notifies the itnim, and
 * HWFAIL is ignored since completion is already in flight.
285 */
286static void
287bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
288{
289	bfa_trc(tskim->bfa, event);
290
291	switch (event) {
292	case BFA_TSKIM_SM_HCB:
293		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
294		bfa_tskim_free(tskim);
295		break;
296
297	case BFA_TSKIM_SM_CLEANUP:
298		bfa_tskim_notify_comp(tskim);
299		break;
300
301	case BFA_TSKIM_SM_HWFAIL:
302		break;
303
304	default:
305		bfa_assert(0);
306	}
307}
308
309
310
311/**
312 * bfa_tskim_private
313 */
314
/**
 * Host completion callback for a successfully completed TM; reports the
 * firmware status. complete==FALSE means the callback was flushed and
 * the SM is told to recycle the tskim instead.
 */
315static void
316__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
317{
318	struct bfa_tskim_s *tskim = cbarg;
319
320	if (!complete) {
321		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
322		return;
323	}
324
325	bfa_stats(tskim->itnim, tm_success);
326	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
327}
328
/**
 * Host completion callback for a TM terminated by IOC failure; always
 * reports BFI_TSKIM_STS_FAILED regardless of the saved status.
 */
329static void
330__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
331{
332	struct bfa_tskim_s *tskim = cbarg;
333
334	if (!complete) {
335		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
336		return;
337	}
338
339	bfa_stats(tskim->itnim, tm_failures);
340	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
341			   BFI_TSKIM_STS_FAILED);
342}
343
/**
 * Return TRUE if an IO on the given lun falls within this TM's scope:
 * target reset covers every LUN, the LUN-scoped TM codes match only
 * their own LUN.
 */
344static bfa_boolean_t
345bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
346{
347	switch (tskim->tm_cmnd) {
348	case FCP_TM_TARGET_RESET:
349		return BFA_TRUE;
350
351	case FCP_TM_ABORT_TASK_SET:
352	case FCP_TM_CLEAR_TASK_SET:
353	case FCP_TM_LUN_RESET:
354	case FCP_TM_CLEAR_ACA:
355		return (tskim->lun == lun);
356
357	default:
358		bfa_assert(0);
359	}
360
361	return BFA_FALSE;
362}
363
364/**
365 * Gather affected IO requests and task management commands.
 *
 * Active IOs in scope move from the itnim to tskim->io_q for cleanup;
 * IOs still pending (never sent) are failed back immediately with a
 * timeout-style completion.
366 */
367static void
368bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
369{
370	struct bfa_itnim_s *itnim = tskim->itnim;
371	struct bfa_ioim_s *ioim;
372	struct list_head	*qe, *qen;
373
374	INIT_LIST_HEAD(&tskim->io_q);
375
376	/**
377	 * Gather any active IO requests first.
378	 */
379	list_for_each_safe(qe, qen, &itnim->io_q) {
380		ioim = (struct bfa_ioim_s *) qe;
381		if (bfa_tskim_match_scope
382			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
383			list_del(&ioim->qe);
384			list_add_tail(&ioim->qe, &tskim->io_q);
385		}
386	}
387
388	/**
389	 * Failback any pending IO requests immediately.
390	 */
391	list_for_each_safe(qe, qen, &itnim->pending_q) {
392		ioim = (struct bfa_ioim_s *) qe;
393		if (bfa_tskim_match_scope
394			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
395			list_del(&ioim->qe);
396			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
397			bfa_ioim_tov(ioim);
398		}
399	}
400}
401
402/**
403 * IO cleanup completion
 *
 * Wait-counter callback fired when the last IO in TM scope finishes
 * cleanup; advances the state machine.
404 */
405static void
406bfa_tskim_cleanp_comp(void *tskim_cbarg)
407{
408	struct bfa_tskim_s *tskim = tskim_cbarg;
409
410	bfa_stats(tskim->itnim, tm_io_comps);
411	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
412}
413
414/**
415 * Gather affected IO requests and task management commands.
 *
 * Start cleanup of every IO gathered on tskim->io_q, tracking them with
 * a wait counter so bfa_tskim_cleanp_comp() runs once all are done
 * (each bfa_tskim_iodone() call counts one down).
416 */
417static void
418bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
419{
420	struct bfa_ioim_s *ioim;
421	struct list_head	*qe, *qen;
422
423	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
424
425	list_for_each_safe(qe, qen, &tskim->io_q) {
426		ioim = (struct bfa_ioim_s *) qe;
427		bfa_wc_up(&tskim->wc);
428		bfa_ioim_cleanup_tm(ioim, tskim);
429	}
430
431	bfa_wc_wait(&tskim->wc);
432}
433
434/**
435 * Send task management request to firmware.
 *
 * Returns BFA_FALSE (caller must wait on the request queue) when no CQ
 * entry is available; BFA_TRUE once the message has been produced.
436 */
437static bfa_boolean_t
438bfa_tskim_send(struct bfa_tskim_s *tskim)
439{
440	struct bfa_itnim_s *itnim = tskim->itnim;
441	struct bfi_tskim_req_s *m;
442
443	/**
444	 * check for room in queue to send request now
445	 */
446	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
447	if (!m)
448		return BFA_FALSE;
449
450	/**
451	 * build i/o request message next
452	 */
453	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
454			bfa_lpuid(tskim->bfa));
455
456	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
457	m->itn_fhdl = tskim->itnim->rport->fw_handle;
458	m->t_secs = tskim->tsecs;
459	m->lun = tskim->lun;
460	m->tm_flags = tskim->tm_cmnd;
461
462	/**
463	 * queue I/O message to firmware
464	 */
465	bfa_reqq_produce(tskim->bfa, itnim->reqq);
466	return BFA_TRUE;
467}
468
469/**
470 * Send abort request to cleanup an active TM to firmware.
 *
 * Same queue-full contract as bfa_tskim_send(): BFA_FALSE if no CQ
 * entry was available.
471 */
472static bfa_boolean_t
473bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
474{
475	struct bfa_itnim_s	*itnim = tskim->itnim;
476	struct bfi_tskim_abortreq_s	*m;
477
478	/**
479	 * check for room in queue to send request now
480	 */
481	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
482	if (!m)
483		return BFA_FALSE;
484
485	/**
486	 * build i/o request message next
487	 */
488	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
489			bfa_lpuid(tskim->bfa));
490
491	m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);
492
493	/**
494	 * queue I/O message to firmware
495	 */
496	bfa_reqq_produce(tskim->bfa, itnim->reqq);
497	return BFA_TRUE;
498}
499
500/**
501 * Call to resume task management cmnd waiting for room in request queue.
 *
 * Request-queue wait callback registered in bfa_tskim_attach().
502 */
503static void
504bfa_tskim_qresume(void *cbarg)
505{
506	struct bfa_tskim_s *tskim = cbarg;
507
508	bfa_fcpim_stats(tskim->fcpim, qresumes);
509	bfa_stats(tskim->itnim, tm_qresumes);
510	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
511}
512
513/**
514 * Cleanup IOs associated with a task management command on IOC failures.
515 */
516static void
517bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
518{
519	struct bfa_ioim_s *ioim;
520	struct list_head	*qe, *qen;
521
522	list_for_each_safe(qe, qen, &tskim->io_q) {
523		ioim = (struct bfa_ioim_s *) qe;
524		bfa_ioim_iocdisable(ioim);
525	}
526}
527
528
529
530/**
531 * bfa_tskim_friend
532 */
533
534/**
535 * Notification on completions from related ioim.
 *
 * Counts one IO-cleanup completion against the wait counter armed in
 * bfa_tskim_cleanup_ios().
536 */
537void
538bfa_tskim_iodone(struct bfa_tskim_s *tskim)
539{
540	bfa_wc_down(&tskim->wc);
541}
542
543/**
544 * Handle IOC h/w failure notification from itnim.
 *
 * Clears ->notify so no itnim completion notification is sent, then
 * drives the HWFAIL event.
545 */
546void
547bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
548{
549	tskim->notify = BFA_FALSE;
550	bfa_stats(tskim->itnim, tm_iocdowns);
551	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
552}
553
554/**
555 * Cleanup TM command and associated IOs as part of ITNIM offline.
 *
 * Sets ->notify so the itnim is told when this TM finally completes.
556 */
557void
558bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
559{
560	tskim->notify = BFA_TRUE;
561	bfa_stats(tskim->itnim, tm_cleanups);
562	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
563}
564
565/**
566 * 	Memory allocation and initialization.
 *
 * Carves num_tskim_reqs tskim structures out of the meminfo KVA region,
 * initializes each (tag == array index, required by the lookup in
 * bfa_tskim_isr()) and places them on the free queue.
567 */
568void
569bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
570{
571	struct bfa_tskim_s *tskim;
572	u16	i;
573
574	INIT_LIST_HEAD(&fcpim->tskim_free_q);
575
576	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
577	fcpim->tskim_arr = tskim;
578
579	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
580		/*
581		 * initialize TSKIM
582		 */
583		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
584		tskim->tsk_tag = i;
585		tskim->bfa	= fcpim->bfa;
586		tskim->fcpim	= fcpim;
587		tskim->notify  = BFA_FALSE;
588		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
589				   tskim);
590		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
591
592		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
593	}
594
	/* advance the meminfo KVA cursor past the claimed array */
595	bfa_meminfo_kva(minfo) = (u8 *) tskim;
596}
597
/* Module detach: nothing to release yet (memory comes from meminfo). */
598void
599bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
600{
601	/**
602	* @todo
603	*/
604}
605
/**
 * Firmware response handler for TM messages: look up the tskim by its
 * tag (array index, see bfa_tskim_attach), record the firmware status,
 * and dispatch either a cleanup-done or normal-done SM event.
 */
606void
607bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
608{
609	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
610	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
611	struct bfa_tskim_s *tskim;
612	u16	tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
613
614	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
615	bfa_assert(tskim->tsk_tag == tsk_tag);
616
617	tskim->tsk_status = rsp->tsk_status;
618
619	/**
620	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
621	 * requests. All other statuses are for normal completions.
622	 */
623	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
624		bfa_stats(tskim->itnim, tm_cleanup_comps);
625		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
626	} else {
627		bfa_stats(tskim->itnim, tm_fw_rsps);
628		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
629	}
630}
631
632
633
634/**
635 * bfa_tskim_api
636 */
637
638
/**
 * Allocate a tskim from the free queue and bind it to the driver-layer
 * task (dtsk). Returns NULL (after bumping the no_tskims stat) when the
 * pool is exhausted.
 */
639struct bfa_tskim_s *
640bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
641{
642	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
643	struct bfa_tskim_s *tskim;
644
645	bfa_q_deq(&fcpim->tskim_free_q, &tskim);
646
647	if (!tskim)
648		bfa_fcpim_stats(fcpim, no_tskims);
649	else
650		tskim->dtsk = dtsk;
651
652	return tskim;
653}
654
/**
 * Return a tskim to the free pool; it must still be linked on its
 * itnim's task queue (asserted) and is unlinked here.
 */
655void
656bfa_tskim_free(struct bfa_tskim_s *tskim)
657{
658	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
659	list_del(&tskim->qe);
660	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
661}
662
663/**
664 * Start a task management command.
665 *
666 * @param[in]	tskim	BFA task management command instance
667 * @param[in]	itnim	i-t nexus for the task management command
668 * @param[in]	lun	lun, if applicable
669 * @param[in]	tm_cmnd	Task management command code.
670 * @param[in]	t_secs	Timeout in seconds
671 *
672 * @return None.
673 */
674void
675bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
676		    enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
677{
678	tskim->itnim	= itnim;
679	tskim->lun	= lun;
680	tskim->tm_cmnd = tm_cmnd;
681	tskim->tsecs	= tsecs;
682	tskim->notify  = BFA_FALSE;
683	bfa_stats(itnim, tm_cmnds);
684
	/* track on the itnim until freed by bfa_tskim_free() */
685	list_add_tail(&tskim->qe, &itnim->tsk_q);
686	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
687}
688
689
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
new file mode 100644
index 000000000000..ff5f9deb1b22
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_uf.c
@@ -0,0 +1,345 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_uf.c BFA unsolicited frame receive implementation
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_uf.h>
25#include <cs/bfa_debug.h>
26
27BFA_TRC_FILE(HAL, UF);
28BFA_MODULE(uf);
29
30/*
31 *****************************************************************************
32 * Internal functions
33 *****************************************************************************
34 */
35static void
36__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
37{
38 struct bfa_uf_s *uf = cbarg;
39 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
40
41 if (complete)
42 ufm->ufrecv(ufm->cbarg, uf);
43}
44
/*
 * Carve out the DMA-able region that holds the actual UF payload
 * buffers.  Records both the kernel-virtual and physical base addresses
 * in @ufm, then advances the meminfo DMA cursors past the claimed
 * (alignment-rounded) region.  Must run before claim_ufs() and
 * claim_uf_post_msgs(), which consume these addresses.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
				   BFA_DMA_ALIGN_SZ);

	/* virtual and physical cursors must advance in lock step */
	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
60
61static void
62claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
63{
64 struct bfi_uf_buf_post_s *uf_bp_msg;
65 struct bfi_sge_s *sge;
66 union bfi_addr_u sga_zero = { {0} };
67 u16 i;
68 u16 buf_len;
69
70 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
71 uf_bp_msg = ufm->uf_buf_posts;
72
73 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
74 i++, uf_bp_msg++) {
75 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
76
77 uf_bp_msg->buf_tag = i;
78 buf_len = sizeof(struct bfa_uf_buf_s);
79 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
80 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
81 bfa_lpuid(ufm->bfa));
82
83 sge = uf_bp_msg->sge;
84 sge[0].sg_len = buf_len;
85 sge[0].flags = BFI_SGE_DATA_LAST;
86 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
87 bfa_sge_to_be(sge);
88
89 sge[1].sg_len = buf_len;
90 sge[1].flags = BFI_SGE_PGDLEN;
91 sge[1].sga = sga_zero;
92 bfa_sge_to_be(&sge[1]);
93 }
94
95 /**
96 * advance pointer beyond consumed memory
97 */
98 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
99}
100
101static void
102claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
103{
104 u16 i;
105 struct bfa_uf_s *uf;
106
107 /*
108 * Claim block of memory for UF list
109 */
110 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
111
112 /*
113 * Initialize UFs and queue it in UF free queue
114 */
115 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
116 bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
117 uf->bfa = ufm->bfa;
118 uf->uf_tag = i;
119 uf->pb_len = sizeof(struct bfa_uf_buf_s);
120 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
121 uf->buf_pa = ufm_pbs_pa(ufm, i);
122 list_add_tail(&uf->qe, &ufm->uf_free_q);
123 }
124
125 /**
126 * advance memory pointer
127 */
128 bfa_meminfo_kva(mi) = (u8 *) uf;
129}
130
/*
 * Claim all memory for the UF module.  Order matters: claim_uf_pbs()
 * must run first because claim_ufs() reads ufm->uf_pbs_kva/uf_pbs_pa
 * and claim_uf_post_msgs() uses ufm_pbs_pa() for its SG entries.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
138
/*
 * Report the DMA (*dm_len) and kernel-virtual (*ndm_len) memory the UF
 * module needs for the configured number of unsolicited-frame buffers.
 * Both counters are accumulated, not overwritten.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
			       BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
156
/*
 * Module attach: reset UF module state, record the configured UF count
 * and claim the module's memory.  @bfad and @pcidev are unused here but
 * are part of the common BFA module-attach signature.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	      struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
171
/* Nothing to do at init-done time; stub required by BFA_MODULE(uf). */
static void
bfa_uf_initdone(struct bfa_s *bfa)
{
}
176
/* Nothing to tear down; all UF memory is meminfo-owned.  Stub required
 * by BFA_MODULE(uf). */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
181
182static struct bfa_uf_s *
183bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
184{
185 struct bfa_uf_s *uf;
186
187 bfa_q_deq(&uf_mod->uf_free_q, &uf);
188 return (uf);
189}
190
/*
 * Return a UF descriptor to the free queue.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
196
197static bfa_status_t
198bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
199{
200 struct bfi_uf_buf_post_s *uf_post_msg;
201
202 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
203 if (!uf_post_msg)
204 return BFA_STATUS_FAILED;
205
206 bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
207 sizeof(struct bfi_uf_buf_post_s));
208 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
209
210 bfa_trc(ufm->bfa, uf->uf_tag);
211
212 list_add_tail(&uf->qe, &ufm->uf_posted_q);
213 return BFA_STATUS_OK;
214}
215
216static void
217bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
218{
219 struct bfa_uf_s *uf;
220
221 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
222 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
223 break;
224 }
225}
226
227static void
228uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
229{
230 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
231 u16 uf_tag = m->buf_tag;
232 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
233 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
234 u8 *buf = &uf_buf->d[0];
235 struct fchs_s *fchs;
236
237 m->frm_len = bfa_os_ntohs(m->frm_len);
238 m->xfr_len = bfa_os_ntohs(m->xfr_len);
239
240 fchs = (struct fchs_s *) uf_buf;
241
242 list_del(&uf->qe); /* dequeue from posted queue */
243
244 uf->data_ptr = buf;
245 uf->data_len = m->xfr_len;
246
247 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
248
249 if (uf->data_len == sizeof(struct fchs_s)) {
250 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
251 uf->data_len, (struct fchs_s *) buf);
252 } else {
253 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
254 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
255 BFA_PL_EID_RX, uf->data_len,
256 (struct fchs_s *) buf, pld_w0);
257 }
258
259 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
260}
261
/* Nothing to quiesce for UF; stub required by BFA_MODULE(uf). */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
266
267static void
268bfa_uf_iocdisable(struct bfa_s *bfa)
269{
270 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
271 struct bfa_uf_s *uf;
272 struct list_head *qe, *qen;
273
274 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
275 uf = (struct bfa_uf_s *) qe;
276 list_del(&uf->qe);
277 bfa_uf_put(ufm, uf);
278 }
279}
280
/* Module start: hand all free UF buffers to the firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
286
287
288
289/**
290 * bfa_uf_api
291 */
292
293/**
 * Register handler for all unsolicited receive frames.
295 *
296 * @param[in] bfa BFA instance
297 * @param[in] ufrecv receive handler function
298 * @param[in] cbarg receive handler arg
299 */
300void
301bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
302{
303 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
304
305 ufm->ufrecv = ufrecv;
306 ufm->cbarg = cbarg;
307}
308
309/**
310 * Free an unsolicited frame back to BFA.
311 *
312 * @param[in] uf unsolicited frame to be freed
313 *
314 * @return None
315 */
316void
317bfa_uf_free(struct bfa_uf_s *uf)
318{
319 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
320 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
321}
322
323
324
325/**
326 * uf_pub BFA uf module public functions
327 */
328
/*
 * UF message dispatch, invoked for every BFI_MC_UF message from the
 * firmware.  Only BFI_UF_I2H_FRM_RCVD is expected; anything else is a
 * protocol error and asserts.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
344
345
diff --git a/drivers/scsi/bfa/bfa_uf_priv.h b/drivers/scsi/bfa/bfa_uf_priv.h
new file mode 100644
index 000000000000..bcb490f834f3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_uf_priv.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_UF_PRIV_H__
18#define __BFA_UF_PRIV_H__
19
20#include <cs/bfa_sm.h>
21#include <bfa_svc.h>
22#include <bfi/bfi_uf.h>
23
24#define BFA_UF_MIN (4)
25
/*
 * Per-BFA unsolicited-frame (UF) module state.  uf_list and uf_pbs_kva
 * are parallel arrays indexed by uf_tag.
 */
struct bfa_uf_mod_s {
	struct bfa_s *bfa;		/* back pointer to BFA */
	struct bfa_uf_s *uf_list;	/* array of UFs, indexed by uf_tag */
	u16 num_ufs;			/* num unsolicited rx frames */
	struct list_head uf_free_q;	/* free UFs */
	struct list_head uf_posted_q;	/* UFs posted to IOC */
	struct bfa_uf_buf_s *uf_pbs_kva;	/* list UF bufs request pld */
	u64 uf_pbs_pa;			/* phy addr for UF bufs */
	struct bfi_uf_buf_post_s *uf_buf_posts;
					/* pre-built UF post msgs */
	bfa_cb_uf_recv_t ufrecv;	/* uf recv handler function */
	void *cbarg;			/* uf receive handler arg */
};
39
40#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
41
42#define ufm_pbs_pa(_ufmod, _uftag) \
43 ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
44
45void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
46
47#endif /* __BFA_UF_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
new file mode 100644
index 000000000000..6f2be5abf561
--- /dev/null
+++ b/drivers/scsi/bfa/bfad.c
@@ -0,0 +1,1182 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad.c Linux driver PCI interface module.
20 */
21
22#include <linux/module.h>
23#include "bfad_drv.h"
24#include "bfad_im.h"
25#include "bfad_tm.h"
26#include "bfad_ipfc.h"
27#include "bfad_trcmod.h"
28#include <fcb/bfa_fcb_vf.h>
29#include <fcb/bfa_fcb_rport.h>
30#include <fcb/bfa_fcb_port.h>
31#include <fcb/bfa_fcb.h>
32
33BFA_TRC_FILE(LDRV, BFAD);
34static DEFINE_MUTEX(bfad_mutex);
35LIST_HEAD(bfad_list);
36static int bfad_inst;
37int bfad_supported_fc4s;
38
39static char *host_name;
40static char *os_name;
41static char *os_patch;
42static int num_rports;
43static int num_ios;
44static int num_tms;
45static int num_fcxps;
46static int num_ufbufs;
47static int reqq_size;
48static int rspq_size;
49static int num_sgpgs;
50static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
51static int bfa_io_max_sge = BFAD_IO_MAX_SGE;
52static int log_level = BFA_LOG_WARNING;
53static int ioc_auto_recover = BFA_TRUE;
54static int ipfc_enable = BFA_FALSE;
55static int ipfc_mtu = -1;
56int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
57int bfa_linkup_delay = -1;
58
59module_param(os_name, charp, S_IRUGO | S_IWUSR);
60module_param(os_patch, charp, S_IRUGO | S_IWUSR);
61module_param(host_name, charp, S_IRUGO | S_IWUSR);
62module_param(num_rports, int, S_IRUGO | S_IWUSR);
63module_param(num_ios, int, S_IRUGO | S_IWUSR);
64module_param(num_tms, int, S_IRUGO | S_IWUSR);
65module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
66module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
67module_param(reqq_size, int, S_IRUGO | S_IWUSR);
68module_param(rspq_size, int, S_IRUGO | S_IWUSR);
69module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
70module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
71module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
72module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
73module_param(log_level, int, S_IRUGO | S_IWUSR);
74module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
75module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
76module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
77module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
78
79/*
80 * Stores the module parm num_sgpgs value;
81 * used to reset for bfad next instance.
82 */
83static int num_sgpgs_parm;
84
85static bfa_status_t
86bfad_fc4_probe(struct bfad_s *bfad)
87{
88 int rc;
89
90 rc = bfad_im_probe(bfad);
91 if (rc != BFA_STATUS_OK)
92 goto ext;
93
94 bfad_tm_probe(bfad);
95
96 if (ipfc_enable)
97 bfad_ipfc_probe(bfad);
98ext:
99 return rc;
100}
101
/*
 * Undo bfad_fc4_probe(): tear down IM, TM and (if enabled) IPFC.
 */
static void
bfad_fc4_probe_undo(struct bfad_s *bfad)
{
	bfad_im_probe_undo(bfad);
	bfad_tm_probe_undo(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_undo(bfad);
}
110
/*
 * Post-probe hook for each FC-4 module.  The IM hook runs only when IM
 * probing actually created bfad->im.
 */
static void
bfad_fc4_probe_post(struct bfad_s *bfad)
{
	if (bfad->im)
		bfad_im_probe_post(bfad->im);

	bfad_tm_probe_post(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_post(bfad);
}
121
/*
 * Create the per-FC-4 driver port objects requested by @roles.
 *
 * NOTE(review): rc starts as BFA_STATUS_FAILED, so a role mask WITHOUT
 * BFA_PORT_ROLE_FCP_IM bails out at the first check and never creates
 * the TM/IPFC ports — confirm callers always include the IM role.
 */
static bfa_status_t
bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	int rc = BFA_STATUS_FAILED;

	if (roles & BFA_PORT_ROLE_FCP_IM)
		rc = bfad_im_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if (roles & BFA_PORT_ROLE_FCP_TM)
		rc = bfad_tm_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		rc = bfad_ipfc_port_new(bfad, port, port->pvb_type);
ext:
	return rc;
}
142
/*
 * Delete the per-FC-4 driver port objects named in @roles (inverse of
 * bfad_fc4_port_new).
 */
static void
bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_delete(bfad, port);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_delete(bfad, port);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_delete(bfad, port);
}
155
156/**
157 * BFA callbacks
158 */
159void
160bfad_hcb_comp(void *arg, bfa_status_t status)
161{
162 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
163
164 fcomp->status = status;
165 complete(&fcomp->comp);
166}
167
168/**
169 * bfa_init callback
170 */
/**
 * bfa_init completion callback: records whether HAL initialization
 * succeeded and wakes the thread waiting in bfad_drv_init().
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK)
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

	complete(&bfad->comp);
}
181
182
183
184/**
185 * BFA_FCS callbacks
186 */
187static struct bfad_port_s *
188bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
189 struct bfad_vport_s *vp_drv)
190{
191 return ((vp_drv) ? (&(vp_drv)->drv_port)
192 : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)));
193}
194
/*
 * FCS callback: a new logical port came into existence.  Select the
 * driver port object matching the (vf, vport) combination, tag it with
 * its physical/virtual flavor, and create the FC-4 sub-ports.
 * Returns NULL when FC-4 port creation fails.
 */
struct bfad_port_s *
bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port,
		 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t rc;
	struct bfad_port_s *port_drv;

	if (!vp_drv && !vf_drv) {
		/* base port of the physical adapter */
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;
	rc = bfad_fc4_port_new(bfad, port_drv, roles);
	if (rc != BFA_STATUS_OK) {
		/* roll back any sub-ports that were created */
		bfad_fc4_port_delete(bfad, port_drv, roles);
		port_drv = NULL;
	}

	return port_drv;
}
227
/*
 * FCS callback: a logical port is going away.  Vport ports with a
 * pending delete-completion are handled by bfa_fcb_vport_delete()
 * instead.
 */
void
bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv;

	/*
	 * this will be only called from rmmod context
	 */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
		bfa_trc(bfad, roles);
		bfad_fc4_port_delete(bfad, port_drv, roles);
	}
}
243
/*
 * FCS callback: the port's link came online.  Notify each configured
 * FC-4 module and mark the driver online.
 */
void
bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_online(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_online(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_online(bfad, port_drv);

	bfad->bfad_flags |= BFAD_PORT_ONLINE;
}
261
/*
 * FCS callback: the port's link went offline.  Mirror of
 * bfa_fcb_port_online() — notify each configured FC-4 module.
 */
void
bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
		     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_offline(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_offline(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_offline(bfad, port_drv);
}
277
278void
279bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
280{
281 if (vport_drv->comp_del) {
282 complete(vport_drv->comp_del);
283 return;
284 }
285
286 kfree(vport_drv);
287}
288
289/**
290 * FCS RPORT alloc callback, after successful PLOGI by FCS
291 */
292bfa_status_t
293bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
294 struct bfad_rport_s **rport_drv)
295{
296 bfa_status_t rc = BFA_STATUS_OK;
297
298 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
299 if (*rport_drv == NULL) {
300 rc = BFA_STATUS_ENOMEM;
301 goto ext;
302 }
303
304 *rport = &(*rport_drv)->fcs_rport;
305
306ext:
307 return rc;
308}
309
310
311
/*
 * Free every memory element claimed for the HAL (inverse of
 * bfad_hal_mem_alloc) and reset the meminfo bookkeeping to zero so a
 * retry can start clean.
 */
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			/* free with the allocator matching the type */
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
						  meminfo_elem->mem_len,
						  meminfo_elem->kva,
						  (dma_addr_t) meminfo_elem->dma);
				break;
			default:
				bfa_assert(0);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}
341
/*
 * Overlay any module parameters the user supplied (value > 0) onto the
 * default IOC configuration, then copy the effective values back into
 * the module parameters so sysfs shows the real settings.
 */
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}
375
/*
 * Allocate every memory element the HAL asks for (vmalloc for
 * kernel-virtual elements, dma_alloc_coherent for DMA elements).
 *
 * If a DMA allocation fails, everything is released and the attempt is
 * retried with a reduced num_sgpgs until it drops to min_num_sgpgs;
 * the original num_sgpgs is restored (from num_sgpgs_parm or by the
 * retry-count scaling) before returning, so the next HBA instance
 * starts from the user's setting.
 */
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	bfa_status_t rc = BFA_STATUS_OK;
	dma_addr_t phys_addr;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;	/* floor below which we give up */
	void *kva;
	int i;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					&phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with default
				 * num_sgpages try with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO "bfad[%d]: memory"
					       " allocation failed with"
					       " num_sgpgs: %d\n",
					       bfad->inst_no, num_sgpgs);
					/* presumably halves num_sgpgs —
					 * TODO confirm nextLowerInt() */
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO "bfad[%d]: trying to"
					       " allocate memory with"
					       " num_sgpgs: %d\n",
					       bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					/* restore the user's value for the
					 * next driver instance */
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			/* success: also restore num_sgpgs (see above) */
			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;

		}
	}
ext:
	return rc;
}
463
464/**
465 * Create a vport under a vf.
466 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg)
{
	struct bfad_vport_s *vport;
	int rc = BFA_STATUS_OK;
	unsigned long flags;
	struct completion fcomp;	/* on-stack: only used for sync delete below */

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	/* FCS calls must run under the bfad lock */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
		/* host alloc may sleep, so it runs outside the lock */
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	/* synchronous delete: comp_del makes bfa_fcb_vport_delete()
	 * signal us instead of freeing the vport itself */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}
515
516/**
 * Create a vf and its base vport implicitly.
518 */
519bfa_status_t
520bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
521 struct bfa_port_cfg_s *port_cfg)
522{
523 struct bfad_vf_s *vf;
524 int rc = BFA_STATUS_OK;
525
526 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
527 if (!vf) {
528 rc = BFA_STATUS_FAILED;
529 goto ext;
530 }
531
532 rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
533 vf);
534 if (rc != BFA_STATUS_OK)
535 kfree(vf);
536ext:
537 return rc;
538}
539
/*
 * Periodic HAL timer: drives BFA's internal timers, then processes any
 * completions they produced outside the lock, and finally re-arms
 * itself for the next BFA_TIMER_FREQ interval.
 */
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s *bfad = (struct bfad_s *)data;
	unsigned long flags;
	struct list_head doneq;		/* initialized by bfa_comp_deq — TODO confirm */

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_tick(&bfad->bfa);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		/* callbacks run unlocked; entries are freed under the lock */
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
563
/*
 * Arm the periodic HAL timer (legacy init_timer API); it re-arms itself
 * in bfad_bfa_tmo() until del_timer_sync() in bfad_drv_uninit().
 */
void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
573
/*
 * Bring up the PCI side of one HBA: enable the device, claim its
 * regions, pick a DMA mask (64-bit with 32-bit fallback), map BAR0 and
 * fill in the HAL pcidev/attr descriptors.  Uses the standard goto
 * cleanup chain; returns 0 or -ENODEV.
 */
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	unsigned long bar0_len;
	int rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);


	/* prefer 64-bit DMA, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0_len = pci_resource_len(pdev, 0);
	bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);

	if (bfad->pci_bar0_kva == NULL) {
		BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
		goto out_release_region;
	}

	/* descriptor consumed by the BFA HAL */
	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	/* identity reported through sysfs/ioctls */
	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;
	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}
628
/*
 * Undo bfad_pci_init(): unmap BAR0, release regions, disable the
 * device.  NOTE(review): bar0 was mapped with plain ioremap() but is
 * unmapped with pci_iounmap() on ia64 — verify the asymmetry is
 * intentional on that platform.
 */
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
#if defined(__ia64__)
	pci_iounmap(pdev, bfad->pci_bar0_kva);
#else
	iounmap(bfad->pci_bar0_kva);
#endif
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
641
642void
643bfad_fcs_port_cfg(struct bfad_s *bfad)
644{
645 struct bfa_port_cfg_s port_cfg;
646 struct bfa_pport_attr_s attr;
647 char symname[BFA_SYMNAME_MAXLEN];
648
649 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
650 memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
651 bfa_pport_get_attr(&bfad->bfa, &attr);
652 port_cfg.nwwn = attr.nwwn;
653 port_cfg.pwwn = attr.pwwn;
654
655 bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg);
656}
657
658bfa_status_t
659bfad_drv_init(struct bfad_s *bfad)
660{
661 bfa_status_t rc;
662 unsigned long flags;
663 struct bfa_fcs_driver_info_s driver_info;
664 int i;
665
666 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
667 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
668 bfad->cfg_data.io_max_sge = bfa_io_max_sge;
669 bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
670
671 rc = bfad_hal_mem_alloc(bfad);
672 if (rc != BFA_STATUS_OK) {
673 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
674 bfad->inst_no);
675 printk(KERN_WARNING
676 "Not enough memory to attach all Brocade HBA ports,"
677 " System may need more memory.\n");
678 goto out_hal_mem_alloc_failure;
679 }
680
681 bfa_init_log(&bfad->bfa, bfad->logmod);
682 bfa_init_trc(&bfad->bfa, bfad->trcmod);
683 bfa_init_aen(&bfad->bfa, bfad->aen);
684 INIT_LIST_HEAD(&bfad->file_q);
685 INIT_LIST_HEAD(&bfad->file_free_q);
686 for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
687 bfa_q_qe_init(&bfad->file_buf[i].qe);
688 list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
689 }
690 bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
691 bfa_plog_init(&bfad->plog_buf);
692 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
693 0, "Driver Attach");
694
695 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
696 &bfad->hal_pcidev);
697
698 init_completion(&bfad->comp);
699
700 /*
701 * Enable Interrupt and wait bfa_init completion
702 */
703 if (bfad_setup_intr(bfad)) {
704 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
705 bfad->inst_no);
706 goto out_setup_intr_failure;
707 }
708
709 spin_lock_irqsave(&bfad->bfad_lock, flags);
710 bfa_init(&bfad->bfa);
711 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
712
713 /*
714 * Set up interrupt handler for each vectors
715 */
716 if ((bfad->bfad_flags & BFAD_MSIX_ON)
717 && bfad_install_msix_handler(bfad)) {
718 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
719 __FUNCTION__, bfad->inst_no);
720 }
721
722 bfad_init_timer(bfad);
723
724 wait_for_completion(&bfad->comp);
725
726 memset(&driver_info, 0, sizeof(driver_info));
727 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
728 sizeof(driver_info.version) - 1);
729 if (host_name)
730 strncpy(driver_info.host_machine_name, host_name,
731 sizeof(driver_info.host_machine_name) - 1);
732 if (os_name)
733 strncpy(driver_info.host_os_name, os_name,
734 sizeof(driver_info.host_os_name) - 1);
735 if (os_patch)
736 strncpy(driver_info.host_os_patch, os_patch,
737 sizeof(driver_info.host_os_patch) - 1);
738
739 strncpy(driver_info.os_device_name, bfad->pci_name,
740 sizeof(driver_info.os_device_name - 1));
741
742 /*
743 * FCS INIT
744 */
745 spin_lock_irqsave(&bfad->bfad_lock, flags);
746 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
747 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
748 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
749 bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
750 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
751 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
752
753 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
754 return BFA_STATUS_OK;
755
756out_setup_intr_failure:
757 bfa_detach(&bfad->bfa);
758 bfad_hal_mem_release(bfad);
759out_hal_mem_alloc_failure:
760 return BFA_STATUS_FAILED;
761}
762
/*
 * Inverse of bfad_drv_init(): stop the timer, quiesce and detach the
 * BFA, release interrupts and HAL memory.  All AEN file buffers must
 * have been returned by now (asserted).
 */
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfa_assert(list_empty(&bfad->file_q));
	bfad_hal_mem_release(bfad);
}
773
/*
 * Start HAL and FCS operation (under the bfad lock), then run the
 * FC-4 post-probe hooks.
 */
void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_start(&bfad->bfa);
	bfa_fcs_start(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad_fc4_probe_post(bfad);
}
787
/*
 * Stop the driver in two synchronous phases, each waiting on
 * bfad->comp: first shut down the FCS (logical ports), then stop the
 * HAL.
 */
void
bfad_drv_stop(struct bfad_s *bfad)
{
	unsigned long flags;

	/* phase 1: FCS exit */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	/* phase 2: HAL stop */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);
}
807
808bfa_status_t
809bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
810{
811 int rc = BFA_STATUS_OK;
812
813 /*
814 * Allocate scsi_host for the physical port
815 */
816 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
817 && (role & BFA_PORT_ROLE_FCP_IM)) {
818 if (bfad->pport.im_port == NULL) {
819 rc = BFA_STATUS_FAILED;
820 goto out;
821 }
822
823 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port);
824 if (rc != BFA_STATUS_OK)
825 goto out;
826
827 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM;
828 }
829
830 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
831
832out:
833 return rc;
834}
835
836void
837bfad_uncfg_pport(struct bfad_s *bfad)
838{
839 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) {
840 bfad_ipfc_port_delete(bfad, &bfad->pport);
841 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
842 }
843
844 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
845 && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) {
846 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
847 bfad_im_port_clean(bfad->pport.im_port);
848 kfree(bfad->pport.im_port);
849 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM;
850 }
851
852 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
853}
854
855void
856bfad_drv_log_level_set(struct bfad_s *bfad)
857{
858 if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX)
859 bfa_log_set_level_all(&bfad->log_data, log_level);
860}
861
/*
 *  PCI driver entry points (probe/remove).
 */
865
/**
 * PCI probe entry point: bring up one adapter instance.
 *
 * Allocates the per-instance bfad_s, initializes trace/log facilities,
 * loads the firmware image, maps PCI resources, runs driver/HAL init,
 * configures the physical port, registers the FC4 resources and starts
 * I/O.  Any failure unwinds everything done so far via the goto ladder
 * at the bottom.  Returns 0 on success, negative errno on failure.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s *bfad;
	int error = -ENODEV, retval;
	char buf[16];

	/*
	 * For single port cards - only claim function 0
	 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P)
	    && (PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	BFA_TRACE(BFA_INFO, "bfad_pci_probe entry");

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/*
	 * LOG/TRACE INIT
	 */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	bfad->logmod = &bfad->log_data;
	/* the log instance name is simply the decimal instance number */
	sprintf(buf, "%d", bfad_inst);
	bfa_log_init(bfad->logmod, buf, bfa_os_printf);

	bfad_drv_log_level_set(bfad);

	bfad->aen = &bfad->aen_buf;

	if (!(bfad_load_fwimg(pdev))) {
		/* NOTE(review): error is still -ENODEV on this path */
		printk(KERN_WARNING "bfad_load_fwimg failure!\n");
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	/* publish this instance on the global adapter list */
	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;
	if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
		/*
		 * HAL init failed but driver-level init succeeded: keep
		 * the instance around (probe still returns 0) so the
		 * device can be inspected/diagnosed.
		 */
		printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
		goto ok;
	}

	/*
	 * PPORT FCS config
	 */
	bfad_fcs_port_cfg(bfad);

	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK)
		goto out_cfg_pport_failure;

	/*
	 * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
	 */
	retval = bfad_fc4_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_fc4_probe failed\n");
		goto out_fc4_probe_failure;
	}

	bfad_drv_start(bfad);

	/*
	 * If bfa_linkup_delay is set to -1 default; try to retrieve the
	 * value using the bfad_os_get_linkup_delay(); else use the
	 * passed in module param value as the bfa_linkup_delay.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
		bfad_os_rport_online_wait(bfad);
		bfa_linkup_delay = -1;	/* restore "auto" for the next probe */
	} else {
		bfad_os_rport_online_wait(bfad);
	}

	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
ok:
	return 0;

	/* error unwind: reverse order of the setup steps above */
out_fc4_probe_failure:
	bfad_fc4_probe_undo(bfad);
	bfad_uncfg_pport(bfad);
out_cfg_pport_failure:
	bfad_drv_uninit(bfad);
out_drv_init_failure:
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}
999
/**
 * PCI remove entry point: tear down one adapter instance.
 *
 * The amount of teardown needed depends on how far probe got, which is
 * tracked in bfad->bfad_flags.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s *bfad = pci_get_drvdata(pdev);
	unsigned long flags;

	bfa_trc(bfad, bfad->inst_no);

	if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
	    && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
		/*
		 * Driver init finished but HAL init failed: only the HAL
		 * must be stopped and detached; skip FC4/pport teardown.
		 */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		init_completion(&bfad->comp);
		bfa_stop(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		wait_for_completion(&bfad->comp);

		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		goto hal_detach;
	} else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) {
		/* probe did only bookkeeping; just unwind that */
		goto remove_sysfs;
	}

	/* fully initialized adapter: stop I/O first if it was started */
	if (bfad->bfad_flags & BFAD_HAL_START_DONE)
		bfad_drv_stop(bfad);

	bfad_remove_intr(bfad);

	del_timer_sync(&bfad->hal_tmo);
	bfad_fc4_probe_undo(bfad);

	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
		bfad_uncfg_pport(bfad);

hal_detach:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);
remove_sysfs:
	/* drop the instance from the global list, release PCI resources */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}
1054
1055
1056static struct pci_device_id bfad_id_table[] = {
1057 {
1058 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1059 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1060 .subvendor = PCI_ANY_ID,
1061 .subdevice = PCI_ANY_ID,
1062 },
1063 {
1064 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1065 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1066 .subvendor = PCI_ANY_ID,
1067 .subdevice = PCI_ANY_ID,
1068 },
1069 {
1070 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1071 .device = BFA_PCI_DEVICE_ID_CT,
1072 .subvendor = PCI_ANY_ID,
1073 .subdevice = PCI_ANY_ID,
1074 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1075 .class_mask = ~0,
1076 },
1077
1078 {0, 0},
1079};
1080
1081MODULE_DEVICE_TABLE(pci, bfad_id_table);
1082
/* PCI driver glue: ID match table plus the probe/remove callbacks. */
static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};
1089
1090/**
1091 * Linux driver module functions
1092 */
1093bfa_status_t
1094bfad_fc4_module_init(void)
1095{
1096 int rc;
1097
1098 rc = bfad_im_module_init();
1099 if (rc != BFA_STATUS_OK)
1100 goto ext;
1101
1102 bfad_tm_module_init();
1103 if (ipfc_enable)
1104 bfad_ipfc_module_init();
1105ext:
1106 return rc;
1107}
1108
/* Unregister the FC4 modules in reverse order of bfad_fc4_module_init(). */
void
bfad_fc4_module_exit(void)
{
	if (ipfc_enable)
		bfad_ipfc_module_exit();
	bfad_tm_module_exit();
	bfad_im_module_exit();
}
1117
1118/**
1119 * Driver module init.
1120 */
1121static int __init
1122bfad_init(void)
1123{
1124 int error = 0;
1125
1126 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1127 BFAD_DRIVER_VERSION);
1128
1129 if (num_sgpgs > 0)
1130 num_sgpgs_parm = num_sgpgs;
1131
1132 error = bfad_fc4_module_init();
1133 if (error) {
1134 error = -ENOMEM;
1135 printk(KERN_WARNING "bfad_fc4_module_init failure\n");
1136 goto ext;
1137 }
1138
1139 if (!strcmp(FCPI_NAME, " fcpim"))
1140 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM;
1141 if (!strcmp(FCPT_NAME, " fcptm"))
1142 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM;
1143 if (!strcmp(IPFC_NAME, " ipfc"))
1144 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC;
1145
1146 bfa_ioc_auto_recover(ioc_auto_recover);
1147 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1148 error = pci_register_driver(&bfad_pci_driver);
1149
1150 if (error) {
1151 printk(KERN_WARNING "bfad pci_register_driver failure\n");
1152 goto ext;
1153 }
1154
1155 return 0;
1156
1157ext:
1158 bfad_fc4_module_exit();
1159 return error;
1160}
1161
/**
 * Driver module exit: unregister the PCI driver and FC4 modules, then
 * release the firmware image loaded at probe time.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_fc4_module_exit();
	bfad_free_fwimg();
}
1172
/* FC4 protocols compiled into this build, appended to the description. */
#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);
1181
1182
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
new file mode 100644
index 000000000000..9129ae3040ff
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -0,0 +1,649 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_attr.c Linux driver configuration interface module.
20 */
21
22#include "bfad_drv.h"
23#include "bfad_im.h"
24#include "bfad_trcmod.h"
25#include "bfad_attr.h"
26
27/**
28 * FC_transport_template FC transport template
29 */
30
31/**
32 * FC transport template entry, get SCSI target port ID.
33 */
34void
35bfad_im_get_starget_port_id(struct scsi_target *starget)
36{
37 struct Scsi_Host *shost;
38 struct bfad_im_port_s *im_port;
39 struct bfad_s *bfad;
40 struct bfad_itnim_s *itnim = NULL;
41 u32 fc_id = -1;
42 unsigned long flags;
43
44 shost = bfad_os_starget_to_shost(starget);
45 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
46 bfad = im_port->bfad;
47 spin_lock_irqsave(&bfad->bfad_lock, flags);
48
49 itnim = bfad_os_get_itnim(im_port, starget->id);
50 if (itnim)
51 fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
52
53 fc_starget_port_id(starget) = fc_id;
54 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
55}
56
57/**
58 * FC transport template entry, get SCSI target nwwn.
59 */
60void
61bfad_im_get_starget_node_name(struct scsi_target *starget)
62{
63 struct Scsi_Host *shost;
64 struct bfad_im_port_s *im_port;
65 struct bfad_s *bfad;
66 struct bfad_itnim_s *itnim = NULL;
67 u64 node_name = 0;
68 unsigned long flags;
69
70 shost = bfad_os_starget_to_shost(starget);
71 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
72 bfad = im_port->bfad;
73 spin_lock_irqsave(&bfad->bfad_lock, flags);
74
75 itnim = bfad_os_get_itnim(im_port, starget->id);
76 if (itnim)
77 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
78
79 fc_starget_node_name(starget) = bfa_os_htonll(node_name);
80 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
81}
82
83/**
84 * FC transport template entry, get SCSI target pwwn.
85 */
86void
87bfad_im_get_starget_port_name(struct scsi_target *starget)
88{
89 struct Scsi_Host *shost;
90 struct bfad_im_port_s *im_port;
91 struct bfad_s *bfad;
92 struct bfad_itnim_s *itnim = NULL;
93 u64 port_name = 0;
94 unsigned long flags;
95
96 shost = bfad_os_starget_to_shost(starget);
97 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
98 bfad = im_port->bfad;
99 spin_lock_irqsave(&bfad->bfad_lock, flags);
100
101 itnim = bfad_os_get_itnim(im_port, starget->id);
102 if (itnim)
103 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
104
105 fc_starget_port_name(starget) = bfa_os_htonll(port_name);
106 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
107}
108
109/**
110 * FC transport template entry, get SCSI host port ID.
111 */
112void
113bfad_im_get_host_port_id(struct Scsi_Host *shost)
114{
115 struct bfad_im_port_s *im_port =
116 (struct bfad_im_port_s *) shost->hostdata[0];
117 struct bfad_port_s *port = im_port->port;
118
119 fc_host_port_id(shost) =
120 bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port));
121}
122
123
124
125
126
127struct Scsi_Host *
128bfad_os_starget_to_shost(struct scsi_target *starget)
129{
130 return dev_to_shost(starget->dev.parent);
131}
132
133/**
134 * FC transport template entry, get SCSI host port type.
135 */
136static void
137bfad_im_get_host_port_type(struct Scsi_Host *shost)
138{
139 struct bfad_im_port_s *im_port =
140 (struct bfad_im_port_s *) shost->hostdata[0];
141 struct bfad_s *bfad = im_port->bfad;
142 struct bfa_pport_attr_s attr;
143
144 bfa_pport_get_attr(&bfad->bfa, &attr);
145
146 switch (attr.port_type) {
147 case BFA_PPORT_TYPE_NPORT:
148 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
149 break;
150 case BFA_PPORT_TYPE_NLPORT:
151 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
152 break;
153 case BFA_PPORT_TYPE_P2P:
154 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
155 break;
156 case BFA_PPORT_TYPE_LPORT:
157 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
158 break;
159 default:
160 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
161 break;
162 }
163}
164
165/**
166 * FC transport template entry, get SCSI host port state.
167 */
168static void
169bfad_im_get_host_port_state(struct Scsi_Host *shost)
170{
171 struct bfad_im_port_s *im_port =
172 (struct bfad_im_port_s *) shost->hostdata[0];
173 struct bfad_s *bfad = im_port->bfad;
174 struct bfa_pport_attr_s attr;
175
176 bfa_pport_get_attr(&bfad->bfa, &attr);
177
178 switch (attr.port_state) {
179 case BFA_PPORT_ST_LINKDOWN:
180 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
181 break;
182 case BFA_PPORT_ST_LINKUP:
183 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
184 break;
185 case BFA_PPORT_ST_UNINIT:
186 case BFA_PPORT_ST_ENABLING_QWAIT:
187 case BFA_PPORT_ST_ENABLING:
188 case BFA_PPORT_ST_DISABLING_QWAIT:
189 case BFA_PPORT_ST_DISABLING:
190 case BFA_PPORT_ST_DISABLED:
191 case BFA_PPORT_ST_STOPPED:
192 case BFA_PPORT_ST_IOCDOWN:
193 default:
194 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
195 break;
196 }
197}
198
/**
 * FC transport template entry, get SCSI host active fc4s.
 *
 * Fills the FC-4 types bitmask exported through the transport class.
 */
static void
bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;

	/* start from an all-zero FC-4 types bitmask */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));

	/* presumably the SCSI-FCP (type 0x08) bit -- TODO confirm the
	 * byte/bit layout against the FC transport class definition */
	if (port->supported_fc4s &
	    (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
		fc_host_active_fc4s(shost)[2] = 1;

	/* presumably the IP-over-FC bit -- verify against spec */
	if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
		fc_host_active_fc4s(shost)[3] = 0x20;

	/* NOTE(review): byte 7 is set unconditionally; meaning unverified */
	fc_host_active_fc4s(shost)[7] = 1;
}
221
222/**
223 * FC transport template entry, get SCSI host link speed.
224 */
225static void
226bfad_im_get_host_speed(struct Scsi_Host *shost)
227{
228 struct bfad_im_port_s *im_port =
229 (struct bfad_im_port_s *) shost->hostdata[0];
230 struct bfad_s *bfad = im_port->bfad;
231 struct bfa_pport_attr_s attr;
232
233 bfa_pport_get_attr(&bfad->bfa, &attr);
234 switch (attr.speed) {
235 case BFA_PPORT_SPEED_8GBPS:
236 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
237 break;
238 case BFA_PPORT_SPEED_4GBPS:
239 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
240 break;
241 case BFA_PPORT_SPEED_2GBPS:
242 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
243 break;
244 case BFA_PPORT_SPEED_1GBPS:
245 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
246 break;
247 default:
248 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
249 break;
250 }
251}
252
253/**
254 * FC transport template entry, get SCSI host port type.
255 */
256static void
257bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
258{
259 struct bfad_im_port_s *im_port =
260 (struct bfad_im_port_s *) shost->hostdata[0];
261 struct bfad_port_s *port = im_port->port;
262 wwn_t fabric_nwwn = 0;
263
264 fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port);
265
266 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
267
268}
269
/**
 * FC transport template entry, get BFAD statistics.
 *
 * Fires an asynchronous HAL stats request and sleeps on a completion
 * until the results land in bfad->link_stats.  Returns NULL when the
 * request could not be started.  Must be called from process context.
 */
static struct fc_host_statistics *
bfad_im_get_stats(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_hal_comp fcomp;
	struct fc_host_statistics *hstats;
	bfa_status_t rc;
	unsigned long flags;

	hstats = &bfad->link_stats;
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memset(hstats, 0, sizeof(struct fc_host_statistics));
	/* HAL fills hstats asynchronously; bfad_hcb_comp posts fcomp */
	rc = bfa_pport_get_stats(&bfad->bfa,
				     (union bfa_pport_stats_u *) hstats,
				     bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (rc != BFA_STATUS_OK)
		return NULL;

	wait_for_completion(&fcomp.comp);

	return hstats;
}
299
300/**
301 * FC transport template entry, reset BFAD statistics.
302 */
303static void
304bfad_im_reset_stats(struct Scsi_Host *shost)
305{
306 struct bfad_im_port_s *im_port =
307 (struct bfad_im_port_s *) shost->hostdata[0];
308 struct bfad_s *bfad = im_port->bfad;
309 struct bfad_hal_comp fcomp;
310 unsigned long flags;
311 bfa_status_t rc;
312
313 init_completion(&fcomp.comp);
314 spin_lock_irqsave(&bfad->bfad_lock, flags);
315 rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp);
316 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
317
318 if (rc != BFA_STATUS_OK)
319 return;
320
321 wait_for_completion(&fcomp.comp);
322
323 return;
324}
325
326/**
327 * FC transport template entry, get rport loss timeout.
328 */
329static void
330bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
331{
332 struct bfad_itnim_data_s *itnim_data = rport->dd_data;
333 struct bfad_itnim_s *itnim = itnim_data->itnim;
334 struct bfad_s *bfad = itnim->im->bfad;
335 unsigned long flags;
336
337 spin_lock_irqsave(&bfad->bfad_lock, flags);
338 rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
339 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
340}
341
342/**
343 * FC transport template entry, set rport loss timeout.
344 */
345static void
346bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
347{
348 struct bfad_itnim_data_s *itnim_data = rport->dd_data;
349 struct bfad_itnim_s *itnim = itnim_data->itnim;
350 struct bfad_s *bfad = itnim->im->bfad;
351 unsigned long flags;
352
353 if (timeout > 0) {
354 spin_lock_irqsave(&bfad->bfad_lock, flags);
355 bfa_fcpim_path_tov_set(&bfad->bfa, timeout);
356 rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
357 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
358 }
359
360}
361
/*
 * FC transport template for the physical port: wires the bfad_im_*
 * callbacks into the SCSI FC transport class.
 */
struct fc_function_template bfad_im_fc_function_template = {

	/* Target dynamic attributes */
	.get_starget_port_id = bfad_im_get_starget_port_id,
	.show_starget_port_id = 1,
	.get_starget_node_name = bfad_im_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = bfad_im_get_starget_port_name,
	.show_starget_port_name = 1,

	/* Host dynamic attribute */
	.get_host_port_id = bfad_im_get_host_port_id,
	.show_host_port_id = 1,

	/* Host fixed attributes */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,

	/* More host dynamic attributes */
	.show_host_port_type = 1,
	.get_host_port_type = bfad_im_get_host_port_type,
	.show_host_port_state = 1,
	.get_host_port_state = bfad_im_get_host_port_state,
	.show_host_active_fc4s = 1,
	.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
	.show_host_speed = 1,
	.get_host_speed = bfad_im_get_host_speed,
	.show_host_fabric_name = 1,
	.get_host_fabric_name = bfad_im_get_host_fabric_name,

	.show_host_symbolic_name = 1,

	/* Statistics */
	.get_fc_host_stats = bfad_im_get_stats,
	.reset_fc_host_stats = bfad_im_reset_stats,

	/* Allocation length for host specific data */
	.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),

	/* Remote port fixed attributes */
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_rport_dev_loss_tmo = 1,
	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
412
/**
 * Scsi_Host_attrs SCSI host attributes
 */

/* sysfs: adapter serial number (read-only). */
static ssize_t
bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			ioc_attr.adapter_attr.serial_num);
}
431
/* sysfs: adapter model name (read-only). */
static ssize_t
bfad_im_model_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);
}
446
/* sysfs: adapter model description string (read-only). */
static ssize_t
bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			ioc_attr.adapter_attr.model_descr);
}
462
/* sysfs: port node WWN, printed big-endian as 0x-prefixed hex. */
static ssize_t
bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;
	u64 nwwn;

	nwwn = bfa_fcs_port_get_nwwn(port->fcs_port);
	return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
}
476
/* sysfs: "Brocade <model> FV<fw-ver> DV<driver-ver>" identity string. */
static ssize_t
bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);

	return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
			ioc_attr.adapter_attr.model,
			ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
}
494
/* sysfs: adapter hardware version (read-only). */
static ssize_t
bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver);
}
509
/* sysfs: driver version string (compile-time constant). */
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
}
516
/* sysfs: adapter option ROM version (read-only). */
static ssize_t
bfad_im_optionrom_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			ioc_attr.adapter_attr.optrom_ver);
}
532
/* sysfs: adapter firmware version (read-only). */
static ssize_t
bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver);
}
547
/* sysfs: number of ports on the adapter (read-only). */
static ssize_t
bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioc_attr_s ioc_attr;

	memset(&ioc_attr, 0, sizeof(ioc_attr));
	bfa_get_attr(&bfad->bfa, &ioc_attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports);
}
562
/* sysfs: driver name (compile-time constant). */
static ssize_t
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
}
569
570static ssize_t
571bfad_im_num_of_discovered_ports_show(struct device *dev,
572 struct device_attribute *attr, char *buf)
573{
574 struct Scsi_Host *shost = class_to_shost(dev);
575 struct bfad_im_port_s *im_port =
576 (struct bfad_im_port_s *) shost->hostdata[0];
577 struct bfad_port_s *port = im_port->port;
578 struct bfad_s *bfad = im_port->bfad;
579 int nrports = 2048;
580 wwn_t *rports = NULL;
581 unsigned long flags;
582
583 rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
584 if (rports == NULL)
585 return -ENOMEM;
586
587 spin_lock_irqsave(&bfad->bfad_lock, flags);
588 bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports);
589 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
590 kfree(rports);
591
592 return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
593}
594
/* Read-only sysfs attributes exported on the scsi_host class device. */
static DEVICE_ATTR(serial_number, S_IRUGO,
		   bfad_im_serial_num_show, NULL);
static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL);
static DEVICE_ATTR(model_description, S_IRUGO,
		   bfad_im_model_desc_show, NULL);
static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL);
static DEVICE_ATTR(symbolic_name, S_IRUGO,
		   bfad_im_symbolic_name_show, NULL);
static DEVICE_ATTR(hardware_version, S_IRUGO,
		   bfad_im_hw_version_show, NULL);
static DEVICE_ATTR(driver_version, S_IRUGO,
		   bfad_im_drv_version_show, NULL);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
		   bfad_im_optionrom_version_show, NULL);
static DEVICE_ATTR(firmware_version, S_IRUGO,
		   bfad_im_fw_version_show, NULL);
static DEVICE_ATTR(number_of_ports, S_IRUGO,
		   bfad_im_num_of_ports_show, NULL);
static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
		   bfad_im_num_of_discovered_ports_show, NULL);
616
/* Attribute set hooked into the physical-port SCSI host template. */
struct device_attribute *bfad_im_host_attrs[] = {
	&dev_attr_serial_number,
	&dev_attr_model,
	&dev_attr_model_description,
	&dev_attr_node_name,
	&dev_attr_symbolic_name,
	&dev_attr_hardware_version,
	&dev_attr_driver_version,
	&dev_attr_option_rom_version,
	&dev_attr_firmware_version,
	&dev_attr_number_of_ports,
	&dev_attr_driver_name,
	&dev_attr_number_of_discovered_ports,
	NULL,
};
632
/*
 * Attribute set for vport SCSI hosts.
 * NOTE(review): currently byte-for-byte identical to bfad_im_host_attrs;
 * kept separate presumably so the two lists can diverge later.
 */
struct device_attribute *bfad_im_vport_attrs[] = {
	&dev_attr_serial_number,
	&dev_attr_model,
	&dev_attr_model_description,
	&dev_attr_node_name,
	&dev_attr_symbolic_name,
	&dev_attr_hardware_version,
	&dev_attr_driver_version,
	&dev_attr_option_rom_version,
	&dev_attr_firmware_version,
	&dev_attr_number_of_ports,
	&dev_attr_driver_name,
	&dev_attr_number_of_discovered_ports,
	NULL,
};
648
649
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
new file mode 100644
index 000000000000..4d3312da6a81
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_attr.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
#ifndef __BFAD_ATTR_H__
#define __BFAD_ATTR_H__
/**
 * bfad_attr.h Linux driver configuration interface module.
 */

/**
 * FC_transport_template FC transport template
 */

/*
 * NOTE(review): no definition of bfad_os_dev_to_shost() is visible in
 * bfad_attr.c (which defines bfad_os_starget_to_shost(), declared
 * below) -- confirm this prototype is still needed.
 */
struct Scsi_Host*
bfad_os_dev_to_shost(struct scsi_target *starget);

/**
 * FC transport template entry, get SCSI target port ID.
 */
void
bfad_im_get_starget_port_id(struct scsi_target *starget);

/**
 * FC transport template entry, get SCSI target nwwn.
 */
void
bfad_im_get_starget_node_name(struct scsi_target *starget);

/**
 * FC transport template entry, get SCSI target pwwn.
 */
void
bfad_im_get_starget_port_name(struct scsi_target *starget);

/**
 * FC transport template entry, get SCSI host port ID.
 */
void
bfad_im_get_host_port_id(struct Scsi_Host *shost);

/**
 * FC transport template entry, issue a LIP.
 * NOTE(review): not wired into bfad_im_fc_function_template and no
 * definition is visible in bfad_attr.c -- verify before relying on it.
 */
int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost);

/* Map a scsi_target to its owning Scsi_Host (defined in bfad_attr.c). */
struct Scsi_Host*
bfad_os_starget_to_shost(struct scsi_target *starget);


#endif /*  __BFAD_ATTR_H__ */
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
new file mode 100644
index 000000000000..172c81e25c1c
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * Contains base driver definitions.
20 */
21
22/**
23 * bfa_drv.h Linux driver data structures.
24 */
25
26#ifndef __BFAD_DRV_H__
27#define __BFAD_DRV_H__
28
29#include "bfa_os_inc.h"
30
31#include <bfa.h>
32#include <bfa_svc.h>
33#include <fcs/bfa_fcs.h>
34#include <defs/bfa_defs_pci.h>
35#include <defs/bfa_defs_port.h>
36#include <defs/bfa_defs_rport.h>
37#include <fcs/bfa_fcs_rport.h>
38#include <defs/bfa_defs_vport.h>
39#include <fcs/bfa_fcs_vport.h>
40
41#include <cs/bfa_plog.h>
42#include "aen/bfa_aen.h"
43#include <log/bfa_log_linux.h>
44
45#define BFAD_DRIVER_NAME "bfa"
46#ifdef BFA_DRIVER_VERSION
47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
48#else
49#define BFAD_DRIVER_VERSION "2.0.0.0"
50#endif
51
52
53#define BFAD_IRQ_FLAGS IRQF_SHARED
54
55/*
56 * BFAD flags
57 */
58#define BFAD_MSIX_ON 0x00000001
59#define BFAD_HAL_INIT_DONE 0x00000002
60#define BFAD_DRV_INIT_DONE 0x00000004
61#define BFAD_CFG_PPORT_DONE 0x00000008
62#define BFAD_HAL_START_DONE 0x00000010
63#define BFAD_PORT_ONLINE 0x00000020
64#define BFAD_RPORT_ONLINE 0x00000040
65
66#define BFAD_PORT_DELETE 0x00000001
67
68/*
69 * BFAD related definition
70 */
71#define SCSI_SCAN_DELAY HZ
72#define BFAD_STOP_TIMEOUT 30
73#define BFAD_SUSPEND_TIMEOUT BFAD_STOP_TIMEOUT
74
75/*
76 * BFAD configuration parameter default values
77 */
78#define BFAD_LUN_QUEUE_DEPTH 32
79#define BFAD_IO_MAX_SGE SG_ALL
80
81#define bfad_isr_t irq_handler_t
82
83#define MAX_MSIX_ENTRY 22
84
85struct bfad_msix_s {
86 struct bfad_s *bfad;
87 struct msix_entry msix;
88};
89
90enum bfad_port_pvb_type {
91 BFAD_PORT_PHYS_BASE = 0,
92 BFAD_PORT_PHYS_VPORT = 1,
93 BFAD_PORT_VF_BASE = 2,
94 BFAD_PORT_VF_VPORT = 3,
95};
96
97/*
98 * PORT data structure
99 */
100struct bfad_port_s {
101 struct list_head list_entry;
102 struct bfad_s *bfad;
103 struct bfa_fcs_port_s *fcs_port;
104 u32 roles;
105 s32 flags;
106 u32 supported_fc4s;
107 u8 ipfc_flags;
108 enum bfad_port_pvb_type pvb_type;
109 struct bfad_im_port_s *im_port; /* IM specific data */
110 struct bfad_tm_port_s *tm_port; /* TM specific data */
111 struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */
112};
113
114/*
115 * VPORT data structure
116 */
117struct bfad_vport_s {
118 struct bfad_port_s drv_port;
119 struct bfa_fcs_vport_s fcs_vport;
120 struct completion *comp_del;
121};
122
123/*
124 * VF data structure
125 */
126struct bfad_vf_s {
127 bfa_fcs_vf_t fcs_vf;
128 struct bfad_port_s base_port; /* base port for vf */
129 struct bfad_s *bfad;
130};
131
132struct bfad_cfg_param_s {
133 u32 rport_del_timeout;
134 u32 ioc_queue_depth;
135 u32 lun_queue_depth;
136 u32 io_max_sge;
137 u32 binding_method;
138};
139
140#define BFAD_AEN_MAX_APPS 8
141struct bfad_aen_file_s {
142 struct list_head qe;
143 struct bfad_s *bfad;
144 s32 ri;
145 s32 app_id;
146};
147
148/*
149 * BFAD (PCI function) data structure
150 */
151struct bfad_s {
152 struct list_head list_entry;
153 struct bfa_s bfa;
154 struct bfa_fcs_s bfa_fcs;
155 struct pci_dev *pcidev;
156 const char *pci_name;
157 struct bfa_pcidev_s hal_pcidev;
158 struct bfa_ioc_pci_attr_s pci_attr;
159 unsigned long pci_bar0_map;
160 void __iomem *pci_bar0_kva;
161 struct completion comp;
162 struct completion suspend;
163 struct completion disable_comp;
164 bfa_boolean_t disable_active;
165 struct bfad_port_s pport; /* physical port of the BFAD */
166 struct bfa_meminfo_s meminfo;
167 struct bfa_iocfc_cfg_s ioc_cfg;
168 u32 inst_no; /* BFAD instance number */
169 u32 bfad_flags;
170 spinlock_t bfad_lock;
171 struct bfad_cfg_param_s cfg_data;
172 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
173 int nvec;
174 char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
175 char port_name[BFA_ADAPTER_SYM_NAME_LEN];
176 struct timer_list hal_tmo;
177 unsigned long hs_start;
178 struct bfad_im_s *im; /* IM specific data */
179 struct bfad_tm_s *tm; /* TM specific data */
180 struct bfad_ipfc_s *ipfc; /* IPFC specific data */
181 struct bfa_log_mod_s log_data;
182 struct bfa_trc_mod_s *trcmod;
183 struct bfa_log_mod_s *logmod;
184 struct bfa_aen_s *aen;
185 struct bfa_aen_s aen_buf;
186 struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS];
187 struct list_head file_q;
188 struct list_head file_free_q;
189 struct bfa_plog_s plog_buf;
190 int ref_count;
191 bfa_boolean_t ipfc_enabled;
192 struct fc_host_statistics link_stats;
193
194 struct kobject *bfa_kobj;
195 struct kobject *ioc_kobj;
196 struct kobject *pport_kobj;
197 struct kobject *lport_kobj;
198};
199
200/*
201 * RPORT data structure
202 */
203struct bfad_rport_s {
204 struct bfa_fcs_rport_s fcs_rport;
205};
206
207struct bfad_buf_info {
208 void *virt;
209 dma_addr_t phys;
210 u32 size;
211};
212
213struct bfad_fcxp {
214 struct bfad_port_s *port;
215 struct bfa_rport_s *bfa_rport;
216 bfa_status_t req_status;
217 u16 tag;
218 u16 rsp_len;
219 u16 rsp_maxlen;
220 u8 use_ireqbuf;
221 u8 use_irspbuf;
222 u32 num_req_sgles;
223 u32 num_rsp_sgles;
224 struct fchs_s fchs;
225 void *reqbuf_info;
226 void *rspbuf_info;
227 struct bfa_sge_s *req_sge;
228 struct bfa_sge_s *rsp_sge;
229 fcxp_send_cb_t send_cbfn;
230 void *send_cbarg;
231 void *bfa_fcxp;
232 struct completion comp;
233};
234
235struct bfad_hal_comp {
236 bfa_status_t status;
237 struct completion comp;
238};
239
240/*
241 * Macro to obtain the immediate lower power
242 * of two for the integer.
243 */
244#define nextLowerInt(x) \
245do { \
246 int j; \
247 (*x)--; \
248 for (j = 1; j < (sizeof(int) * 8); j <<= 1) \
249 (*x) = (*x) | (*x) >> j; \
250 (*x)++; \
251 (*x) = (*x) >> 1; \
252} while (0)
253
254
255bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
256 struct bfa_port_cfg_s *port_cfg);
257bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
258 struct bfa_port_cfg_s *port_cfg);
259bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
260bfa_status_t bfad_drv_init(struct bfad_s *bfad);
261void bfad_drv_start(struct bfad_s *bfad);
262void bfad_uncfg_pport(struct bfad_s *bfad);
263void bfad_drv_stop(struct bfad_s *bfad);
264void bfad_remove_intr(struct bfad_s *bfad);
265void bfad_hal_mem_release(struct bfad_s *bfad);
266void bfad_hcb_comp(void *arg, bfa_status_t status);
267
268int bfad_setup_intr(struct bfad_s *bfad);
269void bfad_remove_intr(struct bfad_s *bfad);
270
271void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
272bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
273void bfad_bfa_tmo(unsigned long data);
274void bfad_init_timer(struct bfad_s *bfad);
275int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
276void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
277void bfad_fcs_port_cfg(struct bfad_s *bfad);
278void bfad_drv_uninit(struct bfad_s *bfad);
279void bfad_drv_log_level_set(struct bfad_s *bfad);
280bfa_status_t bfad_fc4_module_init(void);
281void bfad_fc4_module_exit(void);
282
283void bfad_pci_remove(struct pci_dev *pdev);
284int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
285void bfad_os_rport_online_wait(struct bfad_s *bfad);
286int bfad_os_get_linkup_delay(struct bfad_s *bfad);
287int bfad_install_msix_handler(struct bfad_s *bfad);
288
289extern struct idr bfad_im_port_index;
290extern struct list_head bfad_list;
291extern int bfa_lun_queue_depth;
292extern int bfad_supported_fc4s;
293extern int bfa_linkup_delay;
294
295#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
new file mode 100644
index 000000000000..bd34b0db2d6b
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_fwimg.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_fwimg.c Linux driver PCI interface module.
20 */
21#include <bfa_os_inc.h>
22#include <bfad_drv.h>
23#include <bfad_im_compat.h>
24#include <defs/bfa_defs_version.h>
25#include <linux/errno.h>
26#include <linux/sched.h>
27#include <linux/init.h>
28#include <linux/fs.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31#include <linux/pci.h>
32#include <linux/firmware.h>
33#include <bfa_fwimg_priv.h>
34#include <bfa.h>
35
36u32 bfi_image_ct_size;
37u32 bfi_image_cb_size;
38u32 *bfi_image_ct;
39u32 *bfi_image_cb;
40
41
42#define BFAD_FW_FILE_CT "ctfw.bin"
43#define BFAD_FW_FILE_CB "cbfw.bin"
44MODULE_FIRMWARE(BFAD_FW_FILE_CT);
45MODULE_FIRMWARE(BFAD_FW_FILE_CB);
46
47u32 *
48bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
49 u32 *bfi_image_size, char *fw_name)
50{
51 const struct firmware *fw;
52
53 if (request_firmware(&fw, fw_name, &pdev->dev)) {
54 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
55 goto error;
56 }
57
58 *bfi_image = vmalloc(fw->size);
59 if (NULL == *bfi_image) {
60 printk(KERN_ALERT "Fail to allocate buffer for fw image "
61 "size=%x!\n", (u32) fw->size);
62 goto error;
63 }
64
65 memcpy(*bfi_image, fw->data, fw->size);
66 *bfi_image_size = fw->size/sizeof(u32);
67
68 return(*bfi_image);
69
70error:
71 return(NULL);
72}
73
74u32 *
75bfad_get_firmware_buf(struct pci_dev *pdev)
76{
77 if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
78 if (bfi_image_ct_size == 0)
79 bfad_read_firmware(pdev, &bfi_image_ct,
80 &bfi_image_ct_size, BFAD_FW_FILE_CT);
81 return(bfi_image_ct);
82 } else {
83 if (bfi_image_cb_size == 0)
84 bfad_read_firmware(pdev, &bfi_image_cb,
85 &bfi_image_cb_size, BFAD_FW_FILE_CB);
86 return(bfi_image_cb);
87 }
88}
89
90u32 *
91bfi_image_ct_get_chunk(u32 off)
92{ return (u32 *)(bfi_image_ct + off); }
93
94u32 *
95bfi_image_cb_get_chunk(u32 off)
96{ return (u32 *)(bfi_image_cb + off); }
97
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
new file mode 100644
index 000000000000..55d012a9a668
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -0,0 +1,1230 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_im.c Linux driver IM module.
20 */
21
22#include "bfad_drv.h"
23#include "bfad_im.h"
24#include "bfad_trcmod.h"
25#include "bfa_cb_ioim_macros.h"
26#include <fcb/bfa_fcb_fcpim.h>
27
28BFA_TRC_FILE(LDRV, IM);
29
30DEFINE_IDR(bfad_im_port_index);
31struct scsi_transport_template *bfad_im_scsi_transport_template;
32static void bfad_im_itnim_work_handler(struct work_struct *work);
33static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
34 void (*done)(struct scsi_cmnd *));
35static int bfad_im_slave_alloc(struct scsi_device *sdev);
36
37void
38bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
39 enum bfi_ioim_status io_status, u8 scsi_status,
40 int sns_len, u8 *sns_info, s32 residue)
41{
42 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
43 struct bfad_s *bfad = drv;
44 struct bfad_itnim_data_s *itnim_data;
45 struct bfad_itnim_s *itnim;
46
47 switch (io_status) {
48 case BFI_IOIM_STS_OK:
49 bfa_trc(bfad, scsi_status);
50 cmnd->result = ScsiResult(DID_OK, scsi_status);
51 scsi_set_resid(cmnd, 0);
52
53 if (sns_len > 0) {
54 bfa_trc(bfad, sns_len);
55 if (sns_len > SCSI_SENSE_BUFFERSIZE)
56 sns_len = SCSI_SENSE_BUFFERSIZE;
57 memcpy(cmnd->sense_buffer, sns_info, sns_len);
58 }
59 if (residue > 0)
60 scsi_set_resid(cmnd, residue);
61 break;
62
63 case BFI_IOIM_STS_ABORTED:
64 case BFI_IOIM_STS_TIMEDOUT:
65 case BFI_IOIM_STS_PATHTOV:
66 default:
67 cmnd->result = ScsiResult(DID_ERROR, 0);
68 }
69
70 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
71 if (cmnd->device->host != NULL)
72 scsi_dma_unmap(cmnd);
73
74 cmnd->host_scribble = NULL;
75 bfa_trc(bfad, cmnd->result);
76
77 itnim_data = cmnd->device->hostdata;
78 if (itnim_data) {
79 itnim = itnim_data->itnim;
80 if (!cmnd->result && itnim &&
81 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
82 /* Queue depth adjustment for good status completion */
83 bfad_os_ramp_up_qdepth(itnim, cmnd->device);
84 } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
85 /* qfull handling */
86 bfad_os_handle_qfull(itnim, cmnd->device);
87 }
88 }
89
90 cmnd->scsi_done(cmnd);
91}
92
93void
94bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
95{
96 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
97 struct bfad_itnim_data_s *itnim_data;
98 struct bfad_itnim_s *itnim;
99
100 cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
101
102 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
103 if (cmnd->device->host != NULL)
104 scsi_dma_unmap(cmnd);
105
106 cmnd->host_scribble = NULL;
107
108 /* Queue depth adjustment */
109 if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
110 itnim_data = cmnd->device->hostdata;
111 if (itnim_data) {
112 itnim = itnim_data->itnim;
113 if (itnim)
114 bfad_os_ramp_up_qdepth(itnim, cmnd->device);
115 }
116 }
117
118 cmnd->scsi_done(cmnd);
119}
120
121void
122bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
123{
124 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
125 struct bfad_s *bfad = drv;
126
127 cmnd->result = ScsiResult(DID_ERROR, 0);
128
129 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
130 if (cmnd->device->host != NULL)
131 scsi_dma_unmap(cmnd);
132
133 bfa_trc(bfad, cmnd->result);
134 cmnd->host_scribble = NULL;
135}
136
137void
138bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
139 enum bfi_tskim_status tsk_status)
140{
141 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
142 wait_queue_head_t *wq;
143
144 cmnd->SCp.Status |= tsk_status << 1;
145 set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
146 wq = (wait_queue_head_t *) cmnd->SCp.ptr;
147 cmnd->SCp.ptr = NULL;
148
149 if (wq)
150 wake_up(wq);
151}
152
153void
154bfa_cb_ioim_resfree(void *drv)
155{
156}
157
158/**
159 * Scsi_Host_template SCSI host template
160 */
161/**
162 * Scsi_Host template entry, returns BFAD PCI info.
163 */
164static const char *
165bfad_im_info(struct Scsi_Host *shost)
166{
167 static char bfa_buf[256];
168 struct bfad_im_port_s *im_port =
169 (struct bfad_im_port_s *) shost->hostdata[0];
170 struct bfa_ioc_attr_s ioc_attr;
171 struct bfad_s *bfad = im_port->bfad;
172
173 memset(&ioc_attr, 0, sizeof(ioc_attr));
174 bfa_get_attr(&bfad->bfa, &ioc_attr);
175
176 memset(bfa_buf, 0, sizeof(bfa_buf));
177 snprintf(bfa_buf, sizeof(bfa_buf),
178 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
179 ioc_attr.adapter_attr.model, bfad->pci_name,
180 BFAD_DRIVER_VERSION);
181 return bfa_buf;
182}
183
184/**
185 * Scsi_Host template entry, aborts the specified SCSI command.
186 *
187 * Returns: SUCCESS or FAILED.
188 */
189static int
190bfad_im_abort_handler(struct scsi_cmnd *cmnd)
191{
192 struct Scsi_Host *shost = cmnd->device->host;
193 struct bfad_im_port_s *im_port =
194 (struct bfad_im_port_s *) shost->hostdata[0];
195 struct bfad_s *bfad = im_port->bfad;
196 struct bfa_ioim_s *hal_io;
197 unsigned long flags;
198 u32 timeout;
199 int rc = FAILED;
200
201 spin_lock_irqsave(&bfad->bfad_lock, flags);
202 hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
203 if (!hal_io) {
204 /* IO has been completed, retrun success */
205 rc = SUCCESS;
206 goto out;
207 }
208 if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
209 rc = FAILED;
210 goto out;
211 }
212
213 bfa_trc(bfad, hal_io->iotag);
214 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT,
215 im_port->shost->host_no, cmnd, hal_io->iotag);
216 bfa_ioim_abort(hal_io);
217 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
218
219 /* Need to wait until the command get aborted */
220 timeout = 10;
221 while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
222 set_current_state(TASK_UNINTERRUPTIBLE);
223 schedule_timeout(timeout);
224 if (timeout < 4 * HZ)
225 timeout *= 2;
226 }
227
228 cmnd->scsi_done(cmnd);
229 bfa_trc(bfad, hal_io->iotag);
230 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP,
231 im_port->shost->host_no, cmnd, hal_io->iotag);
232 return SUCCESS;
233out:
234 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
235 return rc;
236}
237
238static bfa_status_t
239bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
240 struct bfad_itnim_s *itnim)
241{
242 struct bfa_tskim_s *tskim;
243 struct bfa_itnim_s *bfa_itnim;
244 bfa_status_t rc = BFA_STATUS_OK;
245
246 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
247 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
248 if (!tskim) {
249 BFA_DEV_PRINTF(bfad, BFA_ERR,
250 "target reset, fail to allocate tskim\n");
251 rc = BFA_STATUS_FAILED;
252 goto out;
253 }
254
255 /*
256 * Set host_scribble to NULL to avoid aborting a task command if
257 * happens.
258 */
259 cmnd->host_scribble = NULL;
260 cmnd->SCp.Status = 0;
261 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
262 bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
263 FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
264out:
265 return rc;
266}
267
268/**
269 * Scsi_Host template entry, resets a LUN and abort its all commands.
270 *
271 * Returns: SUCCESS or FAILED.
272 *
273 */
274static int
275bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
276{
277 struct Scsi_Host *shost = cmnd->device->host;
278 struct bfad_im_port_s *im_port =
279 (struct bfad_im_port_s *) shost->hostdata[0];
280 struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
281 struct bfad_s *bfad = im_port->bfad;
282 struct bfa_tskim_s *tskim;
283 struct bfad_itnim_s *itnim;
284 struct bfa_itnim_s *bfa_itnim;
285 DECLARE_WAIT_QUEUE_HEAD(wq);
286 int rc = SUCCESS;
287 unsigned long flags;
288 enum bfi_tskim_status task_status;
289
290 spin_lock_irqsave(&bfad->bfad_lock, flags);
291 itnim = itnim_data->itnim;
292 if (!itnim) {
293 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
294 rc = FAILED;
295 goto out;
296 }
297
298 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
299 if (!tskim) {
300 BFA_DEV_PRINTF(bfad, BFA_ERR,
301 "LUN reset, fail to allocate tskim");
302 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
303 rc = FAILED;
304 goto out;
305 }
306
307 /**
308 * Set host_scribble to NULL to avoid aborting a task command
309 * if happens.
310 */
311 cmnd->host_scribble = NULL;
312 cmnd->SCp.ptr = (char *)&wq;
313 cmnd->SCp.Status = 0;
314 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
315 bfa_tskim_start(tskim, bfa_itnim,
316 bfad_int_to_lun(cmnd->device->lun),
317 FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
318 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
319
320 wait_event(wq, test_bit(IO_DONE_BIT,
321 (unsigned long *)&cmnd->SCp.Status));
322
323 task_status = cmnd->SCp.Status >> 1;
324 if (task_status != BFI_TSKIM_STS_OK) {
325 BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n",
326 task_status);
327 rc = FAILED;
328 }
329
330out:
331 return rc;
332}
333
334/**
335 * Scsi_Host template entry, resets the bus and abort all commands.
336 */
337static int
338bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
339{
340 struct Scsi_Host *shost = cmnd->device->host;
341 struct bfad_im_port_s *im_port =
342 (struct bfad_im_port_s *) shost->hostdata[0];
343 struct bfad_s *bfad = im_port->bfad;
344 struct bfad_itnim_s *itnim;
345 unsigned long flags;
346 u32 i, rc, err_cnt = 0;
347 DECLARE_WAIT_QUEUE_HEAD(wq);
348 enum bfi_tskim_status task_status;
349
350 spin_lock_irqsave(&bfad->bfad_lock, flags);
351 for (i = 0; i < MAX_FCP_TARGET; i++) {
352 itnim = bfad_os_get_itnim(im_port, i);
353 if (itnim) {
354 cmnd->SCp.ptr = (char *)&wq;
355 rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
356 if (rc != BFA_STATUS_OK) {
357 err_cnt++;
358 continue;
359 }
360
361 /* wait target reset to complete */
362 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
363 wait_event(wq, test_bit(IO_DONE_BIT,
364 (unsigned long *)&cmnd->SCp.Status));
365 spin_lock_irqsave(&bfad->bfad_lock, flags);
366
367 task_status = cmnd->SCp.Status >> 1;
368 if (task_status != BFI_TSKIM_STS_OK) {
369 BFA_DEV_PRINTF(bfad, BFA_ERR,
370 "target reset failure,"
371 " status: %d\n", task_status);
372 err_cnt++;
373 }
374 }
375 }
376 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
377
378 if (err_cnt)
379 return FAILED;
380
381 return SUCCESS;
382}
383
384/**
385 * Scsi_Host template entry slave_destroy.
386 */
387static void
388bfad_im_slave_destroy(struct scsi_device *sdev)
389{
390 sdev->hostdata = NULL;
391 return;
392}
393
394/**
395 * BFA FCS itnim callbacks
396 */
397
398/**
399 * BFA FCS itnim alloc callback, after successful PRLI
400 * Context: Interrupt
401 */
402void
403bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
404 struct bfad_itnim_s **itnim_drv)
405{
406 *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
407 if (*itnim_drv == NULL)
408 return;
409
410 (*itnim_drv)->im = bfad->im;
411 *itnim = &(*itnim_drv)->fcs_itnim;
412 (*itnim_drv)->state = ITNIM_STATE_NONE;
413
414 /*
415 * Initiaze the itnim_work
416 */
417 INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
418 bfad->bfad_flags |= BFAD_RPORT_ONLINE;
419}
420
421/**
422 * BFA FCS itnim free callback.
423 * Context: Interrupt. bfad_lock is held
424 */
425void
426bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
427{
428 struct bfad_port_s *port;
429 wwn_t wwpn;
430 u32 fcid;
431 char wwpn_str[32], fcid_str[16];
432
433 /* online to free state transtion should not happen */
434 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
435
436 itnim_drv->queue_work = 1;
437 /* offline request is not yet done, use the same request to free */
438 if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
439 itnim_drv->queue_work = 0;
440
441 itnim_drv->state = ITNIM_STATE_FREE;
442 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
443 itnim_drv->im_port = port->im_port;
444 wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
445 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
446 wwn2str(wwpn_str, wwpn);
447 fcid2str(fcid_str, fcid);
448 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE,
449 port->im_port->shost->host_no,
450 fcid_str, wwpn_str);
451 bfad_os_itnim_process(itnim_drv);
452}
453
454/**
455 * BFA FCS itnim online callback.
456 * Context: Interrupt. bfad_lock is held
457 */
458void
459bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
460{
461 struct bfad_port_s *port;
462
463 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
464 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
465 itnim_drv->state = ITNIM_STATE_ONLINE;
466 itnim_drv->queue_work = 1;
467 itnim_drv->im_port = port->im_port;
468 bfad_os_itnim_process(itnim_drv);
469}
470
471/**
472 * BFA FCS itnim offline callback.
473 * Context: Interrupt. bfad_lock is held
474 */
475void
476bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
477{
478 struct bfad_port_s *port;
479 struct bfad_s *bfad;
480
481 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
482 bfad = port->bfad;
483 if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
484 (port->flags & BFAD_PORT_DELETE)) {
485 itnim_drv->state = ITNIM_STATE_OFFLINE;
486 return;
487 }
488 itnim_drv->im_port = port->im_port;
489 itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
490 itnim_drv->queue_work = 1;
491 bfad_os_itnim_process(itnim_drv);
492}
493
494/**
495 * BFA FCS itnim timeout callback.
496 * Context: Interrupt. bfad_lock is held
497 */
498void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
499{
500 itnim->state = ITNIM_STATE_TIMEOUT;
501}
502
503/**
504 * Path TOV processing begin notification -- dummy for linux
505 */
506void
507bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
508{
509}
510
511
512
513/**
514 * Allocate a Scsi_Host for a port.
515 */
516int
517bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
518{
519 int error = 1;
520
521 if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
522 printk(KERN_WARNING "idr_pre_get failure\n");
523 goto out;
524 }
525
526 error = idr_get_new(&bfad_im_port_index, im_port,
527 &im_port->idr_id);
528 if (error) {
529 printk(KERN_WARNING "idr_get_new failure\n");
530 goto out;
531 }
532
533 im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
534 if (!im_port->shost) {
535 error = 1;
536 goto out_free_idr;
537 }
538
539 im_port->shost->hostdata[0] = (unsigned long)im_port;
540 im_port->shost->unique_id = im_port->idr_id;
541 im_port->shost->this_id = -1;
542 im_port->shost->max_id = MAX_FCP_TARGET;
543 im_port->shost->max_lun = MAX_FCP_LUN;
544 im_port->shost->max_cmd_len = 16;
545 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
546 im_port->shost->transportt = bfad_im_scsi_transport_template;
547
548 error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
549 if (error) {
550 printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
551 error);
552 goto out_fc_rel;
553 }
554
555 /* setup host fixed attribute if the lk supports */
556 bfad_os_fc_host_init(im_port);
557
558 return 0;
559
560out_fc_rel:
561 scsi_host_put(im_port->shost);
562out_free_idr:
563 idr_remove(&bfad_im_port_index, im_port->idr_id);
564out:
565 return error;
566}
567
568void
569bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
570{
571 unsigned long flags;
572
573 bfa_trc(bfad, bfad->inst_no);
574 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
575 im_port->shost->host_no);
576
577 fc_remove_host(im_port->shost);
578
579 scsi_remove_host(im_port->shost);
580 scsi_host_put(im_port->shost);
581
582 spin_lock_irqsave(&bfad->bfad_lock, flags);
583 idr_remove(&bfad_im_port_index, im_port->idr_id);
584 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
585}
586
587static void
588bfad_im_port_delete_handler(struct work_struct *work)
589{
590 struct bfad_im_port_s *im_port =
591 container_of(work, struct bfad_im_port_s, port_delete_work);
592
593 bfad_im_scsi_host_free(im_port->bfad, im_port);
594 bfad_im_port_clean(im_port);
595 kfree(im_port);
596}
597
598bfa_status_t
599bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
600{
601 int rc = BFA_STATUS_OK;
602 struct bfad_im_port_s *im_port;
603
604 im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
605 if (im_port == NULL) {
606 rc = BFA_STATUS_ENOMEM;
607 goto ext;
608 }
609 port->im_port = im_port;
610 im_port->port = port;
611 im_port->bfad = bfad;
612
613 INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
614 INIT_LIST_HEAD(&im_port->itnim_mapped_list);
615 INIT_LIST_HEAD(&im_port->binding_list);
616
617ext:
618 return rc;
619}
620
621void
622bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
623{
624 struct bfad_im_port_s *im_port = port->im_port;
625
626 queue_work(bfad->im->drv_workq,
627 &im_port->port_delete_work);
628}
629
630void
631bfad_im_port_clean(struct bfad_im_port_s *im_port)
632{
633 struct bfad_fcp_binding *bp, *bp_new;
634 unsigned long flags;
635 struct bfad_s *bfad = im_port->bfad;
636
637 spin_lock_irqsave(&bfad->bfad_lock, flags);
638 list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
639 list_entry) {
640 list_del(&bp->list_entry);
641 kfree(bp);
642 }
643
644 /* the itnim_mapped_list must be empty at this time */
645 bfa_assert(list_empty(&im_port->itnim_mapped_list));
646
647 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
648}
649
650void
651bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
652{
653}
654
655void
656bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
657{
658}
659
660bfa_status_t
661bfad_im_probe(struct bfad_s *bfad)
662{
663 struct bfad_im_s *im;
664 bfa_status_t rc = BFA_STATUS_OK;
665
666 im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
667 if (im == NULL) {
668 rc = BFA_STATUS_ENOMEM;
669 goto ext;
670 }
671
672 bfad->im = im;
673 im->bfad = bfad;
674
675 if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
676 kfree(im);
677 rc = BFA_STATUS_FAILED;
678 }
679
680ext:
681 return rc;
682}
683
684void
685bfad_im_probe_undo(struct bfad_s *bfad)
686{
687 if (bfad->im) {
688 bfad_os_destroy_workq(bfad->im);
689 kfree(bfad->im);
690 bfad->im = NULL;
691 }
692}
693
694
695
696
697int
698bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
699 struct bfad_s *bfad)
700{
701 struct device *dev;
702
703 if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
704 dev = &bfad->pcidev->dev;
705 else
706 dev = &bfad->pport.im_port->shost->shost_gendev;
707
708 return scsi_add_host(shost, dev);
709}
710
711struct Scsi_Host *
712bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
713{
714 struct scsi_host_template *sht;
715
716 if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
717 sht = &bfad_im_scsi_host_template;
718 else
719 sht = &bfad_im_vport_template;
720
721 sht->sg_tablesize = bfad->cfg_data.io_max_sge;
722
723 return scsi_host_alloc(sht, sizeof(unsigned long));
724}
725
726void
727bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
728{
729 flush_workqueue(bfad->im->drv_workq);
730 bfad_im_scsi_host_free(im_port->bfad, im_port);
731 bfad_im_port_clean(im_port);
732 kfree(im_port);
733}
734
735void
736bfad_os_destroy_workq(struct bfad_im_s *im)
737{
738 if (im && im->drv_workq) {
739 destroy_workqueue(im->drv_workq);
740 im->drv_workq = NULL;
741 }
742}
743
744bfa_status_t
745bfad_os_thread_workq(struct bfad_s *bfad)
746{
747 struct bfad_im_s *im = bfad->im;
748
749 bfa_trc(bfad, 0);
750 snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d",
751 bfad->inst_no);
752 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
753 if (!im->drv_workq)
754 return BFA_STATUS_FAILED;
755
756 return BFA_STATUS_OK;
757}
758
759/**
760 * Scsi_Host template entry.
761 *
762 * Description:
763 * OS entry point to adjust the queue_depths on a per-device basis.
764 * Called once per device during the bus scan.
765 * Return non-zero if fails.
766 */
767static int
768bfad_im_slave_configure(struct scsi_device *sdev)
769{
770 if (sdev->tagged_supported)
771 scsi_activate_tcq(sdev, bfa_lun_queue_depth);
772 else
773 scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);
774
775 return 0;
776}
777
/*
 * SCSI host template for the physical (base) FC port.  Identical to
 * bfad_im_vport_template below except for the sysfs attribute set
 * (shost_attrs).  sg_tablesize is a default; bfad_os_scsi_host_alloc()
 * overwrites it from cfg_data.io_max_sge before host allocation.
 */
struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	/* -1: let the midlayer choose; the HBA is initiator-only */
	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = 0xFFFF,
};
798
/*
 * SCSI host template for virtual (NPIV) ports.  Same entry points as
 * bfad_im_scsi_host_template; only shost_attrs differs (vport sysfs
 * attribute set).
 */
struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = 0xFFFF,
};
819
/*
 * Post-probe hook: wait for all queued itnim work to finish so initial
 * target discovery is complete before probe returns.
 */
void
bfad_im_probe_post(struct bfad_im_s *im)
{
	flush_workqueue(im->drv_workq);
}
825
826bfa_status_t
827bfad_im_module_init(void)
828{
829 bfad_im_scsi_transport_template =
830 fc_attach_transport(&bfad_im_fc_function_template);
831 if (!bfad_im_scsi_transport_template)
832 return BFA_STATUS_ENOMEM;
833
834 return BFA_STATUS_OK;
835}
836
837void
838bfad_im_module_exit(void)
839{
840 if (bfad_im_scsi_transport_template)
841 fc_release_transport(bfad_im_scsi_transport_template);
842}
843
/*
 * Queue this itnim's state-change work on the instance work queue,
 * unless queuing has been disabled (queue_work flag cleared).
 */
void
bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_im_s *im = itnim_drv->im;

	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
852
853void
854bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
855{
856 struct scsi_device *tmp_sdev;
857
858 if (((jiffies - itnim->last_ramp_up_time) >
859 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
860 ((jiffies - itnim->last_queue_full_time) >
861 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
862 shost_for_each_device(tmp_sdev, sdev->host) {
863 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
864 if (tmp_sdev->id != sdev->id)
865 continue;
866 if (tmp_sdev->ordered_tags)
867 scsi_adjust_queue_depth(tmp_sdev,
868 MSG_ORDERED_TAG,
869 tmp_sdev->queue_depth + 1);
870 else
871 scsi_adjust_queue_depth(tmp_sdev,
872 MSG_SIMPLE_TAG,
873 tmp_sdev->queue_depth + 1);
874
875 itnim->last_ramp_up_time = jiffies;
876 }
877 }
878 }
879}
880
881void
882bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
883{
884 struct scsi_device *tmp_sdev;
885
886 itnim->last_queue_full_time = jiffies;
887
888 shost_for_each_device(tmp_sdev, sdev->host) {
889 if (tmp_sdev->id != sdev->id)
890 continue;
891 scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
892 }
893}
894
895
896
897
898struct bfad_itnim_s *
899bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
900{
901 struct bfad_itnim_s *itnim = NULL;
902
903 /* Search the mapped list for this target ID */
904 list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
905 if (id == itnim->scsi_tgt_id)
906 return itnim;
907 }
908
909 return NULL;
910}
911
912/**
913 * Scsi_Host template entry slave_alloc
914 */
915static int
916bfad_im_slave_alloc(struct scsi_device *sdev)
917{
918 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
919
920 if (!rport || fc_remote_port_chkready(rport))
921 return -ENXIO;
922
923 sdev->hostdata = rport->dd_data;
924
925 return 0;
926}
927
/*
 * Populate the FC transport attributes of a newly allocated Scsi_Host:
 * WWNs, supported classes/FC-4 types/speeds, symbolic name and max
 * frame size.  The pport and ioc attribute queries share one stack
 * union since they are used sequentially.
 */
void
bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_port_s *port = im_port->port;
	union attr {
		struct bfa_pport_attr_s pattr;
		struct bfa_ioc_attr_s ioc_attr;
	} attr;

	fc_host_node_name(host) =
		bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
		/* For LLC/SNAP type 0x05 */
		fc_host_supported_fc4s(host)[3] = 0x20;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr));
	bfa_get_attr(&bfad->bfa, &attr.ioc_attr);
	/* NOTE(review): unbounded sprintf -- presumably the symbolic-name
	 * buffer is large enough for model + versions; confirm its size. */
	sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
		attr.ioc_attr.adapter_attr.model,
		attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);

	fc_host_supported_speeds(host) = 0;
	fc_host_supported_speeds(host) |=
		FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		FC_PORTSPEED_1GBIT;

	memset(&attr.pattr, 0, sizeof(attr.pattr));
	bfa_pport_get_attr(&bfad->bfa, &attr.pattr);
	fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize;
}
972
/*
 * Register an itnim's remote port with the FC transport and bind the
 * resulting fc_rport back to the itnim via the rport's dd_data.  On
 * success the rport role is promoted to FCP target (triggering a
 * midlayer scan) and the transport-assigned SCSI target id is recorded.
 * Returns silently if fc_remote_port_add() fails (itnim->fc_rport
 * stays NULL).
 */
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	/* Added with unknown role first; promoted to target below */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	/* Cache the transport's target id if one was assigned in range */
	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	return;
}
1012
1013/**
1014 * Work queue handler using FC transport service
1015* Context: kernel
1016 */
1017static void
1018bfad_im_itnim_work_handler(struct work_struct *work)
1019{
1020 struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
1021 itnim_work);
1022 struct bfad_im_s *im = itnim->im;
1023 struct bfad_s *bfad = im->bfad;
1024 struct bfad_im_port_s *im_port;
1025 unsigned long flags;
1026 struct fc_rport *fc_rport;
1027 wwn_t wwpn;
1028 u32 fcid;
1029 char wwpn_str[32], fcid_str[16];
1030
1031 spin_lock_irqsave(&bfad->bfad_lock, flags);
1032 im_port = itnim->im_port;
1033 bfa_trc(bfad, itnim->state);
1034 switch (itnim->state) {
1035 case ITNIM_STATE_ONLINE:
1036 if (!itnim->fc_rport) {
1037 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1038 bfad_im_fc_rport_add(im_port, itnim);
1039 spin_lock_irqsave(&bfad->bfad_lock, flags);
1040 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
1041 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
1042 wwn2str(wwpn_str, wwpn);
1043 fcid2str(fcid_str, fcid);
1044 list_add_tail(&itnim->list_entry,
1045 &im_port->itnim_mapped_list);
1046 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE,
1047 im_port->shost->host_no,
1048 itnim->scsi_tgt_id,
1049 fcid_str, wwpn_str);
1050 } else {
1051 printk(KERN_WARNING
1052 "%s: itnim %llx is already in online state\n",
1053 __FUNCTION__,
1054 bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
1055 }
1056
1057 break;
1058 case ITNIM_STATE_OFFLINE_PENDING:
1059 itnim->state = ITNIM_STATE_OFFLINE;
1060 if (itnim->fc_rport) {
1061 fc_rport = itnim->fc_rport;
1062 ((struct bfad_itnim_data_s *)
1063 fc_rport->dd_data)->itnim = NULL;
1064 itnim->fc_rport = NULL;
1065 if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
1066 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1067 fc_rport->dev_loss_tmo =
1068 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1069 fc_remote_port_delete(fc_rport);
1070 spin_lock_irqsave(&bfad->bfad_lock, flags);
1071 }
1072 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
1073 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
1074 wwn2str(wwpn_str, wwpn);
1075 fcid2str(fcid_str, fcid);
1076 list_del(&itnim->list_entry);
1077 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE,
1078 im_port->shost->host_no,
1079 itnim->scsi_tgt_id,
1080 fcid_str, wwpn_str);
1081 }
1082 break;
1083 case ITNIM_STATE_FREE:
1084 if (itnim->fc_rport) {
1085 fc_rport = itnim->fc_rport;
1086 ((struct bfad_itnim_data_s *)
1087 fc_rport->dd_data)->itnim = NULL;
1088 itnim->fc_rport = NULL;
1089 if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
1090 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1091 fc_rport->dev_loss_tmo =
1092 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1093 fc_remote_port_delete(fc_rport);
1094 spin_lock_irqsave(&bfad->bfad_lock, flags);
1095 }
1096 list_del(&itnim->list_entry);
1097 }
1098
1099 kfree(itnim);
1100 break;
1101 default:
1102 bfa_assert(0);
1103 break;
1104 }
1105
1106 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1107}
1108
/**
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 *
 * Maps the command's scatter list, allocates a HAL I/O request and
 * starts it under bfad_lock.  Returns SCSI_MLQUEUE_HOST_BUSY (midlayer
 * retries later) when DMA mapping or HAL allocation fails; completes
 * the command immediately with DID_NO_CONNECT / DID_IMM_RETRY when the
 * HBA is stopped or the target is not bound yet.
 */
static int
bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	int rc;
	s16 sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	/* Fail fast if the remote port is blocked or gone */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);

	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		/* Target not (re)bound yet -- ask the midlayer to retry */
		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Remember the HAL request so abort/completion can find it */
	cmnd->host_scribble = (char *)hal_io;
	bfa_trc_fp(bfad, hal_io->iotag);
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}
1179
/*
 * Boot-time wait: give the link up to bfa_linkup_delay seconds to come
 * up, then give remote ports up to 10 seconds (capped by
 * bfa_linkup_delay) to appear, so the first SCSI scan finds targets.
 */
void
bfad_os_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		&& i < bfa_linkup_delay; i++)
		schedule_timeout_uninterruptible(HZ);

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
				 rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			&& i < rport_delay; i++)
			schedule_timeout_uninterruptible(HZ);

		/* NOTE(review): an extra rport_delay-second sleep even after
		 * the first rport is online -- presumably to let remaining
		 * rports register; confirm this is intended. */
		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
			schedule_timeout_uninterruptible(rport_delay * HZ);
	}
}
1201
/*
 * Choose the link-up delay (in seconds) used at boot.
 * Returns 30 when the adapter carries boot-over-SAN target WWNs in
 * flash, otherwise 0.
 */
int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{

	u8 nwwns = 0;
	wwn_t *wwns;
	int ldelay;

	/*
	 * Query the boot target port wwns -- read from the boot
	 * information in flash.
	 * If nwwns > 0 => boot over SAN, use a 30 second delay,
	 * else => local boot, no extra delay.
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, &wwns);

	if (nwwns > 0) {
		/* If boot over SAN; linkup_delay = 30sec */
		ldelay = 30;
	} else {
		/*
		 * Local boot: no extra delay.  NOTE(review): the original
		 * comments claimed 10 seconds here while the code returns 0;
		 * code kept as-is -- confirm intent.
		 */
		ldelay = 0;
	}

	return ldelay;
}
1229
1230
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
new file mode 100644
index 000000000000..189a5b29e21a
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_IM_H__
19#define __BFAD_IM_H__
20
21#include "fcs/bfa_fcs_fcpim.h"
22#include "bfad_im_compat.h"
23
24#define FCPI_NAME " fcpim"
25
26void bfad_flags_set(struct bfad_s *bfad, u32 flags);
27bfa_status_t bfad_im_module_init(void);
28void bfad_im_module_exit(void);
29bfa_status_t bfad_im_probe(struct bfad_s *bfad);
30void bfad_im_probe_undo(struct bfad_s *bfad);
31void bfad_im_probe_post(struct bfad_im_s *im);
32bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
33void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
34void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
35void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
36void bfad_im_port_clean(struct bfad_im_port_s *im_port);
37int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
38 struct bfad_im_port_s *im_port);
39void bfad_im_scsi_host_free(struct bfad_s *bfad,
40 struct bfad_im_port_s *im_port);
41
42#define MAX_FCP_TARGET 1024
43#define MAX_FCP_LUN 16384
44#define BFAD_TARGET_RESET_TMO 60
45#define BFAD_LUN_RESET_TMO 60
46#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
47#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
48#define BFAD_KOBJ_NAME_LEN 20
49
50/*
51 * itnim flags
52 */
53#define ITNIM_MAPPED 0x00000001
54
55#define SCSI_TASK_MGMT 0x00000001
56#define IO_DONE_BIT 0
57
/*
 * Private data hung off fc_rport->dd_data: back-pointer to the driver
 * itnim (set in bfad_im_fc_rport_add, cleared when the itnim goes
 * offline).
 */
struct bfad_itnim_data_s {
	struct bfad_itnim_s *itnim;
};
61
/*
 * Per-port (base port or vport) initiator-mode state.
 */
struct bfad_im_port_s {
	struct bfad_s *bfad;	/* owning driver instance */
	struct bfad_port_s *port;	/* generic BFAD port object */
	struct work_struct port_delete_work;
	int idr_id;
	u16 cur_scsi_id;
	struct list_head binding_list;	/* bfad_fcp_binding entries, presumably */
	struct Scsi_Host *shost;	/* SCSI midlayer host for this port */
	struct list_head itnim_mapped_list;	/* itnims with a SCSI tgt id */
};
72
73enum bfad_itnim_state {
74 ITNIM_STATE_NONE,
75 ITNIM_STATE_ONLINE,
76 ITNIM_STATE_OFFLINE_PENDING,
77 ITNIM_STATE_OFFLINE,
78 ITNIM_STATE_TIMEOUT,
79 ITNIM_STATE_FREE,
80};
81
/*
 * Per itnim (initiator-target nexus) data structure; one per remote
 * target port discovered on an IM port.
 */
struct bfad_itnim_s {
	struct list_head list_entry;	/* link on im_port->itnim_mapped_list */
	struct bfa_fcs_itnim_s fcs_itnim;	/* embedded FCS itnim object */
	struct work_struct itnim_work;	/* runs bfad_im_itnim_work_handler */
	u32 flags;	/* ITNIM_* bits */
	enum bfad_itnim_state state;
	struct bfad_im_s *im;	/* owning IM instance (work queue) */
	struct bfad_im_port_s *im_port;
	struct bfad_rport_s *drv_rport;
	struct fc_rport *fc_rport;	/* FC transport rport, NULL if offline */
	struct bfa_itnim_s *bfa_itnim;	/* HAL itnim used for I/O requests */
	u16 scsi_tgt_id;	/* SCSI target id from the FC transport */
	u16 queue_work;	/* nonzero => itnim_work may be queued */
	unsigned long last_ramp_up_time;	/* jiffies of last qdepth ramp-up */
	unsigned long last_queue_full_time;	/* jiffies of last QUEUE FULL */
};
101
/*
 * Persistent-binding key kind: which identifier (PWWN, NWWN or FC id)
 * a bfad_fcp_binding entry matches on -- presumably; confirm against
 * the binding-list users.
 */
enum bfad_binding_type {
	FCP_PWWN_BINDING = 0x1,
	FCP_NWWN_BINDING = 0x2,
	FCP_FCID_BINDING = 0x3,
};
107
/*
 * One persistent target binding (entry on im_port->binding_list):
 * pins scsi_target_id for the remote port identified per binding_type.
 */
struct bfad_fcp_binding {
	struct list_head list_entry;
	enum bfad_binding_type binding_type;
	u16 scsi_target_id;
	u32 fc_id;
	wwn_t nwwn;
	wwn_t pwwn;
};
116
/*
 * Per-adapter-instance IM state: the single-threaded work queue that
 * runs itnim state-change work (see bfad_os_thread_workq).
 */
struct bfad_im_s {
	struct bfad_s *bfad;
	struct workqueue_struct *drv_workq;
	char drv_workq_name[BFAD_KOBJ_NAME_LEN];
};
122
123struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
124 struct bfad_s *);
125bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
126void bfad_os_destroy_workq(struct bfad_im_s *im);
127void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
128void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
129void bfad_os_init_work(struct bfad_im_port_s *im_port);
130void bfad_os_scsi_host_free(struct bfad_s *bfad,
131 struct bfad_im_port_s *im_port);
132void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
133 struct scsi_device *sdev);
134void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
135struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
136int bfad_os_scsi_add_host(struct Scsi_Host *shost,
137 struct bfad_im_port_s *im_port, struct bfad_s *bfad);
138
139/*
140 * scsi_host_template entries
141 */
142void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
143 struct bfad_itnim_s *itnim);
144
145extern struct scsi_host_template bfad_im_scsi_host_template;
146extern struct scsi_host_template bfad_im_vport_template;
147extern struct fc_function_template bfad_im_fc_function_template;
148extern struct scsi_transport_template *bfad_im_scsi_transport_template;
149
150#endif
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
new file mode 100644
index 000000000000..1d3e74ec338c
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im_compat.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_IM_COMPAT_H__
19#define __BFAD_IM_COMPAT_H__
20
21extern u32 *bfi_image_buf;
22extern u32 bfi_image_size;
23
24extern struct device_attribute *bfad_im_host_attrs[];
25extern struct device_attribute *bfad_im_vport_attrs[];
26
27u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
28u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
29 u32 *bfi_image_size, char *fw_name);
30
31static inline u32 *
32bfad_load_fwimg(struct pci_dev *pdev)
33{
34 return(bfad_get_firmware_buf(pdev));
35}
36
/*
 * Release the firmware images loaded for the two ASIC families.
 * NOTE(review): bfi_image_ct/bfi_image_cb and their sizes are not
 * declared in this header (only bfi_image_buf/bfi_image_size are);
 * presumably they come from another included header -- confirm.
 */
static inline void
bfad_free_fwimg(void)
{
	if (bfi_image_ct_size && bfi_image_ct)
		vfree(bfi_image_ct);
	if (bfi_image_cb_size && bfi_image_cb)
		vfree(bfi_image_cb);
}
45
46#endif
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
new file mode 100644
index 000000000000..f104e029cac9
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfad_drv.h"
19#include "bfad_trcmod.h"
20
21BFA_TRC_FILE(LDRV, INTR);
22
23/**
24 * bfa_isr BFA driver interrupt functions
25 */
26irqreturn_t bfad_intx(int irq, void *dev_id);
27static int msix_disable;
28module_param(msix_disable, int, S_IRUGO | S_IWUSR);
/**
 * Line based interrupt handler.
 *
 * Claims the interrupt only when bfa_intx() reports pending events for
 * this function (safe on shared lines -> IRQ_NONE otherwise).
 * Completions are dequeued under bfad_lock, processed with the lock
 * dropped, then freed back to the HAL under the lock again.
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s *bfad = dev_id;
	struct list_head doneq;
	unsigned long flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		/* Not our interrupt */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc_fp(bfad, irq);
	}

	return IRQ_HANDLED;

}
62
63static irqreturn_t
64bfad_msix(int irq, void *dev_id)
65{
66 struct bfad_msix_s *vec = dev_id;
67 struct bfad_s *bfad = vec->bfad;
68 struct list_head doneq;
69 unsigned long flags;
70
71 spin_lock_irqsave(&bfad->bfad_lock, flags);
72
73 bfa_msix(&bfad->bfa, vec->msix.entry);
74 bfa_comp_deq(&bfad->bfa, &doneq);
75 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
76
77 if (!list_empty(&doneq)) {
78 bfa_comp_process(&bfad->bfa, &doneq);
79
80 spin_lock_irqsave(&bfad->bfad_lock, flags);
81 bfa_comp_free(&bfad->bfa, &doneq);
82 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
83 }
84
85 return IRQ_HANDLED;
86}
87
88/**
89 * Initialize the MSIX entry table.
90 */
91static void
92bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
93 int mask, int max_bit)
94{
95 int i;
96 int match = 0x00000001;
97
98 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
99 if (mask & match) {
100 bfad->msix_tab[bfad->nvec].msix.entry = i;
101 bfad->msix_tab[bfad->nvec].bfad = bfad;
102 msix_entries[bfad->nvec].entry = i;
103 bfad->nvec++;
104 }
105
106 match <<= 1;
107 }
108
109}
110
111int
112bfad_install_msix_handler(struct bfad_s *bfad)
113{
114 int i, error = 0;
115
116 for (i = 0; i < bfad->nvec; i++) {
117 error = request_irq(bfad->msix_tab[i].msix.vector,
118 (irq_handler_t) bfad_msix, 0,
119 BFAD_DRIVER_NAME, &bfad->msix_tab[i]);
120 bfa_trc(bfad, i);
121 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
122 if (error) {
123 int j;
124
125 for (j = 0; j < i; j++)
126 free_irq(bfad->msix_tab[j].msix.vector,
127 &bfad->msix_tab[j]);
128
129 return 1;
130 }
131 }
132
133 return 0;
134}
135
/**
 * Setup MSIX based interrupt.
 *
 * Tries MSIX first (unless disabled via the msix_disable module
 * parameter) and falls back to a line-based interrupt when MSIX cannot
 * be enabled.  Returns 0 on success, 1 when even the line-based IRQ
 * cannot be requested.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];

	/* Call BFA to get the msix map for this PCI function. */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if (!msix_disable) {
		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * Only error number of vector is available.
			 * We don't have a mechanism to map multiple
			 * interrupts into one vector, so even if we
			 * can try to request less vectors, we don't
			 * know how to associate interrupt events to
			 * vectors. Linux doesn't dupicate vectors
			 * in the MSIX table for this case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Save the vectors assigned by the PCI layer */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

	/* Reached by fallthrough when msix_disable is set */
line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enable interrupt handler failed */
		return 1;
	}

	return error;
}
196
197void
198bfad_remove_intr(struct bfad_s *bfad)
199{
200 int i;
201
202 if (bfad->bfad_flags & BFAD_MSIX_ON) {
203 for (i = 0; i < bfad->nvec; i++)
204 free_irq(bfad->msix_tab[i].msix.vector,
205 &bfad->msix_tab[i]);
206
207 pci_disable_msix(bfad->pcidev);
208 bfad->bfad_flags &= ~BFAD_MSIX_ON;
209 } else {
210 free_irq(bfad->pcidev->irq, bfad);
211 }
212}
213
214
diff --git a/drivers/scsi/bfa/bfad_ipfc.h b/drivers/scsi/bfa/bfad_ipfc.h
new file mode 100644
index 000000000000..718bc5227671
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_ipfc.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DRV_IPFC_H__
18#define __BFA_DRV_IPFC_H__
19
20
21#define IPFC_NAME ""
22
23#define bfad_ipfc_module_init(x) do {} while (0)
24#define bfad_ipfc_module_exit(x) do {} while (0)
25#define bfad_ipfc_probe(x) do {} while (0)
26#define bfad_ipfc_probe_undo(x) do {} while (0)
27#define bfad_ipfc_port_config(x, y) BFA_STATUS_OK
28#define bfad_ipfc_port_unconfig(x, y) do {} while (0)
29
30#define bfad_ipfc_probe_post(x) do {} while (0)
31#define bfad_ipfc_port_new(x, y, z) BFA_STATUS_OK
32#define bfad_ipfc_port_delete(x, y) do {} while (0)
33#define bfad_ipfc_port_online(x, y) do {} while (0)
34#define bfad_ipfc_port_offline(x, y) do {} while (0)
35
36#define bfad_ip_get_attr(x) BFA_STATUS_FAILED
37#define bfad_ip_reset_drv_stats(x) BFA_STATUS_FAILED
38#define bfad_ip_get_drv_stats(x, y) BFA_STATUS_FAILED
39#define bfad_ip_enable_ipfc(x, y, z) BFA_STATUS_FAILED
40
41
42#endif
diff --git a/drivers/scsi/bfa/bfad_os.c b/drivers/scsi/bfa/bfad_os.c
new file mode 100644
index 000000000000..faf47b4f1a38
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_os.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_os.c Linux driver OS specific calls.
20 */
21
22#include "bfa_os_inc.h"
23#include "bfad_drv.h"
24
/*
 * Fill @tv with the current wall-clock time, truncated to the 32-bit
 * seconds/microseconds fields of struct bfa_timeval_s.
 */
void
bfa_os_gettimeofday(struct bfa_timeval_s *tv)
{
	struct timeval tmp_tv;

	do_gettimeofday(&tmp_tv);
	tv->tv_sec = (u32) tmp_tv.tv_sec;
	tv->tv_usec = (u32) tmp_tv.tv_usec;
}
34
35void
36bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
37 const char *fmt, ...)
38{
39 va_list ap;
40 #define BFA_STRING_256 256
41 char tmp[BFA_STRING_256];
42
43 va_start(ap, fmt);
44 vsprintf(tmp, fmt, ap);
45 va_end(ap);
46
47 printk(tmp);
48}
49
50
diff --git a/drivers/scsi/bfa/bfad_tm.h b/drivers/scsi/bfa/bfad_tm.h
new file mode 100644
index 000000000000..4901b1b7df02
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_tm.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * Brocade Fibre Channel HBA Linux Target Mode Driver
20 */
21
22/**
23 * tm/dummy/bfad_tm.h BFA callback dummy header file for BFA Linux target mode PCI interface module.
24 */
25
26#ifndef __BFAD_TM_H__
27#define __BFAD_TM_H__
28
29#include <defs/bfa_defs_status.h>
30
31#define FCPT_NAME ""
32
33/*
34 * Called from base Linux driver on (De)Init events
35 */
36
37/* attach tgt template with scst */
38#define bfad_tm_module_init() do {} while (0)
39
40/* detach/release tgt template */
41#define bfad_tm_module_exit() do {} while (0)
42
43#define bfad_tm_probe(x) do {} while (0)
44#define bfad_tm_probe_undo(x) do {} while (0)
45#define bfad_tm_probe_post(x) do {} while (0)
46
47/*
48 * Called by base Linux driver but triggered by BFA FCS on config events
49 */
50#define bfad_tm_port_new(x, y) BFA_STATUS_OK
51#define bfad_tm_port_delete(x, y) do {} while (0)
52
53/*
54 * Called by base Linux driver but triggered by BFA FCS on PLOGI/O events
55 */
56#define bfad_tm_port_online(x, y) do {} while (0)
57#define bfad_tm_port_offline(x, y) do {} while (0)
58
59#endif
diff --git a/drivers/scsi/bfa/bfad_trcmod.h b/drivers/scsi/bfa/bfad_trcmod.h
new file mode 100644
index 000000000000..2827b2acd041
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_trcmod.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_trcmod.h Linux driver trace modules
20 */
21
22
23#ifndef __BFAD_TRCMOD_H__
24#define __BFAD_TRCMOD_H__
25
26#include <cs/bfa_trc.h>
27
28/*
29 * !!! Only append to the enums defined here to avoid any versioning
30 * !!! needed between trace utility and driver version
31 */
/* Trace-module ids assigned to the Linux driver files (BFA_TRC_FILE). */
enum {
	/* 2.6 Driver */
	BFA_TRC_LDRV_BFAD = 1,
	BFA_TRC_LDRV_BFAD_2_6 = 2,
	BFA_TRC_LDRV_BFAD_2_6_9 = 3,
	BFA_TRC_LDRV_BFAD_2_6_10 = 4,
	BFA_TRC_LDRV_INTR = 5,
	BFA_TRC_LDRV_IOCTL = 6,
	BFA_TRC_LDRV_OS = 7,
	BFA_TRC_LDRV_IM = 8,
	BFA_TRC_LDRV_IM_2_6 = 9,
	BFA_TRC_LDRV_IM_2_6_9 = 10,
	BFA_TRC_LDRV_IM_2_6_10 = 11,
	BFA_TRC_LDRV_TM = 12,
	BFA_TRC_LDRV_IPFC = 13,
	BFA_TRC_LDRV_IM_2_4 = 14,
	BFA_TRC_LDRV_IM_VMW = 15,
	BFA_TRC_LDRV_IM_LT_2_6_10 = 16,
};
51
52#endif /* __BFAD_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/fab.c b/drivers/scsi/bfa/fab.c
new file mode 100644
index 000000000000..7e3a4d5d7bb4
--- /dev/null
+++ b/drivers/scsi/bfa/fab.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "lport_priv.h"
23
24/**
25 * fab.c port fab implementation.
26 */
27
28/**
29 * bfa_fcs_port_fab_public port fab public functions
30 */
31
32/**
33 * Called by port to initialize fabric services of the base port.
34 */
35void
36bfa_fcs_port_fab_init(struct bfa_fcs_port_s *port)
37{
38 bfa_fcs_port_ns_init(port);
39 bfa_fcs_port_scn_init(port);
40 bfa_fcs_port_ms_init(port);
41}
42
43/**
44 * Called by port to notify transition to online state.
45 */
46void
47bfa_fcs_port_fab_online(struct bfa_fcs_port_s *port)
48{
49 bfa_fcs_port_ns_online(port);
50 bfa_fcs_port_scn_online(port);
51}
52
53/**
54 * Called by port to notify transition to offline state.
55 */
56void
57bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *port)
58{
59 bfa_fcs_port_ns_offline(port);
60 bfa_fcs_port_scn_offline(port);
61 bfa_fcs_port_ms_offline(port);
62}
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
new file mode 100644
index 000000000000..a8b14c47b009
--- /dev/null
+++ b/drivers/scsi/bfa/fabric.c
@@ -0,0 +1,1278 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fabric.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_lport.h"
24#include "fcs_vport.h"
25#include "fcs_trcmod.h"
26#include "fcs_fcxp.h"
27#include "fcs_auth.h"
28#include "fcs.h"
29#include "fcbuild.h"
30#include <log/bfa_log_fcs.h>
31#include <aen/bfa_aen_port.h>
32#include <bfa_svc.h>
33
34BFA_TRC_FILE(FCS, FABRIC);
35
36#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */
37#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
38
39#define bfa_fcs_fabric_set_opertype(__fabric) do { \
40 if (bfa_pport_get_topology((__fabric)->fcs->bfa) \
41 == BFA_PPORT_TOPOLOGY_P2P) \
42 (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \
43 else \
44 (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \
45} while (0)
46
47/*
48 * forward declarations
49 */
50static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
51static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
52static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
53static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
54static void bfa_fcs_fabric_delay(void *cbarg);
55static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
56static void bfa_fcs_fabric_delete_comp(void *cbarg);
57static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
58 struct fchs_s *fchs, u16 len);
59static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
60 struct fchs_s *fchs, u16 len);
61static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
62static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
63 struct bfa_fcxp_s *fcxp,
64 void *cbarg, bfa_status_t status,
65 u32 rsp_len,
66 u32 resid_len,
67 struct fchs_s *rspfchs);
68/**
69 * fcs_fabric_sm fabric state machine functions
70 */
71
72/**
73 * Fabric state machine events
74 */
75enum bfa_fcs_fabric_event {
76 BFA_FCS_FABRIC_SM_CREATE = 1, /* fabric create from driver */
77 BFA_FCS_FABRIC_SM_DELETE = 2, /* fabric delete from driver */
78 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
79 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
80 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* continue op from flogi/auth */
81 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* continue op from flogi/auth */
82 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* no fabric from flogi/auth
83 */
84 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* perform EVFP from
85 *flogi/auth */
86 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* isolate from EVFP processing */
87 BFA_FCS_FABRIC_SM_NO_TAGGING = 10,/* no VFT tagging from EVFP */
88 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
89 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* authentication failed */
90 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* authentication successful
91 */
92 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
93 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
94 BFA_FCS_FABRIC_SM_START = 16, /* fabric delete from driver */
95};
96
97static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
98 enum bfa_fcs_fabric_event event);
99static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
100 enum bfa_fcs_fabric_event event);
101static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
102 enum bfa_fcs_fabric_event event);
103static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
104 enum bfa_fcs_fabric_event event);
105static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
106 enum bfa_fcs_fabric_event event);
107static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
108 enum bfa_fcs_fabric_event event);
109static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
110 enum bfa_fcs_fabric_event event);
111static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
112 enum bfa_fcs_fabric_event event);
113static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
114 enum bfa_fcs_fabric_event event);
115static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
116 enum bfa_fcs_fabric_event event);
117static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
118 enum bfa_fcs_fabric_event event);
119static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
120 enum bfa_fcs_fabric_event event);
121static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
122 enum bfa_fcs_fabric_event event);
123static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
124 enum bfa_fcs_fabric_event event);
125/**
126 * Beginning state before fabric creation.
127 */
128static void
129bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
130 enum bfa_fcs_fabric_event event)
131{
132 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
133 bfa_trc(fabric->fcs, event);
134
135 switch (event) {
136 case BFA_FCS_FABRIC_SM_CREATE:
137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
138 bfa_fcs_fabric_init(fabric);
139 bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL,
140 &fabric->bport.port_cfg, NULL);
141 break;
142
143 case BFA_FCS_FABRIC_SM_LINK_UP:
144 case BFA_FCS_FABRIC_SM_LINK_DOWN:
145 break;
146
147 default:
148 bfa_sm_fault(fabric->fcs, event);
149 }
150}
151
152/**
153 * Beginning state before fabric creation.
154 */
155static void
156bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
157 enum bfa_fcs_fabric_event event)
158{
159 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
160 bfa_trc(fabric->fcs, event);
161
162 switch (event) {
163 case BFA_FCS_FABRIC_SM_START:
164 if (bfa_pport_is_linkup(fabric->fcs->bfa)) {
165 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
166 bfa_fcs_fabric_login(fabric);
167 } else
168 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
169 break;
170
171 case BFA_FCS_FABRIC_SM_LINK_UP:
172 case BFA_FCS_FABRIC_SM_LINK_DOWN:
173 break;
174
175 case BFA_FCS_FABRIC_SM_DELETE:
176 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
177 bfa_fcs_modexit_comp(fabric->fcs);
178 break;
179
180 default:
181 bfa_sm_fault(fabric->fcs, event);
182 }
183}
184
185/**
186 * Link is down, awaiting LINK UP event from port. This is also the
187 * first state at fabric creation.
188 */
189static void
190bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
191 enum bfa_fcs_fabric_event event)
192{
193 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
194 bfa_trc(fabric->fcs, event);
195
196 switch (event) {
197 case BFA_FCS_FABRIC_SM_LINK_UP:
198 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
199 bfa_fcs_fabric_login(fabric);
200 break;
201
202 case BFA_FCS_FABRIC_SM_RETRY_OP:
203 break;
204
205 case BFA_FCS_FABRIC_SM_DELETE:
206 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
207 bfa_fcs_fabric_delete(fabric);
208 break;
209
210 default:
211 bfa_sm_fault(fabric->fcs, event);
212 }
213}
214
/**
 * FLOGI is in progress, awaiting FLOGI reply.
 *
 * The CONT_OP / RETRY_OP / NO_FABRIC / LOOPBACK events are generated by
 * bfa_cb_lps_flogi_comp() from the LPS login completion status.
 */
static void
bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
			enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_CONT_OP:

		/* F-port accepted FLOGI: program the peer's advertised
		 * BB credit before any further traffic. */
		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;

		/* authenticate first if both sides require it */
		if (fabric->auth_reqd && fabric->is_auth) {
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
			bfa_trc(fabric->fcs, event);
		} else {
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
			bfa_fcs_fabric_notify_online(fabric);
		}
		break;

	case BFA_FCS_FABRIC_SM_RETRY_OP:
		/* retriable FLOGI failure: back off, then re-login via
		 * bfa_fcs_fabric_delay() -> SM_DELAYED */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
		bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
				bfa_fcs_fabric_delay, fabric,
				BFA_FCS_FABRIC_RETRY_DELAY);
		break;

	case BFA_FCS_FABRIC_SM_LOOPBACK:
		/* our own FLOGI was received back: external loopback plug */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_set_opertype(fabric);
		break;

	case BFA_FCS_FABRIC_SM_NO_FABRIC:
		/* peer is an N-port: direct-attach (N2N) topology.
		 * NOTE: notify_online is deliberately called before the
		 * state change here -- preserve this ordering. */
		fabric->fab_type = BFA_FCS_FABRIC_N2N;
		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
		bfa_fcs_fabric_notify_online(fabric);
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
275
276
277static void
278bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
279 enum bfa_fcs_fabric_event event)
280{
281 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
282 bfa_trc(fabric->fcs, event);
283
284 switch (event) {
285 case BFA_FCS_FABRIC_SM_DELAYED:
286 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
287 bfa_fcs_fabric_login(fabric);
288 break;
289
290 case BFA_FCS_FABRIC_SM_LINK_DOWN:
291 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
292 bfa_timer_stop(&fabric->delay_timer);
293 break;
294
295 case BFA_FCS_FABRIC_SM_DELETE:
296 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
297 bfa_timer_stop(&fabric->delay_timer);
298 bfa_fcs_fabric_delete(fabric);
299 break;
300
301 default:
302 bfa_sm_fault(fabric->fcs, event);
303 }
304}
305
/**
 * Authentication is in progress, awaiting authentication results.
 *
 * Entered from the FLOGI state when both local config (auth_reqd) and
 * the fabric (is_auth) require authentication.
 */
static void
bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
		       enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
		/* drop the login; stay isolated until link bounce */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
		bfa_fcs_fabric_notify_online(fabric);
		break;

	case BFA_FCS_FABRIC_SM_PERF_EVFP:
		/* switch requested virtual-fabric parameter exchange */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
345
346/**
347 * Authentication failed
348 */
349static void
350bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
351 enum bfa_fcs_fabric_event event)
352{
353 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
354 bfa_trc(fabric->fcs, event);
355
356 switch (event) {
357 case BFA_FCS_FABRIC_SM_LINK_DOWN:
358 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
359 bfa_fcs_fabric_notify_offline(fabric);
360 break;
361
362 case BFA_FCS_FABRIC_SM_DELETE:
363 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
364 bfa_fcs_fabric_delete(fabric);
365 break;
366
367 default:
368 bfa_sm_fault(fabric->fcs, event);
369 }
370}
371
372/**
373 * Port is in loopback mode.
374 */
375static void
376bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
377 enum bfa_fcs_fabric_event event)
378{
379 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
380 bfa_trc(fabric->fcs, event);
381
382 switch (event) {
383 case BFA_FCS_FABRIC_SM_LINK_DOWN:
384 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
385 bfa_fcs_fabric_notify_offline(fabric);
386 break;
387
388 case BFA_FCS_FABRIC_SM_DELETE:
389 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
390 bfa_fcs_fabric_delete(fabric);
391 break;
392
393 default:
394 bfa_sm_fault(fabric->fcs, event);
395 }
396}
397
398/**
399 * There is no attached fabric - private loop or NPort-to-NPort topology.
400 */
401static void
402bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
403 enum bfa_fcs_fabric_event event)
404{
405 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
406 bfa_trc(fabric->fcs, event);
407
408 switch (event) {
409 case BFA_FCS_FABRIC_SM_LINK_DOWN:
410 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
411 bfa_lps_discard(fabric->lps);
412 bfa_fcs_fabric_notify_offline(fabric);
413 break;
414
415 case BFA_FCS_FABRIC_SM_DELETE:
416 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
417 bfa_fcs_fabric_delete(fabric);
418 break;
419
420 case BFA_FCS_FABRIC_SM_NO_FABRIC:
421 bfa_trc(fabric->fcs, fabric->bb_credit);
422 bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
423 break;
424
425 default:
426 bfa_sm_fault(fabric->fcs, event);
427 }
428}
429
430/**
431 * Fabric is online - normal operating state.
432 */
433static void
434bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
435 enum bfa_fcs_fabric_event event)
436{
437 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
438 bfa_trc(fabric->fcs, event);
439
440 switch (event) {
441 case BFA_FCS_FABRIC_SM_LINK_DOWN:
442 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
443 bfa_lps_discard(fabric->lps);
444 bfa_fcs_fabric_notify_offline(fabric);
445 break;
446
447 case BFA_FCS_FABRIC_SM_DELETE:
448 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
449 bfa_fcs_fabric_delete(fabric);
450 break;
451
452 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
453 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
454 bfa_lps_discard(fabric->lps);
455 break;
456
457 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
458 break;
459
460 default:
461 bfa_sm_fault(fabric->fcs, event);
462 }
463}
464
465/**
466 * Exchanging virtual fabric parameters.
467 */
468static void
469bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
470 enum bfa_fcs_fabric_event event)
471{
472 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
473 bfa_trc(fabric->fcs, event);
474
475 switch (event) {
476 case BFA_FCS_FABRIC_SM_CONT_OP:
477 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
478 break;
479
480 case BFA_FCS_FABRIC_SM_ISOLATE:
481 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
482 break;
483
484 default:
485 bfa_sm_fault(fabric->fcs, event);
486 }
487}
488
/**
 * EVFP exchange complete and VFT tagging is enabled.
 *
 * Terminal placeholder state: every event is traced and dropped
 * (intentionally no bfa_sm_fault here).
 */
static void
bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
			    enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);
}
499
500/**
501 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
502 */
503static void
504bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
505 enum bfa_fcs_fabric_event event)
506{
507 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
508 bfa_trc(fabric->fcs, event);
509
510 bfa_log(fabric->fcs->logm, BFA_LOG_FCS_FABRIC_ISOLATED,
511 fabric->bport.port_cfg.pwwn, fabric->fcs->port_vfid,
512 fabric->event_arg.swp_vfid);
513}
514
515/**
516 * Fabric is being deleted, awaiting vport delete completions.
517 */
518static void
519bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
520 enum bfa_fcs_fabric_event event)
521{
522 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
523 bfa_trc(fabric->fcs, event);
524
525 switch (event) {
526 case BFA_FCS_FABRIC_SM_DELCOMP:
527 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
528 bfa_fcs_modexit_comp(fabric->fcs);
529 break;
530
531 case BFA_FCS_FABRIC_SM_LINK_UP:
532 break;
533
534 case BFA_FCS_FABRIC_SM_LINK_DOWN:
535 bfa_fcs_fabric_notify_offline(fabric);
536 break;
537
538 default:
539 bfa_sm_fault(fabric->fcs, event);
540 }
541}
542
543
544
545/**
546 * fcs_fabric_private fabric private functions
547 */
548
549static void
550bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
551{
552 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
553
554 port_cfg->roles = BFA_PORT_ROLE_FCP_IM;
555 port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
556 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
557}
558
559/**
560 * Port Symbolic Name Creation for base port.
561 */
562void
563bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
564{
565 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
566 struct bfa_adapter_attr_s adapter_attr;
567 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
568
569 bfa_os_memset((void *)&adapter_attr, 0,
570 sizeof(struct bfa_adapter_attr_s));
571 bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr);
572
573 /*
574 * Model name/number
575 */
576 strncpy((char *)&port_cfg->sym_name, adapter_attr.model,
577 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
578 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
579 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
580
581 /*
582 * Driver Version
583 */
584 strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
585 BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
586 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
587 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
588
589 /*
590 * Host machine name
591 */
592 strncat((char *)&port_cfg->sym_name,
593 (char *)driver_info->host_machine_name,
594 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
595 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
596 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
597
598 /*
599 * Host OS Info :
600 * If OS Patch Info is not there, do not truncate any bytes from the
601 * OS name string and instead copy the entire OS info string (64 bytes).
602 */
603 if (driver_info->host_os_patch[0] == '\0') {
604 strncat((char *)&port_cfg->sym_name,
605 (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN);
606 strncat((char *)&port_cfg->sym_name,
607 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
608 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
609 } else {
610 strncat((char *)&port_cfg->sym_name,
611 (char *)driver_info->host_os_name,
612 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
613 strncat((char *)&port_cfg->sym_name,
614 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
615 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
616
617 /*
618 * Append host OS Patch Info
619 */
620 strncat((char *)&port_cfg->sym_name,
621 (char *)driver_info->host_os_patch,
622 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
623 }
624
625 /*
626 * null terminate
627 */
628 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
629}
630
/**
 * bfa lps login completion callback
 *
 * Translates the LPS FLOGI completion status into fabric state-machine
 * events and updates login-derived fabric attributes (BB credit, fabric
 * name, PID, NPIV/auth capability).
 *
 * @param[in] bfad   driver instance (unused here)
 * @param[in] uarg   the struct bfa_fcs_fabric_s that issued the FLOGI
 * @param[in] status FLOGI completion status from LPS
 */
void
bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_fabric_s *fabric = uarg;

	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, status);

	/* error statuses count a stat, post RETRY_OP and return early;
	 * only BFA_STATUS_OK falls through to the attribute updates below */
	switch (status) {
	case BFA_STATUS_OK:
		fabric->stats.flogi_accepts++;
		break;

	case BFA_STATUS_INVALID_MAC:
		/*
		 * Only for CNA
		 */
		fabric->stats.flogi_acc_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_EPROTOCOL:
		/* classify the protocol error for statistics only */
		switch (bfa_lps_get_extstatus(fabric->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			fabric->stats.flogi_acc_err++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			fabric->stats.flogi_unknown_rsp++;
			break;

		default:
			break;
		}
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_FABRIC_RJT:
		fabric->stats.flogi_rejects++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;

	default:
		fabric->stats.flogi_rsp_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;
	}

	/* successful login: capture peer-advertised BB credit */
	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
	bfa_trc(fabric->fcs, fabric->bb_credit);

	/* for non-Brocade fabrics the fabric name comes from the peer NWWN */
	if (!bfa_lps_is_brcd_fabric(fabric->lps))
		fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps);

	/*
	 * Check port type. It should be 1 = F-port.
	 */
	if (bfa_lps_is_fport(fabric->lps)) {
		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
	} else {
		/*
		 * Nport-2-Nport direct attached
		 */
		fabric->bport.port_topo.pn2n.rem_port_wwn =
			bfa_lps_get_peer_pwwn(fabric->lps);
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
	}

	bfa_trc(fabric->fcs, fabric->bport.pid);
	bfa_trc(fabric->fcs, fabric->is_npiv);
	bfa_trc(fabric->fcs, fabric->is_auth);
}
711
712/**
713 * Allocate and send FLOGI.
714 */
715static void
716bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
717{
718 struct bfa_s *bfa = fabric->fcs->bfa;
719 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
720 u8 alpa = 0;
721
722 if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
723 alpa = bfa_pport_get_myalpa(bfa);
724
725 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa),
726 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
727
728 fabric->stats.flogi_sent++;
729}
730
731static void
732bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
733{
734 struct bfa_fcs_vport_s *vport;
735 struct list_head *qe, *qen;
736
737 bfa_trc(fabric->fcs, fabric->fabric_name);
738
739 bfa_fcs_fabric_set_opertype(fabric);
740 fabric->stats.fabric_onlines++;
741
742 /**
743 * notify online event to base and then virtual ports
744 */
745 bfa_fcs_port_online(&fabric->bport);
746
747 list_for_each_safe(qe, qen, &fabric->vport_q) {
748 vport = (struct bfa_fcs_vport_s *)qe;
749 bfa_fcs_vport_online(vport);
750 }
751}
752
753static void
754bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
755{
756 struct bfa_fcs_vport_s *vport;
757 struct list_head *qe, *qen;
758
759 bfa_trc(fabric->fcs, fabric->fabric_name);
760 fabric->stats.fabric_offlines++;
761
762 /**
763 * notify offline event first to vports and then base port.
764 */
765 list_for_each_safe(qe, qen, &fabric->vport_q) {
766 vport = (struct bfa_fcs_vport_s *)qe;
767 bfa_fcs_vport_offline(vport);
768 }
769
770 bfa_fcs_port_offline(&fabric->bport);
771
772 fabric->fabric_name = 0;
773 fabric->fabric_ip_addr[0] = 0;
774}
775
776static void
777bfa_fcs_fabric_delay(void *cbarg)
778{
779 struct bfa_fcs_fabric_s *fabric = cbarg;
780
781 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
782}
783
784/**
785 * Delete all vports and wait for vport delete completions.
786 */
787static void
788bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
789{
790 struct bfa_fcs_vport_s *vport;
791 struct list_head *qe, *qen;
792
793 list_for_each_safe(qe, qen, &fabric->vport_q) {
794 vport = (struct bfa_fcs_vport_s *)qe;
795 bfa_fcs_vport_delete(vport);
796 }
797
798 bfa_fcs_port_delete(&fabric->bport);
799 bfa_wc_wait(&fabric->wc);
800}
801
802static void
803bfa_fcs_fabric_delete_comp(void *cbarg)
804{
805 struct bfa_fcs_fabric_s *fabric = cbarg;
806
807 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
808}
809
810
811
812/**
813 * fcs_fabric_public fabric public functions
814 */
815
/**
 * Module initialization
 *
 * Zeroes and wires up the single base fabric instance inside @fcs:
 * vport/vf queues, an LPS handle for fabric login, the delete
 * wait-counter, and the initial state-machine state. Finishes by
 * posting SM_CREATE, which initializes the base logical port.
 */
void
bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	fabric = &fcs->fabric;
	bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));

	/**
	 * Initialize base fabric.
	 */
	fabric->fcs = fcs;
	INIT_LIST_HEAD(&fabric->vport_q);
	INIT_LIST_HEAD(&fabric->vf_q);
	fabric->lps = bfa_lps_alloc(fcs->bfa);
	bfa_assert(fabric->lps);

	/**
	 * Initialize fabric delete completion handler. Fabric deletion is complete
	 * when the last vport delete is complete.
	 */
	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
	bfa_wc_up(&fabric->wc);	/* For the base port */

	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE);
	bfa_trc(fcs, 0);
}
847
848/**
849 * Module cleanup
850 */
851void
852bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
853{
854 struct bfa_fcs_fabric_s *fabric;
855
856 bfa_trc(fcs, 0);
857
858 /**
859 * Cleanup base fabric.
860 */
861 fabric = &fcs->fabric;
862 bfa_lps_delete(fabric->lps);
863 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
864}
865
866/**
867 * Fabric module start -- kick starts FCS actions
868 */
869void
870bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
871{
872 struct bfa_fcs_fabric_s *fabric;
873
874 bfa_trc(fcs, 0);
875 fabric = &fcs->fabric;
876 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
877}
878
/**
 * Suspend fabric activity as part of driver suspend.
 *
 * Intentionally a no-op stub: nothing fabric-side needs quiescing here.
 */
void
bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
{
}
886
887bfa_boolean_t
888bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
889{
890 return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback));
891}
892
893enum bfa_pport_type
894bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
895{
896 return fabric->oper_type;
897}
898
899/**
900 * Link up notification from BFA physical port module.
901 */
902void
903bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
904{
905 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
906 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
907}
908
909/**
910 * Link down notification from BFA physical port module.
911 */
912void
913bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
914{
915 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
916 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
917}
918
919/**
920 * A child vport is being created in the fabric.
921 *
922 * Call from vport module at vport creation. A list of base port and vports
923 * belonging to a fabric is maintained to propagate link events.
924 *
925 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
926 * param[in] vport - Vport being created.
927 *
928 * @return None (always succeeds)
929 */
930void
931bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
932 struct bfa_fcs_vport_s *vport)
933{
934 /**
935 * - add vport to fabric's vport_q
936 */
937 bfa_trc(fabric->fcs, fabric->vf_id);
938
939 list_add_tail(&vport->qe, &fabric->vport_q);
940 fabric->num_vports++;
941 bfa_wc_up(&fabric->wc);
942}
943
944/**
945 * A child vport is being deleted from fabric.
946 *
947 * Vport is being deleted.
948 */
949void
950bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
951 struct bfa_fcs_vport_s *vport)
952{
953 list_del(&vport->qe);
954 fabric->num_vports--;
955 bfa_wc_down(&fabric->wc);
956}
957
958/**
959 * Base port is deleted.
960 */
961void
962bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
963{
964 bfa_wc_down(&fabric->wc);
965}
966
967/**
968 * Check if fabric is online.
969 *
970 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
971 *
972 * @return TRUE/FALSE
973 */
974int
975bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
976{
977 return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online));
978}
979
980
981bfa_status_t
982bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
983 struct bfa_port_cfg_s *port_cfg,
984 struct bfad_vf_s *vf_drv)
985{
986 bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
987 return BFA_STATUS_OK;
988}
989
990/**
991 * Lookup for a vport withing a fabric given its pwwn
992 */
993struct bfa_fcs_vport_s *
994bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
995{
996 struct bfa_fcs_vport_s *vport;
997 struct list_head *qe;
998
999 list_for_each(qe, &fabric->vport_q) {
1000 vport = (struct bfa_fcs_vport_s *)qe;
1001 if (bfa_fcs_port_get_pwwn(&vport->lport) == pwwn)
1002 return vport;
1003 }
1004
1005 return NULL;
1006}
1007
1008/**
1009 * In a given fabric, return the number of lports.
1010 *
1011 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1012 *
1013* @return : 1 or more.
1014 */
1015u16
1016bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1017{
1018 return (fabric->num_vports);
1019}
1020
/**
 * Unsolicited frame receive handling.
 *
 * Routing priority (order matters):
 *   1. our own looped-back FLOGI -> SM_LOOPBACK
 *   2. frames addressed to the fabric controller -> process_uf()
 *   3. frames for the base port PID (auth vs. regular)
 *   4. frames for a matching vport PID
 *   5. fallback: base port
 */
void
bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
		       u16 len)
{
	u32 pid = fchs->d_id;
	struct bfa_fcs_vport_s *vport;
	struct list_head *qe;
	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
	struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;

	bfa_trc(fabric->fcs, len);
	bfa_trc(fabric->fcs, pid);

	/**
	 * Look for our own FLOGI frames being looped back. This means an
	 * external loopback cable is in place. Our own FLOGI frames are
	 * sometimes looped back when switch port gets temporarily bypassed.
	 */
	/* NOTE(review): this check uses bfa_os_ntoh3b while the one below
	 * uses bfa_os_hton3b on the same constant -- presumably both are
	 * equivalent 3-byte swaps; confirm against the OS abstraction. */
	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT))
	    && (els_cmd->els_code == FC_ELS_FLOGI)
	    && (flogi->port_name == bfa_fcs_port_get_pwwn(&fabric->bport))) {
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
		return;
	}

	/**
	 * FLOGI/EVFP exchanges should be consumed by base fabric.
	 */
	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
		bfa_trc(fabric->fcs, pid);
		bfa_fcs_fabric_process_uf(fabric, fchs, len);
		return;
	}

	if (fabric->bport.pid == pid) {
		/**
		 * All authentication frames should be routed to auth
		 */
		bfa_trc(fabric->fcs, els_cmd->els_code);
		if (els_cmd->els_code == FC_ELS_AUTH) {
			bfa_trc(fabric->fcs, els_cmd->els_code);
			/* hand the raw ELS payload to the auth module */
			fabric->auth.response = (u8 *) els_cmd;
			return;
		}

		bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
		bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
		return;
	}

	/**
	 * look for a matching local port ID
	 */
	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *)qe;
		if (vport->lport.pid == pid) {
			bfa_fcs_port_uf_recv(&vport->lport, fchs, len);
			return;
		}
	}
	/* no PID match: let the base port deal with (and reject) it */
	bfa_trc(fabric->fcs, els_cmd->els_code);
	bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
}
1087
1088/**
1089 * Unsolicited frames to be processed by fabric.
1090 */
1091static void
1092bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1093 u16 len)
1094{
1095 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1096
1097 bfa_trc(fabric->fcs, els_cmd->els_code);
1098
1099 switch (els_cmd->els_code) {
1100 case FC_ELS_FLOGI:
1101 bfa_fcs_fabric_process_flogi(fabric, fchs, len);
1102 break;
1103
1104 default:
1105 /*
1106 * need to generate a LS_RJT
1107 */
1108 break;
1109 }
1110}
1111
/**
 * Process incoming FLOGI: a remote node is logging in to us directly
 * (point-to-point, no fabric), tracked via the pn2n topology state.
 */
static void
bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
			     struct fchs_s *fchs, u16 len)
{
	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
	struct bfa_fcs_port_s *bport = &fabric->bport;

	bfa_trc(fabric->fcs, fchs->s_id);

	fabric->stats.flogi_rcvd++;
	/*
	 * Check port type. It should be 0 = n-port.
	 */
	if (flogi->csp.port_type) {
		/*
		 * @todo: may need to send a LS_RJT
		 */
		bfa_trc(fabric->fcs, flogi->port_name);
		fabric->stats.flogi_rejected++;
		return;
	}

	fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
	/* remember the peer and the exchange to reply on */
	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;

	/*
	 * Send a Flogi Acc
	 */
	bfa_fcs_fabric_send_flogi_acc(fabric);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
}
1147
/**
 * Send a FLOGI ACC in the point-to-point (n2n) topology, accepting the
 * remote node's FLOGI with our own service parameters.
 */
static void
bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
	struct bfa_fcs_port_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
	struct bfa_s *bfa = fabric->fcs->bfa;
	struct bfa_fcxp_s *fcxp;
	u16 reqlen;
	struct fchs_s fchs;

	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
	/**
	 * Do not expect this failure -- expect remote node to retry
	 */
	if (!fcxp)
		return;

	/* reply_oxid was recorded from the received FLOGI header */
	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				    bfa_os_hton3b(FC_FABRIC_PORT),
				    n2n_port->reply_oxid, pcfg->pwwn,
				    pcfg->nwwn, bfa_pport_get_maxfrsize(bfa),
				    bfa_pport_get_rx_bbcredit(bfa));

	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
		      BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
		      bfa_fcs_fabric_flogiacc_comp, fabric,
		      FC_MAX_PDUSZ, 0);	/* Timeout 0 indicates no
					 * response expected
					 */
}
1178
1179/**
1180 * Flogi Acc completion callback.
1181 */
1182static void
1183bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1184 bfa_status_t status, u32 rsp_len,
1185 u32 resid_len, struct fchs_s *rspfchs)
1186{
1187 struct bfa_fcs_fabric_s *fabric = cbarg;
1188
1189 bfa_trc(fabric->fcs, status);
1190}
1191
1192/*
1193 *
1194 * @param[in] fabric - fabric
1195 * @param[in] result - 1
1196 *
1197 * @return - none
1198 */
1199void
1200bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric, enum auth_status status)
1201{
1202 bfa_trc(fabric->fcs, status);
1203
1204 if (status == FC_AUTH_STATE_SUCCESS)
1205 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_SUCCESS);
1206 else
1207 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_FAILED);
1208}
1209
1210/**
1211 * Send AEN notification
1212 */
1213static void
1214bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
1215 enum bfa_port_aen_event event)
1216{
1217 union bfa_aen_data_u aen_data;
1218 struct bfa_log_mod_s *logmod = port->fcs->logm;
1219 wwn_t pwwn = bfa_fcs_port_get_pwwn(port);
1220 wwn_t fwwn = bfa_fcs_port_get_fabric_name(port);
1221 char pwwn_ptr[BFA_STRING_32];
1222 char fwwn_ptr[BFA_STRING_32];
1223
1224 wwn2str(pwwn_ptr, pwwn);
1225 wwn2str(fwwn_ptr, fwwn);
1226
1227 switch (event) {
1228 case BFA_PORT_AEN_FABRIC_NAME_CHANGE:
1229 bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr,
1230 fwwn_ptr);
1231 break;
1232 default:
1233 break;
1234 }
1235
1236 aen_data.port.pwwn = pwwn;
1237 aen_data.port.fwwn = fwwn;
1238}
1239
1240/*
1241 *
1242 * @param[in] fabric - fabric
1243 * @param[in] wwn_t - new fabric name
1244 *
1245 * @return - none
1246 */
1247void
1248bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1249 wwn_t fabric_name)
1250{
1251 bfa_trc(fabric->fcs, fabric_name);
1252
1253 if (fabric->fabric_name == 0) {
1254 /*
1255 * With BRCD switches, we don't get Fabric Name in FLOGI.
1256 * Don't generate a fabric name change event in this case.
1257 */
1258 fabric->fabric_name = fabric_name;
1259 } else {
1260 fabric->fabric_name = fabric_name;
1261 /*
1262 * Generate a Event
1263 */
1264 bfa_fcs_fabric_aen_post(&fabric->bport,
1265 BFA_PORT_AEN_FABRIC_NAME_CHANGE);
1266 }
1267
1268}
1269
/**
 * Not used by FCS.
 */
void
bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
{
	/* Intentionally empty: FCS does not act on FLOGO completion. */
}
1277
1278
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c
new file mode 100644
index 000000000000..d174706b9caa
--- /dev/null
+++ b/drivers/scsi/bfa/fcbuild.c
@@ -0,0 +1,1449 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.c - FC link service frame building and parsing routines
19 */
20
21#include <bfa_os_inc.h>
22#include "fcbuild.h"
23
24/*
25 * static build functions
26 */
27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
28 u16 ox_id);
29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
30 u16 ox_id);
31static struct fchs_s fc_els_req_tmpl;
32static struct fchs_s fc_els_rsp_tmpl;
33static struct fchs_s fc_bls_req_tmpl;
34static struct fchs_s fc_bls_rsp_tmpl;
35static struct fc_ba_acc_s ba_acc_tmpl;
36static struct fc_logi_s plogi_tmpl;
37static struct fc_prli_s prli_tmpl;
38static struct fc_rrq_s rrq_tmpl;
39static struct fchs_s fcp_fchs_tmpl;
40
/**
 * Initialize the static frame-header and payload templates used by the
 * fc_*_build() routines below. Must run once before any builder is
 * called; the templates are then copied into each outgoing frame.
 */
void
fcbuild_init(void)
{
	/*
	 * fc_els_req_tmpl
	 */
	fc_els_req_tmpl.routing = FC_RTG_EXT_LINK;
	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
	fc_els_req_tmpl.type = FC_TYPE_ELS;
	fc_els_req_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
			      FCTL_SI_XFER);
	fc_els_req_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_els_rsp_tmpl
	 */
	fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK;
	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
	fc_els_rsp_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
			      FCTL_END_SEQ | FCTL_SI_XFER);
	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_bls_req_tmpl
	 */
	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
	fc_bls_req_tmpl.type = FC_TYPE_BLS;
	fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * fc_bls_rsp_tmpl
	 */
	fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK;
	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
	fc_bls_rsp_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
			      FCTL_END_SEQ | FCTL_SI_XFER);
	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;

	/*
	 * ba_acc_tmpl
	 */
	ba_acc_tmpl.seq_id_valid = 0;
	ba_acc_tmpl.low_seq_cnt = 0;
	ba_acc_tmpl.high_seq_cnt = 0xFFFF;

	/*
	 * plogi_tmpl -- common service parameters shared by PLOGI, FLOGI,
	 * FDISC and PDISC builders (each overrides what it needs).
	 */
	plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
	plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
	plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
	plogi_tmpl.csp.ciro = 0x1;
	plogi_tmpl.csp.cisc = 0x0;
	plogi_tmpl.csp.altbbcred = 0x0;
	plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
	plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
	plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);

	plogi_tmpl.class3.class_valid = 1;
	plogi_tmpl.class3.sequential = 1;
	plogi_tmpl.class3.conseq = 0xFF;
	plogi_tmpl.class3.ospx = 1;

	/*
	 * prli_tmpl
	 */
	prli_tmpl.command = FC_ELS_PRLI;
	prli_tmpl.pglen = 0x10;
	prli_tmpl.pagebytes = bfa_os_htons(0x0014);
	prli_tmpl.parampage.type = FC_TYPE_FCP;
	prli_tmpl.parampage.imagepair = 1;
	prli_tmpl.parampage.servparams.rxrdisab = 1;

	/*
	 * rrq_tmpl
	 */
	rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;

	/*
	 * fcp_fchs_tmpl
	 */
	fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
	fcp_fchs_tmpl.type = FC_TYPE_FCP;
	fcp_fchs_tmpl.f_ctl =
		bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
	fcp_fchs_tmpl.seq_id = 1;
	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
}
136
/**
 * Build a generic-services (FC_TYPE_SERVICES) frame header.
 *
 * @param[out] fchs   header to fill (fully overwritten)
 * @param[in]  d_id   destination ID
 * @param[in]  s_id   source ID
 * @param[in]  ox_id  originator exchange ID (host order; swapped here)
 */
static void
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
		  u32 ox_id)
{
	bfa_os_memset(fchs, 0, sizeof(struct fchs_s));

	fchs->routing = FC_RTG_FC4_DEV_DATA;
	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
	fchs->type = FC_TYPE_SERVICES;
	fchs->f_ctl =
		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
			      FCTL_SI_XFER);
	fchs->rx_id = FC_RXID_ANY;
	fchs->d_id = (d_id);
	fchs->s_id = (s_id);
	fchs->ox_id = bfa_os_htons(ox_id);

	/**
	 * @todo no need to set ox_id for request
	 *	 no need to set rx_id for response
	 */
}
159
160void
161fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
162 u16 ox_id)
163{
164 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
165 fchs->d_id = (d_id);
166 fchs->s_id = (s_id);
167 fchs->ox_id = bfa_os_htons(ox_id);
168}
169
/**
 * Fill an ELS response header from the response template.
 */
static void
fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
		 u16 ox_id)
{
	bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
	fchs->d_id = d_id;
	fchs->s_id = s_id;
	/*
	 * NOTE(review): unlike fc_els_req_build(), ox_id is stored without
	 * byte swapping -- callers appear to pass an OX_ID taken directly
	 * from a received header (already wire order); confirm before
	 * changing.
	 */
	fchs->ox_id = ox_id;
}
179
180enum fc_parse_status
181fc_els_rsp_parse(struct fchs_s *fchs, int len)
182{
183 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
184 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
185
186 len = len;
187
188 switch (els_cmd->els_code) {
189 case FC_ELS_LS_RJT:
190 if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
191 return (FC_PARSE_BUSY);
192 else
193 return (FC_PARSE_FAILURE);
194
195 case FC_ELS_ACC:
196 return (FC_PARSE_OK);
197 }
198 return (FC_PARSE_OK);
199}
200
/**
 * Fill a basic link service (BLS) response header from its template.
 */
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
		 u16 ox_id)
{
	bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
	fchs->d_id = d_id;
	fchs->s_id = s_id;
	/*
	 * NOTE(review): ox_id is stored without byte swapping, matching
	 * fc_els_rsp_build(); callers appear to pass it already in wire
	 * order -- confirm before changing.
	 */
	fchs->ox_id = ox_id;
}
210
211static u16
212fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
213 u16 ox_id, wwn_t port_name, wwn_t node_name,
214 u16 pdu_size, u8 els_code)
215{
216 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
217
218 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
219
220 plogi->els_cmd.els_code = els_code;
221 if (els_code == FC_ELS_PLOGI)
222 fc_els_req_build(fchs, d_id, s_id, ox_id);
223 else
224 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
225
226 plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);
227
228 bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
229 bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
230
231 return (sizeof(struct fc_logi_s));
232}
233
/**
 * Build a FLOGI request addressed to the fabric F_Port.
 *
 * @param[out] fchs       frame header
 * @param[out] flogi      FLOGI payload
 * @param[in]  s_id       our source ID
 * @param[in]  ox_id      exchange ID (host order)
 * @param[in]  port_name  our port WWN
 * @param[in]  node_name  our node WWN
 * @param[in]  pdu_size   receive data field size to advertise
 * @param[in]  set_npiv   advertise NPIV capability
 * @param[in]  set_auth   advertise authentication capability
 * @param[in]  local_bb_credits  buffer-to-buffer credits to advertise
 *
 * @return payload length in bytes
 */
u16
fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
	       u16 ox_id, wwn_t port_name, wwn_t node_name,
	       u16 pdu_size, u8 set_npiv, u8 set_auth,
	       u16 local_bb_credits)
{
	u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
	u32 *vvl_info;

	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));

	flogi->els_cmd.els_code = FC_ELS_FLOGI;
	fc_els_req_build(fchs, d_id, s_id, ox_id);

	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
	flogi->port_name = port_name;
	flogi->node_name = node_name;

	/*
	 * Set the NPIV Capability Bit ( word 1, bit 31) of Common
	 * Service Parameters.
	 */
	flogi->csp.ciro = set_npiv;

	/* set AUTH capability */
	flogi->csp.security = set_auth;

	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);

	/* Set brcd token in VVL */
	vvl_info = (u32 *)&flogi->vvl[0];

	/* set the flag to indicate the presence of VVL */
	flogi->csp.npiv_supp = 1;	/* @todo. field name is not correct */
	vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD);

	return (sizeof(struct fc_logi_s));
}
272
273u16
274fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
275 u16 ox_id, wwn_t port_name, wwn_t node_name,
276 u16 pdu_size, u16 local_bb_credits)
277{
278 u32 d_id = 0;
279
280 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
281 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
282
283 flogi->els_cmd.els_code = FC_ELS_ACC;
284 flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
285 flogi->port_name = port_name;
286 flogi->node_name = node_name;
287
288 flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
289
290 return (sizeof(struct fc_logi_s));
291}
292
293u16
294fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
295 u16 ox_id, wwn_t port_name, wwn_t node_name,
296 u16 pdu_size)
297{
298 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
299
300 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
301
302 flogi->els_cmd.els_code = FC_ELS_FDISC;
303 fc_els_req_build(fchs, d_id, s_id, ox_id);
304
305 flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
306 flogi->port_name = port_name;
307 flogi->node_name = node_name;
308
309 return (sizeof(struct fc_logi_s));
310}
311
312u16
313fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
314 u16 ox_id, wwn_t port_name, wwn_t node_name,
315 u16 pdu_size)
316{
317 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
318 node_name, pdu_size, FC_ELS_PLOGI);
319}
320
321u16
322fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
323 u16 ox_id, wwn_t port_name, wwn_t node_name,
324 u16 pdu_size)
325{
326 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
327 node_name, pdu_size, FC_ELS_ACC);
328}
329
330enum fc_parse_status
331fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
332{
333 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
334 struct fc_logi_s *plogi;
335 struct fc_ls_rjt_s *ls_rjt;
336
337 switch (els_cmd->els_code) {
338 case FC_ELS_LS_RJT:
339 ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1);
340 if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
341 return (FC_PARSE_BUSY);
342 else
343 return (FC_PARSE_FAILURE);
344 case FC_ELS_ACC:
345 plogi = (struct fc_logi_s *) (fchs + 1);
346 if (len < sizeof(struct fc_logi_s))
347 return (FC_PARSE_FAILURE);
348
349 if (!wwn_is_equal(plogi->port_name, port_name))
350 return (FC_PARSE_FAILURE);
351
352 if (!plogi->class3.class_valid)
353 return (FC_PARSE_FAILURE);
354
355 if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
356 return (FC_PARSE_FAILURE);
357
358 return (FC_PARSE_OK);
359 default:
360 return (FC_PARSE_FAILURE);
361 }
362}
363
364enum fc_parse_status
365fc_plogi_parse(struct fchs_s *fchs)
366{
367 struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
368
369 if (plogi->class3.class_valid != 1)
370 return FC_PARSE_FAILURE;
371
372 if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
373 || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
374 || (plogi->class3.rxsz == 0))
375 return (FC_PARSE_FAILURE);
376
377 return FC_PARSE_OK;
378}
379
380u16
381fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
382 u16 ox_id)
383{
384 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
385
386 fc_els_req_build(fchs, d_id, s_id, ox_id);
387 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
388
389 prli->command = FC_ELS_PRLI;
390 prli->parampage.servparams.initiator = 1;
391 prli->parampage.servparams.retry = 1;
392 prli->parampage.servparams.rec_support = 1;
393 prli->parampage.servparams.task_retry_id = 0;
394 prli->parampage.servparams.confirm = 1;
395
396 return (sizeof(struct fc_prli_s));
397}
398
399u16
400fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
401 u16 ox_id, enum bfa_port_role role)
402{
403 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
404
405 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
406 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
407
408 prli->command = FC_ELS_ACC;
409
410 if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM)
411 prli->parampage.servparams.target = 1;
412 else
413 prli->parampage.servparams.initiator = 1;
414
415 prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
416
417 return (sizeof(struct fc_prli_s));
418}
419
420enum fc_parse_status
421fc_prli_rsp_parse(struct fc_prli_s *prli, int len)
422{
423 if (len < sizeof(struct fc_prli_s))
424 return (FC_PARSE_FAILURE);
425
426 if (prli->command != FC_ELS_ACC)
427 return (FC_PARSE_FAILURE);
428
429 if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD)
430 && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG))
431 return (FC_PARSE_FAILURE);
432
433 if (prli->parampage.servparams.target != 1)
434 return (FC_PARSE_FAILURE);
435
436 return (FC_PARSE_OK);
437}
438
439enum fc_parse_status
440fc_prli_parse(struct fc_prli_s *prli)
441{
442 if (prli->parampage.type != FC_TYPE_FCP)
443 return (FC_PARSE_FAILURE);
444
445 if (!prli->parampage.imagepair)
446 return (FC_PARSE_FAILURE);
447
448 if (!prli->parampage.servparams.initiator)
449 return (FC_PARSE_FAILURE);
450
451 return (FC_PARSE_OK);
452}
453
454u16
455fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
456 u32 s_id, u16 ox_id, wwn_t port_name)
457{
458 fc_els_req_build(fchs, d_id, s_id, ox_id);
459
460 memset(logo, '\0', sizeof(struct fc_logo_s));
461 logo->els_cmd.els_code = FC_ELS_LOGO;
462 logo->nport_id = (s_id);
463 logo->orig_port_name = port_name;
464
465 return (sizeof(struct fc_logo_s));
466}
467
468static u16
469fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
470 u32 s_id, u16 ox_id, wwn_t port_name,
471 wwn_t node_name, u8 els_code)
472{
473 memset(adisc, '\0', sizeof(struct fc_adisc_s));
474
475 adisc->els_cmd.els_code = els_code;
476
477 if (els_code == FC_ELS_ADISC)
478 fc_els_req_build(fchs, d_id, s_id, ox_id);
479 else
480 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
481
482 adisc->orig_HA = 0;
483 adisc->orig_port_name = port_name;
484 adisc->orig_node_name = node_name;
485 adisc->nport_id = (s_id);
486
487 return (sizeof(struct fc_adisc_s));
488}
489
490u16
491fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
492 u32 s_id, u16 ox_id, wwn_t port_name,
493 wwn_t node_name)
494{
495 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
496 node_name, FC_ELS_ADISC);
497}
498
499u16
500fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
501 u32 s_id, u16 ox_id, wwn_t port_name,
502 wwn_t node_name)
503{
504 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
505 node_name, FC_ELS_ACC);
506}
507
508enum fc_parse_status
509fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
510 wwn_t node_name)
511{
512
513 if (len < sizeof(struct fc_adisc_s))
514 return (FC_PARSE_FAILURE);
515
516 if (adisc->els_cmd.els_code != FC_ELS_ACC)
517 return (FC_PARSE_FAILURE);
518
519 if (!wwn_is_equal(adisc->orig_port_name, port_name))
520 return (FC_PARSE_FAILURE);
521
522 return (FC_PARSE_OK);
523}
524
525enum fc_parse_status
526fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
527 wwn_t node_name, wwn_t port_name)
528{
529 struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
530
531 if (adisc->els_cmd.els_code != FC_ELS_ACC)
532 return (FC_PARSE_FAILURE);
533
534 if ((adisc->nport_id == (host_dap))
535 && wwn_is_equal(adisc->orig_port_name, port_name)
536 && wwn_is_equal(adisc->orig_node_name, node_name))
537 return (FC_PARSE_OK);
538
539 return (FC_PARSE_FAILURE);
540}
541
/**
 * Validate an incoming PDISC payload: class 3 must be valid, the
 * receive size sane, and both WWNs must match the known remote port.
 */
enum fc_parse_status
fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
{
	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);

	if (pdisc->class3.class_valid != 1)
		return FC_PARSE_FAILURE;

	/*
	 * NOTE(review): the lower bound here subtracts the header size from
	 * FC_MIN_PDUSZ, unlike fc_plogi_parse() which compares against
	 * FC_MIN_PDUSZ directly -- confirm which form is intended.
	 */
	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
	     (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
	    || (pdisc->class3.rxsz == 0))
		return (FC_PARSE_FAILURE);

	if (!wwn_is_equal(pdisc->port_name, port_name))
		return (FC_PARSE_FAILURE);

	if (!wwn_is_equal(pdisc->node_name, node_name))
		return (FC_PARSE_FAILURE);

	return FC_PARSE_OK;
}
563
564u16
565fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
566{
567 bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
568 fchs->cat_info = FC_CAT_ABTS;
569 fchs->d_id = (d_id);
570 fchs->s_id = (s_id);
571 fchs->ox_id = bfa_os_htons(ox_id);
572
573 return (sizeof(struct fchs_s));
574}
575
576enum fc_parse_status
577fc_abts_rsp_parse(struct fchs_s *fchs, int len)
578{
579 if ((fchs->cat_info == FC_CAT_BA_ACC)
580 || (fchs->cat_info == FC_CAT_BA_RJT))
581 return (FC_PARSE_OK);
582
583 return (FC_PARSE_FAILURE);
584}
585
586u16
587fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
588 u32 s_id, u16 ox_id, u16 rrq_oxid)
589{
590 fc_els_req_build(fchs, d_id, s_id, ox_id);
591
592 /*
593 * build rrq payload
594 */
595 bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
596 rrq->s_id = (s_id);
597 rrq->ox_id = bfa_os_htons(rrq_oxid);
598 rrq->rx_id = FC_RXID_ANY;
599
600 return (sizeof(struct fc_rrq_s));
601}
602
603u16
604fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
605 u16 ox_id)
606{
607 struct fc_els_cmd_s *acc = pld;
608
609 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
610
611 memset(acc, 0, sizeof(struct fc_els_cmd_s));
612 acc->els_code = FC_ELS_ACC;
613
614 return (sizeof(struct fc_els_cmd_s));
615}
616
617u16
618fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
619 u32 s_id, u16 ox_id, u8 reason_code,
620 u8 reason_code_expl)
621{
622 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
623 memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
624
625 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
626 ls_rjt->reason_code = reason_code;
627 ls_rjt->reason_code_expl = reason_code_expl;
628 ls_rjt->vendor_unique = 0x00;
629
630 return (sizeof(struct fc_ls_rjt_s));
631}
632
/**
 * Build a BA_ACC (basic accept) for an ABTS; the payload echoes the
 * header's exchange IDs verbatim.
 */
u16
fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
		u32 s_id, u16 ox_id, u16 rx_id)
{
	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);

	bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));

	/*
	 * NOTE(review): rx_id (like ox_id in fc_bls_rsp_build) is stored
	 * without byte swapping -- presumably callers pass IDs taken from a
	 * received header; confirm before changing.
	 */
	fchs->rx_id = rx_id;

	ba_acc->ox_id = fchs->ox_id;
	ba_acc->rx_id = fchs->rx_id;

	return (sizeof(struct fc_ba_acc_s));
}
648
649u16
650fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
651 u32 d_id, u32 s_id, u16 ox_id)
652{
653 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
654 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
655 els_cmd->els_code = FC_ELS_ACC;
656
657 return (sizeof(struct fc_els_cmd_s));
658}
659
660int
661fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
662{
663 int num_pages = 0;
664 struct fc_prlo_s *prlo;
665 struct fc_tprlo_s *tprlo;
666
667 if (els_code == FC_ELS_PRLO) {
668 prlo = (struct fc_prlo_s *) (fc_frame + 1);
669 num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16;
670 } else {
671 tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
672 num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
673 }
674 return num_pages;
675}
676
677u16
678fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
679 u32 d_id, u32 s_id, u16 ox_id,
680 int num_pages)
681{
682 int page;
683
684 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
685
686 memset(tprlo_acc, 0, (num_pages * 16) + 4);
687 tprlo_acc->command = FC_ELS_ACC;
688
689 tprlo_acc->page_len = 0x10;
690 tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
691
692 for (page = 0; page < num_pages; page++) {
693 tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
694 tprlo_acc->tprlo_acc_params[page].rpa_valid = 0;
695 tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
696 tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
697 tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
698 }
699 return (bfa_os_ntohs(tprlo_acc->payload_len));
700}
701
702u16
703fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
704 u32 d_id, u32 s_id, u16 ox_id,
705 int num_pages)
706{
707 int page;
708
709 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
710
711 memset(prlo_acc, 0, (num_pages * 16) + 4);
712 prlo_acc->command = FC_ELS_ACC;
713 prlo_acc->page_len = 0x10;
714 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
715
716 for (page = 0; page < num_pages; page++) {
717 prlo_acc->prlo_acc_params[page].opa_valid = 0;
718 prlo_acc->prlo_acc_params[page].rpa_valid = 0;
719 prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
720 prlo_acc->prlo_acc_params[page].orig_process_assc = 0;
721 prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
722 }
723
724 return (bfa_os_ntohs(prlo_acc->payload_len));
725}
726
727u16
728fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
729 u32 s_id, u16 ox_id, u32 data_format)
730{
731 fc_els_req_build(fchs, d_id, s_id, ox_id);
732
733 memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
734
735 rnid->els_cmd.els_code = FC_ELS_RNID;
736 rnid->node_id_data_format = data_format;
737
738 return (sizeof(struct fc_rnid_cmd_s));
739}
740
/**
 * Build an RNID ACC. The common identification data is always present;
 * the general topology data is appended only for the discovery data
 * format, which also determines the returned payload length.
 */
u16
fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
		  u32 d_id, u32 s_id, u16 ox_id,
		  u32 data_format,
		  struct fc_rnid_common_id_data_s *common_id_data,
		  struct fc_rnid_general_topology_data_s *gen_topo_data)
{
	memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));

	fc_els_rsp_build(fchs, d_id, s_id, ox_id);

	rnid_acc->els_cmd.els_code = FC_ELS_ACC;
	rnid_acc->node_id_data_format = data_format;
	rnid_acc->common_id_data_length =
		sizeof(struct fc_rnid_common_id_data_s);
	rnid_acc->common_id_data = *common_id_data;

	if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
		/* full payload: common + general topology data */
		rnid_acc->specific_id_data_length =
			sizeof(struct fc_rnid_general_topology_data_s);
		bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
		return (sizeof(struct fc_rnid_acc_s));
	} else {
		/* trailing topology data omitted from the reported length */
		return (sizeof(struct fc_rnid_acc_s) -
			sizeof(struct fc_rnid_general_topology_data_s));
	}

}
769
770u16
771fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
772 u32 s_id, u16 ox_id)
773{
774 fc_els_req_build(fchs, d_id, s_id, ox_id);
775
776 memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
777
778 rpsc->els_cmd.els_code = FC_ELS_RPSC;
779 return (sizeof(struct fc_rpsc_cmd_s));
780}
781
/**
 * Build an RPSC2 (Brocade-extended RPSC) request carrying a list of
 * port IDs, addressed to the destination's domain controller.
 */
u16
fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
	       u32 d_id, u32 s_id, u32 *pid_list,
	       u16 npids)
{
	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
	int i = 0;

	/*
	 * NOTE(review): d_id is passed through bfa_os_hton3b twice (once
	 * above, once here). This is only correct if FC_DOMAIN_CTRLR
	 * expects a wire-order argument -- confirm before changing.
	 */
	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);

	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));

	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
	rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
	rpsc2->num_pids = bfa_os_htons(npids);
	for (i = 0; i < npids; i++)
		rpsc2->pid_list[i].pid = pid_list[i];

	/* payload size: fixed part plus one u32 per additional PID */
	return (sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
						 (sizeof(u32))));
}
803
804u16
805fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
806 u32 d_id, u32 s_id, u16 ox_id,
807 struct fc_rpsc_speed_info_s *oper_speed)
808{
809 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
810
811 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
812
813 rpsc_acc->command = FC_ELS_ACC;
814 rpsc_acc->num_entries = bfa_os_htons(1);
815
816 rpsc_acc->speed_info[0].port_speed_cap =
817 bfa_os_htons(oper_speed->port_speed_cap);
818
819 rpsc_acc->speed_info[0].port_op_speed =
820 bfa_os_htons(oper_speed->port_op_speed);
821
822 return (sizeof(struct fc_rpsc_acc_s));
823
824}
825
826/*
827 * TBD -
828 * . get rid of unnecessary memsets
829 */
830
831u16
832fc_logo_rsp_parse(struct fchs_s *fchs, int len)
833{
834 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
835
836 len = len;
837 if (els_cmd->els_code != FC_ELS_ACC)
838 return FC_PARSE_FAILURE;
839
840 return FC_PARSE_OK;
841}
842
843u16
844fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
845 u16 ox_id, wwn_t port_name, wwn_t node_name,
846 u16 pdu_size)
847{
848 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
849
850 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
851
852 pdisc->els_cmd.els_code = FC_ELS_PDISC;
853 fc_els_req_build(fchs, d_id, s_id, ox_id);
854
855 pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
856 pdisc->port_name = port_name;
857 pdisc->node_name = node_name;
858
859 return (sizeof(struct fc_logi_s));
860}
861
/**
 * Validate a PDISC response, returning a specific FC_PARSE_* code for
 * each failure mode.
 */
u16
fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
{
	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);

	if (len < sizeof(struct fc_logi_s))
		return (FC_PARSE_LEN_INVAL);

	if (pdisc->els_cmd.els_code != FC_ELS_ACC)
		return (FC_PARSE_ACC_INVAL);

	if (!wwn_is_equal(pdisc->port_name, port_name))
		return (FC_PARSE_PWWN_NOT_EQUAL);

	/*
	 * NOTE(review): a class-validity failure is reported as
	 * FC_PARSE_NWWN_NOT_EQUAL, and the node name is never actually
	 * compared -- the code looks misnamed for this check. Verify that
	 * callers only test against FC_PARSE_OK before changing it.
	 */
	if (!pdisc->class3.class_valid)
		return (FC_PARSE_NWWN_NOT_EQUAL);

	if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
		return (FC_PARSE_RXSZ_INVAL);

	return (FC_PARSE_OK);
}
884
885u16
886fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
887 int num_pages)
888{
889 struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
890 int page;
891
892 fc_els_req_build(fchs, d_id, s_id, ox_id);
893 memset(prlo, 0, (num_pages * 16) + 4);
894 prlo->command = FC_ELS_PRLO;
895 prlo->page_len = 0x10;
896 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
897
898 for (page = 0; page < num_pages; page++) {
899 prlo->prlo_params[page].type = FC_TYPE_FCP;
900 prlo->prlo_params[page].opa_valid = 0;
901 prlo->prlo_params[page].rpa_valid = 0;
902 prlo->prlo_params[page].orig_process_assc = 0;
903 prlo->prlo_params[page].resp_process_assc = 0;
904 }
905
906 return (bfa_os_ntohs(prlo->payload_len));
907}
908
909u16
910fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
911{
912 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
913 int num_pages = 0;
914 int page = 0;
915
916 len = len;
917
918 if (prlo->command != FC_ELS_ACC)
919 return (FC_PARSE_FAILURE);
920
921 num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
922
923 for (page = 0; page < num_pages; page++) {
924 if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
925 return FC_PARSE_FAILURE;
926
927 if (prlo->prlo_acc_params[page].opa_valid != 0)
928 return FC_PARSE_FAILURE;
929
930 if (prlo->prlo_acc_params[page].rpa_valid != 0)
931 return FC_PARSE_FAILURE;
932
933 if (prlo->prlo_acc_params[page].orig_process_assc != 0)
934 return FC_PARSE_FAILURE;
935
936 if (prlo->prlo_acc_params[page].resp_process_assc != 0)
937 return FC_PARSE_FAILURE;
938 }
939 return (FC_PARSE_OK);
940
941}
942
943u16
944fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
945 u16 ox_id, int num_pages,
946 enum fc_tprlo_type tprlo_type, u32 tpr_id)
947{
948 struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
949 int page;
950
951 fc_els_req_build(fchs, d_id, s_id, ox_id);
952 memset(tprlo, 0, (num_pages * 16) + 4);
953 tprlo->command = FC_ELS_TPRLO;
954 tprlo->page_len = 0x10;
955 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
956
957 for (page = 0; page < num_pages; page++) {
958 tprlo->tprlo_params[page].type = FC_TYPE_FCP;
959 tprlo->tprlo_params[page].opa_valid = 0;
960 tprlo->tprlo_params[page].rpa_valid = 0;
961 tprlo->tprlo_params[page].orig_process_assc = 0;
962 tprlo->tprlo_params[page].resp_process_assc = 0;
963 if (tprlo_type == FC_GLOBAL_LOGO) {
964 tprlo->tprlo_params[page].global_process_logout = 1;
965 } else if (tprlo_type == FC_TPR_LOGO) {
966 tprlo->tprlo_params[page].tpo_nport_valid = 1;
967 tprlo->tprlo_params[page].tpo_nport_id = (tpr_id);
968 }
969 }
970
971 return (bfa_os_ntohs(tprlo->payload_len));
972}
973
974u16
975fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
976{
977 struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
978 int num_pages = 0;
979 int page = 0;
980
981 len = len;
982
983 if (tprlo->command != FC_ELS_ACC)
984 return (FC_PARSE_ACC_INVAL);
985
986 num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
987
988 for (page = 0; page < num_pages; page++) {
989 if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
990 return (FC_PARSE_NOT_FCP);
991 if (tprlo->tprlo_acc_params[page].opa_valid != 0)
992 return (FC_PARSE_OPAFLAG_INVAL);
993 if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
994 return (FC_PARSE_RPAFLAG_INVAL);
995 if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
996 return (FC_PARSE_OPA_INVAL);
997 if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
998 return (FC_PARSE_RPA_INVAL);
999 }
1000 return (FC_PARSE_OK);
1001}
1002
1003enum fc_parse_status
1004fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1005{
1006 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1007
1008 len = len;
1009 if (els_cmd->els_code != FC_ELS_ACC)
1010 return FC_PARSE_FAILURE;
1011
1012 return FC_PARSE_OK;
1013}
1014
1015u16
1016fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
1017 u16 ox_id, u32 reason_code,
1018 u32 reason_expl)
1019{
1020 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
1021
1022 fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
1023
1024 fchs->cat_info = FC_CAT_BA_RJT;
1025 ba_rjt->reason_code = reason_code;
1026 ba_rjt->reason_expl = reason_expl;
1027 return (sizeof(struct fc_ba_rjt_s));
1028}
1029
1030static void
1031fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
1032{
1033 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
1034 cthdr->rev_id = CT_GS3_REVISION;
1035 cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
1036 cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
1037 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
1038}
1039
1040static void
1041fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
1042{
1043 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
1044 cthdr->rev_id = CT_GS3_REVISION;
1045 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
1046 cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
1047 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
1048}
1049
1050static void
1051fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
1052 u8 sub_type)
1053{
1054 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
1055 cthdr->rev_id = CT_GS3_REVISION;
1056 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
1057 cthdr->gs_sub_type = sub_type;
1058 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
1059}
1060
1061u16
1062fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1063 wwn_t port_name)
1064{
1065
1066 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1067 struct fcgs_gidpn_req_s *gidpn =
1068 (struct fcgs_gidpn_req_s *) (cthdr + 1);
1069 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1070
1071 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1072 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
1073
1074 bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
1075 gidpn->port_name = port_name;
1076 return (sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s));
1077}
1078
1079u16
1080fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1081 u32 port_id)
1082{
1083
1084 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1085 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
1086 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1087
1088 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1089 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
1090
1091 bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
1092 gpnid->dap = port_id;
1093 return (sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s));
1094}
1095
1096u16
1097fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1098 u32 port_id)
1099{
1100
1101 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1102 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
1103 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1104
1105 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1106 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
1107
1108 bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
1109 gnnid->dap = port_id;
1110 return (sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s));
1111}
1112
1113u16
1114fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
1115{
1116 if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
1117 if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
1118 return FC_PARSE_BUSY;
1119 else
1120 return FC_PARSE_FAILURE;
1121 }
1122
1123 return FC_PARSE_OK;
1124}
1125
1126u16
1127fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
1128 u32 s_id, u16 ox_id)
1129{
1130 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1131
1132 fc_els_req_build(fchs, d_id, s_id, ox_id);
1133
1134 bfa_os_memset(scr, 0, sizeof(struct fc_scr_s));
1135 scr->command = FC_ELS_SCR;
1136 scr->reg_func = FC_SCR_REG_FUNC_FULL;
1137 if (set_br_reg)
1138 scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE;
1139
1140 return (sizeof(struct fc_scr_s));
1141}
1142
1143u16
1144fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
1145 u16 ox_id)
1146{
1147 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1148 u16 payldlen;
1149
1150 fc_els_req_build(fchs, d_id, s_id, ox_id);
1151 rscn->command = FC_ELS_RSCN;
1152 rscn->pagelen = sizeof(rscn->event[0]);
1153
1154 payldlen = sizeof(u32) + rscn->pagelen;
1155 rscn->payldlen = bfa_os_htons(payldlen);
1156
1157 rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
1158 rscn->event[0].portid = s_id;
1159
1160 return (sizeof(struct fc_rscn_pl_s));
1161}
1162
1163u16
1164fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1165 enum bfa_port_role roles)
1166{
1167 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1168 struct fcgs_rftid_req_s *rftid =
1169 (struct fcgs_rftid_req_s *) (cthdr + 1);
1170 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
1171 u8 index;
1172
1173 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1174 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
1175
1176 bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
1177
1178 rftid->dap = s_id;
1179
1180 /* By default, FCP FC4 Type is registered */
1181 index = FC_TYPE_FCP >> 5;
1182 type_value = 1 << (FC_TYPE_FCP % 32);
1183 rftid->fc4_type[index] = bfa_os_htonl(type_value);
1184
1185 if (roles & BFA_PORT_ROLE_FCP_IPFC) {
1186 index = FC_TYPE_IP >> 5;
1187 type_value = 1 << (FC_TYPE_IP % 32);
1188 rftid->fc4_type[index] |= bfa_os_htonl(type_value);
1189 }
1190
1191 return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
1192}
1193
1194u16
1195fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
1196 u16 ox_id, u8 *fc4_bitmap,
1197 u32 bitmap_size)
1198{
1199 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1200 struct fcgs_rftid_req_s *rftid =
1201 (struct fcgs_rftid_req_s *) (cthdr + 1);
1202 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1203
1204 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1205 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
1206
1207 bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
1208
1209 rftid->dap = s_id;
1210 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
1211 (bitmap_size < 32 ? bitmap_size : 32));
1212
1213 return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
1214}
1215
1216u16
1217fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1218 u8 fc4_type, u8 fc4_ftrs)
1219{
1220 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1221 struct fcgs_rffid_req_s *rffid =
1222 (struct fcgs_rffid_req_s *) (cthdr + 1);
1223 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1224
1225 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1226 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
1227
1228 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
1229
1230 rffid->dap = s_id;
1231 rffid->fc4ftr_bits = fc4_ftrs;
1232 rffid->fc4_type = fc4_type;
1233
1234 return (sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s));
1235}
1236
1237u16
1238fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1239 u8 *name)
1240{
1241
1242 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1243 struct fcgs_rspnid_req_s *rspnid =
1244 (struct fcgs_rspnid_req_s *) (cthdr + 1);
1245 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1246
1247 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1248 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
1249
1250 bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
1251
1252 rspnid->dap = s_id;
1253 rspnid->spn_len = (u8) strlen((char *)name);
1254 strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
1255
1256 return (sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s));
1257}
1258
1259u16
1260fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1261 u8 fc4_type)
1262{
1263
1264 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1265 struct fcgs_gidft_req_s *gidft =
1266 (struct fcgs_gidft_req_s *) (cthdr + 1);
1267 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1268
1269 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1270
1271 fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
1272
1273 bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
1274 gidft->fc4_type = fc4_type;
1275 gidft->domain_id = 0;
1276 gidft->area_id = 0;
1277
1278 return (sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s));
1279}
1280
1281u16
1282fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1283 wwn_t port_name)
1284{
1285 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1286 struct fcgs_rpnid_req_s *rpnid =
1287 (struct fcgs_rpnid_req_s *) (cthdr + 1);
1288 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1289
1290 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1291 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
1292
1293 bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
1294 rpnid->port_id = port_id;
1295 rpnid->port_name = port_name;
1296
1297 return (sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s));
1298}
1299
1300u16
1301fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1302 wwn_t node_name)
1303{
1304 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1305 struct fcgs_rnnid_req_s *rnnid =
1306 (struct fcgs_rnnid_req_s *) (cthdr + 1);
1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1308
1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1310 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
1311
1312 bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
1313 rnnid->port_id = port_id;
1314 rnnid->node_name = node_name;
1315
1316 return (sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s));
1317}
1318
1319u16
1320fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1321 u32 cos)
1322{
1323 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1324 struct fcgs_rcsid_req_s *rcsid =
1325 (struct fcgs_rcsid_req_s *) (cthdr + 1);
1326 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1327
1328 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1329 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
1330
1331 bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
1332 rcsid->port_id = port_id;
1333 rcsid->cos = cos;
1334
1335 return (sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s));
1336}
1337
1338u16
1339fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1340 u8 port_type)
1341{
1342 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1343 struct fcgs_rptid_req_s *rptid =
1344 (struct fcgs_rptid_req_s *) (cthdr + 1);
1345 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1346
1347 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1348 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
1349
1350 bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
1351 rptid->port_id = port_id;
1352 rptid->port_type = port_type;
1353
1354 return (sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s));
1355}
1356
1357u16
1358fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
1359{
1360 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1361 struct fcgs_ganxt_req_s *ganxt =
1362 (struct fcgs_ganxt_req_s *) (cthdr + 1);
1363 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1364
1365 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1366 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
1367
1368 bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
1369 ganxt->port_id = port_id;
1370
1371 return (sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s));
1372}
1373
1374/*
1375 * Builds fc hdr and ct hdr for FDMI requests.
1376 */
1377u16
1378fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1379 u16 cmd_code)
1380{
1381
1382 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1383 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1384
1385 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1386 fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
1387
1388 return (sizeof(struct ct_hdr_s));
1389}
1390
1391/*
1392 * Given a FC4 Type, this function returns a fc4 type bitmask
1393 */
1394void
1395fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
1396{
1397 u8 index;
1398 u32 *ptr = (u32 *) bit_mask;
1399 u32 type_value;
1400
1401 /*
1402 * @todo : Check for bitmask size
1403 */
1404
1405 index = fc4_type >> 5;
1406 type_value = 1 << (fc4_type % 32);
1407 ptr[index] = bfa_os_htonl(type_value);
1408
1409}
1410
1411/*
1412 * GMAL Request
1413 */
1414u16
1415fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1416{
1417 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1418 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
1419 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1420
1421 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1422 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
1423 CT_GSSUBTYPE_CFGSERVER);
1424
1425 bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
1426 gmal->wwn = wwn;
1427
1428 return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t));
1429}
1430
1431/*
1432 * GFN (Get Fabric Name) Request
1433 */
1434u16
1435fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1436{
1437 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1438 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
1439 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1440
1441 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1442 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
1443 CT_GSSUBTYPE_CFGSERVER);
1444
1445 bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
1446 gfn->wwn = wwn;
1447
1448 return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t));
1449}
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
new file mode 100644
index 000000000000..4d248424f7b3
--- /dev/null
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -0,0 +1,273 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.h - FC link service frame building and parsing routines
19 */
20
21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__
23
24#include <bfa_os_inc.h>
25#include <protocol/fc.h>
26#include <protocol/fcp.h>
27#include <protocol/ct.h>
28#include <defs/bfa_defs_port.h>
29#include <defs/bfa_defs_pport.h>
30
31/*
32 * Utility Macros/functions
33 */
34
35#define fcif_sof_set(_ifhdr, _sof) (_ifhdr)->sof = FC_ ## _sof
36#define fcif_eof_set(_ifhdr, _eof) (_ifhdr)->eof = FC_ ## _eof
37
38#define wwn_is_equal(_wwn1, _wwn2) \
39 (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
40
41#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
42
43/*
44 * Given the fc response length, this routine will return
45 * the length of the actual payload bytes following the CT header.
46 *
47 * Assumes the input response length does not include the crc, eof, etc.
48 */
49static inline u32
50fc_get_ctresp_pyld_len(u32 resp_len)
51{
52 return (resp_len - sizeof(struct ct_hdr_s));
53}
54
55/*
56 * Convert bfa speed to rpsc speed value.
57 */
58static inline enum bfa_pport_speed
59fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
60{
61 switch (speed) {
62
63 case RPSC_OP_SPEED_1G:
64 return BFA_PPORT_SPEED_1GBPS;
65
66 case RPSC_OP_SPEED_2G:
67 return BFA_PPORT_SPEED_2GBPS;
68
69 case RPSC_OP_SPEED_4G:
70 return BFA_PPORT_SPEED_4GBPS;
71
72 case RPSC_OP_SPEED_8G:
73 return BFA_PPORT_SPEED_8GBPS;
74
75 default:
76 return BFA_PPORT_SPEED_UNKNOWN;
77 }
78}
79
80/*
81 * Convert RPSC speed to bfa speed value.
82 */
83static inline enum fc_rpsc_op_speed_s
84fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
85{
86 switch (op_speed) {
87
88 case BFA_PPORT_SPEED_1GBPS:
89 return RPSC_OP_SPEED_1G;
90
91 case BFA_PPORT_SPEED_2GBPS:
92 return RPSC_OP_SPEED_2G;
93
94 case BFA_PPORT_SPEED_4GBPS:
95 return RPSC_OP_SPEED_4G;
96
97 case BFA_PPORT_SPEED_8GBPS:
98 return RPSC_OP_SPEED_8G;
99
100 default:
101 return RPSC_OP_SPEED_NOT_EST;
102 }
103}
/* Result codes returned by the fc_*_parse() routines. */
enum fc_parse_status {
	FC_PARSE_OK = 0,		/* frame parsed successfully */
	FC_PARSE_FAILURE = 1,		/* generic parse failure */
	FC_PARSE_BUSY = 2,		/* CT responder reported logical busy */
	FC_PARSE_LEN_INVAL,		/* payload length invalid */
	FC_PARSE_ACC_INVAL,		/* frame is not an ELS ACC */
	FC_PARSE_PWWN_NOT_EQUAL,	/* port name mismatch */
	FC_PARSE_NWWN_NOT_EQUAL,	/* node name mismatch */
	FC_PARSE_RXSZ_INVAL,		/* receive data field size invalid */
	FC_PARSE_NOT_FCP,		/* service param page type is not FCP */
	FC_PARSE_OPAFLAG_INVAL,		/* originator PA valid flag set */
	FC_PARSE_RPAFLAG_INVAL,		/* responder PA valid flag set */
	FC_PARSE_OPA_INVAL,		/* originator process associator set */
	FC_PARSE_RPA_INVAL,		/* responder process associator set */

};
120
/*
 * Pre-built header/payload templates copied as the starting point when
 * building frames (presumably initialized by fcbuild_init() — confirm).
 */
struct fc_templates_s {
	struct fchs_s fc_els_req;	/* ELS request FC header template */
	struct fchs_s fc_bls_req;	/* BLS request FC header template */
	struct fc_logi_s plogi;		/* PLOGI payload template */
	struct fc_rrq_s rrq;		/* RRQ payload template */
};
127
128void fcbuild_init(void);
129
130u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
131 u32 s_id, u16 ox_id, wwn_t port_name,
132 wwn_t node_name, u16 pdu_size, u8 set_npiv,
133 u8 set_auth, u16 local_bb_credits);
134u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi,
135 u32 s_id, u16 ox_id, wwn_t port_name,
136 wwn_t node_name, u16 pdu_size);
137u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
138 u32 s_id, u16 ox_id, wwn_t port_name,
139 wwn_t node_name, u16 pdu_size,
140 u16 local_bb_credits);
141u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
142 u32 s_id, u16 ox_id, wwn_t port_name,
143 wwn_t node_name, u16 pdu_size);
144enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
145u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
146 u16 ox_id);
147enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
148u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
149 u32 s_id, u16 ox_id, u16 rrq_oxid);
150enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
151u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
152 u16 ox_id, u8 *name);
153u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
154 u16 ox_id, enum bfa_port_role role);
155u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
156 u16 ox_id, u8 *fc4_bitmap,
157 u32 bitmap_size);
158u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
159 u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
160u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
161 u16 ox_id, wwn_t port_name);
162u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
163 u16 ox_id, u32 port_id);
164u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
165 u8 set_br_reg, u32 s_id, u16 ox_id);
166u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
167 u32 s_id, u16 ox_id,
168 wwn_t port_name, wwn_t node_name, u16 pdu_size);
169
170u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
171 u32 d_id, u32 s_id, u16 ox_id,
172 wwn_t port_name, wwn_t node_name);
173enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
174 u32 host_dap,
175 wwn_t node_name, wwn_t port_name);
176enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
177 wwn_t port_name, wwn_t node_name);
178u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
179 u32 d_id, u32 s_id, u16 ox_id,
180 wwn_t port_name, wwn_t node_name);
181u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
182 u32 d_id, u32 s_id, u16 ox_id,
183 u8 reason_code, u8 reason_code_expl);
184u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
185 u32 d_id, u32 s_id, u16 ox_id);
186u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
187 u32 s_id, u16 ox_id);
188enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
189
190u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
191 u32 s_id, u16 ox_id,
192 enum bfa_port_role role);
193u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
194 u32 d_id, u32 s_id, u16 ox_id,
195 u32 data_format);
196u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
197 u32 d_id, u32 s_id, u16 ox_id,
198 u32 data_format,
199 struct fc_rnid_common_id_data_s *common_id_data,
200 struct fc_rnid_general_topology_data_s *
201 gen_topo_data);
202u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
203 u32 d_id, u32 s_id,
204 u32 *pid_list, u16 npids);
205u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
206 u32 d_id, u32 s_id, u16 ox_id);
207u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
208 u32 d_id, u32 s_id, u16 ox_id,
209 struct fc_rpsc_speed_info_s *oper_speed);
210u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
211 u8 fc4_type);
212u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
213 u32 port_id, wwn_t port_name);
214u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
215 u32 port_id, wwn_t node_name);
216u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
217 u32 port_id, u32 cos);
218u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
219 u32 port_id, u8 port_type);
220u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
221 u32 port_id);
222u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo,
223 u32 d_id, u32 s_id, u16 ox_id,
224 wwn_t port_name);
225u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
226 u32 s_id, u16 ox_id);
227u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
228 u16 cmd_code);
229u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
230 wwn_t wwn);
231u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
232 wwn_t wwn);
233void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
234void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
235 u16 ox_id);
236enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
237enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
238 wwn_t port_name);
239enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
240enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
241 wwn_t port_name);
242u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc,
243 u32 d_id, u32 s_id, u16 ox_id,
244 u16 rx_id);
245int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
246u16 fc_tprlo_acc_build(struct fchs_s *fchs,
247 struct fc_tprlo_acc_s *tprlo_acc,
248 u32 d_id, u32 s_id, u16 ox_id,
249 int num_pages);
250u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
251 u32 d_id, u32 s_id, u16 ox_id,
252 int num_pages);
253u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
254u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
255 u16 ox_id, wwn_t port_name, wwn_t node_name,
256 u16 pdu_size);
257u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
258u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
259 u16 ox_id, int num_pages);
260u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
261u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
262 u16 ox_id, int num_pages,
263 enum fc_tprlo_type tprlo_type, u32 tpr_id);
264u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
265u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
266 u16 ox_id, u32 reason_code,
267 u32 reason_expl);
268u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
269 u16 ox_id, u32 port_id);
270u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
271u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
272 u32 s_id, u16 ox_id);
273#endif
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
new file mode 100644
index 000000000000..8ce5d8934677
--- /dev/null
+++ b/drivers/scsi/bfa/fcpim.c
@@ -0,0 +1,844 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include "fcs_fcpim.h"
25#include "fcs_rport.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30#include <fcs/bfa_fcs_fcpim.h>
31#include <fcb/bfa_fcb_fcpim.h>
32#include <aen/bfa_aen_itnim.h>
33
34BFA_TRC_FILE(FCS, FCPIM);
35
36/*
37 * forward declarations
38 */
39static void bfa_fcs_itnim_timeout(void *arg);
40static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
41static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_itnim_prli_response(void *fcsarg,
44 struct bfa_fcxp_s *fcxp,
45 void *cbarg,
46 bfa_status_t req_status,
47 u32 rsp_len,
48 u32 resid_len,
49 struct fchs_s *rsp_fchs);
50static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
51 enum bfa_itnim_aen_event event);
52
53/**
54 * fcs_itnim_sm FCS itnim state machine events
55 */
56
/* Events fed into the FCS itnim state machine. */
enum bfa_fcs_itnim_event {
	BFA_FCS_ITNIM_SM_ONLINE = 1,	/* rport online event */
	BFA_FCS_ITNIM_SM_OFFLINE = 2,	/* rport offline */
	BFA_FCS_ITNIM_SM_FRMSENT = 3,	/* prli frame is sent */
	BFA_FCS_ITNIM_SM_RSP_OK = 4,	/* good response */
	BFA_FCS_ITNIM_SM_RSP_ERROR = 5,	/* error response */
	BFA_FCS_ITNIM_SM_TIMEOUT = 6,	/* delay timeout */
	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7,	/* BFA offline callback */
	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8,	/* BFA online callback */
	BFA_FCS_ITNIM_SM_INITIATOR = 9,	/* rport is initiator */
	BFA_FCS_ITNIM_SM_DELETE = 10,	/* delete event from rport */
	BFA_FCS_ITNIM_SM_PRLO = 11,	/* prlo event from rport */
};
70
71static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
72 enum bfa_fcs_itnim_event event);
73static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
74 enum bfa_fcs_itnim_event event);
75static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
76 enum bfa_fcs_itnim_event event);
77static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
78 enum bfa_fcs_itnim_event event);
79static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
80 enum bfa_fcs_itnim_event event);
81static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
82 enum bfa_fcs_itnim_event event);
83static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
84 enum bfa_fcs_itnim_event event);
85static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
86 enum bfa_fcs_itnim_event event);
87
/*
 * Maps each state-handler function to its externally visible
 * BFA_ITNIM_* state (presumably consumed by a bfa_sm_to_state()-style
 * lookup — confirm).
 */
static struct bfa_sm_table_s itnim_sm_table[] = {
	{BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
	{BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
	{BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
	{BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY},
	{BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE},
	{BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE},
	{BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE},
	{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
};
98
99/**
100 * fcs_itnim_sm FCS itnim state machine
101 */
102
/*
 * State: offline.  Waiting for the rport to come online; an ONLINE
 * event kicks off the PRLI exchange.
 */
static void
bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
			 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_ONLINE:
		/* rport is up: start sending PRLI */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		/* already offline; just acknowledge the rport */
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}

}
133
/*
 * State: prli_send.  Waiting for an fcxp to become available so the
 * PRLI frame can go out (see the fcxp_wqe wait-queue element).
 */
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
			 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FRMSENT:
		/* PRLI went out; wait for the response */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		/* cancel the pending fcxp allocation before leaving */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
167
/*
 * State: prli.  PRLI has been sent; waiting for the response.
 * Outstanding fcxp must be discarded on any exit that abandons the
 * exchange.
 */
static void
bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
			 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_RSP_OK:
		/* PRLI accepted: bring the BFA itnim online */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
		bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
		/* back off and retry after BFA_FCS_RETRY_TIMEOUT */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
		bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
				bfa_fcs_itnim_timeout, itnim,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		/*
		 * dont discard fcxp. accept will reach same state
		 */
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
211
/*
 * State: prli_retry.  A PRLI attempt failed; the retry timer is
 * running and must be stopped on any exit other than the timeout.
 */
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
			 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		/* retry interval elapsed: resend PRLI */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_timer_stop(&itnim->timer);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
246
/*
 * bfa_itnim_online() has been issued; waiting for the BFA completion
 * callback (bfa_cb_itnim_online -> HCB_ONLINE).
 */
static void
bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_HCB_ONLINE:
		/* BFA completed: notify the driver and raise the AEN. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
		bfa_fcb_itnim_online(itnim->itnim_drv);
		bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		/* Back out the in-flight online and ack the rport. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_itnim_offline(itnim->bfa_itnim);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
276
277static void
278bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
279 enum bfa_fcs_itnim_event event)
280{
281 bfa_trc(itnim->fcs, itnim->rport->pwwn);
282 bfa_trc(itnim->fcs, event);
283
284 switch (event) {
285 case BFA_FCS_ITNIM_SM_OFFLINE:
286 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
287 bfa_fcb_itnim_offline(itnim->itnim_drv);
288 bfa_itnim_offline(itnim->bfa_itnim);
289 if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) {
290 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
291 } else {
292 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
293 }
294 break;
295
296 case BFA_FCS_ITNIM_SM_DELETE:
297 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
298 bfa_fcs_itnim_free(itnim);
299 break;
300
301 default:
302 bfa_assert(0);
303 }
304}
305
/*
 * bfa_itnim_offline() has been issued; waiting for the BFA completion
 * callback (bfa_cb_itnim_offline -> HCB_OFFLINE).
 */
static void
bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
		/* Offline completed: ack the rport so it can proceed. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
328
/*
 * This state is set when a discovered rport is also in initiator mode.
 * This ITN is marked as no_op and is not active and will not be turned
 * into online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	/* No-ops while the peer is acting as a pure initiator. */
	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_ONLINE:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
361
362
363
364/**
365 * itnim_private FCS ITNIM private interfaces
366 */
367
/*
 * Log an ITNIM async event (online/offline/disconnect) for this
 * initiator-target nexus and assemble the matching AEN payload.
 */
static void
bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
		       enum bfa_itnim_aen_event event)
{
	struct bfa_fcs_rport_s *rport = itnim->rport;
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = rport->fcs->logm;
	wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
	wwn_t rpwwn = rport->pwwn;
	char lpwwn_ptr[BFA_STRING_32];
	char rpwwn_ptr[BFA_STRING_32];

	/*
	 * Don't post events for well known addresses
	 */
	if (BFA_FCS_PID_IS_WKA(rport->pid))
		return;

	wwn2str(lpwwn_ptr, lpwwn);
	wwn2str(rpwwn_ptr, rpwwn);

	switch (event) {
	case BFA_ITNIM_AEN_ONLINE:
		bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr);
		break;
	case BFA_ITNIM_AEN_OFFLINE:
		bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr);
		break;
	case BFA_ITNIM_AEN_DISCONNECT:
		bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
		break;
	default:
		break;
	}

	/*
	 * NOTE(review): aen_data is filled in below but never handed to
	 * an AEN-post routine within this function -- looks like either
	 * a dead store or a missing post call; confirm against the AEN
	 * framework.
	 */
	aen_data.itnim.vf_id = rport->port->fabric->vf_id;
	aen_data.itnim.ppwwn =
		bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs));
	aen_data.itnim.lpwwn = lpwwn;
	aen_data.itnim.rpwwn = rpwwn;
}
409
/*
 * Build and transmit a PRLI to the remote port.  Also used as the FCXP
 * wait-queue callback: when no fcxp is available the itnim queues
 * itself and re-enters here once one frees up (fcxp_alloced != NULL).
 */
static void
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
	struct bfa_fcs_rport_s *rport = itnim->rport;
	struct bfa_fcs_port_s *port = rport->port;
	struct fchs_s fchs;
	struct bfa_fcxp_s *fcxp;
	int len;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);

	/* Use the fcxp handed in by the wait queue, else allocate one. */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Out of fcxps: park on the wait queue and try later. */
		itnim->stats.fcxp_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
				 bfa_fcs_itnim_send_prli, itnim);
		return;
	}
	itnim->fcxp = fcxp;

	len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid,
			 bfa_fcs_port_get_fcid(port), 0);

	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
		      BFA_FALSE, FC_CLASS_3, len, &fchs,
		      bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ,
		      FC_RA_TOV);

	itnim->stats.prli_sent++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
}
442
/*
 * FCXP completion callback for the PRLI sent by
 * bfa_fcs_itnim_send_prli().  Parses the ELS response and drives the
 * state machine with RSP_OK / RSP_ERROR / INITIATOR.
 */
static void
bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			bfa_status_t req_status, u32 rsp_len,
			u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;
	struct fc_els_cmd_s *els_cmd;
	struct fc_prli_s *prli_resp;
	struct fc_ls_rjt_s *ls_rjt;
	struct fc_prli_params_s *sparams;

	bfa_trc(itnim->fcs, req_status);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		/* Transport-level failure: treat as a PRLI error. */
		itnim->stats.prli_rsp_err++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	if (els_cmd->els_code == FC_ELS_ACC) {
		prli_resp = (struct fc_prli_s *) els_cmd;

		if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) {
			bfa_trc(itnim->fcs, rsp_len);
			/*
			 * Check if this  r-port is also in Initiator mode.
			 * If so, we need to set this ITN as a no-op.
			 */
			if (prli_resp->parampage.servparams.initiator) {
				bfa_trc(itnim->fcs, prli_resp->parampage.type);
				itnim->rport->scsi_function =
					 BFA_RPORT_INITIATOR;
				itnim->stats.prli_rsp_acc++;
				bfa_sm_send_event(itnim,
						  BFA_FCS_ITNIM_SM_INITIATOR);
				return;
			}

			/*
			 * NOTE(review): this path returns without sending
			 * any SM event, leaving the itnim in the prli
			 * state until an OFFLINE/DELETE arrives -- confirm
			 * this is intended rather than sending RSP_ERROR.
			 */
			itnim->stats.prli_rsp_parse_err++;
			return;
		}
		itnim->rport->scsi_function = BFA_RPORT_TARGET;

		/* Cache the negotiated FCP-2 service parameters. */
		sparams = &prli_resp->parampage.servparams;
		itnim->seq_rec = sparams->retry;
		itnim->rec_support = sparams->rec_support;
		itnim->task_retry_id = sparams->task_retry_id;
		itnim->conf_comp = sparams->confirm;

		itnim->stats.prli_rsp_acc++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
	} else {
		/* LS_RJT: record the reason and retry via RSP_ERROR. */
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(itnim->fcs, ls_rjt->reason_code);
		bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);

		itnim->stats.prli_rsp_rjt++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
	}
}
509
510static void
511bfa_fcs_itnim_timeout(void *arg)
512{
513 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg;
514
515 itnim->stats.timeout++;
516 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
517}
518
/*
 * Tear down both halves of the itnim: the BFA (hardware) instance and
 * the driver (bfad) instance.  All callers in this file move the state
 * machine to offline before calling this.
 */
static void
bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
{
	bfa_itnim_delete(itnim->bfa_itnim);
	bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
}
525
526
527
528/**
529 * itnim_public FCS ITNIM public interfaces
530 */
531
/**
 * Called by rport when a new rport is created.
 *
 * Allocates the driver (bfad) side first, then the BFA (hardware)
 * side; on BFA failure the bfad allocation is released again.
 *
 * @param[in] rport - remote port.
 *
 * @return the new itnim, or NULL on allocation failure.
 */
struct bfa_fcs_itnim_s *
bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_port_s *port = rport->port;
	struct bfa_fcs_itnim_s *itnim;
	struct bfad_itnim_s *itnim_drv;
	struct bfa_itnim_s *bfa_itnim;

	/*
	 * call bfad to allocate the itnim
	 */
	bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
	if (itnim == NULL) {
		bfa_trc(port->fcs, rport->pwwn);
		return NULL;
	}

	/*
	 * Initialize itnim
	 */
	itnim->rport = rport;
	itnim->fcs = rport->fcs;
	itnim->itnim_drv = itnim_drv;

	/*
	 * call BFA to create the itnim
	 */
	bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);

	if (bfa_itnim == NULL) {
		/* Undo the bfad allocation before bailing out. */
		bfa_trc(port->fcs, rport->pwwn);
		bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
		bfa_assert(0);
		return NULL;
	}

	/* PRLI service parameters start out un-negotiated. */
	itnim->bfa_itnim = bfa_itnim;
	itnim->seq_rec = BFA_FALSE;
	itnim->rec_support = BFA_FALSE;
	itnim->conf_comp = BFA_FALSE;
	itnim->task_retry_id = BFA_FALSE;

	/*
	 * Set State machine
	 */
	bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);

	return itnim;
}
586
/**
 * Called by rport to delete the instance of FCPIM.
 *
 * @param[in] itnim - the itnim to delete; actual teardown happens in
 *                    the state machine via BFA_FCS_ITNIM_SM_DELETE.
 */
void
bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
{
	bfa_trc(itnim->fcs, itnim->rport->pid);
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
}
598
599/**
600 * Notification from rport that PLOGI is complete to initiate FC-4 session.
601 */
602void
603bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
604{
605 itnim->stats.onlines++;
606
607 if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) {
608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
609 } else {
610 /*
611 * For well known addresses, we set the itnim to initiator
612 * state
613 */
614 itnim->stats.initiator++;
615 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
616 }
617}
618
/**
 * Called by rport to handle a remote device offline.
 */
void
bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
{
	itnim->stats.offlines++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
}
628
/**
 * Called by rport when remote port is known to be an initiator from
 * PRLI received.
 */
void
bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
{
	bfa_trc(itnim->fcs, itnim->rport->pid);
	itnim->stats.initiator++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
640
641/**
642 * Called by rport to check if the itnim is online.
643 */
644bfa_status_t
645bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
646{
647 bfa_trc(itnim->fcs, itnim->rport->pid);
648 switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
649 case BFA_ITNIM_ONLINE:
650 case BFA_ITNIM_INITIATIOR:
651 return BFA_STATUS_OK;
652
653 default:
654 return BFA_STATUS_NO_FCPIM_NEXUS;
655
656 }
657}
658
/**
 * BFA completion callback for bfa_itnim_online().
 * Forwards HCB_ONLINE to the state machine (see sm_hcb_online).
 */
void
bfa_cb_itnim_online(void *cbarg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
}
670
/**
 * BFA completion callback for bfa_itnim_offline().
 * Forwards HCB_OFFLINE to the state machine (see sm_hcb_offline).
 */
void
bfa_cb_itnim_offline(void *cb_arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
}
682
/**
 * Mark the beginning of PATH TOV handling. IO completion callbacks
 * are still pending.
 */
void
bfa_cb_itnim_tov_begin(void *cb_arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	/* Propagate the notification to the driver layer. */
	bfa_fcb_itnim_tov_begin(itnim->itnim_drv);
}
695
/**
 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
 */
void
bfa_cb_itnim_tov(void *cb_arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	/* Propagate the notification to the driver layer. */
	bfa_fcb_itnim_tov(itnim->itnim_drv);
}
707
/**
 * BFA notification to FCS/driver for second level error recovery.
 *
 * At least one I/O request has timed out and target is unresponsive to
 * repeated abort requests. Second level error recovery should be initiated
 * by starting implicit logout and recovery procedures.
 */
void
bfa_cb_itnim_sler(void *cb_arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;

	itnim->stats.sler++;
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	/* Kick off implicit logout of the remote port. */
	bfa_fcs_rport_logo_imp(itnim->rport);
}
724
725struct bfa_fcs_itnim_s *
726bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
727{
728 struct bfa_fcs_rport_s *rport;
729 rport = bfa_fcs_rport_lookup(port, rpwwn);
730
731 if (!rport)
732 return NULL;
733
734 bfa_assert(rport->itnim != NULL);
735 return (rport->itnim);
736}
737
738bfa_status_t
739bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
740 struct bfa_itnim_attr_s *attr)
741{
742 struct bfa_fcs_itnim_s *itnim = NULL;
743
744 itnim = bfa_fcs_itnim_lookup(port, rpwwn);
745
746 if (itnim == NULL)
747 return BFA_STATUS_NO_FCPIM_NEXUS;
748
749 attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
750 attr->retry = itnim->seq_rec;
751 attr->rec_support = itnim->rec_support;
752 attr->conf_comp = itnim->conf_comp;
753 attr->task_retry_id = itnim->task_retry_id;
754
755 return BFA_STATUS_OK;
756}
757
758bfa_status_t
759bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
760 struct bfa_itnim_stats_s *stats)
761{
762 struct bfa_fcs_itnim_s *itnim = NULL;
763
764 bfa_assert(port != NULL);
765
766 itnim = bfa_fcs_itnim_lookup(port, rpwwn);
767
768 if (itnim == NULL)
769 return BFA_STATUS_NO_FCPIM_NEXUS;
770
771 bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
772
773 return BFA_STATUS_OK;
774}
775
776bfa_status_t
777bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
778{
779 struct bfa_fcs_itnim_s *itnim = NULL;
780
781 bfa_assert(port != NULL);
782
783 itnim = bfa_fcs_itnim_lookup(port, rpwwn);
784
785 if (itnim == NULL)
786 return BFA_STATUS_NO_FCPIM_NEXUS;
787
788 bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
789 return BFA_STATUS_OK;
790}
791
/*
 * Unsolicited frame receive handler for FCP-IM.  Only ELS frames are
 * examined; anything else is silently ignored.
 */
void
bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
			u16 len)
{
	struct fc_els_cmd_s *els_cmd;

	bfa_trc(itnim->fcs, fchs->type);

	if (fchs->type != FC_TYPE_ELS)
		return;

	/* ELS payload immediately follows the FC header. */
	els_cmd = (struct fc_els_cmd_s *) (fchs + 1);

	bfa_trc(itnim->fcs, els_cmd->els_code);

	switch (els_cmd->els_code) {
	case FC_ELS_PRLO:
		/* bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_PRLO); */
		break;

	default:
		/*
		 * NOTE(review): an unexpected ELS code arriving from the
		 * wire trips this assert -- confirm this is intended and
		 * only active in debug builds.
		 */
		bfa_assert(0);
	}
}
816
/**
 * Pause the itnim -- placeholder, currently a no-op.
 */
void
bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim)
{
}
821
/**
 * Resume the itnim -- placeholder, currently a no-op.
 */
void
bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
{
}
826
/**
 * Module initialization
 */
void
bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs)
{
	/* Intentionally empty: FCPIM needs no module-level setup. */
}
834
/**
 * Module cleanup
 */
void
bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs)
{
	/* Nothing to tear down; just signal exit completion to FCS. */
	bfa_fcs_modexit_comp(fcs);
}
843
844
diff --git a/drivers/scsi/bfa/fcptm.c b/drivers/scsi/bfa/fcptm.c
new file mode 100644
index 000000000000..8c8b08c72e7a
--- /dev/null
+++ b/drivers/scsi/bfa/fcptm.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * This file contains dummy FCPTM routines to aid in Initiator Mode only
20 * compilation of OS driver.
21 *
22 */
23
24#include "bfa_os_inc.h"
25#include "fcs_rport.h"
26#include "fcs_fcptm.h"
27#include "fcs/bfa_fcs_rport.h"
28
/*
 * All routines below are no-op stubs: target mode (FCPTM) is not
 * supported in this initiator-only build (see file header).
 */

/* Stub: target-mode instances are never created. */
struct bfa_fcs_tin_s *
bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport)
{
	return NULL;
}

/* Stub: nothing to delete. */
void
bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin)
{
}

/* Stub: offline notification ignored. */
void
bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin)
{
}

/* Stub: online notification ignored. */
void
bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin)
{
}

/* Stub: inbound PRLI for target mode ignored. */
void
bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
{
}

/* Stub: unsolicited frames for target mode ignored. */
void
bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
{
}

/* Stub: pause ignored. */
void
bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin)
{
}

/* Stub: resume ignored. */
void
bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin)
{
}
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h
new file mode 100644
index 000000000000..deee685e8478
--- /dev/null
+++ b/drivers/scsi/bfa/fcs.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs.h FCS module functions
20 */
21
22
23#ifndef __FCS_H__
24#define __FCS_H__
25
26#define __fcs_min_cfg(__fcs) (__fcs)->min_cfg
27
28void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
29
30#endif /* __FCS_H__ */
diff --git a/drivers/scsi/bfa/fcs_auth.h b/drivers/scsi/bfa/fcs_auth.h
new file mode 100644
index 000000000000..65d155fea3d7
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_auth.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
 * fcs_auth.h FCS authentication interfaces
20 */
21
22
23#ifndef __FCS_AUTH_H__
24#define __FCS_AUTH_H__
25
26#include <fcs/bfa_fcs.h>
27#include <fcs/bfa_fcs_vport.h>
28#include <fcs/bfa_fcs_lport.h>
29
30/*
31 * fcs friend functions: only between fcs modules
32 */
33void bfa_fcs_auth_uf_recv(struct bfa_fcs_fabric_s *fabric, int len);
34void bfa_fcs_auth_start(struct bfa_fcs_fabric_s *fabric);
35void bfa_fcs_auth_stop(struct bfa_fcs_fabric_s *fabric);
36
#endif /* __FCS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
new file mode 100644
index 000000000000..eee960820f86
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
 * fcs_fabric.h FCS fabric interfaces
20 */
21
22#ifndef __FCS_FABRIC_H__
23#define __FCS_FABRIC_H__
24
25#include <fcs/bfa_fcs.h>
26#include <fcs/bfa_fcs_vport.h>
27#include <fcs/bfa_fcs_lport.h>
28
/*
 * fcs friend functions: only between fcs modules
 */
32void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
33void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
34void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
35void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
36void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
37void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
38 struct bfa_fcs_vport_s *vport);
39void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
40 struct bfa_fcs_vport_s *vport);
41int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
42struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
43 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
44void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
45void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
46 struct fchs_s *fchs, u16 len);
47u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
48bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
49enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
50void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
51void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
52
53bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
54 struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg,
55 struct bfad_vf_s *vf_drv);
56void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric,
57 enum auth_status status);
58
59void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
60 wwn_t fabric_name);
61#endif /* __FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
new file mode 100644
index 000000000000..61e9e2687de3
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_fcpim.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __FCS_FCPIM_H__
18#define __FCS_FCPIM_H__
19
20#include <defs/bfa_defs_port.h>
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_rport.h>
23
24/*
25 * Following routines are from FCPIM and will be called by rport.
26 */
27struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
28void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
29void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
30void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
31bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
32
33void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
34void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
35void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
36
37/*
 * Module init/cleanup routines.
39 */
40void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs);
41void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs);
42void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
43 u16 len);
44#endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcptm.h b/drivers/scsi/bfa/fcs_fcptm.h
new file mode 100644
index 000000000000..ffff0829fd31
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_fcptm.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_FCPTM_H__
19#define __FCS_FCPTM_H__
20
21#include <defs/bfa_defs_port.h>
22#include <fcs/bfa_fcs_lport.h>
23#include <fcs/bfa_fcs_rport.h>
24
25/*
26 * Following routines are from FCPTM and will be called by rport.
27 */
28struct bfa_fcs_tin_s *bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport);
29void bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin);
30void bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin);
31void bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin);
32void bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
33 u16 len);
34void bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin);
35void bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin);
36
37/*
 * Module init/cleanup routines.
39 */
40void bfa_fcs_fcptm_modinit(struct bfa_fcs_s *fcs);
41void bfa_fcs_fcptm_modexit(struct bfa_fcs_s *fcs);
42void bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
43 u16 len);
44
45#endif /* __FCS_FCPTM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcxp.h b/drivers/scsi/bfa/fcs_fcxp.h
new file mode 100644
index 000000000000..8277fe9c2b70
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_fcxp.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_fcxp.h FCXP helper macros for FCS
20 */
21
22
23#ifndef __FCS_FCXP_H__
24#define __FCS_FCXP_H__
25
26#define bfa_fcs_fcxp_alloc(__fcs) \
27 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
28
29#endif /* __FCS_FCXP_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
new file mode 100644
index 000000000000..ae744ba35671
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_lport.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_lport.h FCS logical port interfaces
20 */
21
22#ifndef __FCS_LPORT_H__
23#define __FCS_LPORT_H__
24
25#define __VPORT_H__
26#include <defs/bfa_defs_port.h>
27#include <bfa_svc.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <fcs/bfa_fcs_rport.h>
30#include <fcs/bfa_fcs_vport.h>
31#include <fcs_fabric.h>
32#include <fcs_ms.h>
33#include <cs/bfa_q.h>
34#include <fcbuild.h>
35
36/*
37 * PID used in P2P/N2N ( In Big Endian)
38 */
39#define N2N_LOCAL_PID 0x010000
40#define N2N_REMOTE_PID 0x020000
41
42/*
43 * Misc Timeouts
44 */
45/*
46 * To be used when spawning a timer before retrying a failed command. Milli
47 * Secs.
48 */
49#define BFA_FCS_RETRY_TIMEOUT 2000
50
51/*
52 * Check for Port/Vport Mode/Role
53 */
54#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
55 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
56
57#define BFA_FCS_VPORT_IS_TARGET_MODE(port) \
58 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
59
60#define BFA_FCS_VPORT_IS_IPFC_MODE(port) \
61 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IPFC)
62
63/*
64 * Is this a Well Known Address
65 */
66#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
67
68/*
69 * Pointer to elements within Port
70 */
71#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
72#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
73#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
74#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
75#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
76
77/*
78 * handler for unsolicied frames
79 */
80void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
81 u16 len);
82
83/*
84 * Following routines will be called by Fabric to indicate port
85 * online/offline to vport.
86 */
87void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
88 u16 vf_id, struct bfa_port_cfg_s *port_cfg,
89 struct bfa_fcs_vport_s *vport);
90void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
91void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
92void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
93bfa_boolean_t bfa_fcs_port_is_online(struct bfa_fcs_port_s *port);
94
95/*
96 * Lookup rport based on PID
97 */
98struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pid(
99 struct bfa_fcs_port_s *port, u32 pid);
100
101/*
102 * Lookup rport based on PWWN
103 */
104struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pwwn(
105 struct bfa_fcs_port_s *port, wwn_t pwwn);
106struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_nwwn(
107 struct bfa_fcs_port_s *port, wwn_t nwwn);
108void bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
109 struct bfa_fcs_rport_s *rport);
110void bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
111 struct bfa_fcs_rport_s *rport);
112
113void bfa_fcs_port_modinit(struct bfa_fcs_s *fcs);
114void bfa_fcs_port_modexit(struct bfa_fcs_s *fcs);
115void bfa_fcs_port_lip(struct bfa_fcs_port_s *port);
116
117#endif /* __FCS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_ms.h b/drivers/scsi/bfa/fcs_ms.h
new file mode 100644
index 000000000000..b6a8c12876f4
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_ms.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_ms.h FCS ms interfaces
20 */
21#ifndef __FCS_MS_H__
22#define __FCS_MS_H__
23
24/* MS FCS routines */
25void bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port);
26void bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port);
27void bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port);
28void bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port);
29
30/* FDMI FCS routines */
31void bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms);
32void bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms);
33void bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms);
34
35#endif
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
new file mode 100644
index 000000000000..abb65191dd27
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_port.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_pport.h FCS physical port interfaces
20 */
21
22
23#ifndef __FCS_PPORT_H__
24#define __FCS_PPORT_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs);
30void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs);
31
32#endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
new file mode 100644
index 000000000000..f601e9d74236
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_rport.h FCS rport interfaces and defines
20 */
21
22#ifndef __FCS_RPORT_H__
23#define __FCS_RPORT_H__
24
25#include <fcs/bfa_fcs_rport.h>
26
27void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs);
28void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs);
29
30void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
31 u16 len);
32void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
33
34struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_port_s *port,
35 u32 pid);
36void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
37void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
38void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
39void bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
40 struct fc_logi_s *plogi_rsp);
41void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port,
42 struct fchs_s *rx_fchs,
43 struct fc_logi_s *plogi);
44void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
45 struct fc_logi_s *plogi);
46void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
47void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
48void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
49void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport);
50void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
51int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
52struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port,
53 wwn_t wwn);
54
55
56/* Rport Features */
57void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
58void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
59void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
60
61#endif /* __FCS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_trcmod.h b/drivers/scsi/bfa/fcs_trcmod.h
new file mode 100644
index 000000000000..41b5ae8d7644
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_trcmod.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_trcmod.h BFA FCS trace modules
20 */
21
22#ifndef __FCS_TRCMOD_H__
23#define __FCS_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
/* FCS trace-module identifiers, one per submodule; passed to
 * BFA_TRC_FILE() to tag trace entries (see use in fdmi.c). Values are
 * consumed by the external trace utility — append-only per the note
 * above; never renumber or reuse an ID. */
enum {
	BFA_TRC_FCS_FABRIC = 1,
	BFA_TRC_FCS_VFAPI = 2,
	BFA_TRC_FCS_PORT = 3,
	BFA_TRC_FCS_VPORT = 4,
	BFA_TRC_FCS_VP_API = 5,
	BFA_TRC_FCS_VPS = 6,
	BFA_TRC_FCS_RPORT = 7,
	BFA_TRC_FCS_FCPIM = 8,
	BFA_TRC_FCS_FCPTM = 9,
	BFA_TRC_FCS_NS = 10,
	BFA_TRC_FCS_SCN = 11,
	BFA_TRC_FCS_LOOP = 12,
	BFA_TRC_FCS_UF = 13,
	BFA_TRC_FCS_PPORT = 14,
	BFA_TRC_FCS_FCPIP = 15,
	BFA_TRC_FCS_PORT_API = 16,
	BFA_TRC_FCS_RPORT_API = 17,
	BFA_TRC_FCS_AUTH = 18,
	BFA_TRC_FCS_N2N = 19,
	BFA_TRC_FCS_MS = 20,
	BFA_TRC_FCS_FDMI = 21,
	BFA_TRC_FCS_RPORT_FTRS = 22,
};
55
56#endif /* __FCS_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
new file mode 100644
index 000000000000..96f1bdcb31ed
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_uf.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_uf.h FCS unsolicited frame receive
20 */
21
22
23#ifndef __FCS_UF_H__
24#define __FCS_UF_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs);
30void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs);
31
32#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
new file mode 100644
index 000000000000..9e80b6a97b7f
--- /dev/null
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_VPORT_H__
19#define __FCS_VPORT_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23#include <defs/bfa_defs_pci.h>
24
25/*
 * Module init/cleanup routines.
27 */
28
29void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs);
30void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs);
31
32void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
33void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
34void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
35void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
36u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs);
37
38#endif /* __FCS_VPORT_H__ */
39
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
new file mode 100644
index 000000000000..b845eb272c78
--- /dev/null
+++ b/drivers/scsi/bfa/fdmi.c
@@ -0,0 +1,1223 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
 * fdmi.c BFA FCS FDMI (Fabric Device Management Interface)
20 */
21
22
23#include <bfa.h>
24#include <bfa_svc.h>
25#include "fcs_lport.h"
26#include "fcs_rport.h"
27#include "lport_priv.h"
28#include "fcs_trcmod.h"
29#include "fcs_fcxp.h"
30#include <fcs/bfa_fcs_fdmi.h>
31
32BFA_TRC_FILE(FCS, FDMI);
33
34#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
35
36/*
37 * forward declarations
38 */
39static void bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg,
40 struct bfa_fcxp_s *fcxp_alloced);
41static void bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_fdmi_rhba_response(void *fcsarg,
46 struct bfa_fcxp_s *fcxp,
47 void *cbarg,
48 bfa_status_t req_status,
49 u32 rsp_len,
50 u32 resid_len,
51 struct fchs_s *rsp_fchs);
52static void bfa_fcs_port_fdmi_rprt_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_fdmi_rpa_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_fdmi_timeout(void *arg);
67static u16 bfa_fcs_port_fdmi_build_rhba_pyld(
68 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
69static u16 bfa_fcs_port_fdmi_build_rprt_pyld(
70 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
71static u16 bfa_fcs_port_fdmi_build_rpa_pyld(
72 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
73static u16 bfa_fcs_port_fdmi_build_portattr_block(
74 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
75void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
76 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
77void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
78 struct bfa_fcs_fdmi_port_attr_s *port_attr);
79/**
80 * fcs_fdmi_sm FCS FDMI state machine
81 */
82
83/**
84 * FDMI State Machine events
85 */
86enum port_fdmi_event {
87 FDMISM_EVENT_PORT_ONLINE = 1,
88 FDMISM_EVENT_PORT_OFFLINE = 2,
89 FDMISM_EVENT_RSP_OK = 4,
90 FDMISM_EVENT_RSP_ERROR = 5,
91 FDMISM_EVENT_TIMEOUT = 6,
92 FDMISM_EVENT_RHBA_SENT = 7,
93 FDMISM_EVENT_RPRT_SENT = 8,
94 FDMISM_EVENT_RPA_SENT = 9,
95};
96
97static void bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
98 enum port_fdmi_event event);
99static void bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
100 enum port_fdmi_event event);
101static void bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
102 enum port_fdmi_event event);
103static void bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
104 enum port_fdmi_event event);
105static void bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
106 enum port_fdmi_event event);
107static void bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
108 enum port_fdmi_event event);
109static void bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
110 enum port_fdmi_event event);
111static void bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
112 enum port_fdmi_event event);
113static void bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
114 enum port_fdmi_event event);
115static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
116 enum port_fdmi_event event);
117static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
118 enum port_fdmi_event event);
119/**
120 * Start in offline state - awaiting MS to send start.
121 */
122static void
123bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
124 enum port_fdmi_event event)
125{
126 struct bfa_fcs_port_s *port = fdmi->ms->port;
127
128 bfa_trc(port->fcs, port->port_cfg.pwwn);
129 bfa_trc(port->fcs, event);
130
131 fdmi->retry_cnt = 0;
132
133 switch (event) {
134 case FDMISM_EVENT_PORT_ONLINE:
135 if (port->vport) {
136 /*
137 * For Vports, register a new port.
138 */
139 bfa_sm_set_state(fdmi,
140 bfa_fcs_port_fdmi_sm_sending_rprt);
141 bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
142 } else {
143 /*
144 * For a base port, we should first register the HBA
145 * atribute. The HBA attribute also contains the base
146 * port registration.
147 */
148 bfa_sm_set_state(fdmi,
149 bfa_fcs_port_fdmi_sm_sending_rhba);
150 bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
151 }
152 break;
153
154 case FDMISM_EVENT_PORT_OFFLINE:
155 break;
156
157 default:
158 bfa_assert(0);
159 }
160}
161
162static void
163bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
164 enum port_fdmi_event event)
165{
166 struct bfa_fcs_port_s *port = fdmi->ms->port;
167
168 bfa_trc(port->fcs, port->port_cfg.pwwn);
169 bfa_trc(port->fcs, event);
170
171 switch (event) {
172 case FDMISM_EVENT_RHBA_SENT:
173 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba);
174 break;
175
176 case FDMISM_EVENT_PORT_OFFLINE:
177 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
178 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
179 &fdmi->fcxp_wqe);
180 break;
181
182 default:
183 bfa_assert(0);
184 }
185}
186
187static void
188bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
189 enum port_fdmi_event event)
190{
191 struct bfa_fcs_port_s *port = fdmi->ms->port;
192
193 bfa_trc(port->fcs, port->port_cfg.pwwn);
194 bfa_trc(port->fcs, event);
195
196 switch (event) {
197 case FDMISM_EVENT_RSP_ERROR:
198 /*
199 * if max retries have not been reached, start timer for a
200 * delayed retry
201 */
202 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
203 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba_retry);
204 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
205 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
206 fdmi, BFA_FCS_RETRY_TIMEOUT);
207 } else {
208 /*
209 * set state to offline
210 */
211 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
212 }
213 break;
214
215 case FDMISM_EVENT_RSP_OK:
216 /*
217 * Initiate Register Port Attributes
218 */
219 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
220 fdmi->retry_cnt = 0;
221 bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
222 break;
223
224 case FDMISM_EVENT_PORT_OFFLINE:
225 bfa_fcxp_discard(fdmi->fcxp);
226 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
227 break;
228
229 default:
230 bfa_assert(0);
231 }
232}
233
234static void
235bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
236 enum port_fdmi_event event)
237{
238 struct bfa_fcs_port_s *port = fdmi->ms->port;
239
240 bfa_trc(port->fcs, port->port_cfg.pwwn);
241 bfa_trc(port->fcs, event);
242
243 switch (event) {
244 case FDMISM_EVENT_TIMEOUT:
245 /*
246 * Retry Timer Expired. Re-send
247 */
248 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rhba);
249 bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
250 break;
251
252 case FDMISM_EVENT_PORT_OFFLINE:
253 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
254 bfa_timer_stop(&fdmi->timer);
255 break;
256
257 default:
258 bfa_assert(0);
259 }
260}
261
262/*
263* RPRT : Register Port
264 */
265static void
266bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
267 enum port_fdmi_event event)
268{
269 struct bfa_fcs_port_s *port = fdmi->ms->port;
270
271 bfa_trc(port->fcs, port->port_cfg.pwwn);
272 bfa_trc(port->fcs, event);
273
274 switch (event) {
275 case FDMISM_EVENT_RPRT_SENT:
276 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt);
277 break;
278
279 case FDMISM_EVENT_PORT_OFFLINE:
280 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
281 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
282 &fdmi->fcxp_wqe);
283 break;
284
285 default:
286 bfa_assert(0);
287 }
288}
289
290static void
291bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
292 enum port_fdmi_event event)
293{
294 struct bfa_fcs_port_s *port = fdmi->ms->port;
295
296 bfa_trc(port->fcs, port->port_cfg.pwwn);
297 bfa_trc(port->fcs, event);
298
299 switch (event) {
300 case FDMISM_EVENT_RSP_ERROR:
301 /*
302 * if max retries have not been reached, start timer for a
303 * delayed retry
304 */
305 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
306 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt_retry);
307 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
308 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
309 fdmi, BFA_FCS_RETRY_TIMEOUT);
310
311 } else {
312 /*
313 * set state to offline
314 */
315 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
316 fdmi->retry_cnt = 0;
317 }
318 break;
319
320 case FDMISM_EVENT_RSP_OK:
321 fdmi->retry_cnt = 0;
322 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
323 break;
324
325 case FDMISM_EVENT_PORT_OFFLINE:
326 bfa_fcxp_discard(fdmi->fcxp);
327 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
328 break;
329
330 default:
331 bfa_assert(0);
332 }
333}
334
335static void
336bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
337 enum port_fdmi_event event)
338{
339 struct bfa_fcs_port_s *port = fdmi->ms->port;
340
341 bfa_trc(port->fcs, port->port_cfg.pwwn);
342 bfa_trc(port->fcs, event);
343
344 switch (event) {
345 case FDMISM_EVENT_TIMEOUT:
346 /*
347 * Retry Timer Expired. Re-send
348 */
349 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rprt);
350 bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
351 break;
352
353 case FDMISM_EVENT_PORT_OFFLINE:
354 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
355 bfa_timer_stop(&fdmi->timer);
356 break;
357
358 default:
359 bfa_assert(0);
360 }
361}
362
363/*
364 * Register Port Attributes
365 */
366static void
367bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
368 enum port_fdmi_event event)
369{
370 struct bfa_fcs_port_s *port = fdmi->ms->port;
371
372 bfa_trc(port->fcs, port->port_cfg.pwwn);
373 bfa_trc(port->fcs, event);
374
375 switch (event) {
376 case FDMISM_EVENT_RPA_SENT:
377 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa);
378 break;
379
380 case FDMISM_EVENT_PORT_OFFLINE:
381 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
382 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
383 &fdmi->fcxp_wqe);
384 break;
385
386 default:
387 bfa_assert(0);
388 }
389}
390
391static void
392bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
393 enum port_fdmi_event event)
394{
395 struct bfa_fcs_port_s *port = fdmi->ms->port;
396
397 bfa_trc(port->fcs, port->port_cfg.pwwn);
398 bfa_trc(port->fcs, event);
399
400 switch (event) {
401 case FDMISM_EVENT_RSP_ERROR:
402 /*
403 * if max retries have not been reached, start timer for a
404 * delayed retry
405 */
406 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
407 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa_retry);
408 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
409 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
410 fdmi, BFA_FCS_RETRY_TIMEOUT);
411 } else {
412 /*
413 * set state to offline
414 */
415 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
416 fdmi->retry_cnt = 0;
417 }
418 break;
419
420 case FDMISM_EVENT_RSP_OK:
421 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
422 fdmi->retry_cnt = 0;
423 break;
424
425 case FDMISM_EVENT_PORT_OFFLINE:
426 bfa_fcxp_discard(fdmi->fcxp);
427 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
428 break;
429
430 default:
431 bfa_assert(0);
432 }
433}
434
435static void
436bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
437 enum port_fdmi_event event)
438{
439 struct bfa_fcs_port_s *port = fdmi->ms->port;
440
441 bfa_trc(port->fcs, port->port_cfg.pwwn);
442 bfa_trc(port->fcs, event);
443
444 switch (event) {
445 case FDMISM_EVENT_TIMEOUT:
446 /*
447 * Retry Timer Expired. Re-send
448 */
449 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
450 bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
451 break;
452
453 case FDMISM_EVENT_PORT_OFFLINE:
454 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
455 bfa_timer_stop(&fdmi->timer);
456 break;
457
458 default:
459 bfa_assert(0);
460 }
461}
462
463static void
464bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
465 enum port_fdmi_event event)
466{
467 struct bfa_fcs_port_s *port = fdmi->ms->port;
468
469 bfa_trc(port->fcs, port->port_cfg.pwwn);
470 bfa_trc(port->fcs, event);
471
472 switch (event) {
473 case FDMISM_EVENT_PORT_OFFLINE:
474 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
475 break;
476
477 default:
478 bfa_assert(0);
479 }
480}
481
482
483/**
484* RHBA : Register HBA Attributes.
485 */
486static void
487bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
488{
489 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
490 struct bfa_fcs_port_s *port = fdmi->ms->port;
491 struct fchs_s fchs;
492 int len, attr_len;
493 struct bfa_fcxp_s *fcxp;
494 u8 *pyld;
495
496 bfa_trc(port->fcs, port->port_cfg.pwwn);
497
498 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
499 if (!fcxp) {
500 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
501 bfa_fcs_port_fdmi_send_rhba, fdmi);
502 return;
503 }
504 fdmi->fcxp = fcxp;
505
506 pyld = bfa_fcxp_get_reqbuf(fcxp);
507 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
508
509 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
510 FDMI_RHBA);
511
512 attr_len = bfa_fcs_port_fdmi_build_rhba_pyld(fdmi,
513 (u8 *) ((struct ct_hdr_s *) pyld + 1));
514
515 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
516 FC_CLASS_3, (len + attr_len), &fchs,
517 bfa_fcs_port_fdmi_rhba_response, (void *)fdmi,
518 FC_MAX_PDUSZ, FC_RA_TOV);
519
520 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
521}
522
523static u16
524bfa_fcs_port_fdmi_build_rhba_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
525 u8 *pyld)
526{
527 struct bfa_fcs_port_s *port = fdmi->ms->port;
528 struct bfa_fcs_fdmi_hba_attr_s hba_attr; /* @todo */
529 struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; /* @todo */
530 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
531 struct fdmi_attr_s *attr;
532 u8 *curr_ptr;
533 u16 len, count;
534
535 /*
536 * get hba attributes
537 */
538 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
539
540 rhba->hba_id = bfa_fcs_port_get_pwwn(port);
541 rhba->port_list.num_ports = bfa_os_htonl(1);
542 rhba->port_list.port_entry = bfa_fcs_port_get_pwwn(port);
543
544 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
545
546 count = 0;
547 len += sizeof(rhba->hba_attr_blk.attr_count);
548
549 /*
550 * fill out the invididual entries of the HBA attrib Block
551 */
552 curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
553
554 /*
555 * Node Name
556 */
557 attr = (struct fdmi_attr_s *) curr_ptr;
558 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
559 attr->len = sizeof(wwn_t);
560 memcpy(attr->value, &bfa_fcs_port_get_nwwn(port), attr->len);
561 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
562 len += attr->len;
563 count++;
564 attr->len =
565 bfa_os_htons(attr->len + sizeof(attr->type) +
566 sizeof(attr->len));
567
568 /*
569 * Manufacturer
570 */
571 attr = (struct fdmi_attr_s *) curr_ptr;
572 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
573 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
574 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
575 /* variable fields need to be 4 byte aligned */
576 attr->len = fc_roundup(attr->len, sizeof(u32));
577 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
578 len += attr->len;
579 count++;
580 attr->len =
581 bfa_os_htons(attr->len + sizeof(attr->type) +
582 sizeof(attr->len));
583
584 /*
585 * Serial Number
586 */
587 attr = (struct fdmi_attr_s *) curr_ptr;
588 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
589 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
590 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
591 /* variable fields need to be 4 byte aligned */
592 attr->len = fc_roundup(attr->len, sizeof(u32));
593 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
594 len += attr->len;
595 count++;
596 attr->len =
597 bfa_os_htons(attr->len + sizeof(attr->type) +
598 sizeof(attr->len));
599
600 /*
601 * Model
602 */
603 attr = (struct fdmi_attr_s *) curr_ptr;
604 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
605 attr->len = (u16) strlen(fcs_hba_attr->model);
606 memcpy(attr->value, fcs_hba_attr->model, attr->len);
607 /* variable fields need to be 4 byte aligned */
608 attr->len = fc_roundup(attr->len, sizeof(u32));
609 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
610 len += attr->len;
611 count++;
612 attr->len =
613 bfa_os_htons(attr->len + sizeof(attr->type) +
614 sizeof(attr->len));
615
616 /*
617 * Model Desc
618 */
619 attr = (struct fdmi_attr_s *) curr_ptr;
620 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
621 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
622 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
623 /* variable fields need to be 4 byte aligned */
624 attr->len = fc_roundup(attr->len, sizeof(u32));
625 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
626 len += attr->len;
627 count++;
628 attr->len =
629 bfa_os_htons(attr->len + sizeof(attr->type) +
630 sizeof(attr->len));
631
632 /*
633 * H/W Version
634 */
635 if (fcs_hba_attr->hw_version[0] != '\0') {
636 attr = (struct fdmi_attr_s *) curr_ptr;
637 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
638 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
639 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
640 /* variable fields need to be 4 byte aligned */
641 attr->len = fc_roundup(attr->len, sizeof(u32));
642 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
643 len += attr->len;
644 count++;
645 attr->len =
646 bfa_os_htons(attr->len + sizeof(attr->type) +
647 sizeof(attr->len));
648 }
649
650 /*
651 * Driver Version
652 */
653 attr = (struct fdmi_attr_s *) curr_ptr;
654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
655 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
656 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
657 /* variable fields need to be 4 byte aligned */
658 attr->len = fc_roundup(attr->len, sizeof(u32));
659 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
660 len += attr->len;;
661 count++;
662 attr->len =
663 bfa_os_htons(attr->len + sizeof(attr->type) +
664 sizeof(attr->len));
665
666 /*
667 * Option Rom Version
668 */
669 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
670 attr = (struct fdmi_attr_s *) curr_ptr;
671 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
672 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
673 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
674 /* variable fields need to be 4 byte aligned */
675 attr->len = fc_roundup(attr->len, sizeof(u32));
676 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
677 len += attr->len;
678 count++;
679 attr->len =
680 bfa_os_htons(attr->len + sizeof(attr->type) +
681 sizeof(attr->len));
682 }
683
684 /*
685 * f/w Version = driver version
686 */
687 attr = (struct fdmi_attr_s *) curr_ptr;
688 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
689 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
690 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
691 /* variable fields need to be 4 byte aligned */
692 attr->len = fc_roundup(attr->len, sizeof(u32));
693 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
694 len += attr->len;
695 count++;
696 attr->len =
697 bfa_os_htons(attr->len + sizeof(attr->type) +
698 sizeof(attr->len));
699
700 /*
701 * OS Name
702 */
703 if (fcs_hba_attr->os_name[0] != '\0') {
704 attr = (struct fdmi_attr_s *) curr_ptr;
705 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
706 attr->len = (u16) strlen(fcs_hba_attr->os_name);
707 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
708 /* variable fields need to be 4 byte aligned */
709 attr->len = fc_roundup(attr->len, sizeof(u32));
710 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
711 len += attr->len;
712 count++;
713 attr->len =
714 bfa_os_htons(attr->len + sizeof(attr->type) +
715 sizeof(attr->len));
716 }
717
718 /*
719 * MAX_CT_PAYLOAD
720 */
721 attr = (struct fdmi_attr_s *) curr_ptr;
722 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
723 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
724 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
725 len += attr->len;
726 count++;
727 attr->len =
728 bfa_os_htons(attr->len + sizeof(attr->type) +
729 sizeof(attr->len));
730
731 /*
732 * Update size of payload
733 */
734 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
735
736 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
737 return len;
738}
739
740static void
741bfa_fcs_port_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
742 void *cbarg, bfa_status_t req_status,
743 u32 rsp_len, u32 resid_len,
744 struct fchs_s *rsp_fchs)
745{
746 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
747 struct bfa_fcs_port_s *port = fdmi->ms->port;
748 struct ct_hdr_s *cthdr = NULL;
749
750 bfa_trc(port->fcs, port->port_cfg.pwwn);
751
752 /*
753 * Sanity Checks
754 */
755 if (req_status != BFA_STATUS_OK) {
756 bfa_trc(port->fcs, req_status);
757 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
758 return;
759 }
760
761 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
762 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
763
764 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
765 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
766 return;
767 }
768
769 bfa_trc(port->fcs, cthdr->reason_code);
770 bfa_trc(port->fcs, cthdr->exp_code);
771 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
772}
773
774/**
775* RPRT : Register Port
776 */
777static void
778bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
779{
780 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
781 struct bfa_fcs_port_s *port = fdmi->ms->port;
782 struct fchs_s fchs;
783 u16 len, attr_len;
784 struct bfa_fcxp_s *fcxp;
785 u8 *pyld;
786
787 bfa_trc(port->fcs, port->port_cfg.pwwn);
788
789 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
790 if (!fcxp) {
791 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
792 bfa_fcs_port_fdmi_send_rprt, fdmi);
793 return;
794 }
795 fdmi->fcxp = fcxp;
796
797 pyld = bfa_fcxp_get_reqbuf(fcxp);
798 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
799
800 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
801 FDMI_RPRT);
802
803 attr_len = bfa_fcs_port_fdmi_build_rprt_pyld(fdmi,
804 (u8 *) ((struct ct_hdr_s *) pyld + 1));
805
806 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
807 FC_CLASS_3, len + attr_len, &fchs,
808 bfa_fcs_port_fdmi_rprt_response, (void *)fdmi,
809 FC_MAX_PDUSZ, FC_RA_TOV);
810
811 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
812}
813
/**
 * Build the Port Attribute Block used by the RPA and RPRT commands.
 *
 * Writes an attribute count followed by one FDMI attribute entry
 * (type, length, value) per port attribute into @pyld and returns
 * the total number of bytes written (count field + entry headers +
 * values).
 *
 * Note the in-place byte-order dance repeated for each entry:
 * attr->len is first kept in HOST order while the value is copied and
 * the running totals (len, count, curr_ptr) are updated, and only then
 * overwritten with the NETWORK-order on-wire entry length, which also
 * includes the type/len header.  `len` itself accumulates value bytes
 * only; the per-entry header bytes are added once at the end using
 * `count`.
 */
static u16
bfa_fcs_port_fdmi_build_portattr_block(struct bfa_fcs_port_fdmi_s *fdmi,
			u8 *pyld)
{
	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
	struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
	struct fdmi_attr_s *attr;
	u8 *curr_ptr;
	u16 len;
	u8 count = 0;

	/*
	 * get port attributes
	 */
	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);

	/* The count field itself is part of the reported block length. */
	len = sizeof(port_attrib->attr_count);

	/*
	 * fill out the individual entries
	 */
	curr_ptr = (u8 *) &port_attrib->port_attr;

	/*
	 * FC4 Types
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Supported Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
	attr->len = sizeof(fcs_port_attr.supp_speed);
	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * current Port Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
	attr->len = sizeof(fcs_port_attr.curr_speed);
	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * max frame size
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
	attr->len = sizeof(fcs_port_attr.max_frm_size);
	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * OS Device Name (optional; emitted only when non-empty)
	 */
	if (fcs_port_attr.os_device_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}
	/*
	 * Host Name (optional; emitted only when non-empty)
	 */
	if (fcs_port_attr.host_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
		attr->len = (u16) strlen(fcs_port_attr.host_name);
		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}

	/*
	 * Update size of payload: add the per-entry headers now that the
	 * number of emitted entries is known.
	 */
	port_attrib->attr_count = bfa_os_htonl(count);
	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
	return len;
}
940
941static u16
942bfa_fcs_port_fdmi_build_rprt_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
943 u8 *pyld)
944{
945 struct bfa_fcs_port_s *port = fdmi->ms->port;
946 struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
947 u16 len;
948
949 rprt->hba_id = bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
950 rprt->port_name = bfa_fcs_port_get_pwwn(port);
951
952 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
953 (u8 *) &rprt->port_attr_blk);
954
955 len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
956
957 return len;
958}
959
960static void
961bfa_fcs_port_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
962 void *cbarg, bfa_status_t req_status,
963 u32 rsp_len, u32 resid_len,
964 struct fchs_s *rsp_fchs)
965{
966 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
967 struct bfa_fcs_port_s *port = fdmi->ms->port;
968 struct ct_hdr_s *cthdr = NULL;
969
970 bfa_trc(port->fcs, port->port_cfg.pwwn);
971
972 /*
973 * Sanity Checks
974 */
975 if (req_status != BFA_STATUS_OK) {
976 bfa_trc(port->fcs, req_status);
977 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
978 return;
979 }
980
981 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
982 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
983
984 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
985 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
986 return;
987 }
988
989 bfa_trc(port->fcs, cthdr->reason_code);
990 bfa_trc(port->fcs, cthdr->exp_code);
991 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
992}
993
994/**
995* RPA : Register Port Attributes.
996 */
997static void
998bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
999{
1000 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
1001 struct bfa_fcs_port_s *port = fdmi->ms->port;
1002 struct fchs_s fchs;
1003 u16 len, attr_len;
1004 struct bfa_fcxp_s *fcxp;
1005 u8 *pyld;
1006
1007 bfa_trc(port->fcs, port->port_cfg.pwwn);
1008
1009 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1010 if (!fcxp) {
1011 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1012 bfa_fcs_port_fdmi_send_rpa, fdmi);
1013 return;
1014 }
1015 fdmi->fcxp = fcxp;
1016
1017 pyld = bfa_fcxp_get_reqbuf(fcxp);
1018 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1019
1020 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
1021 FDMI_RPA);
1022
1023 attr_len = bfa_fcs_port_fdmi_build_rpa_pyld(fdmi,
1024 (u8 *) ((struct ct_hdr_s *) pyld + 1));
1025
1026 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1027 FC_CLASS_3, len + attr_len, &fchs,
1028 bfa_fcs_port_fdmi_rpa_response, (void *)fdmi,
1029 FC_MAX_PDUSZ, FC_RA_TOV);
1030
1031 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
1032}
1033
1034static u16
1035bfa_fcs_port_fdmi_build_rpa_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
1036 u8 *pyld)
1037{
1038 struct bfa_fcs_port_s *port = fdmi->ms->port;
1039 struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
1040 u16 len;
1041
1042 rpa->port_name = bfa_fcs_port_get_pwwn(port);
1043
1044 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
1045 (u8 *) &rpa->port_attr_blk);
1046
1047 len += sizeof(rpa->port_name);
1048
1049 return len;
1050}
1051
1052static void
1053bfa_fcs_port_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1054 void *cbarg, bfa_status_t req_status,
1055 u32 rsp_len, u32 resid_len,
1056 struct fchs_s *rsp_fchs)
1057{
1058 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
1059 struct bfa_fcs_port_s *port = fdmi->ms->port;
1060 struct ct_hdr_s *cthdr = NULL;
1061
1062 bfa_trc(port->fcs, port->port_cfg.pwwn);
1063
1064 /*
1065 * Sanity Checks
1066 */
1067 if (req_status != BFA_STATUS_OK) {
1068 bfa_trc(port->fcs, req_status);
1069 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1070 return;
1071 }
1072
1073 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1074 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1075
1076 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1077 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
1078 return;
1079 }
1080
1081 bfa_trc(port->fcs, cthdr->reason_code);
1082 bfa_trc(port->fcs, cthdr->exp_code);
1083 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1084}
1085
1086static void
1087bfa_fcs_port_fdmi_timeout(void *arg)
1088{
1089 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)arg;
1090
1091 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
1092}
1093
1094void
1095bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
1096 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
1097{
1098 struct bfa_fcs_port_s *port = fdmi->ms->port;
1099 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1100 struct bfa_adapter_attr_s adapter_attr;
1101
1102 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
1103 bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s));
1104
1105 bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr);
1106
1107 strncpy(hba_attr->manufacturer, adapter_attr.manufacturer,
1108 sizeof(adapter_attr.manufacturer));
1109
1110 strncpy(hba_attr->serial_num, adapter_attr.serial_num,
1111 sizeof(adapter_attr.serial_num));
1112
1113 strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model));
1114
1115 strncpy(hba_attr->model_desc, adapter_attr.model_descr,
1116 sizeof(hba_attr->model_desc));
1117
1118 strncpy(hba_attr->hw_version, adapter_attr.hw_ver,
1119 sizeof(hba_attr->hw_version));
1120
1121 strncpy(hba_attr->driver_version, (char *)driver_info->version,
1122 sizeof(hba_attr->driver_version));
1123
1124 strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver,
1125 sizeof(hba_attr->option_rom_ver));
1126
1127 strncpy(hba_attr->fw_version, adapter_attr.fw_ver,
1128 sizeof(hba_attr->fw_version));
1129
1130 strncpy(hba_attr->os_name, driver_info->host_os_name,
1131 sizeof(hba_attr->os_name));
1132
1133 /*
1134 * If there is a patch level, append it to the os name along with a
1135 * separator
1136 */
1137 if (driver_info->host_os_patch[0] != '\0') {
1138 strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1139 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1140 strncat(hba_attr->os_name, driver_info->host_os_patch,
1141 sizeof(driver_info->host_os_patch));
1142 }
1143
1144 hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
1145
1146}
1147
1148void
1149bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
1150 struct bfa_fcs_fdmi_port_attr_s *port_attr)
1151{
1152 struct bfa_fcs_port_s *port = fdmi->ms->port;
1153 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1154 struct bfa_pport_attr_s pport_attr;
1155
1156 bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
1157
1158 /*
1159 * get pport attributes from hal
1160 */
1161 bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
1162
1163 /*
1164 * get FC4 type Bitmask
1165 */
1166 fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
1167
1168 /*
1169 * Supported Speeds
1170 */
1171 port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
1172
1173 /*
1174 * Current Speed
1175 */
1176 port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
1177
1178 /*
1179 * Max PDU Size.
1180 */
1181 port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
1182
1183 /*
1184 * OS device Name
1185 */
1186 strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
1187 sizeof(port_attr->os_device_name));
1188
1189 /*
1190 * Host name
1191 */
1192 strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
1193 sizeof(port_attr->host_name));
1194
1195}
1196
1197
1198void
1199bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
1200{
1201 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1202
1203 fdmi->ms = ms;
1204 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
1205}
1206
1207void
1208bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms)
1209{
1210 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1211
1212 fdmi->ms = ms;
1213 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
1214}
1215
1216void
1217bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms)
1218{
1219 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1220
1221 fdmi->ms = ms;
1222 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
1223}
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
new file mode 100644
index 000000000000..da8cac093d3d
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_AEN_H__
18#define __BFA_AEN_H__
19
20#include "defs/bfa_defs_aen.h"
21
22#define BFA_AEN_MAX_ENTRY 512
23
24extern s32 bfa_aen_max_cfg_entry;
/* Async event notification (AEN) state with an embedded circular list. */
struct bfa_aen_s {
	void *bfad;		/* owning driver instance (opaque) */
	s32 max_entry;		/* entries of list[] in use (ring size) */
	s32 write_index;	/* ring write position into list[] */
	s32 read_index;		/* ring read position into list[] */
	u32 bfad_num;		/* presumably the adapter instance number -- confirm */
	u32 seq_num;		/* event sequence counter -- confirm semantics */
	void (*aen_cb_notify)(void *bfad);	/* notification callback */
	void (*gettimeofday)(struct bfa_timeval_s *tv);	/* timestamp hook */
	struct bfa_trc_mod_s *trcmod;	/* trace module */
	struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
};
37
38
39/**
40 * Public APIs
41 */
/* Set the global configured AEN ring size (no validation is performed). */
static inline void
bfa_aen_set_max_cfg_entry(int max_entry)
{
	bfa_aen_max_cfg_entry = max_entry;
}
47
/* Return the global configured AEN ring size. */
static inline s32
bfa_aen_get_max_cfg_entry(void)
{
	return bfa_aen_max_cfg_entry;
}
53
54static inline s32
55bfa_aen_get_meminfo(void)
56{
57 return (sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry());
58}
59
/* Current write index of the AEN ring. */
static inline s32
bfa_aen_get_wi(struct bfa_aen_s *aen)
{
	return aen->write_index;
}
65
/* Current read index of the AEN ring. */
static inline s32
bfa_aen_get_ri(struct bfa_aen_s *aen)
{
	return aen->read_index;
}
71
/*
 * Number of entries pending between @read_index and the current write
 * position; the +max_entry / %max_entry pair handles ring wrap-around.
 */
static inline s32
bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index)
{
	return ((aen->write_index + aen->max_entry) - read_index)
		% aen->max_entry;
}
78
79s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
80 void *bfad, u32 inst_id, void (*aen_cb_notify)(void *),
81 void (*gettimeofday)(struct bfa_timeval_s *));
82
83s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
84 int aen_type, union bfa_aen_data_u *aen_data);
85
86s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry,
87 s32 entry_space, s32 rii, s32 *ri_arr,
88 s32 ri_arr_cnt);
89
90s32 bfa_aen_get_inst(struct bfa_aen_s *aen);
91
92#endif /* __BFA_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h b/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
new file mode 100644
index 000000000000..260d3ea1cab3
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ADAPTER Module */
19#ifndef __bfa_aen_adapter_h__
20#define __bfa_aen_adapter_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ADAPTER_ADD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_ADD)
27#define BFA_AEN_ADAPTER_REMOVE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_REMOVE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h b/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
new file mode 100644
index 000000000000..12cd7aab5d53
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_AUDIT Module */
19#ifndef __bfa_aen_audit_h__
20#define __bfa_aen_audit_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_AUDIT_AUTH_ENABLE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_ENABLE)
27#define BFA_AEN_AUDIT_AUTH_DISABLE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_DISABLE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h b/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
new file mode 100644
index 000000000000..507d0b58d149
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ETHPORT Module */
19#ifndef __bfa_aen_ethport_h__
20#define __bfa_aen_ethport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ETHPORT_LINKUP \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKUP)
27#define BFA_AEN_ETHPORT_LINKDOWN \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKDOWN)
29#define BFA_AEN_ETHPORT_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_ENABLE)
31#define BFA_AEN_ETHPORT_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_DISABLE)
33
34#endif
35
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
new file mode 100644
index 000000000000..71378b446b69
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_IOC Module */
19#ifndef __bfa_aen_ioc_h__
20#define __bfa_aen_ioc_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_IOC_HBGOOD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBGOOD)
27#define BFA_AEN_IOC_HBFAIL \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBFAIL)
29#define BFA_AEN_IOC_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_ENABLE)
31#define BFA_AEN_IOC_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE)
33#define BFA_AEN_IOC_FWMISMATCH \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH)
35
36#endif
37
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h b/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
new file mode 100644
index 000000000000..a7d8ddcfef99
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ITNIM Module */
19#ifndef __bfa_aen_itnim_h__
20#define __bfa_aen_itnim_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ITNIM_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_ONLINE)
27#define BFA_AEN_ITNIM_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_OFFLINE)
29#define BFA_AEN_ITNIM_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_DISCONNECT)
31
32#endif
33
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h b/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
new file mode 100644
index 000000000000..5a8ebb65193f
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_LPORT Module */
19#ifndef __bfa_aen_lport_h__
20#define __bfa_aen_lport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_LPORT_NEW \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW)
27#define BFA_AEN_LPORT_DELETE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE)
29#define BFA_AEN_LPORT_ONLINE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_ONLINE)
31#define BFA_AEN_LPORT_OFFLINE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_OFFLINE)
33#define BFA_AEN_LPORT_DISCONNECT \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DISCONNECT)
35#define BFA_AEN_LPORT_NEW_PROP \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_PROP)
37#define BFA_AEN_LPORT_DELETE_PROP \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_PROP)
39#define BFA_AEN_LPORT_NEW_STANDARD \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_STANDARD)
41#define BFA_AEN_LPORT_DELETE_STANDARD \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_STANDARD)
43#define BFA_AEN_LPORT_NPIV_DUP_WWN \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_DUP_WWN)
45#define BFA_AEN_LPORT_NPIV_FABRIC_MAX \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_FABRIC_MAX)
47#define BFA_AEN_LPORT_NPIV_UNKNOWN \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_UNKNOWN)
49
50#endif
51
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_port.h b/drivers/scsi/bfa/include/aen/bfa_aen_port.h
new file mode 100644
index 000000000000..9add905a622d
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_port.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_PORT Module */
19#ifndef __bfa_aen_port_h__
20#define __bfa_aen_port_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_PORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ONLINE)
27#define BFA_AEN_PORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_OFFLINE)
29#define BFA_AEN_PORT_RLIR \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_RLIR)
31#define BFA_AEN_PORT_SFP_INSERT \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_INSERT)
33#define BFA_AEN_PORT_SFP_REMOVE \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_REMOVE)
35#define BFA_AEN_PORT_SFP_POM \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_POM)
37#define BFA_AEN_PORT_ENABLE \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ENABLE)
39#define BFA_AEN_PORT_DISABLE \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISABLE)
41#define BFA_AEN_PORT_AUTH_ON \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_ON)
43#define BFA_AEN_PORT_AUTH_OFF \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_OFF)
45#define BFA_AEN_PORT_DISCONNECT \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISCONNECT)
47#define BFA_AEN_PORT_QOS_NEG \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_QOS_NEG)
49#define BFA_AEN_PORT_FABRIC_NAME_CHANGE \
50 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_FABRIC_NAME_CHANGE)
51#define BFA_AEN_PORT_SFP_ACCESS_ERROR \
52 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_ACCESS_ERROR)
53#define BFA_AEN_PORT_SFP_UNSUPPORT \
54 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_UNSUPPORT)
55
56#endif
57
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h b/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
new file mode 100644
index 000000000000..7e4be1fd5e15
--- /dev/null
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_RPORT Module */
19#ifndef __bfa_aen_rport_h__
20#define __bfa_aen_rport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_RPORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_ONLINE)
27#define BFA_AEN_RPORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_OFFLINE)
29#define BFA_AEN_RPORT_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_DISCONNECT)
31#define BFA_AEN_RPORT_QOS_PRIO \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_PRIO)
33#define BFA_AEN_RPORT_QOS_FLOWID \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_FLOWID)
35
36#endif
37
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
new file mode 100644
index 000000000000..64c1412c5703
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_H__
18#define __BFA_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_debug.h>
22#include <cs/bfa_q.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25#include <cs/bfa_plog.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_ioc.h>
28#include <defs/bfa_defs_iocfc.h>
29#include <aen/bfa_aen.h>
30#include <bfi/bfi.h>
31
32struct bfa_s;
33#include <bfa_intr_priv.h>
34
35struct bfa_pcidev_s;
36
37/**
38 * PCI devices supported by the current BFA
39 */
40struct bfa_pciid_s {
41 u16 device_id;
42 u16 vendor_id;
43};
44
45extern char bfa_version[];
46
47/**
48 * BFA Power Mgmt Commands
49 */
50enum bfa_pm_cmd {
51 BFA_PM_CTL_D0 = 0,
52 BFA_PM_CTL_D1 = 1,
53 BFA_PM_CTL_D2 = 2,
54 BFA_PM_CTL_D3 = 3,
55};
56
57/**
58 * BFA memory resources
59 */
60enum bfa_mem_type {
61 BFA_MEM_TYPE_KVA = 1, /*! Kernel Virtual Memory *(non-dma-able) */
62 BFA_MEM_TYPE_DMA = 2, /*! DMA-able memory */
63 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
64};
65
66struct bfa_mem_elem_s {
67 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
68 u32 mem_len; /* Total Length in Bytes */
69 u8 *kva; /* kernel virtual address */
70 u64 dma; /* dma address if DMA memory */
71 u8 *kva_curp; /* kva allocation cursor */
72 u64 dma_curp; /* dma allocation cursor */
73};
74
75struct bfa_meminfo_s {
76 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
77};
78#define bfa_meminfo_kva(_m) \
79 (_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp
80#define bfa_meminfo_dma_virt(_m) \
81 (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp
82#define bfa_meminfo_dma_phys(_m) \
83 (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp
84
85/**
86 * Generic Scatter Gather Element used by driver
87 */
88struct bfa_sge_s {
89 u32 sg_len;
90 void *sg_addr;
91};
92
93#define bfa_sge_to_be(__sge) do { \
94 ((u32 *)(__sge))[0] = bfa_os_htonl(((u32 *)(__sge))[0]); \
95 ((u32 *)(__sge))[1] = bfa_os_htonl(((u32 *)(__sge))[1]); \
96 ((u32 *)(__sge))[2] = bfa_os_htonl(((u32 *)(__sge))[2]); \
97} while (0)
98
99
100/*
101 * bfa stats interfaces
102 */
103#define bfa_stats(_mod, _stats) (_mod)->stats._stats ++
104
105#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
107#define bfa_ioc_clear_stats(__bfa) \
108 bfa_ioc_clr_stats(&(__bfa)->ioc)
109
110/*
111 * bfa API functions
112 */
113void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
114void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
115void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
116void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
117 struct bfa_meminfo_s *meminfo);
118void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
119 struct bfa_meminfo_s *meminfo,
120 struct bfa_pcidev_s *pcidev);
121void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
122void bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod);
123void bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen);
124void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
125void bfa_detach(struct bfa_s *bfa);
126void bfa_init(struct bfa_s *bfa);
127void bfa_start(struct bfa_s *bfa);
128void bfa_stop(struct bfa_s *bfa);
129void bfa_attach_fcs(struct bfa_s *bfa);
130void bfa_cb_init(void *bfad, bfa_status_t status);
131void bfa_cb_stop(void *bfad, bfa_status_t status);
132void bfa_cb_updateq(void *bfad, bfa_status_t status);
133
134bfa_boolean_t bfa_intx(struct bfa_s *bfa);
135void bfa_isr_enable(struct bfa_s *bfa);
136void bfa_isr_disable(struct bfa_s *bfa);
137void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
138 u32 *num_vecs, u32 *max_vec_bit);
139#define bfa_msix(__bfa, __vec) (__bfa)->msix.handler[__vec](__bfa, __vec)
140
141void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
142void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
143void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
144
145typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
146void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
147bfa_status_t bfa_iocfc_get_stats(struct bfa_s *bfa,
148 struct bfa_iocfc_stats_s *stats,
149 bfa_cb_ioc_t cbfn, void *cbarg);
150bfa_status_t bfa_iocfc_clear_stats(struct bfa_s *bfa,
151 bfa_cb_ioc_t cbfn, void *cbarg);
152void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
153
154void bfa_adapter_get_attr(struct bfa_s *bfa,
155 struct bfa_adapter_attr_s *ad_attr);
156u64 bfa_adapter_get_id(struct bfa_s *bfa);
157
158bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
159 struct bfa_iocfc_intr_attr_s *attr);
160
161void bfa_iocfc_enable(struct bfa_s *bfa);
162void bfa_iocfc_disable(struct bfa_s *bfa);
163void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
164void bfa_cb_ioc_disable(void *bfad);
165void bfa_timer_tick(struct bfa_s *bfa);
166#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
167 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
168
169/*
170 * BFA debug API functions
171 */
172bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
173bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
174
175#include "bfa_priv.h"
176
177#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_fcpim.h b/drivers/scsi/bfa/include/bfa_fcpim.h
new file mode 100644
index 000000000000..04789795fa53
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfa_fcpim.h
@@ -0,0 +1,159 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_H__
19#define __BFA_FCPIM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcpim.h>
24#include <defs/bfa_defs_fcpim.h>
25
26/*
27 * forward declarations
28 */
29struct bfa_itnim_s;
30struct bfa_ioim_s;
31struct bfa_tskim_s;
32struct bfad_ioim_s;
33struct bfad_tskim_s;
34
35/*
36 * bfa fcpim module API functions
37 */
38void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
39u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
40void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
41u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
42bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
43 struct bfa_fcpim_stats_s *modstats);
44bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
45
46/*
47 * bfa itnim API functions
48 */
49struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
50 struct bfa_rport_s *rport, void *itnim);
51void bfa_itnim_delete(struct bfa_itnim_s *itnim);
52void bfa_itnim_online(struct bfa_itnim_s *itnim,
53 bfa_boolean_t seq_rec);
54void bfa_itnim_offline(struct bfa_itnim_s *itnim);
55void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
56 struct bfa_itnim_hal_stats_s *stats);
57void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
58
59
60/**
61 * BFA completion callback for bfa_itnim_online().
62 *
63 * @param[in] itnim FCS or driver itnim instance
64 *
65 * return None
66 */
67void bfa_cb_itnim_online(void *itnim);
68
69/**
70 * BFA completion callback for bfa_itnim_offline().
71 *
72 * @param[in] itnim FCS or driver itnim instance
73 *
74 * return None
75 */
76void bfa_cb_itnim_offline(void *itnim);
77void bfa_cb_itnim_tov_begin(void *itnim);
78void bfa_cb_itnim_tov(void *itnim);
79
80/**
81 * BFA notification to FCS/driver for second level error recovery.
82 *
 * At least one I/O request has timed out and the target is unresponsive to
 * repeated abort requests. Second level error recovery should be initiated
85 * by starting implicit logout and recovery procedures.
86 *
87 * @param[in] itnim FCS or driver itnim instance
88 *
89 * return None
90 */
91void bfa_cb_itnim_sler(void *itnim);
92
93/*
94 * bfa ioim API functions
95 */
96struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
97 struct bfad_ioim_s *dio,
98 struct bfa_itnim_s *itnim,
99 u16 nsgles);
100
101void bfa_ioim_free(struct bfa_ioim_s *ioim);
102void bfa_ioim_start(struct bfa_ioim_s *ioim);
103void bfa_ioim_abort(struct bfa_ioim_s *ioim);
104void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
105 bfa_boolean_t iotov);
106
107
108/**
109 * I/O completion notification.
110 *
 * @param[in]	bfad		bfa driver instance
 * @param[in]	dio		driver IO structure
112 * @param[in] io_status IO completion status
113 * @param[in] scsi_status SCSI status returned by target
114 * @param[in] sns_len SCSI sense length, 0 if none
115 * @param[in] sns_info SCSI sense data, if any
116 * @param[in] residue Residual length
117 *
118 * @return None
119 */
120void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
121 enum bfi_ioim_status io_status,
122 u8 scsi_status, int sns_len,
123 u8 *sns_info, s32 residue);
124
125/**
126 * I/O good completion notification.
127 *
128 * @param[in] dio driver IO structure
129 *
130 * @return None
131 */
132void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
133
134/**
135 * I/O abort completion notification
136 *
137 * @param[in] dio driver IO that was aborted
138 *
139 * @return None
140 */
void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);

/**
 * I/O resource-free notification.
 *
 * @param[in] hcb_bfad driver/bfad instance whose IO resource was freed
 *
 * @return None
 */
void bfa_cb_ioim_resfree(void *hcb_bfad);

146/*
147 * bfa tskim API functions
148 */
149struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
150 struct bfad_tskim_s *dtsk);
151void bfa_tskim_free(struct bfa_tskim_s *tskim);
152void bfa_tskim_start(struct bfa_tskim_s *tskim,
153 struct bfa_itnim_s *itnim, lun_t lun,
154 enum fcp_tm_cmnd tm, u8 t_secs);
155void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
156 enum bfi_tskim_status tsk_status);
157
158#endif /* __BFA_FCPIM_H__ */
159
diff --git a/drivers/scsi/bfa/include/bfa_fcptm.h b/drivers/scsi/bfa/include/bfa_fcptm.h
new file mode 100644
index 000000000000..5f5ffe0bb1bb
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfa_fcptm.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPTM_H__
19#define __BFA_FCPTM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcptm.h>
24
25/*
26 * forward declarations
27 */
28struct bfa_tin_s;
29struct bfa_iotm_s;
30struct bfa_tsktm_s;
31
32/*
33 * bfa fcptm module API functions
34 */
35void bfa_fcptm_path_tov_set(struct bfa_s *bfa, u16 path_tov);
36u16 bfa_fcptm_path_tov_get(struct bfa_s *bfa);
37void bfa_fcptm_qdepth_set(struct bfa_s *bfa, u16 q_depth);
38u16 bfa_fcptm_qdepth_get(struct bfa_s *bfa);
39
40/*
41 * bfa tin API functions
42 */
43void bfa_tin_get_stats(struct bfa_tin_s *tin, struct bfa_tin_stats_s *stats);
44void bfa_tin_clear_stats(struct bfa_tin_s *tin);
45
46#endif /* __BFA_FCPTM_H__ */
47
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
new file mode 100644
index 000000000000..0c80b74f72ef
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -0,0 +1,324 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_SVC_H__
18#define __BFA_SVC_H__
19
20/*
21 * forward declarations
22 */
23struct bfa_fcxp_s;
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_pport.h>
27#include <defs/bfa_defs_rport.h>
28#include <defs/bfa_defs_qos.h>
29#include <cs/bfa_sm.h>
30#include <bfa.h>
31
32/**
33 * BFA rport information.
34 */
35struct bfa_rport_info_s {
36 u16 max_frmsz; /* max rcv pdu size */
37 u32 pid : 24, /* remote port ID */
38 lp_tag : 8;
39 u32 local_pid : 24, /* local port ID */
40 cisc : 8; /* CIRO supported */
41 u8 fc_class; /* supported FC classes. enum fc_cos */
42 u8 vf_en; /* virtual fabric enable */
43 u16 vf_id; /* virtual fabric ID */
44 enum bfa_pport_speed speed; /* Rport's current speed */
45};
46
47/**
48 * BFA rport data structure
49 */
50struct bfa_rport_s {
51 struct list_head qe; /* queue element */
52 bfa_sm_t sm; /* state machine */
53 struct bfa_s *bfa; /* backpointer to BFA */
54 void *rport_drv; /* fcs/driver rport object */
55 u16 fw_handle; /* firmware rport handle */
56 u16 rport_tag; /* BFA rport tag */
	struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
58 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
59 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
60 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
61 struct bfa_rport_qos_attr_s qos_attr;
62 union a {
63 bfa_status_t status; /* f/w status */
64 void *fw_msg; /* QoS scn event */
65 } event_arg;
66};
67#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
68
69/**
70 * Send completion callback.
71 */
72typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
73 void *cbarg, enum bfa_status req_status,
74 u32 rsp_len, u32 resid_len,
75 struct fchs_s *rsp_fchs);
76
77/**
78 * BFA fcxp allocation (asynchronous)
79 */
80typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
81
/**
 * Wait-queue element used while waiting for a free fcxp
 * (queued by bfa_fcxp_alloc_wait(), cancelled by bfa_fcxp_walloc_cancel()).
 */
struct bfa_fcxp_wqe_s {
	struct list_head	qe;		/* queue element */
	bfa_fcxp_alloc_cbfn_t	alloc_cbfn;	/* called when an fcxp becomes available */
	void		*alloc_cbarg;	/* argument passed back to alloc_cbfn */
};
87
88typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
89typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
90
91#define BFA_UF_BUFSZ (2 * 1024 + 256)
92
93/**
94 * @todo private
95 */
96struct bfa_uf_buf_s {
97 u8 d[BFA_UF_BUFSZ];
98};
99
100
101struct bfa_uf_s {
102 struct list_head qe; /* queue element */
103 struct bfa_s *bfa; /* bfa instance */
104 u16 uf_tag; /* identifying tag f/w messages */
105 u16 vf_id;
106 u16 src_rport_handle;
107 u16 rsvd;
108 u8 *data_ptr;
109 u16 data_len; /* actual receive length */
110 u16 pb_len; /* posted buffer length */
111 void *buf_kva; /* buffer virtual address */
112 u64 buf_pa; /* buffer physical address */
113 struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
114 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
115};
116
117typedef void (*bfa_cb_pport_t) (void *cbarg, enum bfa_status status);
118
119/**
120 * bfa lport login/logout service interface
121 */
122struct bfa_lps_s {
123 struct list_head qe; /* queue element */
124 struct bfa_s *bfa; /* parent bfa instance */
125 bfa_sm_t sm; /* finite state machine */
126 u8 lp_tag; /* lport tag */
127 u8 reqq; /* lport request queue */
128 u8 alpa; /* ALPA for loop topologies */
129 u32 lp_pid; /* lport port ID */
130 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/
131 bfa_boolean_t auth_en; /* enable authentication */
132 bfa_boolean_t auth_req; /* authentication required */
133 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
134 bfa_boolean_t fport; /* attached peer is F_PORT */
135 bfa_boolean_t brcd_switch;/* attached peer is brcd switch */
136 bfa_status_t status; /* login status */
137 u16 pdusz; /* max receive PDU size */
138 u16 pr_bbcred; /* BB_CREDIT from peer */
139 u8 lsrjt_rsn; /* LSRJT reason */
140 u8 lsrjt_expl; /* LSRJT explanation */
141 wwn_t pwwn; /* port wwn of lport */
142 wwn_t nwwn; /* node wwn of lport */
143 wwn_t pr_pwwn; /* port wwn of lport peer */
144 wwn_t pr_nwwn; /* node wwn of lport peer */
145 mac_t lp_mac; /* fpma/spma MAC for lport */
146 mac_t fcf_mac; /* FCF MAC of lport */
147 struct bfa_reqq_wait_s wqe; /* request wait queue element */
148 void *uarg; /* user callback arg */
149 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
150 struct bfi_lps_login_rsp_s *loginrsp;
151 bfa_eproto_status_t ext_status;
152};
153
154/*
155 * bfa pport API functions
156 */
157bfa_status_t bfa_pport_enable(struct bfa_s *bfa);
158bfa_status_t bfa_pport_disable(struct bfa_s *bfa);
159bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa,
160 enum bfa_pport_speed speed);
161enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa);
162bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa,
163 enum bfa_pport_topology topo);
164enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa);
165bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
166bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
167u8 bfa_pport_get_myalpa(struct bfa_s *bfa);
168bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa);
169bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
170u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa);
171u32 bfa_pport_mypid(struct bfa_s *bfa);
172u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa);
173bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
174bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa);
175bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
176void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
177wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
178bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa,
179 union bfa_pport_stats_u *stats,
180 bfa_cb_pport_t cbfn, void *cbarg);
181bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
182 void *cbarg);
183void bfa_pport_event_register(struct bfa_s *bfa,
184 void (*event_cbfn) (void *cbarg,
185 bfa_pport_event_t event), void *event_cbarg);
186bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa);
187void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
188void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
189bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa,
190 enum bfa_pport_speed speed);
191enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa);
192
193void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
194void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status);
195void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
196 bfa_boolean_t link_e2e_beacon);
197void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
198void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr);
199void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
200 struct bfa_qos_vc_attr_s *qos_vc_attr);
201bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa,
202 union bfa_pport_stats_u *stats,
203 bfa_cb_pport_t cbfn, void *cbarg);
204bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
205 void *cbarg);
206bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa);
207bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa);
208
209/*
210 * bfa rport API functions
211 */
212struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
213void bfa_rport_delete(struct bfa_rport_s *rport);
214void bfa_rport_online(struct bfa_rport_s *rport,
215 struct bfa_rport_info_s *rport_info);
216void bfa_rport_offline(struct bfa_rport_s *rport);
217void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed);
218void bfa_rport_get_stats(struct bfa_rport_s *rport,
219 struct bfa_rport_hal_stats_s *stats);
220void bfa_rport_clear_stats(struct bfa_rport_s *rport);
221void bfa_cb_rport_online(void *rport);
222void bfa_cb_rport_offline(void *rport);
223void bfa_cb_rport_qos_scn_flowid(void *rport,
224 struct bfa_rport_qos_attr_s old_qos_attr,
225 struct bfa_rport_qos_attr_s new_qos_attr);
226void bfa_cb_rport_qos_scn_prio(void *rport,
227 struct bfa_rport_qos_attr_s old_qos_attr,
228 struct bfa_rport_qos_attr_s new_qos_attr);
229void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
230 struct bfa_rport_qos_attr_s *qos_attr);
231
232/*
233 * bfa fcxp API functions
234 */
235struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
236 int nreq_sgles, int nrsp_sgles,
237 bfa_fcxp_get_sgaddr_t get_req_sga,
238 bfa_fcxp_get_sglen_t get_req_sglen,
239 bfa_fcxp_get_sgaddr_t get_rsp_sga,
240 bfa_fcxp_get_sglen_t get_rsp_sglen);
241void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
242 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *cbarg);
243void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
244 struct bfa_fcxp_wqe_s *wqe);
245void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
246
247void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
248void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
249
250void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
251
252void bfa_fcxp_send(struct bfa_fcxp_s *fcxp,
253 struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag,
254 bfa_boolean_t cts, enum fc_cos cos,
255 u32 reqlen, struct fchs_s *fchs,
256 bfa_cb_fcxp_send_t cbfn,
257 void *cbarg,
258 u32 rsp_maxlen, u8 rsp_timeout);
259bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
260u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
261u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
262
/**
 * Return a pointer to the received frame data of an unsolicited frame.
 */
static inline void *
bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
{
	return uf->data_ptr;
}
268
/**
 * Return the actual receive length (in bytes) of an unsolicited frame.
 */
static inline u16
bfa_uf_get_frmlen(struct bfa_uf_s *uf)
{
	return uf->data_len;
}
274
275/**
276 * Callback prototype for unsolicited frame receive handler.
277 *
278 * @param[in] cbarg callback arg for receive handler
279 * @param[in] uf unsolicited frame descriptor
280 *
281 * @return None
282 */
283typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
284
285/*
286 * bfa uf API functions
287 */
288void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
289 void *cbarg);
290void bfa_uf_free(struct bfa_uf_s *uf);
291
292/**
293 * bfa lport service api
294 */
295
296struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
297void bfa_lps_delete(struct bfa_lps_s *lps);
298void bfa_lps_discard(struct bfa_lps_s *lps);
299void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
300 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en);
301void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
302 wwn_t nwwn);
303void bfa_lps_flogo(struct bfa_lps_s *lps);
304void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
305u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
306bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
307bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
308bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
309bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
310bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
311u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
312u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
313u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
314wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
315wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
316u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
317u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
318void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
319void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
320void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
321void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
322
323#endif /* __BFA_SVC_H__ */
324
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
new file mode 100644
index 000000000000..e407103fa565
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfa_timer.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TIMER_H__
18#define __BFA_TIMER_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_q.h>
22
23struct bfa_s;
24
25typedef void (*bfa_timer_cbfn_t)(void *);
26
27/**
28 * BFA timer data structure
29 */
30struct bfa_timer_s {
31 struct list_head qe;
32 bfa_timer_cbfn_t timercb;
33 void *arg;
34 int timeout; /**< in millisecs. */
35};
36
37/**
38 * Timer module structure
39 */
40struct bfa_timer_mod_s {
41 struct list_head timer_q;
42};
43
44#define BFA_TIMER_FREQ 500 /**< specified in millisecs */
45
46void bfa_timer_beat(struct bfa_timer_mod_s *mod);
47void bfa_timer_init(struct bfa_timer_mod_s *mod);
48void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
49 bfa_timer_cbfn_t timercb, void *arg,
50 unsigned int timeout);
51void bfa_timer_stop(struct bfa_timer_s *timer);
52
53#endif /* __BFA_TIMER_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
new file mode 100644
index 000000000000..6cadfe0d4ba1
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_H__
19#define __BFI_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_status.h>
23
24#pragma pack(1)
25
26/**
27 * Msg header common to all msgs
28 */
29struct bfi_mhdr_s {
30 u8 msg_class; /* @ref bfi_mclass_t */
31 u8 msg_id; /* msg opcode with in the class */
32 union {
33 struct {
34 u8 rsvd;
35 u8 lpu_id; /* msg destination */
36 } h2i;
37 u16 i2htok; /* token in msgs to host */
38 } mtag;
39};
40
41#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
42 (_mh).msg_class = (_mc); \
43 (_mh).msg_id = (_op); \
44 (_mh).mtag.h2i.lpu_id = (_lpuid); \
45} while (0)
46
47#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
48 (_mh).msg_class = (_mc); \
49 (_mh).msg_id = (_op); \
50 (_mh).mtag.i2htok = (_i2htok); \
51} while (0)
52
53/*
54 * Message opcodes: 0-127 to firmware, 128-255 to host
55 */
56#define BFI_I2H_OPCODE_BASE 128
57#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
58
59/**
60 ****************************************************************************
61 *
62 * Scatter Gather Element and Page definition
63 *
64 ****************************************************************************
65 */
66
67#define BFI_SGE_INLINE 1
68#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
69
70/**
71 * SG Flags
72 */
73enum {
74 BFI_SGE_DATA = 0, /* data address, not last */
75 BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
76 BFI_SGE_DATA_LAST = 3, /* data address, last */
77 BFI_SGE_LINK = 2, /* link address */
78 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
79};
80
/**
 * DMA addresses
 *
 * 64-bit DMA address carried as two 32-bit words.
 * NOTE(review): low word is declared first -- presumably matches the
 * firmware's expected layout; confirm against the firmware interface spec.
 */
union bfi_addr_u {
	struct {
		u32	addr_lo;	/* lower 32 bits of DMA address */
		u32	addr_hi;	/* upper 32 bits of DMA address */
	} a32;
};
90
91/**
92 * Scatter Gather Element
93 */
94struct bfi_sge_s {
95#ifdef __BIGENDIAN
96 u32 flags : 2,
97 rsvd : 2,
98 sg_len : 28;
99#else
100 u32 sg_len : 28,
101 rsvd : 2,
102 flags : 2;
103#endif
104 union bfi_addr_u sga;
105};
106
107/**
108 * Scatter Gather Page
109 */
110#define BFI_SGPG_DATA_SGES 7
111#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
112#define BFI_SGPG_RSVD_WD_LEN 8
113struct bfi_sgpg_s {
114 struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
115 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
116};
117
118/*
119 * Large Message structure - 128 Bytes size Msgs
120 */
121#define BFI_LMSG_SZ 128
122#define BFI_LMSG_PL_WSZ \
123 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
124
125struct bfi_msg_s {
126 struct bfi_mhdr_s mhdr;
127 u32 pl[BFI_LMSG_PL_WSZ];
128};
129
130/**
131 * Mailbox message structure
132 */
133#define BFI_MBMSG_SZ 7
134struct bfi_mbmsg_s {
135 struct bfi_mhdr_s mh;
136 u32 pl[BFI_MBMSG_SZ];
137};
138
139/**
140 * Message Classes
141 */
142enum bfi_mclass {
143 BFI_MC_IOC = 1, /* IO Controller (IOC) */
144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
145 BFI_MC_FLASH = 3, /* Flash message class */
146 BFI_MC_CEE = 4,
147 BFI_MC_FC_PORT = 5, /* FC port */
148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
149 BFI_MC_LL = 7, /* Link Layer */
150 BFI_MC_UF = 8, /* Unsolicited frame receive */
151 BFI_MC_FCXP = 9, /* FC Transport */
152 BFI_MC_LPS = 10, /* lport fc login services */
153 BFI_MC_RPORT = 11, /* Remote port */
154 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
155 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
156 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
157 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
158 BFI_MC_IOIM = 16, /* IO (Initiator mode) */
159 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
160 BFI_MC_TSKIM = 18, /* Initiator Task management */
161 BFI_MC_SBOOT = 19, /* SAN boot services */
162 BFI_MC_IPFC = 20, /* IP over FC Msgs */
163 BFI_MC_PORT = 21, /* Physical port */
164 BFI_MC_MAX = 32
165};
166
167#define BFI_IOC_MAX_CQS 4
168#define BFI_IOC_MAX_CQS_ASIC 8
169#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
170
171#pragma pack()
172
173#endif /* __BFI_H__ */
174
diff --git a/drivers/scsi/bfa/include/bfi/bfi_boot.h b/drivers/scsi/bfa/include/bfi/bfi_boot.h
new file mode 100644
index 000000000000..5955afe7d108
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_boot.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * bfi_boot.h
19 */
20
21#ifndef __BFI_BOOT_H__
22#define __BFI_BOOT_H__
23
24#define BFI_BOOT_TYPE_OFF 8
25#define BFI_BOOT_PARAM_OFF 12
26
27#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
28#define BFI_BOOT_TYPE_FLASH 1
29#define BFI_BOOT_TYPE_MEMTEST 2
30
31#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
32#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
33
34#endif
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
new file mode 100644
index 000000000000..b3bb52b565b1
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
@@ -0,0 +1,305 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * bfi_cbreg.h crossbow host block register definitions
20 *
21 * !!! Do not edit. Auto generated. !!!
22 */
23
24#ifndef __BFI_CBREG_H__
25#define __BFI_CBREG_H__
26
27
28#define HOSTFN0_INT_STATUS 0x00014000
29#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
30#define __HOSTFN0_INT_STATUS_LVL_SH 20
31#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
32#define __HOSTFN0_INT_STATUS_P 0x000fffff
33#define HOSTFN0_INT_MSK 0x00014004
34#define HOST_PAGE_NUM_FN0 0x00014008
35#define __HOST_PAGE_NUM_FN 0x000001ff
36#define HOSTFN1_INT_STATUS 0x00014100
37#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000
38#define __HOSTFN1_INT_STAT_LVL_SH 20
39#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH)
40#define __HOSTFN1_INT_STAT_P 0x000fffff
41#define HOSTFN1_INT_MSK 0x00014104
42#define HOST_PAGE_NUM_FN1 0x00014108
43#define APP_PLL_400_CTL_REG 0x00014204
44#define __P_400_PLL_LOCK 0x80000000
45#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000
46#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000
47#define __APP_PLL_400_RESET_TIMER_SH 17
48#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH)
49#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000
50#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000
51#define __APP_PLL_400_CNTLMT0_1_SH 14
52#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH)
53#define __APP_PLL_400_JITLMT0_1_MK 0x00003000
54#define __APP_PLL_400_JITLMT0_1_SH 12
55#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH)
56#define __APP_PLL_400_HREF 0x00000800
57#define __APP_PLL_400_HDIV 0x00000400
58#define __APP_PLL_400_P0_1_MK 0x00000300
59#define __APP_PLL_400_P0_1_SH 8
60#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH)
61#define __APP_PLL_400_Z0_2_MK 0x000000e0
62#define __APP_PLL_400_Z0_2_SH 5
63#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH)
64#define __APP_PLL_400_RSEL200500 0x00000010
65#define __APP_PLL_400_ENARST 0x00000008
66#define __APP_PLL_400_BYPASS 0x00000004
67#define __APP_PLL_400_LRESETN 0x00000002
68#define __APP_PLL_400_ENABLE 0x00000001
69#define APP_PLL_212_CTL_REG 0x00014208
70#define __P_212_PLL_LOCK 0x80000000
71#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000
72#define __APP_PLL_212_RESET_TIMER_SH 17
73#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH)
74#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000
75#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000
76#define __APP_PLL_212_CNTLMT0_1_SH 14
77#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH)
78#define __APP_PLL_212_JITLMT0_1_MK 0x00003000
79#define __APP_PLL_212_JITLMT0_1_SH 12
80#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH)
81#define __APP_PLL_212_HREF 0x00000800
82#define __APP_PLL_212_HDIV 0x00000400
83#define __APP_PLL_212_P0_1_MK 0x00000300
84#define __APP_PLL_212_P0_1_SH 8
85#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH)
86#define __APP_PLL_212_Z0_2_MK 0x000000e0
87#define __APP_PLL_212_Z0_2_SH 5
88#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH)
89#define __APP_PLL_212_RSEL200500 0x00000010
90#define __APP_PLL_212_ENARST 0x00000008
91#define __APP_PLL_212_BYPASS 0x00000004
92#define __APP_PLL_212_LRESETN 0x00000002
93#define __APP_PLL_212_ENABLE 0x00000001
94#define HOST_SEM0_REG 0x00014230
95#define __HOST_SEMAPHORE 0x00000001
96#define HOST_SEM1_REG 0x00014234
97#define HOST_SEM2_REG 0x00014238
98#define HOST_SEM3_REG 0x0001423c
99#define HOST_SEM0_INFO_REG 0x00014240
100#define HOST_SEM1_INFO_REG 0x00014244
101#define HOST_SEM2_INFO_REG 0x00014248
102#define HOST_SEM3_INFO_REG 0x0001424c
103#define HOSTFN0_LPU0_CMD_STAT 0x00019000
104#define __HOSTFN0_LPU0_MBOX_INFO_MK 0xfffffffe
105#define __HOSTFN0_LPU0_MBOX_INFO_SH 1
106#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH)
107#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001
108#define LPU0_HOSTFN0_CMD_STAT 0x00019008
109#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe
110#define __LPU0_HOSTFN0_MBOX_INFO_SH 1
111#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH)
112#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001
113#define HOSTFN1_LPU1_CMD_STAT 0x00019014
114#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe
115#define __HOSTFN1_LPU1_MBOX_INFO_SH 1
116#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH)
117#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001
118#define LPU1_HOSTFN1_CMD_STAT 0x0001901c
119#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe
120#define __LPU1_HOSTFN1_MBOX_INFO_SH 1
121#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH)
122#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001
123#define CPE_Q0_DEPTH 0x00010014
124#define CPE_Q0_PI 0x0001001c
125#define CPE_Q0_CI 0x00010020
126#define CPE_Q1_DEPTH 0x00010034
127#define CPE_Q1_PI 0x0001003c
128#define CPE_Q1_CI 0x00010040
129#define CPE_Q2_DEPTH 0x00010054
130#define CPE_Q2_PI 0x0001005c
131#define CPE_Q2_CI 0x00010060
132#define CPE_Q3_DEPTH 0x00010074
133#define CPE_Q3_PI 0x0001007c
134#define CPE_Q3_CI 0x00010080
135#define CPE_Q4_DEPTH 0x00010094
136#define CPE_Q4_PI 0x0001009c
137#define CPE_Q4_CI 0x000100a0
138#define CPE_Q5_DEPTH 0x000100b4
139#define CPE_Q5_PI 0x000100bc
140#define CPE_Q5_CI 0x000100c0
141#define CPE_Q6_DEPTH 0x000100d4
142#define CPE_Q6_PI 0x000100dc
143#define CPE_Q6_CI 0x000100e0
144#define CPE_Q7_DEPTH 0x000100f4
145#define CPE_Q7_PI 0x000100fc
146#define CPE_Q7_CI 0x00010100
147#define RME_Q0_DEPTH 0x00011014
148#define RME_Q0_PI 0x0001101c
149#define RME_Q0_CI 0x00011020
150#define RME_Q1_DEPTH 0x00011034
151#define RME_Q1_PI 0x0001103c
152#define RME_Q1_CI 0x00011040
153#define RME_Q2_DEPTH 0x00011054
154#define RME_Q2_PI 0x0001105c
155#define RME_Q2_CI 0x00011060
156#define RME_Q3_DEPTH 0x00011074
157#define RME_Q3_PI 0x0001107c
158#define RME_Q3_CI 0x00011080
159#define RME_Q4_DEPTH 0x00011094
160#define RME_Q4_PI 0x0001109c
161#define RME_Q4_CI 0x000110a0
162#define RME_Q5_DEPTH 0x000110b4
163#define RME_Q5_PI 0x000110bc
164#define RME_Q5_CI 0x000110c0
165#define RME_Q6_DEPTH 0x000110d4
166#define RME_Q6_PI 0x000110dc
167#define RME_Q6_CI 0x000110e0
168#define RME_Q7_DEPTH 0x000110f4
169#define RME_Q7_PI 0x000110fc
170#define RME_Q7_CI 0x00011100
171#define PSS_CTL_REG 0x00018800
172#define __PSS_I2C_CLK_DIV_MK 0x00030000
173#define __PSS_I2C_CLK_DIV_SH 16
174#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
175#define __PSS_LMEM_INIT_DONE 0x00001000
176#define __PSS_LMEM_RESET 0x00000200
177#define __PSS_LMEM_INIT_EN 0x00000100
178#define __PSS_LPU1_RESET 0x00000002
179#define __PSS_LPU0_RESET 0x00000001
180
181
182/*
183 * These definitions are either in error/missing in spec. Its auto-generated
184 * from hard coded values in regparse.pl.
185 */
186#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
187#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
188#define __EMPHPRE_AT_4G_FIX 0x00000003
189#define __SFP_TXRATE_EN_FIX 0x00000100
190#define __SFP_RXRATE_EN_FIX 0x00000080
191
192
193/*
194 * These register definitions are auto-generated from hard coded values
195 * in regparse.pl.
196 */
197#define HOSTFN0_LPU_MBOX0_0 0x00019200
198#define HOSTFN1_LPU_MBOX0_8 0x00019260
199#define LPU_HOSTFN0_MBOX0_0 0x00019280
200#define LPU_HOSTFN1_MBOX0_8 0x000192e0
201
202
203/*
204 * These register mapping definitions are auto-generated from mapping tables
205 * in regparse.pl.
206 */
207#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
208#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
209#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
210#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
211#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
212
213#define CPE_Q_DEPTH(__n) \
214 (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
215#define CPE_Q_PI(__n) \
216 (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI))
217#define CPE_Q_CI(__n) \
218 (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI))
219#define RME_Q_DEPTH(__n) \
220 (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH))
221#define RME_Q_PI(__n) \
222 (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI))
223#define RME_Q_CI(__n) \
224 (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI))
225
226#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
227#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
228#define CPE_Q_MASK(__q) ((__q) & 0x3)
229#define RME_Q_MASK(__q) ((__q) & 0x3)
230
231
232/*
233 * PCI MSI-X vector defines
234 */
235enum {
236 BFA_MSIX_CPE_Q0 = 0,
237 BFA_MSIX_CPE_Q1 = 1,
238 BFA_MSIX_CPE_Q2 = 2,
239 BFA_MSIX_CPE_Q3 = 3,
240 BFA_MSIX_CPE_Q4 = 4,
241 BFA_MSIX_CPE_Q5 = 5,
242 BFA_MSIX_CPE_Q6 = 6,
243 BFA_MSIX_CPE_Q7 = 7,
244 BFA_MSIX_RME_Q0 = 8,
245 BFA_MSIX_RME_Q1 = 9,
246 BFA_MSIX_RME_Q2 = 10,
247 BFA_MSIX_RME_Q3 = 11,
248 BFA_MSIX_RME_Q4 = 12,
249 BFA_MSIX_RME_Q5 = 13,
250 BFA_MSIX_RME_Q6 = 14,
251 BFA_MSIX_RME_Q7 = 15,
252 BFA_MSIX_ERR_EMC = 16,
253 BFA_MSIX_ERR_LPU0 = 17,
254 BFA_MSIX_ERR_LPU1 = 18,
255 BFA_MSIX_ERR_PSS = 19,
256 BFA_MSIX_MBOX_LPU0 = 20,
257 BFA_MSIX_MBOX_LPU1 = 21,
258 BFA_MSIX_CB_MAX = 22,
259};
260
261/*
262 * And corresponding host interrupt status bit field defines
263 */
264#define __HFN_INT_CPE_Q0 0x00000001U
265#define __HFN_INT_CPE_Q1 0x00000002U
266#define __HFN_INT_CPE_Q2 0x00000004U
267#define __HFN_INT_CPE_Q3 0x00000008U
268#define __HFN_INT_CPE_Q4 0x00000010U
269#define __HFN_INT_CPE_Q5 0x00000020U
270#define __HFN_INT_CPE_Q6 0x00000040U
271#define __HFN_INT_CPE_Q7 0x00000080U
272#define __HFN_INT_RME_Q0 0x00000100U
273#define __HFN_INT_RME_Q1 0x00000200U
274#define __HFN_INT_RME_Q2 0x00000400U
275#define __HFN_INT_RME_Q3 0x00000800U
276#define __HFN_INT_RME_Q4 0x00001000U
277#define __HFN_INT_RME_Q5 0x00002000U
278#define __HFN_INT_RME_Q6 0x00004000U
279#define __HFN_INT_RME_Q7 0x00008000U
280#define __HFN_INT_ERR_EMC 0x00010000U
281#define __HFN_INT_ERR_LPU0 0x00020000U
282#define __HFN_INT_ERR_LPU1 0x00040000U
283#define __HFN_INT_ERR_PSS 0x00080000U
284#define __HFN_INT_MBOX_LPU0 0x00100000U
285#define __HFN_INT_MBOX_LPU1 0x00200000U
286#define __HFN_INT_MBOX1_LPU0 0x00400000U
287#define __HFN_INT_MBOX1_LPU1 0x00800000U
288#define __HFN_INT_CPE_MASK 0x000000ffU
289#define __HFN_INT_RME_MASK 0x0000ff00U
290
291
292/*
293 * crossbow memory map.
294 */
295#define PSS_SMEM_PAGE_START 0x8000
296#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
297#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
298
299/*
300 * End of crossbow memory map
301 */
302
303
304#endif /* __BFI_CBREG_H__ */
305
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cee.h b/drivers/scsi/bfa/include/bfi/bfi_cee.h
new file mode 100644
index 000000000000..0970596583ea
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_cee.h
@@ -0,0 +1,119 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/**
18 * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
19 * All rights reserved.
20 *
21 * bfi_dcbx.h BFI Interface (Mailbox commands and related structures)
22 * between host driver and DCBX/LLDP firmware module.
23 *
24**/
25
26#ifndef __BFI_CEE_H__
27#define __BFI_CEE_H__
28
29#include <bfi/bfi.h>
30
31#pragma pack(1)
32
33
34enum bfi_cee_h2i_msgs_e {
35 BFI_CEE_H2I_GET_CFG_REQ = 1,
36 BFI_CEE_H2I_RESET_STATS = 2,
37 BFI_CEE_H2I_GET_STATS_REQ = 3,
38};
39
40
41enum bfi_cee_i2h_msgs_e {
42 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
43 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
44 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
45};
46
47
48/* Data structures */
49
50/*
51 * BFI_CEE_H2I_RESET_STATS
52 */
53struct bfi_lldp_reset_stats_s {
54 struct bfi_mhdr_s mh;
55};
56
57/*
58 * BFI_CEE_H2I_RESET_STATS
59 */
60struct bfi_cee_reset_stats_s {
61 struct bfi_mhdr_s mh;
62};
63
64/*
65 * BFI_CEE_H2I_GET_CFG_REQ
66 */
67struct bfi_cee_get_req_s {
68 struct bfi_mhdr_s mh;
69 union bfi_addr_u dma_addr;
70};
71
72
73/*
74 * BFI_CEE_I2H_GET_CFG_RSP
75 */
76struct bfi_cee_get_rsp_s {
77 struct bfi_mhdr_s mh;
78 u8 cmd_status;
79 u8 rsvd[3];
80};
81
82/*
83 * BFI_CEE_H2I_GET_STATS_REQ
84 */
85struct bfi_cee_stats_req_s {
86 struct bfi_mhdr_s mh;
87 union bfi_addr_u dma_addr;
88};
89
90
91/*
92 * BFI_CEE_I2H_GET_STATS_RSP
93 */
94struct bfi_cee_stats_rsp_s {
95 struct bfi_mhdr_s mh;
96 u8 cmd_status;
97 u8 rsvd[3];
98};
99
100
101
102union bfi_cee_h2i_msg_u {
103 struct bfi_mhdr_s mh;
104 struct bfi_cee_get_req_s get_req;
105 struct bfi_cee_stats_req_s stats_req;
106};
107
108
109union bfi_cee_i2h_msg_u {
110 struct bfi_mhdr_s mh;
111 struct bfi_cee_get_rsp_s get_rsp;
112 struct bfi_cee_stats_rsp_s stats_rsp;
113};
114
115#pragma pack()
116
117
118#endif /* __BFI_CEE_H__ */
119
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
new file mode 100644
index 000000000000..d3caa58c0a0a
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -0,0 +1,611 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * bfi_ctreg.h catapult host block register definitions
20 *
21 * !!! Do not edit. Auto generated. !!!
22 */
23
24#ifndef __BFI_CTREG_H__
25#define __BFI_CTREG_H__
26
27
28#define HOSTFN0_LPU_MBOX0_0 0x00019200
29#define HOSTFN1_LPU_MBOX0_8 0x00019260
30#define LPU_HOSTFN0_MBOX0_0 0x00019280
31#define LPU_HOSTFN1_MBOX0_8 0x000192e0
32#define HOSTFN2_LPU_MBOX0_0 0x00019400
33#define HOSTFN3_LPU_MBOX0_8 0x00019460
34#define LPU_HOSTFN2_MBOX0_0 0x00019480
35#define LPU_HOSTFN3_MBOX0_8 0x000194e0
36#define HOSTFN0_INT_STATUS 0x00014000
37#define __HOSTFN0_HALT_OCCURRED 0x01000000
38#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
39#define __HOSTFN0_INT_STATUS_LVL_SH 20
40#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
41#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
42#define __HOSTFN0_INT_STATUS_P_SH 16
43#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
44#define __HOSTFN0_INT_STATUS_F 0x0000ffff
45#define HOSTFN0_INT_MSK 0x00014004
46#define HOST_PAGE_NUM_FN0 0x00014008
47#define __HOST_PAGE_NUM_FN 0x000001ff
48#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
49#define __MSIX_ERR_INDEX_FN 0x000001ff
50#define HOSTFN1_INT_STATUS 0x00014100
51#define __HOSTFN1_HALT_OCCURRED 0x01000000
52#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
53#define __HOSTFN1_INT_STATUS_LVL_SH 20
54#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
55#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
56#define __HOSTFN1_INT_STATUS_P_SH 16
57#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
58#define __HOSTFN1_INT_STATUS_F 0x0000ffff
59#define HOSTFN1_INT_MSK 0x00014104
60#define HOST_PAGE_NUM_FN1 0x00014108
61#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
62#define APP_PLL_425_CTL_REG 0x00014204
63#define __P_425_PLL_LOCK 0x80000000
64#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
65#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
66#define __APP_PLL_425_RESET_TIMER_SH 17
67#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
68#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
69#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
70#define __APP_PLL_425_CNTLMT0_1_SH 14
71#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
72#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
73#define __APP_PLL_425_JITLMT0_1_SH 12
74#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
75#define __APP_PLL_425_HREF 0x00000800
76#define __APP_PLL_425_HDIV 0x00000400
77#define __APP_PLL_425_P0_1_MK 0x00000300
78#define __APP_PLL_425_P0_1_SH 8
79#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
80#define __APP_PLL_425_Z0_2_MK 0x000000e0
81#define __APP_PLL_425_Z0_2_SH 5
82#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
83#define __APP_PLL_425_RSEL200500 0x00000010
84#define __APP_PLL_425_ENARST 0x00000008
85#define __APP_PLL_425_BYPASS 0x00000004
86#define __APP_PLL_425_LRESETN 0x00000002
87#define __APP_PLL_425_ENABLE 0x00000001
88#define APP_PLL_312_CTL_REG 0x00014208
89#define __P_312_PLL_LOCK 0x80000000
90#define __ENABLE_MAC_AHB_1 0x00800000
91#define __ENABLE_MAC_AHB_0 0x00400000
92#define __ENABLE_MAC_1 0x00200000
93#define __ENABLE_MAC_0 0x00100000
94#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
95#define __APP_PLL_312_RESET_TIMER_SH 17
96#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
97#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
98#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
99#define __APP_PLL_312_CNTLMT0_1_SH 14
100#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
101#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
102#define __APP_PLL_312_JITLMT0_1_SH 12
103#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
104#define __APP_PLL_312_HREF 0x00000800
105#define __APP_PLL_312_HDIV 0x00000400
106#define __APP_PLL_312_P0_1_MK 0x00000300
107#define __APP_PLL_312_P0_1_SH 8
108#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
109#define __APP_PLL_312_Z0_2_MK 0x000000e0
110#define __APP_PLL_312_Z0_2_SH 5
111#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
112#define __APP_PLL_312_RSEL200500 0x00000010
113#define __APP_PLL_312_ENARST 0x00000008
114#define __APP_PLL_312_BYPASS 0x00000004
115#define __APP_PLL_312_LRESETN 0x00000002
116#define __APP_PLL_312_ENABLE 0x00000001
117#define MBIST_CTL_REG 0x00014220
118#define __EDRAM_BISTR_START 0x00000004
119#define __MBIST_RESET 0x00000002
120#define __MBIST_START 0x00000001
121#define MBIST_STAT_REG 0x00014224
122#define __EDRAM_BISTR_STATUS 0x00000008
123#define __EDRAM_BISTR_DONE 0x00000004
124#define __MEM_BIT_STATUS 0x00000002
125#define __MBIST_DONE 0x00000001
126#define HOST_SEM0_REG 0x00014230
127#define __HOST_SEMAPHORE 0x00000001
128#define HOST_SEM1_REG 0x00014234
129#define HOST_SEM2_REG 0x00014238
130#define HOST_SEM3_REG 0x0001423c
131#define HOST_SEM0_INFO_REG 0x00014240
132#define HOST_SEM1_INFO_REG 0x00014244
133#define HOST_SEM2_INFO_REG 0x00014248
134#define HOST_SEM3_INFO_REG 0x0001424c
135#define ETH_MAC_SER_REG 0x00014288
136#define __APP_EMS_CKBUFAMPIN 0x00000020
137#define __APP_EMS_REFCLKSEL 0x00000010
138#define __APP_EMS_CMLCKSEL 0x00000008
139#define __APP_EMS_REFCKBUFEN2 0x00000004
140#define __APP_EMS_REFCKBUFEN1 0x00000002
141#define __APP_EMS_CHANNEL_SEL 0x00000001
142#define HOSTFN2_INT_STATUS 0x00014300
143#define __HOSTFN2_HALT_OCCURRED 0x01000000
144#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
145#define __HOSTFN2_INT_STATUS_LVL_SH 20
146#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
147#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
148#define __HOSTFN2_INT_STATUS_P_SH 16
149#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
150#define __HOSTFN2_INT_STATUS_F 0x0000ffff
151#define HOSTFN2_INT_MSK 0x00014304
152#define HOST_PAGE_NUM_FN2 0x00014308
153#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
154#define HOSTFN3_INT_STATUS 0x00014400
155#define __HALT_OCCURRED 0x01000000
156#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
157#define __HOSTFN3_INT_STATUS_LVL_SH 20
158#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
159#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
160#define __HOSTFN3_INT_STATUS_P_SH 16
161#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
162#define __HOSTFN3_INT_STATUS_F 0x0000ffff
163#define HOSTFN3_INT_MSK 0x00014404
164#define HOST_PAGE_NUM_FN3 0x00014408
165#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
166#define FNC_ID_REG 0x00014600
167#define __FUNCTION_NUMBER 0x00000007
168#define FNC_PERS_REG 0x00014604
169#define __F3_FUNCTION_ACTIVE 0x80000000
170#define __F3_FUNCTION_MODE 0x40000000
171#define __F3_PORT_MAP_MK 0x30000000
172#define __F3_PORT_MAP_SH 28
173#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
174#define __F3_VM_MODE 0x08000000
175#define __F3_INTX_STATUS_MK 0x07000000
176#define __F3_INTX_STATUS_SH 24
177#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
178#define __F2_FUNCTION_ACTIVE 0x00800000
179#define __F2_FUNCTION_MODE 0x00400000
180#define __F2_PORT_MAP_MK 0x00300000
181#define __F2_PORT_MAP_SH 20
182#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
183#define __F2_VM_MODE 0x00080000
184#define __F2_INTX_STATUS_MK 0x00070000
185#define __F2_INTX_STATUS_SH 16
186#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
187#define __F1_FUNCTION_ACTIVE 0x00008000
188#define __F1_FUNCTION_MODE 0x00004000
189#define __F1_PORT_MAP_MK 0x00003000
190#define __F1_PORT_MAP_SH 12
191#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
192#define __F1_VM_MODE 0x00000800
193#define __F1_INTX_STATUS_MK 0x00000700
194#define __F1_INTX_STATUS_SH 8
195#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
196#define __F0_FUNCTION_ACTIVE 0x00000080
197#define __F0_FUNCTION_MODE 0x00000040
198#define __F0_PORT_MAP_MK 0x00000030
199#define __F0_PORT_MAP_SH 4
200#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
201#define __F0_VM_MODE 0x00000008
202#define __F0_INTX_STATUS 0x00000007
203enum {
204 __F0_INTX_STATUS_MSIX = 0x0,
205 __F0_INTX_STATUS_INTA = 0x1,
206 __F0_INTX_STATUS_INTB = 0x2,
207 __F0_INTX_STATUS_INTC = 0x3,
208 __F0_INTX_STATUS_INTD = 0x4,
209};
210#define OP_MODE 0x0001460c
211#define __APP_ETH_CLK_LOWSPEED 0x00000004
212#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
213#define __GLOBAL_FCOE_MODE 0x00000001
214#define HOST_SEM4_REG 0x00014610
215#define HOST_SEM5_REG 0x00014614
216#define HOST_SEM6_REG 0x00014618
217#define HOST_SEM7_REG 0x0001461c
218#define HOST_SEM4_INFO_REG 0x00014620
219#define HOST_SEM5_INFO_REG 0x00014624
220#define HOST_SEM6_INFO_REG 0x00014628
221#define HOST_SEM7_INFO_REG 0x0001462c
222#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
223#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
224#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
225#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
226#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
227#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
228#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
229#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
230#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
231#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
232#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
233#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
234#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
235#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
236#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
237#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
238#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
239#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
240#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
241#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
242#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
243#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
244#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
245#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
246#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
247#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
248#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
249#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
250#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
251#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
252#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
253#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
254#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
255#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
256#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
257#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
258#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
259#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
260#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
261#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
262#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
263#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
264#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
265#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
266#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
267#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
268#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
269#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
270#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
271#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
272#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
273#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
274#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
275#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
276#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
277#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
278#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
279#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
280#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
281#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
282#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
283#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
284#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
285#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
286#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
287#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
288#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
289#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
290#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
291#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
292#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
293#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
294#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
295#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
296#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
297#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
298#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
299#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
300#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
301#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
302#define FW_INIT_HALT_P0 0x000191ac
303#define __FW_INIT_HALT_P 0x00000001
304#define FW_INIT_HALT_P1 0x000191bc
305#define CPE_PI_PTR_Q0 0x00038000
306#define __CPE_PI_UNUSED_MK 0xffff0000
307#define __CPE_PI_UNUSED_SH 16
308#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
309#define __CPE_PI_PTR 0x0000ffff
310#define CPE_PI_PTR_Q1 0x00038040
311#define CPE_CI_PTR_Q0 0x00038004
312#define __CPE_CI_UNUSED_MK 0xffff0000
313#define __CPE_CI_UNUSED_SH 16
314#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
315#define __CPE_CI_PTR 0x0000ffff
316#define CPE_CI_PTR_Q1 0x00038044
317#define CPE_DEPTH_Q0 0x00038008
318#define __CPE_DEPTH_UNUSED_MK 0xf8000000
319#define __CPE_DEPTH_UNUSED_SH 27
320#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
321#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
322#define __CPE_MSIX_VEC_INDEX_SH 16
323#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
324#define __CPE_DEPTH 0x0000ffff
325#define CPE_DEPTH_Q1 0x00038048
326#define CPE_QCTRL_Q0 0x0003800c
327#define __CPE_CTRL_UNUSED30_MK 0xfc000000
328#define __CPE_CTRL_UNUSED30_SH 26
329#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
330#define __CPE_FUNC_INT_CTRL_MK 0x03000000
331#define __CPE_FUNC_INT_CTRL_SH 24
332#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
333enum {
334 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
335 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
336 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
337 __CPE_FUNC_INT_CTRL_HALF = 0x3,
338};
339#define __CPE_CTRL_UNUSED20_MK 0x00f00000
340#define __CPE_CTRL_UNUSED20_SH 20
341#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
342#define __CPE_SCI_TH_MK 0x000f0000
343#define __CPE_SCI_TH_SH 16
344#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
345#define __CPE_CTRL_UNUSED10_MK 0x0000c000
346#define __CPE_CTRL_UNUSED10_SH 14
347#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
348#define __CPE_ACK_PENDING 0x00002000
349#define __CPE_CTRL_UNUSED40_MK 0x00001c00
350#define __CPE_CTRL_UNUSED40_SH 10
351#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
352#define __CPE_PCIEID_MK 0x00000300
353#define __CPE_PCIEID_SH 8
354#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
355#define __CPE_CTRL_UNUSED00_MK 0x000000fe
356#define __CPE_CTRL_UNUSED00_SH 1
357#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
358#define __CPE_ESIZE 0x00000001
359#define CPE_QCTRL_Q1 0x0003804c
360#define __CPE_CTRL_UNUSED31_MK 0xfc000000
361#define __CPE_CTRL_UNUSED31_SH 26
362#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
363#define __CPE_CTRL_UNUSED21_MK 0x00f00000
364#define __CPE_CTRL_UNUSED21_SH 20
365#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
366#define __CPE_CTRL_UNUSED11_MK 0x0000c000
367#define __CPE_CTRL_UNUSED11_SH 14
368#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
369#define __CPE_CTRL_UNUSED41_MK 0x00001c00
370#define __CPE_CTRL_UNUSED41_SH 10
371#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
372#define __CPE_CTRL_UNUSED01_MK 0x000000fe
373#define __CPE_CTRL_UNUSED01_SH 1
374#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
375#define RME_PI_PTR_Q0 0x00038020
376#define __LATENCY_TIME_STAMP_MK 0xffff0000
377#define __LATENCY_TIME_STAMP_SH 16
378#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
379#define __RME_PI_PTR 0x0000ffff
380#define RME_PI_PTR_Q1 0x00038060
381#define RME_CI_PTR_Q0 0x00038024
382#define __DELAY_TIME_STAMP_MK 0xffff0000
383#define __DELAY_TIME_STAMP_SH 16
384#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
385#define __RME_CI_PTR 0x0000ffff
386#define RME_CI_PTR_Q1 0x00038064
387#define RME_DEPTH_Q0 0x00038028
388#define __RME_DEPTH_UNUSED_MK 0xf8000000
389#define __RME_DEPTH_UNUSED_SH 27
390#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
391#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
392#define __RME_MSIX_VEC_INDEX_SH 16
393#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
394#define __RME_DEPTH 0x0000ffff
395#define RME_DEPTH_Q1 0x00038068
396#define RME_QCTRL_Q0 0x0003802c
397#define __RME_INT_LATENCY_TIMER_MK 0xff000000
398#define __RME_INT_LATENCY_TIMER_SH 24
399#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
400#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
401#define __RME_INT_DELAY_TIMER_SH 16
402#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
403#define __RME_INT_DELAY_DISABLE 0x00008000
404#define __RME_DLY_DELAY_DISABLE 0x00004000
405#define __RME_ACK_PENDING 0x00002000
406#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
407#define __RME_CTRL_UNUSED10_MK 0x00000c00
408#define __RME_CTRL_UNUSED10_SH 10
409#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
410#define __RME_PCIEID_MK 0x00000300
411#define __RME_PCIEID_SH 8
412#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
413#define __RME_CTRL_UNUSED00_MK 0x000000fe
414#define __RME_CTRL_UNUSED00_SH 1
415#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
416#define __RME_ESIZE 0x00000001
417#define RME_QCTRL_Q1 0x0003806c
418#define __RME_CTRL_UNUSED11_MK 0x00000c00
419#define __RME_CTRL_UNUSED11_SH 10
420#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
421#define __RME_CTRL_UNUSED01_MK 0x000000fe
422#define __RME_CTRL_UNUSED01_SH 1
423#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
424#define PSS_CTL_REG 0x00018800
425#define __PSS_I2C_CLK_DIV_MK 0x007f0000
426#define __PSS_I2C_CLK_DIV_SH 16
427#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
428#define __PSS_LMEM_INIT_DONE 0x00001000
429#define __PSS_LMEM_RESET 0x00000200
430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001
433#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
434#define __RXQ0_ADD_VECTORS_P 0x80000000
435#define __RXQ0_STOP_P 0x40000000
436#define __RXQ0_PRD_PTR_P 0x0000ffff
437#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
438#define __RXQ1_ADD_VECTORS_P 0x80000000
439#define __RXQ1_STOP_P 0x40000000
440#define __RXQ1_PRD_PTR_P 0x0000ffff
441#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
442#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
443#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
444#define __TXQ0_ADD_VECTORS_P 0x80000000
445#define __TXQ0_STOP_P 0x40000000
446#define __TXQ0_PRD_PTR_P 0x0000ffff
447#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
448#define __TXQ1_ADD_VECTORS_P 0x80000000
449#define __TXQ1_STOP_P 0x40000000
450#define __TXQ1_PRD_PTR_P 0x0000ffff
451#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
452#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
453#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
454#define __IB1_0_ACK_P 0x80000000
455#define __IB1_0_DISABLE_P 0x40000000
456#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
457#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
458#define __IB1_1_ACK_P 0x80000000
459#define __IB1_1_DISABLE_P 0x40000000
460#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
461#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
462#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
463#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
464#define __IB2_0_ACK_P 0x80000000
465#define __IB2_0_DISABLE_P 0x40000000
466#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
467#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
468#define __IB2_1_ACK_P 0x80000000
469#define __IB2_1_DISABLE_P 0x40000000
470#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
471#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
472#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
473
474
475/*
476 * These definitions are either in error/missing in spec. Its auto-generated
477 * from hard coded values in regparse.pl.
478 */
479#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
480#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
481#define __EMPHPRE_AT_4G_FIX 0x00000003
482#define __SFP_TXRATE_EN_FIX 0x00000100
483#define __SFP_RXRATE_EN_FIX 0x00000080
484
485
486/*
487 * These register definitions are auto-generated from hard coded values
488 * in regparse.pl.
489 */
490
491
492/*
493 * These register mapping definitions are auto-generated from mapping tables
494 * in regparse.pl.
495 */
496#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
497#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
498#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
499#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
500#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
501
/*
 * Per-queue register address macros: the register for queue __n lives at
 * the Q0 base address plus __n times the (Q1 - Q0) stride, i.e. queue
 * registers are evenly spaced in the register file.
 */
502#define CPE_DEPTH_Q(__n) \
503 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
504#define CPE_QCTRL_Q(__n) \
505 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
506#define CPE_PI_PTR_Q(__n) \
507 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
508#define CPE_CI_PTR_Q(__n) \
509 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
510#define RME_DEPTH_Q(__n) \
511 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
512#define RME_QCTRL_Q(__n) \
513 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
514#define RME_PI_PTR_Q(__n) \
515 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
516#define RME_CI_PTR_Q(__n) \
517 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
518#define HQM_QSET_RXQ_DRBL_P0(__n) \
519 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * (HQM_QSET1_RXQ_DRBL_P0 - \
520 HQM_QSET0_RXQ_DRBL_P0))
521#define HQM_QSET_TXQ_DRBL_P0(__n) \
522 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * (HQM_QSET1_TXQ_DRBL_P0 - \
523 HQM_QSET0_TXQ_DRBL_P0))
524#define HQM_QSET_IB_DRBL_1_P0(__n) \
525 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * (HQM_QSET1_IB_DRBL_1_P0 - \
526 HQM_QSET0_IB_DRBL_1_P0))
527#define HQM_QSET_IB_DRBL_2_P0(__n) \
528 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * (HQM_QSET1_IB_DRBL_2_P0 - \
529 HQM_QSET0_IB_DRBL_2_P0))
530#define HQM_QSET_RXQ_DRBL_P1(__n) \
531 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * (HQM_QSET1_RXQ_DRBL_P1 - \
532 HQM_QSET0_RXQ_DRBL_P1))
533#define HQM_QSET_TXQ_DRBL_P1(__n) \
534 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * (HQM_QSET1_TXQ_DRBL_P1 - \
535 HQM_QSET0_TXQ_DRBL_P1))
536#define HQM_QSET_IB_DRBL_1_P1(__n) \
537 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * (HQM_QSET1_IB_DRBL_1_P1 - \
538 HQM_QSET0_IB_DRBL_1_P1))
539#define HQM_QSET_IB_DRBL_2_P1(__n) \
540 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * (HQM_QSET1_IB_DRBL_2_P1 - \
541 HQM_QSET0_IB_DRBL_2_P1))
542
/*
 * Global CPE/RME queue numbering: 4 queues per PCI function, so the
 * global queue number is (function << 2) + queue, and the low 2 bits
 * select the queue within a function.
 */
543#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
544#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
545#define CPE_Q_MASK(__q) ((__q) & 0x3)
546#define RME_Q_MASK(__q) ((__q) & 0x3)
547
548
549/*
550 * PCI MSI-X vector defines
551 */
552enum {
553 BFA_MSIX_CPE_Q0 = 0,
554 BFA_MSIX_CPE_Q1 = 1,
555 BFA_MSIX_CPE_Q2 = 2,
556 BFA_MSIX_CPE_Q3 = 3,
557 BFA_MSIX_RME_Q0 = 4,
558 BFA_MSIX_RME_Q1 = 5,
559 BFA_MSIX_RME_Q2 = 6,
560 BFA_MSIX_RME_Q3 = 7,
561 BFA_MSIX_LPU_ERR = 8,
562 BFA_MSIX_CT_MAX = 9, /* total MSI-X vectors used */
563};
564
565/*
566 * And corresponding host interrupt status bit field defines
567 */
568#define __HFN_INT_CPE_Q0 0x00000001U
569#define __HFN_INT_CPE_Q1 0x00000002U
570#define __HFN_INT_CPE_Q2 0x00000004U
571#define __HFN_INT_CPE_Q3 0x00000008U
572#define __HFN_INT_CPE_Q4 0x00000010U
573#define __HFN_INT_CPE_Q5 0x00000020U
574#define __HFN_INT_CPE_Q6 0x00000040U
575#define __HFN_INT_CPE_Q7 0x00000080U
576#define __HFN_INT_RME_Q0 0x00000100U
577#define __HFN_INT_RME_Q1 0x00000200U
578#define __HFN_INT_RME_Q2 0x00000400U
579#define __HFN_INT_RME_Q3 0x00000800U
580#define __HFN_INT_RME_Q4 0x00001000U
581#define __HFN_INT_RME_Q5 0x00002000U
582#define __HFN_INT_RME_Q6 0x00004000U
583#define __HFN_INT_RME_Q7 0x00008000U
584#define __HFN_INT_ERR_EMC 0x00010000U
585#define __HFN_INT_ERR_LPU0 0x00020000U
586#define __HFN_INT_ERR_LPU1 0x00040000U
587#define __HFN_INT_ERR_PSS 0x00080000U
588#define __HFN_INT_MBOX_LPU0 0x00100000U
589#define __HFN_INT_MBOX_LPU1 0x00200000U
590#define __HFN_INT_MBOX1_LPU0 0x00400000U
591#define __HFN_INT_MBOX1_LPU1 0x00800000U
592#define __HFN_INT_CPE_MASK 0x000000ffU /* bits 0-7: all CPE queues */
593#define __HFN_INT_RME_MASK 0x0000ff00U /* bits 8-15: all RME queues */
594
595
596/*
597 * catapult memory map.
598 */
599#define LL_PGN_HQM0 0x0096
600#define LL_PGN_HQM1 0x0097
601#define PSS_SMEM_PAGE_START 0x8000
/* 32KB shared-memory pages: bits [31:15] of the address select the page,
 * bits [14:0] the offset within it. */
602#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
603#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
604
605/*
606 * End of catapult memory map
607 */
608
609
610#endif /* __BFI_CTREG_H__ */
611
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fabric.h b/drivers/scsi/bfa/include/bfi/bfi_fabric.h
new file mode 100644
index 000000000000..c0669ed41078
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_fabric.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FABRIC_H__
19#define __BFI_FABRIC_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
/* Host-to-firmware fabric messages */
25enum bfi_fabric_h2i_msgs {
26 BFI_FABRIC_H2I_CREATE_REQ = 1,
27 BFI_FABRIC_H2I_DELETE_REQ = 2,
28 BFI_FABRIC_H2I_SETAUTH = 3,
29};
30
/* Firmware-to-host fabric messages */
31enum bfi_fabric_i2h_msgs {
32 BFI_FABRIC_I2H_CREATE_RSP = BFA_I2HM(1),
33 BFI_FABRIC_I2H_DELETE_RSP = BFA_I2HM(2),
34 BFI_FABRIC_I2H_SETAUTH_RSP = BFA_I2HM(3),
35 BFI_FABRIC_I2H_ONLINE = BFA_I2HM(4),
36 BFI_FABRIC_I2H_OFFLINE = BFA_I2HM(5),
37};
38
39struct bfi_fabric_create_req_s {
40 bfi_mhdr_t mh; /* common msg header */
41 u8 vf_en; /* virtual fabric enable */
42 u8 rsvd;
43 u16 vf_id; /* virtual fabric ID */
44 wwn_t pwwn; /* port name */
45 wwn_t nwwn; /* node name */
46};
47
48struct bfi_fabric_create_rsp_s {
49 bfi_mhdr_t mh; /* common msg header */
50 u16 bfa_handle; /* host fabric handle */
51 u8 status; /* fabric create status */
52 u8 rsvd;
53};
54
55struct bfi_fabric_delete_req_s {
56 bfi_mhdr_t mh; /* common msg header */
57 u16 fw_handle; /* firmware fabric handle */
58 u16 rsvd;
59};
60
61struct bfi_fabric_delete_rsp_s {
62 bfi_mhdr_t mh; /* common msg header */
63 u16 bfa_handle; /* host fabric handle */
64 u8 status; /* fabric deletion status */
65 u8 rsvd;
66};
67
68#define BFI_FABRIC_AUTHSECRET_LEN 64
69struct bfi_fabric_setauth_req_s {
70 bfi_mhdr_t mh; /* common msg header */
71 u16 fw_handle; /* f/w handle of fabric */
72 u8 algorithm; /* authentication algorithm */
73 u8 group; /* authentication group */
74 u8 secret[BFI_FABRIC_AUTHSECRET_LEN];
75};
76
/* Convenience union of pointers to all h2i fabric message types */
77union bfi_fabric_h2i_msg_u {
78 bfi_msg_t *msg;
79 struct bfi_fabric_create_req_s *create_req;
80 struct bfi_fabric_delete_req_s *delete_req;
81};
82
/* Convenience union of pointers to all i2h fabric message types */
83union bfi_fabric_i2h_msg_u {
84 bfi_msg_t *msg;
85 struct bfi_fabric_create_rsp_s *create_rsp;
86 struct bfi_fabric_delete_rsp_s *delete_rsp;
87};
88
89#pragma pack()
90
91#endif /* __BFI_FABRIC_H__ */
92
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h b/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
new file mode 100644
index 000000000000..52c059fb4c3a
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
@@ -0,0 +1,301 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCPIM_H__
19#define __BFI_FCPIM_H__
20
21#include "bfi.h"
22#include <protocol/fcp.h>
23
24#pragma pack(1)
25
26/*
27 * Initiator mode I-T nexus interface defines.
28 */
29
30enum bfi_itnim_h2i {
31 BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */
32 BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
33};
34
35enum bfi_itnim_i2h {
36 BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
37 BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
38 BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3), /* sequence level error recovery event */
39};
40
41struct bfi_itnim_create_req_s {
42 struct bfi_mhdr_s mh; /* common msg header */
43 u16 fw_handle; /* f/w handle for itnim */
44 u8 class; /* FC class for IO */
45 u8 seq_rec; /* sequence recovery support */
46 u8 msg_no; /* seq id of the msg */
47};
48
49struct bfi_itnim_create_rsp_s {
50 struct bfi_mhdr_s mh; /* common msg header */
51 u16 bfa_handle; /* bfa handle for itnim */
52 u8 status; /* fcp request status */
53 u8 seq_id; /* seq id of the msg */
54};
55
56struct bfi_itnim_delete_req_s {
57 struct bfi_mhdr_s mh; /* common msg header */
58 u16 fw_handle; /* f/w itnim handle */
59 u8 seq_id; /* seq id of the msg */
60 u8 rsvd;
61};
62
63struct bfi_itnim_delete_rsp_s {
64 struct bfi_mhdr_s mh; /* common msg header */
65 u16 bfa_handle; /* bfa handle for itnim */
66 u8 status; /* fcp request status */
67 u8 seq_id; /* seq id of the msg */
68};
69
70struct bfi_itnim_sler_event_s {
71 struct bfi_mhdr_s mh; /* common msg header */
72 u16 bfa_handle; /* bfa handle for itnim */
73 u16 rsvd;
74};
75
/* Convenience union of pointers to all h2i itnim message types */
76union bfi_itnim_h2i_msg_u {
77 struct bfi_itnim_create_req_s *create_req;
78 struct bfi_itnim_delete_req_s *delete_req;
79 struct bfi_msg_s *msg;
80};
81
/* Convenience union of pointers to all i2h itnim message types */
82union bfi_itnim_i2h_msg_u {
83 struct bfi_itnim_create_rsp_s *create_rsp;
84 struct bfi_itnim_delete_rsp_s *delete_rsp;
85 struct bfi_itnim_sler_event_s *sler_event;
86 struct bfi_msg_s *msg;
87};
88
89/*
90 * Initiator mode IO interface defines.
91 */
92
93enum bfi_ioim_h2i {
94 BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */
95 BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */
96};
97
98enum bfi_ioim_i2h {
99 BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */
100 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),/* ABORT rsp */
101};
102
103/**
104 * IO command DIF info
105 */
106struct bfi_ioim_dif_s {
107 u32 dif_info[4];
108};
109
110/**
111 * FCP IO messages overview
112 *
113 * @note
114 * - Max CDB length supported is 64 bytes.
115 * - SCSI Linked commands and SCSI bi-directional Commands not
116 * supported.
117 *
118 */
119struct bfi_ioim_req_s {
120 struct bfi_mhdr_s mh; /* Common msg header */
121 u16 io_tag; /* I/O tag */
122 u16 rport_hdl; /* itnim/rport firmware handle */
123 struct fcp_cmnd_s cmnd; /* IO request info */
124
125 /**
126 * SG elements array within the IO request must be double word
127 * aligned. This alignment is required to optimize SGM setup for the IO.
128 */
129 struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
130 u8 io_timeout; /* IO timeout (units not shown here — confirm) */
131 u8 dif_en; /* DIF enable — see dif info below (presumed from name) */
132 u8 rsvd_a[2];
133 struct bfi_ioim_dif_s dif;
134};
135
136/**
137 * This table shows various IO status codes from firmware and their
138 * meaning. Host driver can use these status codes to further process
139 * IO completions.
140 *
141 * BFI_IOIM_STS_OK : IO completed with error free SCSI &
142 * transport status.
143 * - io-tag can be reused.
144 *
145 * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error.
146 * - io-tag can be reused.
147 *
148 * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
149 * host request.
150 * - io-tag cannot be reused yet.
151 *
152 * BFI_IOIM_STS_ABORTED : IO was aborted successfully
153 * internally by f/w.
154 * - io-tag cannot be reused yet.
155 *
156 * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening
157 * in the firmware and
158 * - io-tag cannot be reused yet.
159 *
160 * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO
161 * with sequence level error
162 * logic and hence host needs to retry
163 * this IO with a different IO tag
164 * - io-tag cannot be used yet.
165 *
166 * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host
167 * is required because 2 consecutive ABTS
168 * timedout and host needs logout and
169 * re-login with the target
170 * - io-tag cannot be used yet.
171 *
172 * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good,
173 * but the data transferred is less than
174 * the fcp data length in the command.
175 * ex. SCSI INQUIRY where transferred
176 * data length and residue count in FCP
177 * response accounts for total fcp-dl
178 * - io-tag can be reused.
179 *
180 * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good,
181 * but the data transferred is more than
182 * fcp data length in the command. ex.
183 * TAPE IOs where blocks can be of unequal
184 * lengths.
185 * - io-tag can be reused.
186 *
187 * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag
188 * during abort process
189 * - io-tag can be reused.
190 *
191 * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error.
192 * ex target sent more data than
193 * requested, or there was data frame
194 * loss and other reasons
195 * - io-tag cannot be used yet.
196 *
197 * BFI_IOIM_STS_DIF_ERR : Firmware detected DIF error. ex: DIF
198 * CRC err or Ref Tag err or App tag err.
199 * - io-tag can be reused.
200 *
201 * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task
202 * Management command from the host
203 * - io-tag can be reused.
204 *
205 * BFI_IOIM_STS_UTAG : Firmware does not know about this
206 * io_tag.
207 * - io-tag can be reused.
208 */
/* See the status table in the comment above for io-tag reuse rules. */
209enum bfi_ioim_status {
210 BFI_IOIM_STS_OK = 0, /* completed without error */
211 BFI_IOIM_STS_HOST_ABORTED = 1, /* aborted on host request */
212 BFI_IOIM_STS_ABORTED = 2, /* aborted internally by f/w */
213 BFI_IOIM_STS_TIMEDOUT = 3,
214 BFI_IOIM_STS_RES_FREE = 4, /* f/w done with io-tag after abort */
215 BFI_IOIM_STS_SQER_NEEDED = 5, /* host must retry with new io-tag */
216 BFI_IOIM_STS_PROTO_ERR = 6,
217 BFI_IOIM_STS_UTAG = 7, /* f/w does not know this io_tag */
218 BFI_IOIM_STS_PATHTOV = 8,
219};
220
221#define BFI_IOIM_SNSLEN (256) /* max SCSI sense bytes per IO (presumed) */
222/**
223 * I/O response message
224 */
225struct bfi_ioim_rsp_s {
226 struct bfi_mhdr_s mh; /* common msg header */
227 u16 io_tag; /* completed IO tag */
228 u16 bfa_rport_hndl; /* related rport handle */
229 u8 io_status; /* IO completion status; see enum bfi_ioim_status */
230 u8 reuse_io_tag; /* IO tag can be reused */
231 u16 abort_tag; /* host abort request tag */
232 u8 scsi_status; /* scsi status from target */
233 u8 sns_len; /* scsi sense length */
234 u8 resid_flags; /* IO residue flags */
235 u8 rsvd_a;
236 u32 residue; /* IO residual length in bytes */
237 u32 rsvd_b[3];
238};
239
240struct bfi_ioim_abort_req_s {
241 struct bfi_mhdr_s mh; /* Common msg header */
242 u16 io_tag; /* I/O tag */
243 u16 abort_tag; /* unique request tag */
244};
245
246/*
247 * Initiator mode task management command interface defines.
248 */
249
250enum bfi_tskim_h2i {
251 BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */
252 BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt abort */
253};
254
255enum bfi_tskim_i2h {
256 BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
257};
258
259struct bfi_tskim_req_s {
260 struct bfi_mhdr_s mh; /* Common msg header */
261 u16 tsk_tag; /* task management tag */
262 u16 itn_fhdl; /* itn firmware handle */
263 lun_t lun; /* LU number */
264 u8 tm_flags; /* see fcp_tm_cmnd_t */
265 u8 t_secs; /* Timeout value in seconds */
266 u8 rsvd[2];
267};
268
269struct bfi_tskim_abortreq_s {
270 struct bfi_mhdr_s mh; /* Common msg header */
271 u16 tsk_tag; /* task management tag */
272 u16 rsvd;
273};
274
275enum bfi_tskim_status {
276 /*
277 * Following are FCP-4 spec defined status codes,
278 * **DO NOT CHANGE THEM **
279 */
280 BFI_TSKIM_STS_OK = 0,
281 BFI_TSKIM_STS_NOT_SUPP = 4,
282 BFI_TSKIM_STS_FAILED = 5,
283
284 /**
285 * Defined by BFA
286 */
287 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
288 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
289};
290
291struct bfi_tskim_rsp_s {
292 struct bfi_mhdr_s mh; /* Common msg header */
293 u16 tsk_tag; /* task mgmt cmnd tag */
294 u8 tsk_status; /* @ref bfi_tskim_status */
295 u8 rsvd;
296};
297
298#pragma pack()
299
300#endif /* __BFI_FCPIM_H__ */
301
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h b/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
new file mode 100644
index 000000000000..e0e995a32828
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCXP_H__
19#define __BFI_FCXP_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25enum bfi_fcxp_h2i {
26 BFI_FCXP_H2I_SEND_REQ = 1,
27};
28
29enum bfi_fcxp_i2h {
30 BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
31};
32
/* Max scatter-gather entries for each of the request and response buffers */
33#define BFA_FCXP_MAX_SGES 2
34
35/**
36 * FCXP send request structure
37 */
38struct bfi_fcxp_send_req_s {
39 struct bfi_mhdr_s mh; /* Common msg header */
40 u16 fcxp_tag; /* driver request tag */
41 u16 max_frmsz; /* max send frame size */
42 u16 vf_id; /* vsan tag if applicable */
43 u16 rport_fw_hndl; /* FW Handle for the remote port */
44 u8 class; /* FC class used for req/rsp */
45 u8 rsp_timeout; /* timeout in secs, 0-no response */
46 u8 cts; /* continue sequence */
47 u8 lp_tag; /* lport tag */
48 struct fchs_s fchs; /* request FC header structure */
49 u32 req_len; /* request payload length */
50 u32 rsp_maxlen; /* max response length expected */
51 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
52 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
53};
54
55/**
56 * FCXP send response structure
57 */
58struct bfi_fcxp_send_rsp_s {
59 struct bfi_mhdr_s mh; /* Common msg header */
60 u16 fcxp_tag; /* send request tag */
61 u8 req_status; /* request status */
62 u8 rsvd;
63 u32 rsp_len; /* actual response length */
64 u32 residue_len; /* residual response length */
65 struct fchs_s fchs; /* response FC header structure */
66};
67
68#pragma pack()
69
70#endif /* __BFI_FCXP_H__ */
71
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
new file mode 100644
index 000000000000..026e9c06ae97
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -0,0 +1,202 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOC_H__
19#define __BFI_IOC_H__
20
21#include "bfi.h"
22#include <defs/bfa_defs_ioc.h>
23
24#pragma pack(1)
25
26enum bfi_ioc_h2i_msgs {
27 BFI_IOC_H2I_ENABLE_REQ = 1,
28 BFI_IOC_H2I_DISABLE_REQ = 2,
29 BFI_IOC_H2I_GETATTR_REQ = 3,
30 BFI_IOC_H2I_DBG_SYNC = 4,
31 BFI_IOC_H2I_DBG_DUMP = 5,
32};
33
34enum bfi_ioc_i2h_msgs {
35 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
36 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
37 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
38 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
39 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
40};
41
42/**
43 * BFI_IOC_H2I_GETATTR_REQ message
44 */
45struct bfi_ioc_getattr_req_s {
46 struct bfi_mhdr_s mh; /* common msg header */
47 union bfi_addr_u attr_addr; /* host buffer address for the attributes
                                  (presumably where f/w DMAs
                                  bfi_ioc_attr_s — confirm) */
48};
49
/* IOC/adapter attributes reported by firmware */
50struct bfi_ioc_attr_s {
51 wwn_t mfg_wwn; /* manufacturing WWN */
52 mac_t mfg_mac; /* manufacturing MAC */
53 u16 rsvd_a;
54 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
55 u8 pcie_gen;
56 u8 pcie_lanes_orig;
57 u8 pcie_lanes;
58 u8 rx_bbcredit; /* receive buffer credits */
59 u32 adapter_prop; /* adapter properties; decode with BFI_ADAPTER_GETP() */
60 u16 maxfrsize; /* max receive frame size */
61 char asic_rev;
62 u8 rsvd_b;
63 char fw_version[BFA_VERSION_LEN];
64 char optrom_version[BFA_VERSION_LEN];
65 struct bfa_mfg_vpd_s vpd;
66};
67
68/**
69 * BFI_IOC_I2H_GETATTR_REPLY message
70 */
71struct bfi_ioc_getattr_reply_s {
72 struct bfi_mhdr_s mh; /* Common msg header */
73 u8 status; /* cfg reply status */
74 u8 rsvd[3];
75};
76
77/**
78 * Firmware memory page offsets
79 */
80#define BFI_IOC_SMEM_PG0_CB (0x40)
81#define BFI_IOC_SMEM_PG0_CT (0x180)
82
83/**
84 * Firmware trace offset
85 */
86#define BFI_IOC_TRC_OFF (0x4b00)
87#define BFI_IOC_TRC_ENTS 256
88
89#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
90#define BFI_IOC_MD5SUM_SZ 4 /* md5sum is 4 x u32 = 16 bytes */
91struct bfi_ioc_image_hdr_s {
92 u32 signature; /* constant signature */
93 u32 rsvd_a;
94 u32 exec; /* exec vector */
95 u32 param; /* parameters */
96 u32 rsvd_b[4];
97 u32 md5sum[BFI_IOC_MD5SUM_SZ];
98};
99
100/**
101 * BFI_IOC_I2H_READY_EVENT message
102 */
103struct bfi_ioc_rdy_event_s {
104 struct bfi_mhdr_s mh; /* common msg header */
105 u8 init_status; /* init event status */
106 u8 rsvd[3];
107};
108
109struct bfi_ioc_hbeat_s {
110 struct bfi_mhdr_s mh; /* common msg header */
111 u32 hb_count; /* current heart beat count */
112};
113
114/**
115 * IOC hardware/firmware state
116 */
117enum bfi_ioc_state {
118 BFI_IOC_UNINIT = 0, /* not initialized */
119 BFI_IOC_INITING = 1, /* h/w is being initialized */
120 BFI_IOC_HWINIT = 2, /* h/w is initialized */
121 BFI_IOC_CFG = 3, /* IOC configuration in progress */
122 BFI_IOC_OP = 4, /* IOC is operational */
123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
124 BFI_IOC_DISABLED = 6, /* IOC is disabled */
125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
126 BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */
127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
128};
129
/* Host writes this signature so f/w can detect host byte order — presumed;
 * see endian_sig in struct bfi_iocfc_cfg_s. */
130#define BFI_IOC_ENDIAN_SIG 0x12345678
131
/* Bit-field layout of the 32-bit adapter property word (adapter_prop). */
132enum {
133 BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
134 BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
135 BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
136 BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
137 BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
138 BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
139 BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
140 BFI_ADAPTER_PROTO = 0x100000, /* prototype adapters */
141 BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
142 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
143};
144
/*
 * Field accessors via token pasting: BFI_ADAPTER_GETP(NPORTS, prop)
 * expands to use BFI_ADAPTER_NPORTS_MK/_SH and extracts the port count.
 */
145#define BFI_ADAPTER_GETP(__prop,__adap_prop) \
146 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
147 BFI_ADAPTER_ ## __prop ## _SH)
148#define BFI_ADAPTER_SETP(__prop, __val) \
149 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
150#define BFI_ADAPTER_IS_PROTO(__adap_type) \
151 ((__adap_type) & BFI_ADAPTER_PROTO)
152#define BFI_ADAPTER_IS_TTV(__adap_type) \
153 ((__adap_type) & BFI_ADAPTER_TTV)
154#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
155 ((__adap_type) & BFI_ADAPTER_UNSUPP)
156#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
157 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
158 BFI_ADAPTER_UNSUPP))
159
160/**
161 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
162 */
163struct bfi_ioc_ctrl_req_s {
164 struct bfi_mhdr_s mh; /* common msg header */
165 u8 ioc_class;
166 u8 rsvd[3];
167};
168
169/**
170 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
171 */
172struct bfi_ioc_ctrl_reply_s {
173 struct bfi_mhdr_s mh; /* Common msg header */
174 u8 status; /* enable/disable status */
175 u8 rsvd[3];
176};
177
/* Mailbox message size in 32-bit words */
178#define BFI_IOC_MSGSZ 8
179/**
180 * H2I Messages
181 */
182union bfi_ioc_h2i_msg_u {
183 struct bfi_mhdr_s mh;
184 struct bfi_ioc_ctrl_req_s enable_req;
185 struct bfi_ioc_ctrl_req_s disable_req;
186 struct bfi_ioc_getattr_req_s getattr_req;
187 u32 mboxmsg[BFI_IOC_MSGSZ];
188};
189
190/**
191 * I2H Messages
192 */
193union bfi_ioc_i2h_msg_u {
194 struct bfi_mhdr_s mh;
195 struct bfi_ioc_rdy_event_s rdy_event;
196 u32 mboxmsg[BFI_IOC_MSGSZ];
197};
198
199#pragma pack()
200
201#endif /* __BFI_IOC_H__ */
202
diff --git a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
new file mode 100644
index 000000000000..c3760df72575
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOCFC_H__
19#define __BFI_IOCFC_H__
20
21#include "bfi.h"
22#include <defs/bfa_defs_ioc.h>
23#include <defs/bfa_defs_iocfc.h>
24#include <defs/bfa_defs_boot.h>
25
26#pragma pack(1)
27
28enum bfi_iocfc_h2i_msgs {
29 BFI_IOCFC_H2I_CFG_REQ = 1,
30 BFI_IOCFC_H2I_GET_STATS_REQ = 2,
31 BFI_IOCFC_H2I_CLEAR_STATS_REQ = 3,
32 BFI_IOCFC_H2I_SET_INTR_REQ = 4,
33 BFI_IOCFC_H2I_UPDATEQ_REQ = 5,
34};
35
36enum bfi_iocfc_i2h_msgs {
37 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
38 BFI_IOCFC_I2H_GET_STATS_RSP = BFA_I2HM(2),
39 BFI_IOCFC_I2H_CLEAR_STATS_RSP = BFA_I2HM(3),
40 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(5), /* BFA_I2HM(4) unused: no SET_INTR reply */
41};
42
/* Host-side IOC-FC configuration passed to firmware (via BFI_IOCFC_H2I_CFG_REQ) */
43struct bfi_iocfc_cfg_s {
44 u8 num_cqs; /* Number of CQs to be used */
45 u8 sense_buf_len; /* SCSI sense length */
46 u8 trunk_enabled; /* port trunking enabled */
47 u8 trunk_ports; /* trunk ports bit map */
48 u32 endian_sig; /* endian signature of host */
49
50 /**
51 * Request and response circular queue base addresses, size and
52 * shadow index pointers.
53 */
54 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
55 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
56 u16 req_cq_elems[BFI_IOC_MAX_CQS];
57 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
58 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
59 u16 rsp_cq_elems[BFI_IOC_MAX_CQS];
60
61 union bfi_addr_u stats_addr; /* DMA-able address for stats */
62 union bfi_addr_u cfgrsp_addr; /* config response dma address */
63 union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
64 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
65};
66
67/**
68 * Boot target wwn information for this port. This contains either the stored
69 * or discovered boot target port wwns for the port.
70 */
71struct bfi_iocfc_bootwwns {
72 wwn_t wwn[BFA_BOOT_BOOTLUN_MAX];
73 u8 nwwns; /* number of valid entries in wwn[] */
74 u8 rsvd[7];
75};
76
/* Firmware's reply to the config request, DMAed to cfgrsp_addr */
77struct bfi_iocfc_cfgrsp_s {
78 struct bfa_iocfc_fwcfg_s fwcfg;
79 struct bfa_iocfc_intr_attr_s intr_attr;
80 struct bfi_iocfc_bootwwns bootwwns;
81};
82
83/**
84 * BFI_IOCFC_H2I_CFG_REQ message
85 */
86struct bfi_iocfc_cfg_req_s {
87 struct bfi_mhdr_s mh; /* common msg header */
88 union bfi_addr_u ioc_cfg_dma_addr; /* DMA address of the config buffer
                                        (presumably bfi_iocfc_cfg_s) */
89};
90
91/**
92 * BFI_IOCFC_I2H_CFG_REPLY message
93 */
94struct bfi_iocfc_cfg_reply_s {
95 struct bfi_mhdr_s mh; /* Common msg header */
96 u8 cfg_success; /* cfg reply status */
97 u8 lpu_bm; /* LPUs assigned for this IOC */
98 u8 rsvd[2];
99};
100
101/**
102 * BFI_IOCFC_H2I_GET_STATS_REQ & BFI_IOCFC_H2I_CLEAR_STATS_REQ messages
103 */
104struct bfi_iocfc_stats_req_s {
105 struct bfi_mhdr_s mh; /* msg header */
106 u32 msgtag; /* msgtag for reply */
107};
108
109/**
110 * BFI_IOCFC_I2H_GET_STATS_RSP & BFI_IOCFC_I2H_CLEAR_STATS_RSP messages
111 */
112struct bfi_iocfc_stats_rsp_s {
113 struct bfi_mhdr_s mh; /* common msg header */
114 u8 status; /* reply status */
115 u8 rsvd[3];
116 u32 msgtag; /* msgtag for reply */
117};
118
119/**
120 * BFI_IOCFC_H2I_SET_INTR_REQ message
121 */
122struct bfi_iocfc_set_intr_req_s {
123 struct bfi_mhdr_s mh; /* common msg header */
124 u8 coalesce; /* enable intr coalescing */
125 u8 rsvd[3];
126 u16 delay; /* delay timer 0..1125us */
127 u16 latency; /* latency timer 0..225us */
128};
129
130/**
131 * BFI_IOCFC_H2I_UPDATEQ_REQ message
132 */
133struct bfi_iocfc_updateq_req_s {
134 struct bfi_mhdr_s mh; /* common msg header */
135 u32 reqq_ba; /* reqq base addr */
136 u32 rspq_ba; /* rspq base addr */
137 u32 reqq_sci; /* reqq shadow ci */
138 u32 rspq_spi; /* rspq shadow pi */
139};
140
141/**
142 * BFI_IOCFC_I2H_UPDATEQ_RSP message
143 */
144struct bfi_iocfc_updateq_rsp_s {
145 struct bfi_mhdr_s mh; /* common msg header */
146 u8 status; /* updateq status */
147 u8 rsvd[3];
148};
149
150/**
151 * H2I Messages
152 */
153union bfi_iocfc_h2i_msg_u {
154 struct bfi_mhdr_s mh;
155 struct bfi_iocfc_cfg_req_s cfg_req;
156 struct bfi_iocfc_stats_req_s stats_get;
157 struct bfi_iocfc_stats_req_s stats_clr;
158 struct bfi_iocfc_updateq_req_s updateq_req;
159 u32 mboxmsg[BFI_IOC_MSGSZ];
160};
161
162/**
163 * I2H Messages
164 */
165union bfi_iocfc_i2h_msg_u {
166 struct bfi_mhdr_s mh;
167 struct bfi_iocfc_cfg_reply_s cfg_reply;
168 struct bfi_iocfc_stats_rsp_s stats_get_rsp;
169 struct bfi_iocfc_stats_rsp_s stats_clr_rsp;
170 struct bfi_iocfc_updateq_rsp_s updateq_rsp;
171 u32 mboxmsg[BFI_IOC_MSGSZ];
172};
173
174#pragma pack()
175
176#endif /* __BFI_IOCFC_H__ */
177
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lport.h b/drivers/scsi/bfa/include/bfi/bfi_lport.h
new file mode 100644
index 000000000000..29010614bac9
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_lport.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPORT_H__
19#define __BFI_LPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
/* Host-to-firmware logical-port message IDs. */
enum bfi_lport_h2i_msgs {
	BFI_LPORT_H2I_CREATE_REQ = 1,
	BFI_LPORT_H2I_DELETE_REQ = 2,
};
29
/* Firmware-to-host logical-port message IDs (BFA_I2HM sets the I2H bit). */
enum bfi_lport_i2h_msgs {
	BFI_LPORT_I2H_CREATE_RSP = BFA_I2HM(1),
	BFI_LPORT_I2H_DELETE_RSP = BFA_I2HM(2),
	BFI_LPORT_I2H_ONLINE = BFA_I2HM(3),
	BFI_LPORT_I2H_OFFLINE = BFA_I2HM(4),
};
36
37#define BFI_LPORT_MAX_SYNNAME 64
38
/*
 * Logical-port FC-4 roles.  Power-of-two values — presumably OR-able
 * into bfi_lport_create_req_s.roles; confirm against callers.
 */
enum bfi_lport_role_e {
	BFI_LPORT_ROLE_FCPIM = 1,	/* FCP initiator mode */
	BFI_LPORT_ROLE_FCPTM = 2,	/* FCP target mode */
	BFI_LPORT_ROLE_IPFC = 4,	/* IP over FC */
};
44
/*
 * BFI_LPORT_H2I_CREATE_REQ payload.
 * NOTE(review): BFI_LPORT_MAX_SYNNAME is probably a misspelling of
 * "SYMNAME" (symbolic name) — cannot rename here without touching users.
 */
struct bfi_lport_create_req_s {
	bfi_mhdr_t mh;		/* common msg header */
	u16 fabric_fwhdl;	/* parent fabric instance */
	u8 roles;		/* lport FC-4 roles */
	u8 rsvd;		/* pad */
	wwn_t pwwn;		/* port name */
	wwn_t nwwn;		/* node name */
	u8 symname[BFI_LPORT_MAX_SYNNAME];	/* symbolic port name */
};
54
/* BFI_LPORT_I2H_CREATE_RSP payload. */
struct bfi_lport_create_rsp_s {
	bfi_mhdr_t mh;		/* common msg header */
	u8 status;		/* lport creation status */
	u8 rsvd[3];		/* pad to 32-bit boundary */
};
60
/* BFI_LPORT_H2I_DELETE_REQ payload. */
struct bfi_lport_delete_req_s {
	bfi_mhdr_t mh;		/* common msg header */
	u16 fw_handle;		/* firmware lport handle */
	u16 rsvd;		/* pad */
};
66
/* BFI_LPORT_I2H_DELETE_RSP payload. */
struct bfi_lport_delete_rsp_s {
	bfi_mhdr_t mh;		/* common msg header */
	u16 bfa_handle;		/* host lport handle */
	u8 status;		/* lport deletion status */
	u8 rsvd;		/* pad */
};
73
/*
 * Pointer overlay for decoding host-to-firmware lport messages.
 * NOTE(review): uses bfi_msg_t/bfi_mhdr_t typedefs while sibling headers
 * (bfi_lps.h, bfi_rport.h) spell out the struct tags — inconsistent.
 */
union bfi_lport_h2i_msg_u {
	bfi_msg_t *msg;
	struct bfi_lport_create_req_s *create_req;
	struct bfi_lport_delete_req_s *delete_req;
};
79
/* Pointer overlay for decoding firmware-to-host lport messages. */
union bfi_lport_i2h_msg_u {
	bfi_msg_t *msg;
	struct bfi_lport_create_rsp_s *create_rsp;
	struct bfi_lport_delete_rsp_s *delete_rsp;
};
85
86#pragma pack()
87
88#endif /* __BFI_LPORT_H__ */
89
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
new file mode 100644
index 000000000000..414b0e30f6ef
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPS_H__
19#define __BFI_LPS_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
/* Host-to-firmware LPS (fabric login service) message IDs. */
enum bfi_lps_h2i_msgs {
	BFI_LPS_H2I_LOGIN_REQ = 1,
	BFI_LPS_H2I_LOGOUT_REQ = 2,
};
29
/*
 * Firmware-to-host LPS message IDs.
 * NOTE(review): the constants carry an H2I prefix despite being I2H
 * replies (they are BFA_I2HM-encoded) — misnamed, but renaming would
 * touch every user.
 */
enum bfi_lps_i2h_msgs {
	BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
	BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
};
34
/* BFI_LPS_H2I_LOGIN_REQ payload — FLOGI/FDISC parameters. */
struct bfi_lps_login_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 lp_tag;		/* local port tag */
	u8 alpa;		/* arbitrated-loop physical address */
	u16 pdu_size;		/* max receive PDU size */
	wwn_t pwwn;		/* port name */
	wwn_t nwwn;		/* node name */
	u8 fdisc;		/* send FDISC instead of FLOGI */
	u8 auth_en;		/* authentication enabled */
	u8 rsvd[2];		/* pad */
};
46
/* BFI_LPS_H2I_LOGIN_RSP payload — login result and fabric parameters. */
struct bfi_lps_login_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 lp_tag;		/* local port tag */
	u8 status;		/* login status */
	u8 lsrjt_rsn;		/* LS_RJT reason code */
	u8 lsrjt_expl;		/* LS_RJT reason explanation */
	wwn_t port_name;	/* fabric port name */
	wwn_t node_name;	/* fabric node name */
	u16 bb_credit;		/* buffer-to-buffer credit */
	u8 f_port;		/* attached to an F_Port */
	u8 npiv_en;		/* NPIV supported by fabric */
	u32 lp_pid : 24;	/* assigned port ID */
	u32 auth_req : 8;	/* fabric requires authentication */
	mac_t lp_mac;		/* local port MAC (FCoE) */
	mac_t fcf_mac;		/* FCF MAC (FCoE) */
	u8 ext_status;		/* extended status */
	u8 brcd_switch;		/* attached peer is brcd switch */
};
65
/* BFI_LPS_H2I_LOGOUT_REQ payload. */
struct bfi_lps_logout_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 lp_tag;		/* local port tag */
	u8 rsvd[3];		/* pad */
	wwn_t port_name;	/* port to log out */
};
72
/* BFI_LPS_H2I_LOGOUT_RSP payload. */
struct bfi_lps_logout_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 lp_tag;		/* local port tag */
	u8 status;		/* logout status */
	u8 rsvd[2];		/* pad */
};
79
/* Pointer overlay for decoding host-to-firmware LPS messages. */
union bfi_lps_h2i_msg_u {
	struct bfi_mhdr_s *msg;
	struct bfi_lps_login_req_s *login_req;
	struct bfi_lps_logout_req_s *logout_req;
};
85
/*
 * Pointer overlay for decoding firmware-to-host LPS messages.
 * NOTE(review): the generic member is struct bfi_msg_s * here but
 * struct bfi_mhdr_s * in the h2i union above — confirm intent.
 */
union bfi_lps_i2h_msg_u {
	struct bfi_msg_s *msg;
	struct bfi_lps_login_rsp_s *login_rsp;
	struct bfi_lps_logout_rsp_s *logout_rsp;
};
91
92#pragma pack()
93
94#endif /* __BFI_LPS_H__ */
95
96
diff --git a/drivers/scsi/bfa/include/bfi/bfi_port.h b/drivers/scsi/bfa/include/bfi/bfi_port.h
new file mode 100644
index 000000000000..3ec3bea110ba
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_port.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PORT_H__
18#define __BFI_PORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
/* Host-to-firmware physical-port control message IDs. */
enum bfi_port_h2i {
	BFI_PORT_H2I_ENABLE_REQ = (1),
	BFI_PORT_H2I_DISABLE_REQ = (2),
	BFI_PORT_H2I_GET_STATS_REQ = (3),
	BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
};
31
/* Firmware-to-host physical-port control message IDs. */
enum bfi_port_i2h {
	BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
	BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
	BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
	BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
38
39/**
40 * Generic REQ type
41 */
/* Generic request layout shared by enable/disable/clear-stats. */
struct bfi_port_generic_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u32 msgtag;		/* msgtag for reply */
	u32 rsvd;		/* pad */
};
47
48/**
49 * Generic RSP type
50 */
/* Generic response layout shared by all port replies. */
struct bfi_port_generic_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 status;		/* port enable status */
	u8 rsvd[3];		/* pad */
	u32 msgtag;		/* msgtag for reply */
};
57
58/**
59 * @todo
60 * BFI_PORT_H2I_ENABLE_REQ
61 */
62
63/**
64 * @todo
65 * BFI_PORT_I2H_ENABLE_RSP
66 */
67
68/**
69 * BFI_PORT_H2I_DISABLE_REQ
70 */
71
72/**
73 * BFI_PORT_I2H_DISABLE_RSP
74 */
75
76/**
77 * BFI_PORT_H2I_GET_STATS_REQ
78 */
/* BFI_PORT_H2I_GET_STATS_REQ — firmware DMAs stats to dma_addr. */
struct bfi_port_get_stats_req_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	union bfi_addr_u dma_addr;	/* host buffer for stats DMA */
};
83
84/**
85 * BFI_PORT_I2H_GET_STATS_RSP
86 */
87
88/**
89 * BFI_PORT_H2I_CLEAR_STATS_REQ
90 */
91
92/**
93 * BFI_PORT_I2H_CLEAR_STATS_RSP
94 */
95
/* All host-to-firmware port messages, overlaid by value. */
union bfi_port_h2i_msg_u {
	struct bfi_mhdr_s mh;			/* header only, for decode */
	struct bfi_port_generic_req_s enable_req;
	struct bfi_port_generic_req_s disable_req;
	struct bfi_port_get_stats_req_s getstats_req;
	struct bfi_port_generic_req_s clearstats_req;
};
103
/* All firmware-to-host port messages, overlaid by value. */
union bfi_port_i2h_msg_u {
	struct bfi_mhdr_s mh;			/* header only, for decode */
	struct bfi_port_generic_rsp_s enable_rsp;
	struct bfi_port_generic_rsp_s disable_rsp;
	struct bfi_port_generic_rsp_s getstats_rsp;
	struct bfi_port_generic_rsp_s clearstats_rsp;
};
111
112#pragma pack()
113
114#endif /* __BFI_PORT_H__ */
115
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
new file mode 100644
index 000000000000..c96d246851af
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PPORT_H__
18#define __BFI_PPORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
/* Host-to-firmware pport (FC physical port) message IDs. */
enum bfi_pport_h2i {
	BFI_PPORT_H2I_ENABLE_REQ = (1),
	BFI_PPORT_H2I_DISABLE_REQ = (2),
	BFI_PPORT_H2I_GET_STATS_REQ = (3),
	BFI_PPORT_H2I_CLEAR_STATS_REQ = (4),
	BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5),
	BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6),
	BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7),
	BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8),
	BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9),
};
36
/*
 * Firmware-to-host pport message IDs.  Note the numbering diverges from
 * the H2I enum at BFI_PPORT_I2H_EVENT (8), which has no request pair.
 */
enum bfi_pport_i2h {
	BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
	BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
	BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
	BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
	BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5),
	BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6),
	BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7),
	BFI_PPORT_I2H_EVENT = BFA_I2HM(8),	/* unsolicited link event */
	BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9),
	BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10),
};
49
50/**
51 * Generic REQ type
52 */
/* Generic request layout aliased by most pport *_req_t defines below. */
struct bfi_pport_generic_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u32 msgtag;		/* msgtag for reply */
};
57
58/**
59 * Generic RSP type
60 */
/* Generic response layout aliased by most pport *_rsp_t defines below. */
struct bfi_pport_generic_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8 status;		/* port enable status */
	u8 rsvd[3];		/* pad */
	u32 msgtag;		/* msgtag for reply */
};
67
68/**
69 * BFI_PPORT_H2I_ENABLE_REQ
70 */
/* BFI_PPORT_H2I_ENABLE_REQ — bring the port up with the given config. */
struct bfi_pport_enable_req_s {
	struct bfi_mhdr_s mh;			/* msg header */
	u32 rsvd1;				/* pad */
	wwn_t nwwn;				/* node wwn of physical port */
	wwn_t pwwn;				/* port wwn of physical port */
	struct bfa_pport_cfg_s port_cfg;	/* port configuration */
	union bfi_addr_u stats_dma_addr;	/* DMA address for stats */
	u32 msgtag;				/* msgtag for reply */
	u32 rsvd2;				/* pad */
};
81
/*
 * Most pport requests/responses have no message-specific fields, so
 * each is just an alias of the generic layouts above.
 */

/**
 * BFI_PPORT_I2H_ENABLE_RSP
 */
#define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s

/**
 * BFI_PPORT_H2I_DISABLE_REQ
 */
#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s

/**
 * BFI_PPORT_I2H_DISABLE_RSP
 */
#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s

/**
 * BFI_PPORT_H2I_GET_STATS_REQ
 */
#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s

/**
 * BFI_PPORT_I2H_GET_STATS_RSP
 */
#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s

/**
 * BFI_PPORT_H2I_CLEAR_STATS_REQ
 */
#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s

/**
 * BFI_PPORT_I2H_CLEAR_STATS_RSP
 */
#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s

/**
 * BFI_PPORT_H2I_GET_QOS_STATS_REQ
 */
#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s

/**
 * BFI_PPORT_H2I_GET_QOS_STATS_RSP
 */
#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s

/**
 * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ
 */
#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s

/**
 * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP
 */
#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
136
137/**
138 * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ
139 */
/* BFI_PPORT_H2I_SET_SVC_PARAMS_REQ — program FC service parameters. */
struct bfi_pport_set_svc_params_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u16 tx_bbcredit;	/* Tx credits */
	u16 rsvd;		/* pad */
};
145
146/**
147 * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP
148 */
149
150/**
151 * BFI_PPORT_I2H_EVENT
152 */
/* BFI_PPORT_I2H_EVENT — unsolicited link-state change notification. */
struct bfi_pport_event_s {
	struct bfi_mhdr_s mh;			/* common msg header */
	struct bfa_pport_link_s link_state;	/* new link state */
};
157
/*
 * Pointer overlay for decoding host-to-firmware pport messages.
 * NOTE(review): struct bfi_pport_get_qos_stats_req_s is never defined
 * in this header (only the *_req_t alias above) — it is a pointer to an
 * incomplete type, so this compiles but the tag is unusable.
 */
union bfi_pport_h2i_msg_u {
	struct bfi_mhdr_s *mhdr;
	struct bfi_pport_enable_req_s *penable;
	struct bfi_pport_generic_req_s *pdisable;
	struct bfi_pport_generic_req_s *pgetstats;
	struct bfi_pport_generic_req_s *pclearstats;
	struct bfi_pport_set_svc_params_req_s *psetsvcparams;
	struct bfi_pport_get_qos_stats_req_s *pgetqosstats;
	struct bfi_pport_generic_req_s *pclearqosstats;
};
168
/*
 * Pointer overlay for decoding firmware-to-host pport messages.
 * NOTE(review): several struct tags here (disable_rsp_s,
 * clear_stats_rsp_s, set_svc_params_rsp_s, *_qos_stats_rsp_s) are never
 * defined — pointers to incomplete types.  Also "setsvcparasm_rsp"
 * looks like a typo for "setsvcparams_rsp"; renaming would break users.
 */
union bfi_pport_i2h_msg_u {
	struct bfi_msg_s *msg;
	struct bfi_pport_generic_rsp_s *enable_rsp;
	struct bfi_pport_disable_rsp_s *disable_rsp;
	struct bfi_pport_generic_rsp_s *getstats_rsp;
	struct bfi_pport_clear_stats_rsp_s *clearstats_rsp;
	struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp;
	struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp;
	struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp;
	struct bfi_pport_event_s *event;
};
180
181#pragma pack()
182
183#endif /* __BFI_PPORT_H__ */
184
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h
new file mode 100644
index 000000000000..3520f55f09d7
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_rport.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_RPORT_H__
19#define __BFI_RPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
/* Host-to-firmware remote-port message IDs. */
enum bfi_rport_h2i_msgs {
	BFI_RPORT_H2I_CREATE_REQ = 1,
	BFI_RPORT_H2I_DELETE_REQ = 2,
	BFI_RPORT_H2I_SET_SPEED_REQ = 3,
};
30
/* Firmware-to-host remote-port message IDs. */
enum bfi_rport_i2h_msgs {
	BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
	BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
	BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),	/* QoS state change */
};
36
/* BFI_RPORT_H2I_CREATE_REQ — 24-bit FC IDs packed with 8-bit tags. */
struct bfi_rport_create_req_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u16 bfa_handle;			/* host rport handle */
	u16 max_frmsz;			/* max rcv pdu size */
	u32 pid : 24,			/* remote port ID */
		lp_tag : 8;		/* local port tag */
	u32 local_pid : 24,		/* local port ID */
		cisc : 8;		/* continuously increasing seq count */
	u8 fc_class;			/* supported FC classes */
	u8 vf_en;			/* virtual fabric enable */
	u16 vf_id;			/* virtual fabric ID */
};
49
/* BFI_RPORT_I2H_CREATE_RSP — echoes host handle, returns fw handle. */
struct bfi_rport_create_rsp_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u8 status;			/* rport creation status */
	u8 rsvd[3];			/* pad */
	u16 bfa_handle;			/* host rport handle */
	u16 fw_handle;			/* firmware rport handle */
	struct bfa_rport_qos_attr_s qos_attr;	/* QoS Attributes */
};
58
/*
 * BFI_RPORT_H2I_SET_SPEED_REQ payload.
 * NOTE(review): tag uses the "bfa_" prefix while every other message in
 * this header uses "bfi_" — likely an oversight; renaming breaks users.
 */
struct bfa_rport_speed_req_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u16 fw_handle;			/* firmware rport handle */
	u8 speed;			/* rport's speed via RPSC */
	u8 rsvd;			/* pad */
};
65
/* BFI_RPORT_H2I_DELETE_REQ payload. */
struct bfi_rport_delete_req_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u16 fw_handle;			/* firmware rport handle */
	u16 rsvd;			/* pad */
};
71
/* BFI_RPORT_I2H_DELETE_RSP payload. */
struct bfi_rport_delete_rsp_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u16 bfa_handle;			/* host rport handle */
	u8 status;			/* rport deletion status */
	u8 rsvd;			/* pad */
};
78
/* BFI_RPORT_I2H_QOS_SCN — unsolicited QoS attribute change. */
struct bfi_rport_qos_scn_s {
	struct bfi_mhdr_s mh;		/* common msg header */
	u16 bfa_handle;			/* host rport handle */
	u16 rsvd;			/* pad */
	struct bfa_rport_qos_attr_s old_qos_attr;	/* Old QoS Attributes */
	struct bfa_rport_qos_attr_s new_qos_attr;	/* New QoS Attributes */
};
86
/* Pointer overlay for decoding host-to-firmware rport messages. */
union bfi_rport_h2i_msg_u {
	struct bfi_msg_s *msg;
	struct bfi_rport_create_req_s *create_req;
	struct bfi_rport_delete_req_s *delete_req;
	struct bfa_rport_speed_req_s *speed_req;
};
93
/* Pointer overlay for decoding firmware-to-host rport messages. */
union bfi_rport_i2h_msg_u {
	struct bfi_msg_s *msg;
	struct bfi_rport_create_rsp_s *create_rsp;
	struct bfi_rport_delete_rsp_s *delete_rsp;
	struct bfi_rport_qos_scn_s *qos_scn_evt;
};
100
101#pragma pack()
102
103#endif /* __BFI_RPORT_H__ */
104
diff --git a/drivers/scsi/bfa/include/bfi/bfi_uf.h b/drivers/scsi/bfa/include/bfi/bfi_uf.h
new file mode 100644
index 000000000000..f328a9e7e622
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_uf.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_UF_H__
19#define __BFI_UF_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
/* Host-to-firmware unsolicited-frame message IDs. */
enum bfi_uf_h2i {
	BFI_UF_H2I_BUF_POST = 1,
};
28
/* Firmware-to-host unsolicited-frame message IDs. */
enum bfi_uf_i2h {
	BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
};
32
33#define BFA_UF_MAX_SGES 2
34
/* BFI_UF_H2I_BUF_POST — post a host receive buffer to firmware. */
struct bfi_uf_buf_post_s {
	struct bfi_mhdr_s mh;		/* Common msg header */
	u16 buf_tag;			/* buffer tag */
	u16 buf_len;			/* total buffer length */
	struct bfi_sge_s sge[BFA_UF_MAX_SGES];	/* buffer DMA SGEs */
};
41
/* BFI_UF_I2H_FRM_RCVD — a frame landed in a previously posted buffer. */
struct bfi_uf_frm_rcvd_s {
	struct bfi_mhdr_s mh;		/* Common msg header */
	u16 buf_tag;			/* buffer tag */
	u16 rsvd;			/* pad */
	u16 frm_len;			/* received frame length */
	u16 xfr_len;			/* transferred length */
};
49
50#pragma pack()
51
52#endif /* __BFI_UF_H__ */
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
new file mode 100644
index 000000000000..43ba7064e81a
--- /dev/null
+++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cna_trcmod.h CNA trace modules
20 */
21
22#ifndef __BFA_CNA_TRCMOD_H__
23#define __BFA_CNA_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
/* CNA trace-module IDs; append-only (see note above) for tool compat. */
enum {
	BFA_TRC_CNA_CEE = 1,
	BFA_TRC_CNA_PORT = 2,
};
35
36#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h b/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
new file mode 100644
index 000000000000..77f297f68046
--- /dev/null
+++ b/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CEE_H__
19#define __BFA_CEE_H__
20
21#include <defs/bfa_defs_cee.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
/* Completion callbacks invoked with the caller's dev cookie and result. */
typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, bfa_status_t status);
30
/* Per-operation completion callbacks with their opaque arguments. */
struct bfa_cee_cbfn_s {
	bfa_cee_get_attr_cbfn_t get_attr_cbfn;
	void *get_attr_cbarg;
	bfa_cee_get_stats_cbfn_t get_stats_cbfn;
	void *get_stats_cbarg;
	bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
	void *reset_stats_cbarg;
};
39
/*
 * CEE (Converged Enhanced Ethernet) module state.  The *_pending flags
 * serialize one outstanding mailbox operation of each kind.
 */
struct bfa_cee_s {
	void *dev;				/* driver's opaque cookie */
	bfa_boolean_t get_attr_pending;		/* get_attr in flight */
	bfa_boolean_t get_stats_pending;	/* get_stats in flight */
	bfa_boolean_t reset_stats_pending;	/* reset_stats in flight */
	bfa_status_t get_attr_status;
	bfa_status_t get_stats_status;
	bfa_status_t reset_stats_status;
	struct bfa_cee_cbfn_s cbfn;		/* completion callbacks */
	struct bfa_ioc_hbfail_notify_s hbfail;	/* IOC heartbeat-fail hook */
	struct bfa_trc_mod_s *trcmod;
	struct bfa_log_mod_s *logmod;
	struct bfa_cee_attr_s *attr;		/* DMA-able attr buffer */
	struct bfa_cee_stats_s *stats;		/* DMA-able stats buffer */
	struct bfa_dma_s attr_dma;
	struct bfa_dma_s stats_dma;
	struct bfa_ioc_s *ioc;			/* owning IOC */
	struct bfa_mbox_cmd_s get_cfg_mb;	/* mailbox cmd storage */
	struct bfa_mbox_cmd_s get_stats_mb;
	struct bfa_mbox_cmd_s reset_stats_mb;
};
61
/* CEE module API: memory sizing/claim, attach/detach, async queries. */
u32 bfa_cee_meminfo(void);
void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva,
			u64 dma_pa);
void bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
			struct bfa_trc_mod_s *trcmod,
			struct bfa_log_mod_s *logmod);
void bfa_cee_detach(struct bfa_cee_s *cee);
bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
			struct bfa_cee_attr_s *attr,
			bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
			struct bfa_cee_stats_s *stats,
			bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
			bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
77#endif /* __BFA_CEE_H__ */
diff --git a/drivers/scsi/bfa/include/cna/port/bfa_port.h b/drivers/scsi/bfa/include/cna/port/bfa_port.h
new file mode 100644
index 000000000000..7cbf17d3141b
--- /dev/null
+++ b/drivers/scsi/bfa/include/cna/port/bfa_port.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_H__
19#define __BFA_PORT_H__
20
21#include <defs/bfa_defs_port.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
/* Completion callbacks for stats and enable/disable operations. */
typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
28
/*
 * Port module state.  stats_busy / endis_pending serialize one
 * outstanding mailbox operation of each kind.
 */
struct bfa_port_s {
	void *dev;				/* driver's opaque cookie */
	struct bfa_ioc_s *ioc;			/* owning IOC */
	struct bfa_trc_mod_s *trcmod;
	struct bfa_log_mod_s *logmod;
	u32 msgtag;				/* tag echoed in replies */
	bfa_boolean_t stats_busy;		/* stats request in flight */
	struct bfa_mbox_cmd_s stats_mb;		/* mailbox cmd storage */
	bfa_port_stats_cbfn_t stats_cbfn;
	void *stats_cbarg;
	bfa_status_t stats_status;
	union bfa_pport_stats_u *stats;		/* DMA-able stats buffer */
	struct bfa_dma_s stats_dma;
	bfa_boolean_t endis_pending;		/* enable/disable in flight */
	struct bfa_mbox_cmd_s endis_mb;
	bfa_port_endis_cbfn_t endis_cbfn;
	void *endis_cbarg;
	bfa_status_t endis_status;
	struct bfa_ioc_hbfail_notify_s hbfail;	/* IOC heartbeat-fail hook */
};
49
/* Port module API: attach/detach, async stats and enable/disable. */
void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
			void *dev, struct bfa_trc_mod_s *trcmod,
			struct bfa_log_mod_s *logmod);
void bfa_port_detach(struct bfa_port_s *port);
void bfa_port_hbfail(void *arg);

bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
			union bfa_pport_stats_u *stats,
			bfa_port_stats_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
			bfa_port_stats_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_enable(struct bfa_port_s *port,
			bfa_port_endis_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_disable(struct bfa_port_s *port,
			bfa_port_endis_cbfn_t cbfn, void *cbarg);
u32 bfa_port_meminfo(void);
void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva,
			u64 dma_pa);
69#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h b/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
new file mode 100644
index 000000000000..1563ee512218
--- /dev/null
+++ b/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __ETHPORT_DEFS_H__
18#define __ETHPORT_DEFS_H__
19
/* Driver-maintained (software) counters for the Ethernet port. */
struct bnad_drv_stats {
	u64 netif_queue_stop;		/* times TX queue was stopped */
	u64 netif_queue_wakeup;		/* times TX queue was woken */
	u64 tso4;			/* IPv4 TSO frames */
	u64 tso6;			/* IPv6 TSO frames */
	u64 tso_err;			/* TSO failures */
	u64 tcpcsum_offload;		/* TCP csum offloaded frames */
	u64 udpcsum_offload;		/* UDP csum offloaded frames */
	u64 csum_help;			/* csum computed in software */
	u64 csum_help_err;		/* software csum failures */

	u64 hw_stats_updates;		/* hardware stats refreshes */
	u64 netif_rx_schedule;		/* NAPI schedule calls */
	u64 netif_rx_complete;		/* NAPI complete calls */
	u64 netif_rx_dropped;		/* frames dropped at RX */
};
36#endif
diff --git a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h b/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
new file mode 100644
index 000000000000..eb7548030d0f
--- /dev/null
+++ b/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
@@ -0,0 +1,218 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __PHYPORT_DEFS_H__
18#define __PHYPORT_DEFS_H__
19
20#define BNA_TXF_ID_MAX 64
21#define BNA_RXF_ID_MAX 64
22
23/*
24 * Statistics
25 */
26
27/*
28 * TxF Frame Statistics
29 */
/*
 * TxF Frame Statistics — per TX function, unicast/multicast/broadcast
 * split with VLAN-tagged subcounts.
 */
struct bna_stats_txf {
	u64 ucast_octets;
	u64 ucast;
	u64 ucast_vlan;

	u64 mcast_octets;
	u64 mcast;
	u64 mcast_vlan;

	u64 bcast_octets;
	u64 bcast;
	u64 bcast_vlan;

	u64 errors;
	u64 filter_vlan;	/* frames filtered due to VLAN */
	u64 filter_mac_sa;	/* frames filtered due to SA check */
};
47
48/*
49 * RxF Frame Statistics
50 */
/* RxF Frame Statistics — per RX function, same split as bna_stats_txf. */
struct bna_stats_rxf {
	u64 ucast_octets;
	u64 ucast;
	u64 ucast_vlan;

	u64 mcast_octets;
	u64 mcast;
	u64 mcast_vlan;

	u64 bcast_octets;
	u64 bcast;
	u64 bcast_vlan;
	u64 frame_drops;	/* frames dropped by this RxF */
};
65
66/*
67 * FC Tx Frame Statistics
68 */
/* FC Tx Frame Statistics. */
struct bna_stats_fc_tx {
	u64 txf_ucast_octets;
	u64 txf_ucast;
	u64 txf_ucast_vlan;

	u64 txf_mcast_octets;
	u64 txf_mcast;
	u64 txf_mcast_vlan;

	u64 txf_bcast_octets;
	u64 txf_bcast;
	u64 txf_bcast_vlan;

	u64 txf_parity_errors;
	u64 txf_timeout;
	u64 txf_fid_parity_errors;
};
86
87/*
88 * FC Rx Frame Statistics
89 */
/* FC Rx Frame Statistics. */
struct bna_stats_fc_rx {
	u64 rxf_ucast_octets;
	u64 rxf_ucast;
	u64 rxf_ucast_vlan;

	u64 rxf_mcast_octets;
	u64 rxf_mcast;
	u64 rxf_mcast_vlan;

	u64 rxf_bcast_octets;
	u64 rxf_bcast;
	u64 rxf_bcast_vlan;
};
103
104/*
105 * RAD Frame Statistics
106 */
/* RAD (receive admission/dispatch) frame statistics. */
struct cna_stats_rad {
	u64 rx_frames;
	u64 rx_octets;
	u64 rx_vlan_frames;

	u64 rx_ucast;
	u64 rx_ucast_octets;
	u64 rx_ucast_vlan;

	u64 rx_mcast;
	u64 rx_mcast_octets;
	u64 rx_mcast_vlan;

	u64 rx_bcast;
	u64 rx_bcast_octets;
	u64 rx_bcast_vlan;

	u64 rx_drops;
};
126
127/*
128 * BPC Tx Registers
129 */
/* BPC Tx registers — per-priority (8 classes) PFC pause counters. */
struct cna_stats_bpc_tx {
	u64 tx_pause[8];		/* pause frames per priority */
	u64 tx_zero_pause[8];		/* Pause cancellation */
	u64 tx_first_pause[8];		/* Pause initiation rather
					 * than retention */
};
136
137/*
138 * BPC Rx Registers
139 */
/* BPC Rx registers — per-priority (8 classes) PFC pause counters. */
struct cna_stats_bpc_rx {
	u64 rx_pause[8];		/* pause frames per priority */
	u64 rx_zero_pause[8];		/* Pause cancellation */
	u64 rx_first_pause[8];		/* Pause initiation rather
					 * than retention */
};
146
147/*
148 * MAC Rx Statistics
149 */
/*
 * MAC Rx Statistics.  The frame_* size-histogram counters at the top
 * count both directions (rx and tx), per the hardware register layout.
 */
struct cna_stats_mac_rx {
	u64 frame_64;		/* both rx and tx counter */
	u64 frame_65_127;	/* both rx and tx counter */
	u64 frame_128_255;	/* both rx and tx counter */
	u64 frame_256_511;	/* both rx and tx counter */
	u64 frame_512_1023;	/* both rx and tx counter */
	u64 frame_1024_1518;	/* both rx and tx counter */
	u64 frame_1518_1522;	/* both rx and tx counter */
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_fcs_error;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_control_frames;
	u64 rx_pause;
	u64 rx_unknown_opcode;
	u64 rx_alignment_error;
	u64 rx_frame_length_error;
	u64 rx_code_error;
	u64 rx_carrier_sense_error;
	u64 rx_undersize;
	u64 rx_oversize;
	u64 rx_fragments;
	u64 rx_jabber;
	u64 rx_drop;
};
176
177/*
178 * MAC Tx Statistics
179 */
/*
 * MAC Tx Statistics.
 * NOTE(review): "tx_muliple_collision" looks like a typo for
 * "tx_multiple_collision" — field name is interface-visible, left as is.
 */
struct cna_stats_mac_tx {
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_pause;
	u64 tx_deferral;
	u64 tx_excessive_deferral;
	u64 tx_single_collision;
	u64 tx_muliple_collision;
	u64 tx_late_collision;
	u64 tx_excessive_collision;
	u64 tx_total_collision;
	u64 tx_pause_honored;
	u64 tx_drop;
	u64 tx_jabber;
	u64 tx_fcs_error;
	u64 tx_control_frame;
	u64 tx_oversize;
	u64 tx_undersize;
	u64 tx_fragments;
};
202
203/*
204 * Complete statistics
205 */
206struct bna_stats {
207 struct cna_stats_mac_rx mac_rx_stats;
208 struct cna_stats_bpc_rx bpc_rx_stats;
209 struct cna_stats_rad rad_stats;
210 struct bna_stats_fc_rx fc_rx_stats;
211 struct cna_stats_mac_tx mac_tx_stats;
212 struct cna_stats_bpc_tx bpc_tx_stats;
213 struct bna_stats_fc_tx fc_tx_stats;
214 struct bna_stats_rxf rxf_stats[BNA_TXF_ID_MAX];
215 struct bna_stats_txf txf_stats[BNA_RXF_ID_MAX];
216};
217
218#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h
new file mode 100644
index 000000000000..af8c1d533ba8
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_checksum.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_checksum.h BFA checksum utilities
20 */
21
22#ifndef __BFA_CHECKSUM_H__
23#define __BFA_CHECKSUM_H__
24
25static inline u32
26bfa_checksum_u32(u32 *buf, int sz)
27{
28 int i, m = sz >> 2;
29 u32 sum = 0;
30
31 for (i = 0; i < m; i++)
32 sum ^= buf[i];
33
34 return (sum);
35}
36
37static inline u16
38bfa_checksum_u16(u16 *buf, int sz)
39{
40 int i, m = sz >> 1;
41 u16 sum = 0;
42
43 for (i = 0; i < m; i++)
44 sum ^= buf[i];
45
46 return (sum);
47}
48
49static inline u8
50bfa_checksum_u8(u8 *buf, int sz)
51{
52 int i;
53 u8 sum = 0;
54
55 for (i = 0; i < sz; i++)
56 sum ^= buf[i];
57
58 return (sum);
59}
60#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_debug.h b/drivers/scsi/bfa/include/cs/bfa_debug.h
new file mode 100644
index 000000000000..441be86b1b0f
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_debug.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_debug.h BFA debug interfaces
20 */
21
22#ifndef __BFA_DEBUG_H__
23#define __BFA_DEBUG_H__
24
25#define bfa_assert(__cond) do { \
26 if (!(__cond)) \
27 bfa_panic(__LINE__, __FILE__, #__cond); \
28} while (0)
29
30#define bfa_sm_fault(__mod, __event) do { \
31 bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \
32} while (0)
33
34#ifndef BFA_PERF_BUILD
35#define bfa_assert_fp(__cond) bfa_assert(__cond)
36#else
37#define bfa_assert_fp(__cond)
38#endif
39
40struct bfa_log_mod_s;
41void bfa_panic(int line, char *file, char *panicstr);
42void bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event);
43
44#endif /* __BFA_DEBUG_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
new file mode 100644
index 000000000000..761cbe22130a
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_log.h
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.h BFA log library data structure and function definition
20 */
21
22#ifndef __BFA_LOG_H__
23#define __BFA_LOG_H__
24
25#include <bfa_os_inc.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_aen.h>
28
29/*
30 * BFA log module definition
31 *
32 * To create a new module id:
33 * Add a #define at the end of the list below. Select a value for your
34 * definition so that it is one (1) greater than the previous
35 * definition. Modify the definition of BFA_LOG_MODULE_ID_MAX to become
36 * your new definition.
37 * Should have no gaps in between the values because this is used in arrays.
 38 * IMPORTANT: AEN_IDs must be at the beginning, otherwise update bfa_defs_aen.h
39 */
40
41enum bfa_log_module_id {
42 BFA_LOG_UNUSED_ID = 0,
43
44 /* AEN defs begin */
45 BFA_LOG_AEN_MIN = BFA_LOG_UNUSED_ID,
46
47 BFA_LOG_AEN_ID_ADAPTER = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ADAPTER,/* 1 */
48 BFA_LOG_AEN_ID_PORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_PORT, /* 2 */
49 BFA_LOG_AEN_ID_LPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_LPORT, /* 3 */
50 BFA_LOG_AEN_ID_RPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_RPORT, /* 4 */
51 BFA_LOG_AEN_ID_ITNIM = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ITNIM, /* 5 */
52 BFA_LOG_AEN_ID_TIN = BFA_LOG_AEN_MIN + BFA_AEN_CAT_TIN, /* 6 */
53 BFA_LOG_AEN_ID_IPFC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IPFC, /* 7 */
54 BFA_LOG_AEN_ID_AUDIT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_AUDIT, /* 8 */
55 BFA_LOG_AEN_ID_IOC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IOC, /* 9 */
56 BFA_LOG_AEN_ID_ETHPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ETHPORT,/* 10 */
57
58 BFA_LOG_AEN_MAX = BFA_LOG_AEN_ID_ETHPORT,
59 /* AEN defs end */
60
61 BFA_LOG_MODULE_ID_MIN = BFA_LOG_AEN_MAX,
62
63 BFA_LOG_FW_ID = BFA_LOG_MODULE_ID_MIN + 1,
64 BFA_LOG_HAL_ID = BFA_LOG_MODULE_ID_MIN + 2,
65 BFA_LOG_FCS_ID = BFA_LOG_MODULE_ID_MIN + 3,
66 BFA_LOG_WDRV_ID = BFA_LOG_MODULE_ID_MIN + 4,
67 BFA_LOG_LINUX_ID = BFA_LOG_MODULE_ID_MIN + 5,
68 BFA_LOG_SOLARIS_ID = BFA_LOG_MODULE_ID_MIN + 6,
69
70 BFA_LOG_MODULE_ID_MAX = BFA_LOG_SOLARIS_ID,
71
72 /* Not part of any arrays */
73 BFA_LOG_MODULE_ID_ALL = BFA_LOG_MODULE_ID_MAX + 1,
74 BFA_LOG_AEN_ALL = BFA_LOG_MODULE_ID_MAX + 2,
75 BFA_LOG_DRV_ALL = BFA_LOG_MODULE_ID_MAX + 3,
76};
77
78/*
79 * BFA log catalog name
80 */
81#define BFA_LOG_CAT_NAME "BFA"
82
83/*
84 * bfa log severity values
85 */
86enum bfa_log_severity {
87 BFA_LOG_INVALID = 0,
88 BFA_LOG_CRITICAL = 1,
89 BFA_LOG_ERROR = 2,
90 BFA_LOG_WARNING = 3,
91 BFA_LOG_INFO = 4,
92 BFA_LOG_NONE = 5,
93 BFA_LOG_LEVEL_MAX = BFA_LOG_NONE
94};
95
96#define BFA_LOG_MODID_OFFSET 16
97
98
99struct bfa_log_msgdef_s {
100 u32 msg_id; /* message id */
101 int attributes; /* attributes */
102 int severity; /* severity level */
103 char *msg_value;
104 /* msg string */
105 char *message;
106 /* msg format string */
107 int arg_type; /* argument type */
 108 int arg_num; /* number of arguments */
109};
110
111/*
112 * supported argument type
113 */
114enum bfa_log_arg_type {
115 BFA_LOG_S = 0, /* string */
116 BFA_LOG_D, /* decimal */
117 BFA_LOG_I, /* integer */
118 BFA_LOG_O, /* oct number */
119 BFA_LOG_U, /* unsigned integer */
120 BFA_LOG_X, /* hex number */
121 BFA_LOG_F, /* floating */
122 BFA_LOG_C, /* character */
123 BFA_LOG_L, /* double */
124 BFA_LOG_P /* pointer */
125};
126
127#define BFA_LOG_ARG_TYPE 2
128#define BFA_LOG_ARG0 (0 * BFA_LOG_ARG_TYPE)
129#define BFA_LOG_ARG1 (1 * BFA_LOG_ARG_TYPE)
130#define BFA_LOG_ARG2 (2 * BFA_LOG_ARG_TYPE)
131#define BFA_LOG_ARG3 (3 * BFA_LOG_ARG_TYPE)
132
133#define BFA_LOG_GET_MOD_ID(msgid) ((msgid >> BFA_LOG_MODID_OFFSET) & 0xff)
134#define BFA_LOG_GET_MSG_IDX(msgid) (msgid & 0xffff)
135#define BFA_LOG_GET_MSG_ID(msgdef) ((msgdef)->msg_id)
136#define BFA_LOG_GET_MSG_FMT_STRING(msgdef) ((msgdef)->message)
137#define BFA_LOG_GET_SEVERITY(msgdef) ((msgdef)->severity)
138
139/*
140 * Event attributes
141 */
142#define BFA_LOG_ATTR_NONE 0
143#define BFA_LOG_ATTR_AUDIT 1
144#define BFA_LOG_ATTR_LOG 2
145#define BFA_LOG_ATTR_FFDC 4
146
147#define BFA_LOG_CREATE_ID(msw, lsw) \
148 (((u32)msw << BFA_LOG_MODID_OFFSET) | lsw)
149
150struct bfa_log_mod_s;
151
152/**
153 * callback function
154 */
155typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
156 const char *format, ...);
157
158
159struct bfa_log_mod_s {
160 char instance_info[16]; /* instance info */
161 int log_level[BFA_LOG_MODULE_ID_MAX + 1];
162 /* log level for modules */
163 bfa_log_cb_t cbfn; /* callback function */
164};
165
166extern int bfa_log_init(struct bfa_log_mod_s *log_mod,
167 char *instance_name, bfa_log_cb_t cbfn);
168extern int bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...);
169extern bfa_status_t bfa_log_set_level(struct bfa_log_mod_s *log_mod,
170 int mod_id, enum bfa_log_severity log_level);
171extern bfa_status_t bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
172 enum bfa_log_severity log_level);
173extern bfa_status_t bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
174 enum bfa_log_severity log_level);
175extern enum bfa_log_severity bfa_log_get_level(struct bfa_log_mod_s *log_mod,
176 int mod_id);
177extern enum bfa_log_severity bfa_log_get_msg_level(
178 struct bfa_log_mod_s *log_mod, u32 msg_id);
179/*
180 * array of messages generated from xml files
181 */
182extern struct bfa_log_msgdef_s bfa_log_msg_array[];
183
184#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_perf.h b/drivers/scsi/bfa/include/cs/bfa_perf.h
new file mode 100644
index 000000000000..45aa5f978ff5
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_perf.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFAD_PERF_H__
18#define __BFAD_PERF_H__
19
20#ifdef BFAD_PERF_BUILD
21
22#undef bfa_trc
23#undef bfa_trc32
24#undef bfa_assert
25#undef BFA_TRC_FILE
26
27#define bfa_trc(_trcp, _data)
28#define bfa_trc32(_trcp, _data)
29#define bfa_assert(__cond)
30#define BFA_TRC_FILE(__mod, __submod)
31
32#endif
33
34#endif /* __BFAD_PERF_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h
new file mode 100644
index 000000000000..670f86e5fc6e
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_plog.h
@@ -0,0 +1,162 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_PORTLOG_H__
18#define __BFA_PORTLOG_H__
19
20#include "protocol/fc.h"
21#include <defs/bfa_defs_types.h>
22
23#define BFA_PL_NLOG_ENTS 256
24#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
25
26#define BFA_PL_STRING_LOG_SZ 32 /* number of chars in string log */
27#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */
28
29enum bfa_plog_log_type {
30 BFA_PL_LOG_TYPE_INVALID = 0,
31 BFA_PL_LOG_TYPE_INT = 1,
32 BFA_PL_LOG_TYPE_STRING = 2,
33};
34
35/*
36 * the (fixed size) record format for each entry in the portlog
37 */
38struct bfa_plog_rec_s {
39 u32 tv; /* Filled by the portlog driver when the *
40 * entry is added to the circular log. */
41 u8 port; /* Source port that logged this entry. CM
42 * entities will use 0xFF */
43 u8 mid; /* Integer value to be used by all entities *
44 * while logging. The module id to string *
45 * conversion will be done by BFAL. See
46 * enum bfa_plog_mid */
47 u8 eid; /* indicates Rx, Tx, IOCTL, etc. See
48 * enum bfa_plog_eid */
49 u8 log_type; /* indicates string log or integer log.
50 * see bfa_plog_log_type_t */
51 u8 log_num_ints;
52 /*
53 * interpreted only if log_type is INT_LOG. indicates number of
54 * integers in the int_log[] (0-PL_INT_LOG_SZ).
55 */
56 u8 rsvd;
57 u16 misc; /* can be used to indicate fc frame length,
 58 * etc. */
59 union {
60 char string_log[BFA_PL_STRING_LOG_SZ];
61 u32 int_log[BFA_PL_INT_LOG_SZ];
62 } log_entry;
63
64};
65
66/*
67 * the following #defines will be used by the logging entities to indicate
68 * their module id. BFAL will convert the integer value to string format
69 *
 70 * process to be used while changing the following #defines:
71 * - Always add new entries at the end
72 * - define corresponding string in BFAL
73 * - Do not remove any entry or rearrange the order.
74 */
75enum bfa_plog_mid {
76 BFA_PL_MID_INVALID = 0,
77 BFA_PL_MID_DEBUG = 1,
78 BFA_PL_MID_DRVR = 2,
79 BFA_PL_MID_HAL = 3,
80 BFA_PL_MID_HAL_FCXP = 4,
81 BFA_PL_MID_HAL_UF = 5,
82 BFA_PL_MID_FCS = 6,
83 BFA_PL_MID_MAX = 7
84};
85
86#define BFA_PL_MID_STRLEN 8
87struct bfa_plog_mid_strings_s {
88 char m_str[BFA_PL_MID_STRLEN];
89};
90
91/*
92 * the following #defines will be used by the logging entities to indicate
93 * their event type. BFAL will convert the integer value to string format
94 *
 95 * process to be used while changing the following #defines:
96 * - Always add new entries at the end
97 * - define corresponding string in BFAL
98 * - Do not remove any entry or rearrange the order.
99 */
100enum bfa_plog_eid {
101 BFA_PL_EID_INVALID = 0,
102 BFA_PL_EID_IOC_DISABLE = 1,
103 BFA_PL_EID_IOC_ENABLE = 2,
104 BFA_PL_EID_PORT_DISABLE = 3,
105 BFA_PL_EID_PORT_ENABLE = 4,
106 BFA_PL_EID_PORT_ST_CHANGE = 5,
107 BFA_PL_EID_TX = 6,
108 BFA_PL_EID_TX_ACK1 = 7,
109 BFA_PL_EID_TX_RJT = 8,
110 BFA_PL_EID_TX_BSY = 9,
111 BFA_PL_EID_RX = 10,
112 BFA_PL_EID_RX_ACK1 = 11,
113 BFA_PL_EID_RX_RJT = 12,
114 BFA_PL_EID_RX_BSY = 13,
115 BFA_PL_EID_CT_IN = 14,
116 BFA_PL_EID_CT_OUT = 15,
117 BFA_PL_EID_DRIVER_START = 16,
118 BFA_PL_EID_RSCN = 17,
119 BFA_PL_EID_DEBUG = 18,
120 BFA_PL_EID_MISC = 19,
121 BFA_PL_EID_MAX = 20
122};
123
124#define BFA_PL_ENAME_STRLEN 8
125struct bfa_plog_eid_strings_s {
126 char e_str[BFA_PL_ENAME_STRLEN];
127};
128
129#define BFA_PL_SIG_LEN 8
130#define BFA_PL_SIG_STR "12pl123"
131
132/*
133 * per port circular log buffer
134 */
135struct bfa_plog_s {
136 char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
137 u8 plog_enabled;
138 u8 rsvd[7];
139 u32 ticks;
140 u16 head;
141 u16 tail;
142 struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS];
143};
144
145void bfa_plog_init(struct bfa_plog_s *plog);
146void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
147 enum bfa_plog_eid event, u16 misc, char *log_str);
148void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
149 enum bfa_plog_eid event, u16 misc,
150 u32 *intarr, u32 num_ints);
151void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
152 enum bfa_plog_eid event, u16 misc,
153 struct fchs_s *fchdr);
154void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
155 enum bfa_plog_eid event, u16 misc,
156 struct fchs_s *fchdr, u32 pld_w0);
157void bfa_plog_clear(struct bfa_plog_s *plog);
158void bfa_plog_enable(struct bfa_plog_s *plog);
159void bfa_plog_disable(struct bfa_plog_s *plog);
160bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog);
161
162#endif /* __BFA_PORTLOG_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_q.h b/drivers/scsi/bfa/include/cs/bfa_q.h
new file mode 100644
index 000000000000..ea895facedbc
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_q.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_q.h Circular queue definitions.
20 */
21
22#ifndef __BFA_Q_H__
23#define __BFA_Q_H__
24
25#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
26#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
27#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
28
29/*
30 * bfa_q_qe_init - to initialize a queue element
31 */
32#define bfa_q_qe_init(_qe) { \
33 bfa_q_next(_qe) = (struct list_head *) NULL; \
34 bfa_q_prev(_qe) = (struct list_head *) NULL; \
35}
36
37/*
38 * bfa_q_deq - dequeue an element from head of the queue
39 */
40#define bfa_q_deq(_q, _qe) { \
41 if (!list_empty(_q)) { \
42 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
43 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
44 (struct list_head *) (_q); \
45 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
46 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
47 } else { \
48 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
49 } \
50}
51
52/*
53 * bfa_q_deq_tail - dequeue an element from tail of the queue
54 */
55#define bfa_q_deq_tail(_q, _qe) { \
56 if (!list_empty(_q)) { \
57 *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
58 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
59 (struct list_head *) (_q); \
60 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe); \
61 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
62 } else { \
63 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
64 } \
65}
66
67/*
68 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
69 * consistent across modules)
70 */
71#ifndef BFA_PERF_BUILD
72#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
73#else
74#define BFA_Q_DBG_INIT(_qe)
75#endif
76
77#define bfa_q_is_on_q(_q, _qe) \
78 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
79extern int bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe);
80
81#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
new file mode 100644
index 000000000000..9877066680a6
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
 19 * bfa_sm.h State machine defines
20 */
21
22#ifndef __BFA_SM_H__
23#define __BFA_SM_H__
24
25typedef void (*bfa_sm_t)(void *sm, int event);
26
27#define bfa_sm_set_state(_sm, _state) (_sm)->sm = (bfa_sm_t)(_state)
28#define bfa_sm_send_event(_sm, _event) (_sm)->sm((_sm), (_event))
29#define bfa_sm_get_state(_sm) ((_sm)->sm)
30#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
31
32/**
33 * For converting from state machine function to state encoding.
34 */
35struct bfa_sm_table_s {
36 bfa_sm_t sm; /* state machine function */
37 int state; /* state machine encoding */
38 char *name; /* state name for display */
39};
40#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
41
42int bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm);
43
44/**
45 * State machine with entry actions.
46 */
47typedef void (*bfa_fsm_t)(void *fsm, int event);
48
49/**
50 * oc - object class eg. bfa_ioc
51 * st - state, eg. reset
52 * otype - object type, eg. struct bfa_ioc_s
53 * etype - object type, eg. enum ioc_event
54 */
55#define bfa_fsm_state_decl(oc, st, otype, etype) \
56 static void oc ## _sm_ ## st(otype * fsm, etype event); \
57 static void oc ## _sm_ ## st ## _entry(otype * fsm)
58
59#define bfa_fsm_set_state(_fsm, _state) do { \
60 (_fsm)->fsm = (bfa_fsm_t)(_state); \
61 _state ## _entry(_fsm); \
62} while (0)
63
64#define bfa_fsm_send_event(_fsm, _event) \
65 (_fsm)->fsm((_fsm), (_event))
66#define bfa_fsm_cmp_state(_fsm, _state) \
67 ((_fsm)->fsm == (bfa_fsm_t)(_state))
68
69#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h
new file mode 100644
index 000000000000..3e743928c74c
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_trc.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TRC_H__
18#define __BFA_TRC_H__
19
20#include <bfa_os_inc.h>
21
22#ifndef BFA_TRC_MAX
23#define BFA_TRC_MAX (4 * 1024)
24#endif
25
26#ifndef BFA_TRC_TS
27#define BFA_TRC_TS(_trcm) ((_trcm)->ticks ++)
28#endif
29
30struct bfa_trc_s {
31#ifdef __BIGENDIAN
32 u16 fileno;
33 u16 line;
34#else
35 u16 line;
36 u16 fileno;
37#endif
38 u32 timestamp;
39 union {
40 struct {
41 u32 rsvd;
42 u32 u32;
43 } u32;
44 u64 u64;
45 } data;
46};
47
48
49struct bfa_trc_mod_s {
50 u32 head;
51 u32 tail;
52 u32 ntrc;
53 u32 stopped;
54 u32 ticks;
55 u32 rsvd[3];
56 struct bfa_trc_s trc[BFA_TRC_MAX];
57};
58
59
60enum {
61 BFA_TRC_FW = 1, /* firmware modules */
62 BFA_TRC_HAL = 2, /* BFA modules */
63 BFA_TRC_FCS = 3, /* BFA FCS modules */
64 BFA_TRC_LDRV = 4, /* Linux driver modules */
65 BFA_TRC_SDRV = 5, /* Solaris driver modules */
66 BFA_TRC_VDRV = 6, /* vmware driver modules */
67 BFA_TRC_WDRV = 7, /* windows driver modules */
68 BFA_TRC_AEN = 8, /* AEN module */
69 BFA_TRC_BIOS = 9, /* bios driver modules */
70 BFA_TRC_EFI = 10, /* EFI driver modules */
71 BNA_TRC_WDRV = 11, /* BNA windows driver modules */
72 BNA_TRC_VDRV = 12, /* BNA vmware driver modules */
73 BNA_TRC_SDRV = 13, /* BNA Solaris driver modules */
74 BNA_TRC_LDRV = 14, /* BNA Linux driver modules */
75 BNA_TRC_HAL = 15, /* BNA modules */
76 BFA_TRC_CNA = 16, /* Common modules */
77 BNA_TRC_IMDRV = 17 /* BNA windows intermediate driver modules */
78};
79#define BFA_TRC_MOD_SH 10
80#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
81
82/**
83 * Define a new tracing file (module). Module should match one defined above.
84 */
85#define BFA_TRC_FILE(__mod, __submod) \
86 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
87 BFA_TRC_MOD(__mod))
88
89
90#define bfa_trc32(_trcp, _data) \
91 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
92
93
94#ifndef BFA_BOOT_BUILD
95#define bfa_trc(_trcp, _data) \
96 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
97#else
98void bfa_boot_trc(struct bfa_trc_mod_s *trcmod, u16 fileno,
99 u16 line, u32 data);
100#define bfa_trc(_trcp, _data) \
101 bfa_boot_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
102#endif
103
104
105static inline void
106bfa_trc_init(struct bfa_trc_mod_s *trcm)
107{
108 trcm->head = trcm->tail = trcm->stopped = 0;
109 trcm->ntrc = BFA_TRC_MAX;
110}
111
112
113static inline void
114bfa_trc_stop(struct bfa_trc_mod_s *trcm)
115{
116 trcm->stopped = 1;
117}
118
119#ifdef FWTRC
120extern void dc_flush(void *data);
121#else
122#define dc_flush(data)
123#endif
124
125
126static inline void
127__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
128{
129 int tail = trcm->tail;
130 struct bfa_trc_s *trc = &trcm->trc[tail];
131
132 if (trcm->stopped)
133 return;
134
135 trc->fileno = (u16) fileno;
136 trc->line = (u16) line;
137 trc->data.u64 = data;
138 trc->timestamp = BFA_TRC_TS(trcm);
139 dc_flush(trc);
140
141 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
142 if (trcm->tail == trcm->head)
143 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
144 dc_flush(trcm);
145}
146
147
148static inline void
149__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
150{
151 int tail = trcm->tail;
152 struct bfa_trc_s *trc = &trcm->trc[tail];
153
154 if (trcm->stopped)
155 return;
156
157 trc->fileno = (u16) fileno;
158 trc->line = (u16) line;
159 trc->data.u32.u32 = data;
160 trc->timestamp = BFA_TRC_TS(trcm);
161 dc_flush(trc);
162
163 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
164 if (trcm->tail == trcm->head)
165 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
166 dc_flush(trcm);
167}
168
169#ifndef BFA_PERF_BUILD
170#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
171#else
172#define bfa_trc_fp(_trcp, _data)
173#endif
174
175#endif /* __BFA_TRC_H__ */
176
diff --git a/drivers/scsi/bfa/include/cs/bfa_wc.h b/drivers/scsi/bfa/include/cs/bfa_wc.h
new file mode 100644
index 000000000000..0460bd4fc7c4
--- /dev/null
+++ b/drivers/scsi/bfa/include/cs/bfa_wc.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_wc.h Generic wait counter.
20 */
21
22#ifndef __BFA_WC_H__
23#define __BFA_WC_H__
24
25typedef void (*bfa_wc_resume_t) (void *cbarg);
26
27struct bfa_wc_s {
28 bfa_wc_resume_t wc_resume;
29 void *wc_cbarg;
30 int wc_count;
31};
32
33static inline void
34bfa_wc_up(struct bfa_wc_s *wc)
35{
36 wc->wc_count++;
37}
38
39static inline void
40bfa_wc_down(struct bfa_wc_s *wc)
41{
42 wc->wc_count--;
43 if (wc->wc_count == 0)
44 wc->wc_resume(wc->wc_cbarg);
45}
46
47/**
48 * Initialize a waiting counter.
49 */
50static inline void
51bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
52{
53 wc->wc_resume = wc_resume;
54 wc->wc_cbarg = wc_cbarg;
55 wc->wc_count = 0;
56 bfa_wc_up(wc);
57}
58
59/**
60 * Wait for counter to reach zero
61 */
62static inline void
63bfa_wc_wait(struct bfa_wc_s *wc)
64{
65 bfa_wc_down(wc);
66}
67
68#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
new file mode 100644
index 000000000000..8c208fc8e329
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ADAPTER_H__
18#define __BFA_DEFS_ADAPTER_H__
19
20#include <protocol/types.h>
21#include <defs/bfa_defs_version.h>
22#include <defs/bfa_defs_mfg.h>
23
24/**
25 * BFA adapter level attributes.
26 */
27enum {
28 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
29 /*
30 *!< adapter serial num length
31 */
32 BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
33 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
34 BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
35 BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
36 BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
37};
38
39struct bfa_adapter_attr_s {
40 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
41 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
42 u32 rsvd1;
43 char model[BFA_ADAPTER_MODEL_NAME_LEN];
44 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
45 wwn_t pwwn;
46 char node_symname[FC_SYMNAME_MAX];
47 char hw_ver[BFA_VERSION_LEN];
48 char fw_ver[BFA_VERSION_LEN];
49 char optrom_ver[BFA_VERSION_LEN];
50 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
51 struct bfa_mfg_vpd_s vpd;
52 struct mac_s mac;
53
54 u8 nports;
55 u8 max_speed;
56 u8 prototype;
57 char asic_rev;
58
59 u8 pcie_gen;
60 u8 pcie_lanes_orig;
61 u8 pcie_lanes;
62 u8 cna_capable;
63};
64
65/**
66 * BFA adapter level events
67 * Arguments below are in BFAL context from Mgmt
68 * BFA_PORT_AEN_ADD: [in]: None [out]: serial_num, pwwn, nports
69 * BFA_PORT_AEN_REMOVE: [in]: pwwn [out]: serial_num, pwwn, nports
70 */
71enum bfa_adapter_aen_event {
72 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
73 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
74};
75
76struct bfa_adapter_aen_data_s {
77 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
78 u32 nports; /* Number of NPorts */
79 wwn_t pwwn; /* WWN of one of its physical port */
80};
81
82#endif /* __BFA_DEFS_ADAPTER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
new file mode 100644
index 000000000000..4c81a613db3d
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AEN_H__
19#define __BFA_DEFS_AEN_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_ioc.h>
23#include <defs/bfa_defs_adapter.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_lport.h>
26#include <defs/bfa_defs_rport.h>
27#include <defs/bfa_defs_itnim.h>
28#include <defs/bfa_defs_tin.h>
29#include <defs/bfa_defs_ipfc.h>
30#include <defs/bfa_defs_audit.h>
31#include <defs/bfa_defs_ethport.h>
32
33enum bfa_aen_category {
34 BFA_AEN_CAT_ADAPTER = 1,
35 BFA_AEN_CAT_PORT = 2,
36 BFA_AEN_CAT_LPORT = 3,
37 BFA_AEN_CAT_RPORT = 4,
38 BFA_AEN_CAT_ITNIM = 5,
39 BFA_AEN_CAT_TIN = 6,
40 BFA_AEN_CAT_IPFC = 7,
41 BFA_AEN_CAT_AUDIT = 8,
42 BFA_AEN_CAT_IOC = 9,
43 BFA_AEN_CAT_ETHPORT = 10,
44 BFA_AEN_MAX_CAT = 10
45};
46
47#pragma pack(1)
48union bfa_aen_data_u {
49 struct bfa_adapter_aen_data_s adapter;
50 struct bfa_port_aen_data_s port;
51 struct bfa_lport_aen_data_s lport;
52 struct bfa_rport_aen_data_s rport;
53 struct bfa_itnim_aen_data_s itnim;
54 struct bfa_audit_aen_data_s audit;
55 struct bfa_ioc_aen_data_s ioc;
56 struct bfa_ethport_aen_data_s ethport;
57};
58
59struct bfa_aen_entry_s {
60 enum bfa_aen_category aen_category;
61 int aen_type;
62 union bfa_aen_data_u aen_data;
63 struct bfa_timeval_s aen_tv;
64 s32 seq_num;
65 s32 bfad_num;
66 s32 rsvd[1];
67};
68
69#pragma pack()
70
71#define bfa_aen_event_t int
72
73#endif /* __BFA_DEFS_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h b/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
new file mode 100644
index 000000000000..8e3a962bf20c
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AUDIT_H__
19#define __BFA_DEFS_AUDIT_H__
20
21#include <bfa_os_inc.h>
22
23/**
24 * BFA audit events
25 */
26enum bfa_audit_aen_event {
27 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
28 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
29};
30
31/**
32 * audit event data
33 */
34struct bfa_audit_aen_data_s {
35 wwn_t pwwn;
36};
37
38#endif /* __BFA_DEFS_AUDIT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
new file mode 100644
index 000000000000..dd19c83aba58
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_AUTH_H__
18#define __BFA_DEFS_AUTH_H__
19
20#include <defs/bfa_defs_types.h>
21
22#define PUBLIC_KEY 15409
23#define PRIVATE_KEY 19009
24#define KEY_LEN 32399
25#define BFA_AUTH_SECRET_STRING_LEN 256
26#define BFA_AUTH_FAIL_TIMEOUT 0xFF
27
28/**
29 * Authentication status
30 */
31enum bfa_auth_status {
32 BFA_AUTH_STATUS_NONE = 0, /* no authentication */
33 BFA_AUTH_UNINIT = 1, /* state - uninit */
34 BFA_AUTH_NEG_SEND = 2, /* state - negotiate send */
35 BFA_AUTH_CHAL_WAIT = 3, /* state - challenge wait */
36 BFA_AUTH_NEG_RETRY = 4, /* state - negotiate retry */
37 BFA_AUTH_REPLY_SEND = 5, /* state - reply send */
38 BFA_AUTH_STATUS_WAIT = 6, /* state - status wait */
39 BFA_AUTH_SUCCESS = 7, /* state - success */
40 BFA_AUTH_FAILED = 8, /* state - failed */
41 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
42};
43
44struct auth_proto_stats_s {
45 u32 auth_rjts;
46 u32 auth_negs;
47 u32 auth_dones;
48
49 u32 dhchap_challenges;
50 u32 dhchap_replies;
51 u32 dhchap_successes;
52};
53
54/**
55 * Authentication related statistics
56 */
57struct bfa_auth_stats_s {
58 u32 auth_failures; /* authentication failures */
59 u32 auth_successes; /* authentication successes*/
60 struct auth_proto_stats_s auth_rx_stats; /* Rx protocol stats */
61 struct auth_proto_stats_s auth_tx_stats; /* Tx protocol stats */
62};
63
64/**
65 * Authentication hash function algorithms
66 */
67enum bfa_auth_algo {
68 BFA_AUTH_ALGO_MD5 = 1, /* Message-Digest algorithm 5 */
69 BFA_AUTH_ALGO_SHA1 = 2, /* Secure Hash Algorithm 1 */
70 BFA_AUTH_ALGO_MS = 3, /* MD5, then SHA-1 */
71 BFA_AUTH_ALGO_SM = 4, /* SHA-1, then MD5 */
72};
73
74/**
75 * DH Groups
76 *
77 * Current value could be combination of one or more of the following values
78 */
79enum bfa_auth_group {
80 BFA_AUTH_GROUP_DHNULL = 0, /* DH NULL (value == 0) */
81 BFA_AUTH_GROUP_DH768 = 1, /* DH group 768 (value == 1) */
82 BFA_AUTH_GROUP_DH1024 = 2, /* DH group 1024 (value == 2) */
83 BFA_AUTH_GROUP_DH1280 = 4, /* DH group 1280 (value == 3) */
84 BFA_AUTH_GROUP_DH1536 = 8, /* DH group 1536 (value == 4) */
85
86 BFA_AUTH_GROUP_ALL = 256 /* Use default DH group order
87 * 0, 1, 2, 3, 4 */
88};
89
90/**
91 * Authentication secret sources
92 */
93enum bfa_auth_secretsource {
94 BFA_AUTH_SECSRC_LOCAL = 1, /* locally configured */
95 BFA_AUTH_SECSRC_RADIUS = 2, /* use radius server */
96 BFA_AUTH_SECSRC_TACACS = 3, /* TACACS server */
97};
98
99/**
100 * Authentication attributes
101 */
102struct bfa_auth_attr_s {
103 enum bfa_auth_status status;
104 enum bfa_auth_algo algo;
105 enum bfa_auth_group dh_grp;
106 u16 rjt_code;
107 u16 rjt_code_exp;
108 u8 secret_set;
109 u8 resv[7];
110};
111
112#endif /* __BFA_DEFS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
new file mode 100644
index 000000000000..6f4aa5283545
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_BOOT_H__
19#define __BFA_DEFS_BOOT_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_pport.h>
24
25enum {
26 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
27};
28
29#define BOOT_CFG_REV1 1
30
31/**
32 * Boot options setting. Boot options setting determines from where
33 * to get the boot lun information
34 */
35enum bfa_boot_bootopt {
36 BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
37 BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
38 BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
39};
40
41/**
42 * Boot lun information.
43 */
44struct bfa_boot_bootlun_s {
45 wwn_t pwwn; /* port wwn of target */
46 lun_t lun; /* 64-bit lun */
47};
48
49/**
50 * BOOT boot configuraton
51 */
52struct bfa_boot_cfg_s {
53 u8 version;
54 u8 rsvd1;
55 u16 chksum;
56
57 u8 enable; /* enable/disable SAN boot */
58 u8 speed; /* boot speed settings */
59 u8 topology; /* boot topology setting */
60 u8 bootopt; /* bfa_boot_bootopt_t */
61
62 u32 nbluns; /* number of boot luns */
63
64 u32 rsvd2;
65
66 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
67 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
68};
69
70
71#endif /* __BFA_DEFS_BOOT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
new file mode 100644
index 000000000000..520a22f52dd1
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -0,0 +1,159 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_cee.h Interface declarations between host based
7 * BFAL and DCBX/LLDP module in Firmware
8 *
9 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License (GPL) Version 2 as
13 * published by the Free Software Foundation
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 */
20#ifndef __BFA_DEFS_CEE_H__
21#define __BFA_DEFS_CEE_H__
22
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_pport.h>
25#include <protocol/types.h>
26
27#pragma pack(1)
28
29#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
30
31
32/* FIXME: this is coming from the protocol spec. Can the host & apps share the
33 protocol .h files ?
34 */
35#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
36#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
37#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
38#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
39#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
40#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
41#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
42#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
43#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
44#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
45#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
46
47
48/* LLDP string type */
49struct bfa_cee_lldp_str_s {
50 u8 sub_type;
51 u8 len;
52 u8 rsvd[2];
53 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
54};
55
56
57/* LLDP paramters */
58struct bfa_cee_lldp_cfg_s {
59 struct bfa_cee_lldp_str_s chassis_id;
60 struct bfa_cee_lldp_str_s port_id;
61 struct bfa_cee_lldp_str_s port_desc;
62 struct bfa_cee_lldp_str_s sys_name;
63 struct bfa_cee_lldp_str_s sys_desc;
64 struct bfa_cee_lldp_str_s mgmt_addr;
65 u16 time_to_interval;
66 u16 enabled_system_cap;
67};
68
69enum bfa_cee_dcbx_version_e {
70 DCBX_PROTOCOL_PRECEE = 1,
71 DCBX_PROTOCOL_CEE = 2,
72};
73
74enum bfa_cee_lls_e {
75 CEE_LLS_DOWN_NO_TLV = 0, /* LLS is down because the TLV not sent by
76 * the peer */
77 CEE_LLS_DOWN = 1, /* LLS is down as advertised by the peer */
78 CEE_LLS_UP = 2,
79};
80
81/* CEE/DCBX parameters */
82struct bfa_cee_dcbx_cfg_s {
83 u8 pgid[8];
84 u8 pg_percentage[8];
85 u8 pfc_enabled; /* bitmap of priorties with PFC enabled */
86 u8 fcoe_user_priority; /* bitmap of priorities used for FcoE
87 * traffic */
88 u8 dcbx_version; /* operating version:CEE or preCEE */
89 u8 lls_fcoe; /* FCoE Logical Link Status */
90 u8 lls_lan; /* LAN Logical Link Status */
91 u8 rsvd[3];
92};
93
94/* CEE status */
95/* Making this to tri-state for the benefit of port list command */
96enum bfa_cee_status_e {
97 CEE_PHY_DOWN = 0,
98 CEE_PHY_UP = 1,
99 CEE_UP = 2,
100};
101
102/* CEE Query */
103struct bfa_cee_attr_s {
104 u8 cee_status;
105 u8 error_reason;
106 struct bfa_cee_lldp_cfg_s lldp_remote;
107 struct bfa_cee_dcbx_cfg_s dcbx_remote;
108 mac_t src_mac;
109 u8 link_speed;
110 u8 filler[3];
111};
112
113
114
115
116/* LLDP/DCBX/CEE Statistics */
117
118struct bfa_cee_lldp_stats_s {
119 u32 frames_transmitted;
120 u32 frames_aged_out;
121 u32 frames_discarded;
122 u32 frames_in_error;
123 u32 frames_rcvd;
124 u32 tlvs_discarded;
125 u32 tlvs_unrecognized;
126};
127
128struct bfa_cee_dcbx_stats_s {
129 u32 subtlvs_unrecognized;
130 u32 negotiation_failed;
131 u32 remote_cfg_changed;
132 u32 tlvs_received;
133 u32 tlvs_invalid;
134 u32 seqno;
135 u32 ackno;
136 u32 recvd_seqno;
137 u32 recvd_ackno;
138};
139
140struct bfa_cee_cfg_stats_s {
141 u32 cee_status_down;
142 u32 cee_status_up;
143 u32 cee_hw_cfg_changed;
144 u32 recvd_invalid_cfg;
145};
146
147
148struct bfa_cee_stats_s {
149 struct bfa_cee_lldp_stats_s lldp_stats;
150 struct bfa_cee_dcbx_stats_s dcbx_stats;
151 struct bfa_cee_cfg_stats_s cfg_stats;
152};
153
154#pragma pack()
155
156
157#endif /* __BFA_DEFS_CEE_H__ */
158
159
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
new file mode 100644
index 000000000000..57049805762b
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_DRIVER_H__
19#define __BFA_DEFS_DRIVER_H__
20
21/**
22 * Driver statistics
23 */
24 u16 tm_io_abort;
25 u16 tm_io_abort_comp;
26 u16 tm_lun_reset;
27 u16 tm_lun_reset_comp;
28 u16 tm_target_reset;
29 u16 tm_bus_reset;
30 u16 ioc_restart; /* IOC restart count */
31 u16 io_pending; /* outstanding io count per-IOC */
32 u64 control_req;
33 u64 input_req;
34 u64 output_req;
35 u64 input_words;
36 u64 output_words;
37} bfa_driver_stats_t;
38
39
40#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
new file mode 100644
index 000000000000..79f9b3e146f7
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_ETHPORT_H__
19#define __BFA_DEFS_ETHPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <protocol/types.h>
23#include <cna/pstats/phyport_defs.h>
24#include <cna/pstats/ethport_defs.h>
25
26struct bna_tx_info_s {
27 u32 miniport_state;
28 u32 adapter_state;
29 u64 tx_count;
30 u64 tx_wi;
31 u64 tx_sg;
32 u64 tx_tcp_chksum;
33 u64 tx_udp_chksum;
34 u64 tx_ip_chksum;
35 u64 tx_lsov1;
36 u64 tx_lsov2;
37 u64 tx_max_sg_len ;
38};
39
40struct bna_rx_queue_info_s {
41 u16 q_id ;
42 u16 buf_size ;
43 u16 buf_count ;
44 u16 rsvd ;
45 u64 rx_count ;
46 u64 rx_dropped ;
47 u64 rx_unsupported ;
48 u64 rx_internal_err ;
49 u64 rss_count ;
50 u64 vlan_count ;
51 u64 rx_tcp_chksum ;
52 u64 rx_udp_chksum ;
53 u64 rx_ip_chksum ;
54 u64 rx_hds ;
55};
56
57struct bna_rx_q_set_s {
58 u16 q_set_type;
59 u32 miniport_state;
60 u32 adapter_state;
61 struct bna_rx_queue_info_s rx_queue[2];
62};
63
64struct bna_port_stats_s {
65 struct bna_tx_info_s tx_stats;
66 u16 qset_count ;
67 struct bna_rx_q_set_s rx_qset[8];
68};
69
70struct bfa_ethport_stats_s {
71 struct bna_stats_txf txf_stats[1];
72 struct bna_stats_rxf rxf_stats[1];
73 struct bnad_drv_stats drv_stats;
74};
75
76/**
77 * Ethernet port events
78 * Arguments below are in BFAL context from Mgmt
79 * BFA_PORT_AEN_ETH_LINKUP: [in]: mac [out]: mac
80 * BFA_PORT_AEN_ETH_LINKDOWN: [in]: mac [out]: mac
81 * BFA_PORT_AEN_ETH_ENABLE: [in]: mac [out]: mac
82 * BFA_PORT_AEN_ETH_DISABLE: [in]: mac [out]: mac
83 *
84 */
85enum bfa_ethport_aen_event {
86 BFA_ETHPORT_AEN_LINKUP = 1, /* Base Port Ethernet link up event */
87 BFA_ETHPORT_AEN_LINKDOWN = 2, /* Base Port Ethernet link down event */
88 BFA_ETHPORT_AEN_ENABLE = 3, /* Base Port Ethernet link enable event */
89 BFA_ETHPORT_AEN_DISABLE = 4, /* Base Port Ethernet link disable
90 * event */
91};
92
93struct bfa_ethport_aen_data_s {
94 mac_t mac; /* MAC address of the physical port */
95};
96
97
98#endif /* __BFA_DEFS_ETHPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
new file mode 100644
index 000000000000..c08f4f5026ac
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_FCPIM_H__
18#define __BFA_DEFS_FCPIM_H__
19
20struct bfa_fcpim_stats_s {
21 u32 total_ios; /* Total IO count */
22 u32 qresumes; /* IO waiting for CQ space */
23 u32 no_iotags; /* NO IO contexts */
24 u32 io_aborts; /* IO abort requests */
25 u32 no_tskims; /* NO task management contexts */
26 u32 iocomp_ok; /* IO completions with OK status */
27 u32 iocomp_underrun; /* IO underrun (good) */
28 u32 iocomp_overrun; /* IO overrun (good) */
29 u32 iocomp_aborted; /* Aborted IO requests */
30 u32 iocomp_timedout; /* IO timeouts */
31 u32 iocom_nexus_abort; /* IO selection timeouts */
32 u32 iocom_proto_err; /* IO protocol errors */
33 u32 iocom_dif_err; /* IO SBC-3 protection errors */
34 u32 iocom_tm_abort; /* IO aborted by TM requests */
35 u32 iocom_sqer_needed; /* IO retry for SQ error
36 *recovery */
37 u32 iocom_res_free; /* Delayed freeing of IO resources */
38 u32 iocomp_scsierr; /* IO with non-good SCSI status */
39 u32 iocom_hostabrts; /* Host IO abort requests */
40 u32 iocom_utags; /* IO comp with unknown tags */
41 u32 io_cleanups; /* IO implicitly aborted */
42 u32 io_tmaborts; /* IO aborted due to TM commands */
43 u32 rsvd;
44};
45#endif /*__BFA_DEFS_FCPIM_H__*/
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
new file mode 100644
index 000000000000..9ccf53bef65a
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_COMMON_H__
19#define __BFA_DEFS_IM_COMMON_H__
20
21#define BFA_ADAPTER_NAME_LEN 256
22#define BFA_ADAPTER_GUID_LEN 256
23#define RESERVED_VLAN_NAME L"PORT VLAN"
24#define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN"
25
26 u64 tx_pkt_cnt;
27 u64 rx_pkt_cnt;
28 u32 duration;
29 u8 status;
30} bfa_im_stats_t, *pbfa_im_stats_t;
31
32#endif /* __BFA_DEFS_IM_COMMON_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
new file mode 100644
index 000000000000..a486a7eb81d6
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_TEAM_H__
19#define __BFA_DEFS_IM_TEAM_H__
20
21#include <protocol/types.h>
22
23#define BFA_TEAM_MAX_PORTS 8
24#define BFA_TEAM_NAME_LEN 256
25#define BFA_MAX_NUM_TEAMS 16
26#define BFA_TEAM_INVALID_DELAY -1
27
28 BFA_LACP_RATE_SLOW = 1,
29 BFA_LACP_RATE_FAST
30} bfa_im_lacp_rate_t;
31
32 BFA_TEAM_MODE_FAIL_OVER = 1,
33 BFA_TEAM_MODE_FAIL_BACK,
34 BFA_TEAM_MODE_LACP,
35 BFA_TEAM_MODE_NONE
36} bfa_im_team_mode_t;
37
38 BFA_XMIT_POLICY_L2 = 1,
39 BFA_XMIT_POLICY_L3_L4
40} bfa_im_xmit_policy_t;
41
42 bfa_im_team_mode_t team_mode;
43 bfa_im_lacp_rate_t lacp_rate;
44 bfa_im_xmit_policy_t xmit_policy;
45 int delay;
46 wchar_t primary[BFA_ADAPTER_NAME_LEN];
47 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
48 mac_t mac;
49 u16 num_ports;
50 u16 num_vlans;
51 u16 vlan_list[BFA_MAX_VLANS_PER_PORT];
52 wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN];
53 wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN];
54} bfa_im_team_attr_t;
55
56 wchar_t team_name[BFA_TEAM_NAME_LEN];
57 bfa_im_xmit_policy_t xmit_policy;
58 int delay;
59 wchar_t primary[BFA_ADAPTER_NAME_LEN];
60 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
61} bfa_im_team_edit_t, *pbfa_im_team_edit_t;
62
63 wchar_t team_name[BFA_TEAM_NAME_LEN];
64 bfa_im_team_mode_t team_mode;
65 mac_t mac;
66} bfa_im_team_info_t;
67
68 bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS];
69 u16 num_teams;
70} bfa_im_team_list_t, *pbfa_im_team_list_t;
71
72#endif /* __BFA_DEFS_IM_TEAM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
new file mode 100644
index 000000000000..b1d532da3a9d
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -0,0 +1,152 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOC_H__
19#define __BFA_DEFS_IOC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27enum {
28 BFA_IOC_DRIVER_LEN = 16,
29 BFA_IOC_CHIP_REV_LEN = 8,
30};
31
32/**
33 * Driver and firmware versions.
34 */
35struct bfa_ioc_driver_attr_s {
36 char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
37 char driver_ver[BFA_VERSION_LEN]; /* driver version */
38 char fw_ver[BFA_VERSION_LEN]; /* firmware version*/
39 char bios_ver[BFA_VERSION_LEN]; /* bios version */
40 char efi_ver[BFA_VERSION_LEN]; /* EFI version */
41 char ob_ver[BFA_VERSION_LEN]; /* openboot version*/
42};
43
44/**
45 * IOC PCI device attributes
46 */
47struct bfa_ioc_pci_attr_s {
48 u16 vendor_id; /* PCI vendor ID */
49 u16 device_id; /* PCI device ID */
50 u16 ssid; /* subsystem ID */
51 u16 ssvid; /* subsystem vendor ID */
52 u32 pcifn; /* PCI device function */
53 u32 rsvd; /* padding */
54 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
55};
56
57/**
58 * IOC states
59 */
60enum bfa_ioc_state {
61 BFA_IOC_RESET = 1, /* IOC is in reset state */
62 BFA_IOC_SEMWAIT = 2, /* Waiting for IOC hardware semaphore */
63 BFA_IOC_HWINIT = 3, /* IOC hardware is being initialized */
64 BFA_IOC_GETATTR = 4, /* IOC is being configured */
65 BFA_IOC_OPERATIONAL = 5, /* IOC is operational */
66 BFA_IOC_INITFAIL = 6, /* IOC hardware failure */
67 BFA_IOC_HBFAIL = 7, /* IOC heart-beat failure */
68 BFA_IOC_DISABLING = 8, /* IOC is being disabled */
69 BFA_IOC_DISABLED = 9, /* IOC is disabled */
70 BFA_IOC_FWMISMATCH = 10, /* IOC firmware different from drivers */
71};
72
73/**
74 * IOC firmware stats
75 */
76struct bfa_fw_ioc_stats_s {
77 u32 hb_count;
78 u32 cfg_reqs;
79 u32 enable_reqs;
80 u32 disable_reqs;
81 u32 stats_reqs;
82 u32 clrstats_reqs;
83 u32 unknown_reqs;
84 u32 ic_reqs; /* interrupt coalesce reqs */
85};
86
87/**
88 * IOC driver stats
89 */
90struct bfa_ioc_drv_stats_s {
91 u32 ioc_isrs;
92 u32 ioc_enables;
93 u32 ioc_disables;
94 u32 ioc_hbfails;
95 u32 ioc_boots;
96 u32 stats_tmos;
97 u32 hb_count;
98 u32 disable_reqs;
99 u32 enable_reqs;
100 u32 disable_replies;
101 u32 enable_replies;
102};
103
104/**
105 * IOC statistics
106 */
107struct bfa_ioc_stats_s {
108 struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
109 struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
110};
111
112
113enum bfa_ioc_type_e {
114 BFA_IOC_TYPE_FC = 1,
115 BFA_IOC_TYPE_FCoE = 2,
116 BFA_IOC_TYPE_LL = 3,
117};
118
119/**
120 * IOC attributes returned in queries
121 */
122struct bfa_ioc_attr_s {
123 enum bfa_ioc_type_e ioc_type;
124 enum bfa_ioc_state state; /* IOC state */
125 struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */
129};
130
131/**
132 * BFA IOC level events
133 */
134enum bfa_ioc_aen_event {
135 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
136 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
137 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
138 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
139 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
140};
141
142/**
143 * BFA IOC level event data, now just a place holder
144 */
145struct bfa_ioc_aen_data_s {
146 enum bfa_ioc_type_e ioc_type;
147 wwn_t pwwn;
148 mac_t mac;
149};
150
151#endif /* __BFA_DEFS_IOC_H__ */
152
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
new file mode 100644
index 000000000000..d76bcbd9820f
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -0,0 +1,310 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOCFC_H__
19#define __BFA_DEFS_IOCFC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27#define BFA_IOCFC_INTR_DELAY 1125
28#define BFA_IOCFC_INTR_LATENCY 225
29
30/**
31 * Interrupt coalescing configuration.
32 */
33struct bfa_iocfc_intr_attr_s {
34 bfa_boolean_t coalesce; /* enable/disable coalescing */
35 u16 latency; /* latency in microseconds */
36 u16 delay; /* delay in microseconds */
37};
38
39/**
40 * IOC firmware configuraton
41 */
42struct bfa_iocfc_fwcfg_s {
43 u16 num_fabrics; /* number of fabrics */
44 u16 num_lports; /* number of local lports */
45 u16 num_rports; /* number of remote ports */
46 u16 num_ioim_reqs; /* number of IO reqs */
47 u16 num_tskim_reqs; /* task management requests */
48 u16 num_iotm_reqs; /* number of TM IO reqs */
49 u16 num_tsktm_reqs; /* TM task management requests*/
50 u16 num_fcxp_reqs; /* unassisted FC exchanges */
51 u16 num_uf_bufs; /* unsolicited recv buffers */
52 u8 num_cqs;
53 u8 rsvd;
54};
55
56struct bfa_iocfc_drvcfg_s {
57 u16 num_reqq_elems; /* number of req queue elements */
58 u16 num_rspq_elems; /* number of rsp queue elements */
59 u16 num_sgpgs; /* number of total SG pages */
60 u16 num_sboot_tgts; /* number of SAN boot targets */
61 u16 num_sboot_luns; /* number of SAN boot luns */
62 u16 ioc_recover; /* IOC recovery mode */
63 u16 min_cfg; /* minimum configuration */
64 u16 path_tov; /* device path timeout */
65 bfa_boolean_t delay_comp; /* delay completion of
66 failed inflight IOs */
67 u32 rsvd;
68};
69/**
70 * IOC configuration
71 */
72struct bfa_iocfc_cfg_s {
73 struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
74 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
75};
76
77/**
78 * IOC firmware IO stats
79 */
80struct bfa_fw_io_stats_s {
81 u32 host_abort; /* IO aborted by host driver*/
82 u32 host_cleanup; /* IO clean up by host driver */
83
84 u32 fw_io_timeout; /* IOs timedout */
85 u32 fw_frm_parse; /* frame parsed by f/w */
86 u32 fw_frm_data; /* fcp_data frame parsed by f/w */
87 u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
88 u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
89 u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
90 u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
91 u32 fw_frm_unknown; /* unknown parsed by f/w */
92 u32 fw_data_dma; /* f/w DMA'ed the data frame */
93 u32 fw_frm_drop; /* f/w drop the frame */
94
95 u32 rec_timeout; /* FW rec timed out */
96 u32 error_rec; /* FW sending rec on
97 * an error condition*/
98 u32 wait_for_si; /* FW wait for SI */
99 u32 rec_rsp_inval; /* REC rsp invalid */
100 u32 seqr_io_abort; /* target does not know cmd so abort */
101 u32 seqr_io_retry; /* SEQR failed so retry IO */
102
103 u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
104 u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
105 u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
106
107 u32 fcp_data_lost; /* fcp data lost */
108
109 u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
110 u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
111 u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
112
113 u32 io_abort_timeout; /* ABTS timedout */
114 u32 sler_initiated; /* SLER initiated */
115
116 u32 unexp_fcp_rsp; /* fcp response in wrong state */
117
118 u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
119 u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
120 u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
121 u32 fcp_rsp_resid_inval; /* invalid residue */
122 u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
123 u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
124 u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
125 u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
126 u32 fcp_conf_req; /* FCP conf requested */
127
128 u32 tgt_aborted_io; /* target initiated abort */
129
130 u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
131 u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
132 u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
133 u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
134 u32 ioh_hit_class2_event; /* IOH hit class2 */
135 u32 ioh_miss_other_event; /* IOH miss other */
136 u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
137 u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
138 * bytes xfered */
139 u32 ioh_seq_len_err_event; /* IOH seq len error */
140 u32 ioh_data_oor_event; /* Data out of range */
141 u32 ioh_ro_ooo_event; /* Relative offset out of range */
142 u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
143 u32 ioh_unexp_frame_event; /* unexpected frame recieved
144 * count */
145 u32 ioh_err_int; /* IOH error int during data-phase
146 * for scsi write
147 */
148};
149
150/**
151 * IOC port firmware stats
152 */
153
154struct bfa_fw_port_fpg_stats_s {
155 u32 intr_evt;
156 u32 intr;
157 u32 intr_excess;
158 u32 intr_cause0;
159 u32 intr_other;
160 u32 intr_other_ign;
161 u32 sig_lost;
162 u32 sig_regained;
163 u32 sync_lost;
164 u32 sync_to;
165 u32 sync_regained;
166 u32 div2_overflow;
167 u32 div2_underflow;
168 u32 efifo_overflow;
169 u32 efifo_underflow;
170 u32 idle_rx;
171 u32 lrr_rx;
172 u32 lr_rx;
173 u32 ols_rx;
174 u32 nos_rx;
175 u32 lip_rx;
176 u32 arbf0_rx;
177 u32 mrk_rx;
178 u32 const_mrk_rx;
179 u32 prim_unknown;
180 u32 rsvd;
181};
182
183
184struct bfa_fw_port_lksm_stats_s {
185 u32 hwsm_success; /* hwsm state machine success */
186 u32 hwsm_fails; /* hwsm fails */
187 u32 hwsm_wdtov; /* hwsm timed out */
188 u32 swsm_success; /* swsm success */
189 u32 swsm_fails; /* swsm fails */
190 u32 swsm_wdtov; /* swsm timed out */
191 u32 busybufs; /* link init failed due to busybuf */
192 u32 buf_waits; /* bufwait state entries */
193 u32 link_fails; /* link failures */
194 u32 psp_errors; /* primitive sequence protocol errors */
195 u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
196 u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
197 u32 lr_tx; /* No. of times LR tx started */
198 u32 lrr_tx; /* No. of times LRR tx started */
199 u32 ols_tx; /* No. of times OLS tx started */
200 u32 nos_tx; /* No. of times NOS tx started */
201};
202
203
204struct bfa_fw_port_snsm_stats_s {
205 u32 hwsm_success; /* Successful hwsm terminations */
206 u32 hwsm_fails; /* hwsm fail count */
207 u32 hwsm_wdtov; /* hwsm timed out */
208 u32 swsm_success; /* swsm success */
209 u32 swsm_wdtov; /* swsm timed out */
210 u32 error_resets; /* error resets initiated by upsm */
211 u32 sync_lost; /* Sync loss count */
212 u32 sig_lost; /* Signal loss count */
213};
214
215
216struct bfa_fw_port_physm_stats_s {
217 u32 module_inserts; /* Module insert count */
218 u32 module_xtracts; /* Module extracts count */
219 u32 module_invalids; /* Invalid module inserted count */
220 u32 module_read_ign; /* Module validation status ignored */
221 u32 laser_faults; /* Laser fault count */
222 u32 rsvd;
223};
224
225
226struct bfa_fw_fip_stats_s {
227 u32 disc_req; /* Discovery solicit requests */
228 u32 disc_rsp; /* Discovery solicit response */
229 u32 disc_err; /* Discovery advt. parse errors */
230 u32 disc_unsol; /* Discovery unsolicited */
231 u32 disc_timeouts; /* Discovery timeouts */
232 u32 linksvc_unsupp; /* Unsupported link service req */
233 u32 linksvc_err; /* Parse error in link service req */
234 u32 logo_req; /* Number of FIP logos received */
235 u32 clrvlink_req; /* Clear virtual link req */
236 u32 op_unsupp; /* Unsupported FIP operation */
237 u32 untagged; /* Untagged frames (ignored) */
238 u32 rsvd;
239};
240
241
242struct bfa_fw_lps_stats_s {
243 u32 mac_invalids; /* Invalid mac assigned */
244 u32 rsvd;
245};
246
247
248struct bfa_fw_fcoe_stats_s {
249 u32 cee_linkups; /* CEE link up count */
250 u32 cee_linkdns; /* CEE link down count */
251 u32 fip_linkups; /* FIP link up count */
252 u32 fip_linkdns; /* FIP link up count */
253 u32 fip_fails; /* FIP fail count */
254 u32 mac_invalids; /* Invalid mac assigned */
255};
256
257/**
258 * IOC firmware FCoE port stats
259 */
260struct bfa_fw_fcoe_port_stats_s {
261 struct bfa_fw_fcoe_stats_s fcoe_stats;
262 struct bfa_fw_fip_stats_s fip_stats;
263};
264
265/**
266 * IOC firmware FC port stats
267 */
268struct bfa_fw_fc_port_stats_s {
269 struct bfa_fw_port_fpg_stats_s fpg_stats;
270 struct bfa_fw_port_physm_stats_s physm_stats;
271 struct bfa_fw_port_snsm_stats_s snsm_stats;
272 struct bfa_fw_port_lksm_stats_s lksm_stats;
273};
274
275/**
276 * IOC firmware FC port stats
277 */
278union bfa_fw_port_stats_s {
279 struct bfa_fw_fc_port_stats_s fc_stats;
280 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
281};
282
283/**
284 * IOC firmware stats
285 */
286struct bfa_fw_stats_s {
287 struct bfa_fw_ioc_stats_s ioc_stats;
288 struct bfa_fw_io_stats_s io_stats;
289 union bfa_fw_port_stats_s port_stats;
290};
291
292/**
293 * IOC statistics
294 */
295struct bfa_iocfc_stats_s {
296 struct bfa_fw_stats_s fw_stats; /* firmware IOC stats */
297};
298
299/**
300 * IOC attributes returned in queries
301 */
302struct bfa_iocfc_attr_s {
303 struct bfa_iocfc_cfg_s config; /* IOCFC config */
304 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
305};
306
307#define BFA_IOCFC_PATHTOV_MAX 60
308#define BFA_IOCFC_QDEPTH_MAX 2000
309
310#endif /* __BFA_DEFS_IOC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
new file mode 100644
index 000000000000..7cb63ea98f38
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_IPFC_H__
18#define __BFA_DEFS_IPFC_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23
/**
 * FCS IP remote-port states.
 */
enum bfa_iprp_state {
	BFA_IPRP_UNINIT		= 0,	/* port is not yet initialized */
	BFA_IPRP_ONLINE		= 1,	/* process login is complete */
	BFA_IPRP_OFFLINE	= 2,	/* iprp is offline */
};
32
33/**
34 * FCS remote port statistics
35 */
36struct bfa_iprp_stats_s {
37 u32 offlines;
38 u32 onlines;
39 u32 rscns;
40 u32 plogis;
41 u32 logos;
42 u32 plogi_timeouts;
43 u32 plogi_rejects;
44};
45
46/**
47 * FCS iprp attribute returned in queries
48 */
49struct bfa_iprp_attr_s {
50 enum bfa_iprp_state state;
51};
52
53struct bfa_ipfc_stats_s {
54 u32 arp_sent;
55 u32 arp_recv;
56 u32 arp_reply_sent;
57 u32 arp_reply_recv;
58 u32 farp_sent;
59 u32 farp_recv;
60 u32 farp_reply_sent;
61 u32 farp_reply_recv;
62 u32 farp_reject_sent;
63 u32 farp_reject_recv;
64};
65
66struct bfa_ipfc_attr_s {
67 bfa_boolean_t enabled;
68};
69
70#endif /* __BFA_DEFS_IPFC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
new file mode 100644
index 000000000000..2ec769903d24
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ITNIM_H__
18#define __BFA_DEFS_ITNIM_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22
/**
 * FCS itnim (initiator-target nexus) states.
 */
enum bfa_itnim_state {
	BFA_ITNIM_OFFLINE	= 0,	/* offline */
	BFA_ITNIM_PRLI_SEND	= 1,	/* prli send */
	BFA_ITNIM_PRLI_SENT	= 2,	/* prli sent */
	BFA_ITNIM_PRLI_RETRY	= 3,	/* prli retry */
	BFA_ITNIM_HCB_ONLINE	= 4,	/* online callback */
	BFA_ITNIM_ONLINE	= 5,	/* online */
	BFA_ITNIM_HCB_OFFLINE	= 6,	/* offline callback */
	BFA_ITNIM_INITIATIOR	= 7,	/* initiator (sic — identifier
					 * misspelling kept: it is ABI) */
};
36
37struct bfa_itnim_hal_stats_s {
38 u32 onlines; /* ITN nexus onlines (PRLI done) */
39 u32 offlines; /* ITN Nexus offlines */
40 u32 creates; /* ITN create requests */
41 u32 deletes; /* ITN delete requests */
42 u32 create_comps; /* ITN create completions */
43 u32 delete_comps; /* ITN delete completions */
44 u32 sler_events; /* SLER (sequence level error
45 * recovery) events */
46 u32 ioc_disabled; /* Num IOC disables */
47 u32 cleanup_comps; /* ITN cleanup completions */
48 u32 tm_cmnds; /* task management(TM) cmnds sent */
49 u32 tm_fw_rsps; /* TM cmds firmware responses */
50 u32 tm_success; /* TM successes */
51 u32 tm_failures; /* TM failures */
52 u32 tm_io_comps; /* TM IO completions */
53 u32 tm_qresumes; /* TM queue resumes (after waiting
54 * for resources)
55 */
56 u32 tm_iocdowns; /* TM cmnds affected by IOC down */
57 u32 tm_cleanups; /* TM cleanups */
58 u32 tm_cleanup_comps;
59 /* TM cleanup completions */
60 u32 ios; /* IO requests */
61 u32 io_comps; /* IO completions */
62 u64 input_reqs; /* INPUT requests */
63 u64 output_reqs; /* OUTPUT requests */
64};
65
66/**
67 * FCS remote port statistics
68 */
69struct bfa_itnim_stats_s {
70 u32 onlines; /* num rport online */
71 u32 offlines; /* num rport offline */
72 u32 prli_sent; /* num prli sent out */
73 u32 fcxp_alloc_wait;/* num fcxp alloc waits */
74 u32 prli_rsp_err; /* num prli rsp errors */
75 u32 prli_rsp_acc; /* num prli rsp accepts */
76 u32 initiator; /* rport is an initiator */
77 u32 prli_rsp_parse_err; /* prli rsp parsing errors */
78 u32 prli_rsp_rjt; /* num prli rsp rejects */
79 u32 timeout; /* num timeouts detected */
80 u32 sler; /* num sler notification from BFA */
81 u32 rsvd;
82 struct bfa_itnim_hal_stats_s hal_stats;
83};
84
85/**
86 * FCS itnim attributes returned in queries
87 */
88struct bfa_itnim_attr_s {
89 enum bfa_itnim_state state; /* FCS itnim state */
90 u8 retry; /* data retransmision support */
91 u8 task_retry_id; /* task retry ident support */
92 u8 rec_support; /* REC supported */
93 u8 conf_comp; /* confirmed completion supp */
94};
95
/**
 * BFA ITNIM (IT-nexus) async events.
 * Arguments below are in BFAL context from Mgmt:
 * BFA_ITNIM_AEN_ONLINE:     [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets)
 *                           [out]: vf_id, ppwwn, lpwwn, rpwwn
 * BFA_ITNIM_AEN_OFFLINE:    [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets)
 *                           [out]: vf_id, ppwwn, lpwwn, rpwwn
 * BFA_ITNIM_AEN_DISCONNECT: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets)
 *                           [out]: vf_id, ppwwn, lpwwn, rpwwn
 */
enum bfa_itnim_aen_event {
	BFA_ITNIM_AEN_ONLINE		= 1,	/* Target online */
	BFA_ITNIM_AEN_OFFLINE		= 2,	/* Target offline */
	BFA_ITNIM_AEN_DISCONNECT	= 3,	/* Target disconnected */
};
114
115/**
116 * BFA ITNIM event data structure.
117 */
118struct bfa_itnim_aen_data_s {
119 u16 vf_id; /* vf_id of the IT nexus */
120 u16 rsvd[3];
121 wwn_t ppwwn; /* WWN of its physical port */
122 wwn_t lpwwn; /* WWN of logical port */
123 wwn_t rpwwn; /* WWN of remote(target) port */
124};
125
126#endif /* __BFA_DEFS_ITNIM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_led.h b/drivers/scsi/bfa/include/defs/bfa_defs_led.h
new file mode 100644
index 000000000000..62039273264e
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_led.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LED_H__
19#define __BFA_DEFS_LED_H__
20
21#define BFA_LED_MAX_NUM 3
22
/* LED control operations. */
enum bfa_led_op {
	BFA_LED_OFF	= 0,
	BFA_LED_ON	= 1,
	BFA_LED_FLICK	= 2,
	BFA_LED_BLINK	= 3,
};
29
/* LED colors. */
enum bfa_led_color {
	BFA_LED_GREEN	= 0,
	BFA_LED_AMBER	= 1,
};
34
35#endif /* __BFA_DEFS_LED_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
new file mode 100644
index 000000000000..7359f82aacfc
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LPORT_H__
19#define __BFA_DEFS_LPORT_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_port.h>
23
/**
 * BFA AEN logical-port events.
 * Arguments below are in BFAL context from Mgmt:
 * BFA_LPORT_AEN_NEW:             [in]: None  [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_DELETE:          [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_ONLINE:          [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_OFFLINE:         [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_DISCONNECT:      [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_NEW_PROP:        [in]: None  [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_DELETE_PROP:     [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_NEW_STANDARD:    [in]: None  [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_DELETE_STANDARD: [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_NPIV_DUP_WWN:    [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_NPIV_FABRIC_MAX: [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 * BFA_LPORT_AEN_NPIV_UNKNOWN:    [in]: lpwwn [out]: vf_id, ppwwn, lpwwn, roles
 */
enum bfa_lport_aen_event {
	BFA_LPORT_AEN_NEW		= 1,	/* LPort created event */
	BFA_LPORT_AEN_DELETE		= 2,	/* LPort deleted event */
	BFA_LPORT_AEN_ONLINE		= 3,	/* LPort online event */
	BFA_LPORT_AEN_OFFLINE		= 4,	/* LPort offline event */
	BFA_LPORT_AEN_DISCONNECT	= 5,	/* LPort disconnect event */
	BFA_LPORT_AEN_NEW_PROP		= 6,	/* VPort created event */
	BFA_LPORT_AEN_DELETE_PROP	= 7,	/* VPort deleted event */
	BFA_LPORT_AEN_NEW_STANDARD	= 8,	/* VPort created event */
	BFA_LPORT_AEN_DELETE_STANDARD	= 9,	/* VPort deleted event */
	BFA_LPORT_AEN_NPIV_DUP_WWN	= 10,	/* VPort configured with
						 * duplicate WWN event */
	BFA_LPORT_AEN_NPIV_FABRIC_MAX	= 11,	/* Max NPIV in fabric/fport */
	BFA_LPORT_AEN_NPIV_UNKNOWN	= 12,	/* Unknown NPIV error code */
};
56
57/**
58 * BFA AEN event data structure
59 */
60struct bfa_lport_aen_data_s {
61 u16 vf_id; /* vf_id of this logical port */
62 u16 rsvd;
63 enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */
64 wwn_t ppwwn; /* WWN of its physical port */
65 wwn_t lpwwn; /* WWN of this logical port */
66};
67
68#endif /* __BFA_DEFS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
new file mode 100644
index 000000000000..13fd4ab6aae2
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_MFG_H__
18#define __BFA_DEFS_MFG_H__
19
20#include <bfa_os_inc.h>
21
22/**
23 * Manufacturing block version
24 */
25#define BFA_MFG_VERSION 1
26
27/**
28 * Manufacturing block format
29 */
30#define BFA_MFG_SERIALNUM_SIZE 11
31#define BFA_MFG_PARTNUM_SIZE 14
32#define BFA_MFG_SUPPLIER_ID_SIZE 10
33#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
34#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
35#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
36#define STRSZ(_n) (((_n) + 4) & ~3)
37
38/**
39 * VPD data length
40 */
41#define BFA_MFG_VPD_LEN 256
42
43/**
44 * All numerical fields are in big-endian format.
45 */
46struct bfa_mfg_vpd_s {
47 u8 version; /* vpd data version */
48 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
49 u8 chksum; /* u8 checksum */
50 u8 vendor; /* vendor */
51 u8 len; /* vpd data length excluding header */
52 u8 rsv;
53 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
54};
55
56#pragma pack(1)
57
58#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
new file mode 100644
index 000000000000..c9b83321694b
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PCI_H__
19#define __BFA_DEFS_PCI_H__
20
/**
 * PCI device and vendor ID information.
 */
enum {
	BFA_PCI_VENDOR_ID_BROCADE	= 0x1657,
	BFA_PCI_DEVICE_ID_FC_8G2P	= 0x13,
	BFA_PCI_DEVICE_ID_FC_8G1P	= 0x17,
	BFA_PCI_DEVICE_ID_CT		= 0x14,
};
30
/**
 * PCI sub-system device and vendor ID information.
 */
enum {
	BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
};
37
38#define BFA_PCI_ACCESS_RANGES 1 /* Maximum number of device address ranges
39 * mapped through different BAR(s). */
40
41#endif /* __BFA_DEFS_PCI_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h b/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
new file mode 100644
index 000000000000..e8d6d959006e
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PM_H__
19#define __BFA_DEFS_PM_H__
20
21#include <bfa_os_inc.h>
22
/**
 * BFA power-management device states.
 */
enum bfa_pm_ds {
	BFA_PM_DS_D0 = 0,	/* full power mode */
	BFA_PM_DS_D1 = 1,	/* power save state 1 */
	BFA_PM_DS_D2 = 2,	/* power save state 2 */
	BFA_PM_DS_D3 = 3,	/* power off state */
};
32
33#endif /* __BFA_DEFS_PM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h b/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
new file mode 100644
index 000000000000..d9fa278472b7
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_POM_H__
18#define __BFA_DEFS_POM_H__
19
20#include <bfa_os_inc.h>
21#include <defs/bfa_defs_types.h>
22
/**
 * POM (pluggable optics monitoring) health levels, per attribute.
 */
enum bfa_pom_entry_health {
	BFA_POM_HEALTH_NOINFO	= 1,	/* no information */
	BFA_POM_HEALTH_NORMAL	= 2,	/* health is normal */
	BFA_POM_HEALTH_WARNING	= 3,	/* warning level */
	BFA_POM_HEALTH_ALARM	= 4,	/* alarming level */
};
32
33/**
34 * Reading of temperature/voltage/current/power
35 */
36struct bfa_pom_entry_s {
37 enum bfa_pom_entry_health health; /* POM entry health */
38 u32 curr_value; /* current value */
39 u32 thr_warn_high; /* threshold warning high */
40 u32 thr_warn_low; /* threshold warning low */
41 u32 thr_alarm_low; /* threshold alaram low */
42 u32 thr_alarm_high; /* threshold alarm high */
43};
44
45/**
46 * POM attributes
47 */
48struct bfa_pom_attr_s {
49 struct bfa_pom_entry_s temperature; /* centigrade */
50 struct bfa_pom_entry_s voltage; /* volts */
51 struct bfa_pom_entry_s curr; /* milli amps */
52 struct bfa_pom_entry_s txpower; /* micro watts */
53 struct bfa_pom_entry_s rxpower; /* micro watts */
54};
55
56#endif /* __BFA_DEFS_POM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
new file mode 100644
index 000000000000..de0696c81bc4
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PORT_H__
19#define __BFA_DEFS_PORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_ioc.h>
25
26#define BFA_FCS_FABRIC_IPADDR_SZ 16
27
/**
 * Symbolic name for a base port / virtual port.
 */
#define BFA_SYMNAME_MAXLEN	128	/* vmware/windows uses 128 bytes */
struct bfa_port_symname_s {
	char	symname[BFA_SYMNAME_MAXLEN];
};
35
/**
 * Roles of an FCS port (bit flags):
 *  - FCP IM and FCP TM roles cannot be enabled together for one FCS port.
 *  - Create multiple ports if both IM and TM functions are required.
 *  - At least one role must be specified.
 */
enum bfa_port_role {
	BFA_PORT_ROLE_FCP_IM	= 0x01,	/* FCP initiator role */
	BFA_PORT_ROLE_FCP_TM	= 0x02,	/* FCP target role */
	BFA_PORT_ROLE_FCP_IPFC	= 0x04,	/* IP over FC role */
	BFA_PORT_ROLE_FCP_MAX	= BFA_PORT_ROLE_FCP_IPFC | BFA_PORT_ROLE_FCP_IM
};
48
49/**
50 * FCS port configuration.
51 */
52struct bfa_port_cfg_s {
53 wwn_t pwwn; /* port wwn */
54 wwn_t nwwn; /* node wwn */
55 struct bfa_port_symname_s sym_name; /* vm port symbolic name */
56 enum bfa_port_role roles; /* FCS port roles */
57 u32 rsvd;
58 u8 tag[16]; /* opaque tag from application */
59};
60
/**
 * FCS port states.
 */
enum bfa_port_state {
	BFA_PORT_UNINIT		= 0,	/* port is not yet initialized */
	BFA_PORT_FDISC		= 1,	/* FDISC is in progress */
	BFA_PORT_ONLINE		= 2,	/* login to fabric is complete */
	BFA_PORT_OFFLINE	= 3,	/* no login to fabric */
};
70
/**
 * FCS port type. Required for VmWare.
 */
enum bfa_port_type {
	BFA_PORT_TYPE_PHYSICAL = 0,
	BFA_PORT_TYPE_VIRTUAL,
};
78
/**
 * FCS port offline reason. Required for VmWare.
 */
enum bfa_port_offline_reason {
	BFA_PORT_OFFLINE_UNKNOWN = 0,
	BFA_PORT_OFFLINE_LINKDOWN,
	BFA_PORT_OFFLINE_FAB_UNSUPPORTED,	/* NPIV not supported by
						 * the fabric */
	BFA_PORT_OFFLINE_FAB_NORESOURCES,
	BFA_PORT_OFFLINE_FAB_LOGOUT,
};
90
91/**
92 * FCS lport info. Required for VmWare.
93 */
94struct bfa_port_info_s {
95 u8 port_type; /* bfa_port_type_t : physical or
96 * virtual */
97 u8 port_state; /* one of bfa_port_state values */
98 u8 offline_reason; /* one of bfa_port_offline_reason_t
99 * values */
100 wwn_t port_wwn;
101 wwn_t node_wwn;
102
103 /*
104 * following 4 feilds are valid for Physical Ports only
105 */
106 u32 max_vports_supp; /* Max supported vports */
107 u32 num_vports_inuse; /* Num of in use vports */
108 u32 max_rports_supp; /* Max supported rports */
109 u32 num_rports_inuse; /* Num of doscovered rports */
110
111};
112
113/**
114 * FCS port statistics
115 */
116struct bfa_port_stats_s {
117 u32 ns_plogi_sent;
118 u32 ns_plogi_rsp_err;
119 u32 ns_plogi_acc_err;
120 u32 ns_plogi_accepts;
121 u32 ns_rejects; /* NS command rejects */
122 u32 ns_plogi_unknown_rsp;
123 u32 ns_plogi_alloc_wait;
124
125 u32 ns_retries; /* NS command retries */
126 u32 ns_timeouts; /* NS command timeouts */
127
128 u32 ns_rspnid_sent;
129 u32 ns_rspnid_accepts;
130 u32 ns_rspnid_rsp_err;
131 u32 ns_rspnid_rejects;
132 u32 ns_rspnid_alloc_wait;
133
134 u32 ns_rftid_sent;
135 u32 ns_rftid_accepts;
136 u32 ns_rftid_rsp_err;
137 u32 ns_rftid_rejects;
138 u32 ns_rftid_alloc_wait;
139
140 u32 ns_rffid_sent;
141 u32 ns_rffid_accepts;
142 u32 ns_rffid_rsp_err;
143 u32 ns_rffid_rejects;
144 u32 ns_rffid_alloc_wait;
145
146 u32 ns_gidft_sent;
147 u32 ns_gidft_accepts;
148 u32 ns_gidft_rsp_err;
149 u32 ns_gidft_rejects;
150 u32 ns_gidft_unknown_rsp;
151 u32 ns_gidft_alloc_wait;
152
153 /*
154 * Mgmt Server stats
155 */
156 u32 ms_retries; /* MS command retries */
157 u32 ms_timeouts; /* MS command timeouts */
158 u32 ms_plogi_sent;
159 u32 ms_plogi_rsp_err;
160 u32 ms_plogi_acc_err;
161 u32 ms_plogi_accepts;
162 u32 ms_rejects; /* NS command rejects */
163 u32 ms_plogi_unknown_rsp;
164 u32 ms_plogi_alloc_wait;
165
166 u32 num_rscn; /* Num of RSCN received */
167 u32 num_portid_rscn;/* Num portid format RSCN
168 * received */
169
170 u32 uf_recvs; /* unsolicited recv frames */
171 u32 uf_recv_drops; /* dropped received frames */
172
173 u32 rsvd; /* padding for 64 bit alignment */
174};
175
176/**
177 * BFA port attribute returned in queries
178 */
179struct bfa_port_attr_s {
180 enum bfa_port_state state; /* port state */
181 u32 pid; /* port ID */
182 struct bfa_port_cfg_s port_cfg; /* port configuration */
183 enum bfa_pport_type port_type; /* current topology */
184 u32 loopback; /* cable is externally looped back */
185 wwn_t fabric_name; /* attached switch's nwwn */
186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
187 * fabric's ip addr */
188};
189
/**
 * BFA physical-port-level events.
 * Arguments below are in BFAL context from Mgmt:
 * BFA_PORT_AEN_ONLINE:     [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_OFFLINE:    [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_RLIR:       [in]: None [out]: pwwn, rlir_data, rlir_len
 * BFA_PORT_AEN_SFP_INSERT: [in]: pwwn [out]: port_id, pwwn
 * BFA_PORT_AEN_SFP_REMOVE: [in]: pwwn [out]: port_id, pwwn
 * BFA_PORT_AEN_SFP_POM:    [in]: pwwn [out]: level, port_id, pwwn
 * BFA_PORT_AEN_ENABLE:     [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_DISABLE:    [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_AUTH_ON:    [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_AUTH_OFF:   [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_DISCONNECT: [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_QOS_NEG:    [in]: pwwn [out]: pwwn
 * BFA_PORT_AEN_FABRIC_NAME_CHANGE: [in]: pwwn [out]: pwwn, fwwn
 */
enum bfa_port_aen_event {
	BFA_PORT_AEN_ONLINE	= 1,	/* Physical Port online event */
	BFA_PORT_AEN_OFFLINE	= 2,	/* Physical Port offline event */
	BFA_PORT_AEN_RLIR	= 3,	/* RLIR event, not supported */
	BFA_PORT_AEN_SFP_INSERT	= 4,	/* SFP inserted event */
	BFA_PORT_AEN_SFP_REMOVE	= 5,	/* SFP removed event */
	BFA_PORT_AEN_SFP_POM	= 6,	/* SFP POM event */
	BFA_PORT_AEN_ENABLE	= 7,	/* Physical Port enable event */
	BFA_PORT_AEN_DISABLE	= 8,	/* Physical Port disable event */
	BFA_PORT_AEN_AUTH_ON	= 9,	/* Physical Port auth success event */
	BFA_PORT_AEN_AUTH_OFF	= 10,	/* Physical Port auth fail event */
	BFA_PORT_AEN_DISCONNECT	= 11,	/* Physical Port disconnect event */
	BFA_PORT_AEN_QOS_NEG	= 12,	/* Base Port QOS negotiation event */
	BFA_PORT_AEN_FABRIC_NAME_CHANGE	= 13,	/* Fabric Name/WWN change
						 * event */
	BFA_PORT_AEN_SFP_ACCESS_ERROR	= 14,	/* SFP read error event */
	BFA_PORT_AEN_SFP_UNSUPPORT	= 15,	/* Unsupported SFP event */
};
226
/* SFP POM severity levels reported in AENs. */
enum bfa_port_aen_sfp_pom {
	BFA_PORT_AEN_SFP_POM_GREEN	= 1,	/* Normal */
	BFA_PORT_AEN_SFP_POM_AMBER	= 2,	/* Warning */
	BFA_PORT_AEN_SFP_POM_RED	= 3,	/* Critical */
	BFA_PORT_AEN_SFP_POM_MAX	= BFA_PORT_AEN_SFP_POM_RED
};
233
234struct bfa_port_aen_data_s {
235 enum bfa_ioc_type_e ioc_type;
236 wwn_t pwwn; /* WWN of the physical port */
237 wwn_t fwwn; /* WWN of the fabric port */
238 mac_t mac; /* MAC addres of the ethernet port,
239 * applicable to CNA port only */
240 int phy_port_num; /*! For SFP related events */
241 enum bfa_port_aen_sfp_pom level; /* Only transitions will
242 * be informed */
243};
244
245#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
new file mode 100644
index 000000000000..a000bc4e2d4a
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -0,0 +1,383 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PPORT_H__
19#define __BFA_DEFS_PPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/fc.h>
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_qos.h>
25#include <cna/pstats/phyport_defs.h>
26
27/* Modify char* port_stt[] in bfal_port.c if a new state was added */
28enum bfa_pport_states {
29 BFA_PPORT_ST_UNINIT = 1,
30 BFA_PPORT_ST_ENABLING_QWAIT = 2,
31 BFA_PPORT_ST_ENABLING = 3,
32 BFA_PPORT_ST_LINKDOWN = 4,
33 BFA_PPORT_ST_LINKUP = 5,
34 BFA_PPORT_ST_DISABLING_QWAIT = 6,
35 BFA_PPORT_ST_DISABLING = 7,
36 BFA_PPORT_ST_DISABLED = 8,
37 BFA_PPORT_ST_STOPPED = 9,
38 BFA_PPORT_ST_IOCDOWN = 10,
39 BFA_PPORT_ST_IOCDIS = 11,
40 BFA_PPORT_ST_FWMISMATCH = 12,
41 BFA_PPORT_ST_MAX_STATE,
42};
43
44/**
45 * Port speed settings. Each specific speed is a bit field. Use multiple
46 * bits to specify speeds to be selected for auto-negotiation.
47 */
48enum bfa_pport_speed {
49 BFA_PPORT_SPEED_UNKNOWN = 0,
50 BFA_PPORT_SPEED_1GBPS = 1,
51 BFA_PPORT_SPEED_2GBPS = 2,
52 BFA_PPORT_SPEED_4GBPS = 4,
53 BFA_PPORT_SPEED_8GBPS = 8,
54 BFA_PPORT_SPEED_10GBPS = 10,
55 BFA_PPORT_SPEED_AUTO =
56 (BFA_PPORT_SPEED_1GBPS | BFA_PPORT_SPEED_2GBPS |
57 BFA_PPORT_SPEED_4GBPS | BFA_PPORT_SPEED_8GBPS),
58};
59
60/**
61 * Port operational type (in sync with SNIA port type).
62 */
63enum bfa_pport_type {
64	BFA_PPORT_TYPE_UNKNOWN = 1,	/* port type is unknown */
65 BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */
66 BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */
67 BFA_PPORT_TYPE_NLPORT = 6, /* public loop */
68 BFA_PPORT_TYPE_LPORT = 20, /* private loop */
69 BFA_PPORT_TYPE_P2P = 21, /* P2P with no switched fabric */
70 BFA_PPORT_TYPE_VPORT = 22, /* NPIV - virtual port */
71};
72
73/**
74 * Port topology setting. A port's topology and fabric login status
75 * determine its operational type.
76 */
77enum bfa_pport_topology {
78 BFA_PPORT_TOPOLOGY_NONE = 0, /* No valid topology */
79 BFA_PPORT_TOPOLOGY_P2P = 1, /* P2P only */
80 BFA_PPORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
81 BFA_PPORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
82};
83
84/**
85 * Physical port loopback types.
86 */
87enum bfa_pport_opmode {
88 BFA_PPORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
89 BFA_PPORT_OPMODE_LB_INT = 0x01, /* internal loop back */
90 BFA_PPORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
91 BFA_PPORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
92 BFA_PPORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
93 BFA_PPORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
94};
95
96#define BFA_PPORT_OPMODE_LB_HARD(_mode) \
97 ((_mode == BFA_PPORT_OPMODE_LB_INT) || \
98 (_mode == BFA_PPORT_OPMODE_LB_SLW) || \
99 (_mode == BFA_PPORT_OPMODE_LB_EXT))
100
101/**
102 * Port State (in sync with SNIA port state).
103 */
104enum bfa_pport_snia_state {
105 BFA_PPORT_STATE_UNKNOWN = 1, /* port is not initialized */
106 BFA_PPORT_STATE_ONLINE = 2, /* port is ONLINE */
107 BFA_PPORT_STATE_DISABLED = 3, /* port is disabled by user */
108 BFA_PPORT_STATE_BYPASSED = 4, /* port is bypassed (in LOOP) */
109 BFA_PPORT_STATE_DIAG = 5, /* port diagnostics is active */
110 BFA_PPORT_STATE_LINKDOWN = 6, /* link is down */
111 BFA_PPORT_STATE_LOOPBACK = 8, /* port is looped back */
112};
113
114/**
115 * Port link state
116 */
117enum bfa_pport_linkstate {
118 BFA_PPORT_LINKUP = 1, /* Physical port/Trunk link up */
119 BFA_PPORT_LINKDOWN = 2, /* Physical port/Trunk link down */
120 BFA_PPORT_TRUNK_LINKDOWN = 3, /* Trunk link down (new tmaster) */
121};
122
123/**
124 * Port link state event
125 */
126#define bfa_pport_event_t enum bfa_pport_linkstate
127
128/**
129 * Port link state reason code
130 */
131enum bfa_pport_linkstate_rsn {
132 BFA_PPORT_LINKSTATE_RSN_NONE = 0,
133 BFA_PPORT_LINKSTATE_RSN_DISABLED = 1,
134 BFA_PPORT_LINKSTATE_RSN_RX_NOS = 2,
135 BFA_PPORT_LINKSTATE_RSN_RX_OLS = 3,
136 BFA_PPORT_LINKSTATE_RSN_RX_LIP = 4,
137 BFA_PPORT_LINKSTATE_RSN_RX_LIPF7 = 5,
138 BFA_PPORT_LINKSTATE_RSN_SFP_REMOVED = 6,
139 BFA_PPORT_LINKSTATE_RSN_PORT_FAULT = 7,
140 BFA_PPORT_LINKSTATE_RSN_RX_LOS = 8,
141 BFA_PPORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
142 BFA_PPORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
143 BFA_PPORT_LINKSTATE_RSN_TIMEOUT = 11,
144
145
146
147 /* CEE related reason codes/errors */
148 CEE_LLDP_INFO_AGED_OUT = 20,
149 CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
150 CEE_PEER_NOT_ADVERTISE_DCBX = 22,
151 CEE_PEER_NOT_ADVERTISE_PG = 23,
152 CEE_PEER_NOT_ADVERTISE_PFC = 24,
153 CEE_PEER_NOT_ADVERTISE_FCOE = 25,
154 CEE_PG_NOT_COMPATIBLE = 26,
155 CEE_PFC_NOT_COMPATIBLE = 27,
156 CEE_FCOE_NOT_COMPATIBLE = 28,
157 CEE_BAD_PG_RCVD = 29,
158 CEE_BAD_BW_RCVD = 30,
159 CEE_BAD_PFC_RCVD = 31,
160 CEE_BAD_FCOE_PRI_RCVD = 32,
161 CEE_FCOE_PRI_PFC_OFF = 33,
162 CEE_DUP_CONTROL_TLV_RCVD = 34,
163 CEE_DUP_FEAT_TLV_RCVD = 35,
164 CEE_APPLY_NEW_CFG = 36, /* reason, not an error */
165 CEE_PROTOCOL_INIT = 37, /* reason, not an error */
166 CEE_PHY_LINK_DOWN = 38,
167 CEE_LLS_FCOE_ABSENT = 39,
168 CEE_LLS_FCOE_DOWN = 40
169};
170
171/**
172 * Default Target Rate Limiting Speed.
173 */
174#define BFA_PPORT_DEF_TRL_SPEED BFA_PPORT_SPEED_1GBPS
175
176/**
177 * Physical port configuration
178 */
179struct bfa_pport_cfg_s {
180 u8 topology; /* bfa_pport_topology */
181 u8 speed; /* enum bfa_pport_speed */
182 u8 trunked; /* trunked or not */
183 u8 qos_enabled; /* qos enabled or not */
184 u8 trunk_ports; /* bitmap of trunked ports */
185 u8 cfg_hardalpa; /* is hard alpa configured */
186 u16 maxfrsize; /* maximum frame size */
187 u8 hardalpa; /* configured hard alpa */
188 u8 rx_bbcredit; /* receive buffer credits */
189 u8 tx_bbcredit; /* transmit buffer credits */
190 u8 ratelimit; /* ratelimit enabled or not */
191 u8 trl_def_speed; /* ratelimit default speed */
192 u8 rsvd[3];
193 u16 path_tov; /* device path timeout */
194 u16 q_depth; /* SCSI Queue depth */
195};
196
197/**
198 * Port attribute values.
199 */
200struct bfa_pport_attr_s {
201 /*
202 * Static fields
203 */
204 wwn_t nwwn; /* node wwn */
205 wwn_t pwwn; /* port wwn */
206 enum fc_cos cos_supported; /* supported class of services */
207 u32 rsvd;
208 struct fc_symname_s port_symname; /* port symbolic name */
209 enum bfa_pport_speed speed_supported; /* supported speeds */
210 bfa_boolean_t pbind_enabled; /* Will be set if Persistent binding
211 * enabled. Relevant only in Windows
212 */
213
214 /*
215 * Configured values
216 */
217 struct bfa_pport_cfg_s pport_cfg; /* pport cfg */
218
219 /*
220 * Dynamic field - info from BFA
221 */
222 enum bfa_pport_states port_state; /* current port state */
223 enum bfa_pport_speed speed; /* current speed */
224 enum bfa_pport_topology topology; /* current topology */
225 bfa_boolean_t beacon; /* current beacon status */
226 bfa_boolean_t link_e2e_beacon;/* set if link beacon on */
227 bfa_boolean_t plog_enabled; /* set if portlog is enabled*/
228
229 /*
230 * Dynamic field - info from FCS
231 */
232 u32 pid; /* port ID */
233 enum bfa_pport_type port_type; /* current topology */
234 u32 loopback; /* external loopback */
235 u32 rsvd1;
236 u32 rsvd2; /* padding for 64 bit */
237};
238
239/**
240 * FC Port statistics.
241 */
242struct bfa_pport_fc_stats_s {
243 u64 secs_reset; /* seconds since stats is reset */
244 u64 tx_frames; /* transmitted frames */
245 u64 tx_words; /* transmitted words */
246 u64 rx_frames; /* received frames */
247 u64 rx_words; /* received words */
248 u64 lip_count; /* LIPs seen */
249 u64 nos_count; /* NOS count */
250 u64 error_frames; /* errored frames (sent?) */
251 u64 dropped_frames; /* dropped frames */
252 u64 link_failures; /* link failure count */
253 u64 loss_of_syncs; /* loss of sync count */
254 u64 loss_of_signals;/* loss of signal count */
255 u64 primseq_errs; /* primitive sequence protocol */
256 u64 bad_os_count; /* invalid ordered set */
257 u64 err_enc_out; /* Encoding error outside frame */
258 u64 invalid_crcs; /* frames received with invalid CRC*/
259 u64 undersized_frm; /* undersized frames */
260 u64 oversized_frm; /* oversized frames */
261 u64 bad_eof_frm; /* frames with bad EOF */
262 struct bfa_qos_stats_s qos_stats; /* QoS statistics */
263};
264
265/**
266 * Eth Port statistics.
267 */
268struct bfa_pport_eth_stats_s {
269 u64 secs_reset; /* seconds since stats is reset */
270 u64 frame_64; /* both rx and tx counter */
271 u64 frame_65_127; /* both rx and tx counter */
272 u64 frame_128_255; /* both rx and tx counter */
273 u64 frame_256_511; /* both rx and tx counter */
274 u64 frame_512_1023; /* both rx and tx counter */
275 u64 frame_1024_1518; /* both rx and tx counter */
276 u64 frame_1519_1522; /* both rx and tx counter */
277
278 u64 tx_bytes;
279 u64 tx_packets;
280 u64 tx_mcast_packets;
281 u64 tx_bcast_packets;
282 u64 tx_control_frame;
283 u64 tx_drop;
284 u64 tx_jabber;
285 u64 tx_fcs_error;
286 u64 tx_fragments;
287
288 u64 rx_bytes;
289 u64 rx_packets;
290 u64 rx_mcast_packets;
291 u64 rx_bcast_packets;
292 u64 rx_control_frames;
293 u64 rx_unknown_opcode;
294 u64 rx_drop;
295 u64 rx_jabber;
296 u64 rx_fcs_error;
297 u64 rx_alignment_error;
298 u64 rx_frame_length_error;
299 u64 rx_code_error;
300 u64 rx_fragments;
301
302 u64 rx_pause; /* BPC */
303 u64 rx_zero_pause; /* BPC Pause cancellation */
304 u64 tx_pause; /* BPC */
305 u64 tx_zero_pause; /* BPC Pause cancellation */
306 u64 rx_fcoe_pause; /* BPC */
307 u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */
308 u64 tx_fcoe_pause; /* BPC */
309 u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */
310};
311
312/**
313 * Port statistics.
314 */
315union bfa_pport_stats_u {
316 struct bfa_pport_fc_stats_s fc;
317 struct bfa_pport_eth_stats_s eth;
318};
319
320/**
321 * Port FCP mappings.
322 */
323struct bfa_pport_fcpmap_s {
324 char osdevname[256];
325 u32 bus;
326 u32 target;
327 u32 oslun;
328 u32 fcid;
329 wwn_t nwwn;
330 wwn_t pwwn;
331 u64 fcplun;
332 char luid[256];
333};
334
335/**
336 * Port RNID info.
337 */
338struct bfa_pport_rnid_s {
339 wwn_t wwn;
340 u32 unittype;
341 u32 portid;
342 u32 attached_nodes_num;
343 u16 ip_version;
344 u16 udp_port;
345 u8 ipaddr[16];
346 u16 rsvd;
347 u16 topologydiscoveryflags;
348};
349
350/**
351 * Link state information
352 */
353struct bfa_pport_link_s {
354 u8 linkstate; /* Link state bfa_pport_linkstate */
355 u8 linkstate_rsn; /* bfa_pport_linkstate_rsn_t */
356 u8 topology; /* P2P/LOOP bfa_pport_topology */
357 u8 speed; /* Link speed (1/2/4/8 G) */
358 u32 linkstate_opt; /* Linkstate optional data (debug) */
359 u8 trunked; /* Trunked or not (1 or 0) */
360 u8 resvd[3];
361 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
362 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
363 union {
364 struct {
365 u8 tmaster;/* Trunk Master or
366 * not (1 or 0) */
367 u8 tlinks; /* Trunk links bitmap
368 * (linkup) */
369 u8 resv1; /* Reserved */
370 } trunk_info;
371
372 struct {
373 u8 myalpa; /* alpa claimed */
374 u8 login_req; /* Login required or
375 * not (1 or 0) */
376 u8 alpabm_val;/* alpa bitmap valid
377 * or not (1 or 0) */
378 struct fc_alpabm_s alpabm; /* alpa bitmap */
379 } loop_info;
380 } tl;
381};
382
383#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h b/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
new file mode 100644
index 000000000000..aadbacd1d2d7
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_QOS_H__
19#define __BFA_DEFS_QOS_H__
20
21/**
22 * QoS states
23 */
24enum bfa_qos_state {
25 BFA_QOS_ONLINE = 1, /* QoS is online */
26 BFA_QOS_OFFLINE = 2, /* QoS is offline */
27};
28
29
30/**
31 * QoS Priority levels.
32 */
33enum bfa_qos_priority {
34 BFA_QOS_UNKNOWN = 0,
35 BFA_QOS_HIGH = 1, /* QoS Priority Level High */
36 BFA_QOS_MED = 2, /* QoS Priority Level Medium */
37 BFA_QOS_LOW = 3, /* QoS Priority Level Low */
38};
39
40
41/**
42 * QoS bandwidth allocation for each priority level
43 */
44enum bfa_qos_bw_alloc {
45 BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */
46 BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */
47 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
48};
49
50/**
51 * QoS attribute returned in QoS Query
52 */
53struct bfa_qos_attr_s {
54 enum bfa_qos_state state; /* QoS current state */
55 u32 total_bb_cr; /* Total BB Credits */
56};
57
58/**
59 * These fields should be displayed only from the CLI.
60 * There will be a separate BFAL API (get_qos_vc_attr ?)
61 * to retrieve this.
62 *
63 */
64#define BFA_QOS_MAX_VC 16
65
66struct bfa_qos_vc_info_s {
67 u8 vc_credit;
68 u8 borrow_credit;
69 u8 priority;
70 u8 resvd;
71};
72
73struct bfa_qos_vc_attr_s {
74 u16 total_vc_count; /* Total VC Count */
75 u16 shared_credit;
76 u32 elp_opmode_flags;
77 struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
78 * total_vc_count */
79};
80
81/**
82 * QoS statistics
83 */
84struct bfa_qos_stats_s {
85 u32 flogi_sent; /* QoS Flogi sent */
86 u32 flogi_acc_recvd; /* QoS Flogi Acc received */
87 u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
88 u32 flogi_retries; /* QoS Flogi retries */
89
90 u32 elp_recvd; /* QoS ELP received */
91 u32 elp_accepted; /* QoS ELP Accepted */
92 u32 elp_rejected; /* QoS ELP rejected */
93 u32 elp_dropped; /* QoS ELP dropped */
94
95 u32 qos_rscn_recvd; /* QoS RSCN received */
96 u32 rsvd; /* padding for 64 bit alignment */
97};
98
99#endif /* __BFA_DEFS_QOS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h b/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
new file mode 100644
index 000000000000..e0af59d6d2f6
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
@@ -0,0 +1,199 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_RPORT_H__
19#define __BFA_DEFS_RPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_qos.h>
26
27/**
28 * FCS remote port states
29 */
30enum bfa_rport_state {
31 BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */
32 BFA_RPORT_OFFLINE = 1, /* rport is offline */
33 BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */
34 BFA_RPORT_ONLINE = 3, /* login to rport is complete */
35 BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */
36 BFA_RPORT_NSQUERY = 5, /* nameserver query */
37 BFA_RPORT_ADISC = 6, /* ADISC authentication */
38 BFA_RPORT_LOGO = 7, /* logging out with rport */
39 BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */
40 BFA_RPORT_NSDISC = 9, /* re-discover rport */
41};
42
43/**
44 * Rport Scsi Function : Initiator/Target.
45 */
46enum bfa_rport_function {
47 BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */
48 BFA_RPORT_TARGET = 0x02, /* SCSI Target */
49};
50
51/**
52 * port/node symbolic names for rport
53 */
54#define BFA_RPORT_SYMNAME_MAXLEN 255
55struct bfa_rport_symname_s {
56 char symname[BFA_RPORT_SYMNAME_MAXLEN];
57};
58
59struct bfa_rport_hal_stats_s {
60 u32 sm_un_cr; /* uninit: create events */
61 u32 sm_un_unexp; /* uninit: exception events */
62 u32 sm_cr_on; /* created: online events */
63 u32 sm_cr_del; /* created: delete events */
64 u32 sm_cr_hwf; /* created: IOC down */
65 u32 sm_cr_unexp; /* created: exception events */
66 u32 sm_fwc_rsp; /* fw create: f/w responses */
67 u32 sm_fwc_del; /* fw create: delete events */
68 u32 sm_fwc_off; /* fw create: offline events */
69 u32 sm_fwc_hwf; /* fw create: IOC down */
70 u32 sm_fwc_unexp; /* fw create: exception events*/
71 u32 sm_on_off; /* online: offline events */
72 u32 sm_on_del; /* online: delete events */
73 u32 sm_on_hwf; /* online: IOC down events */
74 u32 sm_on_unexp; /* online: exception events */
75 u32 sm_fwd_rsp; /* fw delete: fw responses */
76 u32 sm_fwd_del; /* fw delete: delete events */
77 u32 sm_fwd_hwf; /* fw delete: IOC down events */
78 u32 sm_fwd_unexp; /* fw delete: exception events*/
79 u32 sm_off_del; /* offline: delete events */
80 u32 sm_off_on; /* offline: online events */
81 u32 sm_off_hwf; /* offline: IOC down events */
82 u32 sm_off_unexp; /* offline: exception events */
83 u32 sm_del_fwrsp; /* delete: fw responses */
84 u32 sm_del_hwf; /* delete: IOC down events */
85 u32 sm_del_unexp; /* delete: exception events */
86 u32 sm_delp_fwrsp; /* delete pend: fw responses */
87 u32 sm_delp_hwf; /* delete pend: IOC downs */
88 u32 sm_delp_unexp; /* delete pend: exceptions */
89 u32 sm_offp_fwrsp; /* off-pending: fw responses */
90 u32 sm_offp_del; /* off-pending: deletes */
91 u32 sm_offp_hwf; /* off-pending: IOC downs */
92 u32 sm_offp_unexp; /* off-pending: exceptions */
93 u32 sm_iocd_off; /* IOC down: offline events */
94 u32 sm_iocd_del; /* IOC down: delete events */
95 u32 sm_iocd_on; /* IOC down: online events */
96 u32 sm_iocd_unexp; /* IOC down: exceptions */
97 u32 rsvd;
98};
99
100/**
101 * FCS remote port statistics
102 */
103struct bfa_rport_stats_s {
104 u32 offlines; /* remote port offline count */
105 u32 onlines; /* remote port online count */
106 u32 rscns; /* RSCN affecting rport */
107 u32 plogis; /* plogis sent */
108 u32 plogi_accs; /* plogi accepts */
109 u32 plogi_timeouts; /* plogi timeouts */
110 u32 plogi_rejects; /* rcvd plogi rejects */
111 u32 plogi_failed; /* local failure */
112 u32 plogi_rcvd; /* plogis rcvd */
113 u32 prli_rcvd; /* inbound PRLIs */
114 u32 adisc_rcvd; /* ADISCs received */
115 u32 adisc_rejects; /* recvd ADISC rejects */
116 u32 adisc_sent; /* ADISC requests sent */
117 u32 adisc_accs; /* ADISC accepted by rport */
118 u32 adisc_failed; /* ADISC failed (no response) */
119 u32 adisc_rejected; /* ADISC rejected by us */
120 u32 logos; /* logos sent */
121 u32 logo_accs; /* LOGO accepts from rport */
122 u32 logo_failed; /* LOGO failures */
123 u32 logo_rejected; /* LOGO rejects from rport */
124 u32 logo_rcvd; /* LOGO from remote port */
125
126 u32 rpsc_rcvd; /* RPSC received */
127 u32 rpsc_rejects; /* recvd RPSC rejects */
128 u32 rpsc_sent; /* RPSC requests sent */
129 u32 rpsc_accs; /* RPSC accepted by rport */
130 u32 rpsc_failed; /* RPSC failed (no response) */
131 u32 rpsc_rejected; /* RPSC rejected by us */
132
133 u32 rsvd;
134 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
135};
136
137/**
138 * Rport's QoS attributes
139 */
140struct bfa_rport_qos_attr_s {
141 enum bfa_qos_priority qos_priority; /* rport's QoS priority */
142 u32 qos_flow_id; /* QoS flow Id */
143};
144
145/**
146 * FCS remote port attributes returned in queries
147 */
148struct bfa_rport_attr_s {
149 wwn_t nwwn; /* node wwn */
150 wwn_t pwwn; /* port wwn */
151 enum fc_cos cos_supported; /* supported class of services */
152 u32 pid; /* port ID */
153 u32 df_sz; /* Max payload size */
154 enum bfa_rport_state state; /* Rport State machine state */
155 enum fc_cos fc_cos; /* FC classes of services */
156 bfa_boolean_t cisc; /* CISC capable device */
157 struct bfa_rport_symname_s symname; /* Symbolic Name */
158 enum bfa_rport_function scsi_function; /* Initiator/Target */
159 struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
160 enum bfa_pport_speed curr_speed; /* operating speed got from
161 * RPSC ELS. UNKNOWN, if RPSC
162 * is not supported */
163 bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */
164 enum bfa_pport_speed assigned_speed; /* Speed assigned by the user.
165 * will be used if RPSC is not
166 * supported by the rport */
167};
168
169#define bfa_rport_aen_qos_data_t struct bfa_rport_qos_attr_s
170
171/**
172 * BFA remote port events
173 * Arguments below are in BFAL context from Mgmt
174 * BFA_RPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn
175 * BFA_RPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn
176 * BFA_RPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn
177 * BFA_RPORT_AEN_QOS_PRIO: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn, prio
178 * BFA_RPORT_AEN_QOS_FLOWID:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn, flow_id
179 */
180enum bfa_rport_aen_event {
181 BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
182 BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
183 BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
184 BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
185 BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
186};
187
188struct bfa_rport_aen_data_s {
189 u16 vf_id; /* vf_id of this logical port */
190 u16 rsvd[3];
191 wwn_t ppwwn; /* WWN of its physical port */
192 wwn_t lpwwn; /* WWN of this logical port */
193 wwn_t rpwwn; /* WWN of this remote port */
194 union {
195 bfa_rport_aen_qos_data_t qos;
196 } priv;
197};
198
199#endif /* __BFA_DEFS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
new file mode 100644
index 000000000000..cdceaeb9f4b8
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -0,0 +1,255 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_STATUS_H__
18#define __BFA_DEFS_STATUS_H__
19
20/**
21 * API status return values
22 *
23 * NOTE: The error messages are auto generated from the comments. Only single line
24 * comments are supported
25 */
26enum bfa_status {
27 BFA_STATUS_OK = 0, /* Success */
28 BFA_STATUS_FAILED = 1, /* Operation failed */
29 BFA_STATUS_EINVAL = 2, /* Invalid params Check input
30 * parameters */
31 BFA_STATUS_ENOMEM = 3, /* Out of resources */
32 BFA_STATUS_ENOSYS = 4, /* Function not implemented */
33 BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if
34 * persists, contact support */
35 BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
36 BFA_STATUS_ENOFCPORTS = 7, /* No FC ports resources */
37 BFA_STATUS_NOFLASH = 8, /* Flash not present */
38 BFA_STATUS_BADFLASH = 9, /* Flash is corrupted or bad */
39 BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
40 BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
41 BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted
42 * contact support */
43 BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
44 BFA_STATUS_ABORTED = 14, /* Operation aborted */
45 BFA_STATUS_NODEV = 15, /* Dev is not present */
46 BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */
47 BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */
48 BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
49 BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
50 BFA_STATUS_FCPT_LS_RJT = 20, /* Got LS_RJT for FC Pass
51 * through Req */
52 BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
53 BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported
54 * limit */
55 BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed
56 * setting */
57 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
58 BFA_STATUS_CNFG_FAILED = 25, /* Setting can not be persisted */
59 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
60 BFA_STATUS_NO_ADAPTER = 27, /* No Brocade Adapter Found */
61 BFA_STATUS_LINKDOWN = 28, /* Link is down - Check or replace
62 * SFP/cable */
63 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
64 BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
65 BFA_STATUS_NSLOGIN_FAILED = 31, /* Nameserver login failed */
66 BFA_STATUS_NO_RPORTS = 32, /* No remote ports found */
67 BFA_STATUS_NSQUERY_FAILED = 33, /* Nameserver query failed */
68 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
69 BFA_STATUS_RPORT_OFFLINE = 35, /* RPORT is not online */
70 BFA_STATUS_TGTOPEN_FAILED = 36, /* Remote SCSI target open failed */
71 BFA_STATUS_BAD_LUNS = 37, /* No valid LUNs found */
72 BFA_STATUS_IO_FAILURE = 38, /* SCSI target IO failure */
73 BFA_STATUS_NO_FABRIC = 39, /* No switched fabric present */
74 BFA_STATUS_EBADF = 40, /* Bad file descriptor */
75 BFA_STATUS_EINTR = 41, /* A signal was caught during ioctl */
76 BFA_STATUS_EIO = 42, /* I/O error */
77 BFA_STATUS_ENOTTY = 43, /* Inappropriate I/O control
78 * operation */
79 BFA_STATUS_ENXIO = 44, /* No such device or address */
80 BFA_STATUS_EFOPEN = 45, /* Failed to open file */
81 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
82 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port
83 * first */
84 BFA_STATUS_BADFRMHDR = 48, /* Bad frame header */
85 BFA_STATUS_BADFRMSZ = 49, /* Bad frame size check and replace
86 * SFP/cable */
87 BFA_STATUS_MISSINGFRM = 50, /* Missing frame check and replace
88 * SFP/cable */
89 BFA_STATUS_LINKTIMEOUT = 51, /* Link timeout check and replace
90 * SFP/cable */
91 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the
92 * rport */
93 BFA_STATUS_CHECKSUM_FAIL = 53, /* checksum failure */
94 BFA_STATUS_GZME_FAILED = 54, /* Get zone member query failed */
95 BFA_STATUS_SCSISTART_REQD = 55, /* SCSI disk require START command */
96 BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
97 * contact support */
98 BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
99 BFA_STATUS_MISMATCH = 58, /* Version mismatch */
100 BFA_STATUS_IOC_ENABLED = 59, /* IOC is already enabled */
101 BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled disable
102 * adapter first */
103 BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational. Enable IOC
104 * and if it still fails,
105 * contact support */
106 BFA_STATUS_ADDR_MAP_FAILURE = 62, /* PCI base address not mapped
107 * in OS */
108 BFA_STATUS_SAME_NAME = 63, /* Name exists! use a different
109 * name */
110 BFA_STATUS_PENDING = 64, /* API completes asynchronously */
111 BFA_STATUS_8G_SPD = 65, /* Speed setting not valid for
112 * 8G HBA */
113 BFA_STATUS_4G_SPD = 66, /* Speed setting not valid for
114 * 4G HBA */
115 BFA_STATUS_AD_IS_ENABLE = 67, /* Adapter is already enabled */
116 BFA_STATUS_EINVAL_TOV = 68, /* Invalid path failover TOV */
117 BFA_STATUS_EINVAL_QDEPTH = 69, /* Invalid queue depth value */
118 BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version
119 * mismatch */
120 BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
121 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
122 BFA_STATUS_BEACON_OFF = 73, /* Port Beacon already off */
123 BFA_STATUS_LBEACON_ON = 74, /* Link End-to-End Beacon already
124 * on */
125 BFA_STATUS_LBEACON_OFF = 75, /* Link End-to-End Beacon already
126 * off */
127 BFA_STATUS_PORT_NOT_INITED = 76, /* Port not initialized */
128 BFA_STATUS_RPSC_ENABLED = 77, /* Target has a valid speed */
129 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
130 BFA_STATUS_BAD_FILE = 79, /* Not a valid Brocade Boot Code
131 * file */
132 BFA_STATUS_RLIM_EN = 80, /* Target rate limiting is already
133 * enabled */
134 BFA_STATUS_RLIM_DIS = 81, /* Target rate limiting is already
135 * disabled */
136 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
137 BFA_STATUS_ADAPTER_DISABLED = 83, /* Adapter is already disabled */
138 BFA_STATUS_BIOS_DISABLED = 84, /* Bios is already disabled */
139 BFA_STATUS_AUTH_ENABLED = 85, /* Authentication is already
140 * enabled */
141 BFA_STATUS_AUTH_DISABLED = 86, /* Authentication is already
142 * disabled */
143 BFA_STATUS_ERROR_TRL_ENABLED = 87, /* Target rate limiting is
144 * enabled */
145 BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */
146 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
147 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact
148 * support */
149 BFA_STATUS_INVALID_DEVID = 91, /* Invalid device id provided */
150 BFA_STATUS_QOS_ENABLED = 92, /* QOS is already enabled */
151 BFA_STATUS_QOS_DISABLED = 93, /* QOS is already disabled */
152 BFA_STATUS_INCORRECT_DRV_CONFIG = 94, /* Check configuration
153 * key/value pair */
154 BFA_STATUS_REG_FAIL = 95, /* Can't read windows registry */
155 BFA_STATUS_IM_INV_CODE = 96, /* Invalid IOCTL code */
156 BFA_STATUS_IM_INV_VLAN = 97, /* Invalid VLAN ID */
157 BFA_STATUS_IM_INV_ADAPT_NAME = 98, /* Invalid adapter name */
158 BFA_STATUS_IM_LOW_RESOURCES = 99, /* Memory allocation failure in
159 * driver */
160 BFA_STATUS_IM_VLANID_IS_PVID = 100, /* Given VLAN id same as PVID */
161 BFA_STATUS_IM_VLANID_EXISTS = 101, /* Given VLAN id already exists */
162 BFA_STATUS_IM_FW_UPDATE_FAIL = 102, /* Updating firmware with new
163 * VLAN ID failed */
164 BFA_STATUS_PORTLOG_ENABLED = 103, /* Port Log is already enabled */
165 BFA_STATUS_PORTLOG_DISABLED = 104, /* Port Log is already disabled */
166 BFA_STATUS_FILE_NOT_FOUND = 105, /* Specified file could not be
167 * found */
168 BFA_STATUS_QOS_FC_ONLY = 106, /* QOS can be enabled for FC mode
169 * only */
170 BFA_STATUS_RLIM_FC_ONLY = 107, /* RATELIM can be enabled for FC mode
171 * only */
172 BFA_STATUS_CT_SPD = 108, /* Invalid speed selection for Catapult. */
173 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
174	BFA_STATUS_CEE_NOT_DN = 110,    /* eth port is not in down state, please
175 * bring down first */
176 BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G HBA */
177 BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */
178 BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already
179 * exists */
180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
181 * of another team */
182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
183 * Delete all VLANs before
184 * creating team */
185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
186 * for adapters */
187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
188 * configured for adapters */
189 BFA_STATUS_IM_MTU_MISMATCH = 118, /* Mismatching MTUs configured for
190 * adapters */
191 BFA_STATUS_IM_RSS_MISMATCH = 119, /* Mismatching RSS parameters
192 * configured for adapters */
193 BFA_STATUS_IM_HDS_MISMATCH = 120, /* Mismatching HDS parameters
194 * configured for adapters */
195 BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, /* Mismatching offload
196 * parameters configured for
197 * adapters */
198 BFA_STATUS_IM_PORT_PARAMS = 122, /* Error setting port parameters */
199 BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, /* Port is not part of team */
200 BFA_STATUS_IM_CANNOT_REM_PRI = 124, /* Primary adapter cannot be
201 * removed. Change primary before
202 * removing */
203 BFA_STATUS_IM_MAX_PORTS_REACHED = 125, /* Exceeding maximum ports
204 * per team */
205 BFA_STATUS_IM_LAST_PORT_DELETE = 126, /* Last port in team being
206 * deleted */
207 BFA_STATUS_IM_NO_DRIVER = 127, /* IM driver is not installed */
208 BFA_STATUS_IM_MAX_VLANS_REACHED = 128, /* Exceeding maximum VLANs
209 * per port */
210 BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, /* Bios speed config not
211 * allowed for CNA */
212 BFA_STATUS_NO_MINPORT_DRIVER = 130, /* Miniport driver is not
213 * loaded */
214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
216 BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */
217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */
218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
220 BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port vlan (PVID) */
221 BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port vlan (PVID) */
222 BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */
223 BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
224 * Non-Zero Value */
225 BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network
226					 * Subsystem Lock Failed. Please
227 * try after some time */
228	BFA_STATUS_IM_GET_INETCFG_FAILED = 142,	/* Acquiring Network Subsystem
229 * handle Failed. Please try
230 * after some time */
231 BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not
232 * Enabled on this port */
233 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
234 * permissions to execute the BCU
235 * application */
236 BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved Vlan name
237 * string. The name is not allowed
238 * for the normal Vlans */
239 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
240 BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthru vlan id */
241	BFA_STATUS_IM_BIND_FAILED = 148, /* IM Driver bind operation
242 * failed */
243	BFA_STATUS_IM_UNBIND_FAILED = 149, /* IM Driver unbind operation
244 * failed */
245 BFA_STATUS_MAX_VAL /* Unknown error code */
246};
247#define bfa_status_t enum bfa_status
248
249enum bfa_eproto_status {
250 BFA_EPROTO_BAD_ACCEPT = 0,
251 BFA_EPROTO_UNKNOWN_RSP = 1
252};
253#define bfa_eproto_status_t enum bfa_eproto_status
254
255#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h b/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
new file mode 100644
index 000000000000..e05a2db7abed
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TIN_H__
19#define __BFA_DEFS_TIN_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23
24/**
25 * FCS tin states
26 */
27enum bfa_tin_state_e {
28 BFA_TIN_SM_OFFLINE = 0, /* tin is offline */
29 BFA_TIN_SM_WOS_LOGIN = 1, /* Waiting PRLI ACC/RJT from ULP */
30 BFA_TIN_SM_WFW_ONLINE = 2, /* Waiting ACK to PRLI ACC from FW */
31 BFA_TIN_SM_ONLINE = 3, /* tin login is complete */
32 BFA_TIN_SM_WIO_RELOGIN = 4, /* tin relogin is in progress */
33 BFA_TIN_SM_WIO_LOGOUT = 5, /* Processing of PRLO req from
34 * Initiator is in progress
35 */
36 BFA_TIN_SM_WOS_LOGOUT = 6, /* Processing of PRLO req from
37 * Initiator is in progress
38 */
39 BFA_TIN_SM_WIO_CLEAN = 7, /* Waiting for IO cleanup before tin
40 * is offline. This can be triggered
41 * by RPORT LOGO (rcvd/sent) or by
42 * PRLO (rcvd/sent)
43 */
44};
45
46struct bfa_prli_req_s {
47 struct fchs_s fchs;
48 struct fc_prli_s prli_payload;
49};
50
51struct bfa_prlo_req_s {
52 struct fchs_s fchs;
53 struct fc_prlo_s prlo_payload;
54};
55
56void bfa_tin_send_login_rsp(void *bfa_tin, u32 login_rsp,
57 struct fc_ls_rjt_s rjt_payload);
58void bfa_tin_send_logout_rsp(void *bfa_tin, u32 logout_rsp,
59 struct fc_ls_rjt_s rjt_payload);
60/**
61 * FCS target port statistics
62 */
63struct bfa_tin_stats_s {
64 u32 onlines; /* ITN nexus onlines (PRLI done) */
65 u32 offlines; /* ITN Nexus offlines */
66 u32 prli_req_parse_err; /* prli req parsing errors */
67 u32 prli_rsp_rjt; /* num prli rsp rejects sent */
68 u32 prli_rsp_acc; /* num prli rsp accepts sent */
69 u32 cleanup_comps; /* ITN cleanup completions */
70};
71
72/**
73 * FCS tin attributes returned in queries
74 */
75struct bfa_tin_attr_s {
76 enum bfa_tin_state_e state;
77 u8 seq_retry; /* Sequence retry supported */
78 u8 rsvd[3];
79};
80
81/**
82 * BFA TIN async event data structure for BFAL
83 */
84enum bfa_tin_aen_event {
85 BFA_TIN_AEN_ONLINE = 1, /* Target online */
86 BFA_TIN_AEN_OFFLINE = 2, /* Target offline */
87 BFA_TIN_AEN_DISCONNECT = 3, /* Target disconnected */
88};
89
90/**
91 * BFA TIN event data structure.
92 */
93struct bfa_tin_aen_data_s {
94 u16 vf_id; /* vf_id of the IT nexus */
95 u16 rsvd[3];
96 wwn_t lpwwn; /* WWN of logical port */
97 wwn_t rpwwn; /* WWN of remote(target) port */
98};
99
100/**
101 * Below APIs are needed from BFA driver
102 * Move these to BFA driver public header file?
103 */
104/* TIN rcvd new PRLI & gets bfad_tin_t ptr from driver this callback */
105void *bfad_tin_rcvd_login_req(void *bfad_tm_port, void *bfa_tin,
106 wwn_t rp_wwn, u32 rp_fcid,
107 struct bfa_prli_req_s prli_req);
108/* TIN rcvd new PRLO */
109void bfad_tin_rcvd_logout_req(void *bfad_tin, wwn_t rp_wwn, u32 rp_fcid,
110 struct bfa_prlo_req_s prlo_req);
111/* TIN is online and ready for IO */
112void bfad_tin_online(void *bfad_tin);
113/* TIN is offline and BFA driver can shutdown its upper stack */
114void bfad_tin_offline(void *bfad_tin);
115/* TIN does not need this BFA driver tin tag anymore, so can be freed */
116void bfad_tin_res_free(void *bfad_tin);
117
118#endif /* __BFA_DEFS_TIN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
new file mode 100644
index 000000000000..31881d218515
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TSENSOR_H__
19#define __BFA_DEFS_TSENSOR_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_types.h>
23
24/**
25 * Temperature sensor status values
26 */
27enum bfa_tsensor_status {
28 BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */
29 BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */
30 BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below minimum */
31 BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */
32 BFA_TSENSOR_STATUS_ABOVE_MAX = 5, /* temperature above maximum */
33};
34
35/**
36 * Temperature sensor attribute
37 */
38struct bfa_tsensor_attr_s {
39 enum bfa_tsensor_status status; /* temperature sensor status */
40 u32 value; /* current temperature in celsius */
41};
42
43#endif /* __BFA_DEFS_TSENSOR_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_types.h b/drivers/scsi/bfa/include/defs/bfa_defs_types.h
new file mode 100644
index 000000000000..4348332b107a
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_types.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_TYPES_H__
18#define __BFA_DEFS_TYPES_H__
19
20#include <bfa_os_inc.h>
21
22enum bfa_boolean {
23 BFA_FALSE = 0,
24 BFA_TRUE = 1
25};
26#define bfa_boolean_t enum bfa_boolean
27
28#define BFA_STRING_32 32
29
30#endif /* __BFA_DEFS_TYPES_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_version.h b/drivers/scsi/bfa/include/defs/bfa_defs_version.h
new file mode 100644
index 000000000000..f8902a2c9aad
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_version.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_VERSION_H__
18#define __BFA_DEFS_VERSION_H__
19
20#define BFA_VERSION_LEN 64
21
22#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h b/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
new file mode 100644
index 000000000000..3235be5e9423
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VF_H__
19#define __BFA_DEFS_VF_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VF states
27 */
28enum bfa_vf_state {
29 BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
30 BFA_VF_LINK_DOWN = 1, /* link is down */
31 BFA_VF_FLOGI = 2, /* flogi is in progress */
32 BFA_VF_AUTH = 3, /* authentication in progress */
33 BFA_VF_NOFABRIC = 4, /* fabric is not present */
34 BFA_VF_ONLINE = 5, /* login to fabric is complete */
35 BFA_VF_EVFP = 6, /* EVFP is in progress */
36 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
37};
38
39/**
40 * VF statistics
41 */
42struct bfa_vf_stats_s {
43 u32 flogi_sent; /* Num FLOGIs sent */
44 u32 flogi_rsp_err; /* FLOGI response errors */
45 u32 flogi_acc_err; /* FLOGI accept errors */
46 u32 flogi_accepts; /* FLOGI accepts received */
47 u32 flogi_rejects; /* FLOGI rejects received */
48 u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
49 u32 flogi_alloc_wait; /* Allocation waits prior to
50 * sending FLOGI
51 */
52 u32 flogi_rcvd; /* FLOGIs received */
53 u32 flogi_rejected; /* Incoming FLOGIs rejected */
54 u32 fabric_onlines; /* Internal fabric online
55 * notification sent to other
56 * modules
57 */
58 u32 fabric_offlines; /* Internal fabric offline
59 * notification sent to other
60 * modules
61 */
62 u32 resvd;
63};
64
65/**
66 * VF attributes returned in queries
67 */
68struct bfa_vf_attr_s {
69 enum bfa_vf_state state; /* VF state */
70 u32 rsvd;
71 wwn_t fabric_name; /* fabric name */
72};
73
74#endif /* __BFA_DEFS_VF_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h b/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
new file mode 100644
index 000000000000..9f021f43b3b4
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VPORT_H__
19#define __BFA_DEFS_VPORT_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VPORT states
27 */
28enum bfa_vport_state {
29 BFA_FCS_VPORT_UNINIT = 0,
30 BFA_FCS_VPORT_CREATED = 1,
31 BFA_FCS_VPORT_OFFLINE = 1,
32 BFA_FCS_VPORT_FDISC_SEND = 2,
33 BFA_FCS_VPORT_FDISC = 3,
34 BFA_FCS_VPORT_FDISC_RETRY = 4,
35 BFA_FCS_VPORT_ONLINE = 5,
36 BFA_FCS_VPORT_DELETING = 6,
37 BFA_FCS_VPORT_CLEANUP = 6,
38 BFA_FCS_VPORT_LOGO_SEND = 7,
39 BFA_FCS_VPORT_LOGO = 8,
40 BFA_FCS_VPORT_ERROR = 9,
41 BFA_FCS_VPORT_MAX_STATE,
42};
43
44/**
45 * vport statistics
46 */
47struct bfa_vport_stats_s {
48 struct bfa_port_stats_s port_stats; /* base class (port) stats */
49 /*
50 * TODO - remove
51 */
52
53 u32 fdisc_sent; /* num fdisc sent */
54 u32 fdisc_accepts; /* fdisc accepts */
55 u32 fdisc_retries; /* fdisc retries */
56 u32 fdisc_timeouts; /* fdisc timeouts */
57 u32 fdisc_rsp_err; /* fdisc response error */
58 u32 fdisc_acc_bad; /* bad fdisc accepts */
59 u32 fdisc_rejects; /* fdisc rejects */
60 u32 fdisc_unknown_rsp;
61 /*
62 *!< fdisc rsp unknown error
63 */
64 u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
65
66 u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
67 u32 logo_sent; /* logo sent */
68 u32 logo_accepts; /* logo accepts */
69 u32 logo_rejects; /* logo rejects */
70 u32 logo_rsp_err; /* logo rsp errors */
71 u32 logo_unknown_rsp;
72 /* logo rsp unknown errors */
73
74 u32 fab_no_npiv; /* fabric does not support npiv */
75
76 u32 fab_offline; /* offline events from fab SM */
77 u32 fab_online; /* online events from fab SM */
78 u32 fab_cleanup; /* cleanup request from fab SM */
79 u32 rsvd;
80};
81
82/**
83 * BFA vport attribute returned in queries
84 */
85struct bfa_vport_attr_s {
86 struct bfa_port_attr_s port_attr; /* base class (port) attributes */
87 enum bfa_vport_state vport_state; /* vport state */
88 u32 rsvd;
89};
90
91#endif /* __BFA_DEFS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb.h b/drivers/scsi/bfa/include/fcb/bfa_fcb.h
new file mode 100644
index 000000000000..2963b0bc30e7
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb.h BFA FCS callback interfaces
20 */
21
22#ifndef __BFA_FCB_H__
23#define __BFA_FCB_H__
24
25/**
26 * fcb Main fcs callbacks
27 */
28
29void bfa_fcb_exit(struct bfad_s *bfad);
30
31
32
33#endif /* __BFA_FCB_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
new file mode 100644
index 000000000000..a6c70aee0aa3
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19* : bfa_fcb_fcpim.h - BFA FCS initiator mode remote port callbacks
20 */
21
22#ifndef __BFAD_FCB_FCPIM_H__
23#define __BFAD_FCB_FCPIM_H__
24
25struct bfad_itnim_s;
26
27/*
28 * RPIM callbacks
29 */
30
31/**
32 * Memory allocation for remote port instance. Called before PRLI is
33 * initiated to the remote target port.
34 *
35 * @param[in] bfad - driver instance
36 * @param[out] itnim - FCS remote port (IM) instance
37 * @param[out] itnim_drv - driver remote port (IM) instance
38 *
39 * @return None
40 */
41void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
42 struct bfad_itnim_s **itnim_drv);
43
44/**
45 * Free remote port (IM) instance.
46 *
47 * @param[in] bfad - driver instance
48 * @param[in] itnim_drv - driver remote port instance
49 *
50 * @return None
51 */
52void bfa_fcb_itnim_free(struct bfad_s *bfad,
53 struct bfad_itnim_s *itnim_drv);
54
55/**
56 * Notification of when login with a remote target device is complete.
57 *
58 * @param[in] itnim_drv - driver remote port instance
59 *
60 * @return None
61 */
62void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
63
64/**
65 * Notification when login with the remote device is severed.
66 *
67 * @param[in] itnim_drv - driver remote port instance
68 *
69 * @return None
70 */
71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
72
73void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv);
74void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
75
76#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
new file mode 100644
index 000000000000..5fd7f986fa32
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_port.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_PORT_H__
23#define __BFA_FCB_PORT_H__
24
25#include <fcb/bfa_fcb_vport.h>
26/**
27 * fcs_port_fcb FCS port driver interfaces
28 */
29
30/*
31 * Forward declarations
32 */
33struct bfad_port_s;
34
35/*
36 * Callback functions from BFA FCS to driver
37 */
38
39/**
40 * Call from FCS to driver module when a port is instantiated. The port
41 * can be a base port or a virtual port with in the base fabric or
42 * a virtual fabric.
43 *
44 * On this callback, driver is supposed to create scsi_host, scsi_tgt or
45 * network interfaces bases on ports personality/roles.
46 *
47 * base port of base fabric: vf_drv == NULL && vp_drv == NULL
48 * vport of base fabric: vf_drv == NULL && vp_drv != NULL
49 * base port of VF: vf_drv != NULL && vp_drv == NULL
50 * vport of VF: vf_drv != NULL && vp_drv != NULL
51 *
52 * @param[in] bfad - driver instance
53 * @param[in] port - FCS port instance
54 * @param[in] roles - port roles: IM, TM, IP
55 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
56 * @param[in] vp_drv - vport driver instance, NULL if base port
57 *
58 * @return None
59 */
60struct bfad_port_s *bfa_fcb_port_new(struct bfad_s *bfad,
61 struct bfa_fcs_port_s *port,
62 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
63 struct bfad_vport_s *vp_drv);
64
65/**
66 * Call from FCS to driver module when a port is deleted. The port
67 * can be a base port or a virtual port with in the base fabric or
68 * a virtual fabric.
69 *
70 * @param[in] bfad - driver instance
71 * @param[in] roles - port roles: IM, TM, IP
72 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
73 * @param[in] vp_drv - vport driver instance, NULL if base port
74 *
75 * @return None
76 */
77void bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
78 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
79
80/**
81 * Notification when port transitions to ONLINE state.
82 *
83 * Online notification is a logical link up for the local port. This
84 * notification is sent after a successful FLOGI, or a successful
85 * link initialization in private-loop or N2N topologies.
86 *
87 * @param[in] bfad - driver instance
88 * @param[in] roles - port roles: IM, TM, IP
89 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
90 * @param[in] vp_drv - vport driver instance, NULL if base port
91 *
92 * @return None
93 */
94void bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
95 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
96
97/**
98 * Notification when port transitions to OFFLINE state.
99 *
100 * Offline notification is a logical link down for the local port.
101 *
102 * @param[in] bfad - driver instance
103 * @param[in] roles - port roles: IM, TM, IP
104 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
105 * @param[in] vp_drv - vport driver instance, NULL if base port
106 *
107 * @return None
108 */
109void bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
110 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
111
112
113#endif /* __BFA_FCB_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
new file mode 100644
index 000000000000..e0261bb6d1c1
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_rport.h BFA FCS rport driver interfaces
20 */
21
22#ifndef __BFA_FCB_RPORT_H__
23#define __BFA_FCB_RPORT_H__
24
25/**
26 * fcs_rport_fcb Remote port driver interfaces
27 */
28
29
30struct bfad_rport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_rport_add().
38 *
39 * @param[in] rport_drv - driver instance of rport
40 *
41 * @return None
42 */
43void bfa_fcb_rport_add(struct bfad_rport_s *rport_drv);
44
45/**
46 * Completion callback for bfa_fcs_rport_remove().
47 *
48 * @param[in] rport_drv - driver instance of rport
49 *
50 * @return None
51 */
52void bfa_fcb_rport_remove(struct bfad_rport_s *rport_drv);
53
54/**
55 * Call to allocate a rport instance.
56 *
57 * @param[in] bfad - driver instance
58 * @param[out] rport - BFA FCS instance of rport
59 * @param[out] rport_drv - driver instance of rport
60 *
61 * @retval BFA_STATUS_OK - successfully allocated
62 * @retval BFA_STATUS_ENOMEM - cannot allocate
63 */
64bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
65 struct bfa_fcs_rport_s **rport,
66 struct bfad_rport_s **rport_drv);
67
68/**
69 * Call to free rport memory resources.
70 *
71 * @param[in] bfad - driver instance
72 * @param[in] rport_drv - driver instance of rport
73 *
74 * @return None
75 */
76void bfa_fcb_rport_free(struct bfad_s *bfad, struct bfad_rport_s **rport_drv);
77
78
79
80#endif /* __BFA_FCB_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
new file mode 100644
index 000000000000..cfd3fac0a4e2
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vf.h BFA FCS virtual fabric driver interfaces
20 */
21
22#ifndef __BFA_FCB_VF_H__
23#define __BFA_FCB_VF_H__
24
25/**
26 * fcs_vf_fcb Virtual fabric driver interfaces
27 */
28
29
30struct bfad_vf_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vf_stop().
38 *
39 * @param[in] vf_drv - driver instance of vf
40 *
41 * @return None
42 */
43void bfa_fcb_vf_stop(struct bfad_vf_s *vf_drv);
44
45
46
47#endif /* __BFA_FCB_VF_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
new file mode 100644
index 000000000000..a39f474c2fcf
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vport.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_VPORT_H__
23#define __BFA_FCB_VPORT_H__
24
25/**
26 * fcs_vport_fcb Virtual port driver interfaces
27 */
28
29
30struct bfad_vport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vport_delete().
38 *
39 * @param[in] vport_drv - driver instance of vport
40 *
41 * @return None
42 */
43void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv);
44
45
46
47#endif /* __BFA_FCB_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
new file mode 100644
index 000000000000..627669c65546
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_H__
19#define __BFA_FCS_H__
20
21#include <cs/bfa_debug.h>
22#include <defs/bfa_defs_status.h>
23#include <defs/bfa_defs_version.h>
24#include <bfa.h>
25#include <fcs/bfa_fcs_fabric.h>
26
27#define BFA_FCS_OS_STR_LEN 64
28
29struct bfa_fcs_stats_s {
30 struct {
31 u32 untagged; /* untagged receive frames */
32 u32 tagged; /* tagged receive frames */
33 u32 vfid_unknown; /* VF id is unknown */
34 } uf;
35};
36
37struct bfa_fcs_driver_info_s {
38 u8 version[BFA_VERSION_LEN]; /* Driver Version */
39 u8 host_machine_name[BFA_FCS_OS_STR_LEN];
40 u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
41 u8 host_os_patch[BFA_FCS_OS_STR_LEN];/* patch or service pack */
42 u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
43};
44
45struct bfa_fcs_s {
46 struct bfa_s *bfa; /* corresponding BFA bfa instance */
47 struct bfad_s *bfad; /* corresponding BFA driver instance */
48 struct bfa_log_mod_s *logm; /* driver logging module instance */
49 struct bfa_trc_mod_s *trcmod; /* tracing module */
50 struct bfa_aen_s *aen; /* aen component */
51 bfa_boolean_t vf_enabled; /* VF mode is enabled */
52 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
53 u16 port_vfid; /* port default VF ID */
54 struct bfa_fcs_driver_info_s driver_info;
55 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
56 struct bfa_fcs_stats_s stats; /* FCS statistics */
57 struct bfa_wc_s wc; /* waiting counter */
58};
59
60/*
61 * bfa fcs API functions
62 */
63void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
64 bfa_boolean_t min_cfg);
65void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
66 struct bfa_fcs_driver_info_s *driver_info);
67void bfa_fcs_exit(struct bfa_fcs_s *fcs);
68void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
69void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
70void bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen);
71void bfa_fcs_start(struct bfa_fcs_s *fcs);
72
73#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
new file mode 100644
index 000000000000..28c4c9ff08b3
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_AUTH_H__
19#define __BFA_FCS_AUTH_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_auth.h>
25#include <defs/bfa_defs_vf.h>
26#include <cs/bfa_q.h>
27#include <cs/bfa_sm.h>
28#include <defs/bfa_defs_pport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <protocol/fc_sp.h>
31
32struct bfa_fcs_fabric_s;
33
34
35
36struct bfa_fcs_auth_s {
37 bfa_sm_t sm; /* state machine */
38 bfa_boolean_t policy; /* authentication enabled/disabled */
39 enum bfa_auth_status status; /* authentication status */
40 enum auth_rjt_codes rjt_code; /* auth reject status */
41 enum auth_rjt_code_exps rjt_code_exp; /* auth reject reason */
42 enum bfa_auth_algo algo; /* Authentication algorithm */
43 struct bfa_auth_stats_s stats; /* Statistics */
44 enum auth_dh_gid group; /* DH(diffie-hellman) Group */
45 enum bfa_auth_secretsource source; /* Secret source */
46 char secret[BFA_AUTH_SECRET_STRING_LEN];
47 /* secret string */
48 u8 secret_len;
49 /* secret string length */
50 u8 nretries;
51 /* number of retries */
52 struct bfa_fcs_fabric_s *fabric;/* pointer to fabric */
53 u8 sentcode; /* pointer to response data */
54 u8 *response; /* pointer to response data */
55 struct bfa_timer_s delay_timer; /* delay timer */
56 struct bfa_fcxp_s *fcxp; /* pointer to fcxp */
57 struct bfa_fcxp_wqe_s fcxp_wqe;
58};
59
60/**
61 * bfa fcs authentication public functions
62 */
63bfa_status_t bfa_fcs_auth_get_attr(struct bfa_fcs_s *port,
64 struct bfa_auth_attr_s *attr);
65bfa_status_t bfa_fcs_auth_set_policy(struct bfa_fcs_s *port,
66 bfa_boolean_t policy);
67enum bfa_auth_status bfa_fcs_auth_get_status(struct bfa_fcs_s *port);
68bfa_status_t bfa_fcs_auth_set_algo(struct bfa_fcs_s *port,
69 enum bfa_auth_algo algo);
70bfa_status_t bfa_fcs_auth_get_stats(struct bfa_fcs_s *port,
71 struct bfa_auth_stats_s *stats);
72bfa_status_t bfa_fcs_auth_set_dh_group(struct bfa_fcs_s *port, int group);
73bfa_status_t bfa_fcs_auth_set_secretstring(struct bfa_fcs_s *port,
74 char *secret);
75bfa_status_t bfa_fcs_auth_set_secretstring_encrypt(struct bfa_fcs_s *port,
76 u32 secret[], u32 len);
77bfa_status_t bfa_fcs_auth_set_secretsource(struct bfa_fcs_s *port,
78 enum bfa_auth_secretsource src);
79bfa_status_t bfa_fcs_auth_reset_stats(struct bfa_fcs_s *port);
80bfa_status_t bfa_fcs_auth_reinit(struct bfa_fcs_s *port);
81
82#endif /* __BFA_FCS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
new file mode 100644
index 000000000000..4ffd2242d3de
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_FABRIC_H__
19#define __BFA_FCS_FABRIC_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_vf.h>
25#include <cs/bfa_q.h>
26#include <cs/bfa_sm.h>
27#include <defs/bfa_defs_pport.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <protocol/fc_sp.h>
30#include <fcs/bfa_fcs_auth.h>
31
32/*
33 * forward declaration
34 */
35struct bfad_vf_s;
36
/* Fabric/link topology type (see bfa_fcs_fabric_s::fab_type). */
enum bfa_fcs_fabric_type {
	BFA_FCS_FABRIC_UNKNOWN = 0,	/* topology not yet determined */
	BFA_FCS_FABRIC_SWITCHED = 1,	/* attached to a switched fabric */
	BFA_FCS_FABRIC_PLOOP = 2,	/* loop (PLOOP) topology */
	BFA_FCS_FABRIC_N2N = 3,		/* point-to-point (N2N) topology */
};
43
44
/**
 * Fabric instance state. Per the design note accompanying bfa_fcs_vf_t in
 * this header, the same structure also represents a virtual fabric (VF).
 */
struct bfa_fcs_fabric_s {
	struct list_head qe;		/* queue element */
	bfa_sm_t	 sm;		/* state machine */
	struct bfa_fcs_s *fcs;		/* FCS instance */
	struct bfa_fcs_port_s bport;	/* base logical port */
	enum bfa_fcs_fabric_type fab_type; /* fabric type */
	enum bfa_pport_type oper_type;	/* current link topology */
	u8	 is_vf;			/* is virtual fabric? */
	u8	 is_npiv;		/* is NPIV supported? */
	u8	 is_auth;		/* is Security/Auth supported? */
	u16	 bb_credit;		/* BB credit from fabric */
	u16	 vf_id;			/* virtual fabric ID */
	u16	 num_vports;		/* num vports */
	u16	 rsvd;			/* reserved/padding */
	struct list_head vport_q;	/* queue of virtual ports */
	struct list_head vf_q;		/* queue of virtual fabrics */
	struct bfad_vf_s *vf_drv;	/* driver vf structure */
	struct bfa_timer_s link_timer;	/* Link Failure timer. Vport */
	wwn_t	 fabric_name;		/* attached fabric name */
	bfa_boolean_t auth_reqd;	/* authentication required */
	struct bfa_timer_s delay_timer;	/* delay timer */
	union {
		u16	swp_vfid;	/* switch port VF id */
	} event_arg;
	struct bfa_fcs_auth_s auth;	/* authentication config */
	struct bfa_wc_s wc;		/* wait counter for delete */
	struct bfa_vf_stats_s stats;	/* fabric/vf stats */
	struct bfa_lps_s *lps;		/* lport login services */
	u8	fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
							   * fabric's ip addr
							   */
};
77
/* TRUE if the fabric supports NPIV. Expansion is fully parenthesized so
 * the macro is safe inside larger expressions (CERT PRE02-C). */
#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
/* TRUE if attached to a switched fabric. */
#define bfa_fcs_fabric_is_switched(__f)  \
	((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)

/**
 * The design calls for a single implementation of base fabric and vf.
 */
#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
86
/* Placeholder VF event payload -- no event data defined yet. */
struct bfa_vf_event_s {
	u32	undefined;
};
90
/**
 * bfa fcs vf public functions
 *
 * bfa_fcs_vf_t aliases struct bfa_fcs_fabric_s (see above).
 */
bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
/* Create/delete a virtual fabric; vf_drv is the driver's peer object. */
bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
			u16 vf_id, struct bfa_port_cfg_s *port_cfg,
			struct bfad_vf_s *vf_drv);
bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_start(bfa_fcs_vf_t *vf);
bfa_status_t bfa_fcs_vf_stop(bfa_fcs_vf_t *vf);
/* Enumerate VF ids; *nvfs is in/out (capacity in, count out) --
 * NOTE(review): confirm in/out convention against the implementation. */
void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
			struct bfa_vf_stats_s *vf_stats);
void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
struct bfad_vf_s *bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf);
111
112#endif /* __BFA_FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
new file mode 100644
index 000000000000..e719f2c3eb35
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fcpim.h BFA FCS FCP Initiator Mode interfaces/defines.
20 */
21
22#ifndef __BFA_FCS_FCPIM_H__
23#define __BFA_FCS_FCPIM_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_itnim.h>
27#include <fcs/bfa_fcs.h>
28#include <fcs/bfa_fcs_rport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <bfa_fcpim.h>
31
32/*
33 * forward declarations
34 */
35struct bfad_itnim_s;
36
/* FCS view of an initiator-target nexus (ITN) in initiator mode. */
struct bfa_fcs_itnim_s {
	bfa_sm_t	sm;		/* state machine */
	struct bfa_fcs_rport_s *rport;	/* parent remote rport */
	struct bfad_itnim_s *itnim_drv;	/* driver peer instance */
	struct bfa_fcs_s *fcs;		/* fcs instance */
	struct bfa_timer_s timer;	/* timer functions */
	struct bfa_itnim_s *bfa_itnim;	/* BFA itnim struct */
	bfa_boolean_t	seq_rec;	/* seq recovery support */
	bfa_boolean_t	rec_support;	/* REC supported */
	bfa_boolean_t	conf_comp;	/* FCP_CONF support */
	bfa_boolean_t	task_retry_id;	/* task retry id supp */
	struct bfa_fcxp_wqe_s fcxp_wqe;	/* wait qelem for fcxp */
	struct bfa_fcxp_s *fcxp;	/* FCXP in use */
	struct bfa_itnim_stats_s stats;	/* itn statistics */
};
52
53
54static inline struct bfad_port_s *
55bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
56{
57 return itnim->rport->port->bfad_port;
58}
59
60
61static inline struct bfa_fcs_port_s *
62bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
63{
64 return itnim->rport->port;
65}
66
67
68static inline wwn_t
69bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
70{
71 return itnim->rport->nwwn;
72}
73
74
75static inline wwn_t
76bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
77{
78 return itnim->rport->pwwn;
79}
80
81
82static inline u32
83bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
84{
85 return itnim->rport->pid;
86}
87
88
89static inline u32
90bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
91{
92 return itnim->rport->maxfrsize;
93}
94
95
96static inline enum fc_cos
97bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
98{
99 return itnim->rport->fc_cos;
100}
101
102
103static inline struct bfad_itnim_s *
104bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
105{
106 return itnim->itnim_drv;
107}
108
109
110static inline struct bfa_itnim_s *
111bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
112{
113 return itnim->bfa_itnim;
114}
115
/**
 * bfa fcs FCP Initiator mode API functions
 */
void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
			struct bfa_itnim_attr_s *attr);
void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
			struct bfa_itnim_stats_s *stats);
/* Find the itnim on 'port' whose remote port WWN matches rpwwn;
 * returns NULL semantics presumed on no match -- confirm in implementation. */
struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port,
			wwn_t rpwwn);
/* Lookup-by-WWN convenience variants of the getters above. */
bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
			struct bfa_itnim_attr_s *attr);
bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
			struct bfa_itnim_stats_s *stats);
bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port,
			wwn_t rpwwn);
131#endif /* __BFA_FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
new file mode 100644
index 000000000000..4441fffc9c82
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fdmi.h BFA fcs fdmi module public interface
20 */
21
22#ifndef __BFA_FCS_FDMI_H__
23#define __BFA_FCS_FDMI_H__
24#include <bfa_os_inc.h>
25#include <protocol/fdmi.h>
26
/* All transfer speeds advertised through FDMI. (The historical misspelling
 * "SUPORTED" is kept: renaming would break existing users of the macro.) */
#define BFA_FCS_FDMI_SUPORTED_SPEEDS  (FDMI_TRANS_SPEED_1G  | \
				       FDMI_TRANS_SPEED_2G |  \
				       FDMI_TRANS_SPEED_4G |  \
				       FDMI_TRANS_SPEED_8G)
31
/*
 * HBA Attribute Block : BFA internal representation. Note : Some variable
 * sizes have been trimmed to suit BFA. For Ex : Model will be "Brocade".
 * Based on this the size has been reduced to 16 bytes from the standard's
 * 64 bytes.
 */
struct bfa_fcs_fdmi_hba_attr_s {
	wwn_t           node_name;
	u8         manufacturer[64];
	u8         serial_num[64];
	u8         model[16];
	u8         model_desc[256];
	u8         hw_version[8];
	u8         driver_version[8];
	u8         option_rom_ver[BFA_VERSION_LEN];
	u8         fw_version[8];
	u8         os_name[256];
	u32        max_ct_pyld;	/* max CT payload size */
};
50
/*
 * Port Attribute Block (FDMI RPA payload, BFA internal representation).
 */
struct bfa_fcs_fdmi_port_attr_s {
	u8         supp_fc4_types[32];	/* supported FC4 types */
	u32        supp_speed;	/* supported speed */
	u32        curr_speed;	/* current Speed */
	u32        max_frm_size;	/* max frame size */
	u8         os_device_name[256];	/* OS device Name */
	u8         host_name[256];	/* host name */
};
62
63#endif /* __BFA_FCS_FDMI_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
new file mode 100644
index 000000000000..b85cba884b96
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -0,0 +1,226 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_port.h BFA fcs port module public interface
20 */
21
22#ifndef __BFA_FCS_PORT_H__
23#define __BFA_FCS_PORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_pport.h>
28#include <defs/bfa_defs_rport.h>
29#include <cs/bfa_q.h>
30#include <bfa_svc.h>
31#include <cs/bfa_wc.h>
32
33struct bfa_fcs_s;
34struct bfa_fcs_fabric_s;
35
/*
 * @todo : need to move to a global config file.
 * Maximum Vports supported per physical port or vf
 * (per-ASIC limits: CB vs CT).
 */
#define BFA_FCS_MAX_VPORTS_SUPP_CB  255
#define BFA_FCS_MAX_VPORTS_SUPP_CT  191

/*
 * @todo : need to move to a global config file.
 * Maximum Rports supported per port (physical/logical).
 */
#define BFA_FCS_MAX_RPORTS_SUPP  256	/* @todo : tentative value */
48
49
/* Per-lport NS (presumably FC name-server) component state. */
struct bfa_fcs_port_ns_s {
	bfa_sm_t        sm;		/* state machine */
	struct bfa_timer_s timer;
	struct bfa_fcs_port_s *port;	/* parent port */
	struct bfa_fcxp_s *fcxp;
	struct bfa_fcxp_wqe_s fcxp_wqe;
};
57
58
/* Per-lport SCN (state change notification) component state. */
struct bfa_fcs_port_scn_s {
	bfa_sm_t        sm;		/* state machine */
	struct bfa_timer_s timer;
	struct bfa_fcs_port_s *port;	/* parent port */
	struct bfa_fcxp_s *fcxp;
	struct bfa_fcxp_wqe_s fcxp_wqe;
};
66
67
/* FDMI component state; child of the MS component (see parent pointer). */
struct bfa_fcs_port_fdmi_s {
	bfa_sm_t        sm;		/* state machine */
	struct bfa_timer_s timer;
	struct bfa_fcs_port_ms_s *ms;	/* parent ms */
	struct bfa_fcxp_s *fcxp;
	struct bfa_fcxp_wqe_s fcxp_wqe;
	u8         retry_cnt;	/* retry count */
	u8         rsvd[3];
};
77
78
/* Per-lport MS (management service) component state; embeds FDMI. */
struct bfa_fcs_port_ms_s {
	bfa_sm_t        sm;		/* state machine */
	struct bfa_timer_s timer;
	struct bfa_fcs_port_s *port;	/* parent port */
	struct bfa_fcxp_s *fcxp;
	struct bfa_fcxp_wqe_s fcxp_wqe;
	struct bfa_fcs_port_fdmi_s fdmi;	/* FDMI component of MS */
	u8         retry_cnt;	/* retry count */
	u8         rsvd[3];
};
89
90
/* Fabric-topology state of a port: NS, SCN and MS sub-components. */
struct bfa_fcs_port_fab_s {
	struct bfa_fcs_port_ns_s ns;	/* NS component of port */
	struct bfa_fcs_port_scn_s scn;	/* scn component of port */
	struct bfa_fcs_port_ms_s ms;	/* MS component of port */
};
96
97
98
/* Max ALPAs on a loop (loop addressing allows up to 127 devices). */
#define	MAX_ALPA_COUNT	127

/* Loop-topology state of a port. */
struct bfa_fcs_port_loop_s {
	u8         num_alpa;	/* Num of ALPA entries in the map */
	u8         alpa_pos_map[MAX_ALPA_COUNT];	/* ALPA Positional
							 * Map */
	struct bfa_fcs_port_s *port;	/* parent port */
};
107
108
109
/* Point-to-point (N2N) topology state of a port. */
struct bfa_fcs_port_n2n_s {
	u32        rsvd;
	u16        reply_oxid;	/* ox_id from the req flogi to be
					 * used in flogi acc */
	wwn_t           rem_port_wwn;	/* Attached port's wwn */
};
116
117
/* Topology-specific port state; which member is active follows the
 * fabric type (fabric / loop / point-to-point). */
union bfa_fcs_port_topo_u {
	struct bfa_fcs_port_fab_s pfab;
	struct bfa_fcs_port_loop_s ploop;
	struct bfa_fcs_port_n2n_s pn2n;
};
123
124
/* Logical port (lport) instance -- base port of a fabric or a vport. */
struct bfa_fcs_port_s {
	struct list_head qe;	/* used by port/vport */
	bfa_sm_t        sm;	/* state machine */
	struct bfa_fcs_fabric_s *fabric;	/* parent fabric */
	struct bfa_port_cfg_s port_cfg;	/* port configuration */
	struct bfa_timer_s link_timer;	/* timer for link offline */
	u32 pid:24;	/* FC address (24-bit port id bitfield) */
	u8  lp_tag;	/* lport tag */
	u16 num_rports;	/* Num of r-ports */
	struct list_head rport_q;	/* queue of discovered r-ports */
	struct bfa_fcs_s *fcs;	/* FCS instance */
	union bfa_fcs_port_topo_u port_topo;	/* fabric/loop/n2n details */
	struct bfad_port_s *bfad_port;	/* driver peer instance */
	struct bfa_fcs_vport_s *vport;	/* NULL for base ports */
	struct bfa_fcxp_s *fcxp;
	struct bfa_fcxp_wqe_s fcxp_wqe;
	struct bfa_port_stats_s stats;
	struct bfa_wc_s wc;	/* waiting counter for events */
};

/* Alias used where "lport" naming is clearer than "port". */
#define bfa_fcs_lport_t struct bfa_fcs_port_s
146
/**
 * Symbolic Name related defines.
 * Total bytes 255.
 * Physical Port's symbolic name: 128 bytes.
 * For Vports, the Vport's symbolic name is appended to the Physical port's
 * Symbolic Name.
 *
 * Physical Port's symbolic name Format : (Total 128 bytes)
 * Adapter Model number/name : 12 bytes
 * Driver Version            : 10 bytes
 * Host Machine Name         : 30 bytes
 * Host OS Info              : 48 bytes
 * Host OS PATCH Info        : 16 bytes
 * (remaining 12 bytes reserved, to be used for the separator)
 */
#define BFA_FCS_PORT_SYMBNAME_SEPARATOR		" | "

#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ		12
#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ	10
#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ	30
#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ		48
#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ	16
169
/**
 * Accessors over struct bfa_fcs_port_s / its port_cfg.
 * bfa_fcs_port_get_fcid returns the 24-bit FC address.
 */
#define bfa_fcs_port_get_fcid(_lport)	((_lport)->pid)
#define bfa_fcs_port_get_pwwn(_lport)	((_lport)->port_cfg.pwwn)
#define bfa_fcs_port_get_nwwn(_lport)	((_lport)->port_cfg.nwwn)
#define bfa_fcs_port_get_psym_name(_lport)	((_lport)->port_cfg.sym_name)
/* Role tests against the configured role bit-mask. */
#define bfa_fcs_port_is_initiator(_lport)	\
	((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
#define bfa_fcs_port_is_target(_lport)	\
	((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
/* NULL-safe rport count. */
#define bfa_fcs_port_get_nrports(_lport)	\
	((_lport) ? (_lport)->num_rports : 0)
183
184static inline struct bfad_port_s *
185bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port)
186{
187 return port->bfad_port;
188}
189
190
/* Fabric-derived lport attributes. Expansions are fully parenthesized so
 * the macros compose safely in larger expressions (CERT PRE02-C); the
 * originals expanded to an unparenthesized member chain. */
#define bfa_fcs_port_get_opertype(_lport)	((_lport)->fabric->oper_type)

#define bfa_fcs_port_get_fabric_name(_lport)	((_lport)->fabric->fabric_name)

#define bfa_fcs_port_get_fabric_ipaddr(_lport)	\
	((_lport)->fabric->fabric_ip_addr)
198
/**
 * bfa fcs port public functions
 */
void bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs,
			struct bfa_port_cfg_s *port_cfg);
struct bfa_fcs_port_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
/* Fill rport_wwns[] with discovered rport WWNs; *nrports is in/out --
 * NOTE(review): confirm in/out convention against the implementation. */
void bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port,
			wwn_t rport_wwns[], int *nrports);

wwn_t bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn,
			int index, int nrports, bfa_boolean_t bwwn);

struct bfa_fcs_port_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
			u16 vf_id, wwn_t lpwwn);

void bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
			struct bfa_port_info_s *port_info);
void bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
			struct bfa_port_attr_s *port_attr);
void bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
			struct bfa_port_stats_s *port_stats);
void bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port);
enum bfa_pport_speed bfa_fcs_port_get_rport_max_speed(
			struct bfa_fcs_port_s *port);
void bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
void bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
225
226#endif /* __BFA_FCS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
new file mode 100644
index 000000000000..702b95b76c2d
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_RPORT_H__
19#define __BFA_FCS_RPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <cs/bfa_q.h>
23#include <fcs/bfa_fcs.h>
24#include <defs/bfa_defs_rport.h>
25
26#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
27/*
28 * forward declarations
29 */
30struct bfad_rport_s;
31
32struct bfa_fcs_itnim_s;
33struct bfa_fcs_tin_s;
34struct bfa_fcs_iprp_s;
35
/* Rport Features (RPF) -- RPSC speed discovery state for one rport. */
struct bfa_fcs_rpf_s {
	bfa_sm_t        sm;	/* state machine */
	struct bfa_fcs_rport_s *rport;	/* parent rport */
	struct bfa_timer_s timer;	/* general purpose timer */
	struct bfa_fcxp_s *fcxp;	/* FCXP needed for discarding */
	struct bfa_fcxp_wqe_s fcxp_wqe;	/* fcxp wait queue element */
	int             rpsc_retries;	/* max RPSC retry attempts */
	enum bfa_pport_speed rpsc_speed;	/* Current Speed from RPSC.
						 * 0 if RPSC fails */
	enum bfa_pport_speed assigned_speed;	/* Speed assigned by the user.
						 * will be used if RPSC is not
						 * supported by the rport */
};
50
/* FCS remote port (rport) instance. */
struct bfa_fcs_rport_s {
	struct list_head qe;	/* used by port/vport */
	struct bfa_fcs_port_s *port;	/* parent FCS port */
	struct bfa_fcs_s *fcs;	/* fcs instance */
	struct bfad_rport_s *rp_drv;	/* driver peer instance */
	u32        pid;	/* port ID of rport */
	u16        maxfrsize;	/* maximum frame size */
	u16        reply_oxid;	/* OX_ID of inbound requests */
	enum fc_cos     fc_cos;	/* FC classes of service supp */
	bfa_boolean_t   cisc;	/* CISC capable device */
	wwn_t           pwwn;	/* port wwn of rport */
	wwn_t           nwwn;	/* node wwn of rport */
	struct bfa_rport_symname_s psym_name;	/* port symbolic name */
	bfa_sm_t        sm;	/* state machine */
	struct bfa_timer_s timer;	/* general purpose timer */
	struct bfa_fcs_itnim_s *itnim;	/* ITN initiator mode role */
	struct bfa_fcs_tin_s *tin;	/* ITN target mode role -- original
					 * comment said "initiator";
					 * NOTE(review): confirm */
	struct bfa_fcs_iprp_s *iprp;	/* IP/FC role */
	struct bfa_rport_s *bfa_rport;	/* BFA Rport */
	struct bfa_fcxp_s *fcxp;	/* FCXP needed for discarding */
	int             plogi_retries;	/* max plogi retry attempts */
	int             ns_retries;	/* max NS query retry attempts */
	struct bfa_fcxp_wqe_s fcxp_wqe;	/* fcxp wait queue element */
	struct bfa_rport_stats_s stats;	/* rport stats */
	enum bfa_rport_function scsi_function;	/* Initiator/Target */
	struct bfa_fcs_rpf_s rpf;	/* Rport features module */
};
78
79static inline struct bfa_rport_s *
80bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
81{
82 return rport->bfa_rport;
83}
84
/**
 * bfa fcs rport API functions
 */
/* Add/remove an rport; rport_drv is the driver's peer object. */
bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
			struct bfa_fcs_rport_s *rport,
			struct bfad_rport_s *rport_drv);
bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
			struct bfa_rport_attr_s *attr);
void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
			struct bfa_rport_stats_s *stats);
void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
/* Lookup by port WWN / node WWN respectively. */
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port,
			wwn_t rpwwn);
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
			struct bfa_fcs_port_s *port, wwn_t rnwwn);
/* Set the module-wide rport delete timeout (seconds; default
 * BFA_FCS_RPORT_DEF_DEL_TIMEOUT). */
void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
			enum bfa_pport_speed speed);
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
new file mode 100644
index 000000000000..cd33f2cd5c34
--- /dev/null
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.h BFA fcs vport module public interface
20 */
21
22#ifndef __BFA_FCS_VPORT_H__
23#define __BFA_FCS_VPORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_vport.h>
28#include <fcs/bfa_fcs.h>
29#include <fcb/bfa_fcb_vport.h>
30
/* Virtual port (vport) instance; wraps a logical port. */
struct bfa_fcs_vport_s {
	struct list_head qe;	/* queue elem */
	bfa_sm_t        sm;	/* state machine */
	bfa_fcs_lport_t lport;	/* logical port */
	struct bfa_timer_s timer;	/* general purpose timer */
	struct bfad_vport_s *vport_drv;	/* Driver private */
	struct bfa_vport_stats_s vport_stats;	/* vport statistics */
	struct bfa_lps_s *lps;	/* Lport login service */
	int             fdisc_retries;	/* FDISC retry count */
};
41
/* Return the embedded logical port of a vport. The original expansion
 * referenced a nonexistent member "port"; struct bfa_fcs_vport_s declares
 * the logical port as "lport", so the macro could never compile when used.
 * The argument is also parenthesized for safe expansion. */
#define bfa_fcs_vport_get_port(vport) \
	((struct bfa_fcs_port_s  *)(&(vport)->lport))
44
/**
 * bfa fcs vport public functions
 */
/* Create a vport on virtual fabric vf_id; vport_drv is the driver peer. */
bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
			struct bfa_fcs_s *fcs, u16 vf_id,
			struct bfa_port_cfg_s *port_cfg,
			struct bfad_vport_s *vport_drv);
bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
			struct bfa_vport_attr_s *vport_attr);
void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
			struct bfa_vport_stats_s *vport_stats);
void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
/* Lookup a vport by (vf_id, port WWN). */
struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
			u16 vf_id, wwn_t vpwwn);
63#endif /* __BFA_FCS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_fcs.h b/drivers/scsi/bfa/include/log/bfa_log_fcs.h
new file mode 100644
index 000000000000..b6f5df8827f8
--- /dev/null
+++ b/drivers/scsi/bfa/include/log/bfa_log_fcs.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for FCS Module
20 */
21#ifndef __BFA_LOG_FCS_H__
22#define __BFA_LOG_FCS_H__
23#include <cs/bfa_log.h>
/* FCS message IDs: module id (BFA_LOG_FCS_ID) in the high bits, message
 * sequence number in the low bits. */
#define BFA_LOG_FCS_FABRIC_NOSWITCH 	\
	(((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 1)
#define BFA_LOG_FCS_FABRIC_ISOLATED 	\
	(((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 2)
28#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
new file mode 100644
index 000000000000..0412aea2ec30
--- /dev/null
+++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for HAL Module */
19#ifndef __BFA_LOG_HAL_H__
20#define __BFA_LOG_HAL_H__
21#include <cs/bfa_log.h>
/* HAL message IDs: module id (BFA_LOG_HAL_ID) in the high bits, message
 * sequence number in the low bits. */
#define BFA_LOG_HAL_ASSERT 	\
	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 1)
#define BFA_LOG_HAL_HEARTBEAT_FAILURE 	\
	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 2)
#define BFA_LOG_HAL_FCPIM_PARM_INVALID 	\
	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
#define BFA_LOG_HAL_SM_ASSERT 	\
	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
30#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
new file mode 100644
index 000000000000..317c0547ee16
--- /dev/null
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for LINUX Module */
19#ifndef __BFA_LOG_LINUX_H__
20#define __BFA_LOG_LINUX_H__
21#include <cs/bfa_log.h>
/* Linux-driver message IDs: module id (BFA_LOG_LINUX_ID) in the high
 * bits, message sequence number in the low bits. */
#define BFA_LOG_LINUX_DEVICE_CLAIMED 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 1)
#define BFA_LOG_LINUX_HASH_INIT_FAILED 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 2)
#define BFA_LOG_LINUX_SYSFS_FAILED 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 3)
#define BFA_LOG_LINUX_MEM_ALLOC_FAILED 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 4)
#define BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 5)
#define BFA_LOG_LINUX_ITNIM_FREE 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 6)
#define BFA_LOG_LINUX_ITNIM_ONLINE 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 7)
#define BFA_LOG_LINUX_ITNIM_OFFLINE 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 8)
#define BFA_LOG_LINUX_SCSI_HOST_FREE 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 9)
#define BFA_LOG_LINUX_SCSI_ABORT 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
#define BFA_LOG_LINUX_SCSI_ABORT_COMP 	\
	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
44#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h b/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
new file mode 100644
index 000000000000..809a95f7afe2
--- /dev/null
+++ b/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for WDRV Module
20 */
21#ifndef __BFA_LOG_WDRV_H__
22#define __BFA_LOG_WDRV_H__
23#include <cs/bfa_log.h>
/* WDRV message IDs: module id (BFA_LOG_WDRV_ID) in the high bits, message
 * sequence number in the low bits. */
#define BFA_LOG_WDRV_IOC_INIT_ERROR 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 1)
#define BFA_LOG_WDRV_IOC_INTERNAL_ERROR 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 2)
#define BFA_LOG_WDRV_IOC_START_ERROR 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 3)
#define BFA_LOG_WDRV_IOC_STOP_ERROR 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 4)
#define BFA_LOG_WDRV_INSUFFICIENT_RESOURCES 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 5)
#define BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR 	\
	(((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 6)
36#endif
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h
new file mode 100644
index 000000000000..c59d6630b070
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/ct.h
@@ -0,0 +1,492 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __CT_H__
19#define __CT_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25struct ct_hdr_s{
26 u32 rev_id:8; /* Revision of the CT */
27 u32 in_id:24; /* Initiator Id */
28 u32 gs_type:8; /* Generic service Type */
29 u32 gs_sub_type:8; /* Generic service sub type */
30 u32 options:8; /* options */
31 u32 rsvrd:8; /* reserved */
32 u32 cmd_rsp_code:16;/* ct command/response code */
33 u32 max_res_size:16;/* maximum/residual size */
34 u32 frag_id:8; /* fragment ID */
35 u32 reason_code:8; /* reason code */
36 u32 exp_code:8; /* explanation code */
37 u32 vendor_unq:8; /* vendor unique */
38};
39
40/*
41 * defines for the Revision
42 */
43enum {
44 CT_GS3_REVISION = 0x01,
45};
46
47/*
48 * defines for gs_type
49 */
50enum {
51 CT_GSTYPE_KEYSERVICE = 0xF7,
52 CT_GSTYPE_ALIASSERVICE = 0xF8,
53 CT_GSTYPE_MGMTSERVICE = 0xFA,
54 CT_GSTYPE_TIMESERVICE = 0xFB,
55 CT_GSTYPE_DIRSERVICE = 0xFC,
56};
57
58/*
59 * defines for gs_sub_type for gs type directory service
60 */
61enum {
62 CT_GSSUBTYPE_NAMESERVER = 0x02,
63};
64
65/*
66 * defines for gs_sub_type for gs type management service
67 */
68enum {
69 CT_GSSUBTYPE_CFGSERVER = 0x01,
70 CT_GSSUBTYPE_UNZONED_NS = 0x02,
71 CT_GSSUBTYPE_ZONESERVER = 0x03,
72 CT_GSSUBTYPE_LOCKSERVER = 0x04,
73 CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
74};
75
76/*
77 * defines for CT response code field
78 */
79enum {
80 CT_RSP_REJECT = 0x8001,
81 CT_RSP_ACCEPT = 0x8002,
82};
83
84/*
85 * defintions for CT reason code
86 */
87enum {
88 CT_RSN_INV_CMD = 0x01,
89 CT_RSN_INV_VER = 0x02,
90 CT_RSN_LOGIC_ERR = 0x03,
91 CT_RSN_INV_SIZE = 0x04,
92 CT_RSN_LOGICAL_BUSY = 0x05,
93 CT_RSN_PROTO_ERR = 0x07,
94 CT_RSN_UNABLE_TO_PERF = 0x09,
95 CT_RSN_NOT_SUPP = 0x0B,
96 CT_RSN_SERVER_NOT_AVBL = 0x0D,
97 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
98 CT_RSN_VENDOR_SPECIFIC = 0xFF,
99
100};
101
102/*
103 * definitions for explanations code for Name server
104 */
105enum {
106 CT_NS_EXP_NOADDITIONAL = 0x00,
107 CT_NS_EXP_ID_NOT_REG = 0x01,
108 CT_NS_EXP_PN_NOT_REG = 0x02,
109 CT_NS_EXP_NN_NOT_REG = 0x03,
110 CT_NS_EXP_CS_NOT_REG = 0x04,
111 CT_NS_EXP_IPN_NOT_REG = 0x05,
112 CT_NS_EXP_IPA_NOT_REG = 0x06,
113 CT_NS_EXP_FT_NOT_REG = 0x07,
114 CT_NS_EXP_SPN_NOT_REG = 0x08,
115 CT_NS_EXP_SNN_NOT_REG = 0x09,
116 CT_NS_EXP_PT_NOT_REG = 0x0A,
117 CT_NS_EXP_IPP_NOT_REG = 0x0B,
118 CT_NS_EXP_FPN_NOT_REG = 0x0C,
119 CT_NS_EXP_HA_NOT_REG = 0x0D,
120 CT_NS_EXP_FD_NOT_REG = 0x0E,
121 CT_NS_EXP_FF_NOT_REG = 0x0F,
122 CT_NS_EXP_ACCESSDENIED = 0x10,
123 CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
124 CT_NS_EXP_DATABASEEMPTY = 0x12,
125 CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
126 CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
127 CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
128 CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
129};
130
131/*
132 * defintions for the explanation code for all servers
133 */
134enum {
135 CT_EXP_AUTH_EXCEPTION = 0xF1,
136 CT_EXP_DB_FULL = 0xF2,
137 CT_EXP_DB_EMPTY = 0xF3,
138 CT_EXP_PROCESSING_REQ = 0xF4,
139 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
140 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
141};
142
143/*
144 * Command codes for Name server
145 */
146enum {
147 GS_GID_PN = 0x0121, /* Get Id on port name */
148 GS_GPN_ID = 0x0112, /* Get port name on ID */
149 GS_GNN_ID = 0x0113, /* Get node name on ID */
150 GS_GID_FT = 0x0171, /* Get Id on FC4 type */
151 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
152 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
153 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
154 GS_RPN_ID = 0x0212, /* Register port name */
155 GS_RNN_ID = 0x0213, /* Register node name */
156 GS_RCS_ID = 0x0214, /* Register class of service */
157 GS_RPT_ID = 0x021A, /* Register port type */
158 GS_GA_NXT = 0x0100, /* Get all next */
159 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
160};
161
162struct fcgs_id_req_s{
163 u32 rsvd:8;
164 u32 dap:24; /* port identifier */
165};
166#define fcgs_gpnid_req_t struct fcgs_id_req_s
167#define fcgs_gnnid_req_t struct fcgs_id_req_s
168#define fcgs_gspnid_req_t struct fcgs_id_req_s
169
170struct fcgs_gidpn_req_s{
171 wwn_t port_name; /* port wwn */
172};
173
174struct fcgs_gidpn_resp_s{
175 u32 rsvd:8;
176 u32 dap:24; /* port identifier */
177};
178
179/**
180 * RFT_ID
181 */
182struct fcgs_rftid_req_s {
183 u32 rsvd:8;
184 u32 dap:24; /* port identifier */
185 u32 fc4_type[8]; /* fc4 types */
186};
187
188/**
189 * RFF_ID : Register FC4 features.
190 */
191
192#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
193#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
194
195struct fcgs_rffid_req_s{
196 u32 rsvd :8;
197 u32 dap :24; /* port identifier */
198 u32 rsvd1 :16;
199 u32 fc4ftr_bits :8; /* fc4 feature bits */
200 u32 fc4_type :8; /* corresponding FC4 Type */
201};
202
203/**
204 * GID_FT Request
205 */
206struct fcgs_gidft_req_s{
207 u8 reserved;
208 u8 domain_id; /* domain, 0 - all fabric */
209 u8 area_id; /* area, 0 - whole domain */
210 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
211}; /* GID_FT Request */
212
213/**
214 * GID_FT Response
215 */
216struct fcgs_gidft_resp_s {
217 u8 last:1; /* last port identifier flag */
218 u8 reserved:7;
219 u32 pid:24; /* port identifier */
220}; /* GID_FT Response */
221
222/**
223 * RSPN_ID
224 */
225struct fcgs_rspnid_req_s{
226 u32 rsvd:8;
227 u32 dap:24; /* port identifier */
228 u8 spn_len; /* symbolic port name length */
229 u8 spn[256]; /* symbolic port name */
230};
231
232/**
233 * RPN_ID
234 */
235struct fcgs_rpnid_req_s{
236 u32 rsvd:8;
237 u32 port_id:24;
238 wwn_t port_name;
239};
240
241/**
242 * RNN_ID
243 */
244struct fcgs_rnnid_req_s{
245 u32 rsvd:8;
246 u32 port_id:24;
247 wwn_t node_name;
248};
249
250/**
251 * RCS_ID
252 */
253struct fcgs_rcsid_req_s{
254 u32 rsvd:8;
255 u32 port_id:24;
256 u32 cos;
257};
258
259/**
260 * RPT_ID
261 */
262struct fcgs_rptid_req_s{
263 u32 rsvd:8;
264 u32 port_id:24;
265 u32 port_type:8;
266 u32 rsvd1:24;
267};
268
269/**
270 * GA_NXT Request
271 */
272struct fcgs_ganxt_req_s{
273 u32 rsvd:8;
274 u32 port_id:24;
275};
276
277/**
278 * GA_NXT Response
279 */
280struct fcgs_ganxt_rsp_s{
281 u32 port_type:8; /* Port Type */
282 u32 port_id:24; /* Port Identifier */
283 wwn_t port_name; /* Port Name */
284 u8 spn_len; /* Length of Symbolic Port Name */
285 char spn[255]; /* Symbolic Port Name */
286 wwn_t node_name; /* Node Name */
287 u8 snn_len; /* Length of Symbolic Node Name */
288 char snn[255]; /* Symbolic Node Name */
289 u8 ipa[8]; /* Initial Process Associator */
290 u8 ip[16]; /* IP Address */
291 u32 cos; /* Class of Service */
292 u32 fc4types[8]; /* FC-4 TYPEs */
293 wwn_t fabric_port_name;
294 /* Fabric Port Name */
295 u32 rsvd:8; /* Reserved */
296 u32 hard_addr:24; /* Hard Address */
297};
298
299/*
300 * Fabric Config Server
301 */
302
303/*
304 * Command codes for Fabric Configuration Server
305 */
306enum {
307 GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
308 GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
309 GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
310 GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
311};
312
313/*
314 * Source or Destination Port Tags.
315 */
316enum {
317 GS_FTRACE_TAG_NPORT_ID = 1,
318 GS_FTRACE_TAG_NPORT_NAME = 2,
319};
320
321/*
322* Port Value : Could be a Port id or wwn
323 */
324union fcgs_port_val_u{
325 u32 nport_id;
326 wwn_t nport_wwn;
327};
328
329#define GS_FTRACE_MAX_HOP_COUNT 20
330#define GS_FTRACE_REVISION 1
331
332/*
333 * Ftrace Related Structures.
334 */
335
336/*
337 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
338 */
339enum {
340 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
341 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
342 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
343 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
344 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
345 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
346 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
347 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
348 GS_FTRACE_STR_NO_ADDL_EXPLN,
349 GS_FTRACE_STR_FABRIC_BUSY,
350 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
351 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
352 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
353};
354
355/*
356 * Ftrace Request
357 */
358struct fcgs_ftrace_req_s{
359 u32 revision;
360 u16 src_port_tag; /* Source Port tag */
361 u16 src_port_len; /* Source Port len */
362 union fcgs_port_val_u src_port_val; /* Source Port value */
363 u16 dst_port_tag; /* Destination Port tag */
364 u16 dst_port_len; /* Destination Port len */
365 union fcgs_port_val_u dst_port_val; /* Destination Port value */
366 u32 token;
367 u8 vendor_id[8]; /* T10 Vendor Identifier */
368 u8 vendor_info[8]; /* Vendor specific Info */
369 u32 max_hop_cnt; /* Max Hop Count */
370};
371
372/*
373 * Path info structure
374 */
375struct fcgs_ftrace_path_info_s{
376 wwn_t switch_name; /* Switch WWN */
377 u32 domain_id;
378 wwn_t ingress_port_name; /* Ingress ports wwn */
379 u32 ingress_phys_port_num; /* Ingress ports physical port
380 * number
381 */
382 wwn_t egress_port_name; /* Ingress ports wwn */
383 u32 egress_phys_port_num; /* Ingress ports physical port
384 * number
385 */
386};
387
388/*
389 * Ftrace Acc Response
390 */
391struct fcgs_ftrace_resp_s{
392 u32 revision;
393 u32 token;
394 u8 vendor_id[8]; /* T10 Vendor Identifier */
395 u8 vendor_info[8]; /* Vendor specific Info */
396 u32 str_rej_reason_code; /* STR Reject Reason Code */
397 u32 num_path_info_entries; /* No. of path info entries */
398 /*
399 * path info entry/entries.
400 */
401 struct fcgs_ftrace_path_info_s path_info[1];
402
403};
404
405/*
406* Fabric Config Server : FCPing
407 */
408
409/*
410 * FC Ping Request
411 */
412struct fcgs_fcping_req_s{
413 u32 revision;
414 u16 port_tag;
415 u16 port_len; /* Port len */
416 union fcgs_port_val_u port_val; /* Port value */
417 u32 token;
418};
419
420/*
421 * FC Ping Response
422 */
423struct fcgs_fcping_resp_s{
424 u32 token;
425};
426
427/*
428 * Command codes for zone server query.
429 */
430enum {
431 ZS_GZME = 0x0124, /* Get zone member extended */
432};
433
434/*
435 * ZS GZME request
436 */
437#define ZS_GZME_ZNAMELEN 32
438struct zs_gzme_req_s{
439 u8 znamelen;
440 u8 rsvd[3];
441 u8 zname[ZS_GZME_ZNAMELEN];
442};
443
444enum zs_mbr_type{
445 ZS_MBR_TYPE_PWWN = 1,
446 ZS_MBR_TYPE_DOMPORT = 2,
447 ZS_MBR_TYPE_PORTID = 3,
448 ZS_MBR_TYPE_NWWN = 4,
449};
450
451struct zs_mbr_wwn_s{
452 u8 mbr_type;
453 u8 rsvd[3];
454 wwn_t wwn;
455};
456
457struct zs_query_resp_s{
458 u32 nmbrs; /* number of zone members */
459 struct zs_mbr_wwn_s mbr[1];
460};
461
462/*
463 * GMAL Command ( Get ( interconnect Element) Management Address List)
464 * To retrieve the IP Address of a Switch.
465 */
466
467#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
468#define CT_GMAL_RESP_PREFIX_HTTP "http://"
469
470/* GMAL/GFN request */
471struct fcgs_req_s {
472 wwn_t wwn; /* PWWN/NWWN */
473};
474
475#define fcgs_gmal_req_t struct fcgs_req_s
476#define fcgs_gfn_req_t struct fcgs_req_s
477
478/* Accept Response to GMAL */
479struct fcgs_gmal_resp_s {
480 u32 ms_len; /* Num of entries */
481 u8 ms_ma[256];
482};
483
484struct fc_gmal_entry_s {
485 u8 len;
486 u8 prefix[7]; /* like "http://" */
487 u8 ip_addr[248];
488};
489
490#pragma pack()
491
492#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
new file mode 100644
index 000000000000..3e39ba58cfb5
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -0,0 +1,1105 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FC_H__
19#define __FC_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25/*
26 * Fibre Channel Header Structure (FCHS) definition
27 */
28struct fchs_s {
29#ifdef __BIGENDIAN
30 u32 routing:4; /* routing bits */
31 u32 cat_info:4; /* category info */
32#else
33 u32 cat_info:4; /* category info */
34 u32 routing:4; /* routing bits */
35#endif
36 u32 d_id:24; /* destination identifier */
37
38 u32 cs_ctl:8; /* class specific control */
39 u32 s_id:24; /* source identifier */
40
41 u32 type:8; /* data structure type */
42 u32 f_ctl:24; /* initial frame control */
43
44 u8 seq_id; /* sequence identifier */
45 u8 df_ctl; /* data field control */
46 u16 seq_cnt; /* sequence count */
47
48 u16 ox_id; /* originator exchange ID */
49 u16 rx_id; /* responder exchange ID */
50
51 u32 ro; /* relative offset */
52};
53/*
54 * Fibre Channel BB_E Header Structure
55 */
56struct fcbbehs_s {
57 u16 ver_rsvd;
58 u32 rsvd[2];
59 u32 rsvd__sof;
60};
61
62#define FC_SEQ_ID_MAX 256
63
64/*
65 * routing bit definitions
66 */
67enum {
68 FC_RTG_FC4_DEV_DATA = 0x0, /* FC-4 Device Data */
69 FC_RTG_EXT_LINK = 0x2, /* Extended Link Data */
70 FC_RTG_FC4_LINK_DATA = 0x3, /* FC-4 Link Data */
71 FC_RTG_VIDEO_DATA = 0x4, /* Video Data */
72 FC_RTG_EXT_HDR = 0x5, /* VFT, IFR or Encapsuled */
73 FC_RTG_BASIC_LINK = 0x8, /* Basic Link data */
74 FC_RTG_LINK_CTRL = 0xC, /* Link Control */
75};
76
77/*
78 * information category for extended link data and FC-4 Link Data
79 */
80enum {
81 FC_CAT_LD_REQUEST = 0x2, /* Request */
82 FC_CAT_LD_REPLY = 0x3, /* Reply */
83 FC_CAT_LD_DIAG = 0xF, /* for DIAG use only */
84};
85
86/*
87 * information category for extended headers (VFT, IFR or encapsulation)
88 */
89enum {
90 FC_CAT_VFT_HDR = 0x0, /* Virtual fabric tagging header */
91 FC_CAT_IFR_HDR = 0x1, /* Inter-Fabric routing header */
92 FC_CAT_ENC_HDR = 0x2, /* Encapsulation header */
93};
94
95/*
96 * information category for FC-4 device data
97 */
98enum {
99 FC_CAT_UNCATEG_INFO = 0x0, /* Uncategorized information */
100 FC_CAT_SOLICIT_DATA = 0x1, /* Solicited Data */
101 FC_CAT_UNSOLICIT_CTRL = 0x2, /* Unsolicited Control */
102 FC_CAT_SOLICIT_CTRL = 0x3, /* Solicited Control */
103 FC_CAT_UNSOLICIT_DATA = 0x4, /* Unsolicited Data */
104 FC_CAT_DATA_DESC = 0x5, /* Data Descriptor */
105 FC_CAT_UNSOLICIT_CMD = 0x6, /* Unsolicited Command */
106 FC_CAT_CMD_STATUS = 0x7, /* Command Status */
107};
108
109/*
110 * information category for Link Control
111 */
112enum {
113 FC_CAT_ACK_1 = 0x00,
114 FC_CAT_ACK_0_N = 0x01,
115 FC_CAT_P_RJT = 0x02,
116 FC_CAT_F_RJT = 0x03,
117 FC_CAT_P_BSY = 0x04,
118 FC_CAT_F_BSY_DATA = 0x05,
119 FC_CAT_F_BSY_LINK_CTL = 0x06,
120 FC_CAT_F_LCR = 0x07,
121 FC_CAT_NTY = 0x08,
122 FC_CAT_END = 0x09,
123};
124
125/*
126 * Type Field Definitions. FC-PH Section 18.5 pg. 165
127 */
128enum {
129 FC_TYPE_BLS = 0x0, /* Basic Link Service */
130 FC_TYPE_ELS = 0x1, /* Extended Link Service */
131 FC_TYPE_IP = 0x5, /* IP */
132 FC_TYPE_FCP = 0x8, /* SCSI-FCP */
133 FC_TYPE_GPP = 0x9, /* SCSI_GPP */
134 FC_TYPE_SERVICES = 0x20, /* Fibre Channel Services */
135 FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */
136 FC_TYPE_FC_AL = 0x23, /* FC-AL */
137 FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */
138 FC_TYPE_MAX = 256, /* 256 FC-4 types */
139};
140
141struct fc_fc4types_s{
142 u8 bits[FC_TYPE_MAX / 8];
143};
144
145/*
146 * Frame Control Definitions. FC-PH Table-45. pg. 168
147 */
148enum {
149 FCTL_EC_ORIG = 0x000000, /* exchange originator */
150 FCTL_EC_RESP = 0x800000, /* exchange responder */
151 FCTL_SEQ_INI = 0x000000, /* sequence initiator */
152 FCTL_SEQ_REC = 0x400000, /* sequence recipient */
153 FCTL_FS_EXCH = 0x200000, /* first sequence of xchg */
154 FCTL_LS_EXCH = 0x100000, /* last sequence of xchg */
155 FCTL_END_SEQ = 0x080000, /* last frame of sequence */
156 FCTL_SI_XFER = 0x010000, /* seq initiative transfer */
157 FCTL_RO_PRESENT = 0x000008, /* relative offset present */
158 FCTL_FILLBYTE_MASK = 0x000003 /* , fill byte mask */
159};
160
161/*
162 * Fabric Well Known Addresses
163 */
164enum {
165 FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0,
166 FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00,
167 FC_ALIAS_SERVER = 0xFFFFF8,
168 FC_MGMT_SERVER = 0xFFFFFA,
169 FC_TIME_SERVER = 0xFFFFFB,
170 FC_NAME_SERVER = 0xFFFFFC,
171 FC_FABRIC_CONTROLLER = 0xFFFFFD,
172 FC_FABRIC_PORT = 0xFFFFFE,
173 FC_BROADCAST_SERVER = 0xFFFFFF
174};
175
176/*
177 * domain/area/port defines
178 */
179#define FC_DOMAIN_MASK 0xFF0000
180#define FC_DOMAIN_SHIFT 16
181#define FC_AREA_MASK 0x00FF00
182#define FC_AREA_SHIFT 8
183#define FC_PORT_MASK 0x0000FF
184#define FC_PORT_SHIFT 0
185
186#define FC_GET_DOMAIN(p) (((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT)
187#define FC_GET_AREA(p) (((p) & FC_AREA_MASK) >> FC_AREA_SHIFT)
188#define FC_GET_PORT(p) (((p) & FC_PORT_MASK) >> FC_PORT_SHIFT)
189
190#define FC_DOMAIN_CTRLR(p) (FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p)))
191
192enum {
193 FC_RXID_ANY = 0xFFFFU,
194};
195
196/*
197 * generic ELS command
198 */
199struct fc_els_cmd_s{
200 u32 els_code:8; /* ELS Command Code */
201 u32 reserved:24;
202};
203
204/*
205 * ELS Command Codes. FC-PH Table-75. pg. 223
206 */
207enum {
208 FC_ELS_LS_RJT = 0x1, /* Link Service Reject. */
209 FC_ELS_ACC = 0x02, /* Accept */
210 FC_ELS_PLOGI = 0x03, /* N_Port Login. */
211 FC_ELS_FLOGI = 0x04, /* F_Port Login. */
212 FC_ELS_LOGO = 0x05, /* Logout. */
213 FC_ELS_ABTX = 0x06, /* Abort Exchange */
214 FC_ELS_RES = 0x08, /* Read Exchange status */
215 FC_ELS_RSS = 0x09, /* Read sequence status block */
216 FC_ELS_RSI = 0x0A, /* Request Sequence Initiative */
217 FC_ELS_ESTC = 0x0C, /* Estimate Credit. */
218 FC_ELS_RTV = 0x0E, /* Read Timeout Value. */
219 FC_ELS_RLS = 0x0F, /* Read Link Status. */
220 FC_ELS_ECHO = 0x10, /* Echo */
221 FC_ELS_TEST = 0x11, /* Test */
222 FC_ELS_RRQ = 0x12, /* Reinstate Recovery Qualifier. */
223 FC_ELS_REC = 0x13, /* Add this for TAPE support in FCR */
224 FC_ELS_PRLI = 0x20, /* Process Login */
225 FC_ELS_PRLO = 0x21, /* Process Logout. */
226 FC_ELS_SCN = 0x22, /* State Change Notification. */
227 FC_ELS_TPRLO = 0x24, /* Third Party Process Logout. */
228 FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */
229 FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */
230 FC_ELS_ADISC = 0x52, /* Discover Address. */
231 FC_ELS_FAN = 0x60, /* Fabric Address Notification */
232 FC_ELS_RSCN = 0x61, /* Reg State Change Notification */
233 FC_ELS_SCR = 0x62, /* State Change Registration. */
234 FC_ELS_RTIN = 0x77, /* Mangement server request */
235 FC_ELS_RNID = 0x78, /* Mangement server request */
236 FC_ELS_RLIR = 0x79, /* Registered Link Incident Record */
237
238 FC_ELS_RPSC = 0x7D, /* Report Port Speed Capabilities */
239 FC_ELS_QSA = 0x7E, /* Query Security Attributes. Ref FC-SP */
240 FC_ELS_E2E_LBEACON = 0x81,
241 /* End-to-End Link Beacon */
242 FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */
243 FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref
244 *FC-SP */
245
246};
247
248/*
249 * Version numbers for FC-PH standards,
250 * used in login to indicate what port
251 * supports. See FC-PH-X table 158.
252 */
253enum {
254 FC_PH_VER_4_3 = 0x09,
255 FC_PH_VER_PH_3 = 0x20,
256};
257
258/*
259 * PDU size defines
260 */
261enum {
262 FC_MIN_PDUSZ = 512,
263 FC_MAX_PDUSZ = 2112,
264};
265
266/*
267 * N_Port PLOGI Common Service Parameters.
268 * FC-PH-x. Figure-76. pg. 308.
269 */
270struct fc_plogi_csp_s{
271 u8 verhi; /* FC-PH high version */
272 u8 verlo; /* FC-PH low version */
273 u16 bbcred; /* BB_Credit */
274
275#ifdef __BIGENDIAN
276 u8 ciro:1, /* continuously increasing RO */
277 rro:1, /* random relative offset */
278 npiv_supp:1, /* NPIV supported */
279 port_type:1, /* N_Port/F_port */
280 altbbcred:1, /* alternate BB_Credit */
281 resolution:1, /* ms/ns ED_TOV resolution */
282 vvl_info:1, /* VVL Info included */
283 reserved1:1;
284
285 u8 hg_supp:1,
286 query_dbc:1,
287 security:1,
288 sync_cap:1,
289 r_t_tov:1,
290 dh_dup_supp:1,
291 cisc:1, /* continuously increasing seq count */
292 payload:1;
293#else
294 u8 reserved2:2,
295 resolution:1, /* ms/ns ED_TOV resolution */
296 altbbcred:1, /* alternate BB_Credit */
297 port_type:1, /* N_Port/F_port */
298 npiv_supp:1, /* NPIV supported */
299 rro:1, /* random relative offset */
300 ciro:1; /* continuously increasing RO */
301
302 u8 payload:1,
303 cisc:1, /* continuously increasing seq count */
304 dh_dup_supp:1,
305 r_t_tov:1,
306 sync_cap:1,
307 security:1,
308 query_dbc:1,
309 hg_supp:1;
310#endif
311
312 u16 rxsz; /* recieve data_field size */
313
314 u16 conseq;
315 u16 ro_bitmap;
316
317 u32 e_d_tov;
318};
319
320/*
321 * N_Port PLOGI Class Specific Parameters.
322 * FC-PH-x. Figure 78. pg. 318.
323 */
324struct fc_plogi_clp_s{
325#ifdef __BIGENDIAN
326 u32 class_valid:1;
327 u32 intermix:1; /* class intermix supported if set =1.
328 * valid only for class1. Reserved for
329 * class2 & class3
330 */
331 u32 reserved1:2;
332 u32 sequential:1;
333 u32 reserved2:3;
334#else
335 u32 reserved2:3;
336 u32 sequential:1;
337 u32 reserved1:2;
338 u32 intermix:1; /* class intermix supported if set =1.
339 * valid only for class1. Reserved for
340 * class2 & class3
341 */
342 u32 class_valid:1;
343#endif
344
345 u32 reserved3:24;
346
347 u32 reserved4:16;
348 u32 rxsz:16; /* Receive data_field size */
349
350 u32 reserved5:8;
351 u32 conseq:8;
352 u32 e2e_credit:16; /* end to end credit */
353
354 u32 reserved7:8;
355 u32 ospx:8;
356 u32 reserved8:16;
357};
358
359#define FLOGI_VVL_BRCD 0x42524344 /* ASCII value for each character in
360 * string "BRCD" */
361
362/*
363 * PLOGI els command and reply payload
364 */
365struct fc_logi_s{
366 struct fc_els_cmd_s els_cmd; /* ELS command code */
367 struct fc_plogi_csp_s csp; /* common service params */
368 wwn_t port_name;
369 wwn_t node_name;
370 struct fc_plogi_clp_s class1; /* class 1 service parameters */
371 struct fc_plogi_clp_s class2; /* class 2 service parameters */
372 struct fc_plogi_clp_s class3; /* class 3 service parameters */
373 struct fc_plogi_clp_s class4; /* class 4 service parameters */
374 u8 vvl[16]; /* vendor version level */
375};
376
377/*
378 * LOGO els command payload
379 */
380struct fc_logo_s{
381 struct fc_els_cmd_s els_cmd; /* ELS command code */
382 u32 res1:8;
383 u32 nport_id:24; /* N_Port identifier of source */
384 wwn_t orig_port_name; /* Port name of the LOGO originator */
385};
386
387/*
388 * ADISC els command payload
389 */
390struct fc_adisc_s {
391 struct fc_els_cmd_s els_cmd; /* ELS command code */
392 u32 res1:8;
393 u32 orig_HA:24; /* originator hard address */
394 wwn_t orig_port_name; /* originator port name */
395 wwn_t orig_node_name; /* originator node name */
396 u32 res2:8;
397 u32 nport_id:24; /* originator NPortID */
398};
399
400/*
401 * Exchange status block
402 */
403struct fc_exch_status_blk_s{
404 u32 oxid:16;
405 u32 rxid:16;
406 u32 res1:8;
407 u32 orig_np:24; /* originator NPortID */
408 u32 res2:8;
409 u32 resp_np:24; /* responder NPortID */
410 u32 es_bits;
411 u32 res3;
412 /*
413 * un modified section of the fields
414 */
415};
416
417/*
418 * RES els command payload
419 */
420struct fc_res_s {
421 struct fc_els_cmd_s els_cmd; /* ELS command code */
422 u32 res1:8;
423 u32 nport_id:24; /* N_Port identifier of source */
424 u32 oxid:16;
425 u32 rxid:16;
426 u8 assoc_hdr[32];
427};
428
429/*
430 * RES els accept payload
431 */
432struct fc_res_acc_s{
433 struct fc_els_cmd_s els_cmd; /* ELS command code */
434 struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */
435};
436
437/*
438 * REC els command payload
439 */
440struct fc_rec_s {
441 struct fc_els_cmd_s els_cmd; /* ELS command code */
442 u32 res1:8;
443 u32 nport_id:24; /* N_Port identifier of source */
444 u32 oxid:16;
445 u32 rxid:16;
446};
447
448#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */
449#define FC_REC_ESB_SI 0x40000000 /* SI is owned */
450#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */
451#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */
452#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */
453#define FC_REC_ESB_ERRP_MSK 0x03000000
454#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */
455#define FC_REC_ESB_RXID_INV 0x00400000 /* invalid RXID */
456#define FC_REC_ESB_PRIO_INUSE 0x00200000
457
458/*
459 * REC els accept payload
460 */
461struct fc_rec_acc_s {
462 struct fc_els_cmd_s els_cmd; /* ELS command code */
463 u32 oxid:16;
464 u32 rxid:16;
465 u32 res1:8;
466 u32 orig_id:24; /* N_Port id of exchange originator */
467 u32 res2:8;
468 u32 resp_id:24; /* N_Port id of exchange responder */
469 u32 count; /* data transfer count */
470 u32 e_stat; /* exchange status */
471};
472
473/*
474 * RSI els payload
475 */
476struct fc_rsi_s {
477 struct fc_els_cmd_s els_cmd;
478 u32 res1:8;
479 u32 orig_sid:24;
480 u32 oxid:16;
481 u32 rxid:16;
482};
483
484/*
485 * structure for PRLI paramater pages, both request & response
486 * see FC-PH-X table 113 & 115 for explanation also FCP table 8
487 */
488struct fc_prli_params_s{
489 u32 reserved: 16;
490#ifdef __BIGENDIAN
491 u32 reserved1: 5;
492 u32 rec_support : 1;
493 u32 task_retry_id : 1;
494 u32 retry : 1;
495
496 u32 confirm : 1;
497 u32 doverlay:1;
498 u32 initiator:1;
499 u32 target:1;
500 u32 cdmix:1;
501 u32 drmix:1;
502 u32 rxrdisab:1;
503 u32 wxrdisab:1;
504#else
505 u32 retry : 1;
506 u32 task_retry_id : 1;
507 u32 rec_support : 1;
508 u32 reserved1: 5;
509
510 u32 wxrdisab:1;
511 u32 rxrdisab:1;
512 u32 drmix:1;
513 u32 cdmix:1;
514 u32 target:1;
515 u32 initiator:1;
516 u32 doverlay:1;
517 u32 confirm : 1;
518#endif
519};
520
521/*
522 * valid values for rspcode in PRLI ACC payload
523 */
524enum {
525 FC_PRLI_ACC_XQTD = 0x1, /* request executed */
526 FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */
527};
528
529struct fc_prli_params_page_s{
530 u32 type:8;
531 u32 codext:8;
532#ifdef __BIGENDIAN
533 u32 origprocasv:1;
534 u32 rsppav:1;
535 u32 imagepair:1;
536 u32 reserved1:1;
537 u32 rspcode:4;
538#else
539 u32 rspcode:4;
540 u32 reserved1:1;
541 u32 imagepair:1;
542 u32 rsppav:1;
543 u32 origprocasv:1;
544#endif
545 u32 reserved2:8;
546
547 u32 origprocas;
548 u32 rspprocas;
549 struct fc_prli_params_s servparams;
550};
551
552/*
553 * PRLI request and accept payload, FC-PH-X tables 112 & 114
554 */
555struct fc_prli_s{
556 u32 command:8;
557 u32 pglen:8;
558 u32 pagebytes:16;
559 struct fc_prli_params_page_s parampage;
560};
561
562/*
563 * PRLO logout params page
564 */
565struct fc_prlo_params_page_s{
566 u32 type:8;
567 u32 type_ext:8;
568#ifdef __BIGENDIAN
569 u32 opa_valid:1; /* originator process associator
570 * valid
571 */
572 u32 rpa_valid:1; /* responder process associator valid */
573 u32 res1:14;
574#else
575 u32 res1:14;
576 u32 rpa_valid:1; /* responder process associator valid */
577 u32 opa_valid:1; /* originator process associator
578 * valid
579 */
580#endif
581 u32 orig_process_assc;
582 u32 resp_process_assc;
583
584 u32 res2;
585};
586
587/*
588 * PRLO els command payload
589 */
590struct fc_prlo_s{
591 u32 command:8;
592 u32 page_len:8;
593 u32 payload_len:16;
594 struct fc_prlo_params_page_s prlo_params[1];
595};
596
597/*
598 * PRLO Logout response parameter page
599 */
600struct fc_prlo_acc_params_page_s{
601 u32 type:8;
602 u32 type_ext:8;
603
604#ifdef __BIGENDIAN
605 u32 opa_valid:1; /* originator process associator
606 * valid
607 */
608 u32 rpa_valid:1; /* responder process associator valid */
609 u32 res1:14;
610#else
611 u32 res1:14;
612 u32 rpa_valid:1; /* responder process associator valid */
613 u32 opa_valid:1; /* originator process associator
614 * valid
615 */
616#endif
617 u32 orig_process_assc;
618 u32 resp_process_assc;
619
620 u32 fc4type_csp;
621};
622
623/*
624 * PRLO els command ACC payload
625 */
626struct fc_prlo_acc_s{
627 u32 command:8;
628 u32 page_len:8;
629 u32 payload_len:16;
630 struct fc_prlo_acc_params_page_s prlo_acc_params[1];
631};
632
633/*
634 * SCR els command payload
635 */
636enum {
637 FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01,
638 FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02,
639 FC_SCR_REG_FUNC_FULL = 0x03,
640 FC_SCR_REG_FUNC_CLEAR_REG = 0xFF,
641};
642
643/* SCR VU registrations */
644enum {
645 FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
646};
647
648struct fc_scr_s{
649 u32 command:8;
650 u32 res:24;
651 u32 vu_reg_func:8; /* Vendor Unique Registrations */
652 u32 res1:16;
653 u32 reg_func:8;
654};
655
656/*
657 * Information category for Basic link data
658 */
659enum {
660 FC_CAT_NOP = 0x0,
661 FC_CAT_ABTS = 0x1,
662 FC_CAT_RMC = 0x2,
663 FC_CAT_BA_ACC = 0x4,
664 FC_CAT_BA_RJT = 0x5,
665 FC_CAT_PRMT = 0x6,
666};
667
668/*
669 * LS_RJT els reply payload
670 */
671struct fc_ls_rjt_s {
672 struct fc_els_cmd_s els_cmd; /* ELS command code */
673 u32 res1:8;
674 u32 reason_code:8; /* Reason code for reject */
675 u32 reason_code_expl:8; /* Reason code explanation */
676 u32 vendor_unique:8; /* Vendor specific */
677};
678
679/*
680 * LS_RJT reason codes
681 */
682enum {
683 FC_LS_RJT_RSN_INV_CMD_CODE = 0x01,
684 FC_LS_RJT_RSN_LOGICAL_ERROR = 0x03,
685 FC_LS_RJT_RSN_LOGICAL_BUSY = 0x05,
686 FC_LS_RJT_RSN_PROTOCOL_ERROR = 0x07,
687 FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09,
688 FC_LS_RJT_RSN_CMD_NOT_SUPP = 0x0B,
689};
690
691/*
692 * LS_RJT reason code explanation
693 */
694enum {
695 FC_LS_RJT_EXP_NO_ADDL_INFO = 0x00,
696 FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS = 0x01,
697 FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL = 0x03,
698 FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL = 0x05,
699 FC_LS_RJT_EXP_SPARMS_ERR_RXSZ = 0x07,
700 FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ = 0x09,
701 FC_LS_RJT_EXP_SPARMS_ERR_CREDIT = 0x0B,
702 FC_LS_RJT_EXP_INV_PORT_NAME = 0x0D,
703 FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME = 0x0E,
704 FC_LS_RJT_EXP_INV_CSP = 0x0F,
705 FC_LS_RJT_EXP_INV_ASSOC_HDR = 0x11,
706 FC_LS_RJT_EXP_ASSOC_HDR_REQD = 0x13,
707 FC_LS_RJT_EXP_INV_ORIG_S_ID = 0x15,
708 FC_LS_RJT_EXP_INV_OXID_RXID_COMB = 0x17,
709 FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG = 0x19,
710 FC_LS_RJT_EXP_LOGIN_REQUIRED = 0x1E,
711 FC_LS_RJT_EXP_INVALID_NPORT_ID = 0x1F,
712 FC_LS_RJT_EXP_INSUFF_RES = 0x29,
713 FC_LS_RJT_EXP_CMD_NOT_SUPP = 0x2C,
714 FC_LS_RJT_EXP_INV_PAYLOAD_LEN = 0x2D,
715};
716
717/*
718 * RRQ els command payload
719 */
720struct fc_rrq_s{
721 struct fc_els_cmd_s els_cmd; /* ELS command code */
722 u32 res1:8;
723 u32 s_id:24; /* exchange originator S_ID */
724
725 u32 ox_id:16; /* originator exchange ID */
726 u32 rx_id:16; /* responder exchange ID */
727
728 u32 res2[8]; /* optional association header */
729};
730
731/*
732 * ABTS BA_ACC reply payload
733 */
734struct fc_ba_acc_s{
735 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */
736 u32 seq_id:8; /* invalid for Abort Exchange */
737 u32 res2:16;
738 u32 ox_id:16; /* OX_ID from ABTS frame */
739 u32 rx_id:16; /* RX_ID from ABTS frame */
740 u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */
741 u32 high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */
742};
743
744/*
745 * ABTS BA_RJT reject payload
746 */
747struct fc_ba_rjt_s{
748 u32 res1:8; /* Reserved */
749 u32 reason_code:8; /* reason code for reject */
750 u32 reason_expl:8; /* reason code explanation */
751 u32 vendor_unique:8;/* vendor unique reason code,set to 0 */
752};
753
754/*
755 * TPRLO logout parameter page
756 */
/*
 * One logout parameter page of a TPRLO (third-party process logout)
 * request.  Wire format; bit-field order is flipped per endianness so
 * the on-wire layout is identical on both.
 */
struct fc_tprlo_params_page_s{
	u32	type:8;		/* FC-4 type code */
	u32	type_ext:8;	/* FC-4 type code extension */

#ifdef __BIGENDIAN
	u32	opa_valid:1;	/* originator process associator valid */
	u32	rpa_valid:1;	/* responder process associator valid */
	u32	tpo_nport_valid:1;	/* third-party N_Port ID valid */
	u32	global_process_logout:1;
	u32	res1:12;
#else
	u32	res1:12;
	u32	global_process_logout:1;
	u32	tpo_nport_valid:1;
	u32	rpa_valid:1;
	u32	opa_valid:1;
#endif

	u32	orig_process_assc;	/* originator process associator */
	u32	resp_process_assc;	/* responder process associator */

	u32	res2:8;
	/*
	 * NOTE(review): an 8-bit reserved bit-field followed by a full
	 * 32-bit member is an odd packed-wire layout; a 24-bit N_Port ID
	 * (tpo_nport_id:24) would complete the word.  Confirm against
	 * the FC-LS TPRLO page definition before relying on sizeof.
	 */
	u32	tpo_nport_id;
};
781
782/*
783 * TPRLO ELS command payload
784 */
785struct fc_tprlo_s{
786 u32 command:8;
787 u32 page_len:8;
788 u32 payload_len:16;
789
790 struct fc_tprlo_params_page_s tprlo_params[1];
791};
792
793enum fc_tprlo_type{
794 FC_GLOBAL_LOGO = 1,
795 FC_TPR_LOGO
796};
797
798/*
799 * TPRLO els command ACC payload
800 */
801struct fc_tprlo_acc_s{
802 u32 command:8;
803 u32 page_len:8;
804 u32 payload_len:16;
805 struct fc_prlo_acc_params_page_s tprlo_acc_params[1];
806};
807
808/*
809 * RSCN els command req payload
810 */
811#define FC_RSCN_PGLEN 0x4
812
813enum fc_rscn_format{
814 FC_RSCN_FORMAT_PORTID = 0x0,
815 FC_RSCN_FORMAT_AREA = 0x1,
816 FC_RSCN_FORMAT_DOMAIN = 0x2,
817 FC_RSCN_FORMAT_FABRIC = 0x3,
818};
819
/* One affected-address entry of an RSCN payload. */
struct fc_rscn_event_s{
	u32	format:2;	/* address format, see enum fc_rscn_format */
	u32	qualifier:4;	/* event qualifier */
	u32	resvd:2;
	u32	portid:24;	/* affected port/area/domain address */
};
826
827struct fc_rscn_pl_s{
828 u8 command;
829 u8 pagelen;
830 u16 payldlen;
831 struct fc_rscn_event_s event[1];
832};
833
834/*
835 * ECHO els command req payload
836 */
837struct fc_echo_s {
838 struct fc_els_cmd_s els_cmd;
839};
840
841/*
842 * RNID els command
843 */
844
/* RNID node-identification data format codes */
#define RNID_NODEID_DATA_FORMAT_COMMON			0x00
#define RNID_NODEID_DATA_FORMAT_FCP3			0x08
#define RNID_NODEID_DATA_FORMAT_DISCOVERY		0xDF

/* RNID associated-type codes (general topology discovery data) */
#define RNID_ASSOCIATED_TYPE_UNKNOWN			0x00000001
#define RNID_ASSOCIATED_TYPE_OTHER			0x00000002
#define RNID_ASSOCIATED_TYPE_HUB			0x00000003
#define RNID_ASSOCIATED_TYPE_SWITCH			0x00000004
#define RNID_ASSOCIATED_TYPE_GATEWAY			0x00000005
#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE		0x00000009
#define RNID_ASSOCIATED_TYPE_HOST			0x0000000A
#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM		0x0000000B
#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE	0x0000000E
#define RNID_ASSOCIATED_TYPE_NAS_SERVER			0x00000011
/*
 * NOTE(review): _BRIDGE and _VIRTUALIZATION_DEVICE reuse the values of
 * _OTHER (0x02) and _HUB (0x03) above -- confirm the intended codes
 * against the RNID topology-discovery associated-type table.
 */
#define RNID_ASSOCIATED_TYPE_BRIDGE			0x00000002
#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE	0x00000003
#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE	0x000000FF
862
863/*
864 * RNID els command payload
865 */
866struct fc_rnid_cmd_s{
867 struct fc_els_cmd_s els_cmd;
868 u32 node_id_data_format:8;
869 u32 reserved:24;
870};
871
872/*
873 * RNID els response payload
874 */
875
876struct fc_rnid_common_id_data_s{
877 wwn_t port_name;
878 wwn_t node_name;
879};
880
881struct fc_rnid_general_topology_data_s{
882 u32 vendor_unique[4];
883 u32 asso_type;
884 u32 phy_port_num;
885 u32 num_attached_nodes;
886 u32 node_mgmt:8;
887 u32 ip_version:8;
888 u32 udp_tcp_port_num:16;
889 u32 ip_address[4];
890 u32 reserved:16;
891 u32 vendor_specific:16;
892};
893
/*
 * RNID accept (ACC) payload: format/length header followed by the
 * common identification data and the general topology data blocks.
 */
struct fc_rnid_acc_s{
	struct fc_els_cmd_s	els_cmd;		/* FC_ELS_ACC */
	u32	node_id_data_format:8;	/* see RNID_NODEID_DATA_FORMAT_* */
	u32	common_id_data_length:8;
	u32	reserved:8;
	u32	specific_id_data_length:8;
	struct fc_rnid_common_id_data_s	common_id_data;
	struct fc_rnid_general_topology_data_s	gen_topology_data;
};
903
/*
 * NOTE: a byte-identical second copy of the RNID_ASSOCIATED_TYPE_*
 * defines previously appeared here; it was redundant with the
 * definitions earlier in this header and has been removed.
 */
917
918enum fc_rpsc_speed_cap{
919 RPSC_SPEED_CAP_1G = 0x8000,
920 RPSC_SPEED_CAP_2G = 0x4000,
921 RPSC_SPEED_CAP_4G = 0x2000,
922 RPSC_SPEED_CAP_10G = 0x1000,
923 RPSC_SPEED_CAP_8G = 0x0800,
924 RPSC_SPEED_CAP_16G = 0x0400,
925
926 RPSC_SPEED_CAP_UNKNOWN = 0x0001,
927};
928
929enum fc_rpsc_op_speed_s{
930 RPSC_OP_SPEED_1G = 0x8000,
931 RPSC_OP_SPEED_2G = 0x4000,
932 RPSC_OP_SPEED_4G = 0x2000,
933 RPSC_OP_SPEED_10G = 0x1000,
934 RPSC_OP_SPEED_8G = 0x0800,
935 RPSC_OP_SPEED_16G = 0x0400,
936
937 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */
938};
939
940struct fc_rpsc_speed_info_s{
941 u16 port_speed_cap; /*! see fc_rpsc_speed_cap_t */
942 u16 port_op_speed; /*! see fc_rpsc_op_speed_t */
943};
944
945enum link_e2e_beacon_subcmd{
946 LINK_E2E_BEACON_ON = 1,
947 LINK_E2E_BEACON_OFF = 2
948};
949
950enum beacon_type{
951 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */
952 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */
953 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */
954};
955
956struct link_e2e_beacon_param_s {
957 u8 beacon_type; /* Beacon Type. See beacon_type_t */
958 u8 beacon_frequency;
959 /* Beacon frequency. Number of blinks
960 * per 10 seconds
961 */
962 u16 beacon_duration;/* Beacon duration (in Seconds). The
963 * command operation should be
964 * terminated at the end of this
965 * timeout value.
966 *
967 * Ignored if diag_sub_cmd is
968 * LINK_E2E_BEACON_OFF.
969 *
970 * If 0, beaconing will continue till a
971 * BEACON OFF request is received
972 */
973};
974
975/*
976 * Link E2E beacon request/good response format. For LS_RJTs use fc_ls_rjt_t
977 */
978struct link_e2e_beacon_req_s{
979 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests *
980 *or FC_ELS_ACC in good replies */
981 u32 ls_sub_cmd; /*! See link_e2e_beacon_subcmd_t */
982 struct link_e2e_beacon_param_s beacon_parm;
983};
984
985/**
986 * If RPSC request is sent to the Domain Controller, the request is for
987 * all the ports within that domain (TODO - I don't think FOS implements
988 * this...).
989 */
990struct fc_rpsc_cmd_s{
991 struct fc_els_cmd_s els_cmd;
992};
993
994/*
995 * RPSC Acc
996 */
997struct fc_rpsc_acc_s{
998 u32 command:8;
999 u32 rsvd:8;
1000 u32 num_entries:16;
1001
1002 struct fc_rpsc_speed_info_s speed_info[1];
1003};
1004
1005/**
1006 * If RPSC2 request is sent to the Domain Controller,
1007 */
1008#define FC_BRCD_TOKEN 0x42524344
1009
1010struct fc_rpsc2_cmd_s{
1011 struct fc_els_cmd_s els_cmd;
1012 u32 token;
1013 u16 resvd;
1014 u16 num_pids; /* Number of pids in the request */
1015 struct {
1016 u32 rsvd1:8;
1017 u32 pid:24; /* port identifier */
1018 } pid_list[1];
1019};
1020
1021enum fc_rpsc2_port_type{
1022 RPSC2_PORT_TYPE_UNKNOWN = 0,
1023 RPSC2_PORT_TYPE_NPORT = 1,
1024 RPSC2_PORT_TYPE_NLPORT = 2,
1025 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f,
1026 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f,
1027};
1028
1029/*
1030 * RPSC2 portInfo entry structure
1031 */
1032struct fc_rpsc2_port_info_s{
1033 u32 pid; /* PID */
1034 u16 resvd1;
1035 u16 index; /* port number / index */
1036 u8 resvd2;
1037 u8 type; /* port type N/NL/... */
1038 u16 speed; /* port Operating Speed */
1039};
1040
1041/*
1042 * RPSC2 Accept payload
1043 */
1044struct fc_rpsc2_acc_s{
1045 u8 els_cmd;
1046 u8 resvd;
1047 u16 num_pids; /* Number of pids in the request */
1048 struct fc_rpsc2_port_info_s port_info[1]; /* port information */
1049};
1050
1051/**
1052 * bit fields so that multiple classes can be specified
1053 */
1054enum fc_cos{
1055 FC_CLASS_2 = 0x04,
1056 FC_CLASS_3 = 0x08,
1057 FC_CLASS_2_3 = 0x0C,
1058};
1059
1060/*
1061 * symbolic name
1062 */
1063struct fc_symname_s{
1064 u8 symname[FC_SYMNAME_MAX];
1065};
1066
1067struct fc_alpabm_s{
1068 u8 alpa_bm[FC_ALPA_MAX / 8];
1069};
1070
1071/*
1072 * protocol default timeout values
1073 */
1074#define FC_ED_TOV 2
1075#define FC_REC_TOV (FC_ED_TOV + 1)
1076#define FC_RA_TOV 10
1077#define FC_ELS_TOV (2 * FC_RA_TOV)
1078
1079/*
1080 * virtual fabric related defines
1081 */
1082#define FC_VF_ID_NULL 0 /* must not be used as VF_ID */
1083#define FC_VF_ID_MIN 1
1084#define FC_VF_ID_MAX 0xEFF
1085#define FC_VF_ID_CTL 0xFEF /* control VF_ID */
1086
1087/**
1088 * Virtual Fabric Tagging header format
1089 * @caution This is defined only in BIG ENDIAN format.
1090 */
struct fc_vft_s{
	u32	r_ctl:8;	/* routing control (VFT header value) */
	u32	ver:2;		/* VFT header version */
	u32	type:4;		/* tagged frame type */
	u32	res_a:2;
	u32	priority:3;	/* frame priority */
	u32	vf_id:12;	/* virtual fabric identifier */
	u32	res_b:1;
	u32	hopct:8;	/* hop count */
	u32	res_c:24;
};
1102
1103#pragma pack()
1104
1105#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc_sp.h b/drivers/scsi/bfa/include/protocol/fc_sp.h
new file mode 100644
index 000000000000..55bb0b31d04b
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/fc_sp.h
@@ -0,0 +1,224 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FC_SP_H__
19#define __FC_SP_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25enum auth_els_flags{
26 FC_AUTH_ELS_MORE_FRAGS_FLAG = 0x80, /*! bit-7. More Fragments
27 * Follow
28 */
29 FC_AUTH_ELS_CONCAT_FLAG = 0x40, /*! bit-6. Concatenation Flag */
30 FC_AUTH_ELS_SEQ_NUM_FLAG = 0x01 /*! bit-0. Sequence Number */
31};
32
33enum auth_msg_codes{
34 FC_AUTH_MC_AUTH_RJT = 0x0A, /*! Auth Reject */
35 FC_AUTH_MC_AUTH_NEG = 0x0B, /*! Auth Negotiate */
36 FC_AUTH_MC_AUTH_DONE = 0x0C, /*! Auth Done */
37
38 FC_AUTH_MC_DHCHAP_CHAL = 0x10, /*! DHCHAP Challenge */
39 FC_AUTH_MC_DHCHAP_REPLY = 0x11, /*! DHCHAP Reply */
40 FC_AUTH_MC_DHCHAP_SUCC = 0x12, /*! DHCHAP Success */
41
42 FC_AUTH_MC_FCAP_REQ = 0x13, /*! FCAP Request */
43 FC_AUTH_MC_FCAP_ACK = 0x14, /*! FCAP Acknowledge */
44 FC_AUTH_MC_FCAP_CONF = 0x15, /*! FCAP Confirm */
45
46 FC_AUTH_MC_FCPAP_INIT = 0x16, /*! FCPAP Init */
47 FC_AUTH_MC_FCPAP_ACC = 0x17, /*! FCPAP Accept */
48 FC_AUTH_MC_FCPAP_COMP = 0x18, /*! FCPAP Complete */
49
50 FC_AUTH_MC_IKE_SA_INIT = 0x22, /*! IKE SA INIT */
51 FC_AUTH_MC_IKE_SA_AUTH = 0x23, /*! IKE SA Auth */
52 FC_AUTH_MC_IKE_CREATE_CHILD_SA = 0x24, /*! IKE Create Child SA */
53 FC_AUTH_MC_IKE_INFO = 0x25, /*! IKE informational */
54};
55
56enum auth_proto_version{
57 FC_AUTH_PROTO_VER_1 = 1, /*! Protocol Version 1 */
58};
59
60enum {
61 FC_AUTH_ELS_COMMAND_CODE = 0x90,/*! Authentication ELS Command code */
62 FC_AUTH_PROTO_PARAM_LEN_SZ = 4, /*! Size of Proto Parameter Len Field */
63 FC_AUTH_PROTO_PARAM_VAL_SZ = 4, /*! Size of Proto Parameter Val Field */
64 FC_MAX_AUTH_SECRET_LEN = 256,
65 /*! Maximum secret string length */
66 FC_AUTH_NUM_USABLE_PROTO_LEN_SZ = 4,
67 /*! Size of usable protocols field */
68 FC_AUTH_RESP_VALUE_LEN_SZ = 4,
69 /*! Size of response value length */
70 FC_MAX_CHAP_KEY_LEN = 256, /*! Maximum md5 digest length */
71 FC_MAX_AUTH_RETRIES = 3, /*! Maximum number of retries */
72 FC_MD5_DIGEST_LEN = 16, /*! MD5 digest length */
73 FC_SHA1_DIGEST_LEN = 20, /*! SHA1 digest length */
74 FC_MAX_DHG_SUPPORTED = 1, /*! Maximum DH Groups supported */
75 FC_MAX_ALG_SUPPORTED = 1, /*! Maximum algorithms supported */
76 FC_MAX_PROTO_SUPPORTED = 1, /*! Maximum protocols supported */
77 FC_START_TXN_ID = 2, /*! Starting transaction ID */
78};
79
80enum auth_proto_id{
81 FC_AUTH_PROTO_DHCHAP = 0x00000001,
82 FC_AUTH_PROTO_FCAP = 0x00000002,
83 FC_AUTH_PROTO_FCPAP = 0x00000003,
84 FC_AUTH_PROTO_IKEv2 = 0x00000004,
85 FC_AUTH_PROTO_IKEv2_AUTH = 0x00000005,
86};
87
88struct auth_name_s{
89 u16 name_tag; /*! Name Tag = 1 for Authentication */
90 u16 name_len; /*! Name Length = 8 for Authentication
91 */
92 wwn_t name; /*! Name. TODO - is this PWWN */
93};
94
95
96enum auth_hash_func{
97 FC_AUTH_HASH_FUNC_MD5 = 0x00000005,
98 FC_AUTH_HASH_FUNC_SHA_1 = 0x00000006,
99};
100
101enum auth_dh_gid{
102 FC_AUTH_DH_GID_0_DHG_NULL = 0x00000000,
103 FC_AUTH_DH_GID_1_DHG_1024 = 0x00000001,
104 FC_AUTH_DH_GID_2_DHG_1280 = 0x00000002,
105 FC_AUTH_DH_GID_3_DHG_1536 = 0x00000003,
106 FC_AUTH_DH_GID_4_DHG_2048 = 0x00000004,
107 FC_AUTH_DH_GID_6_DHG_3072 = 0x00000006,
108 FC_AUTH_DH_GID_7_DHG_4096 = 0x00000007,
109 FC_AUTH_DH_GID_8_DHG_6144 = 0x00000008,
110 FC_AUTH_DH_GID_9_DHG_8192 = 0x00000009,
111};
112
/*
 * Common header of every FC-SP authentication ELS message; the
 * message-specific payload immediately follows this header.
 */
struct auth_els_msg_s {
	u8	auth_els_code;	/* Authentication ELS Code (0x90) */
	u8	auth_els_flag;	/* Authentication ELS Flags, see auth_els_flags */
	u8	auth_msg_code;	/* Authentication Message Code, see auth_msg_codes */
	u8	proto_version;	/* Protocol Version */
	u32	msg_len;	/* Message Length */
	u32	trans_id;	/* Transaction Identifier (T_ID) */

	/* Msg payload follows... */
};
123
124
125enum auth_neg_param_tags {
126 FC_AUTH_NEG_DHCHAP_HASHLIST = 0x0001,
127 FC_AUTH_NEG_DHCHAP_DHG_ID_LIST = 0x0002,
128};
129
130
131struct dhchap_param_format_s {
132 u16 tag; /*! Parameter Tag. See
133 * auth_neg_param_tags_t
134 */
135 u16 word_cnt;
136
137 /* followed by variable length parameter value... */
138};
139
140struct auth_proto_params_s {
141 u32 proto_param_len;
142 u32 proto_id;
143
144 /*
145 * Followed by variable length Protocol specific parameters. DH-CHAP
146 * uses dhchap_param_format_t
147 */
148};
149
150struct auth_neg_msg_s {
151 struct auth_name_s auth_ini_name;
152 u32 usable_auth_protos;
153 struct auth_proto_params_s proto_params[1]; /*! (1..usable_auth_proto)
154 * protocol params
155 */
156};
157
158struct auth_dh_val_s {
159 u32 dh_val_len;
160 u32 dh_val[1];
161};
162
163struct auth_dhchap_chal_msg_s {
164 struct auth_els_msg_s hdr;
165 struct auth_name_s auth_responder_name; /* TODO VRK - is auth_name_t
166 * type OK?
167 */
168 u32 hash_id;
169 u32 dh_grp_id;
170 u32 chal_val_len;
171 char chal_val[1];
172
173 /* ...followed by variable Challenge length/value and DH length/value */
174};
175
176
177enum auth_rjt_codes {
178 FC_AUTH_RJT_CODE_AUTH_FAILURE = 0x01,
179 FC_AUTH_RJT_CODE_LOGICAL_ERR = 0x02,
180};
181
182enum auth_rjt_code_exps {
183 FC_AUTH_CEXP_AUTH_MECH_NOT_USABLE = 0x01,
184 FC_AUTH_CEXP_DH_GROUP_NOT_USABLE = 0x02,
185 FC_AUTH_CEXP_HASH_FUNC_NOT_USABLE = 0x03,
186 FC_AUTH_CEXP_AUTH_XACT_STARTED = 0x04,
187 FC_AUTH_CEXP_AUTH_FAILED = 0x05,
188 FC_AUTH_CEXP_INCORRECT_PLD = 0x06,
189 FC_AUTH_CEXP_INCORRECT_PROTO_MSG = 0x07,
190 FC_AUTH_CEXP_RESTART_AUTH_PROTO = 0x08,
191 FC_AUTH_CEXP_AUTH_CONCAT_NOT_SUPP = 0x09,
192 FC_AUTH_CEXP_PROTO_VER_NOT_SUPP = 0x0A,
193};
194
195enum auth_status {
196 FC_AUTH_STATE_INPROGRESS = 0, /*! authentication in progress */
197 FC_AUTH_STATE_FAILED = 1, /*! authentication failed */
198 FC_AUTH_STATE_SUCCESS = 2 /*! authentication successful */
199};
200
201struct auth_rjt_msg_s {
202 struct auth_els_msg_s hdr;
203 u8 reason_code;
204 u8 reason_code_exp;
205 u8 rsvd[2];
206};
207
208
209struct auth_dhchap_neg_msg_s {
210 struct auth_els_msg_s hdr;
211 struct auth_neg_msg_s nego;
212};
213
214struct auth_dhchap_reply_msg_s {
215 struct auth_els_msg_s hdr;
216
217 /*
218 * followed by response value length & Value + DH Value Length & Value
219 */
220};
221
222#pragma pack()
223
224#endif /* __FC_SP_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/fcp.h b/drivers/scsi/bfa/include/protocol/fcp.h
new file mode 100644
index 000000000000..9ade68ad2853
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/fcp.h
@@ -0,0 +1,186 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCPPROTO_H__
19#define __FCPPROTO_H__
20
21#include <protocol/scsi.h>
22
23#pragma pack(1)
24
25enum {
26 FCP_RJT = 0x01000000, /* SRR reject */
27 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
28 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
29};
30
31/*
32 * SRR FC-4 LS payload
33 */
34struct fc_srr_s{
35 u32 ls_cmd;
36 u32 ox_id:16; /* ox-id */
37 u32 rx_id:16; /* rx-id */
38 u32 ro; /* relative offset */
39 u32 r_ctl:8; /* R_CTL for I.U. */
40 u32 res:24;
41};
42
43
44/*
45 * FCP_CMND definitions
46 */
47#define FCP_CMND_CDB_LEN 16
48#define FCP_CMND_LUN_LEN 8
49
/*
 * FCP_CMND IU payload.  Byte-packed wire format; bit-field order is
 * flipped per endianness so the on-wire layout is identical.
 */
struct fcp_cmnd_s{
	lun_t		lun;		/* 64-bit LU number */
	u8		crn;		/* command reference number */
#ifdef __BIGENDIAN
	u8		resvd:1,
			priority:4,	/* FCP-3: SAM-3 priority */
			taskattr:3;	/* scsi task attribute */
#else
	u8		taskattr:3,	/* scsi task attribute */
			priority:4,	/* FCP-3: SAM-3 priority */
			resvd:1;
#endif
	u8		tm_flags;	/* task management flags, see fcp_tm_cmnd */
#ifdef __BIGENDIAN
	u8		addl_cdb_len:6,	/* additional CDB length words */
			iodir:2;	/* read/write FCP_DATA IUs, see fcp_iodir */
#else
	u8		iodir:2,	/* read/write FCP_DATA IUs, see fcp_iodir */
			addl_cdb_len:6;	/* additional CDB length */
#endif
	struct scsi_cdb_s	cdb;

	/*
	 * !!! additional cdb bytes follows here!!!
	 * When addl_cdb_len != 0, fcp_dl is displaced on the wire and
	 * must be read through fcp_cmnd_fcpdl(), not directly.
	 */
	u32	fcp_dl;	/* bytes to be transferred */
};
77
/* Total CDB length in bytes: fixed 16 bytes + additional CDB words. */
#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
/*
 * True wire location of FCP_DL: the additional CDB words shift it
 * addl_cdb_len u32s past the struct's fcp_dl member.
 */
#define fcp_cmnd_fcpdl(_cmnd)	((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
80
81/*
82 * fcp_cmnd_t.iodir field values
83 */
84enum fcp_iodir{
85 FCP_IODIR_NONE = 0,
86 FCP_IODIR_WRITE = 1,
87 FCP_IODIR_READ = 2,
88 FCP_IODIR_RW = 3,
89};
90
91/*
92 * Task attribute field
93 */
94enum {
95 FCP_TASK_ATTR_SIMPLE = 0,
96 FCP_TASK_ATTR_HOQ = 1,
97 FCP_TASK_ATTR_ORDERED = 2,
98 FCP_TASK_ATTR_ACA = 4,
99 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
100};
101
102/*
103 * Task management flags field - only one bit shall be set
104 */
#ifndef BIT
/* 1U (not 1): a signed left shift into bit 31 is undefined behavior. */
#define BIT(_x)	(1U << (_x))
#endif
/* FCP_CMND tm_flags values - only one bit shall be set. */
enum fcp_tm_cmnd{
	FCP_TM_ABORT_TASK_SET	= BIT(1),
	FCP_TM_CLEAR_TASK_SET	= BIT(2),
	FCP_TM_LUN_RESET	= BIT(4),
	FCP_TM_TARGET_RESET	= BIT(5),	/* obsolete in FCP-3 */
	FCP_TM_CLEAR_ACA	= BIT(6),
};
115
116/*
117 * FCP_XFER_RDY IU defines
118 */
119struct fcp_xfer_rdy_s{
120 u32 data_ro;
121 u32 burst_len;
122 u32 reserved;
123};
124
125/*
126 * FCP_RSP residue flags
127 */
128enum fcp_residue{
129 FCP_NO_RESIDUE = 0, /* no residue */
130 FCP_RESID_OVER = 1, /* more data left that was not sent */
131 FCP_RESID_UNDER = 2, /* less data than requested */
132};
133
134enum {
135 FCP_RSPINFO_GOOD = 0,
136 FCP_RSPINFO_DATALEN_MISMATCH = 1,
137 FCP_RSPINFO_CMND_INVALID = 2,
138 FCP_RSPINFO_ROLEN_MISMATCH = 3,
139 FCP_RSPINFO_TM_NOT_SUPP = 4,
140 FCP_RSPINFO_TM_FAILED = 5,
141};
142
143struct fcp_rspinfo_s{
144 u32 res0:24;
145 u32 rsp_code:8; /* response code (as above) */
146 u32 res1;
147};
148
/*
 * FCP_RSP IU payload.  sns_len/rsp_len are only meaningful when the
 * corresponding *_valid flag is set; use fcp_snslen()/fcp_rsplen().
 */
struct fcp_resp_s{
	u32		reserved[2];	/* 2 words reserved */
	u16		reserved2;
#ifdef __BIGENDIAN
	u8		reserved3:3;
	u8		fcp_conf_req:1;	/* FCP_CONF is requested */
	u8		resid_flags:2;	/* underflow/overflow, see fcp_residue */
	u8		sns_len_valid:1;/* sense len is valid */
	u8		rsp_len_valid:1;/* response len is valid */
#else
	u8		rsp_len_valid:1;/* response len is valid */
	u8		sns_len_valid:1;/* sense len is valid */
	u8		resid_flags:2;	/* underflow/overflow, see fcp_residue */
	u8		fcp_conf_req:1;	/* FCP_CONF is requested */
	u8		reserved3:3;
#endif
	u8		scsi_status;	/* one byte SCSI status */
	u32		residue;	/* residual data bytes */
	u32		sns_len;	/* length of sense info */
	u32		rsp_len;	/* length of response info */
};
170
171#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
172 (__fcprsp)->sns_len : 0)
173#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
174 (__fcprsp)->rsp_len : 0)
175#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
176#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
177 fcp_rsplen(__fcprsp))
178
179struct fcp_cmnd_fr_s{
180 struct fchs_s fchs;
181 struct fcp_cmnd_s fcp;
182};
183
184#pragma pack()
185
186#endif
diff --git a/drivers/scsi/bfa/include/protocol/fdmi.h b/drivers/scsi/bfa/include/protocol/fdmi.h
new file mode 100644
index 000000000000..6c05c268c71b
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/fdmi.h
@@ -0,0 +1,163 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FDMI_H__
19#define __FDMI_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23#include <protocol/ct.h>
24
25#pragma pack(1)
26
27/*
28 * FDMI Command Codes
29 */
30#define FDMI_GRHL 0x0100
31#define FDMI_GHAT 0x0101
32#define FDMI_GRPL 0x0102
33#define FDMI_GPAT 0x0110
34#define FDMI_RHBA 0x0200
35#define FDMI_RHAT 0x0201
36#define FDMI_RPRT 0x0210
37#define FDMI_RPA 0x0211
38#define FDMI_DHBA 0x0300
39#define FDMI_DPRT 0x0310
40
41/*
42 * FDMI reason codes
43 */
#define FDMI_NO_ADDITIONAL_EXP		0x00
#define FDMI_HBA_ALREADY_REG		0x10
#define FDMI_HBA_ATTRIB_NOT_REG		0x11
#define FDMI_HBA_ATTRIB_MULTIPLE	0x12
#define FDMI_HBA_ATTRIB_LENGTH_INVALID	0x13
#define FDMI_HBA_ATTRIB_NOT_PRESENT	0x14
#define FDMI_PORT_ORIG_NOT_IN_LIST	0x15
#define FDMI_PORT_HBA_NOT_IN_LIST	0x16
#define FDMI_PORT_ATTRIB_NOT_REG	0x20
#define FDMI_PORT_NOT_REG		0x21
#define FDMI_PORT_ATTRIB_MULTIPLE	0x22
#define FDMI_PORT_ATTRIB_LENGTH_INVALID	0x23
/* NOTE(review): misspells "REGISTERED"; name kept -- it is public API. */
#define FDMI_PORT_ALREADY_REGISTEREED	0x24
57
58/*
59 * FDMI Transmission Speed Mask values
60 */
61#define FDMI_TRANS_SPEED_1G 0x00000001
62#define FDMI_TRANS_SPEED_2G 0x00000002
63#define FDMI_TRANS_SPEED_10G 0x00000004
64#define FDMI_TRANS_SPEED_4G 0x00000008
65#define FDMI_TRANS_SPEED_8G 0x00000010
66#define FDMI_TRANS_SPEED_16G 0x00000020
67#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
68
69/*
70 * FDMI HBA attribute types
71 */
72enum fdmi_hba_attribute_type {
73 FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
74 FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
75 FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
76 FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
77 FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
78 FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
79 FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
80 FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
81 FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
82 FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
83 FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
84
85 FDMI_HBA_ATTRIB_MAX_TYPE
86};
87
88/*
89 * FDMI Port attribute types
90 */
91enum fdmi_port_attribute_type {
92 FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
93 FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
94 FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
95 FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
96 FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
97 FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
98
99 FDMI_PORT_ATTR_MAX_TYPE
100};
101
102/*
103 * FDMI attribute
104 */
105struct fdmi_attr_s {
106 u16 type;
107 u16 len;
108 u8 value[1];
109};
110
111/*
112 * HBA Attribute Block
113 */
114struct fdmi_hba_attr_s {
115 u32 attr_count; /* # of attributes */
116 struct fdmi_attr_s hba_attr; /* n attributes */
117};
118
119/*
120 * Registered Port List
121 */
122struct fdmi_port_list_s {
123 u32 num_ports; /* number Of Port Entries */
124 wwn_t port_entry; /* one or more */
125};
126
127/*
128 * Port Attribute Block
129 */
130struct fdmi_port_attr_s {
131 u32 attr_count; /* # of attributes */
132 struct fdmi_attr_s port_attr; /* n attributes */
133};
134
135/*
136 * FDMI Register HBA Attributes
137 */
138struct fdmi_rhba_s {
139 wwn_t hba_id; /* HBA Identifier */
140 struct fdmi_port_list_s port_list; /* Registered Port List */
141 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
142};
143
144/*
145 * FDMI Register Port
146 */
147struct fdmi_rprt_s {
148 wwn_t hba_id; /* HBA Identifier */
149 wwn_t port_name; /* Port wwn */
150 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
151};
152
153/*
154 * FDMI Register Port Attributes
155 */
156struct fdmi_rpa_s {
157 wwn_t port_name; /* port wwn */
158 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
159};
160
161#pragma pack()
162
163#endif
diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h
new file mode 100644
index 000000000000..6830dc3ee58a
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/pcifw.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * pcifw.h PCI FW related headers
20 */
21
22#ifndef __PCIFW_H__
23#define __PCIFW_H__
24
25#pragma pack(1)
26
27struct pnp_hdr_s{
28 u32 signature; /* "$PnP" */
29 u8 rev; /* Struct revision */
30 u8 len; /* Header structure len in multiples
31 * of 16 bytes */
32 u16 off; /* Offset to next header 00 if none */
33 u8 rsvd; /* Reserved byte */
34 u8 cksum; /* 8-bit checksum for this header */
35 u32 pnp_dev_id; /* PnP Device Id */
36 u16 mfstr; /* Pointer to manufacturer string */
37 u16 prstr; /* Pointer to product string */
38 u8 devtype[3]; /* Device Type Code */
39 u8 devind; /* Device Indicator */
40 u16 bcventr; /* Bootstrap entry vector */
41 u16 rsvd2; /* Reserved */
42 u16 sriv; /* Static resource information vector */
43};
44
45struct pci_3_0_ds_s{
46 u32 sig; /* Signature "PCIR" */
47 u16 vendid; /* Vendor ID */
48 u16 devid; /* Device ID */
49 u16 devlistoff; /* Device List Offset */
50 u16 len; /* PCI Data Structure Length */
51 u8 rev; /* PCI Data Structure Revision */
52 u8 clcode[3]; /* Class Code */
53 u16 imglen; /* Code image length in multiples of
54 * 512 bytes */
55 u16 coderev; /* Revision level of code/data */
56 u8 codetype; /* Code type 0x00 - BIOS */
57 u8 indr; /* Last image indicator */
58 u16 mrtimglen; /* Max Run Time Image Length */
59 u16 cuoff; /* Config Utility Code Header Offset */
60 u16 dmtfclp; /* DMTF CLP entry point offset */
61};
62
63struct pci_optrom_hdr_s{
64 u16 sig; /* Signature 0x55AA */
65 u8 len; /* Option ROM length in units of 512 bytes */
66 u8 inivec[3]; /* Initialization vector */
67 u8 rsvd[16]; /* Reserved field */
68 u16 verptr; /* Pointer to version string - private */
69 u16 pcids; /* Pointer to PCI data structure */
70 u16 pnphdr; /* Pointer to PnP expansion header */
71};
72
73#pragma pack()
74
75#endif
diff --git a/drivers/scsi/bfa/include/protocol/scsi.h b/drivers/scsi/bfa/include/protocol/scsi.h
new file mode 100644
index 000000000000..b220e6b4f6e1
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/scsi.h
@@ -0,0 +1,1648 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __SCSI_H__
19#define __SCSI_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25/*
26 * generic SCSI cdb definition
27 */
28#define SCSI_MAX_CDBLEN 16
29struct scsi_cdb_s{
30 u8 scsi_cdb[SCSI_MAX_CDBLEN];
31};
32
33/*
34 * scsi lun serial number definition
35 */
36#define SCSI_LUN_SN_LEN 32
37struct scsi_lun_sn_s{
38 u8 lun_sn[SCSI_LUN_SN_LEN];
39};
40
41/*
42 * SCSI Direct Access Commands
43 */
enum {
	SCSI_OP_TEST_UNIT_READY		= 0x00,
	SCSI_OP_REQUEST_SENSE		= 0x03,
	SCSI_OP_FORMAT_UNIT		= 0x04,
	SCSI_OP_READ6			= 0x08,
	SCSI_OP_WRITE6			= 0x0A,
	SCSI_OP_WRITE_FILEMARKS		= 0x10,
	SCSI_OP_INQUIRY			= 0x12,
	SCSI_OP_MODE_SELECT6		= 0x15,
	SCSI_OP_RESERVE6		= 0x16,
	SCSI_OP_RELEASE6		= 0x17,
	SCSI_OP_MODE_SENSE6		= 0x1A,
	SCSI_OP_START_STOP_UNIT		= 0x1B,
	SCSI_OP_SEND_DIAGNOSTIC	= 0x1D,
	SCSI_OP_READ_CAPACITY		= 0x25,
	SCSI_OP_READ10			= 0x28,
	SCSI_OP_WRITE10			= 0x2A,
	SCSI_OP_VERIFY10		= 0x2F,
	SCSI_OP_READ_DEFECT_DATA	= 0x37,
	SCSI_OP_LOG_SELECT		= 0x4C,
	SCSI_OP_LOG_SENSE		= 0x4D,
	SCSI_OP_MODE_SELECT10		= 0x55,
	SCSI_OP_RESERVE10		= 0x56,
	SCSI_OP_RELEASE10		= 0x57,
	SCSI_OP_MODE_SENSE10		= 0x5A,
	SCSI_OP_PER_RESERVE_IN		= 0x5E,	/* PERSISTENT RESERVE IN */
	/*
	 * Fix: PERSISTENT RESERVE OUT is 0x5F per SPC; this previously
	 * duplicated the 0x5E PERSISTENT RESERVE IN opcode.  (The
	 * identifier misspells "OUT" but is kept -- it is public API.)
	 */
	SCSI_OP_PER_RESERVE_OUR		= 0x5F,
	SCSI_OP_READ16			= 0x88,
	SCSI_OP_WRITE16			= 0x8A,
	SCSI_OP_VERIFY16		= 0x8F,
	SCSI_OP_READ_CAPACITY16	= 0x9E,
	SCSI_OP_REPORT_LUNS		= 0xA0,
	SCSI_OP_READ12			= 0xA8,
	SCSI_OP_WRITE12			= 0xAA,
	SCSI_OP_UNDEF			= 0xFF,
};
80
81/*
82 * SCSI START_STOP_UNIT command
83 */
84struct scsi_start_stop_unit_s{
85 u8 opcode;
86#ifdef __BIGENDIAN
87 u8 lun:3;
88 u8 reserved1:4;
89 u8 immed:1;
90#else
91 u8 immed:1;
92 u8 reserved1:4;
93 u8 lun:3;
94#endif
95 u8 reserved2;
96 u8 reserved3;
97#ifdef __BIGENDIAN
98 u8 power_conditions:4;
99 u8 reserved4:2;
100 u8 loEj:1;
101 u8 start:1;
102#else
103 u8 start:1;
104 u8 loEj:1;
105 u8 reserved4:2;
106 u8 power_conditions:4;
107#endif
108 u8 control;
109};
110
111/*
112 * SCSI SEND_DIAGNOSTIC command
113 */
114struct scsi_send_diagnostic_s{
115 u8 opcode;
116#ifdef __BIGENDIAN
117 u8 self_test_code:3;
118 u8 pf:1;
119 u8 reserved1:1;
120 u8 self_test:1;
121 u8 dev_offl:1;
122 u8 unit_offl:1;
123#else
124 u8 unit_offl:1;
125 u8 dev_offl:1;
126 u8 self_test:1;
127 u8 reserved1:1;
128 u8 pf:1;
129 u8 self_test_code:3;
130#endif
131 u8 reserved2;
132
133 u8 param_list_length[2]; /* MSB first */
134 u8 control;
135
136};
137
138/*
139 * SCSI READ10/WRITE10 commands
140 */
141struct scsi_rw10_s{
142 u8 opcode;
143#ifdef __BIGENDIAN
144 u8 lun:3;
145 u8 dpo:1; /* Disable Page Out */
146 u8 fua:1; /* Force Unit Access */
147 u8 reserved1:2;
148 u8 rel_adr:1; /* relative address */
149#else
150 u8 rel_adr:1;
151 u8 reserved1:2;
152 u8 fua:1;
153 u8 dpo:1;
154 u8 lun:3;
155#endif
156 u8 lba0; /* logical block address - MSB */
157 u8 lba1;
158 u8 lba2;
159 u8 lba3; /* LSB */
160 u8 reserved3;
161 u8 xfer_length0; /* transfer length in blocks - MSB */
162 u8 xfer_length1; /* LSB */
163 u8 control;
164};
165
/* Extract the 32-bit big-endian LBA from a 10-byte CDB. */
#define SCSI_CDB10_GET_LBA(cdb)                                 \
	(((cdb)->lba0 << 24) | ((cdb)->lba1 << 16) |            \
	 ((cdb)->lba2 << 8) | (cdb)->lba3)

/*
 * Store a 32-bit LBA big-endian into a 10-byte CDB.
 * do { } while (0) so the macro is a single statement (safe in
 * unbraced if/else); arguments parenthesized against precedence bugs.
 */
#define SCSI_CDB10_SET_LBA(cdb, lba) do {                       \
	(cdb)->lba0 = (lba) >> 24;                              \
	(cdb)->lba1 = ((lba) >> 16) & 0xFF;                     \
	(cdb)->lba2 = ((lba) >> 8) & 0xFF;                      \
	(cdb)->lba3 = (lba) & 0xFF;                             \
} while (0)

/* Transfer length (blocks), big-endian 16-bit. */
#define SCSI_CDB10_GET_TL(cdb)  \
	((cdb)->xfer_length0 << 8 | (cdb)->xfer_length1)
#define SCSI_CDB10_SET_TL(cdb, tl) do {                         \
	(cdb)->xfer_length0 = (tl) >> 8;                        \
	(cdb)->xfer_length1 = (tl) & 0xFF;                      \
} while (0)
183
/*
 * SCSI READ6/WRITE6 commands: 21-bit LBA (5 bits in lba0, then lba1/lba2).
 */
struct scsi_rw6_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	lun:3;
	u8	lba0:5;		/* MSb */
#else
	u8	lba0:5;		/* MSb */
	u8	lun:3;
#endif
	u8	lba1;
	u8	lba2;		/* LSB */
	u8	xfer_length;
	u8	control;
};
201
/*
 * 24-bit transfer-length accessors for the sequential (tape) write CDB
 * (struct scsi_tape_wr_s, tl0 = MSB).  Arguments are parenthesized and
 * the setter is wrapped in do { } while (0) for safe expansion.
 */
#define SCSI_TAPE_CDB6_GET_TL(cdb) \
	(((cdb)->tl0 << 16) | ((cdb)->tl1 << 8) | (cdb)->tl2)

#define SCSI_TAPE_CDB6_SET_TL(cdb, tl) do { \
	(cdb)->tl0 = (tl) >> 16; \
	(cdb)->tl1 = ((tl) >> 8) & 0xFF; \
	(cdb)->tl2 = (tl) & 0xFF; \
} while (0)
210
/*
 * SCSI sequential (TAPE) write command.
 */
struct scsi_tape_wr_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	rsvd:7;
	u8	fixed:1;	/* MSb */
#else
	u8	fixed:1;	/* MSb */
	u8	rsvd:7;
#endif
	u8	tl0;		/* transfer length - MSB */
	u8	tl1;
	u8	tl2;		/* LSB */

	u8	control;
};
229
/*
 * 21-bit LBA and 8-bit transfer-length accessors for 6-byte CDBs
 * (struct scsi_rw6_s).  Arguments are parenthesized and the setters
 * use do { } while (0) so they are single-statement safe.
 */
#define SCSI_CDB6_GET_LBA(cdb) \
	(((cdb)->lba0 << 16) | ((cdb)->lba1 << 8) | (cdb)->lba2)

#define SCSI_CDB6_SET_LBA(cdb, lba) do { \
	(cdb)->lba0 = (lba) >> 16; \
	(cdb)->lba1 = ((lba) >> 8) & 0xFF; \
	(cdb)->lba2 = (lba) & 0xFF; \
} while (0)

#define SCSI_CDB6_GET_TL(cdb) ((cdb)->xfer_length)
#define SCSI_CDB6_SET_TL(cdb, tl) do { \
	(cdb)->xfer_length = (tl); \
} while (0)
243
/*
 * SCSI fixed-format sense data (response codes 0x70/0x71, see
 * SCSI_SENSE_CUR_ERR / SCSI_SENSE_DEF_ERR below).
 */
struct scsi_sense_s{
#ifdef __BIGENDIAN
	u8	valid:1;	/* 'information' field is valid */
	u8	rsp_code:7;
#else
	u8	rsp_code:7;
	u8	valid:1;	/* 'information' field is valid */
#endif
	u8	seg_num;
#ifdef __BIGENDIAN
	u8	file_mark:1;
	u8	eom:1;		/* end of media */
	u8	ili:1;		/* incorrect length indicator */
	u8	reserved:1;
	u8	sense_key:4;
#else
	u8	sense_key:4;
	u8	reserved:1;
	u8	ili:1;		/* incorrect length indicator */
	u8	eom:1;		/* end of media */
	u8	file_mark:1;
#endif
	u8	information[4];	/* device-type or command specific info */
	u8	add_sense_length;	/* additional sense length */
	u8	command_info[4];	/* command specific information */
	u8	asc;		/* additional sense code */
	u8	ascq;		/* additional sense code qualifier */
	u8	fru_code;	/* field replaceable unit code */
#ifdef __BIGENDIAN
	u8	sksv:1;		/* sense key specific valid */
	u8	c_d:1;		/* command/data bit */
	u8	res1:2;
	u8	bpv:1;		/* bit pointer valid */
	u8	bpointer:3;	/* bit pointer */
#else
	u8	bpointer:3;	/* bit pointer */
	u8	bpv:1;		/* bit pointer valid */
	u8	res1:2;
	u8	c_d:1;		/* command/data bit */
	u8	sksv:1;		/* sense key specific valid */
#endif
	u8	fpointer[2];	/* field pointer */
};
293
/* sense data rsp_code values: current vs deferred error */
#define SCSI_SENSE_CUR_ERR	0x70
#define SCSI_SENSE_DEF_ERR	0x71

/*
 * SCSI sense key values
 */
#define SCSI_SK_NO_SENSE	0x0
#define SCSI_SK_REC_ERR		0x1	/* recovered error */
#define SCSI_SK_NOT_READY	0x2
#define SCSI_SK_MED_ERR		0x3	/* medium error */
#define SCSI_SK_HW_ERR		0x4	/* hardware error */
#define SCSI_SK_ILLEGAL_REQ	0x5
#define SCSI_SK_UNIT_ATT	0x6	/* unit attention */
#define SCSI_SK_DATA_PROTECT	0x7
#define SCSI_SK_BLANK_CHECK	0x8
#define SCSI_SK_VENDOR_SPEC	0x9
#define SCSI_SK_COPY_ABORTED	0xA
#define SCSI_SK_ABORTED_CMND	0xB
#define SCSI_SK_VOL_OVERFLOW	0xD
#define SCSI_SK_MISCOMPARE	0xE

/*
 * SCSI additional sense codes (sense.asc)
 */
#define SCSI_ASC_NO_ADD_SENSE		0x00
#define SCSI_ASC_LUN_NOT_READY		0x04
#define SCSI_ASC_LUN_COMMUNICATION	0x08
#define SCSI_ASC_WRITE_ERROR		0x0C
#define SCSI_ASC_INVALID_CMND_CODE	0x20
#define SCSI_ASC_BAD_LBA		0x21
#define SCSI_ASC_INVALID_FIELD_IN_CDB	0x24
#define SCSI_ASC_LUN_NOT_SUPPORTED	0x25
#define SCSI_ASC_LUN_WRITE_PROTECT	0x27
#define SCSI_ASC_POWERON_BDR		0x29	/* power on reset, bus reset,
						 * bus device reset
						 */
#define SCSI_ASC_PARAMS_CHANGED		0x2A
#define SCSI_ASC_CMND_CLEARED_BY_A_I	0x2F
#define SCSI_ASC_SAVING_PARAM_NOTSUPP	0x39
#define SCSI_ASC_TOCC			0x3F	/* target operating conditions
						 * changed
						 */
#define SCSI_ASC_PARITY_ERROR		0x47
#define SCSI_ASC_CMND_PHASE_ERROR	0x4A
#define SCSI_ASC_DATA_PHASE_ERROR	0x4B
#define SCSI_ASC_VENDOR_SPEC		0x7F

/*
 * SCSI additional sense code qualifiers (sense.ascq).
 * Values are grouped by the ASC they qualify, so numbers repeat.
 */
#define SCSI_ASCQ_CAUSE_NOT_REPORT	0x00
#define SCSI_ASCQ_BECOMING_READY	0x01
#define SCSI_ASCQ_INIT_CMD_REQ		0x02
#define SCSI_ASCQ_FORMAT_IN_PROGRESS	0x04
#define SCSI_ASCQ_OPERATION_IN_PROGRESS	0x07
#define SCSI_ASCQ_SELF_TEST_IN_PROGRESS	0x09
#define SCSI_ASCQ_WR_UNEXP_UNSOL_DATA	0x0C
#define SCSI_ASCQ_WR_NOTENG_UNSOL_DATA	0x0D

#define SCSI_ASCQ_LBA_OUT_OF_RANGE	0x00
#define SCSI_ASCQ_INVALID_ELEMENT_ADDR	0x01

#define SCSI_ASCQ_LUN_WRITE_PROTECTED		0x00
#define SCSI_ASCQ_LUN_HW_WRITE_PROTECTED	0x01
#define SCSI_ASCQ_LUN_SW_WRITE_PROTECTED	0x02

#define SCSI_ASCQ_POR	0x01	/* power on reset */
#define SCSI_ASCQ_SBR	0x02	/* scsi bus reset */
#define SCSI_ASCQ_BDR	0x03	/* bus device reset */
#define SCSI_ASCQ_DIR	0x04	/* device internal reset */

#define SCSI_ASCQ_MODE_PARAMS_CHANGED		0x01
#define SCSI_ASCQ_LOG_PARAMS_CHANGED		0x02
#define SCSI_ASCQ_RESERVATIONS_PREEMPTED	0x03
#define SCSI_ASCQ_RESERVATIONS_RELEASED		0x04
#define SCSI_ASCQ_REGISTRATIONS_PREEMPTED	0x05

#define SCSI_ASCQ_MICROCODE_CHANGED	0x01
#define SCSI_ASCQ_CHANGED_OPER_COND	0x02
#define SCSI_ASCQ_INQ_CHANGED		0x03	/* inquiry data changed */
#define SCSI_ASCQ_DI_CHANGED		0x05	/* device id changed */
#define SCSI_ASCQ_RL_DATA_CHANGED	0x0E	/* report luns data changed */

#define SCSI_ASCQ_DP_CRC_ERR		0x01	/* data phase crc error */
#define SCSI_ASCQ_DP_SCSI_PARITY_ERR	0x02	/* data phase scsi parity error */
#define SCSI_ASCQ_IU_CRC_ERR		0x03	/* information unit crc error */
#define SCSI_ASCQ_PROTO_SERV_CRC_ERR	0x05

#define SCSI_ASCQ_LUN_TIME_OUT		0x01
384
/* ------------------------------------------------------------
 * SCSI INQUIRY
 * ------------------------------------------------------------*/

/* INQUIRY CDB */
struct scsi_inquiry_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	lun:3;
	u8	reserved1:3;
	u8	cmd_dt:1;	/* command support data */
	u8	evpd:1;		/* enable vital product data */
#else
	u8	evpd:1;		/* enable vital product data */
	u8	cmd_dt:1;	/* command support data */
	u8	reserved1:3;
	u8	lun:3;
#endif
	u8	page_code;
	u8	reserved2;
	u8	alloc_length;
	u8	control;
};
407
/* fixed-width identification fields embedded in the INQUIRY response */
struct scsi_inquiry_vendor_s{
	u8	vendor_id[8];
};

struct scsi_inquiry_prodid_s{
	u8	product_id[16];
};

struct scsi_inquiry_prodrev_s{
	u8	product_rev[4];
};
419
/*
 * SCSI standard INQUIRY response data.
 *
 * NOTE(review): the big- and little-endian halves disagree on two field
 * names — trm_iop vs terminate_iop, and soft_reset vs soft_seset (the
 * latter looks like a typo).  Code touching those fields can only build
 * for one endianness; confirm against callers and unify the names.
 */
struct scsi_inquiry_data_s{
#ifdef __BIGENDIAN
	u8	peripheral_qual:3;	/* peripheral qualifier */
	u8	device_type:5;		/* peripheral device type */

	u8	rmb:1;			/* removable medium bit */
	u8	device_type_mod:7;	/* device type modifier */

	u8	version;

	u8	aenc:1;		/* async event notification capability */
	u8	trm_iop:1;	/* terminate I/O process */
	u8	norm_aca:1;	/* normal ACA supported */
	u8	hi_support:1;	/* SCSI-3: supports REPORT LUNS */
	u8	rsp_data_format:4;

	u8	additional_len;
	u8	sccs:1;
	u8	reserved1:7;

	u8	reserved2:1;
	u8	enc_serv:1;	/* enclosure service component */
	u8	reserved3:1;
	u8	multi_port:1;	/* multi-port device */
	u8	m_chngr:1;	/* device in medium transport element */
	u8	ack_req_q:1;	/* SIP specific bit */
	u8	addr32:1;	/* SIP specific bit */
	u8	addr16:1;	/* SIP specific bit */

	u8	rel_adr:1;	/* relative address */
	u8	w_bus32:1;
	u8	w_bus16:1;
	u8	synchronous:1;
	u8	linked_commands:1;
	u8	trans_dis:1;
	u8	cmd_queue:1;	/* command queueing supported */
	u8	soft_reset:1;	/* soft reset alternative (VS) */
#else
	u8	device_type:5;	/* peripheral device type */
	u8	peripheral_qual:3;	/* peripheral qualifier */

	u8	device_type_mod:7;	/* device type modifier */
	u8	rmb:1;		/* removable medium bit */

	u8	version;

	u8	rsp_data_format:4;
	u8	hi_support:1;	/* SCSI-3: supports REPORT LUNS */
	u8	norm_aca:1;	/* normal ACA supported */
	u8	terminate_iop:1;/* terminate I/O process */
	u8	aenc:1;		/* async event notification capability */

	u8	additional_len;
	u8	reserved1:7;
	u8	sccs:1;

	u8	addr16:1;	/* SIP specific bit */
	u8	addr32:1;	/* SIP specific bit */
	u8	ack_req_q:1;	/* SIP specific bit */
	u8	m_chngr:1;	/* device in medium transport element */
	u8	multi_port:1;	/* multi-port device */
	u8	reserved3:1;	/* TBD - Vendor Specific */
	u8	enc_serv:1;	/* enclosure service component */
	u8	reserved2:1;

	u8	soft_seset:1;	/* soft reset alternative (VS) */
	u8	cmd_queue:1;	/* command queueing supported */
	u8	trans_dis:1;
	u8	linked_commands:1;
	u8	synchronous:1;
	u8	w_bus16:1;
	u8	w_bus32:1;
	u8	rel_adr:1;	/* relative address */
#endif
	struct scsi_inquiry_vendor_s	vendor_id;
	struct scsi_inquiry_prodid_s	product_id;
	struct scsi_inquiry_prodrev_s	product_rev;
	u8	vendor_specific[20];
	u8	reserved4[40];
};
504
/*
 * inquiry.peripheral_qual field values
 */
#define SCSI_DEVQUAL_DEFAULT		0
#define SCSI_DEVQUAL_NOT_CONNECTED	1
#define SCSI_DEVQUAL_NOT_SUPPORTED	3

/*
 * inquiry.device_type field values
 */
#define SCSI_DEVICE_DIRECT_ACCESS	0x00
#define SCSI_DEVICE_SEQ_ACCESS		0x01
#define SCSI_DEVICE_ARRAY_CONTROLLER	0x0C
#define SCSI_DEVICE_UNKNOWN		0x1F

/*
 * inquiry.version
 */
#define SCSI_VERSION_ANSI_X3131		2	/* ANSI X3.131 SCSI-2 */
#define SCSI_VERSION_SPC		3	/* SPC (SCSI-3), ANSI X3.301:1997 */
#define SCSI_VERSION_SPC_2		4	/* SPC-2 */

/*
 * response data format
 */
#define SCSI_RSP_DATA_FORMAT		2	/* SCSI-2 & SPC */

/*
 * SCSI inquiry page codes
 */
#define SCSI_INQ_PAGE_VPD_PAGES		0x00	/* supported vpd pages */
#define SCSI_INQ_PAGE_USN_PAGE		0x80	/* unit serial number page */
#define SCSI_INQ_PAGE_DEV_IDENT		0x83	/* device identification page */
#define SCSI_INQ_PAGES_MAX		3
540
/*
 * Supported vital product data pages (VPD page 0x00).
 */
struct scsi_inq_page_vpd_pages_s{
#ifdef __BIGENDIAN
	u8	peripheral_qual:3;
	u8	device_type:5;
#else
	u8	device_type:5;
	u8	peripheral_qual:3;
#endif
	u8	page_code;
	u8	reserved;
	u8	page_length;
	u8	pages[SCSI_INQ_PAGES_MAX];	/* list of supported page codes */
};
557
/*
 * Unit serial number (VPD page 0x80) payload.
 */
#define SCSI_INQ_USN_LEN 32

struct scsi_inq_usn_s{
	char	usn[SCSI_INQ_USN_LEN];
};
566
/* unit serial number page: VPD header followed by the serial number */
struct scsi_inq_page_usn_s{
#ifdef __BIGENDIAN
	u8	peripheral_qual:3;
	u8	device_type:5;
#else
	u8	device_type:5;
	u8	peripheral_qual:3;
#endif
	u8	page_code;
	u8	reserved1;
	u8	page_length;
	struct scsi_inq_usn_s	usn;
};
580
/* device identification descriptor: code set values */
enum {
	SCSI_INQ_DIP_CODE_BINARY = 1,	/* identifier has binary value */
	SCSI_INQ_DIP_CODE_ASCII = 2,	/* identifier has ascii value */
};

/* device identification descriptor: association values */
enum {
	SCSI_INQ_DIP_ASSOC_LUN = 0,	/* id is associated with device */
	SCSI_INQ_DIP_ASSOC_PORT = 1,	/* id is associated with port that
					 * received the request
					 */
};

/* device identification descriptor: identifier type values */
enum {
	SCSI_INQ_ID_TYPE_VENDOR = 1,
	SCSI_INQ_ID_TYPE_IEEE = 2,
	SCSI_INQ_ID_TYPE_FC_FS = 3,
	SCSI_INQ_ID_TYPE_OTHER = 4,
};
599
/* one identification descriptor within the device identification page */
struct scsi_inq_dip_desc_s{
#ifdef __BIGENDIAN
	u8	res0:4;
	u8	code_set:4;	/* SCSI_INQ_DIP_CODE_* */
	u8	res1:2;
	u8	association:2;	/* SCSI_INQ_DIP_ASSOC_* */
	u8	id_type:4;	/* SCSI_INQ_ID_TYPE_* */
#else
	u8	code_set:4;	/* SCSI_INQ_DIP_CODE_* */
	u8	res0:4;
	u8	id_type:4;	/* SCSI_INQ_ID_TYPE_* */
	u8	association:2;	/* SCSI_INQ_DIP_ASSOC_* */
	u8	res1:2;
#endif
	u8	res2;
	u8	id_len;
	struct scsi_lun_sn_s	id;	/* declared elsewhere in this header */
};
618
/*
 * Device identification page (VPD page 0x83).
 */
struct scsi_inq_page_dev_ident_s{
#ifdef __BIGENDIAN
	u8	peripheral_qual:3;
	u8	device_type:5;
#else
	u8	device_type:5;
	u8	peripheral_qual:3;
#endif
	u8	page_code;
	u8	reserved1;
	u8	page_length;
	struct scsi_inq_dip_desc_s	desc;	/* first descriptor */
};
635
/* ------------------------------------------------------------
 * READ CAPACITY
 * ------------------------------------------------------------
 */

/* READ CAPACITY(10) CDB */
struct scsi_read_capacity_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	lun:3;
	u8	reserved1:4;
	u8	rel_adr:1;
#else
	u8	rel_adr:1;
	u8	reserved1:4;
	u8	lun:3;
#endif
	u8	lba0;	/* MSB */
	u8	lba1;
	u8	lba2;
	u8	lba3;	/* LSB */
	u8	reserved2;
	u8	reserved3;
#ifdef __BIGENDIAN
	u8	reserved4:7;
	u8	pmi:1;	/* partial medium indicator */
#else
	u8	pmi:1;	/* partial medium indicator */
	u8	reserved4:7;
#endif
	u8	control;
};
667
/*
 * READ CAPACITY(10) parameter data.
 * NOTE(review): on-wire values are big-endian; presumably the caller
 * byte-swaps — confirm at the usage sites.
 */
struct scsi_read_capacity_data_s{
	u32	max_lba;	/* maximum LBA available */
	u32	block_length;	/* in bytes */
};
672
/* READ CAPACITY(16) parameter data */
struct scsi_read_capacity16_data_s{
	u64	lba;		/* maximum LBA available */
	u32	block_length;	/* in bytes */
#ifdef __BIGENDIAN
	u8	reserved1:4,
		p_type:3,	/* protection type */
		prot_en:1;	/* protection enabled */
	u8	reserved2:4,
		lb_pbe:4;	/* logical blocks per physical block
				 * exponent */
	u16	reserved3:2,
		lba_align:14;	/* lowest aligned logical block
				 * address */
#else
	u16	lba_align:14,	/* lowest aligned logical block
				 * address */
		reserved3:2;
	u8	lb_pbe:4,	/* logical blocks per physical block
				 * exponent */
		reserved2:4;
	u8	prot_en:1,	/* protection enabled */
		p_type:3,	/* protection type */
		reserved1:4;
#endif
	u64	reserved4;
	u64	reserved5;
};
700
/* ------------------------------------------------------------
 * REPORT LUNS command
 * ------------------------------------------------------------
 */

struct scsi_report_luns_s{
	u8	opcode;		/* A0h - REPORT LUNS opCode */
	u8	reserved1[5];
	u8	alloc_length[4];/* allocation length MSB first */
	u8	reserved2;
	u8	control;
};
713
/*
 * Accessors for the 4-byte MSB-first allocation length of the REPORT
 * LUNS CDB.  Fixes from review: the getter now parenthesizes its
 * argument (rl was used bare, breaking for non-trivial expressions),
 * casts the high byte to u32 so "<< 24" cannot shift into the sign bit
 * of a promoted int (undefined behavior), and the setter is wrapped in
 * do { } while (0).
 */
#define SCSI_REPORT_LUN_ALLOC_LENGTH(rl) \
	(((u32)(rl)->alloc_length[0] << 24) | ((rl)->alloc_length[1] << 16) | \
	 ((rl)->alloc_length[2] << 8) | ((rl)->alloc_length[3]))

#define SCSI_REPORT_LUNS_SET_ALLOCLEN(rl, alloc_len) do { \
	(rl)->alloc_length[0] = (alloc_len) >> 24; \
	(rl)->alloc_length[1] = ((alloc_len) >> 16) & 0xFF; \
	(rl)->alloc_length[2] = ((alloc_len) >> 8) & 0xFF; \
	(rl)->alloc_length[3] = (alloc_len) & 0xFF; \
} while (0)
724
/* REPORT LUNS parameter data header; additional LUNs follow lun[0] */
struct scsi_report_luns_data_s{
	u32	lun_list_length;	/* length of LUN list, in bytes */
	u32	reserved;
	lun_t	lun[1];		/* first LUN in lun list; variable-length
				 * trailer (pre-C99 flexible-array idiom)
				 */
};
730
/* -------------------------------------------------------------
 * SCSI mode parameters
 * -----------------------------------------------------------
 */

/* medium type values for direct access devices */
enum {
	SCSI_DA_MEDIUM_DEF = 0,	/* direct access default medium type */
	SCSI_DA_MEDIUM_SS = 1,	/* direct access single sided */
	SCSI_DA_MEDIUM_DS = 2,	/* direct access double sided */
};
740
/*
 * SCSI Mode Select(6) cdb
 */
struct scsi_mode_select6_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	reserved1:3;
	u8	pf:1;	/* page format */
	u8	reserved2:3;
	u8	sp:1;	/* save pages if set to 1 */
#else
	u8	sp:1;	/* save pages if set to 1 */
	u8	reserved2:3;
	u8	pf:1;	/* page format */
	u8	reserved1:3;
#endif
	u8	reserved3[2];
	u8	alloc_len;	/* parameter list length byte of the CDB */
	u8	control;
};
761
/*
 * SCSI Mode Select(10) cdb
 */
struct scsi_mode_select10_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	reserved1:3;
	u8	pf:1;	/* page format */
	u8	reserved2:3;
	u8	sp:1;	/* save pages if set to 1 */
#else
	u8	sp:1;	/* save pages if set to 1 */
	u8	reserved2:3;
	u8	pf:1;	/* page format */
	u8	reserved1:3;
#endif
	u8	reserved3[5];
	u8	alloc_len_msb;
	u8	alloc_len_lsb;
	u8	control;
};
783
/*
 * SCSI Mode Sense(6) cdb
 */
struct scsi_mode_sense6_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	reserved1:4;
	u8	dbd:1;	/* disable block descriptors if set to 1 */
	u8	reserved2:3;

	u8	pc:2;	/* page control */
	u8	page_code:6;
#else
	u8	reserved2:3;
	u8	dbd:1;	/* disable block descriptors if set to 1 */
	u8	reserved1:4;

	u8	page_code:6;
	u8	pc:2;	/* page control */
#endif
	u8	reserved3;
	u8	alloc_len;
	u8	control;
};
808
/*
 * SCSI Mode Sense(10) cdb
 */
struct scsi_mode_sense10_s{
	u8	opcode;
#ifdef __BIGENDIAN
	u8	reserved1:3;
	u8	LLBAA:1;	/* long LBA accepted if set to 1 */
	u8	dbd:1;		/* disable block descriptors if set to 1 */
	u8	reserved2:3;

	u8	pc:2;		/* page control */
	u8	page_code:6;
#else
	u8	reserved2:3;
	u8	dbd:1;		/* disable block descriptors if set to 1 */
	u8	LLBAA:1;	/* long LBA accepted if set to 1 */
	u8	reserved1:3;

	u8	page_code:6;
	u8	pc:2;		/* page control */
#endif
	u8	reserved3[4];
	u8	alloc_len_msb;
	u8	alloc_len_lsb;
	u8	control;
};
840
/*
 * Allocation-length accessors for the mode sense/select CDBs above.
 * Arguments are parenthesized and the setters use do { } while (0)
 * so each expands as a single safe statement.
 */
#define SCSI_CDB10_GET_AL(cdb) \
	((cdb)->alloc_len_msb << 8 | (cdb)->alloc_len_lsb)

#define SCSI_CDB10_SET_AL(cdb, al) do { \
	(cdb)->alloc_len_msb = (al) >> 8; \
	(cdb)->alloc_len_lsb = (al) & 0xFF; \
} while (0)

#define SCSI_CDB6_GET_AL(cdb) ((cdb)->alloc_len)

#define SCSI_CDB6_SET_AL(cdb, al) do { \
	(cdb)->alloc_len = (al); \
} while (0)
854
/*
 * page control (pc) field values
 */
#define SCSI_PC_CURRENT_VALUES		0x0
#define SCSI_PC_CHANGEABLE_VALUES	0x1
#define SCSI_PC_DEFAULT_VALUES		0x2
#define SCSI_PC_SAVED_VALUES		0x3

/*
 * SCSI mode page codes
 */
#define SCSI_MP_VENDOR_SPEC	0x00
#define SCSI_MP_DISC_RECN	0x02	/* disconnect-reconnect page */
#define SCSI_MP_FORMAT_DEVICE	0x03
#define SCSI_MP_RDG		0x04	/* rigid disk geometry page */
#define SCSI_MP_FDP		0x05	/* flexible disk page */
#define SCSI_MP_CACHING		0x08	/* caching page */
#define SCSI_MP_CONTROL		0x0A	/* control mode page */
#define SCSI_MP_MED_TYPES_SUP	0x0B	/* medium types supported page */
#define SCSI_MP_INFO_EXCP_CNTL	0x1C	/* informational exception control */
#define SCSI_MP_ALL		0x3F	/* return all pages - mode sense only */
876
/*
 * mode parameter header (6-byte commands)
 *
 * NOTE(review): the device-specific parameter bits are declared as u32
 * bitfields even though they describe a single byte on the wire; on
 * most ABIs that widens the struct beyond the 4-byte header.  Confirm
 * the intended in-memory layout before relying on sizeof.
 */
struct scsi_mode_param_header6_s{
	u8	mode_datalen;
	u8	medium_type;

	/*
	 * device specific parameters expanded for direct access devices
	 */
#ifdef __BIGENDIAN
	u32	wp:1;		/* write protected */
	u32	reserved1:2;
	u32	dpofua:1;	/* disable page out + force unit access */
	u32	reserved2:4;
#else
	u32	reserved2:4;
	u32	dpofua:1;	/* disable page out + force unit access */
	u32	reserved1:2;
	u32	wp:1;		/* write protected */
#endif

	u8	block_desclen;
};
903
/* mode parameter header (10-byte commands) */
struct scsi_mode_param_header10_s{
	u32	mode_datalen:16;
	u32	medium_type:8;

	/*
	 * device specific parameters expanded for direct access devices
	 */
#ifdef __BIGENDIAN
	u32	wp:1;		/* write protected */
	u32	reserved1:2;
	u32	dpofua:1;	/* disable page out + force unit access */
	u32	reserved2:4;
#else
	u32	reserved2:4;
	u32	dpofua:1;	/* disable page out + force unit access */
	u32	reserved1:2;
	u32	wp:1;		/* write protected */
#endif

#ifdef __BIGENDIAN
	u32	reserved3:7;
	u32	longlba:1;
#else
	u32	longlba:1;
	u32	reserved3:7;
#endif
	u32	reserved4:8;
	u32	block_desclen:16;
};
935
/*
 * mode parameter block descriptor
 */
struct scsi_mode_param_desc_s{
	u32	nblks;			/* number of blocks */
	u32	density_code:8;
	u32	block_length:24;
};
944
/*
 * Disconnect-reconnect mode page format (page 0x02).
 * Multi-byte limits are stored as explicit MSB/LSB byte pairs.
 */
struct scsi_mp_disc_recn_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	reserved1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	reserved1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;
	u8	buf_full_ratio;
	u8	buf_empty_ratio;

	u8	bil_msb;	/* bus inactivity limit -MSB */
	u8	bil_lsb;	/* bus inactivity limit -LSB */

	u8	dtl_msb;	/* disconnect time limit - MSB */
	u8	dtl_lsb;	/* disconnect time limit - LSB */

	u8	ctl_msb;	/* connect time limit - MSB */
	u8	ctl_lsb;	/* connect time limit - LSB */

	u8	max_burst_len_msb;
	u8	max_burst_len_lsb;
#ifdef __BIGENDIAN
	u8	emdp:1;	/* enable modify data pointers */
	u8	fa:3;	/* fair arbitration */
	u8	dimm:1;	/* disconnect immediate */
	u8	dtdc:3;	/* data transfer disconnect control */
#else
	u8	dtdc:3;	/* data transfer disconnect control */
	u8	dimm:1;	/* disconnect immediate */
	u8	fa:3;	/* fair arbitration */
	u8	emdp:1;	/* enable modify data pointers */
#endif

	u8	reserved3;

	u8	first_burst_len_msb;
	u8	first_burst_len_lsb;
};
990
/*
 * SCSI format device mode page (page 0x03).
 * 16-bit fields are expressed as u32 bitfields.
 */
struct scsi_mp_format_device_s{
#ifdef __BIGENDIAN
	u32	ps:1;		/* parameters saveable */
	u32	reserved1:1;
	u32	page_code:6;
#else
	u32	page_code:6;
	u32	reserved1:1;
	u32	ps:1;		/* parameters saveable */
#endif
	u32	page_len:8;
	u32	tracks_per_zone:16;

	u32	a_sec_per_zone:16;	/* alternate sectors per zone */
	u32	a_tracks_per_zone:16;	/* alternate tracks per zone */

	u32	a_tracks_per_lun:16;	/* alternate tracks/lun-MSB */
	u32	sec_per_track:16;	/* sectors/track-MSB */

	u32	bytes_per_sector:16;
	u32	interleave:16;

	u32	tsf:16;		/* track skew factor-MSB */
	u32	csf:16;		/* cylinder skew factor-MSB */

#ifdef __BIGENDIAN
	u32	ssec:1;		/* soft sector formatting */
	u32	hsec:1;		/* hard sector formatting */
	u32	rmb:1;		/* removable media */
	u32	surf:1;		/* surface */
	u32	reserved2:4;
#else
	u32	reserved2:4;
	u32	surf:1;		/* surface */
	u32	rmb:1;		/* removable media */
	u32	hsec:1;		/* hard sector formatting */
	u32	ssec:1;		/* soft sector formatting */
#endif
	u32	reserved3:24;
};
1034
/*
 * SCSI rigid disk device geometry page (page 0x04).
 * 3-byte quantities are split into numbered byte fields (e.g.
 * num_cylinders0..2, MSB first).
 */
struct scsi_mp_rigid_device_geometry_s{
#ifdef __BIGENDIAN
	u32	ps:1;		/* parameters saveable */
	u32	reserved1:1;
	u32	page_code:6;
#else
	u32	page_code:6;
	u32	reserved1:1;
	u32	ps:1;		/* parameters saveable */
#endif
	u32	page_len:8;
	u32	num_cylinders0:8;
	u32	num_cylinders1:8;

	u32	num_cylinders2:8;
	u32	num_heads:8;
	u32	scwp0:8;	/* starting cylinder - write precompensation */
	u32	scwp1:8;

	u32	scwp2:8;
	u32	scrwc0:8;	/* starting cylinder - reduced write current */
	u32	scrwc1:8;
	u32	scrwc2:8;

	u32	dsr:16;		/* device step rate */
	u32	lscyl0:8;	/* landing zone cylinder */
	u32	lscyl1:8;

	u32	lscyl2:8;
#ifdef __BIGENDIAN
	u32	reserved2:6;
	u32	rpl:2;		/* rotational position locking */
#else
	u32	rpl:2;		/* rotational position locking */
	u32	reserved2:6;
#endif
	u32	rot_off:8;	/* rotational offset */
	u32	reserved3:8;

	u32	med_rot_rate:16;	/* medium rotation rate */
	u32	reserved4:16;
};
1080
/*
 * SCSI caching mode page (page 0x08).
 */
struct scsi_mp_caching_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	res1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	res1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;
#ifdef __BIGENDIAN
	u8	ic:1;	/* initiator control */
	u8	abpf:1;	/* abort pre-fetch */
	u8	cap:1;	/* caching analysis permitted */
	u8	disc:1;	/* discontinuity */
	u8	size:1;	/* size enable */
	u8	wce:1;	/* write cache enable */
	u8	mf:1;	/* multiplication factor */
	u8	rcd:1;	/* read cache disable */

	u8	drrp:4;	/* demand read retention priority */
	u8	wrp:4;	/* write retention priority */
#else
	u8	rcd:1;	/* read cache disable */
	u8	mf:1;	/* multiplication factor */
	u8	wce:1;	/* write cache enable */
	u8	size:1;	/* size enable */
	u8	disc:1;	/* discontinuity */
	u8	cap:1;	/* caching analysis permitted */
	u8	abpf:1;	/* abort pre-fetch */
	u8	ic:1;	/* initiator control */

	u8	wrp:4;	/* write retention priority */
	u8	drrp:4;	/* demand read retention priority */
#endif
	u8	dptl[2];	/* disable pre-fetch transfer length */
	u8	min_prefetch[2];
	u8	max_prefetch[2];
	u8	max_prefetch_limit[2];
#ifdef __BIGENDIAN
	u8	fsw:1;		/* force sequential write */
	u8	lbcss:1;	/* logical block cache segment size */
	u8	dra:1;		/* disable read ahead */
	u8	vs:2;		/* vendor specific */
	u8	res2:3;
#else
	u8	res2:3;
	u8	vs:2;		/* vendor specific */
	u8	dra:1;		/* disable read ahead */
	u8	lbcss:1;	/* logical block cache segment size */
	u8	fsw:1;		/* force sequential write */
#endif
	u8	num_cache_segs;

	u8	cache_seg_size[2];
	u8	res3;
	u8	non_cache_seg_size[3];
};
1143
/*
 * SCSI control mode page (page 0x0A).
 */
struct scsi_mp_control_page_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	reserved1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	reserved1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;
#ifdef __BIGENDIAN
	u8	tst:3;		/* task set type */
	u8	reserved3:3;
	u8	gltsd:1;	/* global logging target save disable */
	u8	rlec:1;		/* report log exception condition */

	u8	qalgo_mod:4;	/* queue algorithm modifier */
	u8	reserved4:1;
	u8	qerr:2;		/* queue error management */
	u8	dque:1;		/* disable queuing */

	u8	reserved5:1;
	u8	rac:1;		/* report a check */
	u8	reserved6:2;
	u8	swp:1;		/* software write protect */
	u8	raerp:1;	/* ready AER permission */
	u8	uaaerp:1;	/* unit attention AER permission */
	u8	eaerp:1;	/* error AER permission */

	u8	reserved7:5;
	u8	autoload_mod:3;
#else
	u8	rlec:1;		/* report log exception condition */
	u8	gltsd:1;	/* global logging target save disable */
	u8	reserved3:3;
	u8	tst:3;		/* task set type */

	u8	dque:1;		/* disable queuing */
	u8	qerr:2;		/* queue error management */
	u8	reserved4:1;
	u8	qalgo_mod:4;	/* queue algorithm modifier */

	u8	eaerp:1;	/* error AER permission */
	u8	uaaerp:1;	/* unit attention AER permission */
	u8	raerp:1;	/* ready AER permission */
	u8	swp:1;		/* software write protect */
	u8	reserved6:2;
	u8	rac:1;		/* report a check */
	u8	reserved5:1;

	u8	autoload_mod:3;
	u8	reserved7:5;
#endif
	u8	rahp_msb;	/* ready AER holdoff period - MSB */
	u8	rahp_lsb;	/* ready AER holdoff period - LSB */

	u8	busy_timeout_period_msb;
	u8	busy_timeout_period_lsb;

	u8	ext_selftest_compl_time_msb;
	u8	ext_selftest_compl_time_lsb;
};
1210
/*
 * SCSI medium types supported mode page (page 0x0B).
 */
struct scsi_mp_medium_types_sup_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	reserved1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	reserved1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;

	u8	reserved3[2];
	u8	med_type1_sup;	/* medium type one supported */
	u8	med_type2_sup;	/* medium type two supported */
	u8	med_type3_sup;	/* medium type three supported */
	u8	med_type4_sup;	/* medium type four supported */
};
1232
/*
 * SCSI informational exception control mode page (page 0x1C).
 */
struct scsi_mp_info_excpt_cntl_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	reserved1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	reserved1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;
#ifdef __BIGENDIAN
	u8	perf:1;		/* performance */
	u8	reserved3:1;
	u8	ebf:1;		/* enable background function */
	u8	ewasc:1;	/* enable warning */
	u8	dexcpt:1;	/* disable exception control */
	u8	test:1;		/* enable test device failure
				 * notification
				 */
	u8	reserved4:1;
	u8	log_error:1;

	u8	reserved5:4;
	u8	mrie:4;		/* method of reporting info
				 * exceptions
				 */
#else
	u8	log_error:1;
	u8	reserved4:1;
	u8	test:1;		/* enable test device failure
				 * notification
				 */
	u8	dexcpt:1;	/* disable exception control */
	u8	ewasc:1;	/* enable warning */
	u8	ebf:1;		/* enable background function */
	u8	reserved3:1;
	u8	perf:1;		/* performance */

	u8	mrie:4;		/* method of reporting info
				 * exceptions
				 */
	u8	reserved5:4;
#endif
	u8	interval_timer_msb;
	u8	interval_timer_lsb;

	u8	report_count_msb;
	u8	report_count_lsb;
};
1286
/*
 * Methods of reporting informational exceptions (mrie field values).
 *
 * NOTE(review): SCSI_MO_IEC_COND_REC_ERR breaks the SCSI_MP_IEC_
 * prefix pattern ("MO" looks like a typo for "MP"); renaming it would
 * break existing users, so it is left as-is.
 */
#define SCSI_MP_IEC_NO_REPORT		0x0	/* no reporting of exceptions */
#define SCSI_MP_IEC_AER			0x1	/* async event reporting */
#define SCSI_MP_IEC_UNIT_ATTN		0x2	/* generate unit attention */
#define SCSI_MO_IEC_COND_REC_ERR	0x3	/* conditionally generate recovered
						 * error
						 */
#define SCSI_MP_IEC_UNCOND_REC_ERR	0x4	/* unconditionally generate recovered
						 * error
						 */
#define SCSI_MP_IEC_NO_SENSE		0x5	/* generate no sense */
#define SCSI_MP_IEC_ON_REQUEST		0x6	/* only report exceptions on request */
1301
/*
 * SCSI flexible disk page (page 0x05).
 */
struct scsi_mp_flexible_disk_s{
#ifdef __BIGENDIAN
	u8	ps:1;		/* parameters saveable */
	u8	reserved1:1;
	u8	page_code:6;
#else
	u8	page_code:6;
	u8	reserved1:1;
	u8	ps:1;		/* parameters saveable */
#endif
	u8	page_len;

	u8	transfer_rate_msb;
	u8	transfer_rate_lsb;

	u8	num_heads;
	u8	num_sectors;

	u8	bytes_per_sector_msb;
	u8	bytes_per_sector_lsb;

	u8	num_cylinders_msb;
	u8	num_cylinders_lsb;

	u8	sc_wpc_msb;	/* starting cylinder-write
				 * precompensation msb
				 */
	u8	sc_wpc_lsb;	/* starting cylinder-write
				 * precompensation lsb
				 */
	u8	sc_rwc_msb;	/* starting cylinder-reduced write
				 * current msb
				 */
	u8	sc_rwc_lsb;	/* starting cylinder-reduced write
				 * current lsb
				 */

	u8	dev_step_rate_msb;
	u8	dev_step_rate_lsb;

	u8	dev_step_pulse_width;

	u8	head_sd_msb;	/* head settle delay msb */
	u8	head_sd_lsb;	/* head settle delay lsb */

	u8	motor_on_delay;
	u8	motor_off_delay;
#ifdef __BIGENDIAN
	u8	trdy:1;		/* true ready bit */
	u8	ssn:1;		/* start sector number bit */
	u8	mo:1;		/* motor on bit */
	u8	reserved3:5;

	u8	reserved4:4;
	u8	spc:4;		/* step pulse per cylinder */
#else
	u8	reserved3:5;
	u8	mo:1;		/* motor on bit */
	u8	ssn:1;		/* start sector number bit */
	u8	trdy:1;		/* true ready bit */

	u8	spc:4;		/* step pulse per cylinder */
	u8	reserved4:4;
#endif
	u8	write_comp;
	u8	head_load_delay;
	u8	head_unload_delay;
#ifdef __BIGENDIAN
	u8	pin34:4;	/* pin34 usage */
	u8	pin2:4;		/* pin2 usage */

	u8	pin4:4;		/* pin4 usage */
	u8	pin1:4;		/* pin1 usage */
#else
	u8	pin2:4;		/* pin2 usage */
	u8	pin34:4;	/* pin34 usage */

	u8	pin1:4;		/* pin1 usage */
	u8	pin4:4;		/* pin4 usage */
#endif
	u8	med_rot_rate_msb;
	u8	med_rot_rate_lsb;

	u8	reserved5[2];
};
1390
/*
 * Complete mode-page payloads: parameter header (6- or 10-byte form),
 * one block descriptor, then the page data itself.
 */
struct scsi_mode_page_format_data6_s{
	struct scsi_mode_param_header6_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_format_device_s		format;	/* format device data */
};

struct scsi_mode_page_format_data10_s{
	struct scsi_mode_param_header10_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_format_device_s		format;	/* format device data */
};

struct scsi_mode_page_rdg_data6_s{
	struct scsi_mode_param_header6_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_rigid_device_geometry_s	rdg;	/* rigid geometry data */
};

struct scsi_mode_page_rdg_data10_s{
	struct scsi_mode_param_header10_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_rigid_device_geometry_s	rdg;	/* rigid geometry data */
};

struct scsi_mode_page_cache6_s{
	struct scsi_mode_param_header6_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_caching_s		cache;	/* cache page data */
};

struct scsi_mode_page_cache10_s{
	struct scsi_mode_param_header10_s	mph;	/* mode page header */
	struct scsi_mode_param_desc_s		desc;	/* block descriptor */
	struct scsi_mp_caching_s		cache;	/* cache page data */
};
1428
1429/* --------------------------------------------------------------
1430 * Format Unit command
1431 * ------------------------------------------------------------
1432 */
1433
1434/*
1435 * Format Unit CDB
1436 */
1437struct scsi_format_unit_s{
1438 u8 opcode;
1439#ifdef __BIGENDIAN
1440 u8 res1:3;
1441 u8 fmtdata:1; /* if set, data out phase has format
1442 * data
1443 */
1444 u8 cmplst:1; /* if set, defect list is complete */
1445 u8 def_list:3; /* format of defect descriptor is
1446 * fmtdata =1
1447 */
1448#else
1449 u8 def_list:3; /* format of defect descriptor is
1450 * fmtdata = 1
1451 */
1452 u8 cmplst:1; /* if set, defect list is complete */
1453 u8 fmtdata:1; /* if set, data out phase has format
1454 * data
1455 */
1456 u8 res1:3;
1457#endif
1458 u8 interleave_msb;
1459 u8 interleave_lsb;
1460 u8 vendor_spec;
1461 u8 control;
1462};
1463
1464/*
1465 * h
1466 */
1467struct scsi_reserve6_s{
1468 u8 opcode;
1469#ifdef __BIGENDIAN
1470 u8 reserved:3;
1471 u8 obsolete:4;
1472 u8 extent:1;
1473#else
1474 u8 extent:1;
1475 u8 obsolete:4;
1476 u8 reserved:3;
1477#endif
1478 u8 reservation_id;
1479 u16 param_list_len;
1480 u8 control;
1481};
1482
1483/*
1484 * h
1485 */
1486struct scsi_release6_s{
1487 u8 opcode;
1488#ifdef __BIGENDIAN
1489 u8 reserved1:3;
1490 u8 obsolete:4;
1491 u8 extent:1;
1492#else
1493 u8 extent:1;
1494 u8 obsolete:4;
1495 u8 reserved1:3;
1496#endif
1497 u8 reservation_id;
1498 u16 reserved2;
1499 u8 control;
1500};
1501
1502/*
1503 * h
1504 */
1505struct scsi_reserve10_s{
1506 u8 opcode;
1507#ifdef __BIGENDIAN
1508 u8 reserved1:3;
1509 u8 third_party:1;
1510 u8 reserved2:2;
1511 u8 long_id:1;
1512 u8 extent:1;
1513#else
1514 u8 extent:1;
1515 u8 long_id:1;
1516 u8 reserved2:2;
1517 u8 third_party:1;
1518 u8 reserved1:3;
1519#endif
1520 u8 reservation_id;
1521 u8 third_pty_dev_id;
1522 u8 reserved3;
1523 u8 reserved4;
1524 u8 reserved5;
1525 u16 param_list_len;
1526 u8 control;
1527};
1528
1529struct scsi_release10_s{
1530 u8 opcode;
1531#ifdef __BIGENDIAN
1532 u8 reserved1:3;
1533 u8 third_party:1;
1534 u8 reserved2:2;
1535 u8 long_id:1;
1536 u8 extent:1;
1537#else
1538 u8 extent:1;
1539 u8 long_id:1;
1540 u8 reserved2:2;
1541 u8 third_party:1;
1542 u8 reserved1:3;
1543#endif
1544 u8 reservation_id;
1545 u8 third_pty_dev_id;
1546 u8 reserved3;
1547 u8 reserved4;
1548 u8 reserved5;
1549 u16 param_list_len;
1550 u8 control;
1551};
1552
1553struct scsi_verify10_s{
1554 u8 opcode;
1555#ifdef __BIGENDIAN
1556 u8 lun:3;
1557 u8 dpo:1;
1558 u8 reserved:2;
1559 u8 bytchk:1;
1560 u8 reladdr:1;
1561#else
1562 u8 reladdr:1;
1563 u8 bytchk:1;
1564 u8 reserved:2;
1565 u8 dpo:1;
1566 u8 lun:3;
1567#endif
1568 u8 lba0;
1569 u8 lba1;
1570 u8 lba2;
1571 u8 lba3;
1572 u8 reserved1;
1573 u8 verification_len0;
1574 u8 verification_len1;
1575 u8 control_byte;
1576};
1577
1578struct scsi_request_sense_s{
1579 u8 opcode;
1580#ifdef __BIGENDIAN
1581 u8 lun:3;
1582 u8 reserved:5;
1583#else
1584 u8 reserved:5;
1585 u8 lun:3;
1586#endif
1587 u8 reserved0;
1588 u8 reserved1;
1589 u8 alloc_len;
1590 u8 control_byte;
1591};
1592
1593/* ------------------------------------------------------------
1594 * SCSI status byte values
1595 * ------------------------------------------------------------
1596 */
#define SCSI_STATUS_GOOD			0x00
#define SCSI_STATUS_CHECK_CONDITION		0x02
#define SCSI_STATUS_CONDITION_MET		0x04
#define SCSI_STATUS_BUSY			0x08
#define SCSI_STATUS_INTERMEDIATE		0x10
#define SCSI_STATUS_ICM				0x14 /* intermediate condition met */
#define SCSI_STATUS_RESERVATION_CONFLICT	0x18
#define SCSI_STATUS_COMMAND_TERMINATED		0x22
#define SCSI_STATUS_QUEUE_FULL			0x28
#define SCSI_STATUS_ACA_ACTIVE			0x30

#define SCSI_MAX_ALLOC_LEN	0xFF	/* maximum allocation length in CDBs */

#define SCSI_OP_WRITE_VERIFY10	0x2E
#define SCSI_OP_WRITE_VERIFY12	0xAE
#define SCSI_OP_UNDEF		0xFF
1615
1616/*
1617 * SCSI WRITE-VERIFY(10) command
1618 */
1619struct scsi_write_verify10_s{
1620 u8 opcode;
1621#ifdef __BIGENDIAN
1622 u8 reserved1:3;
1623 u8 dpo:1; /* Disable Page Out */
1624 u8 reserved2:1;
1625 u8 ebp:1; /* erse by-pass */
1626 u8 bytchk:1; /* byte check */
1627 u8 rel_adr:1; /* relative address */
1628#else
1629 u8 rel_adr:1; /* relative address */
1630 u8 bytchk:1; /* byte check */
1631 u8 ebp:1; /* erse by-pass */
1632 u8 reserved2:1;
1633 u8 dpo:1; /* Disable Page Out */
1634 u8 reserved1:3;
1635#endif
1636 u8 lba0; /* logical block address - MSB */
1637 u8 lba1;
1638 u8 lba2;
1639 u8 lba3; /* LSB */
1640 u8 reserved3;
1641 u8 xfer_length0; /* transfer length in blocks - MSB */
1642 u8 xfer_length1; /* LSB */
1643 u8 control;
1644};
1645
1646#pragma pack()
1647
1648#endif /* __SCSI_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/types.h b/drivers/scsi/bfa/include/protocol/types.h
new file mode 100644
index 000000000000..2875a6cced3b
--- /dev/null
+++ b/drivers/scsi/bfa/include/protocol/types.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * types.h Protocol defined base types
20 */
21
22#ifndef __TYPES_H__
23#define __TYPES_H__
24
25#include <bfa_os_inc.h>
26
27#define wwn_t u64
28#define lun_t u64
29
30#define WWN_NULL (0)
31#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
32#define FC_ALPA_MAX 128
33
34#pragma pack(1)
35
36#define MAC_ADDRLEN (6)
37struct mac_s { u8 mac[MAC_ADDRLEN]; };
38#define mac_t struct mac_s
39
40#pragma pack()
41
42#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
new file mode 100644
index 000000000000..a418dedebe9e
--- /dev/null
+++ b/drivers/scsi/bfa/loop.c
@@ -0,0 +1,422 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_loop.c vport private loop implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, LOOP);
29
30/**
31 * ALPA to LIXA bitmap mapping
32 *
33 * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
34 * is for L_bit (login required) and is filled as ALPA 0x00 here.
35 */
36static const u8 port_loop_alpa_map[] = {
37 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, /* Word 3 Bits 0..7 */
38 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, /* Word 3 Bits 8..15 */
39 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, /* Word 3 Bits 16..23 */
40 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, /* Word 3 Bits 24..31 */
41
42 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, /* Word 2 Bits 0..7 */
43 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, /* Word 2 Bits 8..15 */
44 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, /* Word 2 Bits 16..23 */
45 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, /* Word 2 Bits 24..31 */
46
47 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, /* Word 1 Bits 0..7 */
48 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, /* Word 1 Bits 8..15 */
49 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, /* Word 1 Bits 16..23 */
50 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, /* Word 1 Bits 24..31 */
51
52 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, /* Word 0 Bits 0..7 */
53 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, /* Word 0 Bits 8..15 */
54 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, /* Word 0 Bits 16..23 */
55 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01, 0x00, 0x00, /* Word 0 Bits 24..31 */
56};
57
58/*
59 * Local Functions
60 */
61bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
62 u8 alpa);
63
64void bfa_fcs_port_loop_plogi_response(void *fcsarg,
65 struct bfa_fcxp_s *fcxp,
66 void *cbarg,
67 bfa_status_t req_status,
68 u32 rsp_len,
69 u32 resid_len,
70 struct fchs_s *rsp_fchs);
71
72bfa_status_t bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port,
73 u8 alpa);
74
75void bfa_fcs_port_loop_adisc_response(void *fcsarg,
76 struct bfa_fcxp_s *fcxp,
77 void *cbarg,
78 bfa_status_t req_status,
79 u32 rsp_len,
80 u32 resid_len,
81 struct fchs_s *rsp_fchs);
82
83bfa_status_t bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port,
84 u8 alpa);
85
86void bfa_fcs_port_loop_plogi_acc_response(void *fcsarg,
87 struct bfa_fcxp_s *fcxp,
88 void *cbarg,
89 bfa_status_t req_status,
90 u32 rsp_len,
91 u32 resid_len,
92 struct fchs_s *rsp_fchs);
93
94bfa_status_t bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port,
95 u8 alpa);
96
97void bfa_fcs_port_loop_adisc_acc_response(void *fcsarg,
98 struct bfa_fcxp_s *fcxp,
99 void *cbarg,
100 bfa_status_t req_status,
101 u32 rsp_len,
102 u32 resid_len,
103 struct fchs_s *rsp_fchs);
/**
 * Called by port to initialize in private LOOP topology.
 */
void
bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port)
{
	/* nothing to set up for private loop today */
}
111
112/**
113 * Called by port to notify transition to online state.
114 */
115void
116bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port)
117{
118
119 u8 num_alpa = port->port_topo.ploop.num_alpa;
120 u8 *alpa_pos_map = port->port_topo.ploop.alpa_pos_map;
121 struct bfa_fcs_rport_s *r_port;
122 int ii = 0;
123
124 /*
125 * If the port role is Initiator Mode, create Rports.
126 */
127 if (port->port_cfg.roles == BFA_PORT_ROLE_FCP_IM) {
128 /*
129 * Check if the ALPA positional bitmap is available.
130 * if not, we send PLOGI to all possible ALPAs.
131 */
132 if (num_alpa > 0) {
133 for (ii = 0; ii < num_alpa; ii++) {
134 /*
135 * ignore ALPA of bfa port
136 */
137 if (alpa_pos_map[ii] != port->pid) {
138 r_port = bfa_fcs_rport_create(port,
139 alpa_pos_map[ii]);
140 }
141 }
142 } else {
143 for (ii = 0; ii < MAX_ALPA_COUNT; ii++) {
144 /*
145 * ignore ALPA of bfa port
146 */
147 if ((port_loop_alpa_map[ii] > 0)
148 && (port_loop_alpa_map[ii] != port->pid))
149 bfa_fcs_port_loop_send_plogi(port,
150 port_loop_alpa_map[ii]);
151 /**TBD */
152 }
153 }
154 } else {
155 /*
156 * TBD Target Mode ??
157 */
158 }
159
160}
161
/**
 * Called by port to notify transition to offline state.
 */
void
bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port)
{
	/* no loop-private teardown required */
}
170
/**
 * Called by port to notify a LIP on the loop.
 */
void
bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port)
{
	/* LIP handling is not implemented for private loop */
}
178
179/**
180 * Local Functions.
181 */
182bfa_status_t
183bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
184{
185 struct fchs_s fchs;
186 struct bfa_fcxp_s *fcxp = NULL;
187 int len;
188
189 bfa_trc(port->fcs, alpa);
190
191 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
192 NULL);
193 bfa_assert(fcxp);
194
195 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
196 bfa_fcs_port_get_fcid(port), 0,
197 port->port_cfg.pwwn, port->port_cfg.nwwn,
198 bfa_pport_get_maxfrsize(port->fcs->bfa));
199
200 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
201 FC_CLASS_3, len, &fchs,
202 bfa_fcs_port_loop_plogi_response, (void *)port,
203 FC_MAX_PDUSZ, FC_RA_TOV);
204
205 return BFA_STATUS_OK;
206}
207
208/**
209 * Called by fcxp to notify the Plogi response
210 */
211void
212bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
213 void *cbarg, bfa_status_t req_status,
214 u32 rsp_len, u32 resid_len,
215 struct fchs_s *rsp_fchs)
216{
217 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
218 struct fc_logi_s *plogi_resp;
219 struct fc_els_cmd_s *els_cmd;
220
221 bfa_trc(port->fcs, req_status);
222
223 /*
224 * Sanity Checks
225 */
226 if (req_status != BFA_STATUS_OK) {
227 bfa_trc(port->fcs, req_status);
228 /*
229 * @todo
230 * This could mean that the device with this APLA does not
231 * exist on the loop.
232 */
233
234 return;
235 }
236
237 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
238 plogi_resp = (struct fc_logi_s *) els_cmd;
239
240 if (els_cmd->els_code == FC_ELS_ACC) {
241 bfa_fcs_rport_start(port, rsp_fchs, plogi_resp);
242 } else {
243 bfa_trc(port->fcs, plogi_resp->els_cmd.els_code);
244 bfa_assert(0);
245 }
246}
247
248bfa_status_t
249bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, u8 alpa)
250{
251 struct fchs_s fchs;
252 struct bfa_fcxp_s *fcxp;
253 int len;
254
255 bfa_trc(port->fcs, alpa);
256
257 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
258 NULL);
259 bfa_assert(fcxp);
260
261 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
262 bfa_fcs_port_get_fcid(port), 0,
263 port->port_cfg.pwwn, port->port_cfg.nwwn,
264 bfa_pport_get_maxfrsize(port->fcs->bfa));
265
266 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
267 FC_CLASS_3, len, &fchs,
268 bfa_fcs_port_loop_plogi_acc_response,
269 (void *)port, FC_MAX_PDUSZ, 0); /* No response
270 * expected
271 */
272
273 return BFA_STATUS_OK;
274}
275
276/*
277 * Plogi Acc Response
278 * We donot do any processing here.
279 */
280void
281bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
282 void *cbarg, bfa_status_t req_status,
283 u32 rsp_len, u32 resid_len,
284 struct fchs_s *rsp_fchs)
285{
286
287 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
288
289 bfa_trc(port->fcs, port->pid);
290
291 /*
292 * Sanity Checks
293 */
294 if (req_status != BFA_STATUS_OK) {
295 bfa_trc(port->fcs, req_status);
296 return;
297 }
298}
299
300bfa_status_t
301bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, u8 alpa)
302{
303 struct fchs_s fchs;
304 struct bfa_fcxp_s *fcxp;
305 int len;
306
307 bfa_trc(port->fcs, alpa);
308
309 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
310 NULL);
311 bfa_assert(fcxp);
312
313 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
314 bfa_fcs_port_get_fcid(port), 0,
315 port->port_cfg.pwwn, port->port_cfg.nwwn);
316
317 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
318 FC_CLASS_3, len, &fchs,
319 bfa_fcs_port_loop_adisc_response, (void *)port,
320 FC_MAX_PDUSZ, FC_RA_TOV);
321
322 return BFA_STATUS_OK;
323}
324
325/**
326 * Called by fcxp to notify the ADISC response
327 */
328void
329bfa_fcs_port_loop_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
330 void *cbarg, bfa_status_t req_status,
331 u32 rsp_len, u32 resid_len,
332 struct fchs_s *rsp_fchs)
333{
334 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
335 struct bfa_fcs_rport_s *rport;
336 struct fc_adisc_s *adisc_resp;
337 struct fc_els_cmd_s *els_cmd;
338 u32 pid = rsp_fchs->s_id;
339
340 bfa_trc(port->fcs, req_status);
341
342 /*
343 * Sanity Checks
344 */
345 if (req_status != BFA_STATUS_OK) {
346 /*
347 * TBD : we may need to retry certain requests
348 */
349 bfa_fcxp_free(fcxp);
350 return;
351 }
352
353 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
354 adisc_resp = (struct fc_adisc_s *) els_cmd;
355
356 if (els_cmd->els_code == FC_ELS_ACC) {
357 } else {
358 bfa_trc(port->fcs, adisc_resp->els_cmd.els_code);
359
360 /*
361 * TBD: we may need to check for reject codes and retry
362 */
363 rport = bfa_fcs_port_get_rport_by_pid(port, pid);
364 if (rport) {
365 list_del(&rport->qe);
366 bfa_fcs_rport_delete(rport);
367 }
368
369 }
370 return;
371}
372
373bfa_status_t
374bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, u8 alpa)
375{
376 struct fchs_s fchs;
377 struct bfa_fcxp_s *fcxp;
378 int len;
379
380 bfa_trc(port->fcs, alpa);
381
382 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
383 NULL);
384 bfa_assert(fcxp);
385
386 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
387 bfa_fcs_port_get_fcid(port), 0,
388 port->port_cfg.pwwn, port->port_cfg.nwwn);
389
390 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
391 FC_CLASS_3, len, &fchs,
392 bfa_fcs_port_loop_adisc_acc_response,
393 (void *)port, FC_MAX_PDUSZ, 0); /* no reponse
394 * expected
395 */
396
397 return BFA_STATUS_OK;
398}
399
400/*
401 * Adisc Acc Response
402 * We donot do any processing here.
403 */
404void
405bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
406 void *cbarg, bfa_status_t req_status,
407 u32 rsp_len, u32 resid_len,
408 struct fchs_s *rsp_fchs)
409{
410
411 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
412
413 bfa_trc(port->fcs, port->pid);
414
415 /*
416 * Sanity Checks
417 */
418 if (req_status != BFA_STATUS_OK) {
419 bfa_trc(port->fcs, req_status);
420 return;
421 }
422}
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
new file mode 100644
index 000000000000..8f51a83f1834
--- /dev/null
+++ b/drivers/scsi/bfa/lport_api.c
@@ -0,0 +1,291 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_api.c BFA FCS port
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <fcs/bfa_fcs_lport.h>
24#include <fcs/bfa_fcs_rport.h>
25#include "fcs_rport.h"
26#include "fcs_fabric.h"
27#include "fcs_trcmod.h"
28#include "fcs_vport.h"
29
30BFA_TRC_FILE(FCS, PORT_API);
31
32
33
34/**
35 * fcs_port_api BFA FCS port API
36 */
37
void
bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg)
{
	/* no base-port configuration is applied today */
}
42
43struct bfa_fcs_port_s *
44bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
45{
46 return (&fcs->fabric.bport);
47}
48
49wwn_t
50bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
51 int nrports, bfa_boolean_t bwwn)
52{
53 struct list_head *qh, *qe;
54 struct bfa_fcs_rport_s *rport = NULL;
55 int i;
56 struct bfa_fcs_s *fcs;
57
58 if (port == NULL || nrports == 0)
59 return (wwn_t) 0;
60
61 fcs = port->fcs;
62 bfa_trc(fcs, (u32) nrports);
63
64 i = 0;
65 qh = &port->rport_q;
66 qe = bfa_q_first(qh);
67
68 while ((qe != qh) && (i < nrports)) {
69 rport = (struct bfa_fcs_rport_s *)qe;
70 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
71 qe = bfa_q_next(qe);
72 bfa_trc(fcs, (u32) rport->pwwn);
73 bfa_trc(fcs, rport->pid);
74 bfa_trc(fcs, i);
75 continue;
76 }
77
78 if (bwwn) {
79 if (!memcmp(&wwn, &rport->pwwn, 8))
80 break;
81 } else {
82 if (i == index)
83 break;
84 }
85
86 i++;
87 qe = bfa_q_next(qe);
88 }
89
90 bfa_trc(fcs, i);
91 if (rport) {
92 return rport->pwwn;
93 } else {
94 return (wwn_t) 0;
95 }
96}
97
98void
99bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[],
100 int *nrports)
101{
102 struct list_head *qh, *qe;
103 struct bfa_fcs_rport_s *rport = NULL;
104 int i;
105 struct bfa_fcs_s *fcs;
106
107 if (port == NULL || rport_wwns == NULL || *nrports == 0)
108 return;
109
110 fcs = port->fcs;
111 bfa_trc(fcs, (u32) *nrports);
112
113 i = 0;
114 qh = &port->rport_q;
115 qe = bfa_q_first(qh);
116
117 while ((qe != qh) && (i < *nrports)) {
118 rport = (struct bfa_fcs_rport_s *)qe;
119 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
120 qe = bfa_q_next(qe);
121 bfa_trc(fcs, (u32) rport->pwwn);
122 bfa_trc(fcs, rport->pid);
123 bfa_trc(fcs, i);
124 continue;
125 }
126
127 rport_wwns[i] = rport->pwwn;
128
129 i++;
130 qe = bfa_q_next(qe);
131 }
132
133 bfa_trc(fcs, i);
134 *nrports = i;
135 return;
136}
137
138/*
139 * Iterate's through all the rport's in the given port to
140 * determine the maximum operating speed.
141 */
142enum bfa_pport_speed
143bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
144{
145 struct list_head *qh, *qe;
146 struct bfa_fcs_rport_s *rport = NULL;
147 struct bfa_fcs_s *fcs;
148 enum bfa_pport_speed max_speed = 0;
149 struct bfa_pport_attr_s pport_attr;
150 enum bfa_pport_speed pport_speed;
151
152 if (port == NULL)
153 return 0;
154
155 fcs = port->fcs;
156
157 /*
158 * Get Physical port's current speed
159 */
160 bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
161 pport_speed = pport_attr.speed;
162 bfa_trc(fcs, pport_speed);
163
164 qh = &port->rport_q;
165 qe = bfa_q_first(qh);
166
167 while (qe != qh) {
168 rport = (struct bfa_fcs_rport_s *)qe;
169 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000)
170 || (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE)) {
171 qe = bfa_q_next(qe);
172 continue;
173 }
174
175 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_8GBPS)
176 || (rport->rpf.rpsc_speed > pport_speed)) {
177 max_speed = rport->rpf.rpsc_speed;
178 break;
179 } else if (rport->rpf.rpsc_speed > max_speed) {
180 max_speed = rport->rpf.rpsc_speed;
181 }
182
183 qe = bfa_q_next(qe);
184 }
185
186 bfa_trc(fcs, max_speed);
187 return max_speed;
188}
189
190struct bfa_fcs_port_s *
191bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
192{
193 struct bfa_fcs_vport_s *vport;
194 bfa_fcs_vf_t *vf;
195
196 bfa_assert(fcs != NULL);
197
198 vf = bfa_fcs_vf_lookup(fcs, vf_id);
199 if (vf == NULL) {
200 bfa_trc(fcs, vf_id);
201 return (NULL);
202 }
203
204 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
205 return (&vf->bport);
206
207 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
208 if (vport)
209 return (&vport->lport);
210
211 return (NULL);
212}
213
214/*
215 * API corresponding to VmWare's NPIV_VPORT_GETINFO.
216 */
217void
218bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
219 struct bfa_port_info_s *port_info)
220{
221
222 bfa_trc(port->fcs, port->fabric->fabric_name);
223
224 if (port->vport == NULL) {
225 /*
226 * This is a Physical port
227 */
228 port_info->port_type = BFA_PORT_TYPE_PHYSICAL;
229
230 /*
231 * @todo : need to fix the state & reason
232 */
233 port_info->port_state = 0;
234 port_info->offline_reason = 0;
235
236 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
237 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
238
239 port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs);
240 port_info->num_vports_inuse =
241 bfa_fcs_fabric_vport_count(port->fabric);
242 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
243 port_info->num_rports_inuse = port->num_rports;
244 } else {
245 /*
246 * This is a virtual port
247 */
248 port_info->port_type = BFA_PORT_TYPE_VIRTUAL;
249
250 /*
251 * @todo : need to fix the state & reason
252 */
253 port_info->port_state = 0;
254 port_info->offline_reason = 0;
255
256 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
257 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
258 }
259}
260
261void
262bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
263 struct bfa_port_stats_s *port_stats)
264{
265 bfa_os_memcpy(port_stats, &fcs_port->stats,
266 sizeof(struct bfa_port_stats_s));
267 return;
268}
269
270void
271bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port)
272{
273 bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_port_stats_s));
274 return;
275}
276
277void
278bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
279{
280 fcs_port->port_cfg.roles |= BFA_PORT_ROLE_FCP_IPFC;
281 return;
282}
283
284void
285bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
286{
287 fcs_port->port_cfg.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
288 return;
289}
290
291
diff --git a/drivers/scsi/bfa/lport_priv.h b/drivers/scsi/bfa/lport_priv.h
new file mode 100644
index 000000000000..dbae370a599a
--- /dev/null
+++ b/drivers/scsi/bfa/lport_priv.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __VP_PRIV_H__
19#define __VP_PRIV_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23
24/*
25 * Functions exported by vps
26 */
27void bfa_fcs_vport_init(struct bfa_fcs_vport_s *vport);
28
29/*
30 * Functions exported by vps
31 */
32void bfa_fcs_vps_online(struct bfa_fcs_port_s *port);
33void bfa_fcs_vps_offline(struct bfa_fcs_port_s *port);
34void bfa_fcs_vps_lip(struct bfa_fcs_port_s *port);
35
36/*
37 * Functions exported by port_fab
38 */
39void bfa_fcs_port_fab_init(struct bfa_fcs_port_s *vport);
40void bfa_fcs_port_fab_online(struct bfa_fcs_port_s *vport);
41void bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *vport);
42void bfa_fcs_port_fab_rx_frame(struct bfa_fcs_port_s *port,
43 u8 *rx_frame, u32 len);
44
45/*
46 * Functions exported by VP-NS.
47 */
48void bfa_fcs_port_ns_init(struct bfa_fcs_port_s *vport);
49void bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *vport);
50void bfa_fcs_port_ns_online(struct bfa_fcs_port_s *vport);
51void bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port);
52
53/*
54 * Functions exported by VP-SCN
55 */
56void bfa_fcs_port_scn_init(struct bfa_fcs_port_s *vport);
57void bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *vport);
58void bfa_fcs_port_scn_online(struct bfa_fcs_port_s *vport);
59void bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port,
60 struct fchs_s *rx_frame, u32 len);
61
62/*
63 * Functions exported by VP-N2N
64 */
65
66void bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port);
67void bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port);
68void bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port);
69void bfa_fcs_port_n2n_rx_frame(struct bfa_fcs_port_s *port,
70 u8 *rx_frame, u32 len);
71
72/*
73 * Functions exported by VP-LOOP
74 */
75void bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port);
76void bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port);
77void bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port);
78void bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port);
79void bfa_fcs_port_loop_rx_frame(struct bfa_fcs_port_s *port,
80 u8 *rx_frame, u32 len);
81
82#endif /* __VP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
new file mode 100644
index 000000000000..c96b3ca007ae
--- /dev/null
+++ b/drivers/scsi/bfa/ms.c
@@ -0,0 +1,759 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18
19#include <bfa.h>
20#include <bfa_svc.h>
21#include "fcs_lport.h"
22#include "fcs_rport.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "lport_priv.h"
26
27BFA_TRC_FILE(FCS, MS);
28
29#define BFA_FCS_MS_CMD_MAX_RETRIES 2
30/*
31 * forward declarations
32 */
33static void bfa_fcs_port_ms_send_plogi(void *ms_cbarg,
34 struct bfa_fcxp_s *fcxp_alloced);
35static void bfa_fcs_port_ms_timeout(void *arg);
36static void bfa_fcs_port_ms_plogi_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp,
38 void *cbarg,
39 bfa_status_t req_status,
40 u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43
44static void bfa_fcs_port_ms_send_gmal(void *ms_cbarg,
45 struct bfa_fcxp_s *fcxp_alloced);
46static void bfa_fcs_port_ms_gmal_response(void *fcsarg,
47 struct bfa_fcxp_s *fcxp,
48 void *cbarg,
49 bfa_status_t req_status,
50 u32 rsp_len,
51 u32 resid_len,
52 struct fchs_s *rsp_fchs);
53static void bfa_fcs_port_ms_send_gfn(void *ms_cbarg,
54 struct bfa_fcxp_s *fcxp_alloced);
55static void bfa_fcs_port_ms_gfn_response(void *fcsarg,
56 struct bfa_fcxp_s *fcxp,
57 void *cbarg,
58 bfa_status_t req_status,
59 u32 rsp_len,
60 u32 resid_len,
61 struct fchs_s *rsp_fchs);
62/**
63 * fcs_ms_sm FCS MS state machine
64 */
65
66/**
67 * MS State Machine events
68 */
69enum port_ms_event {
70 MSSM_EVENT_PORT_ONLINE = 1,
71 MSSM_EVENT_PORT_OFFLINE = 2,
72 MSSM_EVENT_RSP_OK = 3,
73 MSSM_EVENT_RSP_ERROR = 4,
74 MSSM_EVENT_TIMEOUT = 5,
75 MSSM_EVENT_FCXP_SENT = 6,
76 MSSM_EVENT_PORT_FABRIC_RSCN = 7
77};
78
79static void bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
80 enum port_ms_event event);
81static void bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
82 enum port_ms_event event);
83static void bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms,
84 enum port_ms_event event);
85static void bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
86 enum port_ms_event event);
87static void bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
88 enum port_ms_event event);
89static void bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms,
90 enum port_ms_event event);
91static void bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
92 enum port_ms_event event);
93static void bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
94 enum port_ms_event event);
95static void bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms,
96 enum port_ms_event event);
97static void bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
98 enum port_ms_event event);
99static void bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
100 enum port_ms_event event);
101/**
102 * Start in offline state - awaiting NS to send start.
103 */
/*
 * MS offline state: idle until the port comes up.  PORT_ONLINE kicks off
 * the PLOGI to the management server; PORT_OFFLINE while already offline
 * is a harmless no-op.
 */
static void
bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
			   enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_ONLINE:
		/* start the login sequence to the management server */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
		bfa_fcs_port_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_assert(0);	/* unexpected event in this state */
	}
}
124
/*
 * PLOGI frame queued for transmission; waiting for the FCXP-sent
 * confirmation.  A port-offline here cancels the pending fcxp wait-queue
 * entry so no stale allocation callback fires later.
 */
static void
bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
				 enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		/* drop the queued fcxp allocation request */
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_assert(0);
	}
}
147
/*
 * PLOGI sent to the management server; awaiting the response.
 * On success the FDMI sub-module is brought online; base ports then
 * continue with GMAL to learn the switch management IP address, while
 * vports go straight to the online state.
 */
static void
bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry);
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer,
				bfa_fcs_port_ms_timeout, ms,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case MSSM_EVENT_RSP_OK:
		/*
		 * since plogi is done, now invoke MS related sub-modules
		 */
		bfa_fcs_port_fdmi_online(ms);

		/*
		 * if this is a Vport, go to online state.
		 */
		if (ms->port->vport) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			break;
		}

		/*
		 * For a base port we need to get the
		 * switch's IP address.
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
		bfa_fcs_port_ms_send_gmal(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		/* response no longer wanted - discard the in-flight fcxp */
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
196
/*
 * Waiting out the retry delay after a failed PLOGI.  Timer expiry
 * re-sends the PLOGI; port-offline stops the timer and returns to
 * the offline state.
 */
static void
bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
			       enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
		bfa_fcs_port_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_assert(0);
	}
}
222
/*
 * MS session established.  Port-offline tears down the FDMI sub-module;
 * a fabric RSCN triggers a GFN query (with a fresh retry counter) to
 * detect a fabric-name change.
 */
static void
bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
			  enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		/*
		 * now invoke MS related sub-modules
		 */
		bfa_fcs_port_fdmi_offline(ms);
		break;

	case MSSM_EVENT_PORT_FABRIC_RSCN:
		/* fabric change notification: re-query the fabric name */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		ms->retry_cnt = 0;
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	default:
		bfa_assert(0);
	}
}
249
/*
 * GMAL request queued; waiting for FCXP-sent confirmation.
 * Port-offline cancels the pending fcxp wait-queue entry.
 */
static void
bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_assert(0);
	}
}
272
/*
 * GMAL sent; awaiting the response.  Errors are retried up to
 * BFA_FCS_MS_CMD_MAX_RETRIES with a delay; after that (or on success)
 * the sequence moves on to GFN - GMAL failure is not fatal.
 */
static void
bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* give up on GMAL and continue with GFN */
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
			bfa_fcs_port_ms_send_gfn(ms, NULL);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
310
311static void
312bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
313 enum port_ms_event event)
314{
315 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
316 bfa_trc(ms->port->fcs, event);
317
318 switch (event) {
319 case MSSM_EVENT_TIMEOUT:
320 /*
321 * Retry Timer Expired. Re-send
322 */
323 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
324 bfa_fcs_port_ms_send_gmal(ms, NULL);
325 break;
326
327 case MSSM_EVENT_PORT_OFFLINE:
328 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
329 bfa_timer_stop(&ms->timer);
330 break;
331
332 default:
333 bfa_assert(0);
334 }
335}
336
337/**
338 * ms_pvt MS local functions
339 */
340
/*
 * Allocate an fcxp and transmit a GMAL (Get Management Address List)
 * request to the management server.  If no fcxp is available the call
 * is parked on the wait queue and re-entered with the allocated fcxp.
 */
static void
bfa_fcs_port_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* out of fcxps - queue and retry from the alloc callback */
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_gmal, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				bfa_fcs_port_get_fcid(port),
				bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
370
371static void
372bfa_fcs_port_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
373 void *cbarg, bfa_status_t req_status,
374 u32 rsp_len, u32 resid_len,
375 struct fchs_s *rsp_fchs)
376{
377 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
378 struct bfa_fcs_port_s *port = ms->port;
379 struct ct_hdr_s *cthdr = NULL;
380 struct fcgs_gmal_resp_s *gmal_resp;
381 struct fc_gmal_entry_s *gmal_entry;
382 u32 num_entries;
383 u8 *rsp_str;
384
385 bfa_trc(port->fcs, req_status);
386 bfa_trc(port->fcs, port->port_cfg.pwwn);
387
388 /*
389 * Sanity Checks
390 */
391 if (req_status != BFA_STATUS_OK) {
392 bfa_trc(port->fcs, req_status);
393 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
394 return;
395 }
396
397 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
398 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
399
400 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
401 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
402 num_entries = bfa_os_ntohl(gmal_resp->ms_len);
403 if (num_entries == 0) {
404 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
405 return;
406 }
407 /*
408 * The response could contain multiple Entries.
409 * Entries for SNMP interface, etc.
410 * We look for the entry with a telnet prefix.
411 * First "http://" entry refers to IP addr
412 */
413
414 gmal_entry = (struct fc_gmal_entry_s *)gmal_resp->ms_ma;
415 while (num_entries > 0) {
416 if (strncmp
417 (gmal_entry->prefix, CT_GMAL_RESP_PREFIX_HTTP,
418 sizeof(gmal_entry->prefix)) == 0) {
419
420 /*
421 * if the IP address is terminating with a '/',
422 * remove it. *Byte 0 consists of the length
423 * of the string.
424 */
425 rsp_str = &(gmal_entry->prefix[0]);
426 if (rsp_str[gmal_entry->len - 1] == '/')
427 rsp_str[gmal_entry->len - 1] = 0;
428 /*
429 * copy IP Address to fabric
430 */
431 strncpy(bfa_fcs_port_get_fabric_ipaddr(port),
432 gmal_entry->ip_addr,
433 BFA_FCS_FABRIC_IPADDR_SZ);
434 break;
435 } else {
436 --num_entries;
437 ++gmal_entry;
438 }
439 }
440
441 bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
442 return;
443 }
444
445 bfa_trc(port->fcs, cthdr->reason_code);
446 bfa_trc(port->fcs, cthdr->exp_code);
447 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
448}
449
/*
 * GFN request queued; waiting for FCXP-sent confirmation.
 * Port-offline cancels the pending fcxp wait-queue entry.
 */
static void
bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
			       enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_assert(0);
	}
}
472
/*
 * GFN (Get Fabric Name) sent; awaiting the response.  Errors retry up
 * to BFA_FCS_MS_CMD_MAX_RETRIES; after that the SM simply goes online -
 * a failed GFN is not fatal to the MS session.
 */
static void
bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* retries exhausted - proceed to online anyway */
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
508
509static void
510bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
511 enum port_ms_event event)
512{
513 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
514 bfa_trc(ms->port->fcs, event);
515
516 switch (event) {
517 case MSSM_EVENT_TIMEOUT:
518 /*
519 * Retry Timer Expired. Re-send
520 */
521 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
522 bfa_fcs_port_ms_send_gfn(ms, NULL);
523 break;
524
525 case MSSM_EVENT_PORT_OFFLINE:
526 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
527 bfa_timer_stop(&ms->timer);
528 break;
529
530 default:
531 bfa_assert(0);
532 }
533}
534
/**
 *  GFN (Get Fabric Name) request/response helpers
 */
538
/*
 * Allocate an fcxp and transmit a GFN (Get Fabric Name) request to the
 * management server.  If no fcxp is available the call is parked on the
 * wait queue and re-entered with the allocated fcxp.
 */
static void
bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* out of fcxps - queue and retry from the alloc callback */
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_gfn, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			       bfa_fcs_port_get_fcid(port),
			       bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
568
/*
 * GFN response handler.  On a CT accept the returned fabric name (a
 * wwn_t immediately following the CT header) is compared with the
 * currently cached fabric name and, if different, propagated via
 * bfa_fcs_fabric_set_fabric_name().  Transport errors and CT rejects
 * feed MSSM_EVENT_RSP_ERROR back to the state machine.
 */
static void
bfa_fcs_port_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			     bfa_status_t req_status, u32 rsp_len,
			     u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct ct_hdr_s *cthdr = NULL;
	wwn_t *gfn_resp;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		gfn_resp = (wwn_t *) (cthdr + 1);
		/*
		 * check if it has actually changed
		 */
		if ((memcmp
		     ((void *)&bfa_fcs_port_get_fabric_name(port), gfn_resp,
		      sizeof(wwn_t)) != 0))
			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
611
/**
 *  Management-server PLOGI request/response helpers
 */
615
/*
 * Allocate an fcxp and transmit a PLOGI to the well-known management
 * server address (FC_MGMT_SERVER).  Allocation-wait and sent events are
 * accounted in the port statistics.
 */
static void
bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* out of fcxps - queue and retry from the alloc callback */
		port->stats.ms_plogi_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_plogi, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_MGMT_SERVER),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_pport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV);

	port->stats.ms_plogi_sent++;
	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
649
/*
 * PLOGI response handler.  Classifies the ELS reply (ACC / LS_RJT /
 * anything else), updates the corresponding port statistics and feeds
 * MSSM_EVENT_RSP_OK or MSSM_EVENT_RSP_ERROR into the state machine.
 * An ACC shorter than a full login payload is treated as an error.
 */
static void
bfa_fcs_port_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;

	struct bfa_fcs_port_s *port = ms->port;
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		port->stats.ms_plogi_rsp_err++;
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* ACC must carry a complete login payload */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ms_plogi_acc_err++;
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ms_plogi_accepts++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ms_rejects++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ms_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
	}
}
706
707static void
708bfa_fcs_port_ms_timeout(void *arg)
709{
710 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)arg;
711
712 ms->port->stats.ms_timeouts++;
713 bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
714}
715
716
717void
718bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port)
719{
720 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
721
722 ms->port = port;
723 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
724
725 /*
726 * Invoke init routines of sub modules.
727 */
728 bfa_fcs_port_fdmi_init(ms);
729}
730
731void
732bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
733{
734 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
735
736 ms->port = port;
737 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
738}
739
740void
741bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port)
742{
743 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
744
745 ms->port = port;
746 bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
747}
748
749void
750bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port)
751{
752 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
753
754 /*
755 * @todo. Handle this only when in Online state
756 */
757 if (bfa_sm_cmp_state(ms, bfa_fcs_port_ms_sm_online))
758 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
759}
diff --git a/drivers/scsi/bfa/n2n.c b/drivers/scsi/bfa/n2n.c
new file mode 100644
index 000000000000..735456824346
--- /dev/null
+++ b/drivers/scsi/bfa/n2n.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * n2n.c n2n implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, N2N);
29
30/**
31 * Called by fcs/port to initialize N2N topology.
32 */
/* Intentionally empty: N2N topology needs no per-port initialization. */
void
bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port)
{
}
37
38/**
39 * Called by fcs/port to notify transition to online state.
40 */
/*
 * N2N online handler.  The side with the numerically greater PWWN takes
 * the initiator role: it assigns itself N2N_LOCAL_PID and either brings
 * a known rport online or creates a fresh one with N2N_REMOTE_PID.  The
 * side with the smaller PWWN does nothing here and waits for the peer's
 * PLOGI (handled by the rport state machine).
 */
void
bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
	struct bfa_port_cfg_s *pcfg = &port->port_cfg;
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, pcfg->pwwn);

	/*
	 * If our PWWN is > than that of the r-port, we have to initiate PLOGI
	 * and assign an Address. if not, we need to wait for its PLOGI.
	 *
	 * If our PWWN is < than that of the remote port, it will send a PLOGI
	 * with the PIDs assigned. The rport state machine take care of this
	 * incoming PLOGI.
	 */
	if (memcmp
	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
	     sizeof(wwn_t)) > 0) {
		port->pid = N2N_LOCAL_PID;
		/*
		 * First, check if we know the device by pwwn.
		 */
		rport = bfa_fcs_port_get_rport_by_pwwn(port,
						       n2n_port->rem_port_wwn);
		if (rport) {
			/* known peer - reuse the existing rport */
			bfa_trc(port->fcs, rport->pid);
			bfa_trc(port->fcs, rport->pwwn);
			rport->pid = N2N_REMOTE_PID;
			bfa_fcs_rport_online(rport);
			return;
		}

		/*
		 * In n2n there can be only one rport. Delete the old one whose
		 * pid should be zero, because it is offline.
		 */
		if (port->num_rports > 0) {
			rport = bfa_fcs_port_get_rport_by_pid(port, 0);
			bfa_assert(rport != NULL);
			if (rport) {
				bfa_trc(port->fcs, rport->pwwn);
				bfa_fcs_rport_delete(rport);
			}
		}
		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
	}
}
90
91/**
92 * Called by fcs/port to notify transition to offline state.
93 */
94void
95bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port)
96{
97 struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
98
99 bfa_trc(port->fcs, port->pid);
100 port->pid = 0;
101 n2n_port->rem_port_wwn = 0;
102 n2n_port->reply_oxid = 0;
103}
104
105
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
new file mode 100644
index 000000000000..59fea99d67a4
--- /dev/null
+++ b/drivers/scsi/bfa/ns.c
@@ -0,0 +1,1243 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * @page ns_sm_info VPORT NS State Machine
20 *
21 * @section ns_sm_interactions VPORT NS State Machine Interactions
22 *
23 * @section ns_sm VPORT NS State Machine
24 * img ns_sm.jpg
25 */
26#include <bfa.h>
27#include <bfa_svc.h>
28#include <bfa_iocfc.h>
29#include "fcs_lport.h"
30#include "fcs_rport.h"
31#include "fcs_trcmod.h"
32#include "fcs_fcxp.h"
33#include "fcs.h"
34#include "lport_priv.h"
35
36BFA_TRC_FILE(FCS, NS);
37
38/*
39 * forward declarations
40 */
41static void bfa_fcs_port_ns_send_plogi(void *ns_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_ns_send_rft_id(void *ns_cbarg,
46 struct bfa_fcxp_s *fcxp_alloced);
47static void bfa_fcs_port_ns_send_rff_id(void *ns_cbarg,
48 struct bfa_fcxp_s *fcxp_alloced);
49static void bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg,
50 struct bfa_fcxp_s *fcxp_alloced);
51static void bfa_fcs_port_ns_timeout(void *arg);
52static void bfa_fcs_port_ns_plogi_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_ns_rspn_id_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_ns_rft_id_response(void *fcsarg,
67 struct bfa_fcxp_s *fcxp,
68 void *cbarg,
69 bfa_status_t req_status,
70 u32 rsp_len,
71 u32 resid_len,
72 struct fchs_s *rsp_fchs);
73static void bfa_fcs_port_ns_rff_id_response(void *fcsarg,
74 struct bfa_fcxp_s *fcxp,
75 void *cbarg,
76 bfa_status_t req_status,
77 u32 rsp_len,
78 u32 resid_len,
79 struct fchs_s *rsp_fchs);
80static void bfa_fcs_port_ns_gid_ft_response(void *fcsarg,
81 struct bfa_fcxp_s *fcxp,
82 void *cbarg,
83 bfa_status_t req_status,
84 u32 rsp_len,
85 u32 resid_len,
86 struct fchs_s *rsp_fchs);
87static void bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
88 u32 *pid_buf,
89 u32 n_pids);
90
91static void bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port);
92/**
93 * fcs_ns_sm FCS nameserver interface state machine
94 */
95
96/**
97 * VPort NS State Machine events
98 */
99enum vport_ns_event {
100 NSSM_EVENT_PORT_ONLINE = 1,
101 NSSM_EVENT_PORT_OFFLINE = 2,
102 NSSM_EVENT_PLOGI_SENT = 3,
103 NSSM_EVENT_RSP_OK = 4,
104 NSSM_EVENT_RSP_ERROR = 5,
105 NSSM_EVENT_TIMEOUT = 6,
106 NSSM_EVENT_NS_QUERY = 7,
107 NSSM_EVENT_RSPNID_SENT = 8,
108 NSSM_EVENT_RFTID_SENT = 9,
109 NSSM_EVENT_RFFID_SENT = 10,
110 NSSM_EVENT_GIDFT_SENT = 11,
111};
112
113static void bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
114 enum vport_ns_event event);
115static void bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
116 enum vport_ns_event event);
117static void bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
118 enum vport_ns_event event);
119static void bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
120 enum vport_ns_event event);
121static void bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
122 enum vport_ns_event event);
123static void bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
124 enum vport_ns_event event);
125static void bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
126 enum vport_ns_event event);
127static void bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
128 enum vport_ns_event event);
129static void bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
130 enum vport_ns_event event);
131static void bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
132 enum vport_ns_event event);
133static void bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
134 enum vport_ns_event event);
135static void bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
136 enum vport_ns_event event);
137static void bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
138 enum vport_ns_event event);
139static void bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
140 enum vport_ns_event event);
141static void bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
142 enum vport_ns_event event);
143static void bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
144 enum vport_ns_event event);
145static void bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
146 enum vport_ns_event event);
147/**
148 * Start in offline state - awaiting linkup
149 */
/*
 * NS offline state: idle until link-up.  PORT_ONLINE starts the PLOGI
 * to the name server; PORT_OFFLINE while already offline is a no-op.
 */
static void
bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PORT_ONLINE:
		/* start the name-server login sequence */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
		bfa_fcs_port_ns_send_plogi(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_assert(0);	/* unexpected event in this state */
	}
}
170
/*
 * NS PLOGI queued; waiting for the sent confirmation.  Port-offline
 * cancels the pending fcxp wait-queue entry.
 */
static void
bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
				 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PLOGI_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_assert(0);
	}
}
193
/*
 * NS PLOGI sent; awaiting the response.  Success advances to RSPN_ID
 * registration; errors schedule a delayed retry (counted in
 * stats.ns_retries); port-offline discards the in-flight fcxp.
 */
static void
bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
			 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		/* login done - register the symbolic port name next */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
227
228static void
229bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
230 enum vport_ns_event event)
231{
232 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
233 bfa_trc(ns->port->fcs, event);
234
235 switch (event) {
236 case NSSM_EVENT_TIMEOUT:
237 /*
238 * Retry Timer Expired. Re-send
239 */
240 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
241 bfa_fcs_port_ns_send_plogi(ns, NULL);
242 break;
243
244 case NSSM_EVENT_PORT_OFFLINE:
245 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
246 bfa_timer_stop(&ns->timer);
247 break;
248
249 default:
250 bfa_assert(0);
251 }
252}
253
/*
 * RSPN_ID (Register Symbolic Port Name) queued; waiting for the sent
 * confirmation.  Port-offline cancels the pending fcxp wait.
 */
static void
bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
				   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSPNID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_assert(0);
	}
}
276
/*
 * RSPN_ID sent; awaiting the response.  Success advances to RFT_ID
 * (register FC-4 types); errors schedule a delayed retry; port-offline
 * discards the in-flight fcxp.
 */
static void
bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
		bfa_fcs_port_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_fcxp_discard(ns->fcxp);
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	default:
		bfa_assert(0);
	}
}
310
311static void
312bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
313 enum vport_ns_event event)
314{
315 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
316 bfa_trc(ns->port->fcs, event);
317
318 switch (event) {
319 case NSSM_EVENT_TIMEOUT:
320 /*
321 * Retry Timer Expired. Re-send
322 */
323 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
324 bfa_fcs_port_ns_send_rspn_id(ns, NULL);
325 break;
326
327 case NSSM_EVENT_PORT_OFFLINE:
328 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
329 bfa_timer_stop(&ns->timer);
330 break;
331
332 default:
333 bfa_assert(0);
334 }
335}
336
337static void
338bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
339 enum vport_ns_event event)
340{
341 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
342 bfa_trc(ns->port->fcs, event);
343
344 switch (event) {
345 case NSSM_EVENT_RFTID_SENT:
346 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id);
347 break;
348
349 case NSSM_EVENT_PORT_OFFLINE:
350 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
351 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
352 &ns->fcxp_wqe);
353 break;
354
355 default:
356 bfa_assert(0);
357 }
358}
359
360static void
361bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
362 enum vport_ns_event event)
363{
364 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
365 bfa_trc(ns->port->fcs, event);
366
367 switch (event) {
368 case NSSM_EVENT_RSP_OK:
369 /*
370 * Now move to register FC4 Features
371 */
372 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
373 bfa_fcs_port_ns_send_rff_id(ns, NULL);
374 break;
375
376 case NSSM_EVENT_RSP_ERROR:
377 /*
378 * Start timer for a delayed retry
379 */
380 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id_retry);
381 ns->port->stats.ns_retries++;
382 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
383 bfa_fcs_port_ns_timeout, ns,
384 BFA_FCS_RETRY_TIMEOUT);
385 break;
386
387 case NSSM_EVENT_PORT_OFFLINE:
388 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
389 bfa_fcxp_discard(ns->fcxp);
390 break;
391
392 default:
393 bfa_assert(0);
394 }
395}
396
397static void
398bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
399 enum vport_ns_event event)
400{
401 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
402 bfa_trc(ns->port->fcs, event);
403
404 switch (event) {
405 case NSSM_EVENT_TIMEOUT:
406 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
407 bfa_fcs_port_ns_send_rft_id(ns, NULL);
408 break;
409
410 case NSSM_EVENT_PORT_OFFLINE:
411 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
412 bfa_timer_stop(&ns->timer);
413 break;
414
415 default:
416 bfa_assert(0);
417 }
418}
419
420static void
421bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
422 enum vport_ns_event event)
423{
424 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
425 bfa_trc(ns->port->fcs, event);
426
427 switch (event) {
428 case NSSM_EVENT_RFFID_SENT:
429 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id);
430 break;
431
432 case NSSM_EVENT_PORT_OFFLINE:
433 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
434 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
435 &ns->fcxp_wqe);
436 break;
437
438 default:
439 bfa_assert(0);
440 }
441}
442
/*
 * Waiting for the RFF_ID response.  On success this is the last
 * registration step: decide whether to discover remote ports via
 * GID_FT or go straight online, then start the management server
 * state machine.
 */
static void
bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we do not initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_port_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
			/* note: min-cfg path skips the ms_online kick below */
			return;
		}

		/*
		 * If the port role is Initiator Mode issue NS query
		 * (GID_FT).  If it is Target Mode, skip the query and go
		 * directly to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		} else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_port_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
500
501static void
502bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
503 enum vport_ns_event event)
504{
505 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
506 bfa_trc(ns->port->fcs, event);
507
508 switch (event) {
509 case NSSM_EVENT_TIMEOUT:
510 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
511 bfa_fcs_port_ns_send_rff_id(ns, NULL);
512 break;
513
514 case NSSM_EVENT_PORT_OFFLINE:
515 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
516 bfa_timer_stop(&ns->timer);
517 break;
518
519 default:
520 bfa_assert(0);
521 }
522}
523static void
524bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
525 enum vport_ns_event event)
526{
527 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
528 bfa_trc(ns->port->fcs, event);
529
530 switch (event) {
531 case NSSM_EVENT_GIDFT_SENT:
532 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft);
533 break;
534
535 case NSSM_EVENT_PORT_OFFLINE:
536 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
537 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
538 &ns->fcxp_wqe);
539 break;
540
541 default:
542 bfa_assert(0);
543 }
544}
545
546static void
547bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
548 enum vport_ns_event event)
549{
550 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
551 bfa_trc(ns->port->fcs, event);
552
553 switch (event) {
554 case NSSM_EVENT_RSP_OK:
555 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
556 break;
557
558 case NSSM_EVENT_RSP_ERROR:
559 /*
560 * TBD: for certain reject codes, we don't need to retry
561 */
562 /*
563 * Start timer for a delayed retry
564 */
565 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry);
566 ns->port->stats.ns_retries++;
567 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
568 bfa_fcs_port_ns_timeout, ns,
569 BFA_FCS_RETRY_TIMEOUT);
570 break;
571
572 case NSSM_EVENT_PORT_OFFLINE:
573 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
574 bfa_fcxp_discard(ns->fcxp);
575 break;
576
577 default:
578 bfa_assert(0);
579 }
580}
581
582static void
583bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
584 enum vport_ns_event event)
585{
586 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
587 bfa_trc(ns->port->fcs, event);
588
589 switch (event) {
590 case NSSM_EVENT_TIMEOUT:
591 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
592 bfa_fcs_port_ns_send_gid_ft(ns, NULL);
593 break;
594
595 case NSSM_EVENT_PORT_OFFLINE:
596 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
597 bfa_timer_stop(&ns->timer);
598 break;
599
600 default:
601 bfa_assert(0);
602 }
603}
604
605static void
606bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
607 enum vport_ns_event event)
608{
609 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
610 bfa_trc(ns->port->fcs, event);
611
612 switch (event) {
613 case NSSM_EVENT_PORT_OFFLINE:
614 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
615 break;
616
617 case NSSM_EVENT_NS_QUERY:
618 /*
619 * If the port role is Initiator Mode issue NS query.
620 * If it is Target Mode, skip this and go to online.
621 */
622 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
623 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
624 bfa_fcs_port_ns_send_gid_ft(ns, NULL);
625 };
626 break;
627
628 default:
629 bfa_assert(0);
630 }
631}
632
633
634
635/**
636 * ns_pvt Nameserver local functions
637 */
638
/*
 * Build and send a PLOGI to the well-known name-server address.
 * Called with fcxp_alloced == NULL on the first attempt; when an fcxp
 * becomes available after a wait, this function is re-invoked as the
 * alloc-wait callback with the allocated fcxp.
 */
static void
bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue ourselves for a callback. */
		port->stats.ns_plogi_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_plogi, ns);
		return;
	}
	ns->fcxp = fcxp;

	/* PLOGI destined to the well-known name-server FC address. */
	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_NAME_SERVER),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_pport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV);
	port->stats.ns_plogi_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
}
672
/*
 * Completion callback for the name-server PLOGI.  Translates the ELS
 * response (or transport failure) into an RSP_OK / RSP_ERROR event for
 * the NS state machine.
 */
static void
bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status,
			u32 rsp_len, u32 resid_len,
			struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks: transport-level failure is treated as an error
	 * response.
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* Accept must carry a full PLOGI payload to be usable. */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		/* Explicit reject: trace the reason and retry via SM. */
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		/* Unexpected ELS code: count it and treat as an error. */
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
730
731/**
732 * Register the symbolic port name.
733 */
/**
 * Register the symbolic port name (RSPN_ID) with the name server.
 *
 * For a vport the symbolic name is the base port's symbolic name with
 * the vport's own symbolic name appended; for the base port it is used
 * as-is.
 */
static void
bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;
	u8 symbl[256];
	u8 *psymbl = &symbl[0];

	bfa_os_memset(symbl, 0, sizeof(symbl));

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue for an alloc callback. */
		port->stats.ns_rspnid_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_rspn_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	/*
	 * for V-Port, form a Port Symbolic Name
	 */
	if (port->vport) {
		/*
		 * For vports we append the vport's port symbolic name to
		 * that of the base port.
		 *
		 * NOTE(review): strncpy with a length equal to the source
		 * strlen() copies no terminator; the explicit assignment
		 * below supplies it.  This assumes the combined base +
		 * vport names fit within symbl[256] -- TODO confirm the
		 * maximum symbolic-name lengths guarantee no overflow.
		 */

		strncpy((char *)psymbl,
			(char *)
			&(bfa_fcs_port_get_psym_name
			  (bfa_fcs_get_base_port(port->fcs))),
			strlen((char *)
			       &bfa_fcs_port_get_psym_name(bfa_fcs_get_base_port
							   (port->fcs))));

		/*
		 * Ensure we have a null terminating string.
		 */
		((char *)
		 psymbl)[strlen((char *)
				&bfa_fcs_port_get_psym_name
				(bfa_fcs_get_base_port(port->fcs)))] = 0;

		strncat((char *)psymbl,
			(char *)&(bfa_fcs_port_get_psym_name(port)),
			strlen((char *)&bfa_fcs_port_get_psym_name(port)));
	} else {
		/* Base port: use its symbolic name directly. */
		psymbl = (u8 *) &(bfa_fcs_port_get_psym_name(port));
	}

	len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			      bfa_fcs_port_get_fcid(port), 0, psymbl);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV);

	port->stats.ns_rspnid_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
}
800
801static void
802bfa_fcs_port_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
803 void *cbarg, bfa_status_t req_status,
804 u32 rsp_len, u32 resid_len,
805 struct fchs_s *rsp_fchs)
806{
807 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
808 struct bfa_fcs_port_s *port = ns->port;
809 struct ct_hdr_s *cthdr = NULL;
810
811 bfa_trc(port->fcs, port->port_cfg.pwwn);
812
813 /*
814 * Sanity Checks
815 */
816 if (req_status != BFA_STATUS_OK) {
817 bfa_trc(port->fcs, req_status);
818 port->stats.ns_rspnid_rsp_err++;
819 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
820 return;
821 }
822
823 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
824 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
825
826 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
827 port->stats.ns_rspnid_accepts++;
828 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
829 return;
830 }
831
832 port->stats.ns_rspnid_rejects++;
833 bfa_trc(port->fcs, cthdr->reason_code);
834 bfa_trc(port->fcs, cthdr->exp_code);
835 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
836}
837
838/**
839 * Register FC4-Types
840 * TBD, Need to retrieve this from the OS driver, in case IPFC is enabled ?
841 */
842static void
843bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
844{
845 struct bfa_fcs_port_ns_s *ns = ns_cbarg;
846 struct bfa_fcs_port_s *port = ns->port;
847 struct fchs_s fchs;
848 int len;
849 struct bfa_fcxp_s *fcxp;
850
851 bfa_trc(port->fcs, port->port_cfg.pwwn);
852
853 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
854 if (!fcxp) {
855 port->stats.ns_rftid_alloc_wait++;
856 bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
857 bfa_fcs_port_ns_send_rft_id, ns);
858 return;
859 }
860 ns->fcxp = fcxp;
861
862 len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
863 bfa_fcs_port_get_fcid(port), 0,
864 port->port_cfg.roles);
865
866 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
867 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response,
868 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV);
869
870 port->stats.ns_rftid_sent++;
871 bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
872}
873
874static void
875bfa_fcs_port_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
876 void *cbarg, bfa_status_t req_status,
877 u32 rsp_len, u32 resid_len,
878 struct fchs_s *rsp_fchs)
879{
880 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
881 struct bfa_fcs_port_s *port = ns->port;
882 struct ct_hdr_s *cthdr = NULL;
883
884 bfa_trc(port->fcs, port->port_cfg.pwwn);
885
886 /*
887 * Sanity Checks
888 */
889 if (req_status != BFA_STATUS_OK) {
890 bfa_trc(port->fcs, req_status);
891 port->stats.ns_rftid_rsp_err++;
892 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
893 return;
894 }
895
896 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
897 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
898
899 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
900 port->stats.ns_rftid_accepts++;
901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
902 return;
903 }
904
905 port->stats.ns_rftid_rejects++;
906 bfa_trc(port->fcs, cthdr->reason_code);
907 bfa_trc(port->fcs, cthdr->exp_code);
908 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
909}
910
911/**
912* Register FC4-Features : Should be done after RFT_ID
913 */
914static void
915bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
916{
917 struct bfa_fcs_port_ns_s *ns = ns_cbarg;
918 struct bfa_fcs_port_s *port = ns->port;
919 struct fchs_s fchs;
920 int len;
921 struct bfa_fcxp_s *fcxp;
922 u8 fc4_ftrs = 0;
923
924 bfa_trc(port->fcs, port->port_cfg.pwwn);
925
926 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
927 if (!fcxp) {
928 port->stats.ns_rffid_alloc_wait++;
929 bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
930 bfa_fcs_port_ns_send_rff_id, ns);
931 return;
932 }
933 ns->fcxp = fcxp;
934
935 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
936 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
937 } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
938 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET;
939 }
940
941 len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
942 bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP,
943 fc4_ftrs);
944
945 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
946 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response,
947 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV);
948
949 port->stats.ns_rffid_sent++;
950 bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
951}
952
953static void
954bfa_fcs_port_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
955 void *cbarg, bfa_status_t req_status,
956 u32 rsp_len, u32 resid_len,
957 struct fchs_s *rsp_fchs)
958{
959 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
960 struct bfa_fcs_port_s *port = ns->port;
961 struct ct_hdr_s *cthdr = NULL;
962
963 bfa_trc(port->fcs, port->port_cfg.pwwn);
964
965 /*
966 * Sanity Checks
967 */
968 if (req_status != BFA_STATUS_OK) {
969 bfa_trc(port->fcs, req_status);
970 port->stats.ns_rffid_rsp_err++;
971 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
972 return;
973 }
974
975 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
976 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
977
978 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
979 port->stats.ns_rffid_accepts++;
980 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
981 return;
982 }
983
984 port->stats.ns_rffid_rejects++;
985 bfa_trc(port->fcs, cthdr->reason_code);
986 bfa_trc(port->fcs, cthdr->exp_code);
987
988 if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
989 /*
990 * if this command is not supported, we don't retry
991 */
992 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
993 } else {
994 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
995 }
996}
997
998/**
999 * Query Fabric for FC4-Types Devices.
1000 *
1001* TBD : Need to use a local (FCS private) response buffer, since the response
1002 * can be larger than 2K.
1003 */
1004static void
1005bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1006{
1007 struct bfa_fcs_port_ns_s *ns = ns_cbarg;
1008 struct bfa_fcs_port_s *port = ns->port;
1009 struct fchs_s fchs;
1010 int len;
1011 struct bfa_fcxp_s *fcxp;
1012
1013 bfa_trc(port->fcs, port->pid);
1014
1015 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1016 if (!fcxp) {
1017 port->stats.ns_gidft_alloc_wait++;
1018 bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
1019 bfa_fcs_port_ns_send_gid_ft, ns);
1020 return;
1021 }
1022 ns->fcxp = fcxp;
1023
1024 /*
1025 * This query is only initiated for FCP initiator mode.
1026 */
1027 len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid,
1028 FC_TYPE_FCP);
1029
1030 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1031 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response,
1032 (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa),
1033 FC_RA_TOV);
1034
1035 port->stats.ns_gidft_sent++;
1036
1037 bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
1038}
1039
/*
 * Completion callback for GID_FT.  On accept, walks the returned PID
 * list and creates/updates rports; a reject meaning "no FC4 devices
 * registered" is treated as success.
 */
static void
bfa_fcs_port_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status,
			u32 rsp_len, u32 resid_len,
			struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;
	u32 n_pids;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_gidft_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	if (resid_len != 0) {
		/*
		 * TBD : we will need to allocate a larger buffer & retry the
		 * command
		 *
		 * NOTE(review): this path returns without sending any SM
		 * event, which appears to leave the state machine waiting
		 * in gid_ft indefinitely -- confirm intended behavior.
		 */
		bfa_trc(port->fcs, rsp_len);
		bfa_trc(port->fcs, resid_len);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	switch (cthdr->cmd_rsp_code) {

	case CT_RSP_ACCEPT:

		/* Process each returned PID into an rport. */
		port->stats.ns_gidft_accepts++;
		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
		bfa_trc(port->fcs, n_pids);
		bfa_fcs_port_ns_process_gidft_pids(port,
						   (u32 *) (cthdr + 1),
						   n_pids);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case CT_RSP_REJECT:

		/*
		 * Check the reason code & explanation.
		 * There may not have been any FC4 devices in the fabric
		 */
		port->stats.ns_gidft_rejects++;
		bfa_trc(port->fcs, cthdr->reason_code);
		bfa_trc(port->fcs, cthdr->exp_code);

		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {

			/* Empty fabric is not an error. */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		} else {
			/*
			 * for all other errors, retry
			 */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		}
		break;

	default:
		port->stats.ns_gidft_unknown_rsp++;
		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
1117
1118/**
1119 * This routine will be called by bfa_timer on timer timeouts.
1120 *
1121 * param[in] port - pointer to bfa_fcs_port_t.
1122 *
1123 * return
1124 * void
1125 *
1126* Special Considerations:
1127 *
1128 * note
1129 */
1130static void
1131bfa_fcs_port_ns_timeout(void *arg)
1132{
1133 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)arg;
1134
1135 ns->port->stats.ns_timeouts++;
1136 bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
1137}
1138
1139/*
1140 * Process the PID list in GID_FT response
1141 */
1142static void
1143bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
1144 u32 *pid_buf, u32 n_pids)
1145{
1146 struct fcgs_gidft_resp_s *gidft_entry;
1147 struct bfa_fcs_rport_s *rport;
1148 u32 ii;
1149
1150 for (ii = 0; ii < n_pids; ii++) {
1151 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
1152
1153 if (gidft_entry->pid == port->pid)
1154 continue;
1155
1156 /*
1157 * Check if this rport already exists
1158 */
1159 rport = bfa_fcs_port_get_rport_by_pid(port, gidft_entry->pid);
1160 if (rport == NULL) {
1161 /*
1162 * this is a new device. create rport
1163 */
1164 rport = bfa_fcs_rport_create(port, gidft_entry->pid);
1165 } else {
1166 /*
1167 * this rport already exists
1168 */
1169 bfa_fcs_rport_scn(rport);
1170 }
1171
1172 bfa_trc(port->fcs, gidft_entry->pid);
1173
1174 /*
1175 * if the last entry bit is set, bail out.
1176 */
1177 if (gidft_entry->last)
1178 return;
1179 }
1180}
1181
1182/**
1183 * fcs_ns_public FCS nameserver public interfaces
1184 */
1185
1186/*
1187 * Functions called by port/fab.
1188 * These will send relevant Events to the ns state machine.
1189 */
1190void
1191bfa_fcs_port_ns_init(struct bfa_fcs_port_s *port)
1192{
1193 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1194
1195 ns->port = port;
1196 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
1197}
1198
1199void
1200bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *port)
1201{
1202 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1203
1204 ns->port = port;
1205 bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
1206}
1207
1208void
1209bfa_fcs_port_ns_online(struct bfa_fcs_port_s *port)
1210{
1211 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1212
1213 ns->port = port;
1214 bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
1215}
1216
1217void
1218bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port)
1219{
1220 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1221
1222 bfa_trc(port->fcs, port->pid);
1223 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
1224}
1225
1226static void
1227bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port)
1228{
1229
1230 struct bfa_fcs_rport_s *rport;
1231 u8 nwwns;
1232 wwn_t *wwns;
1233 int ii;
1234
1235 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, &wwns);
1236
1237 for (ii = 0; ii < nwwns; ++ii) {
1238 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
1239 bfa_assert(rport);
1240 }
1241}
1242
1243
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c
new file mode 100644
index 000000000000..86af818d17bb
--- /dev/null
+++ b/drivers/scsi/bfa/plog.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_os_inc.h>
19#include <cs/bfa_plog.h>
20#include <cs/bfa_debug.h>
21
/*
 * Validate a portlog record before it is copied into the ring.
 * Returns 1 if the record is malformed, 0 if it is acceptable.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	/* log_type must be one of the two known record kinds. */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
	    && (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	/*
	 * NOTE(review): this rejects non-INT records whose log_num_ints
	 * exceeds the array size; the condition was likely intended to be
	 * (log_type == BFA_PL_LOG_TYPE_INT) so the bound applies to INT
	 * records -- confirm before changing.
	 */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
	    && (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
35
/*
 * Append a record to the portlog ring buffer.
 * Does nothing when logging is disabled; asserts (and drops the
 * record) on a malformed entry.  When the ring is full the oldest
 * record is overwritten by advancing head along with tail.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	/* Copy the caller's record into the ring slot ... */
	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* ... then stamp it with the trace timestamp. */
	pl_recp->tv = BFA_TRC_TS(plog);
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring full: advance head so it keeps pointing at the oldest. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
62
63void
64bfa_plog_init(struct bfa_plog_s *plog)
65{
66 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
67
68 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
69 plog->head = plog->tail = 0;
70 plog->plog_enabled = 1;
71}
72
73void
74bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
75 enum bfa_plog_eid event,
76 u16 misc, char *log_str)
77{
78 struct bfa_plog_rec_s lp;
79
80 if (plog->plog_enabled) {
81 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
82 lp.mid = mid;
83 lp.eid = event;
84 lp.log_type = BFA_PL_LOG_TYPE_STRING;
85 lp.misc = misc;
86 strncpy(lp.log_entry.string_log, log_str,
87 BFA_PL_STRING_LOG_SZ - 1);
88 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
89 bfa_plog_add(plog, &lp);
90 }
91}
92
93void
94bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
95 enum bfa_plog_eid event,
96 u16 misc, u32 *intarr, u32 num_ints)
97{
98 struct bfa_plog_rec_s lp;
99 u32 i;
100
101 if (num_ints > BFA_PL_INT_LOG_SZ)
102 num_ints = BFA_PL_INT_LOG_SZ;
103
104 if (plog->plog_enabled) {
105 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
106 lp.mid = mid;
107 lp.eid = event;
108 lp.log_type = BFA_PL_LOG_TYPE_INT;
109 lp.misc = misc;
110
111 for (i = 0; i < num_ints; i++)
112 bfa_os_assign(lp.log_entry.int_log[i],
113 intarr[i]);
114
115 lp.log_num_ints = (u8) num_ints;
116
117 bfa_plog_add(plog, &lp);
118 }
119}
120
121void
122bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
123 enum bfa_plog_eid event,
124 u16 misc, struct fchs_s *fchdr)
125{
126 struct bfa_plog_rec_s lp;
127 u32 *tmp_int = (u32 *) fchdr;
128 u32 ints[BFA_PL_INT_LOG_SZ];
129
130 if (plog->plog_enabled) {
131 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
132
133 ints[0] = tmp_int[0];
134 ints[1] = tmp_int[1];
135 ints[2] = tmp_int[4];
136
137 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
138 }
139}
140
141void
142bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
143 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
144 u32 pld_w0)
145{
146 struct bfa_plog_rec_s lp;
147 u32 *tmp_int = (u32 *) fchdr;
148 u32 ints[BFA_PL_INT_LOG_SZ];
149
150 if (plog->plog_enabled) {
151 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
152
153 ints[0] = tmp_int[0];
154 ints[1] = tmp_int[1];
155 ints[2] = tmp_int[4];
156 ints[3] = pld_w0;
157
158 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
159 }
160}
161
162void
163bfa_plog_clear(struct bfa_plog_s *plog)
164{
165 plog->head = plog->tail = 0;
166}
167
168void
169bfa_plog_enable(struct bfa_plog_s *plog)
170{
171 plog->plog_enabled = 1;
172}
173
174void
175bfa_plog_disable(struct bfa_plog_s *plog)
176{
177 plog->plog_enabled = 0;
178}
179
180bfa_boolean_t
181bfa_plog_get_setting(struct bfa_plog_s *plog)
182{
183 return((bfa_boolean_t)plog->plog_enabled);
184}
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
new file mode 100644
index 000000000000..9cf58bb138dc
--- /dev/null
+++ b/drivers/scsi/bfa/rport.c
@@ -0,0 +1,2618 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * rport.c Remote port implementation.
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include "fcbuild.h"
25#include "fcs_vport.h"
26#include "fcs_lport.h"
27#include "fcs_rport.h"
28#include "fcs_fcpim.h"
29#include "fcs_fcptm.h"
30#include "fcs_trcmod.h"
31#include "fcs_fcxp.h"
32#include "fcs.h"
33#include <fcb/bfa_fcb_rport.h>
34#include <aen/bfa_aen_rport.h>
35
BFA_TRC_FILE(FCS, RPORT);

/* Max retry attempts for PLOGI / nameserver queries before giving up */
#define BFA_FCS_RPORT_MAX_RETRIES (5)

/*
 * Delay, in milliseconds, before a stale (offline) rport is deleted.
 */
static u32 bfa_fcs_rport_del_timeout =
	BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
43
/*
 * Forward declarations for rport allocation, FC-4 pause/resume and the
 * FCXP send/response handlers used by the state machine below.
 */
static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port,
			wwn_t pwwn, u32 rpid);
static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
			struct fc_logi_s *plogi);
static void bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_timeout(void *arg);
static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
			struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
			struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_plogi_response(void *fcsarg,
			struct bfa_fcxp_s *fcxp,
			void *cbarg,
			bfa_status_t req_status,
			u32 rsp_len,
			u32 resid_len,
			struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
			struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_adisc_response(void *fcsarg,
			struct bfa_fcxp_s *fcxp,
			void *cbarg,
			bfa_status_t req_status,
			u32 rsp_len,
			u32 resid_len,
			struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_gidpn(void *rport_cbarg,
			struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_gidpn_response(void *fcsarg,
			struct bfa_fcxp_s *fcxp,
			void *cbarg,
			bfa_status_t req_status,
			u32 rsp_len,
			u32 resid_len,
			struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_logo(void *rport_cbarg,
			struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u8 reason_code,
			u8 reason_code_expl);
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len);
/**
 * fcs_rport_sm FCS rport state machine events
 */

enum rport_event {
	RPSM_EVENT_PLOGI_SEND = 1,	/* new rport; start with PLOGI */
	RPSM_EVENT_PLOGI_RCVD = 2,	/* Inbound PLOGI from remote port */
	RPSM_EVENT_PLOGI_COMP = 3,	/* PLOGI completed to rport */
	RPSM_EVENT_LOGO_RCVD = 4,	/* LOGO from remote device */
	RPSM_EVENT_LOGO_IMP = 5,	/* implicit logo for SLER */
	RPSM_EVENT_FCXP_SENT = 6,	/* Frame from has been sent */
	RPSM_EVENT_DELETE = 7,	/* RPORT delete request */
	RPSM_EVENT_SCN = 8,	/* state change notification */
	RPSM_EVENT_ACCEPTED = 9,/* Good response from remote device */
	RPSM_EVENT_FAILED = 10,	/* Request to rport failed.  */
	RPSM_EVENT_TIMEOUT = 11,	/* Rport SM timeout event */
	RPSM_EVENT_HCB_ONLINE = 12,	/* BFA rport online callback */
	RPSM_EVENT_HCB_OFFLINE = 13,	/* BFA rport offline callback */
	RPSM_EVENT_FC4_OFFLINE = 14,	/* FC-4 offline complete */
	RPSM_EVENT_ADDRESS_CHANGE = 15,	/* Rport's PID has changed */
	RPSM_EVENT_ADDRESS_DISC = 16	/* Need to Discover rport's PID */
};
119
/*
 * State handler forward declarations.
 *
 * Fix: bfa_fcs_rport_sm_nsdisc_sent was declared twice; the redundant
 * duplicate declaration has been removed.
 */
static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
			enum rport_event event);
166
/*
 * Maps each state handler function to the externally visible BFA_RPORT_*
 * state value it represents (for state reporting/lookup via the SM table).
 */
static struct bfa_sm_table_s rport_sm_table[] = {
	{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
	{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
	{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
	{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
	{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
	{BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
	{BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
	{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
	{BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
	{BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE},
	{BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV},
	{BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO},
	{BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO},
	{BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE},
	{BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC},
	{BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC},
	{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
};
191
/**
 * Beginning state: rport allocated, no login performed yet.
 */
static void
bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_PLOGI_SEND:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
		rport->plogi_retries = 0;
		bfa_fcs_rport_send_plogi(rport, NULL);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
	case RPSM_EVENT_ADDRESS_DISC:
		/* PID unknown or stale: query the nameserver (GID_PN) first */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	default:
		bfa_assert(0);
	}
}
230
/**
 * PLOGI is being sent; waiting for the FCXP send to complete.
 */
static void
bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
	 enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		/* cancel the pending FCXP allocation before freeing */
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		/* arm delete timer; stale rport is freed when it fires */
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_SCN:
		break;

	default:
		bfa_assert(0);
	}
}
282
/**
 * PLOGI accept (ACC) is being sent in response to an inbound PLOGI;
 * waiting for the FCXP send to complete.
 */
static void
bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
		/**
		 * Ignore, SCN is possibly online notification.
		 */
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_HCB_OFFLINE:
		/**
		 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
338
/**
 * PLOGI retry delay: a previous PLOGI attempt failed and the retry timer
 * is running before the next attempt.
 */
static void
bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_SCN:
		bfa_timer_stop(&rport->timer);
		/*
		 * !! fall through !!
		 */

	case RPSM_EVENT_TIMEOUT:
		rport->plogi_retries++;
		if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
			bfa_fcs_rport_send_plogi(rport, NULL);
		} else {
			/* retries exhausted: go offline, arm delete timer */
			rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_timer_stop(&rport->timer);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		/* replace the retry timer with the delete timer */
		bfa_timer_stop(&rport->timer);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_hal_online(rport);
		break;

	default:
		bfa_assert(0);
	}
}
412
/**
 * PLOGI is sent; awaiting the PLOGI response.
 */
static void
bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		rport->plogi_retries = 0;
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_fcs_rport_send_logo_acc(rport);
		bfa_fcxp_discard(rport->fcxp);
		/*
		 * !! fall through !!
		 */
	case RPSM_EVENT_FAILED:
		/* retry after a delay; counting happens in plogi_retry */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_fcxp_discard(rport->fcxp);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_SCN:
		/**
		 * Ignore SCN - wait for PLOGI response.
		 */
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_hal_online(rport);
		break;

	default:
		bfa_assert(0);
	}
}
487
/**
 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
 * are offline.
 */
static void
bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_ONLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
		bfa_fcs_rport_online_action(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
		bfa_rport_offline(rport->bfa_rport);
		break;

	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
		bfa_rport_offline(rport->bfa_rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		/* take BFA rport offline first, then answer the new PLOGI */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_rport_offline(rport->bfa_rport);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		bfa_rport_offline(rport->bfa_rport);
		break;

	case RPSM_EVENT_SCN:
		/**
		 * @todo
		 * Ignore SCN - PLOGI just completed, FC-4 login should detect
		 * device failures.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
540
/**
 * Rport is ONLINE. FC-4s active.
 */
static void
bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_SCN:
		/**
		 * Pause FC-4 activity till rport is authenticated.
		 * In switched fabrics, check presence of device in nameserver
		 * first.
		 */
		bfa_fcs_rport_fc4_pause(rport);

		if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
			bfa_sm_set_state(rport,
					 bfa_fcs_rport_sm_nsquery_sending);
			rport->ns_retries = 0;
			bfa_fcs_rport_send_gidpn(rport, NULL);
		} else {
			/* no nameserver: authenticate directly with ADISC */
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
			bfa_fcs_rport_send_adisc(rport, NULL);
		}
		break;

	case RPSM_EVENT_PLOGI_RCVD:
	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		break;

	default:
		bfa_assert(0);
	}
}
595
/**
 * An SCN event is received in ONLINE state. NS query is being sent
 * prior to ADISC authentication with rport. FC-4s are paused.
 */
static void
bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_SCN:
		/**
		 * ignore SCN, wait for response to query itself
		 */
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	default:
		bfa_assert(0);
	}
}
652
/**
 * An SCN event is received in ONLINE state. NS query is sent to rport.
 * FC-4s are paused.
 */
static void
bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
		/* device still in nameserver: authenticate with ADISC */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
		bfa_fcs_rport_send_adisc(rport, NULL);
		break;

	case RPSM_EVENT_FAILED:
		rport->ns_retries++;
		if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			bfa_sm_set_state(rport,
					 bfa_fcs_rport_sm_nsquery_sending);
			bfa_fcs_rport_send_gidpn(rport, NULL);
		} else {
			/* retries exhausted: treat device as gone */
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
			bfa_fcs_rport_offline_action(rport);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_SCN:
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_PLOGI_COMP:
	case RPSM_EVENT_ADDRESS_CHANGE:
	case RPSM_EVENT_PLOGI_RCVD:
	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	default:
		bfa_assert(0);
	}
}
710
/**
 * An SCN event is received in ONLINE state. ADISC is being sent for
 * authenticating with rport. FC-4s are paused.
 */
static void
bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_SCN:
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_offline_action(rport);
		break;

	default:
		bfa_assert(0);
	}
}
760
/**
 * An SCN event is received in ONLINE state. ADISC is sent to rport;
 * awaiting the ADISC response. FC-4s are paused.
 */
static void
bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
		/* authentication succeeded: resume paused FC-4s */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
		bfa_fcs_rport_fc4_resume(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		/**
		 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
		 * At least go offline when a PLOGI is received.
		 */
		bfa_fcxp_discard(rport->fcxp);
		/*
		 * !!! fall through !!!
		 */

	case RPSM_EVENT_FAILED:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_SCN:
		/**
		 * already processing RSCN
		 */
		break;

	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_offline_action(rport);
		break;

	default:
		bfa_assert(0);
	}
}
822
/**
 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
 */
static void
bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
		bfa_rport_offline(rport->bfa_rport);
		break;

	case RPSM_EVENT_DELETE:
		/* remember the delete: send LOGO once FC-4s are offline */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	default:
		bfa_assert(0);
	}
}
852
/**
 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
 * callback.
 */
static void
bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		bfa_rport_offline(rport->bfa_rport);
		break;

	default:
		bfa_assert(0);
	}
}
875
/**
 * Rport is going offline. Awaiting FC-4 offline completion callback.
 */
static void
bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
		bfa_rport_offline(rport->bfa_rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/**
		 * rport is already going offline.
		 * SCN - ignore and wait till transitioning to offline state
		 */
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		break;

	default:
		bfa_assert(0);
	}
}
911
/**
 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
 * callback.
 */
static void
bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
	case RPSM_EVENT_ADDRESS_CHANGE:
		if (bfa_fcs_port_is_online(rport->port)) {
			/* local port still up: rediscover PID via GID_PN */
			bfa_sm_set_state(rport,
					 bfa_fcs_rport_sm_nsdisc_sending);
			rport->ns_retries = 0;
			bfa_fcs_rport_send_gidpn(rport, NULL);
		} else {
			/* local port down: park offline with delete timer */
			rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_LOGO_RCVD:
		/**
		 * Ignore, already offline.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
957
/**
 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
 * callback to send LOGO accept.
 */
static void
bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
	case RPSM_EVENT_ADDRESS_CHANGE:
		if (rport->pid)
			bfa_fcs_rport_send_logo_acc(rport);
		/*
		 * If the lport is online and if the rport is not a well known
		 * address port, we try to re-discover the r-port.
		 */
		if (bfa_fcs_port_is_online(rport->port)
		    && (!BFA_FCS_PID_IS_WKA(rport->pid))) {
			bfa_sm_set_state(rport,
					 bfa_fcs_rport_sm_nsdisc_sending);
			rport->ns_retries = 0;
			bfa_fcs_rport_send_gidpn(rport, NULL);
		} else {
			/*
			 * If it is not a well known address, reset the
			 * PID to 0 (it is no longer valid).
			 */
			if (!BFA_FCS_PID_IS_WKA(rport->pid))
				rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		break;

	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		/**
		 * Ignore - already processing a LOGO.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
1017
/**
 * Rport is being deleted. FC-4s are offline. Awaiting BFA rport offline
 * callback to send LOGO.
 */
static void
bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending);
		bfa_fcs_rport_send_logo(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	default:
		bfa_assert(0);
	}
}
1044
/**
 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
 */
static void
bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		/*
		 * Once LOGO is sent, we donot wait for the response
		 */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	case RPSM_EVENT_LOGO_RCVD:
		/* peer logged out first: no need to send our LOGO */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_free(rport);
		break;

	default:
		bfa_assert(0);
	}
}
1079
/**
 * Rport is offline. FC-4s are offline. BFA rport is offline.
 * Timer active to delete stale rport.
 */
static void
bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_TIMEOUT:
		/* delete timer fired: reclaim the stale rport */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		bfa_timer_stop(&rport->timer);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_gidpn(rport, NULL);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_LOGO_IMP:
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_PLOGI_SEND:
		bfa_timer_stop(&rport->timer);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
		rport->plogi_retries = 0;
		bfa_fcs_rport_send_plogi(rport, NULL);
		break;

	default:
		bfa_assert(0);
	}
}
1138
1139/**
1140 * Rport address has changed. Nameserver discovery request is being sent.
1141 */
1142static void
1143bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1144 enum rport_event event)
1145{
1146 bfa_trc(rport->fcs, rport->pwwn);
1147 bfa_trc(rport->fcs, rport->pid);
1148 bfa_trc(rport->fcs, event);
1149
1150 switch (event) {
1151 case RPSM_EVENT_FCXP_SENT:
1152 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent);
1153 break;
1154
1155 case RPSM_EVENT_DELETE:
1156 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1157 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1158 bfa_fcs_rport_free(rport);
1159 break;
1160
1161 case RPSM_EVENT_PLOGI_RCVD:
1162 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
1163 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1164 bfa_fcs_rport_send_plogiacc(rport, NULL);
1165 break;
1166
1167 case RPSM_EVENT_SCN:
1168 case RPSM_EVENT_LOGO_RCVD:
1169 case RPSM_EVENT_PLOGI_SEND:
1170 break;
1171
1172 case RPSM_EVENT_ADDRESS_CHANGE:
1173 rport->ns_retries = 0; /* reset the retry count */
1174 break;
1175
1176 case RPSM_EVENT_LOGO_IMP:
1177 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
1178 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1179 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1180 bfa_fcs_rport_timeout, rport,
1181 bfa_fcs_rport_del_timeout);
1182 break;
1183
1184 case RPSM_EVENT_PLOGI_COMP:
1185 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
1186 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1187 bfa_fcs_rport_hal_online(rport);
1188 break;
1189
1190 default:
1191 bfa_assert(0);
1192 }
1193}
1194
1195/**
1196 * Nameserver discovery failed. Waiting for timeout to retry.
1197 */
1198static void
1199bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1200 enum rport_event event)
1201{
1202 bfa_trc(rport->fcs, rport->pwwn);
1203 bfa_trc(rport->fcs, rport->pid);
1204 bfa_trc(rport->fcs, event);
1205
1206 switch (event) {
1207 case RPSM_EVENT_TIMEOUT:
1208 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1209 bfa_fcs_rport_send_gidpn(rport, NULL);
1210 break;
1211
1212 case RPSM_EVENT_SCN:
1213 case RPSM_EVENT_ADDRESS_CHANGE:
1214 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1215 bfa_timer_stop(&rport->timer);
1216 rport->ns_retries = 0;
1217 bfa_fcs_rport_send_gidpn(rport, NULL);
1218 break;
1219
1220 case RPSM_EVENT_DELETE:
1221 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1222 bfa_timer_stop(&rport->timer);
1223 bfa_fcs_rport_free(rport);
1224 break;
1225
1226 case RPSM_EVENT_PLOGI_RCVD:
1227 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
1228 bfa_timer_stop(&rport->timer);
1229 bfa_fcs_rport_send_plogiacc(rport, NULL);
1230 break;
1231
1232 case RPSM_EVENT_LOGO_IMP:
1233 rport->pid = 0;
1234 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
1235 bfa_timer_stop(&rport->timer);
1236 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1237 bfa_fcs_rport_timeout, rport,
1238 bfa_fcs_rport_del_timeout);
1239 break;
1240
1241 case RPSM_EVENT_LOGO_RCVD:
1242 bfa_fcs_rport_send_logo_acc(rport);
1243 break;
1244
1245 case RPSM_EVENT_PLOGI_COMP:
1246 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
1247 bfa_timer_stop(&rport->timer);
1248 bfa_fcs_rport_hal_online(rport);
1249 break;
1250
1251 default:
1252 bfa_assert(0);
1253 }
1254}
1255
1256/**
1257 * Rport address has changed. Nameserver discovery request is sent.
1258 */
1259static void
1260bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1261 enum rport_event event)
1262{
1263 bfa_trc(rport->fcs, rport->pwwn);
1264 bfa_trc(rport->fcs, rport->pid);
1265 bfa_trc(rport->fcs, event);
1266
1267 switch (event) {
1268 case RPSM_EVENT_ACCEPTED:
1269 case RPSM_EVENT_ADDRESS_CHANGE:
1270 if (rport->pid) {
1271 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1272 bfa_fcs_rport_send_plogi(rport, NULL);
1273 } else {
1274 bfa_sm_set_state(rport,
1275 bfa_fcs_rport_sm_nsdisc_sending);
1276 rport->ns_retries = 0;
1277 bfa_fcs_rport_send_gidpn(rport, NULL);
1278 }
1279 break;
1280
1281 case RPSM_EVENT_FAILED:
1282 rport->ns_retries++;
1283 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
1284 bfa_sm_set_state(rport,
1285 bfa_fcs_rport_sm_nsdisc_sending);
1286 bfa_fcs_rport_send_gidpn(rport, NULL);
1287 } else {
1288 rport->pid = 0;
1289 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
1290 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1291 bfa_fcs_rport_timeout, rport,
1292 bfa_fcs_rport_del_timeout);
1293 };
1294 break;
1295
1296 case RPSM_EVENT_DELETE:
1297 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1298 bfa_fcxp_discard(rport->fcxp);
1299 bfa_fcs_rport_free(rport);
1300 break;
1301
1302 case RPSM_EVENT_PLOGI_RCVD:
1303 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
1304 bfa_fcxp_discard(rport->fcxp);
1305 bfa_fcs_rport_send_plogiacc(rport, NULL);
1306 break;
1307
1308 case RPSM_EVENT_LOGO_IMP:
1309 rport->pid = 0;
1310 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
1311 bfa_fcxp_discard(rport->fcxp);
1312 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1313 bfa_fcs_rport_timeout, rport,
1314 bfa_fcs_rport_del_timeout);
1315 break;
1316
1317 case RPSM_EVENT_SCN:
1318 /**
1319 * ignore, wait for NS query response
1320 */
1321 break;
1322
1323 case RPSM_EVENT_LOGO_RCVD:
1324 /**
1325 * Not logged-in yet. Accept LOGO.
1326 */
1327 bfa_fcs_rport_send_logo_acc(rport);
1328 break;
1329
1330 case RPSM_EVENT_PLOGI_COMP:
1331 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
1332 bfa_fcxp_discard(rport->fcxp);
1333 bfa_fcs_rport_hal_online(rport);
1334 break;
1335
1336 default:
1337 bfa_assert(0);
1338 }
1339}
1340
1341
1342
1343/**
1344 * fcs_rport_private FCS RPORT provate functions
1345 */
1346
1347static void
1348bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1349{
1350 struct bfa_fcs_rport_s *rport = rport_cbarg;
1351 struct bfa_fcs_port_s *port = rport->port;
1352 struct fchs_s fchs;
1353 int len;
1354 struct bfa_fcxp_s *fcxp;
1355
1356 bfa_trc(rport->fcs, rport->pwwn);
1357
1358 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1359 if (!fcxp) {
1360 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1361 bfa_fcs_rport_send_plogi, rport);
1362 return;
1363 }
1364 rport->fcxp = fcxp;
1365
1366 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1367 bfa_fcs_port_get_fcid(port), 0,
1368 port->port_cfg.pwwn, port->port_cfg.nwwn,
1369 bfa_pport_get_maxfrsize(port->fcs->bfa));
1370
1371 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1372 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
1373 (void *)rport, FC_MAX_PDUSZ, FC_RA_TOV);
1374
1375 rport->stats.plogis++;
1376 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1377}
1378
/**
 * Handle the response to an outgoing PLOGI.
 *
 * Drives the rport state machine with FAILED on transport error or
 * LS_RJT, ACCEPTED on a normal accept, or PLOGI_COMP on an existing
 * "twin" rport that re-appeared with a new FC address.
 */
static void
bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
	struct fc_logi_s *plogi_rsp;
	struct fc_ls_rjt_s *ls_rjt;
	struct bfa_fcs_rport_s *twin;
	struct list_head *qe;

	bfa_trc(rport->fcs, rport->pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		rport->stats.plogi_failed++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);

	/**
	 * Check for failure first.
	 */
	if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
		/* Not an ACC -- reinterpret the payload as an LS_RJT */
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(rport->fcs, ls_rjt->reason_code);
		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);

		rport->stats.plogi_rejects++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	/**
	 * PLOGI is complete. Make sure this device is not one of the known
	 * devices with a new FC port address.
	 */
	list_for_each(qe, &rport->port->rport_q) {
		twin = (struct bfa_fcs_rport_s *)qe;
		if (twin == rport)
			continue;
		if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
			bfa_trc(rport->fcs, twin->pid);
			bfa_trc(rport->fcs, rport->pid);

			/*
			 * Fold this rport's PLOGI stats into the twin
			 * before this rport is deleted.
			 */
			twin->stats.plogis += rport->stats.plogis;
			twin->stats.plogi_rejects += rport->stats.plogi_rejects;
			twin->stats.plogi_timeouts +=
				rport->stats.plogi_timeouts;
			twin->stats.plogi_failed += rport->stats.plogi_failed;
			twin->stats.plogi_rcvd += rport->stats.plogi_rcvd;
			twin->stats.plogi_accs++;

			bfa_fcs_rport_delete(rport);

			/* Move the twin to the new address and go online */
			bfa_fcs_rport_update(twin, plogi_rsp);
			twin->pid = rsp_fchs->s_id;
			bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP);
			return;
		}
	}

	/**
	 * Normal login path -- no evil twins.
	 */
	rport->stats.plogi_accs++;
	bfa_fcs_rport_update(rport, plogi_rsp);
	bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
}
1457
1458static void
1459bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1460{
1461 struct bfa_fcs_rport_s *rport = rport_cbarg;
1462 struct bfa_fcs_port_s *port = rport->port;
1463 struct fchs_s fchs;
1464 int len;
1465 struct bfa_fcxp_s *fcxp;
1466
1467 bfa_trc(rport->fcs, rport->pwwn);
1468 bfa_trc(rport->fcs, rport->reply_oxid);
1469
1470 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1471 if (!fcxp) {
1472 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1473 bfa_fcs_rport_send_plogiacc, rport);
1474 return;
1475 }
1476 rport->fcxp = fcxp;
1477
1478 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1479 bfa_fcs_port_get_fcid(port), rport->reply_oxid,
1480 port->port_cfg.pwwn, port->port_cfg.nwwn,
1481 bfa_pport_get_maxfrsize(port->fcs->bfa));
1482
1483 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1484 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1485
1486 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1487}
1488
1489static void
1490bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1491{
1492 struct bfa_fcs_rport_s *rport = rport_cbarg;
1493 struct bfa_fcs_port_s *port = rport->port;
1494 struct fchs_s fchs;
1495 int len;
1496 struct bfa_fcxp_s *fcxp;
1497
1498 bfa_trc(rport->fcs, rport->pwwn);
1499
1500 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1501 if (!fcxp) {
1502 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1503 bfa_fcs_rport_send_adisc, rport);
1504 return;
1505 }
1506 rport->fcxp = fcxp;
1507
1508 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1509 bfa_fcs_port_get_fcid(port), 0,
1510 port->port_cfg.pwwn, port->port_cfg.nwwn);
1511
1512 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1513 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
1514 rport, FC_MAX_PDUSZ, FC_RA_TOV);
1515
1516 rport->stats.adisc_sent++;
1517 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1518}
1519
/**
 * Handle the response to an outgoing ADISC.
 *
 * Drives the rport state machine with ACCEPTED when the response
 * parses OK against the stored pwwn/nwwn, FAILED otherwise.
 */
static void
bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
	void *pld = bfa_fcxp_get_rspbuf(fcxp);
	struct fc_ls_rjt_s *ls_rjt;

	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		rport->stats.adisc_failed++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	/* Parse succeeds only if the peer's WWNs still match ours */
	if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
		rport->nwwn) == FC_PARSE_OK) {
		rport->stats.adisc_accs++;
		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
		return;
	}

	/* Anything else is treated as a reject; trace its reason codes */
	rport->stats.adisc_rejects++;
	ls_rjt = pld;
	bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code);
	bfa_trc(rport->fcs, ls_rjt->reason_code);
	bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
	bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
}
1550
1551static void
1552bfa_fcs_rport_send_gidpn(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1553{
1554 struct bfa_fcs_rport_s *rport = rport_cbarg;
1555 struct bfa_fcs_port_s *port = rport->port;
1556 struct fchs_s fchs;
1557 struct bfa_fcxp_s *fcxp;
1558 int len;
1559
1560 bfa_trc(rport->fcs, rport->pid);
1561
1562 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1563 if (!fcxp) {
1564 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1565 bfa_fcs_rport_send_gidpn, rport);
1566 return;
1567 }
1568 rport->fcxp = fcxp;
1569
1570 len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1571 bfa_fcs_port_get_fcid(port), 0, rport->pwwn);
1572
1573 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1574 FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response,
1575 (void *)rport, FC_MAX_PDUSZ, FC_RA_TOV);
1576
1577 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1578}
1579
/**
 * Handle the GID_PN nameserver response.
 *
 * On accept: if the reported PID matches, the device is online
 * (ACCEPTED); if it changed, adopt the new PID and notify any twin
 * rport currently holding that PID (ADDRESS_CHANGE).  On reject:
 * retry on logical-busy (TIMEOUT), otherwise FAILED.
 */
static void
bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
	struct bfa_fcs_rport_s *twin;
	struct list_head *qe;
	struct ct_hdr_s *cthdr;
	struct fcgs_gidpn_resp_s *gidpn_rsp;

	bfa_trc(rport->fcs, rport->pwwn);

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		/*
		 * Check if the pid is the same as before.
		 */
		gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);

		if (gidpn_rsp->dap == rport->pid) {
			/*
			 * Device is online
			 */
			bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
		} else {
			/*
			 * Device's PID has changed. We need to cleanup and
			 * re-login. If there is another device with the
			 * newly discovered pid, send an scn notice so that its
			 * new pid can be discovered.
			 */
			list_for_each(qe, &rport->port->rport_q) {
				twin = (struct bfa_fcs_rport_s *)qe;
				if (twin == rport)
					continue;
				if (gidpn_rsp->dap == twin->pid) {
					bfa_trc(rport->fcs, twin->pid);
					bfa_trc(rport->fcs, rport->pid);

					/* Twin's PID is stale -- rediscover */
					twin->pid = 0;
					bfa_sm_send_event(twin,
						RPSM_EVENT_ADDRESS_CHANGE);
				}
			}
			rport->pid = gidpn_rsp->dap;
			bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE);
		}
		return;
	}

	/*
	 * Reject Response
	 */
	switch (cthdr->reason_code) {
	case CT_RSN_LOGICAL_BUSY:
		/*
		 * Need to retry
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
		break;

	case CT_RSN_UNABLE_TO_PERF:
		/*
		 * device doesn't exist : Start timer to cleanup this later.
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;

	default:
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;
	}
}
1656
1657/**
1658 * Called to send a logout to the rport.
1659 */
1660static void
1661bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1662{
1663 struct bfa_fcs_rport_s *rport = rport_cbarg;
1664 struct bfa_fcs_port_s *port;
1665 struct fchs_s fchs;
1666 struct bfa_fcxp_s *fcxp;
1667 u16 len;
1668
1669 bfa_trc(rport->fcs, rport->pid);
1670
1671 port = rport->port;
1672
1673 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1674 if (!fcxp) {
1675 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1676 bfa_fcs_rport_send_logo, rport);
1677 return;
1678 }
1679 rport->fcxp = fcxp;
1680
1681 len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1682 bfa_fcs_port_get_fcid(port), 0,
1683 bfa_fcs_port_get_pwwn(port));
1684
1685 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1686 FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ,
1687 FC_ED_TOV);
1688
1689 rport->stats.logos++;
1690 bfa_fcxp_discard(rport->fcxp);
1691 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1692}
1693
1694/**
1695 * Send ACC for a LOGO received.
1696 */
1697static void
1698bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1699{
1700 struct bfa_fcs_rport_s *rport = rport_cbarg;
1701 struct bfa_fcs_port_s *port;
1702 struct fchs_s fchs;
1703 struct bfa_fcxp_s *fcxp;
1704 u16 len;
1705
1706 bfa_trc(rport->fcs, rport->pid);
1707
1708 port = rport->port;
1709
1710 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
1711 if (!fcxp)
1712 return;
1713
1714 rport->stats.logo_rcvd++;
1715 len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1716 bfa_fcs_port_get_fcid(port), rport->reply_oxid);
1717
1718 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1719 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1720}
1721
1722/**
1723 * This routine will be called by bfa_timer on timer timeouts.
1724 *
1725 * param[in] rport - pointer to bfa_fcs_port_ns_t.
1726 * param[out] rport_status - pointer to return vport status in
1727 *
1728 * return
1729 * void
1730 *
1731* Special Considerations:
1732 *
1733 * note
1734 */
1735static void
1736bfa_fcs_rport_timeout(void *arg)
1737{
1738 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)arg;
1739
1740 rport->stats.plogi_timeouts++;
1741 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
1742}
1743
/**
 * Handle a received PRLI.
 *
 * In target mode the PRLI is handed to the fcptm; otherwise the
 * initiator/target role of the peer is recorded and a PRLI accept
 * advertising this port's roles is sent back.
 */
static void
bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s fchs;
	struct bfa_fcs_port_s *port = rport->port;
	struct fc_prli_s *prli;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.prli_rcvd++;

	if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
		/*
		 * Target Mode : Let the fcptm handle it
		 */
		bfa_fcs_tin_rx_prli(rport->tin, rx_fchs, len);
		return;
	}

	/*
	 * We are either in Initiator or ipfc Mode
	 */
	prli = (struct fc_prli_s *) (rx_fchs + 1);

	if (prli->parampage.servparams.initiator) {
		bfa_trc(rport->fcs, prli->parampage.type);
		rport->scsi_function = BFA_RPORT_INITIATOR;
		bfa_fcs_itnim_is_initiator(rport->itnim);
	} else {
		/*
		 * @todo: PRLI from a target ?
		 */
		bfa_trc(port->fcs, rx_fchs->s_id);
		rport->scsi_function = BFA_RPORT_TARGET;
	}

	/* Best effort: if no fcxp is available the ACC is dropped */
	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
				bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
				port->port_cfg.roles);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
1794
/**
 * Handle a received RPSC (Report Port Speed Capabilities) by replying
 * with this port's speed capabilities and current operating speed.
 */
static void
bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s fchs;
	struct bfa_fcs_port_s *port = rport->port;
	struct fc_rpsc_speed_info_s speeds;
	struct bfa_pport_attr_s pport_attr;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.rpsc_rcvd++;
	/* Advertised capability set is fixed: 1/2/4/8G */
	speeds.port_speed_cap =
		RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G |
		RPSC_SPEED_CAP_8G;

	/*
	 * get current speed from pport attributes from BFA
	 */
	bfa_pport_get_attr(port->fcs->bfa, &pport_attr);

	speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);

	/* Best effort: if no fcxp is available the ACC is dropped */
	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
				bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
				&speeds);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
1831
/**
 * Handle a received ADISC: accept when the itnim for this rport is
 * online, otherwise send an LS_RJT (login required).
 */
static void
bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s fchs;
	struct bfa_fcs_port_s *port = rport->port;
	struct fc_adisc_s *adisc;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.adisc_rcvd++;

	if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
		/*
		 * @todo : Target Mode handling
		 */
		bfa_trc(port->fcs, rx_fchs->d_id);
		bfa_assert(0);
		return;
	}

	/* NOTE(review): parsed payload is currently unused -- verify */
	adisc = (struct fc_adisc_s *) (rx_fchs + 1);

	/*
	 * Accept if the itnim for this rport is online. Else reject the ADISC
	 */
	if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {

		/* Best effort: if no fcxp is available the ACC is dropped */
		fcxp = bfa_fcs_fcxp_alloc(port->fcs);
		if (!fcxp)
			return;

		len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
					 rx_fchs->s_id,
					 bfa_fcs_port_get_fcid(port),
					 rx_fchs->ox_id, port->port_cfg.pwwn,
					 port->port_cfg.nwwn);

		bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
			      BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
			      FC_MAX_PDUSZ, 0);
	} else {
		rport->stats.adisc_rejected++;
		bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
					  FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
					  FC_LS_RJT_EXP_LOGIN_REQUIRED);
	}

}
1883
1884static void
1885bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1886{
1887 struct bfa_fcs_port_s *port = rport->port;
1888 struct bfa_rport_info_s rport_info;
1889
1890 rport_info.pid = rport->pid;
1891 rport_info.local_pid = port->pid;
1892 rport_info.lp_tag = port->lp_tag;
1893 rport_info.vf_id = port->fabric->vf_id;
1894 rport_info.vf_en = port->fabric->is_vf;
1895 rport_info.fc_class = rport->fc_cos;
1896 rport_info.cisc = rport->cisc;
1897 rport_info.max_frmsz = rport->maxfrsize;
1898 bfa_rport_online(rport->bfa_rport, &rport_info);
1899}
1900
1901static void
1902bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport)
1903{
1904 if (bfa_fcs_port_is_initiator(rport->port))
1905 bfa_fcs_itnim_pause(rport->itnim);
1906
1907 if (bfa_fcs_port_is_target(rport->port))
1908 bfa_fcs_tin_pause(rport->tin);
1909}
1910
1911static void
1912bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport)
1913{
1914 if (bfa_fcs_port_is_initiator(rport->port))
1915 bfa_fcs_itnim_resume(rport->itnim);
1916
1917 if (bfa_fcs_port_is_target(rport->port))
1918 bfa_fcs_tin_resume(rport->tin);
1919}
1920
/**
 * Allocate and initialize an FCS rport, its BFA rport, and its FC-4
 * (itnim or tin) instance, then add it to the port's rport queue.
 *
 * On any allocation failure all previously acquired resources are
 * released and NULL is returned.
 */
static struct bfa_fcs_rport_s *
bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
{
	struct bfa_fcs_s *fcs = port->fcs;
	struct bfa_fcs_rport_s *rport;
	struct bfad_rport_s *rport_drv;

	/**
	 * allocate rport
	 */
	if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
		!= BFA_STATUS_OK) {
		bfa_trc(fcs, rpid);
		return NULL;
	}

	/*
	 * Initialize r-port
	 */
	rport->port = port;
	rport->fcs = fcs;
	rport->rp_drv = rport_drv;
	rport->pid = rpid;
	rport->pwwn = pwwn;

	/**
	 * allocate BFA rport
	 */
	rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
	if (!rport->bfa_rport) {
		bfa_trc(fcs, rpid);
		kfree(rport_drv);
		return NULL;
	}

	/**
	 * allocate FC-4s -- a port must be exactly one of initiator
	 * or target
	 */
	bfa_assert(bfa_fcs_port_is_initiator(port) ^
			bfa_fcs_port_is_target(port));

	if (bfa_fcs_port_is_initiator(port)) {
		rport->itnim = bfa_fcs_itnim_create(rport);
		if (!rport->itnim) {
			bfa_trc(fcs, rpid);
			/* unwind: release BFA rport and driver struct */
			bfa_rport_delete(rport->bfa_rport);
			kfree(rport_drv);
			return NULL;
		}
	}

	if (bfa_fcs_port_is_target(port)) {
		rport->tin = bfa_fcs_tin_create(rport);
		if (!rport->tin) {
			bfa_trc(fcs, rpid);
			/* unwind: release BFA rport and driver struct */
			bfa_rport_delete(rport->bfa_rport);
			kfree(rport_drv);
			return NULL;
		}
	}

	bfa_fcs_port_add_rport(port, rport);

	bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);

	/*
	 * Initialize the Rport Features(RPF) Sub Module
	 */
	if (!BFA_FCS_PID_IS_WKA(rport->pid))
		bfa_fcs_rpf_init(rport);

	return rport;
}
1994
1995
static void
bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_port_s *port = rport->port;

	/**
	 * Teardown mirrors bfa_fcs_rport_alloc():
	 * - delete FC-4s
	 * - delete BFA rport
	 * - remove from queue of rports
	 * - free the driver structure
	 */
	if (bfa_fcs_port_is_initiator(port))
		bfa_fcs_itnim_delete(rport->itnim);

	if (bfa_fcs_port_is_target(port))
		bfa_fcs_tin_delete(rport->tin);

	bfa_rport_delete(rport->bfa_rport);
	bfa_fcs_port_del_rport(port, rport);
	kfree(rport->rp_drv);
}
2016
2017static void
2018bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2019 enum bfa_rport_aen_event event,
2020 struct bfa_rport_aen_data_s *data)
2021{
2022 union bfa_aen_data_u aen_data;
2023 struct bfa_log_mod_s *logmod = rport->fcs->logm;
2024 wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
2025 wwn_t rpwwn = rport->pwwn;
2026 char lpwwn_ptr[BFA_STRING_32];
2027 char rpwwn_ptr[BFA_STRING_32];
2028 char *prio_str[] = { "unknown", "high", "medium", "low" };
2029
2030 wwn2str(lpwwn_ptr, lpwwn);
2031 wwn2str(rpwwn_ptr, rpwwn);
2032
2033 switch (event) {
2034 case BFA_RPORT_AEN_ONLINE:
2035 bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr);
2036 break;
2037 case BFA_RPORT_AEN_OFFLINE:
2038 bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr);
2039 break;
2040 case BFA_RPORT_AEN_DISCONNECT:
2041 bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
2042 break;
2043 case BFA_RPORT_AEN_QOS_PRIO:
2044 aen_data.rport.priv.qos = data->priv.qos;
2045 bfa_log(logmod, BFA_AEN_RPORT_QOS_PRIO,
2046 prio_str[aen_data.rport.priv.qos.qos_priority],
2047 rpwwn_ptr, lpwwn_ptr);
2048 break;
2049 case BFA_RPORT_AEN_QOS_FLOWID:
2050 aen_data.rport.priv.qos = data->priv.qos;
2051 bfa_log(logmod, BFA_AEN_RPORT_QOS_FLOWID,
2052 aen_data.rport.priv.qos.qos_flow_id, rpwwn_ptr,
2053 lpwwn_ptr);
2054 break;
2055 default:
2056 break;
2057 }
2058
2059 aen_data.rport.vf_id = rport->port->fabric->vf_id;
2060 aen_data.rport.ppwwn =
2061 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(rport->fcs));
2062 aen_data.rport.lpwwn = lpwwn;
2063 aen_data.rport.rpwwn = rpwwn;
2064}
2065
2066static void
2067bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2068{
2069 struct bfa_fcs_port_s *port = rport->port;
2070
2071 rport->stats.onlines++;
2072
2073 if (bfa_fcs_port_is_initiator(port)) {
2074 bfa_fcs_itnim_rport_online(rport->itnim);
2075 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2076 bfa_fcs_rpf_rport_online(rport);
2077 };
2078
2079 if (bfa_fcs_port_is_target(port))
2080 bfa_fcs_tin_rport_online(rport->tin);
2081
2082 /*
2083 * Don't post events for well known addresses
2084 */
2085 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2086 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
2087}
2088
/**
 * Rport has gone offline: post the appropriate AEN (DISCONNECT when
 * the local port itself is still online, OFFLINE otherwise) and
 * notify the FC-4 instance.
 */
static void
bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_port_s *port = rport->port;

	rport->stats.offlines++;

	/*
	 * Don't post events for well known addresses
	 */
	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
		if (bfa_fcs_port_is_online(rport->port) == BFA_TRUE) {
			bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_DISCONNECT,
					       NULL);
		} else {
			bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_OFFLINE,
					       NULL);
		}
	}

	if (bfa_fcs_port_is_initiator(port)) {
		bfa_fcs_itnim_rport_offline(rport->itnim);
		if (!BFA_FCS_PID_IS_WKA(rport->pid))
			bfa_fcs_rpf_rport_offline(rport);
	}

	if (bfa_fcs_port_is_target(port))
		bfa_fcs_tin_rport_offline(rport->tin);
}
2118
2119/**
2120 * Update rport parameters from PLOGI or PLOGI accept.
2121 */
2122static void
2123bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2124{
2125 struct bfa_fcs_port_s *port = rport->port;
2126
2127 /**
2128 * - port name
2129 * - node name
2130 */
2131 rport->pwwn = plogi->port_name;
2132 rport->nwwn = plogi->node_name;
2133
2134 /**
2135 * - class of service
2136 */
2137 rport->fc_cos = 0;
2138 if (plogi->class3.class_valid)
2139 rport->fc_cos = FC_CLASS_3;
2140
2141 if (plogi->class2.class_valid)
2142 rport->fc_cos |= FC_CLASS_2;
2143
2144 /**
2145 * - CISC
2146 * - MAX receive frame size
2147 */
2148 rport->cisc = plogi->csp.cisc;
2149 rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz);
2150
2151 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
2152 bfa_trc(port->fcs, port->fabric->bb_credit);
2153 /**
2154 * Direct Attach P2P mode :
2155 * This is to handle a bug (233476) in IBM targets in Direct Attach
2156 * Mode. Basically, in FLOGI Accept the target would have erroneously
2157 * set the BB Credit to the value used in the FLOGI sent by the HBA.
2158 * It uses the correct value (its own BB credit) in PLOGI.
2159 */
2160 if ((!bfa_fcs_fabric_is_switched(port->fabric))
2161 && (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
2162
2163 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
2164 bfa_trc(port->fcs, port->fabric->bb_credit);
2165
2166 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
2167 bfa_pport_set_tx_bbcredit(port->fcs->bfa,
2168 port->fabric->bb_credit);
2169 }
2170
2171}
2172
2173/**
2174 * Called to handle LOGO received from an existing remote port.
2175 */
2176static void
2177bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2178{
2179 rport->reply_oxid = fchs->ox_id;
2180 bfa_trc(rport->fcs, rport->reply_oxid);
2181
2182 rport->stats.logo_rcvd++;
2183 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD);
2184}
2185
2186
2187
2188/**
2189 * fcs_rport_public FCS rport public interfaces
2190 */
2191
2192/**
2193 * Called by bport/vport to create a remote port instance for a discovered
2194 * remote device.
2195 *
2196 * @param[in] port - base port or vport
2197 * @param[in] rpid - remote port ID
2198 *
2199 * @return None
2200 */
2201struct bfa_fcs_rport_s *
2202bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid)
2203{
2204 struct bfa_fcs_rport_s *rport;
2205
2206 bfa_trc(port->fcs, rpid);
2207 rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid);
2208 if (!rport)
2209 return NULL;
2210
2211 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2212 return rport;
2213}
2214
2215/**
2216 * Called to create a rport for which only the wwn is known.
2217 *
2218 * @param[in] port - base port
2219 * @param[in] rpwwn - remote port wwn
2220 *
2221 * @return None
2222 */
2223struct bfa_fcs_rport_s *
2224bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn)
2225{
2226 struct bfa_fcs_rport_s *rport;
2227
2228 bfa_trc(port->fcs, rpwwn);
2229 rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
2230 if (!rport)
2231 return NULL;
2232
2233 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
2234 return rport;
2235}
2236
2237/**
2238 * Called by bport in private loop topology to indicate that a
2239 * rport has been discovered and plogi has been completed.
2240 *
2241 * @param[in] port - base port or vport
2242 * @param[in] rpid - remote port ID
2243 */
2244void
2245bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2246 struct fc_logi_s *plogi)
2247{
2248 struct bfa_fcs_rport_s *rport;
2249
2250 rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id);
2251 if (!rport)
2252 return;
2253
2254 bfa_fcs_rport_update(rport, plogi);
2255
2256 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
2257}
2258
2259/**
2260 * Called by bport/vport to handle PLOGI received from a new remote port.
2261 * If an existing rport does a plogi, it will be handled separately.
2262 */
2263void
2264bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2265 struct fc_logi_s *plogi)
2266{
2267 struct bfa_fcs_rport_s *rport;
2268
2269 rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id);
2270 if (!rport)
2271 return;
2272
2273 bfa_fcs_rport_update(rport, plogi);
2274
2275 rport->reply_oxid = fchs->ox_id;
2276 bfa_trc(rport->fcs, rport->reply_oxid);
2277
2278 rport->stats.plogi_rcvd++;
2279 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2280}
2281
2282static int
2283wwn_compare(wwn_t wwn1, wwn_t wwn2)
2284{
2285 u8 *b1 = (u8 *) &wwn1;
2286 u8 *b2 = (u8 *) &wwn2;
2287 int i;
2288
2289 for (i = 0; i < sizeof(wwn_t); i++) {
2290 if (b1[i] < b2[i])
2291 return -1;
2292 if (b1[i] > b2[i])
2293 return 1;
2294 }
2295 return 0;
2296}
2297
2298/**
2299 * Called by bport/vport to handle PLOGI received from an existing
2300 * remote port.
2301 */
2302void
2303bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2304 struct fc_logi_s *plogi)
2305{
2306 /**
2307 * @todo Handle P2P and initiator-initiator.
2308 */
2309
2310 bfa_fcs_rport_update(rport, plogi);
2311
2312 rport->reply_oxid = rx_fchs->ox_id;
2313 bfa_trc(rport->fcs, rport->reply_oxid);
2314
2315 /**
2316 * In Switched fabric topology,
2317 * PLOGI to each other. If our pwwn is smaller, ignore it,
2318 * if it is not a well known address.
2319 * If the link topology is N2N,
2320 * this Plogi should be accepted.
2321 */
2322 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1)
2323 && (bfa_fcs_fabric_is_switched(rport->port->fabric))
2324 && (!BFA_FCS_PID_IS_WKA(rport->pid))) {
2325 bfa_trc(rport->fcs, rport->pid);
2326 return;
2327 }
2328
2329 rport->stats.plogi_rcvd++;
2330 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2331}
2332
2333/**
2334 * Called by bport/vport to delete a remote port instance.
2335 *
2336* Rport delete is called under the following conditions:
2337 * - vport is deleted
2338 * - vf is deleted
2339 * - explicit request from OS to delete rport (vmware)
2340 */
2341void
2342bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
2343{
2344 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
2345}
2346
2347/**
2348 * Called by bport/vport to when a target goes offline.
2349 *
2350 */
2351void
2352bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
2353{
2354 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2355}
2356
2357/**
2358 * Called by bport in n2n when a target (attached port) becomes online.
2359 *
2360 */
2361void
2362bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2363{
2364 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2365}
2366
2367/**
2368 * Called by bport/vport to notify SCN for the remote port
2369 */
2370void
2371bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2372{
2373
2374 rport->stats.rscns++;
2375 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2376}
2377
2378/**
2379 * Called by fcpim to notify that the ITN cleanup is done.
2380 */
2381void
2382bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2383{
2384 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2385}
2386
2387/**
2388 * Called by fcptm to notify that the ITN cleanup is done.
2389 */
2390void
2391bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2392{
2393 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2394}
2395
2396/**
2397 * This routine BFA callback for bfa_rport_online() call.
2398 *
2399 * param[in] cb_arg - rport struct.
2400 *
2401 * return
2402 * void
2403 *
2404* Special Considerations:
2405 *
2406 * note
2407 */
2408void
2409bfa_cb_rport_online(void *cbarg)
2410{
2411
2412 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
2413
2414 bfa_trc(rport->fcs, rport->pwwn);
2415 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
2416}
2417
2418/**
2419 * This routine BFA callback for bfa_rport_offline() call.
2420 *
2421 * param[in] rport -
2422 *
2423 * return
2424 * void
2425 *
2426 * Special Considerations:
2427 *
2428 * note
2429 */
2430void
2431bfa_cb_rport_offline(void *cbarg)
2432{
2433 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
2434
2435 bfa_trc(rport->fcs, rport->pwwn);
2436 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
2437}
2438
2439/**
2440 * This routine is a static BFA callback when there is a QoS flow_id
2441 * change notification
2442 *
2443 * @param[in] rport -
2444 *
2445 * @return void
2446 *
2447 * Special Considerations:
2448 *
2449 * @note
2450 */
2451void
2452bfa_cb_rport_qos_scn_flowid(void *cbarg,
2453 struct bfa_rport_qos_attr_s old_qos_attr,
2454 struct bfa_rport_qos_attr_s new_qos_attr)
2455{
2456 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
2457 struct bfa_rport_aen_data_s aen_data;
2458
2459 bfa_trc(rport->fcs, rport->pwwn);
2460 aen_data.priv.qos = new_qos_attr;
2461 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2462}
2463
2464/**
2465 * This routine is a static BFA callback when there is a QoS priority
2466 * change notification
2467 *
2468 * @param[in] rport -
2469 *
2470 * @return void
2471 *
2472 * Special Considerations:
2473 *
2474 * @note
2475 */
2476void
2477bfa_cb_rport_qos_scn_prio(void *cbarg, struct bfa_rport_qos_attr_s old_qos_attr,
2478 struct bfa_rport_qos_attr_s new_qos_attr)
2479{
2480 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg;
2481 struct bfa_rport_aen_data_s aen_data;
2482
2483 bfa_trc(rport->fcs, rport->pwwn);
2484 aen_data.priv.qos = new_qos_attr;
2485 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2486}
2487
2488/**
2489 * Called to process any unsolicted frames from this remote port
2490 */
2491void
2492bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2493{
2494 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2495}
2496
2497/**
2498 * Called to process any unsolicted frames from this remote port
2499 */
2500void
2501bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2502 u16 len)
2503{
2504 struct bfa_fcs_port_s *port = rport->port;
2505 struct fc_els_cmd_s *els_cmd;
2506
2507 bfa_trc(rport->fcs, fchs->s_id);
2508 bfa_trc(rport->fcs, fchs->d_id);
2509 bfa_trc(rport->fcs, fchs->type);
2510
2511 if (fchs->type != FC_TYPE_ELS)
2512 return;
2513
2514 els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
2515
2516 bfa_trc(rport->fcs, els_cmd->els_code);
2517
2518 switch (els_cmd->els_code) {
2519 case FC_ELS_LOGO:
2520 bfa_fcs_rport_process_logo(rport, fchs);
2521 break;
2522
2523 case FC_ELS_ADISC:
2524 bfa_fcs_rport_process_adisc(rport, fchs, len);
2525 break;
2526
2527 case FC_ELS_PRLO:
2528 if (bfa_fcs_port_is_initiator(port))
2529 bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
2530
2531 if (bfa_fcs_port_is_target(port))
2532 bfa_fcs_fcptm_uf_recv(rport->tin, fchs, len);
2533 break;
2534
2535 case FC_ELS_PRLI:
2536 bfa_fcs_rport_process_prli(rport, fchs, len);
2537 break;
2538
2539 case FC_ELS_RPSC:
2540 bfa_fcs_rport_process_rpsc(rport, fchs, len);
2541 break;
2542
2543 default:
2544 bfa_fcs_rport_send_ls_rjt(rport, fchs,
2545 FC_LS_RJT_RSN_CMD_NOT_SUPP,
2546 FC_LS_RJT_EXP_NO_ADDL_INFO);
2547 break;
2548 }
2549}
2550
2551/*
2552 * Send a LS reject
2553 */
2554static void
2555bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2556 u8 reason_code, u8 reason_code_expl)
2557{
2558 struct bfa_fcs_port_s *port = rport->port;
2559 struct fchs_s fchs;
2560 struct bfa_fcxp_s *fcxp;
2561 int len;
2562
2563 bfa_trc(rport->fcs, rx_fchs->s_id);
2564
2565 fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
2566 if (!fcxp)
2567 return;
2568
2569 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
2570 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
2571 reason_code, reason_code_expl);
2572
2573 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2574 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
2575}
2576
/**
 * Module initialization -- nothing to set up for the rport module.
 */
void
bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs)
{
}
2584
/**
 * Module cleanup -- simply acknowledge module exit completion.
 */
void
bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs)
{
	bfa_fcs_modexit_comp(fcs);
}
2593
2594/**
2595 * Return state of rport.
2596 */
2597int
2598bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2599{
2600 return bfa_sm_to_state(rport_sm_table, rport->sm);
2601}
2602
2603/**
2604 * Called by the Driver to set rport delete/ageout timeout
2605 *
2606 * param[in] rport timeout value in seconds.
2607 *
2608 * return None
2609 */
2610void
2611bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
2612{
2613 /*
2614 * convert to Millisecs
2615 */
2616 if (rport_tmo > 0)
2617 bfa_fcs_rport_del_timeout = rport_tmo * 1000;
2618}
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
new file mode 100644
index 000000000000..3dae1774181e
--- /dev/null
+++ b/drivers/scsi/bfa/rport_api.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfa_svc.h>
19#include "fcs_vport.h"
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_trcmod.h"
23
24BFA_TRC_FILE(FCS, RPORT_API);
25
26/**
27 * rport_api.c Remote port implementation.
28 */
29
30/**
31 * fcs_rport_api FCS rport API.
32 */
33
34/**
35 * Direct API to add a target by port wwn. This interface is used, for
36 * example, by bios when target pwwn is known from boot lun configuration.
37 */
38bfa_status_t
39bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
40 struct bfa_fcs_rport_s *rport,
41 struct bfad_rport_s *rport_drv)
42{
43 bfa_trc(port->fcs, *pwwn);
44
45 return BFA_STATUS_OK;
46}
47
48/**
49 * Direct API to remove a target and its associated resources. This
50 * interface is used, for example, by vmware driver to remove target
51 * ports from the target list for a VM.
52 */
53bfa_status_t
54bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
55{
56
57 struct bfa_fcs_rport_s *rport;
58
59 bfa_trc(rport_in->fcs, rport_in->pwwn);
60
61 rport = bfa_fcs_port_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
62 if (rport == NULL) {
63 /*
64 * TBD Error handling
65 */
66 bfa_trc(rport_in->fcs, rport_in->pid);
67 return BFA_STATUS_UNKNOWN_RWWN;
68 }
69
70 /*
71 * TBD if this remote port is online, send a logo
72 */
73 return BFA_STATUS_OK;
74
75}
76
77/**
78 * Remote device status for display/debug.
79 */
80void
81bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
82 struct bfa_rport_attr_s *rport_attr)
83{
84 struct bfa_rport_qos_attr_s qos_attr;
85 struct bfa_fcs_port_s *port = rport->port;
86
87 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
88
89 rport_attr->pid = rport->pid;
90 rport_attr->pwwn = rport->pwwn;
91 rport_attr->nwwn = rport->nwwn;
92 rport_attr->cos_supported = rport->fc_cos;
93 rport_attr->df_sz = rport->maxfrsize;
94 rport_attr->state = bfa_fcs_rport_get_state(rport);
95 rport_attr->fc_cos = rport->fc_cos;
96 rport_attr->cisc = rport->cisc;
97 rport_attr->scsi_function = rport->scsi_function;
98 rport_attr->curr_speed = rport->rpf.rpsc_speed;
99 rport_attr->assigned_speed = rport->rpf.assigned_speed;
100
101 bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
102 rport_attr->qos_attr = qos_attr;
103
104 rport_attr->trl_enforced = BFA_FALSE;
105 if (bfa_pport_is_ratelim(port->fcs->bfa)) {
106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) ||
107 (rport->rpf.rpsc_speed <
108 bfa_fcs_port_get_rport_max_speed(port)))
109 rport_attr->trl_enforced = BFA_TRUE;
110 }
111
112 /*
113 * TODO
114 * rport->symname
115 */
116}
117
118/**
119 * Per remote device statistics.
120 */
121void
122bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
123 struct bfa_rport_stats_s *stats)
124{
125 *stats = rport->stats;
126}
127
128void
129bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
130{
131 bfa_os_memset((char *)&rport->stats, 0,
132 sizeof(struct bfa_rport_stats_s));
133}
134
135struct bfa_fcs_rport_s *
136bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
137{
138 struct bfa_fcs_rport_s *rport;
139
140 rport = bfa_fcs_port_get_rport_by_pwwn(port, rpwwn);
141 if (rport == NULL) {
142 /*
143 * TBD Error handling
144 */
145 }
146
147 return rport;
148}
149
150struct bfa_fcs_rport_s *
151bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_port_s *port, wwn_t rnwwn)
152{
153 struct bfa_fcs_rport_s *rport;
154
155 rport = bfa_fcs_port_get_rport_by_nwwn(port, rnwwn);
156 if (rport == NULL) {
157 /*
158 * TBD Error handling
159 */
160 }
161
162 return rport;
163}
164
165/*
166 * This API is to set the Rport's speed. Should be used when RPSC is not
167 * supported by the rport.
168 */
169void
170bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
171 enum bfa_pport_speed speed)
172{
173 rport->rpf.assigned_speed = speed;
174
175 /* Set this speed in f/w only if the RPSC speed is not available */
176 if (rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN)
177 bfa_rport_speed(rport->bfa_rport, speed);
178}
179
180
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
new file mode 100644
index 000000000000..8a1f59d596c1
--- /dev/null
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * rport_ftrs.c Remote port features (RPF) implementation.
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include "fcbuild.h"
25#include "fcs_rport.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30
31BFA_TRC_FILE(FCS, RPORT_FTRS);
32
33#define BFA_FCS_RPF_RETRIES (3)
34#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
35
36static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp, void *cbarg,
40 bfa_status_t req_status, u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43static void bfa_fcs_rpf_timeout(void *arg);
44
45/**
46 * fcs_rport_ftrs_sm FCS rport state machine events
47 */
48
49enum rpf_event {
50 RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
51 RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
52 RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
53 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
54 RPFSM_EVENT_RPSC_COMP = 5,
55 RPFSM_EVENT_RPSC_FAIL = 6,
56 RPFSM_EVENT_RPSC_ERROR = 7,
57};
58
59static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
60 enum rpf_event event);
61static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
62 enum rpf_event event);
63static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
64 enum rpf_event event);
65static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
66 enum rpf_event event);
67static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
68 enum rpf_event event);
69static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
70 enum rpf_event event);
71
72static void
73bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
74{
75 struct bfa_fcs_rport_s *rport = rpf->rport;
76
77 bfa_trc(rport->fcs, rport->pwwn);
78 bfa_trc(rport->fcs, rport->pid);
79 bfa_trc(rport->fcs, event);
80
81 switch (event) {
82 case RPFSM_EVENT_RPORT_ONLINE :
83 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
84 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
85 rpf->rpsc_retries = 0;
86 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
87 break;
88 };
89
90 case RPFSM_EVENT_RPORT_OFFLINE :
91 break;
92
93 default:
94 bfa_assert(0);
95 }
96}
97
98static void
99bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
100{
101 struct bfa_fcs_rport_s *rport = rpf->rport;
102
103 bfa_trc(rport->fcs, event);
104
105 switch (event) {
106 case RPFSM_EVENT_FCXP_SENT:
107 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
108 break;
109
110 case RPFSM_EVENT_RPORT_OFFLINE :
111 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
112 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
113 rpf->rpsc_retries = 0;
114 break;
115
116 default:
117 bfa_assert(0);
118 }
119}
120
121static void
122bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
123{
124 struct bfa_fcs_rport_s *rport = rpf->rport;
125
126 bfa_trc(rport->fcs, rport->pid);
127 bfa_trc(rport->fcs, event);
128
129 switch (event) {
130 case RPFSM_EVENT_RPSC_COMP:
131 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
132 /* Update speed info in f/w via BFA */
133 if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) {
134 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
135 } else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) {
136 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
137 }
138 break;
139
140 case RPFSM_EVENT_RPSC_FAIL:
141 /* RPSC not supported by rport */
142 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
143 break;
144
145 case RPFSM_EVENT_RPSC_ERROR:
146 /* need to retry...delayed a bit. */
147 if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
148 bfa_timer_start(rport->fcs->bfa, &rpf->timer,
149 bfa_fcs_rpf_timeout, rpf,
150 BFA_FCS_RPF_RETRY_TIMEOUT);
151 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
152 } else {
153 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
154 }
155 break;
156
157 case RPFSM_EVENT_RPORT_OFFLINE :
158 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
159 bfa_fcxp_discard(rpf->fcxp);
160 rpf->rpsc_retries = 0;
161 break;
162
163 default:
164 bfa_assert(0);
165 }
166}
167
168static void
169bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
170{
171 struct bfa_fcs_rport_s *rport = rpf->rport;
172
173 bfa_trc(rport->fcs, rport->pid);
174 bfa_trc(rport->fcs, event);
175
176 switch (event) {
177 case RPFSM_EVENT_TIMEOUT :
178 /* re-send the RPSC */
179 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
180 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
181 break;
182
183 case RPFSM_EVENT_RPORT_OFFLINE :
184 bfa_timer_stop(&rpf->timer);
185 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
186 rpf->rpsc_retries = 0;
187 break;
188
189 default:
190 bfa_assert(0);
191 }
192}
193
194static void
195bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
196{
197 struct bfa_fcs_rport_s *rport = rpf->rport;
198
199 bfa_trc(rport->fcs, rport->pwwn);
200 bfa_trc(rport->fcs, rport->pid);
201 bfa_trc(rport->fcs, event);
202
203 switch (event) {
204 case RPFSM_EVENT_RPORT_OFFLINE :
205 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
206 rpf->rpsc_retries = 0;
207 break;
208
209 default:
210 bfa_assert(0);
211 }
212}
213
214static void
215bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
216{
217 struct bfa_fcs_rport_s *rport = rpf->rport;
218
219 bfa_trc(rport->fcs, rport->pwwn);
220 bfa_trc(rport->fcs, rport->pid);
221 bfa_trc(rport->fcs, event);
222
223 switch (event) {
224 case RPFSM_EVENT_RPORT_ONLINE :
225 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
226 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
227 break;
228
229 case RPFSM_EVENT_RPORT_OFFLINE :
230 break;
231
232 default:
233 bfa_assert(0);
234 }
235}
236/**
237 * Called when Rport is created.
238 */
239void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
240{
241 struct bfa_fcs_rpf_s *rpf = &rport->rpf;
242
243 bfa_trc(rport->fcs, rport->pid);
244 rpf->rport = rport;
245
246 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
247}
248
249/**
250 * Called when Rport becomes online
251 */
252void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
253{
254 bfa_trc(rport->fcs, rport->pid);
255
256 if (__fcs_min_cfg(rport->port->fcs))
257 return;
258
259 if (bfa_fcs_fabric_is_switched(rport->port->fabric))
260 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
261}
262
263/**
264 * Called when Rport becomes offline
265 */
266void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
267{
268 bfa_trc(rport->fcs, rport->pid);
269
270 if (__fcs_min_cfg(rport->port->fcs))
271 return;
272
273 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
274}
275
276static void
277bfa_fcs_rpf_timeout(void *arg)
278{
279 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
280 struct bfa_fcs_rport_s *rport = rpf->rport;
281
282 bfa_trc(rport->fcs, rport->pid);
283 bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
284}
285
286static void
287bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
288{
289 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
290 struct bfa_fcs_rport_s *rport = rpf->rport;
291 struct bfa_fcs_port_s *port = rport->port;
292 struct fchs_s fchs;
293 int len;
294 struct bfa_fcxp_s *fcxp;
295
296 bfa_trc(rport->fcs, rport->pwwn);
297
298 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
299 if (!fcxp) {
300 bfa_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
301 bfa_fcs_rpf_send_rpsc2, rpf);
302 return;
303 }
304 rpf->fcxp = fcxp;
305
306 len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
307 bfa_fcs_port_get_fcid(port), &rport->pid, 1);
308
309 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
310 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
311 rpf, FC_MAX_PDUSZ, FC_RA_TOV);
312 rport->stats.rpsc_sent++;
313 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
314
315}
316
317static void
318bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
319 bfa_status_t req_status, u32 rsp_len,
320 u32 resid_len, struct fchs_s *rsp_fchs)
321{
322 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
323 struct bfa_fcs_rport_s *rport = rpf->rport;
324 struct fc_ls_rjt_s *ls_rjt;
325 struct fc_rpsc2_acc_s *rpsc2_acc;
326 u16 num_ents;
327
328 bfa_trc(rport->fcs, req_status);
329
330 if (req_status != BFA_STATUS_OK) {
331 bfa_trc(rport->fcs, req_status);
332 if (req_status == BFA_STATUS_ETIMER)
333 rport->stats.rpsc_failed++;
334 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
335 return;
336 }
337
338 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
339 if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
340 rport->stats.rpsc_accs++;
341 num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
342 bfa_trc(rport->fcs, num_ents);
343 if (num_ents > 0) {
344 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
345 bfa_trc(rport->fcs,
346 bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
347 bfa_trc(rport->fcs,
348 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
349 bfa_trc(rport->fcs,
350 bfa_os_ntohs(rpsc2_acc->port_info[0].index));
351 bfa_trc(rport->fcs,
352 rpsc2_acc->port_info[0].type);
353
354 if (rpsc2_acc->port_info[0].speed == 0) {
355 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
356 return;
357 }
358
359 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
360 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
361
362 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
363 }
364 } else {
365 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
366 bfa_trc(rport->fcs, ls_rjt->reason_code);
367 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
368 rport->stats.rpsc_rejects++;
369 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
370 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
371 } else {
372 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
373 }
374 }
375}
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
new file mode 100644
index 000000000000..bd4771ff62c8
--- /dev/null
+++ b/drivers/scsi/bfa/scn.c
@@ -0,0 +1,482 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_ms.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "fcs.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, SCN);
29
30#define FC_QOS_RSCN_EVENT 0x0c
31#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
32
33/*
34 * forward declarations
35 */
36static void bfa_fcs_port_scn_send_scr(void *scn_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_port_scn_scr_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp,
40 void *cbarg,
41 bfa_status_t req_status,
42 u32 rsp_len,
43 u32 resid_len,
44 struct fchs_s *rsp_fchs);
45static void bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
46 struct fchs_s *rx_fchs);
47static void bfa_fcs_port_scn_timeout(void *arg);
48
49/**
50 * fcs_scm_sm FCS SCN state machine
51 */
52
53/**
54 * VPort SCN State Machine events
55 */
56enum port_scn_event {
57 SCNSM_EVENT_PORT_ONLINE = 1,
58 SCNSM_EVENT_PORT_OFFLINE = 2,
59 SCNSM_EVENT_RSP_OK = 3,
60 SCNSM_EVENT_RSP_ERROR = 4,
61 SCNSM_EVENT_TIMEOUT = 5,
62 SCNSM_EVENT_SCR_SENT = 6,
63};
64
65static void bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
66 enum port_scn_event event);
67static void bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
68 enum port_scn_event event);
69static void bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
70 enum port_scn_event event);
71static void bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
72 enum port_scn_event event);
73static void bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
74 enum port_scn_event event);
75
76/**
77 * Starting state - awaiting link up.
78 */
79static void
80bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
81 enum port_scn_event event)
82{
83 switch (event) {
84 case SCNSM_EVENT_PORT_ONLINE:
85 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
86 bfa_fcs_port_scn_send_scr(scn, NULL);
87 break;
88
89 case SCNSM_EVENT_PORT_OFFLINE:
90 break;
91
92 default:
93 bfa_assert(0);
94 }
95}
96
97static void
98bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
99 enum port_scn_event event)
100{
101 switch (event) {
102 case SCNSM_EVENT_SCR_SENT:
103 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr);
104 break;
105
106 case SCNSM_EVENT_PORT_OFFLINE:
107 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
108 bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
109 break;
110
111 default:
112 bfa_assert(0);
113 }
114}
115
116static void
117bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
118 enum port_scn_event event)
119{
120 struct bfa_fcs_port_s *port = scn->port;
121
122 switch (event) {
123 case SCNSM_EVENT_RSP_OK:
124 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_online);
125 break;
126
127 case SCNSM_EVENT_RSP_ERROR:
128 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr_retry);
129 bfa_timer_start(port->fcs->bfa, &scn->timer,
130 bfa_fcs_port_scn_timeout, scn,
131 BFA_FCS_RETRY_TIMEOUT);
132 break;
133
134 case SCNSM_EVENT_PORT_OFFLINE:
135 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
136 bfa_fcxp_discard(scn->fcxp);
137 break;
138
139 default:
140 bfa_assert(0);
141 }
142}
143
144static void
145bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
146 enum port_scn_event event)
147{
148 switch (event) {
149 case SCNSM_EVENT_TIMEOUT:
150 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
151 bfa_fcs_port_scn_send_scr(scn, NULL);
152 break;
153
154 case SCNSM_EVENT_PORT_OFFLINE:
155 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
156 bfa_timer_stop(&scn->timer);
157 break;
158
159 default:
160 bfa_assert(0);
161 }
162}
163
164static void
165bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
166 enum port_scn_event event)
167{
168 switch (event) {
169 case SCNSM_EVENT_PORT_OFFLINE:
170 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
171 break;
172
173 default:
174 bfa_assert(0);
175 }
176}
177
178
179
180/**
181 * fcs_scn_private FCS SCN private functions
182 */
183
184/**
185 * This routine will be called to send a SCR command.
186 */
187static void
188bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
189{
190 struct bfa_fcs_port_scn_s *scn = scn_cbarg;
191 struct bfa_fcs_port_s *port = scn->port;
192 struct fchs_s fchs;
193 int len;
194 struct bfa_fcxp_s *fcxp;
195
196 bfa_trc(port->fcs, port->pid);
197 bfa_trc(port->fcs, port->port_cfg.pwwn);
198
199 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
200 if (!fcxp) {
201 bfa_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
202 bfa_fcs_port_scn_send_scr, scn);
203 return;
204 }
205 scn->fcxp = fcxp;
206
207 /*
208 * Handle VU registrations for Base port only
209 */
210 if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
211 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
212 bfa_lps_is_brcd_fabric(port->fabric->lps),
213 port->pid, 0);
214 } else {
215 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE,
216 port->pid, 0);
217 }
218
219 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
220 FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response,
221 (void *)scn, FC_MAX_PDUSZ, FC_RA_TOV);
222
223 bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
224}
225
226static void
227bfa_fcs_port_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
228 void *cbarg, bfa_status_t req_status,
229 u32 rsp_len, u32 resid_len,
230 struct fchs_s *rsp_fchs)
231{
232 struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)cbarg;
233 struct bfa_fcs_port_s *port = scn->port;
234 struct fc_els_cmd_s *els_cmd;
235 struct fc_ls_rjt_s *ls_rjt;
236
237 bfa_trc(port->fcs, port->port_cfg.pwwn);
238
239 /*
240 * Sanity Checks
241 */
242 if (req_status != BFA_STATUS_OK) {
243 bfa_trc(port->fcs, req_status);
244 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
245 return;
246 }
247
248 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
249
250 switch (els_cmd->els_code) {
251
252 case FC_ELS_ACC:
253 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
254 break;
255
256 case FC_ELS_LS_RJT:
257
258 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
259
260 bfa_trc(port->fcs, ls_rjt->reason_code);
261 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
262
263 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
264 break;
265
266 default:
267 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
268 }
269}
270
271/*
272 * Send a LS Accept
273 */
274static void
275bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
276 struct fchs_s *rx_fchs)
277{
278 struct fchs_s fchs;
279 struct bfa_fcxp_s *fcxp;
280 struct bfa_rport_s *bfa_rport = NULL;
281 int len;
282
283 bfa_trc(port->fcs, rx_fchs->s_id);
284
285 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
286 if (!fcxp)
287 return;
288
289 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
290 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);
291
292 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
293 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
294 FC_MAX_PDUSZ, 0);
295}
296
/**
 * Timer callback, fired by bfa_timer when the SCR retry delay expires.
 *
 * param[in] arg - pointer to the bfa_fcs_port_scn_s that armed the timer
 *
 * return void
 *
 * Forwards a TIMEOUT event to the SCN state machine, which decides
 * whether to resend the SCR.
 */
static void
bfa_fcs_port_scn_timeout(void *arg)
{
	struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)arg;

	bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
}
317
318
319
320/**
321 * fcs_scn_public FCS state change notification public interfaces
322 */
323
324/*
325 * Functions called by port/fab
326 */
/*
 * Attach the SCN sub-module to its logical port and start its state
 * machine in the offline state.
 */
void
bfa_fcs_port_scn_init(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
}
335
/*
 * Port went offline: notify the SCN state machine so that any pending
 * SCR exchange or retry timer can be cleaned up.
 */
void
bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
}
344
/*
 * Port came online: kick the SCN state machine, which will register for
 * state change notifications (send SCR) with the fabric controller.
 */
void
bfa_fcs_port_scn_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
}
353
354static void
355bfa_fcs_port_scn_portid_rscn(struct bfa_fcs_port_s *port, u32 rpid)
356{
357 struct bfa_fcs_rport_s *rport;
358
359 bfa_trc(port->fcs, rpid);
360
361 /**
362 * If this is an unknown device, then it just came online.
363 * Otherwise let rport handle the RSCN event.
364 */
365 rport = bfa_fcs_port_get_rport_by_pid(port, rpid);
366 if (rport == NULL) {
367 /*
368 * If min cfg mode is enabled, we donot need to
369 * discover any new rports.
370 */
371 if (!__fcs_min_cfg(port->fcs))
372 rport = bfa_fcs_rport_create(port, rpid);
373 } else {
374 bfa_fcs_rport_scn(rport);
375 }
376}
377
/**
 * RSCN format based PID comparison.
 *
 * True when the PID pointed to by __c1 falls within the address range
 * described by __c0 under format __fmt: FABRIC matches everything,
 * DOMAIN compares only the first PID byte, AREA compares the first two.
 * NOTE(review): assumes both PIDs are stored in the same (wire) byte
 * order so that byte [0] is the domain - confirm against callers.
 */
#define __fc_pid_match(__c0, __c1, __fmt)		\
	(((__fmt) == FC_RSCN_FORMAT_FABRIC) ||		\
	 (((__fmt) == FC_RSCN_FORMAT_DOMAIN)		\
	  && ((__c0)[0] == (__c1)[0]))			\
	 || (((__fmt) == FC_RSCN_FORMAT_AREA)		\
	     && ((__c0)[0] == (__c1)[0])		\
	     && ((__c0)[1] == (__c1)[1])))
388
389static void
390bfa_fcs_port_scn_multiport_rscn(struct bfa_fcs_port_s *port,
391 enum fc_rscn_format format, u32 rscn_pid)
392{
393 struct bfa_fcs_rport_s *rport;
394 struct list_head *qe, *qe_next;
395 u8 *c0, *c1;
396
397 bfa_trc(port->fcs, format);
398 bfa_trc(port->fcs, rscn_pid);
399
400 c0 = (u8 *) &rscn_pid;
401
402 list_for_each_safe(qe, qe_next, &port->rport_q) {
403 rport = (struct bfa_fcs_rport_s *)qe;
404 c1 = (u8 *) &rport->pid;
405 if (__fc_pid_match(c0, c1, format))
406 bfa_fcs_rport_scn(rport);
407 }
408}
409
/**
 * Process an incoming RSCN payload.
 *
 * Sends an LS_ACC for the received frame, then walks every affected-PID
 * entry in the payload and dispatches it by RSCN address format.  If
 * any area/domain/fabric-wide event was seen, a fresh name server query
 * is started to discover devices that are new to the fabric.
 */
void
bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
			u32 len)
{
	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
	int num_entries;
	u32 rscn_pid;
	bfa_boolean_t nsquery = BFA_FALSE;
	int i = 0;

	/*
	 * Payload length minus the 4-byte command word, divided by the
	 * per-event entry size, gives the number of affected PIDs.
	 */
	num_entries =
		(bfa_os_ntohs(rscn->payldlen) -
		 sizeof(u32)) / sizeof(rscn->event[0]);

	bfa_trc(port->fcs, num_entries);

	port->stats.num_rscn++;

	bfa_fcs_port_scn_send_ls_acc(port, fchs);

	for (i = 0; i < num_entries; i++) {
		rscn_pid = rscn->event[i].portid;

		bfa_trc(port->fcs, rscn->event[i].format);
		bfa_trc(port->fcs, rscn_pid);

		switch (rscn->event[i].format) {
		case FC_RSCN_FORMAT_PORTID:
			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
				/*
				 * Ignore this event. f/w would have processed
				 * it
				 */
				bfa_trc(port->fcs, rscn_pid);
			} else {
				port->stats.num_portid_rscn++;
				bfa_fcs_port_scn_portid_rscn(port, rscn_pid);
			}
			break;

		case FC_RSCN_FORMAT_FABRIC:
			if (rscn->event[i].qualifier ==
			    FC_FABRIC_NAME_RSCN_EVENT) {
				/* Fabric name change is handled by the MS module. */
				bfa_fcs_port_ms_fabric_rscn(port);
				break;
			}
			/*
			 * !!!!!!!!! Fall Through !!!!!!!!!!!!!
			 * Any other fabric-format event is handled like an
			 * area/domain event below.
			 */

		case FC_RSCN_FORMAT_AREA:
		case FC_RSCN_FORMAT_DOMAIN:
			nsquery = BFA_TRUE;
			bfa_fcs_port_scn_multiport_rscn(port,
							rscn->event[i].format,
							rscn_pid);
			break;

		default:
			/* Unknown format: be conservative and re-query NS. */
			bfa_assert(0);
			nsquery = BFA_TRUE;
		}
	}

	/**
	 * If any of area, domain or fabric RSCN is received, do a fresh discovery
	 * to find new devices.
	 */
	if (nsquery)
		bfa_fcs_port_ns_query(port);
}
481
482
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c
new file mode 100644
index 000000000000..31d81fe2fc48
--- /dev/null
+++ b/drivers/scsi/bfa/vfapi.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * vfapi.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_trcmod.h"
24
25BFA_TRC_FILE(FCS, VFAPI);
26
27/**
28 * fcs_vf_api virtual fabrics API
29 */
30
/**
 * Enable VF mode.
 *
 * @param[in]    fcs    fcs module instance
 * @param[in]    vf_id  default vf_id of port, FC_VF_ID_NULL
 *                      to use standard default vf_id of 1.
 *
 * @retval BFA_STATUS_OK    vf mode is enabled
 * @retval BFA_STATUS_BUSY  Port is active. Port must be disabled
 *                          before VF mode can be enabled.
 */
bfa_status_t
bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
{
	/* Stub: VF mode is not implemented; unconditionally reports success. */
	return BFA_STATUS_OK;
}
47
/**
 * Disable VF mode.
 *
 * @param[in]    fcs    fcs module instance
 *
 * @retval BFA_STATUS_OK    vf mode is disabled
 * @retval BFA_STATUS_BUSY  VFs are present and being used. All
 *                          VFs must be deleted before disabling
 *                          VF mode.
 */
bfa_status_t
bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
{
	/* Stub: VF mode is not implemented; unconditionally reports success. */
	return BFA_STATUS_OK;
}
63
/**
 * Create a new VF instance.
 *
 * A new VF is created using the given VF configuration. A VF is identified
 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
 * a VF is created, VF is automatically started after link initialization
 * and EVFP exchange is completed.
 *
 * param[in] vf      - FCS vf data structure. Memory is
 *                     allocated by caller (driver)
 * param[in] fcs     - FCS module
 * param[in] vf_cfg  - VF configuration
 * param[in] vf_drv  - Opaque handle back to the driver's
 *                     virtual vf structure
 *
 * retval BFA_STATUS_OK      VF creation is successful
 * retval BFA_STATUS_FAILED  VF creation failed
 * retval BFA_STATUS_EEXIST  A VF exists with the given vf_id
 */
bfa_status_t
bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
{
	/* Stub: VF creation is not implemented beyond tracing the request. */
	bfa_trc(fcs, vf_id);
	return BFA_STATUS_OK;
}
90
/**
 * Use this function to delete a BFA VF object. VF object should
 * be stopped before this function call.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * retval BFA_STATUS_OK          On vf deletion success
 * retval BFA_STATUS_BUSY        VF is not in a stopped state
 * retval BFA_STATUS_INPROGRESS  VF deletion in progress
 */
bfa_status_t
bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
{
	/* Stub: VF deletion is not implemented beyond tracing the request. */
	bfa_trc(vf->fcs, vf->vf_id);
	return BFA_STATUS_OK;
}
107
/**
 * Start participation in VF. This triggers login to the virtual fabric.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * return None
 */
void
bfa_fcs_vf_start(bfa_fcs_vf_t *vf)
{
	/* Stub: VF start is not implemented beyond tracing the request. */
	bfa_trc(vf->fcs, vf->vf_id);
}
120
/**
 * Logout with the virtual fabric.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * retval BFA_STATUS_OK          On success.
 * retval BFA_STATUS_INPROGRESS  VF is being stopped.
 */
bfa_status_t
bfa_fcs_vf_stop(bfa_fcs_vf_t *vf)
{
	/* Stub: VF stop is not implemented beyond tracing the request. */
	bfa_trc(vf->fcs, vf->vf_id);
	return BFA_STATUS_OK;
}
135
/**
 * Returns attributes of the given VF.
 *
 * param[in]  vf       pointer to bfa_vf_t.
 * param[out] vf_attr  vf attributes returned
 *
 * return None
 */
void
bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
{
	/* Stub: attributes are not filled in; vf_attr is left untouched. */
	bfa_trc(vf->fcs, vf->vf_id);
}
149
150/**
151 * Return statistics associated with the given vf.
152 *
153 * param[in] vf pointer to bfa_vf_t.
154 * param[out] vf_stats vf statistics returned
155 *
156 * @return None
157 */
158void
159bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
160{
161 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
162 return;
163}
164
165void
166/**
167 * clear statistics associated with the given vf.
168 *
169 * param[in] vf pointer to bfa_vf_t.
170 *
171 * @return None
172 */
173bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
174{
175 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
176 return;
177}
178
179/**
180 * Returns FCS vf structure for a given vf_id.
181 *
182 * param[in] vf_id - VF_ID
183 *
184 * return
185 * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
186 */
187bfa_fcs_vf_t *
188bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
189{
190 bfa_trc(fcs, vf_id);
191 if (vf_id == FC_VF_ID_NULL)
192 return (&fcs->fabric);
193
194 /**
195 * @todo vf support
196 */
197
198 return NULL;
199}
200
/**
 * Returns driver VF structure for a given FCS vf.
 *
 * param[in] vf - pointer to bfa_vf_t
 *
 * return Driver VF structure (the opaque handle registered at create time)
 */
struct bfad_vf_s *
bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf)
{
	bfa_assert(vf);
	bfa_trc(vf->fcs, vf->vf_id);
	return vf->vf_drv;
}
215
/**
 * Return the list of VFs configured.
 *
 * param[in]     fcs     fcs module instance
 * param[out]    vf_ids  returned list of vf_ids
 * param[in,out] nvfs    in:size of vf_ids array,
 *                       out:total elements present,
 *                       actual elements returned is limited by the size
 *
 * return None
 */
void
bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	/* Stub: no VFs exist yet; vf_ids/nvfs are left untouched. */
	bfa_trc(fcs, *nvfs);
}
232
/**
 * Return the list of all VFs visible from fabric.
 *
 * param[in]     fcs     fcs module instance
 * param[out]    vf_ids  returned list of vf_ids
 * param[in,out] nvfs    in:size of vf_ids array,
 *                       out:total elements present,
 *                       actual elements returned is limited by the size
 *
 * return None
 */
void
bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	/* Stub: no VFs exist yet; vf_ids/nvfs are left untouched. */
	bfa_trc(fcs, *nvfs);
}
249
250/**
251 * Return the list of local logical ports present in the given VF.
252 *
253 * param[in] vf vf for which logical ports are returned
254 * param[out] lpwwn returned logical port wwn list
255 * param[in,out] nlports in:size of lpwwn list;
256 * out:total elements present,
257 * actual elements returned is limited by the size
258 *
259 */
260void
261bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
262{
263 struct list_head *qe;
264 struct bfa_fcs_vport_s *vport;
265 int i;
266 struct bfa_fcs_s *fcs;
267
268 if (vf == NULL || lpwwn == NULL || *nlports == 0)
269 return;
270
271 fcs = vf->fcs;
272
273 bfa_trc(fcs, vf->vf_id);
274 bfa_trc(fcs, (u32) *nlports);
275
276 i = 0;
277 lpwwn[i++] = vf->bport.port_cfg.pwwn;
278
279 list_for_each(qe, &vf->vport_q) {
280 if (i >= *nlports)
281 break;
282
283 vport = (struct bfa_fcs_vport_s *) qe;
284 lpwwn[i++] = vport->lport.port_cfg.pwwn;
285 }
286
287 bfa_trc(fcs, i);
288 *nlports = i;
289 return;
290}
291
292
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
new file mode 100644
index 000000000000..c10af06c5714
--- /dev/null
+++ b/drivers/scsi/bfa/vport.c
@@ -0,0 +1,891 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.c FCS virtual port state machine
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <fcbuild.h>
25#include "fcs_fabric.h"
26#include "fcs_lport.h"
27#include "fcs_vport.h"
28#include "fcs_trcmod.h"
29#include "fcs.h"
30#include <aen/bfa_aen_lport.h>
31
32BFA_TRC_FILE(FCS, VPORT);
33
/*
 * Convenience accessors into the vport's embedded lport.
 */
#define __vport_fcs(__vp)       (__vp)->lport.fcs
#define __vport_pwwn(__vp)      (__vp)->lport.port_cfg.pwwn
#define __vport_nwwn(__vp)      (__vp)->lport.port_cfg.nwwn
#define __vport_bfa(__vp)       (__vp)->lport.fcs->bfa
#define __vport_fcid(__vp)      (__vp)->lport.pid
#define __vport_fabric(__vp)    (__vp)->lport.fabric
#define __vport_vfid(__vp)      (__vp)->lport.fabric->vf_id

/* Max FDISC retries before the vport gives up (see fdisc_rejected). */
#define BFA_FCS_VPORT_MAX_RETRIES  5
43/*
44 * Forward declarations
45 */
46static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
47static void bfa_fcs_vport_timeout(void *vport_arg);
48static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
49static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
50
51/**
52 * fcs_vport_sm FCS virtual port state machine
53 */
54
/**
 * VPort State Machine events
 */
enum bfa_fcs_vport_event {
	BFA_FCS_VPORT_SM_CREATE = 1,	/*  vport create event */
	BFA_FCS_VPORT_SM_DELETE = 2,	/*  vport delete event */
	BFA_FCS_VPORT_SM_START = 3,	/*  vport start request */
	BFA_FCS_VPORT_SM_STOP = 4,	/*  stop: unsupported */
	BFA_FCS_VPORT_SM_ONLINE = 5,	/*  fabric online */
	BFA_FCS_VPORT_SM_OFFLINE = 6,	/*  fabric offline event */
	BFA_FCS_VPORT_SM_FRMSENT = 7,	/*  fdisc/logo sent events */
	BFA_FCS_VPORT_SM_RSP_OK = 8,	/*  good response */
	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/*  error/bad response */
	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/*  delay timer event */
	BFA_FCS_VPORT_SM_DELCOMP = 11,	/*  lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  Dup wnn error */
	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
};
73
74static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
75 enum bfa_fcs_vport_event event);
76static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
77 enum bfa_fcs_vport_event event);
78static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
79 enum bfa_fcs_vport_event event);
80static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
81 enum bfa_fcs_vport_event event);
82static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
83 enum bfa_fcs_vport_event event);
84static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
85 enum bfa_fcs_vport_event event);
86static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
87 enum bfa_fcs_vport_event event);
88static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
89 enum bfa_fcs_vport_event event);
90static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
91 enum bfa_fcs_vport_event event);
92static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
93 enum bfa_fcs_vport_event event);
94
/*
 * Mapping from state-handler functions to externally visible
 * BFA_FCS_VPORT_* state codes.
 */
static struct bfa_sm_table_s vport_sm_table[] = {
	{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
	{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
	{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
	{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
	{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
	{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
	{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
	{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
	{BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
	{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};
107
/**
 * Beginning state.  Only a CREATE event is legal here; it registers the
 * vport with the fabric and moves to the created state.
 */
static void
bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_CREATE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
		bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
		break;

	default:
		/* Any other event in this state is a state-machine bug. */
		bfa_assert(0);
	}
}
128
/**
 * Created state - a start event is required to start up the state machine.
 */
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_START:
		/* FDISC can only go out on an online, NPIV-capable fabric. */
		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
			bfa_fcs_vport_do_fdisc(vport);
		} else {
			/**
			 * Fabric is offline or not NPIV capable, stay in
			 * offline state.
			 */
			vport->vport_stats.fab_no_npiv++;
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		}
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
	case BFA_FCS_VPORT_SM_OFFLINE:
		/**
		 * Ignore ONLINE/OFFLINE events from fabric till vport is started.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
171
/**
 * Offline state - awaiting ONLINE event from fabric SM.
 */
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
		/* Fabric is up: restart login with a fresh retry budget. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->fdisc_retries = 0;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * This can happen if the vport couldn't be initialized due
		 * to the fact that the npiv was not enabled on the switch.
		 * In that case we will put the vport in offline state.
		 * However, the link can go down and cause this event to be
		 * sent when we are already offline. Ignore it.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
208
/**
 * FDISC is sent and awaiting reply from fabric.
 */
static void
bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* Abandon the pending FDISC and log out instead. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
		bfa_lps_discard(vport->lps);
		bfa_fcs_vport_do_logo(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		break;

	case BFA_FCS_VPORT_SM_RSP_OK:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
		bfa_fcs_port_online(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* Retryable failure: arm the delay timer before resending. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
		bfa_timer_start(__vport_bfa(vport), &vport->timer,
				bfa_fcs_vport_timeout, vport,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_VPORT_SM_RSP_FAILED:
		/* Non-retryable failure: park offline until fabric ONLINE. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		break;

	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
		/* Duplicate WWN: only a DELETE can get us out of error. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
		break;

	default:
		bfa_assert(0);
	}
}
255
/**
 * FDISC attempt failed - a timer is active to retry FDISC.
 */
static void
bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_timer_stop(&vport->timer);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_timer_stop(&vport->timer);
		break;

	case BFA_FCS_VPORT_SM_TIMEOUT:
		/* Retry delay expired: count it and resend the FDISC. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->vport_stats.fdisc_retries++;
		vport->fdisc_retries++;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	default:
		bfa_assert(0);
	}
}
289
/**
 * Vport is online (FDISC is complete).
 */
static void
bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* Fabric dropped: discard the login and take the lport down. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		bfa_fcs_port_offline(&vport->lport);
		break;

	default:
		bfa_assert(0);
	}
}
316
/**
 * Vport is being deleted - awaiting lport delete completion to send
 * LOGO to fabric.
 */
static void
bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete already in progress; ignore the duplicate. */
		break;

	case BFA_FCS_VPORT_SM_DELCOMP:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
		bfa_fcs_vport_do_logo(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* Fabric went away: no LOGO needed, just clean up. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		break;

	default:
		bfa_assert(0);
	}
}
345
/**
 * Error State.
 * This state will be set when the Vport Creation fails due to errors like
 * Dup WWN. In this state only operation allowed is a Vport Delete.
 */
static void
bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	default:
		/* All other events are silently traced, not asserted. */
		bfa_trc(__vport_fcs(vport), event);
	}
}
368
/**
 * Lport cleanup is in progress since vport is being deleted. Fabric is
 * offline, so no LOGO is needed to complete vport deletion.
 */
static void
bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELCOMP:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete already in progress; ignore the duplicate. */
		break;

	default:
		bfa_assert(0);
	}
}
393
/**
 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
 * is done.
 */
static void
bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_lps_discard(vport->lps);
		/*
		 * !!! fall through !!!
		 * Whether the LOGO completed or the fabric dropped, the
		 * vport can now be freed.
		 */

	case BFA_FCS_VPORT_SM_RSP_OK:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete already in progress; ignore the duplicate. */
		break;

	default:
		bfa_assert(0);
	}
}
425
426
427
428/**
429 * fcs_vport_private FCS virtual port private functions
430 */
431
/**
 * Send AEN notification
 *
 * Logs an NPIV-related event for the given lport and fills in the AEN
 * payload with the port identity.
 *
 * NOTE(review): aen_data is populated at the end but no AEN post/send
 * call is visible in this function - confirm whether the event is
 * actually delivered anywhere.
 */
static void
bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = port->fcs->logm;
	enum bfa_port_role role = port->port_cfg.roles;
	wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
	char lpwwn_ptr[BFA_STRING_32];
	/*
	 * NOTE(review): role_str[role / 2] assumes role is a single
	 * power-of-two flag; a combined role bitmask would index past the
	 * intended entry - confirm roles are mutually exclusive here.
	 */
	char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
		{ "Initiator", "Target", "IPFC" };

	wwn2str(lpwwn_ptr, lpwwn);

	bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);

	switch (event) {
	case BFA_LPORT_AEN_NPIV_DUP_WWN:
		bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_NPIV_FABRIC_MAX:
		bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr,
			role_str[role / 2]);
		break;
	case BFA_LPORT_AEN_NPIV_UNKNOWN:
		bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr,
			role_str[role / 2]);
		break;
	default:
		break;
	}

	aen_data.lport.vf_id = port->fabric->vf_id;
	aen_data.lport.roles = role;
	aen_data.lport.ppwwn =
		bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
	aen_data.lport.lpwwn = lpwwn;
}
473
/**
 * This routine will be called to send a FDISC command.
 */
static void
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
{
	/* The LPS module owns the exchange; completion comes back as an SM event. */
	bfa_lps_fdisc(vport->lps, vport,
		      bfa_pport_get_maxfrsize(__vport_bfa(vport)),
		      __vport_pwwn(vport), __vport_nwwn(vport));
	vport->vport_stats.fdisc_sent++;
}
485
/*
 * Classify an FDISC LS_RJT and feed the appropriate event to the vport
 * state machine: retry up to BFA_FCS_VPORT_MAX_RETRIES, then treat
 * invalid-name rejects as duplicate-WWN errors and resource rejects as
 * a fabric-limit failure; anything else keeps retrying indefinitely.
 */
static void
bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
{
	u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
	u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);

	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
	bfa_trc(__vport_fcs(vport), lsrjt_expl);

	/*
	 * For certain reason codes, we don't want to retry.
	 */
	switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
	case FC_LS_RJT_EXP_INV_PORT_NAME:	/* by brocade */
	case FC_LS_RJT_EXP_INVALID_NPORT_ID:	/* by Cisco */
		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		else {
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_DUP_WWN);
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
		}
		break;

	case FC_LS_RJT_EXP_INSUFF_RES:
		/*
		 * This means max logins per port/switch setting on the
		 * switch was exceeded.
		 */
		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		else {
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_FABRIC_MAX);
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
		}
		break;

	default:
		if (vport->fdisc_retries == 0)	/* Print only once */
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_UNKNOWN);
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
531
/**
 * Called to send a logout to the fabric. Used when a V-Port is
 * deleted/stopped.
 */
static void
bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));

	vport->vport_stats.logo_sent++;
	bfa_lps_fdisclogo(vport->lps);
}
544
/**
 * Timer callback, fired by bfa_timer when the FDISC retry delay expires.
 *
 * param[in] vport_arg - pointer to the bfa_fcs_vport_s that armed the timer
 *
 * return void
 *
 * Counts the timeout and forwards a TIMEOUT event to the vport state
 * machine, which resends the FDISC.
 */
static void
bfa_fcs_vport_timeout(void *vport_arg)
{
	struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *)vport_arg;

	vport->vport_stats.fdisc_timeouts++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
}
566
/*
 * Final teardown: unlink the vport from its fabric, notify the driver
 * that the vport is gone, and release the LPS login resource.
 */
static void
bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
{
	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
	bfa_fcb_vport_delete(vport->vport_drv);
	bfa_lps_delete(vport->lps);
}
574
575
576
577/**
578 * fcs_vport_public FCS virtual port public interfaces
579 */
580
/**
 * Online notification from fabric SM.
 */
void
bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_online++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
590
/**
 * Offline notification from fabric SM.
 */
void
bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_offline++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}
600
/**
 * Cleanup notification from fabric SM on link timer expiry.
 * Currently only counted; no state machine event is generated.
 */
void
bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_cleanup++;
}
609
/**
 * Delete completion callback from associated lport.
 * Advances the vport delete sequence (LOGO or final free).
 */
void
bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
}
618
/**
 * Module initialization.  Nothing to do: vports are created on demand.
 */
void
bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs)
{
}
626
/**
 * Module cleanup.  No per-module state; just acknowledge completion so
 * the FCS exit sequence can proceed.
 */
void
bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs)
{
	bfa_fcs_modexit_comp(fcs);
}
635
636u32
637bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
638{
639 struct bfa_ioc_attr_s ioc_attr;
640
641 bfa_get_attr(fcs->bfa, &ioc_attr);
642
643 if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
644 return (BFA_FCS_MAX_VPORTS_SUPP_CT);
645 else
646 return (BFA_FCS_MAX_VPORTS_SUPP_CB);
647}
648
649
650
651/**
652 * fcs_vport_api Virtual port API
653 */
654
655/**
656 * Use this function to instantiate a new FCS vport object. This
657 * function will not trigger any HW initialization process (which will be
658 * done in vport_start() call)
659 *
660 * param[in] vport - pointer to bfa_fcs_vport_t. This space
661 * needs to be allocated by the driver.
662 * param[in] fcs - FCS instance
663 * param[in] vport_cfg - vport configuration
664 * param[in] vf_id - VF_ID if vport is created within a VF.
665 * FC_VF_ID_NULL to specify base fabric.
666 * param[in] vport_drv - Opaque handle back to the driver's vport
667 * structure
668 *
669 * retval BFA_STATUS_OK - on success.
670 * retval BFA_STATUS_FAILED - on failure.
671 */
672bfa_status_t
673bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
674 u16 vf_id, struct bfa_port_cfg_s *vport_cfg,
675 struct bfad_vport_s *vport_drv)
676{
677 if (vport_cfg->pwwn == 0)
678 return (BFA_STATUS_INVALID_WWN);
679
680 if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
681 return BFA_STATUS_VPORT_WWN_BP;
682
683 if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
684 return BFA_STATUS_VPORT_EXISTS;
685
686 if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
687 bfa_fcs_vport_get_max(fcs))
688 return BFA_STATUS_VPORT_MAX;
689
690 vport->lps = bfa_lps_alloc(fcs->bfa);
691 if (!vport->lps)
692 return BFA_STATUS_VPORT_MAX;
693
694 vport->vport_drv = vport_drv;
695 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
696
697 bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport);
698
699 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
700
701 return BFA_STATUS_OK;
702}
703
/**
 * Use this function to initialize (start) the vport.
 *
 * Posts a START event to the vport state machine; the remaining start
 * processing is driven asynchronously by the state machine.
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @returns BFA_STATUS_OK (always; start is asynchronous)
 */
bfa_status_t
bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);

	return BFA_STATUS_OK;
}
718
/**
 * Use this function to quiesce the vport object. This function will return
 * immediately; when the vport is actually stopped, the
 * bfa_drv_vport_stop_cb() will be called.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK (always; stop is asynchronous)
 */
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);

	return BFA_STATUS_OK;
}
735
/**
 * Use this function to delete a vport object. Fabric object should
 * be stopped before this function call.
 *
 * Posts a DELETE event; actual teardown is completed by the state
 * machine and signalled via bfa_fcs_vport_delete_comp().
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK (always; delete is asynchronous)
 */
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);

	return BFA_STATUS_OK;
}
751
752/**
753 * Use this function to get vport's current status info.
754 *
755 * param[in] vport pointer to bfa_fcs_vport_t.
756 * param[out] attr pointer to return vport attributes
757 *
758 * return None
759 */
760void
761bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
762 struct bfa_vport_attr_s *attr)
763{
764 if (vport == NULL || attr == NULL)
765 return;
766
767 bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
768
769 bfa_fcs_port_get_attr(&vport->lport, &attr->port_attr);
770 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
771}
772
773/**
774 * Use this function to get vport's statistics.
775 *
776 * param[in] vport pointer to bfa_fcs_vport_t.
777 * param[out] stats pointer to return vport statistics in
778 *
779 * return None
780 */
781void
782bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
783 struct bfa_vport_stats_s *stats)
784{
785 *stats = vport->vport_stats;
786}
787
788/**
789 * Use this function to clear vport's statistics.
790 *
791 * param[in] vport pointer to bfa_fcs_vport_t.
792 *
793 * return None
794 */
795void
796bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
797{
798 bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
799}
800
801/**
802 * Lookup a virtual port. Excludes base port from lookup.
803 */
804struct bfa_fcs_vport_s *
805bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
806{
807 struct bfa_fcs_vport_s *vport;
808 struct bfa_fcs_fabric_s *fabric;
809
810 bfa_trc(fcs, vf_id);
811 bfa_trc(fcs, vpwwn);
812
813 fabric = bfa_fcs_vf_lookup(fcs, vf_id);
814 if (!fabric) {
815 bfa_trc(fcs, vf_id);
816 return NULL;
817 }
818
819 vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
820 return vport;
821}
822
/**
 * FDISC response callback (invoked on lps FDISC completion).
 *
 * Maps the completion status to a vport state-machine event and
 * updates the vport's FDISC statistics counters.
 */
void
bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_vport_s *vport = uarg;

	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), status);

	switch (status) {
	case BFA_STATUS_OK:
		/*
		 * Initialize the V-Port fields with the port ID (PID)
		 * obtained from the lps module.
		 */
		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
		vport->vport_stats.fdisc_accepts++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
		break;

	case BFA_STATUS_INVALID_MAC:
		/*
		 * Only for CNA
		 */
		vport->vport_stats.fdisc_acc_bad++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);

		break;

	case BFA_STATUS_EPROTOCOL:
		/* Classify the protocol error for statistics only. */
		switch (bfa_lps_get_extstatus(vport->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			vport->vport_stats.fdisc_acc_bad++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			vport->vport_stats.fdisc_unknown_rsp++;
			break;

		default:
			break;
		}

		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* The reject handler decides the next SM event. */
		vport->vport_stats.fdisc_rejects++;
		bfa_fcs_vport_fdisc_rejected(vport);
		break;

	default:
		vport->vport_stats.fdisc_rsp_err++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
880
/**
 * LOGO response callback (invoked on FDISC LOGO completion).
 *
 * Forwards the completion to the vport state machine as RSP_OK.
 */
void
bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
{
	struct bfa_fcs_vport_s *vport = uarg;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
890
891
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index d7576f28c6e9..5edde1a8c04d 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -100,6 +100,8 @@
100#define CTX_OFFSET 0x10000 100#define CTX_OFFSET 0x10000
101#define MAX_CID_CNT 0x4000 101#define MAX_CID_CNT 0x4000
102 102
103#define BNX2I_570X_PAGE_SIZE_DEFAULT 4096
104
103/* 5709 context registers */ 105/* 5709 context registers */
104#define BNX2_MQ_CONFIG2 0x00003d00 106#define BNX2_MQ_CONFIG2 0x00003d00
105#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4) 107#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 41e1b0e7e2ef..5c8d7630c13e 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2386,7 +2386,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; 2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz) 2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE 2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE * 2389 + BNX2I_570X_PAGE_SIZE_DEFAULT *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256); 2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else 2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); 2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 9a7ba71f1af4..cafb888c2376 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1243,7 +1243,7 @@ bnx2i_session_create(struct iscsi_endpoint *ep,
1243 cmds_max = BNX2I_SQ_WQES_MIN; 1243 cmds_max = BNX2I_SQ_WQES_MIN;
1244 1244
1245 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, 1245 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1246 cmds_max, sizeof(struct bnx2i_cmd), 1246 cmds_max, 0, sizeof(struct bnx2i_cmd),
1247 initial_cmdsn, ISCSI_MAX_TARGET); 1247 initial_cmdsn, ISCSI_MAX_TARGET);
1248 if (!cls_session) 1248 if (!cls_session)
1249 return NULL; 1249 return NULL;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index c399f485aa7d..2631bddd255e 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -422,7 +422,7 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
422 BUG_ON(hba != iscsi_host_priv(shost)); 422 BUG_ON(hba != iscsi_host_priv(shost));
423 423
424 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, 424 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
425 cmds_max, 425 cmds_max, 0,
426 sizeof(struct iscsi_tcp_task) + 426 sizeof(struct iscsi_tcp_task) +
427 sizeof(struct cxgb3i_task_data), 427 sizeof(struct cxgb3i_task_data),
428 initial_cmdsn, ISCSI_MAX_TARGET); 428 initial_cmdsn, ISCSI_MAX_TARGET);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 11c89311427e..268189d31d9c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -500,8 +500,6 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
500 if (!ret) 500 if (!ret)
501 goto done; 501 goto done;
502 502
503 err = SCSI_DH_OK;
504
505 switch (sense_hdr.sense_key) { 503 switch (sense_hdr.sense_key) {
506 case NO_SENSE: 504 case NO_SENSE:
507 case ABORTED_COMMAND: 505 case ABORTED_COMMAND:
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b6af63ca980b..496764349c41 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1918,6 +1918,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1918 } 1918 }
1919 size = size>>16; 1919 size = size>>16;
1920 size *= 4; 1920 size *= 4;
1921 if (size > MAX_MESSAGE_SIZE) {
1922 rcode = -EINVAL;
1923 goto cleanup;
1924 }
1921 /* Copy in the user's I2O command */ 1925 /* Copy in the user's I2O command */
1922 if (copy_from_user (msg, user_msg, size)) { 1926 if (copy_from_user (msg, user_msg, size)) {
1923 rcode = -EFAULT; 1927 rcode = -EFAULT;
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 185e6bc4dd40..9e8fce0f0c1b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2900,7 +2900,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2900 eindex = handle; 2900 eindex = handle;
2901 estr->event_source = 0; 2901 estr->event_source = 0;
2902 2902
2903 if (eindex >= MAX_EVENTS) { 2903 if (eindex < 0 || eindex >= MAX_EVENTS) {
2904 spin_unlock_irqrestore(&ha->smp_lock, flags); 2904 spin_unlock_irqrestore(&ha->smp_lock, flags);
2905 return eindex; 2905 return eindex;
2906 } 2906 }
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index c596ab5f05c3..a0e7e711ff9d 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * HighPoint RR3xxx/4xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
41 41
42static char driver_name[] = "hptiop"; 42static char driver_name[] = "hptiop";
43static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; 43static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
44static const char driver_ver[] = "v1.3 (071203)"; 44static const char driver_ver[] = "v1.6 (090910)";
45 45
46static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); 46static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
47static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, 47static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -115,9 +115,13 @@ static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
115static int iop_intr_itl(struct hptiop_hba *hba) 115static int iop_intr_itl(struct hptiop_hba *hba)
116{ 116{
117 struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; 117 struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
118 void __iomem *plx = hba->u.itl.plx;
118 u32 status; 119 u32 status;
119 int ret = 0; 120 int ret = 0;
120 121
122 if (plx && readl(plx + 0x11C5C) & 0xf)
123 writel(1, plx + 0x11C60);
124
121 status = readl(&iop->outbound_intstatus); 125 status = readl(&iop->outbound_intstatus);
122 126
123 if (status & IOPMU_OUTBOUND_INT_MSG0) { 127 if (status & IOPMU_OUTBOUND_INT_MSG0) {
@@ -460,15 +464,25 @@ static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
460 464
461static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba) 465static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
462{ 466{
467 struct pci_dev *pcidev = hba->pcidev;
463 hba->u.itl.iop = hptiop_map_pci_bar(hba, 0); 468 hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
464 if (hba->u.itl.iop) 469 if (hba->u.itl.iop == NULL)
465 return 0;
466 else
467 return -1; 470 return -1;
471 if ((pcidev->device & 0xff00) == 0x4400) {
472 hba->u.itl.plx = hba->u.itl.iop;
473 hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
474 if (hba->u.itl.iop == NULL) {
475 iounmap(hba->u.itl.plx);
476 return -1;
477 }
478 }
479 return 0;
468} 480}
469 481
470static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) 482static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
471{ 483{
484 if (hba->u.itl.plx)
485 iounmap(hba->u.itl.plx);
472 iounmap(hba->u.itl.iop); 486 iounmap(hba->u.itl.iop);
473} 487}
474 488
@@ -1239,22 +1253,23 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
1239static struct pci_device_id hptiop_id_table[] = { 1253static struct pci_device_id hptiop_id_table[] = {
1240 { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops }, 1254 { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
1241 { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops }, 1255 { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
1242 { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops }, 1256 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
1243 { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
1244 { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops }, 1257 { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
1245 { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops }, 1258 { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
1259 { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
1246 { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops }, 1260 { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
1247 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops }, 1261 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
1248 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
1249 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
1250 { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, 1262 { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
1263 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
1251 { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, 1264 { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
1252 { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
1253 { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
1254 { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, 1265 { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
1255 { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, 1266 { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
1256 { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, 1267 { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
1257 { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops }, 1268 { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
1269 { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
1270 { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
1271 { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
1272 { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
1258 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops }, 1273 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
1259 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops }, 1274 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
1260 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops }, 1275 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index a0289f219752..0b871c0ae568 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * HighPoint RR3xxx/4xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -228,6 +228,7 @@ struct hptiop_hba {
228 union { 228 union {
229 struct { 229 struct {
230 struct hpt_iopmu_itl __iomem *iop; 230 struct hpt_iopmu_itl __iomem *iop;
231 void __iomem *plx;
231 } itl; 232 } itl;
232 struct { 233 struct {
233 struct hpt_iopmv_regs *regs; 234 struct hpt_iopmv_regs *regs;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5f045505a1f4..76d294fc7846 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4189,6 +4189,25 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4189} 4189}
4190 4190
4191/** 4191/**
4192 * ipr_isr_eh - Interrupt service routine error handler
4193 * @ioa_cfg: ioa config struct
4194 * @msg: message to log
4195 *
4196 * Return value:
4197 * none
4198 **/
4199static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4200{
4201 ioa_cfg->errors_logged++;
4202 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4203
4204 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4205 ioa_cfg->sdt_state = GET_DUMP;
4206
4207 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4208}
4209
4210/**
4192 * ipr_isr - Interrupt service routine 4211 * ipr_isr - Interrupt service routine
4193 * @irq: irq number 4212 * @irq: irq number
4194 * @devp: pointer to ioa config struct 4213 * @devp: pointer to ioa config struct
@@ -4203,6 +4222,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4203 volatile u32 int_reg, int_mask_reg; 4222 volatile u32 int_reg, int_mask_reg;
4204 u32 ioasc; 4223 u32 ioasc;
4205 u16 cmd_index; 4224 u16 cmd_index;
4225 int num_hrrq = 0;
4206 struct ipr_cmnd *ipr_cmd; 4226 struct ipr_cmnd *ipr_cmd;
4207 irqreturn_t rc = IRQ_NONE; 4227 irqreturn_t rc = IRQ_NONE;
4208 4228
@@ -4233,13 +4253,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4233 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 4253 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4234 4254
4235 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) { 4255 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4236 ioa_cfg->errors_logged++; 4256 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4237 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4238
4239 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4240 ioa_cfg->sdt_state = GET_DUMP;
4241
4242 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4244 return IRQ_HANDLED; 4258 return IRQ_HANDLED;
4245 } 4259 }
@@ -4266,8 +4280,18 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4266 4280
4267 if (ipr_cmd != NULL) { 4281 if (ipr_cmd != NULL) {
4268 /* Clear the PCI interrupt */ 4282 /* Clear the PCI interrupt */
4269 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4283 do {
4270 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4284 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4286 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4287 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4288
4289 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4290 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4291 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4292 return IRQ_HANDLED;
4293 }
4294
4271 } else 4295 } else
4272 break; 4296 break;
4273 } 4297 }
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 163245a1c3e5..19bbcf39f0c9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -144,6 +144,7 @@
144#define IPR_IOA_MAX_SECTORS 32767 144#define IPR_IOA_MAX_SECTORS 32767
145#define IPR_VSET_MAX_SECTORS 512 145#define IPR_VSET_MAX_SECTORS 512
146#define IPR_MAX_CDB_LEN 16 146#define IPR_MAX_CDB_LEN 16
147#define IPR_MAX_HRRQ_RETRIES 3
147 148
148#define IPR_DEFAULT_BUS_WIDTH 16 149#define IPR_DEFAULT_BUS_WIDTH 16
149#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) 150#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2b1b834a098b..edc49ca49cea 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -811,7 +811,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
811 goto free_host; 811 goto free_host;
812 812
813 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost, 813 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
814 cmds_max, 814 cmds_max, 0,
815 sizeof(struct iscsi_tcp_task) + 815 sizeof(struct iscsi_tcp_task) +
816 sizeof(struct iscsi_sw_tcp_hdrbuf), 816 sizeof(struct iscsi_sw_tcp_hdrbuf),
817 initial_cmdsn, 0); 817 initial_cmdsn, 0);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8dc73c489a17..f1a4246f890c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2436,7 +2436,7 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2436 */ 2436 */
2437struct iscsi_cls_session * 2437struct iscsi_cls_session *
2438iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, 2438iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2439 uint16_t cmds_max, int cmd_task_size, 2439 uint16_t cmds_max, int dd_size, int cmd_task_size,
2440 uint32_t initial_cmdsn, unsigned int id) 2440 uint32_t initial_cmdsn, unsigned int id)
2441{ 2441{
2442 struct iscsi_host *ihost = shost_priv(shost); 2442 struct iscsi_host *ihost = shost_priv(shost);
@@ -2486,7 +2486,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2486 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; 2486 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
2487 2487
2488 cls_session = iscsi_alloc_session(shost, iscsit, 2488 cls_session = iscsi_alloc_session(shost, iscsit,
2489 sizeof(struct iscsi_session)); 2489 sizeof(struct iscsi_session) +
2490 dd_size);
2490 if (!cls_session) 2491 if (!cls_session)
2491 goto dec_session_count; 2492 goto dec_session_count;
2492 session = cls_session->dd_data; 2493 session = cls_session->dd_data;
@@ -2503,6 +2504,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2503 session->max_cmdsn = initial_cmdsn + 1; 2504 session->max_cmdsn = initial_cmdsn + 1;
2504 session->max_r2t = 1; 2505 session->max_r2t = 1;
2505 session->tt = iscsit; 2506 session->tt = iscsit;
2507 session->dd_data = cls_session->dd_data + sizeof(*session);
2506 mutex_init(&session->eh_mutex); 2508 mutex_init(&session->eh_mutex);
2507 spin_lock_init(&session->lock); 2509 spin_lock_init(&session->lock);
2508 2510
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index b3381959acce..33cf988c8c8a 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -960,7 +960,6 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
960 960
961 } 961 }
962 } 962 }
963 res = 0;
964 } 963 }
965 964
966 return res; 965 return res;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 61d089703806..c88f59f0ce30 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -56,8 +56,6 @@ static char *dif_op_str[] = {
56 "SCSI_PROT_WRITE_INSERT", 56 "SCSI_PROT_WRITE_INSERT",
57 "SCSI_PROT_READ_PASS", 57 "SCSI_PROT_READ_PASS",
58 "SCSI_PROT_WRITE_PASS", 58 "SCSI_PROT_WRITE_PASS",
59 "SCSI_PROT_READ_CONVERT",
60 "SCSI_PROT_WRITE_CONVERT"
61}; 59};
62static void 60static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 61lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
@@ -1131,13 +1129,11 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1131 ret_prof = LPFC_PROF_A1; 1129 ret_prof = LPFC_PROF_A1;
1132 break; 1130 break;
1133 1131
1134 case SCSI_PROT_READ_CONVERT: 1132 case SCSI_PROT_READ_PASS:
1135 case SCSI_PROT_WRITE_CONVERT: 1133 case SCSI_PROT_WRITE_PASS:
1136 ret_prof = LPFC_PROF_AST1; 1134 ret_prof = LPFC_PROF_AST1;
1137 break; 1135 break;
1138 1136
1139 case SCSI_PROT_READ_PASS:
1140 case SCSI_PROT_WRITE_PASS:
1141 case SCSI_PROT_NORMAL: 1137 case SCSI_PROT_NORMAL:
1142 default: 1138 default:
1143 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1139 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
@@ -1157,8 +1153,6 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1157 ret_prof = LPFC_PROF_C1; 1153 ret_prof = LPFC_PROF_C1;
1158 break; 1154 break;
1159 1155
1160 case SCSI_PROT_READ_CONVERT:
1161 case SCSI_PROT_WRITE_CONVERT:
1162 case SCSI_PROT_READ_INSERT: 1156 case SCSI_PROT_READ_INSERT:
1163 case SCSI_PROT_WRITE_STRIP: 1157 case SCSI_PROT_WRITE_STRIP:
1164 case SCSI_PROT_NORMAL: 1158 case SCSI_PROT_NORMAL:
@@ -1209,8 +1203,7 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1209 static int cnt; 1203 static int cnt;
1210 1204
1211 if (protcnt && (op == SCSI_PROT_WRITE_STRIP || 1205 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1212 op == SCSI_PROT_WRITE_PASS || 1206 op == SCSI_PROT_WRITE_PASS)) {
1213 op == SCSI_PROT_WRITE_CONVERT)) {
1214 1207
1215 cnt++; 1208 cnt++;
1216 spt = page_address(sg_page(scsi_prot_sglist(sc))) + 1209 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
@@ -1501,8 +1494,6 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1501 case SCSI_PROT_WRITE_STRIP: 1494 case SCSI_PROT_WRITE_STRIP:
1502 case SCSI_PROT_READ_PASS: 1495 case SCSI_PROT_READ_PASS:
1503 case SCSI_PROT_WRITE_PASS: 1496 case SCSI_PROT_WRITE_PASS:
1504 case SCSI_PROT_WRITE_CONVERT:
1505 case SCSI_PROT_READ_CONVERT:
1506 ret = LPFC_PG_TYPE_DIF_BUF; 1497 ret = LPFC_PG_TYPE_DIF_BUF;
1507 break; 1498 break;
1508 default: 1499 default:
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 4a86855c23b3..70c4c2467dd8 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
2# Kernel configuration file for the MPT2SAS 2# Kernel configuration file for the MPT2SAS
3# 3#
4# This code is based on drivers/scsi/mpt2sas/Kconfig 4# This code is based on drivers/scsi/mpt2sas/Kconfig
5# Copyright (C) 2007-2008 LSI Corporation 5# Copyright (C) 2007-2009 LSI Corporation
6# (mailto:DL-MPTFusionLinux@lsi.com) 6# (mailto:DL-MPTFusionLinux@lsi.com)
7 7
8# This program is free software; you can redistribute it and/or 8# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 7bb2ece8b2e4..f9f6c0839276 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.11 11 * mpi2.h Version: 02.00.12
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -45,6 +45,13 @@
45 * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. 45 * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
46 * Moved LUN field defines from mpi2_init.h. 46 * Moved LUN field defines from mpi2_init.h.
47 * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. 47 * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
48 * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
49 * In all request and reply descriptors, replaced VF_ID
50 * field with MSIxIndex field.
51 * Removed DevHandle field from
52 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
53 * bytes reserved.
54 * Added RAID Accelerator functionality.
48 * -------------------------------------------------------------------------- 55 * --------------------------------------------------------------------------
49 */ 56 */
50 57
@@ -70,7 +77,7 @@
70#define MPI2_VERSION_02_00 (0x0200) 77#define MPI2_VERSION_02_00 (0x0200)
71 78
72/* versioning for this MPI header set */ 79/* versioning for this MPI header set */
73#define MPI2_HEADER_VERSION_UNIT (0x0B) 80#define MPI2_HEADER_VERSION_UNIT (0x0C)
74#define MPI2_HEADER_VERSION_DEV (0x00) 81#define MPI2_HEADER_VERSION_DEV (0x00)
75#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 82#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
76#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 83#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -257,7 +264,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
257typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR 264typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
258{ 265{
259 U8 RequestFlags; /* 0x00 */ 266 U8 RequestFlags; /* 0x00 */
260 U8 VF_ID; /* 0x01 */ 267 U8 MSIxIndex; /* 0x01 */
261 U16 SMID; /* 0x02 */ 268 U16 SMID; /* 0x02 */
262 U16 LMID; /* 0x04 */ 269 U16 LMID; /* 0x04 */
263 U16 DescriptorTypeDependent; /* 0x06 */ 270 U16 DescriptorTypeDependent; /* 0x06 */
@@ -271,6 +278,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
271#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) 278#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
272#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) 279#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
273#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) 280#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
281#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
274 282
275#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) 283#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
276 284
@@ -279,7 +287,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
279typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR 287typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
280{ 288{
281 U8 RequestFlags; /* 0x00 */ 289 U8 RequestFlags; /* 0x00 */
282 U8 VF_ID; /* 0x01 */ 290 U8 MSIxIndex; /* 0x01 */
283 U16 SMID; /* 0x02 */ 291 U16 SMID; /* 0x02 */
284 U16 LMID; /* 0x04 */ 292 U16 LMID; /* 0x04 */
285 U16 Reserved1; /* 0x06 */ 293 U16 Reserved1; /* 0x06 */
@@ -293,7 +301,7 @@ typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
293typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR 301typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
294{ 302{
295 U8 RequestFlags; /* 0x00 */ 303 U8 RequestFlags; /* 0x00 */
296 U8 VF_ID; /* 0x01 */ 304 U8 MSIxIndex; /* 0x01 */
297 U16 SMID; /* 0x02 */ 305 U16 SMID; /* 0x02 */
298 U16 LMID; /* 0x04 */ 306 U16 LMID; /* 0x04 */
299 U16 DevHandle; /* 0x06 */ 307 U16 DevHandle; /* 0x06 */
@@ -306,7 +314,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
306typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR 314typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
307{ 315{
308 U8 RequestFlags; /* 0x00 */ 316 U8 RequestFlags; /* 0x00 */
309 U8 VF_ID; /* 0x01 */ 317 U8 MSIxIndex; /* 0x01 */
310 U16 SMID; /* 0x02 */ 318 U16 SMID; /* 0x02 */
311 U16 LMID; /* 0x04 */ 319 U16 LMID; /* 0x04 */
312 U16 IoIndex; /* 0x06 */ 320 U16 IoIndex; /* 0x06 */
@@ -315,14 +323,29 @@ typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
315 Mpi2SCSITargetRequestDescriptor_t, 323 Mpi2SCSITargetRequestDescriptor_t,
316 MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; 324 MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
317 325
326
327/* RAID Accelerator Request Descriptor */
328typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
329 U8 RequestFlags; /* 0x00 */
330 U8 MSIxIndex; /* 0x01 */
331 U16 SMID; /* 0x02 */
332 U16 LMID; /* 0x04 */
333 U16 Reserved; /* 0x06 */
334} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
335 MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
336 Mpi2RAIDAcceleratorRequestDescriptor_t,
337 MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
338
339
318/* union of Request Descriptors */ 340/* union of Request Descriptors */
319typedef union _MPI2_REQUEST_DESCRIPTOR_UNION 341typedef union _MPI2_REQUEST_DESCRIPTOR_UNION
320{ 342{
321 MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; 343 MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
322 MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; 344 MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
323 MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; 345 MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
324 MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; 346 MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
325 U64 Words; 347 MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
348 U64 Words;
326} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION, 349} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
327 Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t; 350 Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t;
328 351
@@ -333,19 +356,20 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION
333typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR 356typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
334{ 357{
335 U8 ReplyFlags; /* 0x00 */ 358 U8 ReplyFlags; /* 0x00 */
336 U8 VF_ID; /* 0x01 */ 359 U8 MSIxIndex; /* 0x01 */
337 U16 DescriptorTypeDependent1; /* 0x02 */ 360 U16 DescriptorTypeDependent1; /* 0x02 */
338 U32 DescriptorTypeDependent2; /* 0x04 */ 361 U32 DescriptorTypeDependent2; /* 0x04 */
339} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, 362} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
340 Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; 363 Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
341 364
342/* defines for the ReplyFlags field */ 365/* defines for the ReplyFlags field */
343#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) 366#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
344#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) 367#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
345#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) 368#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
346#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) 369#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
347#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) 370#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
348#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) 371#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
372#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
349 373
350/* values for marking a reply descriptor as unused */ 374/* values for marking a reply descriptor as unused */
351#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) 375#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
@@ -355,7 +379,7 @@ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
355typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR 379typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
356{ 380{
357 U8 ReplyFlags; /* 0x00 */ 381 U8 ReplyFlags; /* 0x00 */
358 U8 VF_ID; /* 0x01 */ 382 U8 MSIxIndex; /* 0x01 */
359 U16 SMID; /* 0x02 */ 383 U16 SMID; /* 0x02 */
360 U32 ReplyFrameAddress; /* 0x04 */ 384 U32 ReplyFrameAddress; /* 0x04 */
361} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, 385} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
@@ -368,10 +392,10 @@ typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
368typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR 392typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
369{ 393{
370 U8 ReplyFlags; /* 0x00 */ 394 U8 ReplyFlags; /* 0x00 */
371 U8 VF_ID; /* 0x01 */ 395 U8 MSIxIndex; /* 0x01 */
372 U16 SMID; /* 0x02 */ 396 U16 SMID; /* 0x02 */
373 U16 TaskTag; /* 0x04 */ 397 U16 TaskTag; /* 0x04 */
374 U16 DevHandle; /* 0x06 */ 398 U16 Reserved1; /* 0x06 */
375} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, 399} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
376 MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, 400 MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
377 Mpi2SCSIIOSuccessReplyDescriptor_t, 401 Mpi2SCSIIOSuccessReplyDescriptor_t,
@@ -382,7 +406,7 @@ typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
382typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR 406typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
383{ 407{
384 U8 ReplyFlags; /* 0x00 */ 408 U8 ReplyFlags; /* 0x00 */
385 U8 VF_ID; /* 0x01 */ 409 U8 MSIxIndex; /* 0x01 */
386 U16 SMID; /* 0x02 */ 410 U16 SMID; /* 0x02 */
387 U8 SequenceNumber; /* 0x04 */ 411 U8 SequenceNumber; /* 0x04 */
388 U8 Reserved1; /* 0x05 */ 412 U8 Reserved1; /* 0x05 */
@@ -397,7 +421,7 @@ typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
397typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR 421typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
398{ 422{
399 U8 ReplyFlags; /* 0x00 */ 423 U8 ReplyFlags; /* 0x00 */
400 U8 VF_ID; /* 0x01 */ 424 U8 MSIxIndex; /* 0x01 */
401 U8 VP_ID; /* 0x02 */ 425 U8 VP_ID; /* 0x02 */
402 U8 Flags; /* 0x03 */ 426 U8 Flags; /* 0x03 */
403 U16 InitiatorDevHandle; /* 0x04 */ 427 U16 InitiatorDevHandle; /* 0x04 */
@@ -411,15 +435,28 @@ typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
411#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) 435#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
412 436
413 437
438/* RAID Accelerator Success Reply Descriptor */
439typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
440 U8 ReplyFlags; /* 0x00 */
441 U8 MSIxIndex; /* 0x01 */
442 U16 SMID; /* 0x02 */
443 U32 Reserved; /* 0x04 */
444} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
445 MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
446 Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
447 MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
448
449
414/* union of Reply Descriptors */ 450/* union of Reply Descriptors */
415typedef union _MPI2_REPLY_DESCRIPTORS_UNION 451typedef union _MPI2_REPLY_DESCRIPTORS_UNION
416{ 452{
417 MPI2_DEFAULT_REPLY_DESCRIPTOR Default; 453 MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
418 MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; 454 MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
419 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; 455 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
420 MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; 456 MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
421 MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; 457 MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
422 U64 Words; 458 MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
459 U64 Words;
423} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, 460} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
424 Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; 461 Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
425 462
@@ -458,6 +495,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
458#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */ 495#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */
459#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ 496#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
460#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ 497#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
498#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
461 499
462 500
463 501
@@ -555,12 +593,17 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
555 593
556#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) 594#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
557 595
596/****************************************************************************
597* RAID Accelerator values
598****************************************************************************/
599
600#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
558 601
559/**************************************************************************** 602/****************************************************************************
560* IOCStatus flag to indicate that log info is available 603* IOCStatus flag to indicate that log info is available
561****************************************************************************/ 604****************************************************************************/
562 605
563#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) 606#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
564 607
565/**************************************************************************** 608/****************************************************************************
566* IOCLogInfo Types 609* IOCLogInfo Types
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 2f27cf6d6c65..ab47c4679640 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.10 9 * mpi2_cnfg.h Version: 02.00.11
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -95,6 +95,11 @@
95 * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. 95 * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
96 * Added PortGroups, DmaGroup, and ControlGroup fields to 96 * Added PortGroups, DmaGroup, and ControlGroup fields to
97 * SAS Device Page 0. 97 * SAS Device Page 0.
98 * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
99 * Unit Page 6.
100 * Added expander reduced functionality data to SAS
101 * Expander Page 0.
102 * Added SAS PHY Page 2 and SAS PHY Page 3.
98 * -------------------------------------------------------------------------- 103 * --------------------------------------------------------------------------
99 */ 104 */
100 105
@@ -723,6 +728,65 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
723#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) 728#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
724 729
725 730
731/* IO Unit Page 5 */
732
733/*
734 * Upper layer code (drivers, utilities, etc.) should leave this define set to
735 * one and check Header.PageLength or NumDmaEngines at runtime.
736 */
737#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
738#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
739#endif
740
741typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
742 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
743 U64 RaidAcceleratorBufferBaseAddress; /* 0x04 */
744 U64 RaidAcceleratorBufferSize; /* 0x0C */
745 U64 RaidAcceleratorControlBaseAddress; /* 0x14 */
746 U8 RAControlSize; /* 0x1C */
747 U8 NumDmaEngines; /* 0x1D */
748 U8 RAMinControlSize; /* 0x1E */
749 U8 RAMaxControlSize; /* 0x1F */
750 U32 Reserved1; /* 0x20 */
751 U32 Reserved2; /* 0x24 */
752 U32 Reserved3; /* 0x28 */
753 U32 DmaEngineCapabilities
754 [MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */
755} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
756 Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t;
757
758#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
759
760/* defines for IO Unit Page 5 DmaEngineCapabilities field */
761#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
762#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
763
764#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
765#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
766#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
767#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
768
769
770/* IO Unit Page 6 */
771
772typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
773 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
774 U16 Flags; /* 0x04 */
775 U8 RAHostControlSize; /* 0x06 */
776 U8 Reserved0; /* 0x07 */
777 U64 RaidAcceleratorHostControlBaseAddress; /* 0x08 */
778 U32 Reserved1; /* 0x10 */
779 U32 Reserved2; /* 0x14 */
780 U32 Reserved3; /* 0x18 */
781} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
782 Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t;
783
784#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
785
786/* defines for IO Unit Page 6 Flags field */
787#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
788
789
726/**************************************************************************** 790/****************************************************************************
727* IOC Config Pages 791* IOC Config Pages
728****************************************************************************/ 792****************************************************************************/
@@ -1709,10 +1773,14 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
1709 U64 ActiveZoneManagerSASAddress;/* 0x2C */ 1773 U64 ActiveZoneManagerSASAddress;/* 0x2C */
1710 U16 ZoneLockInactivityLimit; /* 0x34 */ 1774 U16 ZoneLockInactivityLimit; /* 0x34 */
1711 U16 Reserved1; /* 0x36 */ 1775 U16 Reserved1; /* 0x36 */
1776 U8 TimeToReducedFunc; /* 0x38 */
1777 U8 InitialTimeToReducedFunc; /* 0x39 */
1778 U8 MaxReducedFuncTime; /* 0x3A */
1779 U8 Reserved2; /* 0x3B */
1712} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0, 1780} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
1713 Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t; 1781 Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t;
1714 1782
1715#define MPI2_SASEXPANDER0_PAGEVERSION (0x05) 1783#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
1716 1784
1717/* values for SAS Expander Page 0 DiscoveryStatus field */ 1785/* values for SAS Expander Page 0 DiscoveryStatus field */
1718#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) 1786#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
@@ -1737,6 +1805,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
1737#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) 1805#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
1738 1806
1739/* values for SAS Expander Page 0 Flags field */ 1807/* values for SAS Expander Page 0 Flags field */
1808#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
1740#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) 1809#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
1741#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) 1810#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
1742#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) 1811#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
@@ -1944,6 +2013,133 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1
1944#define MPI2_SASPHY1_PAGEVERSION (0x01) 2013#define MPI2_SASPHY1_PAGEVERSION (0x01)
1945 2014
1946 2015
2016/* SAS PHY Page 2 */
2017
2018typedef struct _MPI2_SASPHY2_PHY_EVENT {
2019 U8 PhyEventCode; /* 0x00 */
2020 U8 Reserved1; /* 0x01 */
2021 U16 Reserved2; /* 0x02 */
2022 U32 PhyEventInfo; /* 0x04 */
2023} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT,
2024 Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t;
2025
2026/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
2027
2028
2029/*
2030 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2031 * one and check Header.ExtPageLength or NumPhyEvents at runtime.
2032 */
2033#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
2034#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
2035#endif
2036
2037typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
2038 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2039 U32 Reserved1; /* 0x08 */
2040 U8 NumPhyEvents; /* 0x0C */
2041 U8 Reserved2; /* 0x0D */
2042 U16 Reserved3; /* 0x0E */
2043 MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX];
2044 /* 0x10 */
2045} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
2046 Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t;
2047
2048#define MPI2_SASPHY2_PAGEVERSION (0x00)
2049
2050
2051/* SAS PHY Page 3 */
2052
2053typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
2054 U8 PhyEventCode; /* 0x00 */
2055 U8 Reserved1; /* 0x01 */
2056 U16 Reserved2; /* 0x02 */
2057 U8 CounterType; /* 0x04 */
2058 U8 ThresholdWindow; /* 0x05 */
2059 U8 TimeUnits; /* 0x06 */
2060 U8 Reserved3; /* 0x07 */
2061 U32 EventThreshold; /* 0x08 */
2062 U16 ThresholdFlags; /* 0x0C */
2063 U16 Reserved4; /* 0x0E */
2064} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
2065 Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t;
2066
2067/* values for PhyEventCode field */
2068#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
2069#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
2070#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
2071#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
2072#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
2073#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
2074#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
2075#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
2076#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
2077#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
2078#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
2079#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
2080#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
2081#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
2082#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
2083#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
2084#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
2085#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
2086#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
2087#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
2088#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
2089#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
2090#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
2091#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
2092#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
2093#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
2094#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
2095#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
2096#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
2097#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
2098#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
2099#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
2100#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
2101#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
2102#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
2103#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
2104#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
2105
2106/* values for the CounterType field */
2107#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
2108#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
2109#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
2110
2111/* values for the TimeUnits field */
2112#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
2113#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
2114#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
2115#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
2116
2117/* values for the ThresholdFlags field */
2118#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
2119#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
2120
2121/*
2122 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2123 * one and check Header.ExtPageLength or NumPhyEvents at runtime.
2124 */
2125#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
2126#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
2127#endif
2128
2129typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
2130 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2131 U32 Reserved1; /* 0x08 */
2132 U8 NumPhyEvents; /* 0x0C */
2133 U8 Reserved2; /* 0x0D */
2134 U16 Reserved3; /* 0x0E */
2135 MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig
2136 [MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */
2137} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
2138 Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t;
2139
2140#define MPI2_SASPHY3_PAGEVERSION (0x00)
2141
2142
1947/**************************************************************************** 2143/****************************************************************************
1948* SAS Port Config Pages 2144* SAS Port Config Pages
1949****************************************************************************/ 2145****************************************************************************/
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
new file mode 100644
index 000000000000..65fcaa31cb30
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -0,0 +1,334 @@
1 ==============================
2 Fusion-MPT MPI 2.0 Header File Change History
3 ==============================
4
5 Copyright (c) 2000-2009 LSI Corporation.
6
7 ---------------------------------------
8 Header Set Release Version: 02.00.12
9 Header Set Release Date: 05-06-09
10 ---------------------------------------
11
12 Filename Current version Prior version
13 ---------- --------------- -------------
14 mpi2.h 02.00.12 02.00.11
15 mpi2_cnfg.h 02.00.11 02.00.10
16 mpi2_init.h 02.00.07 02.00.06
17 mpi2_ioc.h 02.00.11 02.00.10
18 mpi2_raid.h 02.00.03 02.00.03
19 mpi2_sas.h 02.00.02 02.00.02
20 mpi2_targ.h 02.00.03 02.00.03
21 mpi2_tool.h 02.00.03 02.00.02
22 mpi2_type.h 02.00.00 02.00.00
23 mpi2_ra.h 02.00.00
24 mpi2_history.txt 02.00.11 02.00.12
25
26
27 * Date Version Description
28 * -------- -------- ------------------------------------------------------
29
30mpi2.h
31 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
32 * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
33 * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
34 * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
35 * Moved ReplyPostHostIndex register to offset 0x6C of the
36 * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
37 * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
38 * Added union of request descriptors.
39 * Added union of reply descriptors.
40 * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
41 * Added define for MPI2_VERSION_02_00.
42 * Fixed the size of the FunctionDependent5 field in the
43 * MPI2_DEFAULT_REPLY structure.
44 * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
45 * Removed the MPI-defined Fault Codes and extended the
46 * product specific codes up to 0xEFFF.
47 * Added a sixth key value for the WriteSequence register
48 * and changed the flush value to 0x0.
49 * Added message function codes for Diagnostic Buffer Post
50 * and Diagnsotic Release.
51 * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
52 * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
53 * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
54 * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
55 * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
56 * Added #defines for marking a reply descriptor as unused.
57 * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
58 * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
59 * Moved LUN field defines from mpi2_init.h.
60 * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
61 * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
62 * In all request and reply descriptors, replaced VF_ID
63 * field with MSIxIndex field.
64 * Removed DevHandle field from
65 * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
66 * bytes reserved.
67 * Added RAID Accelerator functionality.
68 * --------------------------------------------------------------------------
69
70mpi2_cnfg.h
71 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
72 * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
73 * Added Manufacturing Page 11.
74 * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
75 * define.
76 * 06-26-07 02.00.02 Adding generic structure for product-specific
77 * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
78 * Rework of BIOS Page 2 configuration page.
79 * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
80 * forms.
81 * Added configuration pages IOC Page 8 and Driver
82 * Persistent Mapping Page 0.
83 * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
84 * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
85 * RAID Physical Disk Pages 0 and 1, RAID Configuration
86 * Page 0).
87 * Added new value for AccessStatus field of SAS Device
88 * Page 0 (_SATA_NEEDS_INITIALIZATION).
89 * 10-31-07 02.00.04 Added missing SEPDevHandle field to
90 * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
91 * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
92 * NVDATA.
93 * Modified IOC Page 7 to use masks and added field for
94 * SASBroadcastPrimitiveMasks.
95 * Added MPI2_CONFIG_PAGE_BIOS_4.
96 * Added MPI2_CONFIG_PAGE_LOG_0.
97 * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
98 * Added SAS Device IDs.
99 * Updated Integrated RAID configuration pages including
100 * Manufacturing Page 4, IOC Page 6, and RAID Configuration
101 * Page 0.
102 * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
103 * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
104 * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
105 * Added missing MaxNumRoutedSasAddresses field to
106 * MPI2_CONFIG_PAGE_EXPANDER_0.
107 * Added SAS Port Page 0.
108 * Modified structure layout for
109 * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
110 * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
111 * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
112 * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
113 * to 0x000000FF.
114 * Added two new values for the Physical Disk Coercion Size
115 * bits in the Flags field of Manufacturing Page 4.
116 * Added product-specific Manufacturing pages 16 to 31.
117 * Modified Flags bits for controlling write cache on SATA
118 * drives in IO Unit Page 1.
119 * Added new bit to AdditionalControlFlags of SAS IO Unit
120 * Page 1 to control Invalid Topology Correction.
121 * Added SupportedPhysDisks field to RAID Volume Page 1 and
122 * added related defines.
123 * Added additional defines for RAID Volume Page 0
124 * VolumeStatusFlags field.
125 * Modified meaning of RAID Volume Page 0 VolumeSettings
126 * define for auto-configure of hot-swap drives.
127 * Added PhysDiskAttributes field (and related defines) to
128 * RAID Physical Disk Page 0.
129 * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
130 * Added three new DiscoveryStatus bits for SAS IO Unit
131 * Page 0 and SAS Expander Page 0.
132 * Removed multiplexing information from SAS IO Unit pages.
133 * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
134 * Removed Zone Address Resolved bit from PhyInfo and from
135 * Expander Page 0 Flags field.
136 * Added two new AccessStatus values to SAS Device Page 0
137 * for indicating routing problems. Added 3 reserved words
138 * to this page.
139 * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
140 * Inserted missing reserved field into structure for IOC
141 * Page 6.
142 * Added more pending task bits to RAID Volume Page 0
143 * VolumeStatusFlags defines.
144 * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
145 * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
146 * and SAS Expander Page 0 to flag a downstream initiator
147 * when in simplified routing mode.
148 * Removed SATA Init Failure defines for DiscoveryStatus
149 * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
150 * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
151 * Added PortGroups, DmaGroup, and ControlGroup fields to
152 * SAS Device Page 0.
153 * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
154 * Unit Page 6.
155 * Added expander reduced functionality data to SAS
156 * Expander Page 0.
157 * Added SAS PHY Page 2 and SAS PHY Page 3.
158 * --------------------------------------------------------------------------
159
160mpi2_init.h
161 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
162 * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
163 * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
164 * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
165 * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
166 * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
167 * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
168 * Control field Task Attribute flags.
169 * Moved LUN field defines to mpi2.h becasue they are
170 * common to many structures.
171 * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
172 * Query Asynchronous Event.
173 * Defined two new bits in the SlotStatus field of the SCSI
174 * Enclosure Processor Request and Reply.
175 * --------------------------------------------------------------------------
176
177mpi2_ioc.h
178 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
179 * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
180 * MaxTargets.
181 * Added TotalImageSize field to FWDownload Request.
182 * Added reserved words to FWUpload Request.
183 * 06-26-07 02.00.02 Added IR Configuration Change List Event.
184 * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
185 * request and replaced it with
186 * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
187 * Replaced the MinReplyQueueDepth field of the IOCFacts
188 * reply with MaxReplyDescriptorPostQueueDepth.
189 * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
190 * depth for the Reply Descriptor Post Queue.
191 * Added SASAddress field to Initiator Device Table
192 * Overflow Event data.
193 * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
194 * for SAS Initiator Device Status Change Event data.
195 * Modified Reason Code defines for SAS Topology Change
196 * List Event data, including adding a bit for PHY Vacant
197 * status, and adding a mask for the Reason Code.
198 * Added define for
199 * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
200 * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
201 * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
202 * the IOCFacts Reply.
203 * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
204 * Moved MPI2_VERSION_UNION to mpi2.h.
205 * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
206 * instead of enables, and added SASBroadcastPrimitiveMasks
207 * field.
208 * Added Log Entry Added Event and related structure.
209 * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
210 * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
211 * Added MaxVolumes and MaxPersistentEntries fields to
212 * IOCFacts reply.
213 * Added ProtocalFlags and IOCCapabilities fields to
214 * MPI2_FW_IMAGE_HEADER.
215 * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
216 * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
217 * a U16 (from a U32).
218 * Removed extra 's' from EventMasks name.
219 * 06-27-08 02.00.08 Fixed an offset in a comment.
220 * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
221 * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
222 * renamed MinReplyFrameSize to ReplyFrameSize.
223 * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
224 * Added two new RAIDOperation values for Integrated RAID
225 * Operations Status Event data.
226 * Added four new IR Configuration Change List Event data
227 * ReasonCode values.
228 * Added two new ReasonCode defines for SAS Device Status
229 * Change Event data.
230 * Added three new DiscoveryStatus bits for the SAS
231 * Discovery event data.
232 * Added Multiplexing Status Change bit to the PhyStatus
233 * field of the SAS Topology Change List event data.
234 * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
235 * BootFlags are now product-specific.
 236 *                      Added defines for the individual signature bytes
237 * for MPI2_INIT_IMAGE_FOOTER.
238 * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
239 * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
240 * define.
241 * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
242 * define.
243 * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
244 * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
245 * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
246 * Added two new reason codes for SAS Device Status Change
247 * Event.
248 * Added new event: SAS PHY Counter.
249 * --------------------------------------------------------------------------
250
251mpi2_raid.h
252 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
253 * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
254 * including the Actions and ActionData.
255 * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
256 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
257 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
258 * can be sized by the build environment.
259 * --------------------------------------------------------------------------
260
261mpi2_sas.h
262 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
263 * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
264 * Control Request.
265 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
266 * Request.
267 * --------------------------------------------------------------------------
268
269mpi2_targ.h
270 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
271 * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
272 * BufferPostFlags field of CommandBufferPostBase Request.
273 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
274 * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
275 * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
276 * Target Status Send Request only takes a single SGE for
277 * response data.
278 * --------------------------------------------------------------------------
279
280mpi2_tool.h
281 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
282 * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
283 * structures and defines.
284 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
285 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
286 * --------------------------------------------------------------------------
287
288mpi2_type.h
289 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
290 * --------------------------------------------------------------------------
291
292mpi2_ra.h
293 * 05-06-09 02.00.00 Initial version.
294 * --------------------------------------------------------------------------
295
296mpi2_history.txt Parts list history
297
298Filename 02.00.12
299---------- --------
300mpi2.h 02.00.12
301mpi2_cnfg.h 02.00.11
302mpi2_init.h 02.00.07
303mpi2_ioc.h 02.00.11
304mpi2_raid.h 02.00.03
305mpi2_sas.h 02.00.02
306mpi2_targ.h 02.00.03
307mpi2_tool.h 02.00.03
308mpi2_type.h 02.00.00
309mpi2_ra.h 02.00.00
310
311Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
312---------- -------- -------- -------- -------- -------- --------
313mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
314mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
315mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
316mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
317mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
318mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
319mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
320mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
321mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
322
323Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
324---------- -------- -------- -------- -------- -------- --------
325mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
326mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
327mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
328mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
329mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
330mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
331mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
332mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
333mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
334
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index f1115f0f0eb2..563e56d2e945 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2008 LSI Corporation. 2 * Copyright (c) 2000-2009 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_init.h 5 * Name: mpi2_init.h
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.06 9 * mpi2_init.h Version: 02.00.07
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -23,6 +23,10 @@
23 * Control field Task Attribute flags. 23 * Control field Task Attribute flags.
 24 *                      Moved LUN field defines to mpi2.h becasue they are 24 *                      common to many structures.
 (typo "becasue" = "because" in upstream source; preserved verbatim above to keep the patch record intact)
25 * common to many structures. 25 * common to many structures.
26 * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
27 * Query Asynchronous Event.
28 * Defined two new bits in the SlotStatus field of the SCSI
29 * Enclosure Processor Request and Reply.
26 * -------------------------------------------------------------------------- 30 * --------------------------------------------------------------------------
27 */ 31 */
28 32
@@ -289,7 +293,11 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST
289#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) 293#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
290#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) 294#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
291#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) 295#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
292#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION (0x0A) 296#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
297
298/* obsolete TaskType name */
299#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
300 (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
293 301
294/* MsgFlags bits */ 302/* MsgFlags bits */
295 303
@@ -375,6 +383,8 @@ typedef struct _MPI2_SEP_REQUEST
375#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) 383#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
376#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) 384#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
377#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) 385#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
386#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
387#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
378#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) 388#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
379#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) 389#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
380#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) 390#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
@@ -410,6 +420,8 @@ typedef struct _MPI2_SEP_REPLY
410#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) 420#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
411#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) 421#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
412#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) 422#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
423#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
424#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
413#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) 425#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
414#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) 426#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
415#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) 427#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 8c5d81870c03..c294128bdeb4 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.10 9 * mpi2_ioc.h Version: 02.00.11
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -79,6 +79,11 @@
79 * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE 79 * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
80 * define. 80 * define.
81 * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. 81 * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
82 * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
83 * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
84 * Added two new reason codes for SAS Device Status Change
85 * Event.
86 * Added new event: SAS PHY Counter.
82 * -------------------------------------------------------------------------- 87 * --------------------------------------------------------------------------
83 */ 88 */
84 89
@@ -261,6 +266,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY
261/* ProductID field uses MPI2_FW_HEADER_PID_ */ 266/* ProductID field uses MPI2_FW_HEADER_PID_ */
262 267
263/* IOCCapabilities */ 268/* IOCCapabilities */
269#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
270#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
264#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) 271#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
265#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) 272#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
266#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) 273#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
@@ -440,6 +447,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
440#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) 447#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
441#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) 448#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
442#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) 449#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
450#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
443 451
444 452
445/* Log Entry Added Event data */ 453/* Log Entry Added Event data */
@@ -502,17 +510,19 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
502 MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t; 510 MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t;
503 511
504/* SAS Device Status Change Event data ReasonCode values */ 512/* SAS Device Status Change Event data ReasonCode values */
505#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) 513#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
506#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) 514#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
507#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) 515#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
508#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) 516#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
509#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) 517#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
510#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) 518#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
511#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) 519#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
512#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) 520#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
513#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) 521#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
514#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) 522#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
515#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) 523#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
524#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
525#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
516 526
517 527
518/* Integrated RAID Operation Status Event data */ 528/* Integrated RAID Operation Status Event data */
@@ -822,6 +832,37 @@ typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE
822#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) 832#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
823 833
824 834
835/* SAS PHY Counter Event data */
836
837typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
838 U64 TimeStamp; /* 0x00 */
839 U32 Reserved1; /* 0x08 */
840 U8 PhyEventCode; /* 0x0C */
841 U8 PhyNum; /* 0x0D */
842 U16 Reserved2; /* 0x0E */
843 U32 PhyEventInfo; /* 0x10 */
844 U8 CounterType; /* 0x14 */
845 U8 ThresholdWindow; /* 0x15 */
846 U8 TimeUnits; /* 0x16 */
847 U8 Reserved3; /* 0x17 */
848 U32 EventThreshold; /* 0x18 */
849 U16 ThresholdFlags; /* 0x1C */
850 U16 Reserved4; /* 0x1E */
851} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
852 MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
853 Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t;
854
855/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the
856 * PhyEventCode field
857 * use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the
858 * CounterType field
859 * use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the
860 * TimeUnits field
861 * use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the
862 * ThresholdFlags field
863 * */
864
865
825/**************************************************************************** 866/****************************************************************************
826* EventAck message 867* EventAck message
827****************************************************************************/ 868****************************************************************************/
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 2ff4e936bd39..007e950f7bfa 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2008 LSI Corporation. 2 * Copyright (c) 2000-2009 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_tool.h 5 * Name: mpi2_tool.h
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.02 9 * mpi2_tool.h Version: 02.00.03
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -17,6 +17,7 @@
17 * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release 17 * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
18 * structures and defines. 18 * structures and defines.
19 * 02-29-08 02.00.02 Modified various names to make them 32-character unique. 19 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
20 * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
20 * -------------------------------------------------------------------------- 21 * --------------------------------------------------------------------------
21 */ 22 */
22 23
@@ -32,7 +33,10 @@
32/* defines for the Tools */ 33/* defines for the Tools */
33#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) 34#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
34#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) 35#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
36#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
35#define MPI2_TOOLBOX_BEACON_TOOL (0x05) 37#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
38#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
39
36 40
37/**************************************************************************** 41/****************************************************************************
38* Toolbox reply 42* Toolbox reply
@@ -112,6 +116,77 @@ typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
112 116
113 117
114/**************************************************************************** 118/****************************************************************************
119* Toolbox ISTWI Read Write Tool
120****************************************************************************/
121
122/* Toolbox ISTWI Read Write Tool request message */
123typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
124 U8 Tool; /* 0x00 */
125 U8 Reserved1; /* 0x01 */
126 U8 ChainOffset; /* 0x02 */
127 U8 Function; /* 0x03 */
128 U16 Reserved2; /* 0x04 */
129 U8 Reserved3; /* 0x06 */
130 U8 MsgFlags; /* 0x07 */
131 U8 VP_ID; /* 0x08 */
132 U8 VF_ID; /* 0x09 */
133 U16 Reserved4; /* 0x0A */
134 U32 Reserved5; /* 0x0C */
135 U32 Reserved6; /* 0x10 */
136 U8 DevIndex; /* 0x14 */
137 U8 Action; /* 0x15 */
138 U8 SGLFlags; /* 0x16 */
139 U8 Reserved7; /* 0x17 */
140 U16 TxDataLength; /* 0x18 */
141 U16 RxDataLength; /* 0x1A */
142 U32 Reserved8; /* 0x1C */
143 U32 Reserved9; /* 0x20 */
144 U32 Reserved10; /* 0x24 */
145 U32 Reserved11; /* 0x28 */
146 U32 Reserved12; /* 0x2C */
147 MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */
148} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
149 MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
150 Mpi2ToolboxIstwiReadWriteRequest_t,
151 MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t;
152
153/* values for the Action field */
154#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
155#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
156#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
157#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
158#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
159#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
160
161/* values for SGLFlags field are in the SGL section of mpi2.h */
162
163
164/* Toolbox ISTWI Read Write Tool reply message */
165typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
166 U8 Tool; /* 0x00 */
167 U8 Reserved1; /* 0x01 */
168 U8 MsgLength; /* 0x02 */
169 U8 Function; /* 0x03 */
170 U16 Reserved2; /* 0x04 */
171 U8 Reserved3; /* 0x06 */
172 U8 MsgFlags; /* 0x07 */
173 U8 VP_ID; /* 0x08 */
174 U8 VF_ID; /* 0x09 */
175 U16 Reserved4; /* 0x0A */
176 U16 Reserved5; /* 0x0C */
177 U16 IOCStatus; /* 0x0E */
178 U32 IOCLogInfo; /* 0x10 */
179 U8 DevIndex; /* 0x14 */
180 U8 Action; /* 0x15 */
181 U8 IstwiStatus; /* 0x16 */
182 U8 Reserved6; /* 0x17 */
183 U16 TxDataCount; /* 0x18 */
184 U16 RxDataCount; /* 0x1A */
185} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY,
186 Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t;
187
188
189/****************************************************************************
115* Toolbox Beacon Tool request 190* Toolbox Beacon Tool request
116****************************************************************************/ 191****************************************************************************/
117 192
@@ -139,6 +214,61 @@ typedef struct _MPI2_TOOLBOX_BEACON_REQUEST
139#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) 214#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
140 215
141 216
217/****************************************************************************
218* Toolbox Diagnostic CLI Tool
219****************************************************************************/
220
221#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
222
223/* Toolbox Diagnostic CLI Tool request message */
224typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
225 U8 Tool; /* 0x00 */
226 U8 Reserved1; /* 0x01 */
227 U8 ChainOffset; /* 0x02 */
228 U8 Function; /* 0x03 */
229 U16 Reserved2; /* 0x04 */
230 U8 Reserved3; /* 0x06 */
231 U8 MsgFlags; /* 0x07 */
232 U8 VP_ID; /* 0x08 */
233 U8 VF_ID; /* 0x09 */
234 U16 Reserved4; /* 0x0A */
235 U8 SGLFlags; /* 0x0C */
236 U8 Reserved5; /* 0x0D */
237 U16 Reserved6; /* 0x0E */
238 U32 DataLength; /* 0x10 */
239 U8 DiagnosticCliCommand
240 [MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */
241 MPI2_SGE_SIMPLE_UNION SGL; /* 0x70 */
242} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
243 MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
244 Mpi2ToolboxDiagnosticCliRequest_t,
245 MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
246
247/* values for SGLFlags field are in the SGL section of mpi2.h */
248
249
250/* Toolbox Diagnostic CLI Tool reply message */
251typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
252 U8 Tool; /* 0x00 */
253 U8 Reserved1; /* 0x01 */
254 U8 MsgLength; /* 0x02 */
255 U8 Function; /* 0x03 */
256 U16 Reserved2; /* 0x04 */
257 U8 Reserved3; /* 0x06 */
258 U8 MsgFlags; /* 0x07 */
259 U8 VP_ID; /* 0x08 */
260 U8 VF_ID; /* 0x09 */
261 U16 Reserved4; /* 0x0A */
262 U16 Reserved5; /* 0x0C */
263 U16 IOCStatus; /* 0x0E */
264 U32 IOCLogInfo; /* 0x10 */
265 U32 ReturnedDataLength; /* 0x14 */
266} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
267 MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
268 Mpi2ToolboxDiagnosticCliReply_t,
269 MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
270
271
142/***************************************************************************** 272/*****************************************************************************
143* 273*
144* Diagnostic Buffer Messages 274* Diagnostic Buffer Messages
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index d95d2f274cb3..670241efa4b5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2008 LSI Corporation 6 * Copyright (C) 2007-2009 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -63,7 +63,7 @@
63static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; 63static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
64 64
65#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ 65#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
66#define MPT2SAS_MAX_REQUEST_QUEUE 500 /* maximum controller queue depth */ 66#define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */
67 67
68static int max_queue_depth = -1; 68static int max_queue_depth = -1;
69module_param(max_queue_depth, int, 0); 69module_param(max_queue_depth, int, 0);
@@ -543,13 +543,13 @@ mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
543 * _base_display_reply_info - 543 * _base_display_reply_info -
544 * @ioc: pointer to scsi command object 544 * @ioc: pointer to scsi command object
545 * @smid: system request message index 545 * @smid: system request message index
546 * @VF_ID: virtual function id 546 * @msix_index: MSIX table index supplied by the OS
547 * @reply: reply message frame(lower 32bit addr) 547 * @reply: reply message frame(lower 32bit addr)
548 * 548 *
549 * Return nothing. 549 * Return nothing.
550 */ 550 */
551static void 551static void
552_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, 552_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
553 u32 reply) 553 u32 reply)
554{ 554{
555 MPI2DefaultReply_t *mpi_reply; 555 MPI2DefaultReply_t *mpi_reply;
@@ -572,22 +572,24 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
572 * mpt2sas_base_done - base internal command completion routine 572 * mpt2sas_base_done - base internal command completion routine
573 * @ioc: pointer to scsi command object 573 * @ioc: pointer to scsi command object
574 * @smid: system request message index 574 * @smid: system request message index
575 * @VF_ID: virtual function id 575 * @msix_index: MSIX table index supplied by the OS
576 * @reply: reply message frame(lower 32bit addr) 576 * @reply: reply message frame(lower 32bit addr)
577 * 577 *
578 * Return nothing. 578 * Return 1 meaning mf should be freed from _base_interrupt
579 * 0 means the mf is freed from this function.
579 */ 580 */
580void 581u8
581mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 582mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
583 u32 reply)
582{ 584{
583 MPI2DefaultReply_t *mpi_reply; 585 MPI2DefaultReply_t *mpi_reply;
584 586
585 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 587 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
586 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) 588 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
587 return; 589 return 1;
588 590
589 if (ioc->base_cmds.status == MPT2_CMD_NOT_USED) 591 if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
590 return; 592 return 1;
591 593
592 ioc->base_cmds.status |= MPT2_CMD_COMPLETE; 594 ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
593 if (mpi_reply) { 595 if (mpi_reply) {
@@ -596,18 +598,20 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
596 } 598 }
597 ioc->base_cmds.status &= ~MPT2_CMD_PENDING; 599 ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
598 complete(&ioc->base_cmds.done); 600 complete(&ioc->base_cmds.done);
601 return 1;
599} 602}
600 603
601/** 604/**
602 * _base_async_event - main callback handler for firmware asyn events 605 * _base_async_event - main callback handler for firmware asyn events
603 * @ioc: pointer to scsi command object 606 * @ioc: pointer to scsi command object
604 * @VF_ID: virtual function id 607 * @msix_index: MSIX table index supplied by the OS
605 * @reply: reply message frame(lower 32bit addr) 608 * @reply: reply message frame(lower 32bit addr)
606 * 609 *
607 * Return nothing. 610 * Return 1 meaning mf should be freed from _base_interrupt
611 * 0 means the mf is freed from this function.
608 */ 612 */
609static void 613static u8
610_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply) 614_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
611{ 615{
612 Mpi2EventNotificationReply_t *mpi_reply; 616 Mpi2EventNotificationReply_t *mpi_reply;
613 Mpi2EventAckRequest_t *ack_request; 617 Mpi2EventAckRequest_t *ack_request;
@@ -615,9 +619,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
615 619
616 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 620 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
617 if (!mpi_reply) 621 if (!mpi_reply)
618 return; 622 return 1;
619 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) 623 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
620 return; 624 return 1;
621#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 625#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
622 _base_display_event_data(ioc, mpi_reply); 626 _base_display_event_data(ioc, mpi_reply);
623#endif 627#endif
@@ -635,16 +639,47 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
635 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; 639 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
636 ack_request->Event = mpi_reply->Event; 640 ack_request->Event = mpi_reply->Event;
637 ack_request->EventContext = mpi_reply->EventContext; 641 ack_request->EventContext = mpi_reply->EventContext;
638 ack_request->VF_ID = VF_ID; 642 ack_request->VF_ID = 0; /* TODO */
639 mpt2sas_base_put_smid_default(ioc, smid, VF_ID); 643 ack_request->VP_ID = 0;
644 mpt2sas_base_put_smid_default(ioc, smid);
640 645
641 out: 646 out:
642 647
643 /* scsih callback handler */ 648 /* scsih callback handler */
644 mpt2sas_scsih_event_callback(ioc, VF_ID, reply); 649 mpt2sas_scsih_event_callback(ioc, msix_index, reply);
645 650
646 /* ctl callback handler */ 651 /* ctl callback handler */
647 mpt2sas_ctl_event_callback(ioc, VF_ID, reply); 652 mpt2sas_ctl_event_callback(ioc, msix_index, reply);
653
654 return 1;
655}
656
657/**
658 * _base_get_cb_idx - obtain the callback index
659 * @ioc: per adapter object
660 * @smid: system request message index
661 *
662 * Return callback index.
663 */
664static u8
665_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
666{
667 int i;
668 u8 cb_idx = 0xFF;
669
670 if (smid >= ioc->hi_priority_smid) {
671 if (smid < ioc->internal_smid) {
672 i = smid - ioc->hi_priority_smid;
673 cb_idx = ioc->hpr_lookup[i].cb_idx;
674 } else {
675 i = smid - ioc->internal_smid;
676 cb_idx = ioc->internal_lookup[i].cb_idx;
677 }
678 } else {
679 i = smid - 1;
680 cb_idx = ioc->scsi_lookup[i].cb_idx;
681 }
682 return cb_idx;
648} 683}
649 684
650/** 685/**
@@ -680,7 +715,6 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
680{ 715{
681 u32 him_register; 716 u32 him_register;
682 717
683 writel(0, &ioc->chip->HostInterruptStatus);
684 him_register = readl(&ioc->chip->HostInterruptMask); 718 him_register = readl(&ioc->chip->HostInterruptMask);
685 him_register &= ~MPI2_HIM_RIM; 719 him_register &= ~MPI2_HIM_RIM;
686 writel(him_register, &ioc->chip->HostInterruptMask); 720 writel(him_register, &ioc->chip->HostInterruptMask);
@@ -712,9 +746,10 @@ _base_interrupt(int irq, void *bus_id)
712 u16 smid; 746 u16 smid;
713 u8 cb_idx; 747 u8 cb_idx;
714 u32 reply; 748 u32 reply;
715 u8 VF_ID; 749 u8 msix_index;
716 struct MPT2SAS_ADAPTER *ioc = bus_id; 750 struct MPT2SAS_ADAPTER *ioc = bus_id;
717 Mpi2ReplyDescriptorsUnion_t *rpf; 751 Mpi2ReplyDescriptorsUnion_t *rpf;
752 u8 rc;
718 753
719 if (ioc->mask_interrupts) 754 if (ioc->mask_interrupts)
720 return IRQ_NONE; 755 return IRQ_NONE;
@@ -733,7 +768,7 @@ _base_interrupt(int irq, void *bus_id)
733 reply = 0; 768 reply = 0;
734 cb_idx = 0xFF; 769 cb_idx = 0xFF;
735 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); 770 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
736 VF_ID = rpf->Default.VF_ID; 771 msix_index = rpf->Default.MSIxIndex;
737 if (request_desript_type == 772 if (request_desript_type ==
738 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 773 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
739 reply = le32_to_cpu 774 reply = le32_to_cpu
@@ -745,16 +780,18 @@ _base_interrupt(int irq, void *bus_id)
745 MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS) 780 MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
746 goto next; 781 goto next;
747 if (smid) 782 if (smid)
748 cb_idx = ioc->scsi_lookup[smid - 1].cb_idx; 783 cb_idx = _base_get_cb_idx(ioc, smid);
749 if (smid && cb_idx != 0xFF) { 784 if (smid && cb_idx != 0xFF) {
750 mpt_callbacks[cb_idx](ioc, smid, VF_ID, reply); 785 rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
786 reply);
751 if (reply) 787 if (reply)
752 _base_display_reply_info(ioc, smid, VF_ID, 788 _base_display_reply_info(ioc, smid, msix_index,
753 reply); 789 reply);
754 mpt2sas_base_free_smid(ioc, smid); 790 if (rc)
791 mpt2sas_base_free_smid(ioc, smid);
755 } 792 }
756 if (!smid) 793 if (!smid)
757 _base_async_event(ioc, VF_ID, reply); 794 _base_async_event(ioc, msix_index, reply);
758 795
759 /* reply free queue handling */ 796 /* reply free queue handling */
760 if (reply) { 797 if (reply) {
@@ -1191,19 +1228,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1191} 1228}
1192 1229
1193/** 1230/**
1194 * mpt2sas_base_get_msg_frame_dma - obtain request mf pointer phys addr
1195 * @ioc: per adapter object
1196 * @smid: system request message index(smid zero is invalid)
1197 *
1198 * Returns phys pointer to message frame.
1199 */
1200dma_addr_t
1201mpt2sas_base_get_msg_frame_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1202{
1203 return ioc->request_dma + (smid * ioc->request_sz);
1204}
1205
1206/**
1207 * mpt2sas_base_get_msg_frame - obtain request mf pointer 1231 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1208 * @ioc: per adapter object 1232 * @ioc: per adapter object
1209 * @smid: system request message index(smid zero is invalid) 1233 * @smid: system request message index(smid zero is invalid)
@@ -1258,7 +1282,7 @@ mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1258} 1282}
1259 1283
1260/** 1284/**
1261 * mpt2sas_base_get_smid - obtain a free smid 1285 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1262 * @ioc: per adapter object 1286 * @ioc: per adapter object
1263 * @cb_idx: callback index 1287 * @cb_idx: callback index
1264 * 1288 *
@@ -1272,6 +1296,39 @@ mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1272 u16 smid; 1296 u16 smid;
1273 1297
1274 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1298 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1299 if (list_empty(&ioc->internal_free_list)) {
1300 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1301 printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1302 ioc->name, __func__);
1303 return 0;
1304 }
1305
1306 request = list_entry(ioc->internal_free_list.next,
1307 struct request_tracker, tracker_list);
1308 request->cb_idx = cb_idx;
1309 smid = request->smid;
1310 list_del(&request->tracker_list);
1311 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1312 return smid;
1313}
1314
1315/**
1316 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1317 * @ioc: per adapter object
1318 * @cb_idx: callback index
1319 * @scmd: pointer to scsi command object
1320 *
1321 * Returns smid (zero is invalid)
1322 */
1323u16
1324mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1325 struct scsi_cmnd *scmd)
1326{
1327 unsigned long flags;
1328 struct request_tracker *request;
1329 u16 smid;
1330
1331 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1275 if (list_empty(&ioc->free_list)) { 1332 if (list_empty(&ioc->free_list)) {
1276 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1333 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1277 printk(MPT2SAS_ERR_FMT "%s: smid not available\n", 1334 printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
@@ -1281,6 +1338,36 @@ mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1281 1338
1282 request = list_entry(ioc->free_list.next, 1339 request = list_entry(ioc->free_list.next,
1283 struct request_tracker, tracker_list); 1340 struct request_tracker, tracker_list);
1341 request->scmd = scmd;
1342 request->cb_idx = cb_idx;
1343 smid = request->smid;
1344 list_del(&request->tracker_list);
1345 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1346 return smid;
1347}
1348
1349/**
1350 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1351 * @ioc: per adapter object
1352 * @cb_idx: callback index
1353 *
1354 * Returns smid (zero is invalid)
1355 */
1356u16
1357mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1358{
1359 unsigned long flags;
1360 struct request_tracker *request;
1361 u16 smid;
1362
1363 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1364 if (list_empty(&ioc->hpr_free_list)) {
1365 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1366 return 0;
1367 }
1368
1369 request = list_entry(ioc->hpr_free_list.next,
1370 struct request_tracker, tracker_list);
1284 request->cb_idx = cb_idx; 1371 request->cb_idx = cb_idx;
1285 smid = request->smid; 1372 smid = request->smid;
1286 list_del(&request->tracker_list); 1373 list_del(&request->tracker_list);
@@ -1300,10 +1387,32 @@ void
1300mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1387mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1301{ 1388{
1302 unsigned long flags; 1389 unsigned long flags;
1390 int i;
1303 1391
1304 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1392 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1305 ioc->scsi_lookup[smid - 1].cb_idx = 0xFF; 1393 if (smid >= ioc->hi_priority_smid) {
1306 list_add_tail(&ioc->scsi_lookup[smid - 1].tracker_list, 1394 if (smid < ioc->internal_smid) {
1395 /* hi-priority */
1396 i = smid - ioc->hi_priority_smid;
1397 ioc->hpr_lookup[i].cb_idx = 0xFF;
1398 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1399 &ioc->hpr_free_list);
1400 } else {
1401 /* internal queue */
1402 i = smid - ioc->internal_smid;
1403 ioc->internal_lookup[i].cb_idx = 0xFF;
1404 list_add_tail(&ioc->internal_lookup[i].tracker_list,
1405 &ioc->internal_free_list);
1406 }
1407 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1408 return;
1409 }
1410
1411 /* scsiio queue */
1412 i = smid - 1;
1413 ioc->scsi_lookup[i].cb_idx = 0xFF;
1414 ioc->scsi_lookup[i].scmd = NULL;
1415 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1307 &ioc->free_list); 1416 &ioc->free_list);
1308 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1417 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1309 1418
@@ -1352,21 +1461,19 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1352 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware 1461 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1353 * @ioc: per adapter object 1462 * @ioc: per adapter object
1354 * @smid: system request message index 1463 * @smid: system request message index
1355 * @vf_id: virtual function id
1356 * @handle: device handle 1464 * @handle: device handle
1357 * 1465 *
1358 * Return nothing. 1466 * Return nothing.
1359 */ 1467 */
1360void 1468void
1361mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id, 1469mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1362 u16 handle)
1363{ 1470{
1364 Mpi2RequestDescriptorUnion_t descriptor; 1471 Mpi2RequestDescriptorUnion_t descriptor;
1365 u64 *request = (u64 *)&descriptor; 1472 u64 *request = (u64 *)&descriptor;
1366 1473
1367 1474
1368 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1475 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1369 descriptor.SCSIIO.VF_ID = vf_id; 1476 descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
1370 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 1477 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1371 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 1478 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1372 descriptor.SCSIIO.LMID = 0; 1479 descriptor.SCSIIO.LMID = 0;
@@ -1379,20 +1486,18 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id,
1379 * mpt2sas_base_put_smid_hi_priority - send Task Managment request to firmware 1486 * mpt2sas_base_put_smid_hi_priority - send Task Managment request to firmware
1380 * @ioc: per adapter object 1487 * @ioc: per adapter object
1381 * @smid: system request message index 1488 * @smid: system request message index
1382 * @vf_id: virtual function id
1383 * 1489 *
1384 * Return nothing. 1490 * Return nothing.
1385 */ 1491 */
1386void 1492void
1387mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid, 1493mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1388 u8 vf_id)
1389{ 1494{
1390 Mpi2RequestDescriptorUnion_t descriptor; 1495 Mpi2RequestDescriptorUnion_t descriptor;
1391 u64 *request = (u64 *)&descriptor; 1496 u64 *request = (u64 *)&descriptor;
1392 1497
1393 descriptor.HighPriority.RequestFlags = 1498 descriptor.HighPriority.RequestFlags =
1394 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1499 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1395 descriptor.HighPriority.VF_ID = vf_id; 1500 descriptor.HighPriority.MSIxIndex = 0; /* TODO */
1396 descriptor.HighPriority.SMID = cpu_to_le16(smid); 1501 descriptor.HighPriority.SMID = cpu_to_le16(smid);
1397 descriptor.HighPriority.LMID = 0; 1502 descriptor.HighPriority.LMID = 0;
1398 descriptor.HighPriority.Reserved1 = 0; 1503 descriptor.HighPriority.Reserved1 = 0;
@@ -1404,18 +1509,17 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1404 * mpt2sas_base_put_smid_default - Default, primarily used for config pages 1509 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1405 * @ioc: per adapter object 1510 * @ioc: per adapter object
1406 * @smid: system request message index 1511 * @smid: system request message index
1407 * @vf_id: virtual function id
1408 * 1512 *
1409 * Return nothing. 1513 * Return nothing.
1410 */ 1514 */
1411void 1515void
1412mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id) 1516mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1413{ 1517{
1414 Mpi2RequestDescriptorUnion_t descriptor; 1518 Mpi2RequestDescriptorUnion_t descriptor;
1415 u64 *request = (u64 *)&descriptor; 1519 u64 *request = (u64 *)&descriptor;
1416 1520
1417 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1521 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1418 descriptor.Default.VF_ID = vf_id; 1522 descriptor.Default.MSIxIndex = 0; /* TODO */
1419 descriptor.Default.SMID = cpu_to_le16(smid); 1523 descriptor.Default.SMID = cpu_to_le16(smid);
1420 descriptor.Default.LMID = 0; 1524 descriptor.Default.LMID = 0;
1421 descriptor.Default.DescriptorTypeDependent = 0; 1525 descriptor.Default.DescriptorTypeDependent = 0;
@@ -1427,21 +1531,20 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id)
1427 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware 1531 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1428 * @ioc: per adapter object 1532 * @ioc: per adapter object
1429 * @smid: system request message index 1533 * @smid: system request message index
1430 * @vf_id: virtual function id
1431 * @io_index: value used to track the IO 1534 * @io_index: value used to track the IO
1432 * 1535 *
1433 * Return nothing. 1536 * Return nothing.
1434 */ 1537 */
1435void 1538void
1436mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid, 1539mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1437 u8 vf_id, u16 io_index) 1540 u16 io_index)
1438{ 1541{
1439 Mpi2RequestDescriptorUnion_t descriptor; 1542 Mpi2RequestDescriptorUnion_t descriptor;
1440 u64 *request = (u64 *)&descriptor; 1543 u64 *request = (u64 *)&descriptor;
1441 1544
1442 descriptor.SCSITarget.RequestFlags = 1545 descriptor.SCSITarget.RequestFlags =
1443 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; 1546 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1444 descriptor.SCSITarget.VF_ID = vf_id; 1547 descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
1445 descriptor.SCSITarget.SMID = cpu_to_le16(smid); 1548 descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1446 descriptor.SCSITarget.LMID = 0; 1549 descriptor.SCSITarget.LMID = 0;
1447 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index); 1550 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
@@ -1717,6 +1820,8 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1717 } 1820 }
1718 1821
1719 kfree(ioc->scsi_lookup); 1822 kfree(ioc->scsi_lookup);
1823 kfree(ioc->hpr_lookup);
1824 kfree(ioc->internal_lookup);
1720} 1825}
1721 1826
1722 1827
@@ -1736,7 +1841,6 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1736 u16 num_of_reply_frames; 1841 u16 num_of_reply_frames;
1737 u16 chains_needed_per_io; 1842 u16 chains_needed_per_io;
1738 u32 sz, total_sz; 1843 u32 sz, total_sz;
1739 u16 i;
1740 u32 retry_sz; 1844 u32 retry_sz;
1741 u16 max_request_credit; 1845 u16 max_request_credit;
1742 1846
@@ -1764,7 +1868,10 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1764 MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE : 1868 MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE :
1765 facts->RequestCredit; 1869 facts->RequestCredit;
1766 } 1870 }
1767 ioc->request_depth = max_request_credit; 1871
1872 ioc->hba_queue_depth = max_request_credit;
1873 ioc->hi_priority_depth = facts->HighPriorityCredit;
1874 ioc->internal_depth = ioc->hi_priority_depth + 5;
1768 1875
1769 /* request frame size */ 1876 /* request frame size */
1770 ioc->request_sz = facts->IOCRequestFrameSize * 4; 1877 ioc->request_sz = facts->IOCRequestFrameSize * 4;
@@ -1802,7 +1909,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1802 ioc->chains_needed_per_io = chains_needed_per_io; 1909 ioc->chains_needed_per_io = chains_needed_per_io;
1803 1910
1804 /* reply free queue sizing - taking into account for events */ 1911 /* reply free queue sizing - taking into account for events */
1805 num_of_reply_frames = ioc->request_depth + 32; 1912 num_of_reply_frames = ioc->hba_queue_depth + 32;
1806 1913
1807 /* number of replies frames can't be a multiple of 16 */ 1914 /* number of replies frames can't be a multiple of 16 */
1808 /* decrease number of reply frames by 1 */ 1915 /* decrease number of reply frames by 1 */
@@ -1823,7 +1930,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1823 * frames 1930 * frames
1824 */ 1931 */
1825 1932
1826 queue_size = ioc->request_depth + num_of_reply_frames + 1; 1933 queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
1827 /* round up to 16 byte boundary */ 1934 /* round up to 16 byte boundary */
1828 if (queue_size % 16) 1935 if (queue_size % 16)
1829 queue_size += 16 - (queue_size % 16); 1936 queue_size += 16 - (queue_size % 16);
@@ -1837,60 +1944,85 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1837 if (queue_diff % 16) 1944 if (queue_diff % 16)
1838 queue_diff += 16 - (queue_diff % 16); 1945 queue_diff += 16 - (queue_diff % 16);
1839 1946
1840 /* adjust request_depth, reply_free_queue_depth, 1947 /* adjust hba_queue_depth, reply_free_queue_depth,
1841 * and queue_size 1948 * and queue_size
1842 */ 1949 */
1843 ioc->request_depth -= queue_diff; 1950 ioc->hba_queue_depth -= queue_diff;
1844 ioc->reply_free_queue_depth -= queue_diff; 1951 ioc->reply_free_queue_depth -= queue_diff;
1845 queue_size -= queue_diff; 1952 queue_size -= queue_diff;
1846 } 1953 }
1847 ioc->reply_post_queue_depth = queue_size; 1954 ioc->reply_post_queue_depth = queue_size;
1848 1955
1849 /* max scsi host queue depth */
1850 ioc->shost->can_queue = ioc->request_depth - INTERNAL_CMDS_COUNT;
1851 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host queue: depth"
1852 "(%d)\n", ioc->name, ioc->shost->can_queue));
1853
1854 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: " 1956 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
1855 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " 1957 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
1856 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, 1958 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
1857 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, 1959 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
1858 ioc->chains_needed_per_io)); 1960 ioc->chains_needed_per_io));
1859 1961
1962 ioc->scsiio_depth = ioc->hba_queue_depth -
1963 ioc->hi_priority_depth - ioc->internal_depth;
1964
1965 /* set the scsi host can_queue depth
1966 * with some internal commands that could be outstanding
1967 */
1968 ioc->shost->can_queue = ioc->scsiio_depth - (2);
1969 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
1970 "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
1971
1860 /* contiguous pool for request and chains, 16 byte align, one extra " 1972 /* contiguous pool for request and chains, 16 byte align, one extra "
1861 * "frame for smid=0 1973 * "frame for smid=0
1862 */ 1974 */
1863 ioc->chain_depth = ioc->chains_needed_per_io * ioc->request_depth; 1975 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
1864 sz = ((ioc->request_depth + 1 + ioc->chain_depth) * ioc->request_sz); 1976 sz = ((ioc->scsiio_depth + 1 + ioc->chain_depth) * ioc->request_sz);
1977
1978 /* hi-priority queue */
1979 sz += (ioc->hi_priority_depth * ioc->request_sz);
1980
1981 /* internal queue */
1982 sz += (ioc->internal_depth * ioc->request_sz);
1865 1983
1866 ioc->request_dma_sz = sz; 1984 ioc->request_dma_sz = sz;
1867 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); 1985 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
1868 if (!ioc->request) { 1986 if (!ioc->request) {
1869 printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent " 1987 printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
1870 "failed: req_depth(%d), chains_per_io(%d), frame_sz(%d), " 1988 "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
1871 "total(%d kB)\n", ioc->name, ioc->request_depth, 1989 "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
1872 ioc->chains_needed_per_io, ioc->request_sz, sz/1024); 1990 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
1873 if (ioc->request_depth < MPT2SAS_SAS_QUEUE_DEPTH) 1991 if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
1874 goto out; 1992 goto out;
1875 retry_sz += 64; 1993 retry_sz += 64;
1876 ioc->request_depth = max_request_credit - retry_sz; 1994 ioc->hba_queue_depth = max_request_credit - retry_sz;
1877 goto retry_allocation; 1995 goto retry_allocation;
1878 } 1996 }
1879 1997
1880 if (retry_sz) 1998 if (retry_sz)
1881 printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent " 1999 printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
1882 "succeed: req_depth(%d), chains_per_io(%d), frame_sz(%d), " 2000 "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
1883 "total(%d kb)\n", ioc->name, ioc->request_depth, 2001 "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
1884 ioc->chains_needed_per_io, ioc->request_sz, sz/1024); 2002 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
1885 2003
1886 ioc->chain = ioc->request + ((ioc->request_depth + 1) * 2004
2005 /* hi-priority queue */
2006 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2007 ioc->request_sz);
2008 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2009 ioc->request_sz);
2010
2011 /* internal queue */
2012 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2013 ioc->request_sz);
2014 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2015 ioc->request_sz);
2016
2017 ioc->chain = ioc->internal + (ioc->internal_depth *
1887 ioc->request_sz); 2018 ioc->request_sz);
1888 ioc->chain_dma = ioc->request_dma + ((ioc->request_depth + 1) * 2019 ioc->chain_dma = ioc->internal_dma + (ioc->internal_depth *
1889 ioc->request_sz); 2020 ioc->request_sz);
2021
1890 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): " 2022 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
1891 "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, 2023 "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
1892 ioc->request, ioc->request_depth, ioc->request_sz, 2024 ioc->request, ioc->hba_queue_depth, ioc->request_sz,
1893 ((ioc->request_depth + 1) * ioc->request_sz)/1024)); 2025 (ioc->hba_queue_depth * ioc->request_sz)/1024));
1894 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth" 2026 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth"
1895 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain, 2027 "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain,
1896 ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth * 2028 ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
@@ -1899,7 +2031,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1899 ioc->name, (unsigned long long) ioc->request_dma)); 2031 ioc->name, (unsigned long long) ioc->request_dma));
1900 total_sz += sz; 2032 total_sz += sz;
1901 2033
1902 ioc->scsi_lookup = kcalloc(ioc->request_depth, 2034 ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
1903 sizeof(struct request_tracker), GFP_KERNEL); 2035 sizeof(struct request_tracker), GFP_KERNEL);
1904 if (!ioc->scsi_lookup) { 2036 if (!ioc->scsi_lookup) {
1905 printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n", 2037 printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
@@ -1907,12 +2039,38 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1907 goto out; 2039 goto out;
1908 } 2040 }
1909 2041
1910 /* initialize some bits */ 2042 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
1911 for (i = 0; i < ioc->request_depth; i++) 2043 "depth(%d)\n", ioc->name, ioc->request,
1912 ioc->scsi_lookup[i].smid = i + 1; 2044 ioc->scsiio_depth));
2045
2046 /* initialize hi-priority queue smid's */
2047 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2048 sizeof(struct request_tracker), GFP_KERNEL);
2049 if (!ioc->hpr_lookup) {
2050 printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2051 ioc->name);
2052 goto out;
2053 }
2054 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2055 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2056 "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2057 ioc->hi_priority_depth, ioc->hi_priority_smid));
2058
2059 /* initialize internal queue smid's */
2060 ioc->internal_lookup = kcalloc(ioc->internal_depth,
2061 sizeof(struct request_tracker), GFP_KERNEL);
2062 if (!ioc->internal_lookup) {
2063 printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2064 ioc->name);
2065 goto out;
2066 }
2067 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2068 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2069 "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2070 ioc->internal_depth, ioc->internal_smid));
1913 2071
1914 /* sense buffers, 4 byte align */ 2072 /* sense buffers, 4 byte align */
1915 sz = ioc->request_depth * SCSI_SENSE_BUFFERSIZE; 2073 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
1916 ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, 2074 ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
1917 0); 2075 0);
1918 if (!ioc->sense_dma_pool) { 2076 if (!ioc->sense_dma_pool) {
@@ -1929,7 +2087,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1929 } 2087 }
1930 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT 2088 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
1931 "sense pool(0x%p): depth(%d), element_size(%d), pool_size" 2089 "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
1932 "(%d kB)\n", ioc->name, ioc->sense, ioc->request_depth, 2090 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
1933 SCSI_SENSE_BUFFERSIZE, sz/1024)); 2091 SCSI_SENSE_BUFFERSIZE, sz/1024));
1934 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n", 2092 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
1935 ioc->name, (unsigned long long)ioc->sense_dma)); 2093 ioc->name, (unsigned long long)ioc->sense_dma));
@@ -2304,7 +2462,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2304 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 2462 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
2305 &ioc->chip->Doorbell); 2463 &ioc->chip->Doorbell);
2306 2464
2307 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 2465 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
2308 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2466 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2309 "int failed (line=%d)\n", ioc->name, __LINE__); 2467 "int failed (line=%d)\n", ioc->name, __LINE__);
2310 return -EFAULT; 2468 return -EFAULT;
@@ -2454,7 +2612,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
2454 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 2612 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2455 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 2613 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
2456 ioc->ioc_link_reset_in_progress = 1; 2614 ioc->ioc_link_reset_in_progress = 1;
2457 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 2615 mpt2sas_base_put_smid_default(ioc, smid);
2616 init_completion(&ioc->base_cmds.done);
2458 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 2617 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2459 msecs_to_jiffies(10000)); 2618 msecs_to_jiffies(10000));
2460 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 2619 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -2555,7 +2714,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
2555 request = mpt2sas_base_get_msg_frame(ioc, smid); 2714 request = mpt2sas_base_get_msg_frame(ioc, smid);
2556 ioc->base_cmds.smid = smid; 2715 ioc->base_cmds.smid = smid;
2557 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 2716 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
2558 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 2717 mpt2sas_base_put_smid_default(ioc, smid);
2718 init_completion(&ioc->base_cmds.done);
2559 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 2719 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2560 msecs_to_jiffies(10000)); 2720 msecs_to_jiffies(10000));
2561 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 2721 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -2701,13 +2861,12 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2701/** 2861/**
2702 * _base_send_ioc_init - send ioc_init to firmware 2862 * _base_send_ioc_init - send ioc_init to firmware
2703 * @ioc: per adapter object 2863 * @ioc: per adapter object
2704 * @VF_ID: virtual function id
2705 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2864 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2706 * 2865 *
2707 * Returns 0 for success, non-zero for failure. 2866 * Returns 0 for success, non-zero for failure.
2708 */ 2867 */
2709static int 2868static int
2710_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag) 2869_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2711{ 2870{
2712 Mpi2IOCInitRequest_t mpi_request; 2871 Mpi2IOCInitRequest_t mpi_request;
2713 Mpi2IOCInitReply_t mpi_reply; 2872 Mpi2IOCInitReply_t mpi_reply;
@@ -2719,7 +2878,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2719 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 2878 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
2720 mpi_request.Function = MPI2_FUNCTION_IOC_INIT; 2879 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
2721 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 2880 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2722 mpi_request.VF_ID = VF_ID; 2881 mpi_request.VF_ID = 0; /* TODO */
2882 mpi_request.VP_ID = 0;
2723 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); 2883 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
2724 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 2884 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
2725 2885
@@ -2795,13 +2955,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2795/** 2955/**
2796 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 2956 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
2797 * @ioc: per adapter object 2957 * @ioc: per adapter object
2798 * @VF_ID: virtual function id
2799 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2958 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2800 * 2959 *
2801 * Returns 0 for success, non-zero for failure. 2960 * Returns 0 for success, non-zero for failure.
2802 */ 2961 */
2803static int 2962static int
2804_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag) 2963_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2805{ 2964{
2806 Mpi2PortEnableRequest_t *mpi_request; 2965 Mpi2PortEnableRequest_t *mpi_request;
2807 u32 ioc_state; 2966 u32 ioc_state;
@@ -2829,9 +2988,11 @@ _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2829 ioc->base_cmds.smid = smid; 2988 ioc->base_cmds.smid = smid;
2830 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 2989 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
2831 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 2990 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
2832 mpi_request->VF_ID = VF_ID; 2991 mpi_request->VF_ID = 0; /* TODO */
2992 mpi_request->VP_ID = 0;
2833 2993
2834 mpt2sas_base_put_smid_default(ioc, smid, VF_ID); 2994 mpt2sas_base_put_smid_default(ioc, smid);
2995 init_completion(&ioc->base_cmds.done);
2835 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 2996 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2836 300*HZ); 2997 300*HZ);
2837 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 2998 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -2892,13 +3053,12 @@ _base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
2892/** 3053/**
2893 * _base_event_notification - send event notification 3054 * _base_event_notification - send event notification
2894 * @ioc: per adapter object 3055 * @ioc: per adapter object
2895 * @VF_ID: virtual function id
2896 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3056 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2897 * 3057 *
2898 * Returns 0 for success, non-zero for failure. 3058 * Returns 0 for success, non-zero for failure.
2899 */ 3059 */
2900static int 3060static int
2901_base_event_notification(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag) 3061_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2902{ 3062{
2903 Mpi2EventNotificationRequest_t *mpi_request; 3063 Mpi2EventNotificationRequest_t *mpi_request;
2904 unsigned long timeleft; 3064 unsigned long timeleft;
@@ -2926,11 +3086,13 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2926 ioc->base_cmds.smid = smid; 3086 ioc->base_cmds.smid = smid;
2927 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); 3087 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
2928 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 3088 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2929 mpi_request->VF_ID = VF_ID; 3089 mpi_request->VF_ID = 0; /* TODO */
3090 mpi_request->VP_ID = 0;
2930 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3091 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2931 mpi_request->EventMasks[i] = 3092 mpi_request->EventMasks[i] =
2932 le32_to_cpu(ioc->event_masks[i]); 3093 le32_to_cpu(ioc->event_masks[i]);
2933 mpt2sas_base_put_smid_default(ioc, smid, VF_ID); 3094 mpt2sas_base_put_smid_default(ioc, smid);
3095 init_completion(&ioc->base_cmds.done);
2934 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 3096 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
2935 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 3097 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2936 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 3098 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -2981,7 +3143,7 @@ mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
2981 return; 3143 return;
2982 3144
2983 mutex_lock(&ioc->base_cmds.mutex); 3145 mutex_lock(&ioc->base_cmds.mutex);
2984 _base_event_notification(ioc, 0, CAN_SLEEP); 3146 _base_event_notification(ioc, CAN_SLEEP);
2985 mutex_unlock(&ioc->base_cmds.mutex); 3147 mutex_unlock(&ioc->base_cmds.mutex);
2986} 3148}
2987 3149
@@ -3006,7 +3168,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3006 3168
3007 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "clear interrupts\n", 3169 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "clear interrupts\n",
3008 ioc->name)); 3170 ioc->name));
3009 writel(0, &ioc->chip->HostInterruptStatus);
3010 3171
3011 count = 0; 3172 count = 0;
3012 do { 3173 do {
@@ -3160,30 +3321,60 @@ _base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3160/** 3321/**
3161 * _base_make_ioc_operational - put controller in OPERATIONAL state 3322 * _base_make_ioc_operational - put controller in OPERATIONAL state
3162 * @ioc: per adapter object 3323 * @ioc: per adapter object
3163 * @VF_ID: virtual function id
3164 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3324 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3165 * 3325 *
3166 * Returns 0 for success, non-zero for failure. 3326 * Returns 0 for success, non-zero for failure.
3167 */ 3327 */
3168static int 3328static int
3169_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 3329_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3170 int sleep_flag)
3171{ 3330{
3172 int r, i; 3331 int r, i;
3173 unsigned long flags; 3332 unsigned long flags;
3174 u32 reply_address; 3333 u32 reply_address;
3334 u16 smid;
3335 struct _tr_list *delayed_tr, *delayed_tr_next;
3175 3336
3176 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3337 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3177 __func__)); 3338 __func__));
3178 3339
3340 /* clean the delayed target reset list */
3341 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
3342 &ioc->delayed_tr_list, list) {
3343 list_del(&delayed_tr->list);
3344 kfree(delayed_tr);
3345 }
3346
3179 /* initialize the scsi lookup free list */ 3347 /* initialize the scsi lookup free list */
3180 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3348 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3181 INIT_LIST_HEAD(&ioc->free_list); 3349 INIT_LIST_HEAD(&ioc->free_list);
3182 for (i = 0; i < ioc->request_depth; i++) { 3350 smid = 1;
3351 for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
3183 ioc->scsi_lookup[i].cb_idx = 0xFF; 3352 ioc->scsi_lookup[i].cb_idx = 0xFF;
3353 ioc->scsi_lookup[i].smid = smid;
3354 ioc->scsi_lookup[i].scmd = NULL;
3184 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 3355 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3185 &ioc->free_list); 3356 &ioc->free_list);
3186 } 3357 }
3358
3359 /* hi-priority queue */
3360 INIT_LIST_HEAD(&ioc->hpr_free_list);
3361 smid = ioc->hi_priority_smid;
3362 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
3363 ioc->hpr_lookup[i].cb_idx = 0xFF;
3364 ioc->hpr_lookup[i].smid = smid;
3365 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
3366 &ioc->hpr_free_list);
3367 }
3368
3369 /* internal queue */
3370 INIT_LIST_HEAD(&ioc->internal_free_list);
3371 smid = ioc->internal_smid;
3372 for (i = 0; i < ioc->internal_depth; i++, smid++) {
3373 ioc->internal_lookup[i].cb_idx = 0xFF;
3374 ioc->internal_lookup[i].smid = smid;
3375 list_add_tail(&ioc->internal_lookup[i].tracker_list,
3376 &ioc->internal_free_list);
3377 }
3187 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3378 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3188 3379
3189 /* initialize Reply Free Queue */ 3380 /* initialize Reply Free Queue */
@@ -3196,7 +3387,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3196 for (i = 0; i < ioc->reply_post_queue_depth; i++) 3387 for (i = 0; i < ioc->reply_post_queue_depth; i++)
3197 ioc->reply_post_free[i].Words = ULLONG_MAX; 3388 ioc->reply_post_free[i].Words = ULLONG_MAX;
3198 3389
3199 r = _base_send_ioc_init(ioc, VF_ID, sleep_flag); 3390 r = _base_send_ioc_init(ioc, sleep_flag);
3200 if (r) 3391 if (r)
3201 return r; 3392 return r;
3202 3393
@@ -3207,14 +3398,14 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3207 writel(0, &ioc->chip->ReplyPostHostIndex); 3398 writel(0, &ioc->chip->ReplyPostHostIndex);
3208 3399
3209 _base_unmask_interrupts(ioc); 3400 _base_unmask_interrupts(ioc);
3210 r = _base_event_notification(ioc, VF_ID, sleep_flag); 3401 r = _base_event_notification(ioc, sleep_flag);
3211 if (r) 3402 if (r)
3212 return r; 3403 return r;
3213 3404
3214 if (sleep_flag == CAN_SLEEP) 3405 if (sleep_flag == CAN_SLEEP)
3215 _base_static_config_pages(ioc); 3406 _base_static_config_pages(ioc);
3216 3407
3217 r = _base_send_port_enable(ioc, VF_ID, sleep_flag); 3408 r = _base_send_port_enable(ioc, sleep_flag);
3218 if (r) 3409 if (r)
3219 return r; 3410 return r;
3220 3411
@@ -3278,6 +3469,17 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3278 if (r) 3469 if (r)
3279 goto out_free_resources; 3470 goto out_free_resources;
3280 3471
3472 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
3473 sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
3474 if (!ioc->pfacts)
3475 goto out_free_resources;
3476
3477 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
3478 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
3479 if (r)
3480 goto out_free_resources;
3481 }
3482
3281 r = _base_allocate_memory_pools(ioc, CAN_SLEEP); 3483 r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
3282 if (r) 3484 if (r)
3283 goto out_free_resources; 3485 goto out_free_resources;
@@ -3286,7 +3488,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3286 3488
3287 /* base internal command bits */ 3489 /* base internal command bits */
3288 mutex_init(&ioc->base_cmds.mutex); 3490 mutex_init(&ioc->base_cmds.mutex);
3289 init_completion(&ioc->base_cmds.done);
3290 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3491 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3291 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3492 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3292 3493
@@ -3294,7 +3495,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3294 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3495 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3295 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3496 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3296 mutex_init(&ioc->transport_cmds.mutex); 3497 mutex_init(&ioc->transport_cmds.mutex);
3297 init_completion(&ioc->transport_cmds.done);
3298 3498
3299 /* task management internal command bits */ 3499 /* task management internal command bits */
3300 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3500 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
@@ -3310,7 +3510,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3310 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3510 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3311 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 3511 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
3312 mutex_init(&ioc->ctl_cmds.mutex); 3512 mutex_init(&ioc->ctl_cmds.mutex);
3313 init_completion(&ioc->ctl_cmds.done);
3314 3513
3315 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3514 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3316 ioc->event_masks[i] = -1; 3515 ioc->event_masks[i] = -1;
@@ -3327,18 +3526,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3327 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 3526 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
3328 _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL); 3527 _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL);
3329 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 3528 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
3330 3529 r = _base_make_ioc_operational(ioc, CAN_SLEEP);
3331 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
3332 sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
3333 if (!ioc->pfacts)
3334 goto out_free_resources;
3335
3336 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
3337 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
3338 if (r)
3339 goto out_free_resources;
3340 }
3341 r = _base_make_ioc_operational(ioc, 0, CAN_SLEEP);
3342 if (r) 3530 if (r)
3343 goto out_free_resources; 3531 goto out_free_resources;
3344 3532
@@ -3466,7 +3654,7 @@ _wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3466 3654
3467 /* pending command count */ 3655 /* pending command count */
3468 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3656 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3469 for (i = 0; i < ioc->request_depth; i++) 3657 for (i = 0; i < ioc->scsiio_depth; i++)
3470 if (ioc->scsi_lookup[i].cb_idx != 0xFF) 3658 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
3471 ioc->pending_io_count++; 3659 ioc->pending_io_count++;
3472 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3660 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -3490,7 +3678,7 @@ int
3490mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, 3678mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3491 enum reset_type type) 3679 enum reset_type type)
3492{ 3680{
3493 int r, i; 3681 int r;
3494 unsigned long flags; 3682 unsigned long flags;
3495 3683
3496 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 3684 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
@@ -3513,9 +3701,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3513 if (r) 3701 if (r)
3514 goto out; 3702 goto out;
3515 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); 3703 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
3516 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) 3704 r = _base_make_ioc_operational(ioc, sleep_flag);
3517 r = _base_make_ioc_operational(ioc, ioc->pfacts[i].VF_ID,
3518 sleep_flag);
3519 if (!r) 3705 if (!r)
3520 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); 3706 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
3521 out: 3707 out:
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2faab1e690e9..0cf6bc236e4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
6 * Copyright (C) 2007-2008 LSI Corporation 6 * Copyright (C) 2007-2009 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -69,10 +69,10 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "01.100.06.00" 72#define MPT2SAS_DRIVER_VERSION "02.100.03.00"
73#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 02
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 06 75#define MPT2SAS_BUILD_VERSION 03
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
@@ -264,6 +264,13 @@ struct _internal_cmd {
264 * SAS Topology Structures 264 * SAS Topology Structures
265 */ 265 */
266 266
267#define MPTSAS_STATE_TR_SEND 0x0001
268#define MPTSAS_STATE_TR_COMPLETE 0x0002
269#define MPTSAS_STATE_CNTRL_SEND 0x0004
270#define MPTSAS_STATE_CNTRL_COMPLETE 0x0008
271
272#define MPT2SAS_REQ_SAS_CNTRL 0x0010
273
267/** 274/**
268 * struct _sas_device - attached device information 275 * struct _sas_device - attached device information
269 * @list: sas device list 276 * @list: sas device list
@@ -300,6 +307,7 @@ struct _sas_device {
300 u16 slot; 307 u16 slot;
301 u8 hidden_raid_component; 308 u8 hidden_raid_component;
302 u8 responding; 309 u8 responding;
310 u16 state;
303}; 311};
304 312
305/** 313/**
@@ -436,6 +444,17 @@ struct request_tracker {
436 struct list_head tracker_list; 444 struct list_head tracker_list;
437}; 445};
438 446
447/**
448 * struct _tr_list - target reset list
449 * @handle: device handle
450 * @state: state machine
451 */
452struct _tr_list {
453 struct list_head list;
454 u16 handle;
455 u16 state;
456};
457
439typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); 458typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
440 459
441/** 460/**
@@ -510,8 +529,9 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
510 * @config_page_sz: config page size 529 * @config_page_sz: config page size
511 * @config_page: reserve memory for config page payload 530 * @config_page: reserve memory for config page payload
512 * @config_page_dma: 531 * @config_page_dma:
532 * @hba_queue_depth: hba request queue depth
513 * @sge_size: sg element size for either 32/64 bit 533 * @sge_size: sg element size for either 32/64 bit
514 * @request_depth: hba request queue depth 534 * @scsiio_depth: SCSI_IO queue depth
515 * @request_sz: per request frame size 535 * @request_sz: per request frame size
516 * @request: pool of request frames 536 * @request: pool of request frames
517 * @request_dma: 537 * @request_dma:
@@ -528,6 +548,18 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
528 * @chains_needed_per_io: max chains per io 548 * @chains_needed_per_io: max chains per io
529 * @chain_offset_value_for_main_message: location 1st sg in main 549 * @chain_offset_value_for_main_message: location 1st sg in main
530 * @chain_depth: total chains allocated 550 * @chain_depth: total chains allocated
551 * @hi_priority_smid:
552 * @hi_priority:
553 * @hi_priority_dma:
554 * @hi_priority_depth:
555 * @hpr_lookup:
556 * @hpr_free_list:
557 * @internal_smid:
558 * @internal:
559 * @internal_dma:
560 * @internal_depth:
561 * @internal_lookup:
562 * @internal_free_list:
531 * @sense: pool of sense 563 * @sense: pool of sense
532 * @sense_dma: 564 * @sense_dma:
533 * @sense_dma_pool: 565 * @sense_dma_pool:
@@ -597,6 +629,8 @@ struct MPT2SAS_ADAPTER {
597 u8 ctl_cb_idx; 629 u8 ctl_cb_idx;
598 u8 base_cb_idx; 630 u8 base_cb_idx;
599 u8 config_cb_idx; 631 u8 config_cb_idx;
632 u8 tm_tr_cb_idx;
633 u8 tm_sas_control_cb_idx;
600 struct _internal_cmd base_cmds; 634 struct _internal_cmd base_cmds;
601 struct _internal_cmd transport_cmds; 635 struct _internal_cmd transport_cmds;
602 struct _internal_cmd tm_cmds; 636 struct _internal_cmd tm_cmds;
@@ -643,9 +677,10 @@ struct MPT2SAS_ADAPTER {
643 void *config_page; 677 void *config_page;
644 dma_addr_t config_page_dma; 678 dma_addr_t config_page_dma;
645 679
646 /* request */ 680 /* scsiio request */
681 u16 hba_queue_depth;
647 u16 sge_size; 682 u16 sge_size;
648 u16 request_depth; 683 u16 scsiio_depth;
649 u16 request_sz; 684 u16 request_sz;
650 u8 *request; 685 u8 *request;
651 dma_addr_t request_dma; 686 dma_addr_t request_dma;
@@ -665,6 +700,22 @@ struct MPT2SAS_ADAPTER {
665 u16 chain_offset_value_for_main_message; 700 u16 chain_offset_value_for_main_message;
666 u16 chain_depth; 701 u16 chain_depth;
667 702
703 /* hi-priority queue */
704 u16 hi_priority_smid;
705 u8 *hi_priority;
706 dma_addr_t hi_priority_dma;
707 u16 hi_priority_depth;
708 struct request_tracker *hpr_lookup;
709 struct list_head hpr_free_list;
710
711 /* internal queue */
712 u16 internal_smid;
713 u8 *internal;
714 dma_addr_t internal_dma;
715 u16 internal_depth;
716 struct request_tracker *internal_lookup;
717 struct list_head internal_free_list;
718
668 /* sense */ 719 /* sense */
669 u8 *sense; 720 u8 *sense;
670 dma_addr_t sense_dma; 721 dma_addr_t sense_dma;
@@ -690,6 +741,8 @@ struct MPT2SAS_ADAPTER {
690 struct dma_pool *reply_post_free_dma_pool; 741 struct dma_pool *reply_post_free_dma_pool;
691 u32 reply_post_host_index; 742 u32 reply_post_host_index;
692 743
744 struct list_head delayed_tr_list;
745
693 /* diag buffer support */ 746 /* diag buffer support */
694 u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; 747 u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
695 u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT]; 748 u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -701,7 +754,7 @@ struct MPT2SAS_ADAPTER {
701 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; 754 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
702}; 755};
703 756
704typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, 757typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
705 u32 reply); 758 u32 reply);
706 759
707 760
@@ -720,22 +773,28 @@ int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
720void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid); 773void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid);
721void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid); 774void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
722void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); 775void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
723dma_addr_t mpt2sas_base_get_msg_frame_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid); 776dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
724dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid); 777 u16 smid);
778
779/* hi-priority queue */
780u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
781u16 mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
782 struct scsi_cmnd *scmd);
725 783
726u16 mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx); 784u16 mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
727void mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid); 785void mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid);
728void mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id, 786void mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid,
729 u16 handle); 787 u16 handle);
730void mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id); 788void mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid);
731void mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid, 789void mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
732 u8 vf_id, u16 io_index); 790 u16 io_index);
733void mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id); 791void mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid);
734void mpt2sas_base_initialize_callback_handler(void); 792void mpt2sas_base_initialize_callback_handler(void);
735u8 mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func); 793u8 mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func);
736void mpt2sas_base_release_callback_handler(u8 cb_idx); 794void mpt2sas_base_release_callback_handler(u8 cb_idx);
737 795
738void mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply); 796u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
797 u32 reply);
739void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr); 798void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);
740 799
741u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked); 800u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);
@@ -749,6 +808,8 @@ int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
749void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type); 808void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type);
750 809
751/* scsih shared API */ 810/* scsih shared API */
811u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
812 u32 reply);
752void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, 813void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
753 u8 type, u16 smid_task, ulong timeout); 814 u8 type, u16 smid_task, ulong timeout);
754void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 815void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
@@ -760,11 +821,11 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAP
760struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( 821struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
761 struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 822 struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
762 823
763void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply);
764void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); 824void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
765 825
766/* config shared API */ 826/* config shared API */
767void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply); 827u8 mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
828 u32 reply);
768int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys); 829int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
769int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, 830int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
770 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); 831 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
@@ -817,14 +878,17 @@ extern struct device_attribute *mpt2sas_host_attrs[];
817extern struct device_attribute *mpt2sas_dev_attrs[]; 878extern struct device_attribute *mpt2sas_dev_attrs[];
818void mpt2sas_ctl_init(void); 879void mpt2sas_ctl_init(void);
819void mpt2sas_ctl_exit(void); 880void mpt2sas_ctl_exit(void);
820void mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply); 881u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
882 u32 reply);
821void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); 883void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
822void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply); 884u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
885 u32 reply);
823void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, 886void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
824 Mpi2EventNotificationReply_t *mpi_reply); 887 Mpi2EventNotificationReply_t *mpi_reply);
825 888
826/* transport shared API */ 889/* transport shared API */
827void mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply); 890u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
891 u32 reply);
828struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, 892struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc,
829 u16 handle, u16 parent_handle); 893 u16 handle, u16 parent_handle);
830void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, 894void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
@@ -838,6 +902,8 @@ void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle,
838extern struct sas_function_template mpt2sas_transport_functions; 902extern struct sas_function_template mpt2sas_transport_functions;
839extern struct scsi_transport_template *mpt2sas_transport_template; 903extern struct scsi_transport_template *mpt2sas_transport_template;
840extern int scsi_internal_device_block(struct scsi_device *sdev); 904extern int scsi_internal_device_block(struct scsi_device *sdev);
905extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc,
906 u8 msix_index, u32 reply);
841extern int scsi_internal_device_unblock(struct scsi_device *sdev); 907extern int scsi_internal_device_unblock(struct scsi_device *sdev);
842 908
843#endif /* MPT2SAS_BASE_H_INCLUDED */ 909#endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index ab8c560865d8..594a389c6526 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
2 * This module provides common API for accessing firmware configuration pages 2 * This module provides common API for accessing firmware configuration pages
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
5 * Copyright (C) 2007-2008 LSI Corporation 5 * Copyright (C) 2007-2009 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -227,23 +227,25 @@ _config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
227 * mpt2sas_config_done - config page completion routine 227 * mpt2sas_config_done - config page completion routine
228 * @ioc: per adapter object 228 * @ioc: per adapter object
229 * @smid: system request message index 229 * @smid: system request message index
230 * @VF_ID: virtual function id 230 * @msix_index: MSIX table index supplied by the OS
231 * @reply: reply message frame(lower 32bit addr) 231 * @reply: reply message frame(lower 32bit addr)
232 * Context: none. 232 * Context: none.
233 * 233 *
234 * The callback handler when using _config_request. 234 * The callback handler when using _config_request.
235 * 235 *
236 * Return nothing. 236 * Return 1 meaning mf should be freed from _base_interrupt
237 * 0 means the mf is freed from this function.
237 */ 238 */
238void 239u8
239mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 240mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
241 u32 reply)
240{ 242{
241 MPI2DefaultReply_t *mpi_reply; 243 MPI2DefaultReply_t *mpi_reply;
242 244
243 if (ioc->config_cmds.status == MPT2_CMD_NOT_USED) 245 if (ioc->config_cmds.status == MPT2_CMD_NOT_USED)
244 return; 246 return 1;
245 if (ioc->config_cmds.smid != smid) 247 if (ioc->config_cmds.smid != smid)
246 return; 248 return 1;
247 ioc->config_cmds.status |= MPT2_CMD_COMPLETE; 249 ioc->config_cmds.status |= MPT2_CMD_COMPLETE;
248 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 250 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
249 if (mpi_reply) { 251 if (mpi_reply) {
@@ -257,6 +259,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
257#endif 259#endif
258 ioc->config_cmds.smid = USHORT_MAX; 260 ioc->config_cmds.smid = USHORT_MAX;
259 complete(&ioc->config_cmds.done); 261 complete(&ioc->config_cmds.done);
262 return 1;
260} 263}
261 264
262/** 265/**
@@ -303,6 +306,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
303 retry_count = 0; 306 retry_count = 0;
304 memset(&mem, 0, sizeof(struct config_request)); 307 memset(&mem, 0, sizeof(struct config_request));
305 308
309 mpi_request->VF_ID = 0; /* TODO */
310 mpi_request->VP_ID = 0;
311
306 if (config_page) { 312 if (config_page) {
307 mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; 313 mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
308 mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; 314 mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
@@ -380,7 +386,7 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
380 _config_display_some_debug(ioc, smid, "config_request", NULL); 386 _config_display_some_debug(ioc, smid, "config_request", NULL);
381#endif 387#endif
382 init_completion(&ioc->config_cmds.done); 388 init_completion(&ioc->config_cmds.done);
383 mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID); 389 mpt2sas_base_put_smid_default(ioc, smid);
384 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, 390 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
385 timeout*HZ); 391 timeout*HZ);
386 if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) { 392 if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index c2a51018910f..57d724633906 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
6 * Copyright (C) 2007-2008 LSI Corporation 6 * Copyright (C) 2007-2009 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -219,23 +219,25 @@ _ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
219 * mpt2sas_ctl_done - ctl module completion routine 219 * mpt2sas_ctl_done - ctl module completion routine
220 * @ioc: per adapter object 220 * @ioc: per adapter object
221 * @smid: system request message index 221 * @smid: system request message index
222 * @VF_ID: virtual function id 222 * @msix_index: MSIX table index supplied by the OS
223 * @reply: reply message frame(lower 32bit addr) 223 * @reply: reply message frame(lower 32bit addr)
224 * Context: none. 224 * Context: none.
225 * 225 *
226 * The callback handler when using ioc->ctl_cb_idx. 226 * The callback handler when using ioc->ctl_cb_idx.
227 * 227 *
228 * Return nothing. 228 * Return 1 meaning mf should be freed from _base_interrupt
229 * 0 means the mf is freed from this function.
229 */ 230 */
230void 231u8
231mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 232mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
233 u32 reply)
232{ 234{
233 MPI2DefaultReply_t *mpi_reply; 235 MPI2DefaultReply_t *mpi_reply;
234 236
235 if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED) 237 if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
236 return; 238 return 1;
237 if (ioc->ctl_cmds.smid != smid) 239 if (ioc->ctl_cmds.smid != smid)
238 return; 240 return 1;
239 ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE; 241 ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
240 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 242 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
241 if (mpi_reply) { 243 if (mpi_reply) {
@@ -247,6 +249,7 @@ mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
247#endif 249#endif
248 ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING; 250 ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
249 complete(&ioc->ctl_cmds.done); 251 complete(&ioc->ctl_cmds.done);
252 return 1;
250} 253}
251 254
252/** 255/**
@@ -328,22 +331,25 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
328/** 331/**
329 * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time) 332 * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time)
330 * @ioc: per adapter object 333 * @ioc: per adapter object
331 * @VF_ID: virtual function id 334 * @msix_index: MSIX table index supplied by the OS
332 * @reply: reply message frame(lower 32bit addr) 335 * @reply: reply message frame(lower 32bit addr)
333 * Context: interrupt. 336 * Context: interrupt.
334 * 337 *
335 * This function merely adds a new work task into ioc->firmware_event_thread. 338 * This function merely adds a new work task into ioc->firmware_event_thread.
336 * The tasks are worked from _firmware_event_work in user context. 339 * The tasks are worked from _firmware_event_work in user context.
337 * 340 *
338 * Return nothing. 341 * Return 1 meaning mf should be freed from _base_interrupt
342 * 0 means the mf is freed from this function.
339 */ 343 */
340void 344u8
341mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply) 345mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
346 u32 reply)
342{ 347{
343 Mpi2EventNotificationReply_t *mpi_reply; 348 Mpi2EventNotificationReply_t *mpi_reply;
344 349
345 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 350 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
346 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply); 351 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
352 return 1;
347} 353}
348 354
349/** 355/**
@@ -507,7 +513,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
507 513
508 handle = le16_to_cpu(tm_request->DevHandle); 514 handle = le16_to_cpu(tm_request->DevHandle);
509 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 515 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
510 for (i = ioc->request_depth; i && !found; i--) { 516 for (i = ioc->scsiio_depth; i && !found; i--) {
511 scmd = ioc->scsi_lookup[i - 1].scmd; 517 scmd = ioc->scsi_lookup[i - 1].scmd;
512 if (scmd == NULL || scmd->device == NULL || 518 if (scmd == NULL || scmd->device == NULL ||
513 scmd->device->hostdata == NULL) 519 scmd->device->hostdata == NULL)
@@ -614,7 +620,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
614 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n", 620 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
615 ioc->name, __func__); 621 ioc->name, __func__);
616 622
617 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx); 623 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
618 if (!smid) { 624 if (!smid) {
619 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 625 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
620 ioc->name, __func__); 626 ioc->name, __func__);
@@ -737,7 +743,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
737 (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); 743 (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
738 priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid); 744 priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
739 memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE); 745 memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
740 mpt2sas_base_put_smid_scsi_io(ioc, smid, 0, 746 mpt2sas_base_put_smid_scsi_io(ioc, smid,
741 le16_to_cpu(mpi_request->FunctionDependent1)); 747 le16_to_cpu(mpi_request->FunctionDependent1));
742 break; 748 break;
743 } 749 }
@@ -759,8 +765,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
759 mutex_lock(&ioc->tm_cmds.mutex); 765 mutex_lock(&ioc->tm_cmds.mutex);
760 mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu( 766 mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
761 tm_request->DevHandle)); 767 tm_request->DevHandle));
762 mpt2sas_base_put_smid_hi_priority(ioc, smid, 768 mpt2sas_base_put_smid_hi_priority(ioc, smid);
763 mpi_request->VF_ID);
764 break; 769 break;
765 } 770 }
766 case MPI2_FUNCTION_SMP_PASSTHROUGH: 771 case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -781,7 +786,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
781 ioc->ioc_link_reset_in_progress = 1; 786 ioc->ioc_link_reset_in_progress = 1;
782 ioc->ignore_loginfos = 1; 787 ioc->ignore_loginfos = 1;
783 } 788 }
784 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 789 mpt2sas_base_put_smid_default(ioc, smid);
785 break; 790 break;
786 } 791 }
787 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 792 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -795,11 +800,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
795 ioc->ioc_link_reset_in_progress = 1; 800 ioc->ioc_link_reset_in_progress = 1;
796 ioc->ignore_loginfos = 1; 801 ioc->ignore_loginfos = 1;
797 } 802 }
798 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 803 mpt2sas_base_put_smid_default(ioc, smid);
799 break; 804 break;
800 } 805 }
801 default: 806 default:
802 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 807 mpt2sas_base_put_smid_default(ioc, smid);
803 break; 808 break;
804 } 809 }
805 810
@@ -807,6 +812,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
807 timeout = MPT2_IOCTL_DEFAULT_TIMEOUT; 812 timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
808 else 813 else
809 timeout = karg.timeout; 814 timeout = karg.timeout;
815 init_completion(&ioc->ctl_cmds.done);
810 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 816 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
811 timeout*HZ); 817 timeout*HZ);
812 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 818 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1371,6 +1377,8 @@ _ctl_diag_register(void __user *arg, enum block_state state)
1371 mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags); 1377 mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags);
1372 mpi_request->BufferAddress = cpu_to_le64(request_data_dma); 1378 mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1373 mpi_request->BufferLength = cpu_to_le32(request_data_sz); 1379 mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1380 mpi_request->VF_ID = 0; /* TODO */
1381 mpi_request->VP_ID = 0;
1374 1382
1375 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), " 1383 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
1376 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data, 1384 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
@@ -1380,7 +1388,8 @@ _ctl_diag_register(void __user *arg, enum block_state state)
1380 mpi_request->ProductSpecific[i] = 1388 mpi_request->ProductSpecific[i] =
1381 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1389 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1382 1390
1383 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 1391 mpt2sas_base_put_smid_default(ioc, smid);
1392 init_completion(&ioc->ctl_cmds.done);
1384 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1393 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1385 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 1394 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1386 1395
@@ -1643,8 +1652,11 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1643 1652
1644 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; 1653 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1645 mpi_request->BufferType = buffer_type; 1654 mpi_request->BufferType = buffer_type;
1655 mpi_request->VF_ID = 0; /* TODO */
1656 mpi_request->VP_ID = 0;
1646 1657
1647 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 1658 mpt2sas_base_put_smid_default(ioc, smid);
1659 init_completion(&ioc->ctl_cmds.done);
1648 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1660 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1649 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 1661 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1650 1662
@@ -1902,8 +1914,11 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
1902 for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++) 1914 for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
1903 mpi_request->ProductSpecific[i] = 1915 mpi_request->ProductSpecific[i] =
1904 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1916 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1917 mpi_request->VF_ID = 0; /* TODO */
1918 mpi_request->VP_ID = 0;
1905 1919
1906 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID); 1920 mpt2sas_base_put_smid_default(ioc, smid);
1921 init_completion(&ioc->ctl_cmds.done);
1907 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1922 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1908 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 1923 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1909 1924
@@ -2069,6 +2084,7 @@ static long
2069_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2084_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2070{ 2085{
2071 long ret; 2086 long ret;
2087
2072 lock_kernel(); 2088 lock_kernel();
2073 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg); 2089 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
2074 unlock_kernel(); 2090 unlock_kernel();
@@ -2143,6 +2159,7 @@ static long
2143_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2159_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2144{ 2160{
2145 long ret; 2161 long ret;
2162
2146 lock_kernel(); 2163 lock_kernel();
2147 if (cmd == MPT2COMMAND32) 2164 if (cmd == MPT2COMMAND32)
2148 ret = _ctl_compat_mpt_command(file, cmd, arg); 2165 ret = _ctl_compat_mpt_command(file, cmd, arg);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 4da11435533f..211f296dd191 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
6 * Copyright (C) 2007-2008 LSI Corporation 6 * Copyright (C) 2007-2009 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index ad325096e842..5308a25cb307 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
2 * Logging Support for MPT (Message Passing Technology) based controllers 2 * Logging Support for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
5 * Copyright (C) 2007-2008 LSI Corporation 5 * Copyright (C) 2007-2009 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 774b34525bba..86ab32d7ab15 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
5 * Copyright (C) 2007-2008 LSI Corporation 5 * Copyright (C) 2007-2009 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -79,6 +79,9 @@ static u8 transport_cb_idx = -1;
79static u8 config_cb_idx = -1; 79static u8 config_cb_idx = -1;
80static int mpt_ids; 80static int mpt_ids;
81 81
82static u8 tm_tr_cb_idx = -1 ;
83static u8 tm_sas_control_cb_idx = -1;
84
82/* command line options */ 85/* command line options */
83static u32 logging_level; 86static u32 logging_level;
84MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info " 87MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
@@ -109,6 +112,7 @@ struct sense_info {
109 * @work: work object (ioc->fault_reset_work_q) 112 * @work: work object (ioc->fault_reset_work_q)
110 * @ioc: per adapter object 113 * @ioc: per adapter object
111 * @VF_ID: virtual function id 114 * @VF_ID: virtual function id
115 * @VP_ID: virtual port id
112 * @host_reset_handling: handling events during host reset 116 * @host_reset_handling: handling events during host reset
113 * @ignore: flag meaning this event has been marked to ignore 117 * @ignore: flag meaning this event has been marked to ignore
114 * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h 118 * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
@@ -121,6 +125,7 @@ struct fw_event_work {
121 struct work_struct work; 125 struct work_struct work;
122 struct MPT2SAS_ADAPTER *ioc; 126 struct MPT2SAS_ADAPTER *ioc;
123 u8 VF_ID; 127 u8 VF_ID;
128 u8 VP_ID;
124 u8 host_reset_handling; 129 u8 host_reset_handling;
125 u8 ignore; 130 u8 ignore;
126 u16 event; 131 u16 event;
@@ -138,8 +143,10 @@ struct fw_event_work {
138 * @lun: lun number 143 * @lun: lun number
139 * @cdb_length: cdb length 144 * @cdb_length: cdb length
140 * @cdb: cdb contents 145 * @cdb: cdb contents
141 * @valid_reply: flag set for reply message
142 * @timeout: timeout for this command 146 * @timeout: timeout for this command
147 * @VF_ID: virtual function id
148 * @VP_ID: virtual port id
149 * @valid_reply: flag set for reply message
143 * @sense_length: sense length 150 * @sense_length: sense length
144 * @ioc_status: ioc status 151 * @ioc_status: ioc status
145 * @scsi_state: scsi state 152 * @scsi_state: scsi state
@@ -161,6 +168,8 @@ struct _scsi_io_transfer {
161 u8 cdb_length; 168 u8 cdb_length;
162 u8 cdb[32]; 169 u8 cdb[32];
163 u8 timeout; 170 u8 timeout;
171 u8 VF_ID;
172 u8 VP_ID;
164 u8 valid_reply; 173 u8 valid_reply;
165 /* the following bits are only valid when 'valid_reply = 1' */ 174 /* the following bits are only valid when 'valid_reply = 1' */
166 u32 sense_length; 175 u32 sense_length;
@@ -756,66 +765,16 @@ _scsih_is_end_device(u32 device_info)
756} 765}
757 766
758/** 767/**
759 * _scsih_scsi_lookup_get - returns scmd entry 768 * mptscsih_get_scsi_lookup - returns scmd entry
760 * @ioc: per adapter object 769 * @ioc: per adapter object
761 * @smid: system request message index 770 * @smid: system request message index
762 * Context: This function will acquire ioc->scsi_lookup_lock.
763 * 771 *
764 * Returns the smid stored scmd pointer. 772 * Returns the smid stored scmd pointer.
765 */ 773 */
766static struct scsi_cmnd * 774static struct scsi_cmnd *
767_scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid) 775_scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
768{ 776{
769 unsigned long flags; 777 return ioc->scsi_lookup[smid - 1].scmd;
770 struct scsi_cmnd *scmd;
771
772 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
773 scmd = ioc->scsi_lookup[smid - 1].scmd;
774 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
775 return scmd;
776}
777
778/**
779 * mptscsih_getclear_scsi_lookup - returns scmd entry
780 * @ioc: per adapter object
781 * @smid: system request message index
782 * Context: This function will acquire ioc->scsi_lookup_lock.
783 *
784 * Returns the smid stored scmd pointer, as well as clearing the scmd pointer.
785 */
786static struct scsi_cmnd *
787_scsih_scsi_lookup_getclear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
788{
789 unsigned long flags;
790 struct scsi_cmnd *scmd;
791
792 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
793 scmd = ioc->scsi_lookup[smid - 1].scmd;
794 ioc->scsi_lookup[smid - 1].scmd = NULL;
795 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
796 return scmd;
797}
798
799/**
800 * _scsih_scsi_lookup_set - updates scmd entry in lookup
801 * @ioc: per adapter object
802 * @smid: system request message index
803 * @scmd: pointer to scsi command object
804 * Context: This function will acquire ioc->scsi_lookup_lock.
805 *
806 * This will save scmd pointer in the scsi_lookup array.
807 *
808 * Return nothing.
809 */
810static void
811_scsih_scsi_lookup_set(struct MPT2SAS_ADAPTER *ioc, u16 smid,
812 struct scsi_cmnd *scmd)
813{
814 unsigned long flags;
815
816 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
817 ioc->scsi_lookup[smid - 1].scmd = scmd;
818 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
819} 778}
820 779
821/** 780/**
@@ -838,9 +797,9 @@ _scsih_scsi_lookup_find_by_scmd(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd
838 797
839 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 798 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
840 smid = 0; 799 smid = 0;
841 for (i = 0; i < ioc->request_depth; i++) { 800 for (i = 0; i < ioc->scsiio_depth; i++) {
842 if (ioc->scsi_lookup[i].scmd == scmd) { 801 if (ioc->scsi_lookup[i].scmd == scmd) {
843 smid = i + 1; 802 smid = ioc->scsi_lookup[i].smid;
844 goto out; 803 goto out;
845 } 804 }
846 } 805 }
@@ -869,7 +828,7 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
869 828
870 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 829 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
871 found = 0; 830 found = 0;
872 for (i = 0 ; i < ioc->request_depth; i++) { 831 for (i = 0 ; i < ioc->scsiio_depth; i++) {
873 if (ioc->scsi_lookup[i].scmd && 832 if (ioc->scsi_lookup[i].scmd &&
874 (ioc->scsi_lookup[i].scmd->device->id == id && 833 (ioc->scsi_lookup[i].scmd->device->id == id &&
875 ioc->scsi_lookup[i].scmd->device->channel == channel)) { 834 ioc->scsi_lookup[i].scmd->device->channel == channel)) {
@@ -903,7 +862,7 @@ _scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
903 862
904 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 863 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
905 found = 0; 864 found = 0;
906 for (i = 0 ; i < ioc->request_depth; i++) { 865 for (i = 0 ; i < ioc->scsiio_depth; i++) {
907 if (ioc->scsi_lookup[i].scmd && 866 if (ioc->scsi_lookup[i].scmd &&
908 (ioc->scsi_lookup[i].scmd->device->id == id && 867 (ioc->scsi_lookup[i].scmd->device->id == id &&
909 ioc->scsi_lookup[i].scmd->device->channel == channel && 868 ioc->scsi_lookup[i].scmd->device->channel == channel &&
@@ -1113,7 +1072,7 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1113} 1072}
1114 1073
1115/** 1074/**
1116 * _scsih_change_queue_depth - changing device queue tag type 1075 * _scsih_change_queue_type - changing device queue tag type
1117 * @sdev: scsi device struct 1076 * @sdev: scsi device struct
1118 * @tag_type: requested tag type 1077 * @tag_type: requested tag type
1119 * 1078 *
@@ -1679,23 +1638,24 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1679 * _scsih_tm_done - tm completion routine 1638 * _scsih_tm_done - tm completion routine
1680 * @ioc: per adapter object 1639 * @ioc: per adapter object
1681 * @smid: system request message index 1640 * @smid: system request message index
1682 * @VF_ID: virtual function id 1641 * @msix_index: MSIX table index supplied by the OS
1683 * @reply: reply message frame(lower 32bit addr) 1642 * @reply: reply message frame(lower 32bit addr)
1684 * Context: none. 1643 * Context: none.
1685 * 1644 *
1686 * The callback handler when using scsih_issue_tm. 1645 * The callback handler when using scsih_issue_tm.
1687 * 1646 *
1688 * Return nothing. 1647 * Return 1 meaning mf should be freed from _base_interrupt
1648 * 0 means the mf is freed from this function.
1689 */ 1649 */
1690static void 1650static u8
1691_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1651_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
1692{ 1652{
1693 MPI2DefaultReply_t *mpi_reply; 1653 MPI2DefaultReply_t *mpi_reply;
1694 1654
1695 if (ioc->tm_cmds.status == MPT2_CMD_NOT_USED) 1655 if (ioc->tm_cmds.status == MPT2_CMD_NOT_USED)
1696 return; 1656 return 1;
1697 if (ioc->tm_cmds.smid != smid) 1657 if (ioc->tm_cmds.smid != smid)
1698 return; 1658 return 1;
1699 ioc->tm_cmds.status |= MPT2_CMD_COMPLETE; 1659 ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
1700 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 1660 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
1701 if (mpi_reply) { 1661 if (mpi_reply) {
@@ -1704,6 +1664,7 @@ _scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1704 } 1664 }
1705 ioc->tm_cmds.status &= ~MPT2_CMD_PENDING; 1665 ioc->tm_cmds.status &= ~MPT2_CMD_PENDING;
1706 complete(&ioc->tm_cmds.done); 1666 complete(&ioc->tm_cmds.done);
1667 return 1;
1707} 1668}
1708 1669
1709/** 1670/**
@@ -1790,7 +1751,6 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1790 u16 smid = 0; 1751 u16 smid = 0;
1791 u32 ioc_state; 1752 u32 ioc_state;
1792 unsigned long timeleft; 1753 unsigned long timeleft;
1793 u8 VF_ID = 0;
1794 1754
1795 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) { 1755 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
1796 printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n", 1756 printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
@@ -1817,7 +1777,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1817 goto issue_host_reset; 1777 goto issue_host_reset;
1818 } 1778 }
1819 1779
1820 smid = mpt2sas_base_get_smid(ioc, ioc->tm_cb_idx); 1780 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
1821 if (!smid) { 1781 if (!smid) {
1822 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 1782 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1823 ioc->name, __func__); 1783 ioc->name, __func__);
@@ -1825,7 +1785,8 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1825 } 1785 }
1826 1786
1827 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x)," 1787 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
1828 " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type, smid)); 1788 " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
1789 smid_task));
1829 ioc->tm_cmds.status = MPT2_CMD_PENDING; 1790 ioc->tm_cmds.status = MPT2_CMD_PENDING;
1830 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 1791 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1831 ioc->tm_cmds.smid = smid; 1792 ioc->tm_cmds.smid = smid;
@@ -1834,10 +1795,12 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1834 mpi_request->DevHandle = cpu_to_le16(handle); 1795 mpi_request->DevHandle = cpu_to_le16(handle);
1835 mpi_request->TaskType = type; 1796 mpi_request->TaskType = type;
1836 mpi_request->TaskMID = cpu_to_le16(smid_task); 1797 mpi_request->TaskMID = cpu_to_le16(smid_task);
1798 mpi_request->VP_ID = 0; /* TODO */
1799 mpi_request->VF_ID = 0;
1837 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); 1800 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
1838 mpt2sas_scsih_set_tm_flag(ioc, handle); 1801 mpt2sas_scsih_set_tm_flag(ioc, handle);
1839 init_completion(&ioc->tm_cmds.done); 1802 init_completion(&ioc->tm_cmds.done);
1840 mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID); 1803 mpt2sas_base_put_smid_hi_priority(ioc, smid);
1841 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); 1804 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
1842 mpt2sas_scsih_clear_tm_flag(ioc, handle); 1805 mpt2sas_scsih_clear_tm_flag(ioc, handle);
1843 if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) { 1806 if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -2075,7 +2038,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2075} 2038}
2076 2039
2077/** 2040/**
2078 * _scsih_abort - eh threads main host reset routine 2041 * _scsih_host_reset - eh threads main host reset routine
2079 * @sdev: scsi device struct 2042 * @sdev: scsi device struct
2080 * 2043 *
2081 * Returns SUCCESS if command aborted else FAILED 2044 * Returns SUCCESS if command aborted else FAILED
@@ -2354,6 +2317,231 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2354} 2317}
2355 2318
2356/** 2319/**
2320 * _scsih_tm_tr_send - send task management request
2321 * @ioc: per adapter object
2322 * @handle: device handle
2323 * Context: interrupt time.
2324 *
2325 * This code is to initiate the device removal handshake protocal
2326 * with controller firmware. This function will issue target reset
2327 * using high priority request queue. It will send a sas iounit
2328 * controll request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
2329 *
2330 * This is designed to send muliple task management request at the same
2331 * time to the fifo. If the fifo is full, we will append the request,
2332 * and process it in a future completion.
2333 */
2334static void
2335_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2336{
2337 Mpi2SCSITaskManagementRequest_t *mpi_request;
2338 struct MPT2SAS_TARGET *sas_target_priv_data;
2339 u16 smid;
2340 struct _sas_device *sas_device;
2341 unsigned long flags;
2342 struct _tr_list *delayed_tr;
2343
2344 if (ioc->shost_recovery) {
2345 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
2346 __func__, ioc->name);
2347 return;
2348 }
2349
2350 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2351 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2352 if (!sas_device) {
2353 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2354 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2355 ioc->name, __func__);
2356 return;
2357 }
2358 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2359
2360 /* skip is hidden raid component */
2361 if (sas_device->hidden_raid_component)
2362 return;
2363
2364 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
2365 if (!smid) {
2366 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
2367 if (!delayed_tr)
2368 return;
2369 INIT_LIST_HEAD(&delayed_tr->list);
2370 delayed_tr->handle = handle;
2371 delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
2372 list_add_tail(&delayed_tr->list,
2373 &ioc->delayed_tr_list);
2374 if (sas_device->starget)
2375 dewtprintk(ioc, starget_printk(KERN_INFO,
2376 sas_device->starget, "DELAYED:tr:handle(0x%04x), "
2377 "(open)\n", sas_device->handle));
2378 return;
2379 }
2380
2381 if (sas_device->starget && sas_device->starget->hostdata) {
2382 sas_target_priv_data = sas_device->starget->hostdata;
2383 sas_target_priv_data->tm_busy = 1;
2384 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
2385 "tr:handle(0x%04x), (open)\n", sas_device->handle));
2386 }
2387
2388 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2389 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2390 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2391 mpi_request->DevHandle = cpu_to_le16(handle);
2392 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2393 sas_device->state |= MPTSAS_STATE_TR_SEND;
2394 sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
2395 mpt2sas_base_put_smid_hi_priority(ioc, smid);
2396}
2397
2398
2399
2400/**
2401 * _scsih_sas_control_complete - completion routine
2402 * @ioc: per adapter object
2403 * @smid: system request message index
2404 * @msix_index: MSIX table index supplied by the OS
2405 * @reply: reply message frame(lower 32bit addr)
2406 * Context: interrupt time.
2407 *
2408 * This is the sas iounit controll completion routine.
2409 * This code is part of the code to initiate the device removal
2410 * handshake protocal with controller firmware.
2411 *
2412 * Return 1 meaning mf should be freed from _base_interrupt
2413 * 0 means the mf is freed from this function.
2414 */
2415static u8
2416_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2417 u8 msix_index, u32 reply)
2418{
2419 unsigned long flags;
2420 u16 handle;
2421 struct _sas_device *sas_device;
2422 Mpi2SasIoUnitControlReply_t *mpi_reply =
2423 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2424
2425 handle = le16_to_cpu(mpi_reply->DevHandle);
2426
2427 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2428 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2429 if (!sas_device) {
2430 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2431 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2432 ioc->name, __func__);
2433 return 1;
2434 }
2435 sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
2436 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2437
2438 if (sas_device->starget)
2439 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
2440 "sc_complete:handle(0x%04x), "
2441 "ioc_status(0x%04x), loginfo(0x%08x)\n",
2442 handle, le16_to_cpu(mpi_reply->IOCStatus),
2443 le32_to_cpu(mpi_reply->IOCLogInfo)));
2444 return 1;
2445}
2446
2447/**
2448 * _scsih_tm_tr_complete -
2449 * @ioc: per adapter object
2450 * @smid: system request message index
2451 * @msix_index: MSIX table index supplied by the OS
2452 * @reply: reply message frame(lower 32bit addr)
2453 * Context: interrupt time.
2454 *
2455 * This is the target reset completion routine.
2456 * This code is part of the code to initiate the device removal
2457 * handshake protocal with controller firmware.
2458 * It will send a sas iounit controll request (MPI2_SAS_OP_REMOVE_DEVICE)
2459 *
2460 * Return 1 meaning mf should be freed from _base_interrupt
2461 * 0 means the mf is freed from this function.
2462 */
2463static u8
2464_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2465 u32 reply)
2466{
2467 unsigned long flags;
2468 u16 handle;
2469 struct _sas_device *sas_device;
2470 Mpi2SCSITaskManagementReply_t *mpi_reply =
2471 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2472 Mpi2SasIoUnitControlRequest_t *mpi_request;
2473 u16 smid_sas_ctrl;
2474 struct MPT2SAS_TARGET *sas_target_priv_data;
2475 struct _tr_list *delayed_tr;
2476 u8 rc;
2477
2478 handle = le16_to_cpu(mpi_reply->DevHandle);
2479 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2480 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2481 if (!sas_device) {
2482 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2483 printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
2484 ioc->name, __func__);
2485 return 1;
2486 }
2487 sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
2488 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2489
2490 if (sas_device->starget)
2491 dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
2492 "tr_complete:handle(0x%04x), (%s) ioc_status(0x%04x), "
2493 "loginfo(0x%08x), completed(%d)\n",
2494 sas_device->handle, (sas_device->state &
2495 MPT2SAS_REQ_SAS_CNTRL) ? "open" : "active",
2496 le16_to_cpu(mpi_reply->IOCStatus),
2497 le32_to_cpu(mpi_reply->IOCLogInfo),
2498 le32_to_cpu(mpi_reply->TerminationCount)));
2499
2500 if (sas_device->starget && sas_device->starget->hostdata) {
2501 sas_target_priv_data = sas_device->starget->hostdata;
2502 sas_target_priv_data->tm_busy = 0;
2503 }
2504
2505 if (!list_empty(&ioc->delayed_tr_list)) {
2506 delayed_tr = list_entry(ioc->delayed_tr_list.next,
2507 struct _tr_list, list);
2508 mpt2sas_base_free_smid(ioc, smid);
2509 if (delayed_tr->state & MPT2SAS_REQ_SAS_CNTRL)
2510 _scsih_tm_tr_send(ioc, delayed_tr->handle);
2511 list_del(&delayed_tr->list);
2512 kfree(delayed_tr);
2513 rc = 0; /* tells base_interrupt not to free mf */
2514 } else
2515 rc = 1;
2516
2517
2518 if (!(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
2519 return rc;
2520
2521 if (ioc->shost_recovery) {
2522 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
2523 __func__, ioc->name);
2524 return rc;
2525 }
2526
2527 smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
2528 if (!smid_sas_ctrl) {
2529 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2530 ioc->name, __func__);
2531 return rc;
2532 }
2533
2534 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
2535 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
2536 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
2537 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
2538 mpi_request->DevHandle = mpi_reply->DevHandle;
2539 sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
2540 mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
2541 return rc;
2542}
2543
2544/**
2357 * _scsih_check_topo_delete_events - sanity check on topo events 2545 * _scsih_check_topo_delete_events - sanity check on topo events
2358 * @ioc: per adapter object 2546 * @ioc: per adapter object
2359 * @event_data: the event data payload 2547 * @event_data: the event data payload
@@ -2375,6 +2563,21 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
2375 u16 expander_handle; 2563 u16 expander_handle;
2376 struct _sas_node *sas_expander; 2564 struct _sas_node *sas_expander;
2377 unsigned long flags; 2565 unsigned long flags;
2566 int i, reason_code;
2567 u16 handle;
2568
2569 for (i = 0 ; i < event_data->NumEntries; i++) {
2570 if (event_data->PHY[i].PhyStatus &
2571 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
2572 continue;
2573 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
2574 if (!handle)
2575 continue;
2576 reason_code = event_data->PHY[i].PhyStatus &
2577 MPI2_EVENT_SAS_TOPO_RC_MASK;
2578 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
2579 _scsih_tm_tr_send(ioc, handle);
2580 }
2378 2581
2379 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); 2582 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
2380 if (expander_handle < ioc->sas_hba.num_phys) { 2583 if (expander_handle < ioc->sas_hba.num_phys) {
@@ -2433,8 +2636,8 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
2433 u16 smid; 2636 u16 smid;
2434 u16 count = 0; 2637 u16 count = 0;
2435 2638
2436 for (smid = 1; smid <= ioc->request_depth; smid++) { 2639 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
2437 scmd = _scsih_scsi_lookup_getclear(ioc, smid); 2640 scmd = _scsih_scsi_lookup_get(ioc, smid);
2438 if (!scmd) 2641 if (!scmd)
2439 continue; 2642 continue;
2440 count++; 2643 count++;
@@ -2616,7 +2819,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2616 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON)) 2819 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
2617 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 2820 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
2618 2821
2619 smid = mpt2sas_base_get_smid(ioc, ioc->scsi_io_cb_idx); 2822 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
2620 if (!smid) { 2823 if (!smid) {
2621 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 2824 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2622 ioc->name, __func__); 2825 ioc->name, __func__);
@@ -2643,7 +2846,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2643 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; 2846 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
2644 mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI + 2847 mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
2645 MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR); 2848 MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
2646 2849 mpi_request->VF_ID = 0; /* TODO */
2850 mpi_request->VP_ID = 0;
2647 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 2851 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
2648 mpi_request->LUN); 2852 mpi_request->LUN);
2649 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 2853 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
@@ -2657,8 +2861,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2657 } 2861 }
2658 } 2862 }
2659 2863
2660 _scsih_scsi_lookup_set(ioc, smid, scmd); 2864 mpt2sas_base_put_smid_scsi_io(ioc, smid,
2661 mpt2sas_base_put_smid_scsi_io(ioc, smid, 0,
2662 sas_device_priv_data->sas_target->handle); 2865 sas_device_priv_data->sas_target->handle);
2663 return 0; 2866 return 0;
2664 2867
@@ -2954,15 +3157,16 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2954 * _scsih_io_done - scsi request callback 3157 * _scsih_io_done - scsi request callback
2955 * @ioc: per adapter object 3158 * @ioc: per adapter object
2956 * @smid: system request message index 3159 * @smid: system request message index
2957 * @VF_ID: virtual function id 3160 * @msix_index: MSIX table index supplied by the OS
2958 * @reply: reply message frame(lower 32bit addr) 3161 * @reply: reply message frame(lower 32bit addr)
2959 * 3162 *
2960 * Callback handler when using scsih_qcmd. 3163 * Callback handler when using _scsih_qcmd.
2961 * 3164 *
2962 * Return nothing. 3165 * Return 1 meaning mf should be freed from _base_interrupt
3166 * 0 means the mf is freed from this function.
2963 */ 3167 */
2964static void 3168static u8
2965_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3169_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2966{ 3170{
2967 Mpi2SCSIIORequest_t *mpi_request; 3171 Mpi2SCSIIORequest_t *mpi_request;
2968 Mpi2SCSIIOReply_t *mpi_reply; 3172 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2976,9 +3180,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2976 u32 response_code; 3180 u32 response_code;
2977 3181
2978 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 3182 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
2979 scmd = _scsih_scsi_lookup_getclear(ioc, smid); 3183 scmd = _scsih_scsi_lookup_get(ioc, smid);
2980 if (scmd == NULL) 3184 if (scmd == NULL)
2981 return; 3185 return 1;
2982 3186
2983 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 3187 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2984 3188
@@ -3134,6 +3338,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
3134 out: 3338 out:
3135 scsi_dma_unmap(scmd); 3339 scsi_dma_unmap(scmd);
3136 scmd->scsi_done(scmd); 3340 scmd->scsi_done(scmd);
3341 return 1;
3137} 3342}
3138 3343
3139/** 3344/**
@@ -3398,9 +3603,8 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3398 } 3603 }
3399 } 3604 }
3400 3605
3401 sas_address = le64_to_cpu(expander_pg0.SASAddress);
3402
3403 spin_lock_irqsave(&ioc->sas_node_lock, flags); 3606 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3607 sas_address = le64_to_cpu(expander_pg0.SASAddress);
3404 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, 3608 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
3405 sas_address); 3609 sas_address);
3406 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 3610 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -3666,6 +3870,12 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3666 if (ioc->remove_host) 3870 if (ioc->remove_host)
3667 goto out; 3871 goto out;
3668 3872
3873 if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
3874 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
3875 "target_reset handle(0x%04x)\n", ioc->name, handle));
3876 goto skip_tr;
3877 }
3878
3669 /* Target Reset to flush out all the outstanding IO */ 3879 /* Target Reset to flush out all the outstanding IO */
3670 device_handle = (sas_device->hidden_raid_component) ? 3880 device_handle = (sas_device->hidden_raid_component) ?
3671 sas_device->volume_handle : handle; 3881 sas_device->volume_handle : handle;
@@ -3682,6 +3892,13 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3682 if (ioc->shost_recovery) 3892 if (ioc->shost_recovery)
3683 goto out; 3893 goto out;
3684 } 3894 }
3895 skip_tr:
3896
3897 if ((sas_device->state & MPTSAS_STATE_CNTRL_COMPLETE)) {
3898 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
3899 "sas_cntrl handle(0x%04x)\n", ioc->name, handle));
3900 goto out;
3901 }
3685 3902
3686 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */ 3903 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
3687 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle" 3904 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
@@ -3690,7 +3907,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3690 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 3907 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3691 mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE; 3908 mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3692 mpi_request.DevHandle = handle; 3909 mpi_request.DevHandle = handle;
3693 mpi_request.VF_ID = 0; 3910 mpi_request.VF_ID = 0; /* TODO */
3911 mpi_request.VP_ID = 0;
3694 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, 3912 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
3695 &mpi_request)) != 0) { 3913 &mpi_request)) != 0) {
3696 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 3914 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -3800,15 +4018,12 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3800/** 4018/**
3801 * _scsih_sas_topology_change_event - handle topology changes 4019 * _scsih_sas_topology_change_event - handle topology changes
3802 * @ioc: per adapter object 4020 * @ioc: per adapter object
3803 * @VF_ID: 4021 * @fw_event: The fw_event_work object
3804 * @event_data: event data payload
3805 * fw_event:
3806 * Context: user. 4022 * Context: user.
3807 * 4023 *
3808 */ 4024 */
3809static void 4025static void
3810_scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4026_scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
3811 Mpi2EventDataSasTopologyChangeList_t *event_data,
3812 struct fw_event_work *fw_event) 4027 struct fw_event_work *fw_event)
3813{ 4028{
3814 int i; 4029 int i;
@@ -3818,6 +4033,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3818 struct _sas_node *sas_expander; 4033 struct _sas_node *sas_expander;
3819 unsigned long flags; 4034 unsigned long flags;
3820 u8 link_rate_; 4035 u8 link_rate_;
4036 Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
3821 4037
3822#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4038#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3823 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4039 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -3851,15 +4067,16 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3851 } 4067 }
3852 if (ioc->shost_recovery) 4068 if (ioc->shost_recovery)
3853 return; 4069 return;
3854 if (event_data->PHY[i].PhyStatus & 4070 phy_number = event_data->StartPhyNum + i;
3855 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) 4071 reason_code = event_data->PHY[i].PhyStatus &
4072 MPI2_EVENT_SAS_TOPO_RC_MASK;
4073 if ((event_data->PHY[i].PhyStatus &
4074 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
4075 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
3856 continue; 4076 continue;
3857 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 4077 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3858 if (!handle) 4078 if (!handle)
3859 continue; 4079 continue;
3860 phy_number = event_data->StartPhyNum + i;
3861 reason_code = event_data->PHY[i].PhyStatus &
3862 MPI2_EVENT_SAS_TOPO_RC_MASK;
3863 link_rate_ = event_data->PHY[i].LinkRate >> 4; 4080 link_rate_ = event_data->PHY[i].LinkRate >> 4;
3864 switch (reason_code) { 4081 switch (reason_code) {
3865 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 4082 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
@@ -3971,19 +4188,19 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3971/** 4188/**
3972 * _scsih_sas_device_status_change_event - handle device status change 4189 * _scsih_sas_device_status_change_event - handle device status change
3973 * @ioc: per adapter object 4190 * @ioc: per adapter object
3974 * @VF_ID: 4191 * @fw_event: The fw_event_work object
3975 * @event_data: event data payload
3976 * Context: user. 4192 * Context: user.
3977 * 4193 *
3978 * Return nothing. 4194 * Return nothing.
3979 */ 4195 */
3980static void 4196static void
3981_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4197_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
3982 Mpi2EventDataSasDeviceStatusChange_t *event_data) 4198 struct fw_event_work *fw_event)
3983{ 4199{
3984#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4200#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3985 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4201 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
3986 _scsih_sas_device_status_change_event_debug(ioc, event_data); 4202 _scsih_sas_device_status_change_event_debug(ioc,
4203 fw_event->event_data);
3987#endif 4204#endif
3988} 4205}
3989 4206
@@ -4026,34 +4243,33 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4026/** 4243/**
4027 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events 4244 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
4028 * @ioc: per adapter object 4245 * @ioc: per adapter object
4029 * @VF_ID: 4246 * @fw_event: The fw_event_work object
4030 * @event_data: event data payload
4031 * Context: user. 4247 * Context: user.
4032 * 4248 *
4033 * Return nothing. 4249 * Return nothing.
4034 */ 4250 */
4035static void 4251static void
4036_scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc, 4252_scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
4037 u8 VF_ID, Mpi2EventDataSasEnclDevStatusChange_t *event_data) 4253 struct fw_event_work *fw_event)
4038{ 4254{
4039#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4255#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4040 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4256 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4041 _scsih_sas_enclosure_dev_status_change_event_debug(ioc, 4257 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
4042 event_data); 4258 fw_event->event_data);
4043#endif 4259#endif
4044} 4260}
4045 4261
4046/** 4262/**
4047 * _scsih_sas_broadcast_primative_event - handle broadcast events 4263 * _scsih_sas_broadcast_primative_event - handle broadcast events
4048 * @ioc: per adapter object 4264 * @ioc: per adapter object
4049 * @event_data: event data payload 4265 * @fw_event: The fw_event_work object
4050 * Context: user. 4266 * Context: user.
4051 * 4267 *
4052 * Return nothing. 4268 * Return nothing.
4053 */ 4269 */
4054static void 4270static void
4055_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4271_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4056 Mpi2EventDataSasBroadcastPrimitive_t *event_data) 4272 struct fw_event_work *fw_event)
4057{ 4273{
4058 struct scsi_cmnd *scmd; 4274 struct scsi_cmnd *scmd;
4059 u16 smid, handle; 4275 u16 smid, handle;
@@ -4062,11 +4278,12 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4062 u32 termination_count; 4278 u32 termination_count;
4063 u32 query_count; 4279 u32 query_count;
4064 Mpi2SCSITaskManagementReply_t *mpi_reply; 4280 Mpi2SCSITaskManagementReply_t *mpi_reply;
4065 4281#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4282 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
4283#endif
4066 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: " 4284 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: "
4067 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, 4285 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
4068 event_data->PortWidth)); 4286 event_data->PortWidth));
4069
4070 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 4287 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
4071 __func__)); 4288 __func__));
4072 4289
@@ -4074,7 +4291,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4074 termination_count = 0; 4291 termination_count = 0;
4075 query_count = 0; 4292 query_count = 0;
4076 mpi_reply = ioc->tm_cmds.reply; 4293 mpi_reply = ioc->tm_cmds.reply;
4077 for (smid = 1; smid <= ioc->request_depth; smid++) { 4294 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4078 scmd = _scsih_scsi_lookup_get(ioc, smid); 4295 scmd = _scsih_scsi_lookup_get(ioc, smid);
4079 if (!scmd) 4296 if (!scmd)
4080 continue; 4297 continue;
@@ -4121,23 +4338,25 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4121/** 4338/**
4122 * _scsih_sas_discovery_event - handle discovery events 4339 * _scsih_sas_discovery_event - handle discovery events
4123 * @ioc: per adapter object 4340 * @ioc: per adapter object
4124 * @event_data: event data payload 4341 * @fw_event: The fw_event_work object
4125 * Context: user. 4342 * Context: user.
4126 * 4343 *
4127 * Return nothing. 4344 * Return nothing.
4128 */ 4345 */
4129static void 4346static void
4130_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4347_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
4131 Mpi2EventDataSasDiscovery_t *event_data) 4348 struct fw_event_work *fw_event)
4132{ 4349{
4350 Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
4351
4133#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4352#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4134 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 4353 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
4135 printk(MPT2SAS_DEBUG_FMT "discovery event: (%s)", ioc->name, 4354 printk(MPT2SAS_DEBUG_FMT "discovery event: (%s)", ioc->name,
4136 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 4355 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
4137 "start" : "stop"); 4356 "start" : "stop");
4138 if (event_data->DiscoveryStatus) 4357 if (event_data->DiscoveryStatus)
4139 printk(MPT2SAS_DEBUG_FMT ", discovery_status(0x%08x)", 4358 printk("discovery_status(0x%08x)",
4140 ioc->name, le32_to_cpu(event_data->DiscoveryStatus)); 4359 le32_to_cpu(event_data->DiscoveryStatus));
4141 printk("\n"); 4360 printk("\n");
4142 } 4361 }
4143#endif 4362#endif
@@ -4488,19 +4707,19 @@ _scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4488/** 4707/**
4489 * _scsih_sas_ir_config_change_event - handle ir configuration change events 4708 * _scsih_sas_ir_config_change_event - handle ir configuration change events
4490 * @ioc: per adapter object 4709 * @ioc: per adapter object
4491 * @VF_ID: 4710 * @fw_event: The fw_event_work object
4492 * @event_data: event data payload
4493 * Context: user. 4711 * Context: user.
4494 * 4712 *
4495 * Return nothing. 4713 * Return nothing.
4496 */ 4714 */
4497static void 4715static void
4498_scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4716_scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
4499 Mpi2EventDataIrConfigChangeList_t *event_data) 4717 struct fw_event_work *fw_event)
4500{ 4718{
4501 Mpi2EventIrConfigElement_t *element; 4719 Mpi2EventIrConfigElement_t *element;
4502 int i; 4720 int i;
4503 u8 foreign_config; 4721 u8 foreign_config;
4722 Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
4504 4723
4505#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4724#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4506 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4725 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -4543,14 +4762,14 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4543/** 4762/**
4544 * _scsih_sas_ir_volume_event - IR volume event 4763 * _scsih_sas_ir_volume_event - IR volume event
4545 * @ioc: per adapter object 4764 * @ioc: per adapter object
4546 * @event_data: event data payload 4765 * @fw_event: The fw_event_work object
4547 * Context: user. 4766 * Context: user.
4548 * 4767 *
4549 * Return nothing. 4768 * Return nothing.
4550 */ 4769 */
4551static void 4770static void
4552_scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4771_scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
4553 Mpi2EventDataIrVolume_t *event_data) 4772 struct fw_event_work *fw_event)
4554{ 4773{
4555 u64 wwid; 4774 u64 wwid;
4556 unsigned long flags; 4775 unsigned long flags;
@@ -4559,6 +4778,7 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4559 u32 state; 4778 u32 state;
4560 int rc; 4779 int rc;
4561 struct MPT2SAS_TARGET *sas_target_priv_data; 4780 struct MPT2SAS_TARGET *sas_target_priv_data;
4781 Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
4562 4782
4563 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 4783 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4564 return; 4784 return;
@@ -4628,14 +4848,14 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4628/** 4848/**
4629 * _scsih_sas_ir_physical_disk_event - PD event 4849 * _scsih_sas_ir_physical_disk_event - PD event
4630 * @ioc: per adapter object 4850 * @ioc: per adapter object
4631 * @event_data: event data payload 4851 * @fw_event: The fw_event_work object
4632 * Context: user. 4852 * Context: user.
4633 * 4853 *
4634 * Return nothing. 4854 * Return nothing.
4635 */ 4855 */
4636static void 4856static void
4637_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4857_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
4638 Mpi2EventDataIrPhysicalDisk_t *event_data) 4858 struct fw_event_work *fw_event)
4639{ 4859{
4640 u16 handle; 4860 u16 handle;
4641 u32 state; 4861 u32 state;
@@ -4644,6 +4864,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4644 Mpi2ConfigReply_t mpi_reply; 4864 Mpi2ConfigReply_t mpi_reply;
4645 Mpi2SasDevicePage0_t sas_device_pg0; 4865 Mpi2SasDevicePage0_t sas_device_pg0;
4646 u32 ioc_status; 4866 u32 ioc_status;
4867 Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
4647 4868
4648 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 4869 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
4649 return; 4870 return;
@@ -4743,33 +4964,33 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc,
4743/** 4964/**
4744 * _scsih_sas_ir_operation_status_event - handle RAID operation events 4965 * _scsih_sas_ir_operation_status_event - handle RAID operation events
4745 * @ioc: per adapter object 4966 * @ioc: per adapter object
4746 * @VF_ID: 4967 * @fw_event: The fw_event_work object
4747 * @event_data: event data payload
4748 * Context: user. 4968 * Context: user.
4749 * 4969 *
4750 * Return nothing. 4970 * Return nothing.
4751 */ 4971 */
4752static void 4972static void
4753_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4973_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
4754 Mpi2EventDataIrOperationStatus_t *event_data) 4974 struct fw_event_work *fw_event)
4755{ 4975{
4756#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4976#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4757 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4977 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4758 _scsih_sas_ir_operation_status_event_debug(ioc, event_data); 4978 _scsih_sas_ir_operation_status_event_debug(ioc,
4979 fw_event->event_data);
4759#endif 4980#endif
4760} 4981}
4761 4982
4762/** 4983/**
4763 * _scsih_task_set_full - handle task set full 4984 * _scsih_task_set_full - handle task set full
4764 * @ioc: per adapter object 4985 * @ioc: per adapter object
4765 * @event_data: event data payload 4986 * @fw_event: The fw_event_work object
4766 * Context: user. 4987 * Context: user.
4767 * 4988 *
4768 * Throttle back qdepth. 4989 * Throttle back qdepth.
4769 */ 4990 */
4770static void 4991static void
4771_scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, 4992_scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
4772 Mpi2EventDataTaskSetFull_t *event_data) 4993 *fw_event)
4773{ 4994{
4774 unsigned long flags; 4995 unsigned long flags;
4775 struct _sas_device *sas_device; 4996 struct _sas_device *sas_device;
@@ -4780,6 +5001,7 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4780 u16 handle; 5001 u16 handle;
4781 int id, channel; 5002 int id, channel;
4782 u64 sas_address; 5003 u64 sas_address;
5004 Mpi2EventDataTaskSetFull_t *event_data = fw_event->event_data;
4783 5005
4784 current_depth = le16_to_cpu(event_data->CurrentDepth); 5006 current_depth = le16_to_cpu(event_data->CurrentDepth);
4785 handle = le16_to_cpu(event_data->DevHandle); 5007 handle = le16_to_cpu(event_data->DevHandle);
@@ -4868,6 +5090,10 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
4868 if (sas_device->sas_address == sas_address && 5090 if (sas_device->sas_address == sas_address &&
4869 sas_device->slot == slot && sas_device->starget) { 5091 sas_device->slot == slot && sas_device->starget) {
4870 sas_device->responding = 1; 5092 sas_device->responding = 1;
5093 sas_device->state = 0;
5094 starget = sas_device->starget;
5095 sas_target_priv_data = starget->hostdata;
5096 sas_target_priv_data->tm_busy = 0;
4871 starget_printk(KERN_INFO, sas_device->starget, 5097 starget_printk(KERN_INFO, sas_device->starget,
4872 "handle(0x%04x), sas_addr(0x%016llx), enclosure " 5098 "handle(0x%04x), sas_addr(0x%016llx), enclosure "
4873 "logical id(0x%016llx), slot(%d)\n", handle, 5099 "logical id(0x%016llx), slot(%d)\n", handle,
@@ -4880,8 +5106,6 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
4880 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 5106 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
4881 sas_device->handle); 5107 sas_device->handle);
4882 sas_device->handle = handle; 5108 sas_device->handle = handle;
4883 starget = sas_device->starget;
4884 sas_target_priv_data = starget->hostdata;
4885 sas_target_priv_data->handle = handle; 5109 sas_target_priv_data->handle = handle;
4886 goto out; 5110 goto out;
4887 } 5111 }
@@ -5227,44 +5451,38 @@ _firmware_event_work(struct work_struct *work)
5227 5451
5228 switch (fw_event->event) { 5452 switch (fw_event->event) {
5229 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 5453 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
5230 _scsih_sas_topology_change_event(ioc, fw_event->VF_ID, 5454 _scsih_sas_topology_change_event(ioc, fw_event);
5231 fw_event->event_data, fw_event);
5232 break; 5455 break;
5233 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 5456 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
5234 _scsih_sas_device_status_change_event(ioc, fw_event->VF_ID, 5457 _scsih_sas_device_status_change_event(ioc,
5235 fw_event->event_data); 5458 fw_event);
5236 break; 5459 break;
5237 case MPI2_EVENT_SAS_DISCOVERY: 5460 case MPI2_EVENT_SAS_DISCOVERY:
5238 _scsih_sas_discovery_event(ioc, fw_event->VF_ID, 5461 _scsih_sas_discovery_event(ioc,
5239 fw_event->event_data); 5462 fw_event);
5240 break; 5463 break;
5241 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 5464 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
5242 _scsih_sas_broadcast_primative_event(ioc, fw_event->VF_ID, 5465 _scsih_sas_broadcast_primative_event(ioc,
5243 fw_event->event_data); 5466 fw_event);
5244 break; 5467 break;
5245 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 5468 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
5246 _scsih_sas_enclosure_dev_status_change_event(ioc, 5469 _scsih_sas_enclosure_dev_status_change_event(ioc,
5247 fw_event->VF_ID, fw_event->event_data); 5470 fw_event);
5248 break; 5471 break;
5249 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 5472 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
5250 _scsih_sas_ir_config_change_event(ioc, fw_event->VF_ID, 5473 _scsih_sas_ir_config_change_event(ioc, fw_event);
5251 fw_event->event_data);
5252 break; 5474 break;
5253 case MPI2_EVENT_IR_VOLUME: 5475 case MPI2_EVENT_IR_VOLUME:
5254 _scsih_sas_ir_volume_event(ioc, fw_event->VF_ID, 5476 _scsih_sas_ir_volume_event(ioc, fw_event);
5255 fw_event->event_data);
5256 break; 5477 break;
5257 case MPI2_EVENT_IR_PHYSICAL_DISK: 5478 case MPI2_EVENT_IR_PHYSICAL_DISK:
5258 _scsih_sas_ir_physical_disk_event(ioc, fw_event->VF_ID, 5479 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
5259 fw_event->event_data);
5260 break; 5480 break;
5261 case MPI2_EVENT_IR_OPERATION_STATUS: 5481 case MPI2_EVENT_IR_OPERATION_STATUS:
5262 _scsih_sas_ir_operation_status_event(ioc, fw_event->VF_ID, 5482 _scsih_sas_ir_operation_status_event(ioc, fw_event);
5263 fw_event->event_data);
5264 break; 5483 break;
5265 case MPI2_EVENT_TASK_SET_FULL: 5484 case MPI2_EVENT_TASK_SET_FULL:
5266 _scsih_task_set_full(ioc, fw_event->VF_ID, 5485 _scsih_task_set_full(ioc, fw_event);
5267 fw_event->event_data);
5268 break; 5486 break;
5269 } 5487 }
5270 _scsih_fw_event_free(ioc, fw_event); 5488 _scsih_fw_event_free(ioc, fw_event);
@@ -5273,17 +5491,19 @@ _firmware_event_work(struct work_struct *work)
5273/** 5491/**
5274 * mpt2sas_scsih_event_callback - firmware event handler (called at ISR time) 5492 * mpt2sas_scsih_event_callback - firmware event handler (called at ISR time)
5275 * @ioc: per adapter object 5493 * @ioc: per adapter object
5276 * @VF_ID: virtual function id 5494 * @msix_index: MSIX table index supplied by the OS
5277 * @reply: reply message frame(lower 32bit addr) 5495 * @reply: reply message frame(lower 32bit addr)
5278 * Context: interrupt. 5496 * Context: interrupt.
5279 * 5497 *
5280 * This function merely adds a new work task into ioc->firmware_event_thread. 5498 * This function merely adds a new work task into ioc->firmware_event_thread.
5281 * The tasks are worked from _firmware_event_work in user context. 5499 * The tasks are worked from _firmware_event_work in user context.
5282 * 5500 *
5283 * Return nothing. 5501 * Return 1 meaning mf should be freed from _base_interrupt
5502 * 0 means the mf is freed from this function.
5284 */ 5503 */
5285void 5504u8
5286mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply) 5505mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
5506 u32 reply)
5287{ 5507{
5288 struct fw_event_work *fw_event; 5508 struct fw_event_work *fw_event;
5289 Mpi2EventNotificationReply_t *mpi_reply; 5509 Mpi2EventNotificationReply_t *mpi_reply;
@@ -5294,11 +5514,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
5294 spin_lock_irqsave(&ioc->fw_event_lock, flags); 5514 spin_lock_irqsave(&ioc->fw_event_lock, flags);
5295 if (ioc->fw_events_off || ioc->remove_host) { 5515 if (ioc->fw_events_off || ioc->remove_host) {
5296 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 5516 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5297 return; 5517 return 1;
5298 } 5518 }
5299 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 5519 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5300 5520
5301 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 5521 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
5302 event = le16_to_cpu(mpi_reply->Event); 5522 event = le16_to_cpu(mpi_reply->Event);
5303 5523
5304 switch (event) { 5524 switch (event) {
@@ -5312,7 +5532,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
5312 if (baen_data->Primitive != 5532 if (baen_data->Primitive !=
5313 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT || 5533 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT ||
5314 ioc->broadcast_aen_busy) 5534 ioc->broadcast_aen_busy)
5315 return; 5535 return 1;
5316 ioc->broadcast_aen_busy = 1; 5536 ioc->broadcast_aen_busy = 1;
5317 break; 5537 break;
5318 } 5538 }
@@ -5334,14 +5554,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
5334 break; 5554 break;
5335 5555
5336 default: /* ignore the rest */ 5556 default: /* ignore the rest */
5337 return; 5557 return 1;
5338 } 5558 }
5339 5559
5340 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 5560 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
5341 if (!fw_event) { 5561 if (!fw_event) {
5342 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 5562 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5343 ioc->name, __FILE__, __LINE__, __func__); 5563 ioc->name, __FILE__, __LINE__, __func__);
5344 return; 5564 return 1;
5345 } 5565 }
5346 fw_event->event_data = 5566 fw_event->event_data =
5347 kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC); 5567 kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
@@ -5349,15 +5569,17 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
5349 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 5569 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5350 ioc->name, __FILE__, __LINE__, __func__); 5570 ioc->name, __FILE__, __LINE__, __func__);
5351 kfree(fw_event); 5571 kfree(fw_event);
5352 return; 5572 return 1;
5353 } 5573 }
5354 5574
5355 memcpy(fw_event->event_data, mpi_reply->EventData, 5575 memcpy(fw_event->event_data, mpi_reply->EventData,
5356 mpi_reply->EventDataLength*4); 5576 mpi_reply->EventDataLength*4);
5357 fw_event->ioc = ioc; 5577 fw_event->ioc = ioc;
5358 fw_event->VF_ID = VF_ID; 5578 fw_event->VF_ID = mpi_reply->VF_ID;
5579 fw_event->VP_ID = mpi_reply->VP_ID;
5359 fw_event->event = event; 5580 fw_event->event = event;
5360 _scsih_fw_event_add(ioc, fw_event); 5581 _scsih_fw_event_add(ioc, fw_event);
5582 return 1;
5361} 5583}
5362 5584
5363/* shost template */ 5585/* shost template */
@@ -5617,7 +5839,7 @@ _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
5617} 5839}
5618 5840
5619/** 5841/**
5620 * _scsih_probe_sas - reporting raid volumes to sas transport 5842 * _scsih_probe_sas - reporting sas devices to sas transport
5621 * @ioc: per adapter object 5843 * @ioc: per adapter object
5622 * 5844 *
5623 * Called during initial loading of the driver. 5845 * Called during initial loading of the driver.
@@ -5714,6 +5936,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5714 ioc->base_cb_idx = base_cb_idx; 5936 ioc->base_cb_idx = base_cb_idx;
5715 ioc->transport_cb_idx = transport_cb_idx; 5937 ioc->transport_cb_idx = transport_cb_idx;
5716 ioc->config_cb_idx = config_cb_idx; 5938 ioc->config_cb_idx = config_cb_idx;
5939 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
5940 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
5717 ioc->logging_level = logging_level; 5941 ioc->logging_level = logging_level;
5718 /* misc semaphores and spin locks */ 5942 /* misc semaphores and spin locks */
5719 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 5943 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -5729,6 +5953,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5729 INIT_LIST_HEAD(&ioc->fw_event_list); 5953 INIT_LIST_HEAD(&ioc->fw_event_list);
5730 INIT_LIST_HEAD(&ioc->raid_device_list); 5954 INIT_LIST_HEAD(&ioc->raid_device_list);
5731 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 5955 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
5956 INIT_LIST_HEAD(&ioc->delayed_tr_list);
5732 5957
5733 /* init shost parameters */ 5958 /* init shost parameters */
5734 shost->max_cmd_len = 16; 5959 shost->max_cmd_len = 16;
@@ -5745,6 +5970,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5745 5970
5746 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 5971 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5747 | SHOST_DIF_TYPE3_PROTECTION); 5972 | SHOST_DIF_TYPE3_PROTECTION);
5973 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5748 5974
5749 /* event thread */ 5975 /* event thread */
5750 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5976 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
@@ -5894,6 +6120,11 @@ _scsih_init(void)
5894 /* ctl module callback handler */ 6120 /* ctl module callback handler */
5895 ctl_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_ctl_done); 6121 ctl_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_ctl_done);
5896 6122
6123 tm_tr_cb_idx = mpt2sas_base_register_callback_handler(
6124 _scsih_tm_tr_complete);
6125 tm_sas_control_cb_idx = mpt2sas_base_register_callback_handler(
6126 _scsih_sas_control_complete);
6127
5897 mpt2sas_ctl_init(); 6128 mpt2sas_ctl_init();
5898 6129
5899 error = pci_register_driver(&scsih_driver); 6130 error = pci_register_driver(&scsih_driver);
@@ -5924,6 +6155,9 @@ _scsih_exit(void)
5924 mpt2sas_base_release_callback_handler(config_cb_idx); 6155 mpt2sas_base_release_callback_handler(config_cb_idx);
5925 mpt2sas_base_release_callback_handler(ctl_cb_idx); 6156 mpt2sas_base_release_callback_handler(ctl_cb_idx);
5926 6157
6158 mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
6159 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
6160
5927 mpt2sas_ctl_exit(); 6161 mpt2sas_ctl_exit();
5928} 6162}
5929 6163
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 742324a0a11e..eb98188c7f3f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers 2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
5 * Copyright (C) 2007-2008 LSI Corporation 5 * Copyright (C) 2007-2009 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -212,25 +212,26 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
212 * mpt2sas_transport_done - internal transport layer callback handler. 212 * mpt2sas_transport_done - internal transport layer callback handler.
213 * @ioc: per adapter object 213 * @ioc: per adapter object
214 * @smid: system request message index 214 * @smid: system request message index
215 * @VF_ID: virtual function id 215 * @msix_index: MSIX table index supplied by the OS
216 * @reply: reply message frame(lower 32bit addr) 216 * @reply: reply message frame(lower 32bit addr)
217 * 217 *
218 * Callback handler when sending internal generated transport cmds. 218 * Callback handler when sending internal generated transport cmds.
219 * The callback index passed is `ioc->transport_cb_idx` 219 * The callback index passed is `ioc->transport_cb_idx`
220 * 220 *
221 * Return nothing. 221 * Return 1 meaning mf should be freed from _base_interrupt
222 * 0 means the mf is freed from this function.
222 */ 223 */
223void 224u8
224mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, 225mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
225 u32 reply) 226 u32 reply)
226{ 227{
227 MPI2DefaultReply_t *mpi_reply; 228 MPI2DefaultReply_t *mpi_reply;
228 229
229 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 230 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
230 if (ioc->transport_cmds.status == MPT2_CMD_NOT_USED) 231 if (ioc->transport_cmds.status == MPT2_CMD_NOT_USED)
231 return; 232 return 1;
232 if (ioc->transport_cmds.smid != smid) 233 if (ioc->transport_cmds.smid != smid)
233 return; 234 return 1;
234 ioc->transport_cmds.status |= MPT2_CMD_COMPLETE; 235 ioc->transport_cmds.status |= MPT2_CMD_COMPLETE;
235 if (mpi_reply) { 236 if (mpi_reply) {
236 memcpy(ioc->transport_cmds.reply, mpi_reply, 237 memcpy(ioc->transport_cmds.reply, mpi_reply,
@@ -239,6 +240,7 @@ mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
239 } 240 }
240 ioc->transport_cmds.status &= ~MPT2_CMD_PENDING; 241 ioc->transport_cmds.status &= ~MPT2_CMD_PENDING;
241 complete(&ioc->transport_cmds.done); 242 complete(&ioc->transport_cmds.done);
243 return 1;
242} 244}
243 245
244/* report manufacture request structure */ 246/* report manufacture request structure */
@@ -369,6 +371,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
369 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); 371 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
370 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; 372 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
371 mpi_request->PhysicalPort = 0xFF; 373 mpi_request->PhysicalPort = 0xFF;
374 mpi_request->VF_ID = 0; /* TODO */
375 mpi_request->VP_ID = 0;
372 sas_address_le = (u64 *)&mpi_request->SASAddress; 376 sas_address_le = (u64 *)&mpi_request->SASAddress;
373 *sas_address_le = cpu_to_le64(sas_address); 377 *sas_address_le = cpu_to_le64(sas_address);
374 mpi_request->RequestDataLength = sizeof(struct rep_manu_request); 378 mpi_request->RequestDataLength = sizeof(struct rep_manu_request);
@@ -396,7 +400,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
396 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - " 400 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - "
397 "send to sas_addr(0x%016llx)\n", ioc->name, 401 "send to sas_addr(0x%016llx)\n", ioc->name,
398 (unsigned long long)sas_address)); 402 (unsigned long long)sas_address));
399 mpt2sas_base_put_smid_default(ioc, smid, 0 /* VF_ID */); 403 mpt2sas_base_put_smid_default(ioc, smid);
404 init_completion(&ioc->transport_cmds.done);
400 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 405 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
401 10*HZ); 406 10*HZ);
402 407
@@ -1106,6 +1111,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1106 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); 1111 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
1107 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; 1112 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
1108 mpi_request->PhysicalPort = 0xFF; 1113 mpi_request->PhysicalPort = 0xFF;
1114 mpi_request->VF_ID = 0; /* TODO */
1115 mpi_request->VP_ID = 0;
1109 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1116 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1110 cpu_to_le64(rphy->identify.sas_address) : 1117 cpu_to_le64(rphy->identify.sas_address) :
1111 cpu_to_le64(ioc->sas_hba.sas_address); 1118 cpu_to_le64(ioc->sas_hba.sas_address);
@@ -1147,7 +1154,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1147 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1154 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
1148 "sending smp request\n", ioc->name, __func__)); 1155 "sending smp request\n", ioc->name, __func__));
1149 1156
1150 mpt2sas_base_put_smid_default(ioc, smid, 0 /* VF_ID */); 1157 mpt2sas_base_put_smid_default(ioc, smid);
1158 init_completion(&ioc->transport_cmds.done);
1151 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1159 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1152 10*HZ); 1160 10*HZ);
1153 1161
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index f8cb9defb961..1849da1f030d 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -25,6 +25,8 @@
25#ifndef _MV_DEFS_H_ 25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_ 26#define _MV_DEFS_H_
27 27
28#define PCI_DEVICE_ID_ARECA_1300 0x1300
29#define PCI_DEVICE_ID_ARECA_1320 0x1320
28 30
29enum chip_flavors { 31enum chip_flavors {
30 chip_6320, 32 chip_6320,
@@ -32,6 +34,8 @@ enum chip_flavors {
32 chip_6485, 34 chip_6485,
33 chip_9480, 35 chip_9480,
34 chip_9180, 36 chip_9180,
37 chip_1300,
38 chip_1320
35}; 39};
36 40
37/* driver compile-time configuration */ 41/* driver compile-time configuration */
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8646a19f999d..c790d45876c4 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -32,6 +32,8 @@ static const struct mvs_chip_info mvs_chips[] = {
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
36 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35}; 37};
36 38
37#define SOC_SAS_NUM 2 39#define SOC_SAS_NUM 2
@@ -653,6 +655,8 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, 655 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, 656 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
656 660
657 { } /* terminate list */ 661 { } /* terminate list */
658}; 662};
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4302f06e4ec9..0a97bc9074bb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -46,6 +46,7 @@
46#include <linux/mutex.h> 46#include <linux/mutex.h>
47#include <scsi/scsi.h> 47#include <scsi/scsi.h>
48#include <scsi/scsi_host.h> 48#include <scsi/scsi_host.h>
49#include <scsi/scsi_device.h>
49#include <scsi/scsi_tcq.h> 50#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_eh.h> 51#include <scsi/scsi_eh.h>
51#include <scsi/scsi_cmnd.h> 52#include <scsi/scsi_cmnd.h>
@@ -684,7 +685,7 @@ static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
684 struct pmcraid_instance *pinstance = cmd->drv_inst; 685 struct pmcraid_instance *pinstance = cmd->drv_inst;
685 unsigned long lock_flags; 686 unsigned long lock_flags;
686 687
687 dev_err(&pinstance->pdev->dev, 688 dev_info(&pinstance->pdev->dev,
688 "Adapter being reset due to command timeout.\n"); 689 "Adapter being reset due to command timeout.\n");
689 690
690 /* Command timeouts result in hard reset sequence. The command that got 691 /* Command timeouts result in hard reset sequence. The command that got
@@ -815,8 +816,9 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
815 816
816 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) { 817 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
817 scsi_cmd->result |= (DID_ERROR << 16); 818 scsi_cmd->result |= (DID_ERROR << 16);
818 pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n", 819 scmd_printk(KERN_INFO, scsi_cmd,
819 cmd->ioa_cb->ioarcb.cdb[0], ioasc); 820 "command CDB[0] = %x failed with IOASC: 0x%08X\n",
821 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
820 } 822 }
821 823
822 /* if we had allocated sense buffers for request sense, copy the sense 824 /* if we had allocated sense buffers for request sense, copy the sense
@@ -1069,7 +1071,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
1069 1071
1070 ioarcb->data_transfer_length = cpu_to_le32(rcb_size); 1072 ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1071 1073
1072 ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST); 1074 ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
1073 ioadl[0].data_len = cpu_to_le32(rcb_size); 1075 ioadl[0].data_len = cpu_to_le32(rcb_size);
1074 ioadl[0].address = cpu_to_le32(dma); 1076 ioadl[0].address = cpu_to_le32(dma);
1075 1077
@@ -1541,13 +1543,13 @@ static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1541 1543
1542 if (pinstance->ldn.hcam->notification_lost == 1544 if (pinstance->ldn.hcam->notification_lost ==
1543 HOSTRCB_NOTIFICATIONS_LOST) 1545 HOSTRCB_NOTIFICATIONS_LOST)
1544 dev_err(&pinstance->pdev->dev, "Error notifications lost\n"); 1546 dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
1545 1547
1546 ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc); 1548 ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1547 1549
1548 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || 1550 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1549 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) { 1551 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
1550 dev_err(&pinstance->pdev->dev, 1552 dev_info(&pinstance->pdev->dev,
1551 "UnitAttention due to IOA Bus Reset\n"); 1553 "UnitAttention due to IOA Bus Reset\n");
1552 scsi_report_bus_reset( 1554 scsi_report_bus_reset(
1553 pinstance->host, 1555 pinstance->host,
@@ -1584,7 +1586,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1584 atomic_read(&pinstance->ccn.ignore) == 1) { 1586 atomic_read(&pinstance->ccn.ignore) == 1) {
1585 return; 1587 return;
1586 } else if (ioasc) { 1588 } else if (ioasc) {
1587 dev_err(&pinstance->pdev->dev, 1589 dev_info(&pinstance->pdev->dev,
1588 "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc); 1590 "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1589 spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 1591 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1590 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); 1592 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
@@ -1634,7 +1636,7 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1634 return; 1636 return;
1635 } 1637 }
1636 } else { 1638 } else {
1637 dev_err(&pinstance->pdev->dev, 1639 dev_info(&pinstance->pdev->dev,
1638 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); 1640 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1639 } 1641 }
1640 /* send netlink message for HCAM notification if enabled */ 1642 /* send netlink message for HCAM notification if enabled */
@@ -1822,7 +1824,6 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
1822 scsi_dma_unmap(scsi_cmd); 1824 scsi_dma_unmap(scsi_cmd);
1823 pmcraid_return_cmd(cmd); 1825 pmcraid_return_cmd(cmd);
1824 1826
1825
1826 pmcraid_info("failing(%d) CDB[0] = %x result: %x\n", 1827 pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
1827 le32_to_cpu(resp) >> 2, 1828 le32_to_cpu(resp) >> 2,
1828 cmd->ioa_cb->ioarcb.cdb[0], 1829 cmd->ioa_cb->ioarcb.cdb[0],
@@ -2250,7 +2251,7 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2250 2251
2251 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma); 2252 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2252 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); 2253 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2253 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 2254 ioadl->flags = IOADL_FLAGS_LAST_DESC;
2254 2255
2255 /* request sense might be called as part of error response processing 2256 /* request sense might be called as part of error response processing
2256 * which runs in tasklets context. It is possible that mid-layer might 2257 * which runs in tasklets context. It is possible that mid-layer might
@@ -2514,7 +2515,8 @@ static int pmcraid_reset_device(
2514 res = scsi_cmd->device->hostdata; 2515 res = scsi_cmd->device->hostdata;
2515 2516
2516 if (!res) { 2517 if (!res) {
2517 pmcraid_err("reset_device: NULL resource pointer\n"); 2518 sdev_printk(KERN_ERR, scsi_cmd->device,
2519 "reset_device: NULL resource pointer\n");
2518 return FAILED; 2520 return FAILED;
2519 } 2521 }
2520 2522
@@ -2752,8 +2754,8 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2752 pinstance = 2754 pinstance =
2753 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; 2755 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2754 2756
2755 dev_err(&pinstance->pdev->dev, 2757 scmd_printk(KERN_INFO, scsi_cmd,
2756 "I/O command timed out, aborting it.\n"); 2758 "I/O command timed out, aborting it.\n");
2757 2759
2758 res = scsi_cmd->device->hostdata; 2760 res = scsi_cmd->device->hostdata;
2759 2761
@@ -2824,7 +2826,8 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2824 */ 2826 */
2825static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd) 2827static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2826{ 2828{
2827 pmcraid_err("Doing device reset due to an I/O command timeout.\n"); 2829 scmd_printk(KERN_INFO, scmd,
2830 "resetting device due to an I/O command timeout.\n");
2828 return pmcraid_reset_device(scmd, 2831 return pmcraid_reset_device(scmd,
2829 PMCRAID_INTERNAL_TIMEOUT, 2832 PMCRAID_INTERNAL_TIMEOUT,
2830 RESET_DEVICE_LUN); 2833 RESET_DEVICE_LUN);
@@ -2832,7 +2835,8 @@ static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2832 2835
2833static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd) 2836static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
2834{ 2837{
2835 pmcraid_err("Doing bus reset due to an I/O command timeout.\n"); 2838 scmd_printk(KERN_INFO, scmd,
2839 "Doing bus reset due to an I/O command timeout.\n");
2836 return pmcraid_reset_device(scmd, 2840 return pmcraid_reset_device(scmd,
2837 PMCRAID_RESET_BUS_TIMEOUT, 2841 PMCRAID_RESET_BUS_TIMEOUT,
2838 RESET_DEVICE_BUS); 2842 RESET_DEVICE_BUS);
@@ -2840,7 +2844,8 @@ static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
2840 2844
2841static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd) 2845static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
2842{ 2846{
2843 pmcraid_err("Doing target reset due to an I/O command timeout.\n"); 2847 scmd_printk(KERN_INFO, scmd,
2848 "Doing target reset due to an I/O command timeout.\n");
2844 return pmcraid_reset_device(scmd, 2849 return pmcraid_reset_device(scmd,
2845 PMCRAID_INTERNAL_TIMEOUT, 2850 PMCRAID_INTERNAL_TIMEOUT,
2846 RESET_DEVICE_TARGET); 2851 RESET_DEVICE_TARGET);
@@ -2988,11 +2993,11 @@ static int pmcraid_build_ioadl(
2988 nseg = scsi_dma_map(scsi_cmd); 2993 nseg = scsi_dma_map(scsi_cmd);
2989 2994
2990 if (nseg < 0) { 2995 if (nseg < 0) {
2991 dev_err(&pinstance->pdev->dev, "scsi_map_dma failed!\n"); 2996 scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n");
2992 return -1; 2997 return -1;
2993 } else if (nseg > PMCRAID_MAX_IOADLS) { 2998 } else if (nseg > PMCRAID_MAX_IOADLS) {
2994 scsi_dma_unmap(scsi_cmd); 2999 scsi_dma_unmap(scsi_cmd);
2995 dev_err(&pinstance->pdev->dev, 3000 scmd_printk(KERN_ERR, scsi_cmd,
2996 "sg count is (%d) more than allowed!\n", nseg); 3001 "sg count is (%d) more than allowed!\n", nseg);
2997 return -1; 3002 return -1;
2998 } 3003 }
@@ -3012,7 +3017,7 @@ static int pmcraid_build_ioadl(
3012 ioadl[i].flags = 0; 3017 ioadl[i].flags = 0;
3013 } 3018 }
3014 /* setup last descriptor */ 3019 /* setup last descriptor */
3015 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 3020 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3016 3021
3017 return 0; 3022 return 0;
3018} 3023}
@@ -3382,7 +3387,7 @@ static int pmcraid_build_passthrough_ioadls(
3382 } 3387 }
3383 3388
3384 /* setup the last descriptor */ 3389 /* setup the last descriptor */
3385 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 3390 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3386 3391
3387 return 0; 3392 return 0;
3388} 3393}
@@ -5040,7 +5045,7 @@ static int pmcraid_resume(struct pci_dev *pdev)
5040 rc = pci_enable_device(pdev); 5045 rc = pci_enable_device(pdev);
5041 5046
5042 if (rc) { 5047 if (rc) {
5043 pmcraid_err("pmcraid: Enable device failed\n"); 5048 dev_err(&pdev->dev, "resume: Enable device failed\n");
5044 return rc; 5049 return rc;
5045 } 5050 }
5046 5051
@@ -5054,7 +5059,7 @@ static int pmcraid_resume(struct pci_dev *pdev)
5054 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5059 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5055 5060
5056 if (rc != 0) { 5061 if (rc != 0) {
5057 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 5062 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
5058 goto disable_device; 5063 goto disable_device;
5059 } 5064 }
5060 5065
@@ -5063,7 +5068,8 @@ static int pmcraid_resume(struct pci_dev *pdev)
5063 rc = pmcraid_register_interrupt_handler(pinstance); 5068 rc = pmcraid_register_interrupt_handler(pinstance);
5064 5069
5065 if (rc) { 5070 if (rc) {
5066 pmcraid_err("resume: couldn't register interrupt handlers\n"); 5071 dev_err(&pdev->dev,
5072 "resume: couldn't register interrupt handlers\n");
5067 rc = -ENODEV; 5073 rc = -ENODEV;
5068 goto release_host; 5074 goto release_host;
5069 } 5075 }
@@ -5080,7 +5086,7 @@ static int pmcraid_resume(struct pci_dev *pdev)
5080 * state. 5086 * state.
5081 */ 5087 */
5082 if (pmcraid_reset_bringup(pinstance)) { 5088 if (pmcraid_reset_bringup(pinstance)) {
5083 pmcraid_err("couldn't initialize IOA \n"); 5089 dev_err(&pdev->dev, "couldn't initialize IOA \n");
5084 rc = -ENODEV; 5090 rc = -ENODEV;
5085 goto release_tasklets; 5091 goto release_tasklets;
5086 } 5092 }
@@ -5187,7 +5193,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5187 LIST_HEAD(old_res); 5193 LIST_HEAD(old_res);
5188 5194
5189 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED) 5195 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5190 dev_err(&pinstance->pdev->dev, "Require microcode download\n"); 5196 pmcraid_err("IOA requires microcode download\n");
5191 5197
5192 /* resource list is protected by pinstance->resource_lock. 5198 /* resource list is protected by pinstance->resource_lock.
5193 * init_res_table can be called from probe (user-thread) or runtime 5199 * init_res_table can be called from probe (user-thread) or runtime
@@ -5224,8 +5230,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5224 if (!found) { 5230 if (!found) {
5225 5231
5226 if (list_empty(&pinstance->free_res_q)) { 5232 if (list_empty(&pinstance->free_res_q)) {
5227 dev_err(&pinstance->pdev->dev, 5233 pmcraid_err("Too many devices attached\n");
5228 "Too many devices attached\n");
5229 break; 5234 break;
5230 } 5235 }
5231 5236
@@ -5309,7 +5314,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5309 cpu_to_le32(sizeof(struct pmcraid_config_table)); 5314 cpu_to_le32(sizeof(struct pmcraid_config_table));
5310 5315
5311 ioadl = &(ioarcb->add_data.u.ioadl[0]); 5316 ioadl = &(ioarcb->add_data.u.ioadl[0]);
5312 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 5317 ioadl->flags = IOADL_FLAGS_LAST_DESC;
5313 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr); 5318 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5314 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table)); 5319 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5315 5320
@@ -5442,7 +5447,7 @@ static int __devinit pmcraid_probe(
5442 rc = pmcraid_register_interrupt_handler(pinstance); 5447 rc = pmcraid_register_interrupt_handler(pinstance);
5443 5448
5444 if (rc) { 5449 if (rc) {
5445 pmcraid_err("couldn't register interrupt handler\n"); 5450 dev_err(&pdev->dev, "couldn't register interrupt handler\n");
5446 goto out_scsi_host_put; 5451 goto out_scsi_host_put;
5447 } 5452 }
5448 5453
@@ -5466,7 +5471,7 @@ static int __devinit pmcraid_probe(
5466 */ 5471 */
5467 pmcraid_info("starting IOA initialization sequence\n"); 5472 pmcraid_info("starting IOA initialization sequence\n");
5468 if (pmcraid_reset_bringup(pinstance)) { 5473 if (pmcraid_reset_bringup(pinstance)) {
5469 pmcraid_err("couldn't initialize IOA \n"); 5474 dev_err(&pdev->dev, "couldn't initialize IOA \n");
5470 rc = 1; 5475 rc = 1;
5471 goto out_release_bufs; 5476 goto out_release_bufs;
5472 } 5477 }
@@ -5534,7 +5539,6 @@ static struct pci_driver pmcraid_driver = {
5534 .shutdown = pmcraid_shutdown 5539 .shutdown = pmcraid_shutdown
5535}; 5540};
5536 5541
5537
5538/** 5542/**
5539 * pmcraid_init - module load entry point 5543 * pmcraid_init - module load entry point
5540 */ 5544 */
@@ -5566,7 +5570,6 @@ static int __init pmcraid_init(void)
5566 goto out_unreg_chrdev; 5570 goto out_unreg_chrdev;
5567 } 5571 }
5568 5572
5569
5570 error = pmcraid_netlink_init(); 5573 error = pmcraid_netlink_init();
5571 5574
5572 if (error) 5575 if (error)
@@ -5584,6 +5587,7 @@ static int __init pmcraid_init(void)
5584 5587
5585out_unreg_chrdev: 5588out_unreg_chrdev:
5586 unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); 5589 unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
5590
5587out_init: 5591out_init:
5588 return error; 5592 return error;
5589} 5593}
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 614b3a764fed..3441b3f90827 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -26,7 +26,6 @@
26#include <linux/completion.h> 26#include <linux/completion.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <linux/kref.h>
30#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
31#include <linux/cdev.h> 30#include <linux/cdev.h>
32#include <net/netlink.h> 31#include <net/netlink.h>
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 42b799abba57..e07b3617f019 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -568,7 +568,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
568 if (req == NULL) { 568 if (req == NULL) {
569 qla_printk(KERN_WARNING, ha, "could not allocate memory" 569 qla_printk(KERN_WARNING, ha, "could not allocate memory"
570 "for request que\n"); 570 "for request que\n");
571 goto que_failed; 571 goto failed;
572 } 572 }
573 573
574 req->length = REQUEST_ENTRY_CNT_24XX; 574 req->length = REQUEST_ENTRY_CNT_24XX;
@@ -632,6 +632,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
632 632
633que_failed: 633que_failed:
634 qla25xx_free_req_que(base_vha, req); 634 qla25xx_free_req_que(base_vha, req);
635failed:
635 return 0; 636 return 0;
636} 637}
637 638
@@ -659,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
659 if (rsp == NULL) { 660 if (rsp == NULL) {
660 qla_printk(KERN_WARNING, ha, "could not allocate memory for" 661 qla_printk(KERN_WARNING, ha, "could not allocate memory for"
661 " response que\n"); 662 " response que\n");
662 goto que_failed; 663 goto failed;
663 } 664 }
664 665
665 rsp->length = RESPONSE_ENTRY_CNT_MQ; 666 rsp->length = RESPONSE_ENTRY_CNT_MQ;
@@ -728,6 +729,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 729
729que_failed: 730que_failed:
730 qla25xx_free_rsp_que(base_vha, rsp); 731 qla25xx_free_rsp_que(base_vha, rsp);
732failed:
731 return 0; 733 return 0;
732} 734}
733 735
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b6e03074cb8f..dd098cad337b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -241,10 +241,7 @@ scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
241 */ 241 */
242struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) 242struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
243{ 243{
244 struct scsi_cmnd *cmd; 244 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
245 unsigned char *buf;
246
247 cmd = scsi_host_alloc_command(shost, gfp_mask);
248 245
249 if (unlikely(!cmd)) { 246 if (unlikely(!cmd)) {
250 unsigned long flags; 247 unsigned long flags;
@@ -258,9 +255,15 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
258 spin_unlock_irqrestore(&shost->free_list_lock, flags); 255 spin_unlock_irqrestore(&shost->free_list_lock, flags);
259 256
260 if (cmd) { 257 if (cmd) {
258 void *buf, *prot;
259
261 buf = cmd->sense_buffer; 260 buf = cmd->sense_buffer;
261 prot = cmd->prot_sdb;
262
262 memset(cmd, 0, sizeof(*cmd)); 263 memset(cmd, 0, sizeof(*cmd));
264
263 cmd->sense_buffer = buf; 265 cmd->sense_buffer = buf;
266 cmd->prot_sdb = prot;
264 } 267 }
265 } 268 }
266 269
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index fb9af207d61d..c4103bef41b5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -50,6 +50,7 @@
50#include <scsi/scsi_host.h> 50#include <scsi/scsi_host.h>
51#include <scsi/scsicam.h> 51#include <scsi/scsicam.h>
52#include <scsi/scsi_eh.h> 52#include <scsi/scsi_eh.h>
53#include <scsi/scsi_dbg.h>
53 54
54#include "sd.h" 55#include "sd.h"
55#include "scsi_logging.h" 56#include "scsi_logging.h"
@@ -64,6 +65,7 @@ static const char * scsi_debug_version_date = "20070104";
64#define PARAMETER_LIST_LENGTH_ERR 0x1a 65#define PARAMETER_LIST_LENGTH_ERR 0x1a
65#define INVALID_OPCODE 0x20 66#define INVALID_OPCODE 0x20
66#define ADDR_OUT_OF_RANGE 0x21 67#define ADDR_OUT_OF_RANGE 0x21
68#define INVALID_COMMAND_OPCODE 0x20
67#define INVALID_FIELD_IN_CDB 0x24 69#define INVALID_FIELD_IN_CDB 0x24
68#define INVALID_FIELD_IN_PARAM_LIST 0x26 70#define INVALID_FIELD_IN_PARAM_LIST 0x26
69#define POWERON_RESET 0x29 71#define POWERON_RESET 0x29
@@ -180,7 +182,7 @@ static int sdebug_sectors_per; /* sectors per cylinder */
180#define SDEBUG_SENSE_LEN 32 182#define SDEBUG_SENSE_LEN 32
181 183
182#define SCSI_DEBUG_CANQUEUE 255 184#define SCSI_DEBUG_CANQUEUE 255
183#define SCSI_DEBUG_MAX_CMD_LEN 16 185#define SCSI_DEBUG_MAX_CMD_LEN 32
184 186
185struct sdebug_dev_info { 187struct sdebug_dev_info {
186 struct list_head dev_list; 188 struct list_head dev_list;
@@ -296,9 +298,25 @@ static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
296} 298}
297 299
298static void get_data_transfer_info(unsigned char *cmd, 300static void get_data_transfer_info(unsigned char *cmd,
299 unsigned long long *lba, unsigned int *num) 301 unsigned long long *lba, unsigned int *num,
302 u32 *ei_lba)
300{ 303{
304 *ei_lba = 0;
305
301 switch (*cmd) { 306 switch (*cmd) {
307 case VARIABLE_LENGTH_CMD:
308 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
309 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
310 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
311 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
312
313 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
314 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
315
316 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
317 (u32)cmd[28] << 24;
318 break;
319
302 case WRITE_16: 320 case WRITE_16:
303 case READ_16: 321 case READ_16:
304 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | 322 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
@@ -1589,7 +1607,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
1589} 1607}
1590 1608
1591static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, 1609static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1592 unsigned int sectors) 1610 unsigned int sectors, u32 ei_lba)
1593{ 1611{
1594 unsigned int i, resid; 1612 unsigned int i, resid;
1595 struct scatterlist *psgl; 1613 struct scatterlist *psgl;
@@ -1636,13 +1654,23 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1636 return 0x01; 1654 return 0x01;
1637 } 1655 }
1638 1656
1639 if (scsi_debug_dif != SD_DIF_TYPE3_PROTECTION && 1657 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1640 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) { 1658 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1641 printk(KERN_ERR "%s: REF check failed on sector %lu\n", 1659 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1642 __func__, (unsigned long)sector); 1660 __func__, (unsigned long)sector);
1643 dif_errors++; 1661 dif_errors++;
1644 return 0x03; 1662 return 0x03;
1645 } 1663 }
1664
1665 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1666 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1667 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1668 __func__, (unsigned long)sector);
1669 dif_errors++;
1670 return 0x03;
1671 }
1672
1673 ei_lba++;
1646 } 1674 }
1647 1675
1648 resid = sectors * 8; /* Bytes of protection data to copy into sgl */ 1676 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
@@ -1670,7 +1698,8 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1670} 1698}
1671 1699
1672static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, 1700static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1673 unsigned int num, struct sdebug_dev_info *devip) 1701 unsigned int num, struct sdebug_dev_info *devip,
1702 u32 ei_lba)
1674{ 1703{
1675 unsigned long iflags; 1704 unsigned long iflags;
1676 int ret; 1705 int ret;
@@ -1699,7 +1728,7 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1699 1728
1700 /* DIX + T10 DIF */ 1729 /* DIX + T10 DIF */
1701 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 1730 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1702 int prot_ret = prot_verify_read(SCpnt, lba, num); 1731 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1703 1732
1704 if (prot_ret) { 1733 if (prot_ret) {
1705 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret); 1734 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
@@ -1735,7 +1764,7 @@ void dump_sector(unsigned char *buf, int len)
1735} 1764}
1736 1765
1737static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, 1766static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1738 unsigned int sectors) 1767 unsigned int sectors, u32 ei_lba)
1739{ 1768{
1740 int i, j, ret; 1769 int i, j, ret;
1741 struct sd_dif_tuple *sdt; 1770 struct sd_dif_tuple *sdt;
@@ -1749,11 +1778,6 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1749 1778
1750 sector = do_div(tmp_sec, sdebug_store_sectors); 1779 sector = do_div(tmp_sec, sdebug_store_sectors);
1751 1780
1752 if (((SCpnt->cmnd[1] >> 5) & 7) != 1) {
1753 printk(KERN_WARNING "scsi_debug: WRPROTECT != 1\n");
1754 return 0;
1755 }
1756
1757 BUG_ON(scsi_sg_count(SCpnt) == 0); 1781 BUG_ON(scsi_sg_count(SCpnt) == 0);
1758 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); 1782 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1759 1783
@@ -1808,7 +1832,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1808 goto out; 1832 goto out;
1809 } 1833 }
1810 1834
1811 if (scsi_debug_dif != SD_DIF_TYPE3_PROTECTION && 1835 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1812 be32_to_cpu(sdt->ref_tag) 1836 be32_to_cpu(sdt->ref_tag)
1813 != (start_sec & 0xffffffff)) { 1837 != (start_sec & 0xffffffff)) {
1814 printk(KERN_ERR 1838 printk(KERN_ERR
@@ -1819,6 +1843,16 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1819 goto out; 1843 goto out;
1820 } 1844 }
1821 1845
1846 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1847 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1848 printk(KERN_ERR
1849 "%s: REF check failed on sector %lu\n",
1850 __func__, (unsigned long)sector);
1851 ret = 0x03;
1852 dump_sector(daddr, scsi_debug_sector_size);
1853 goto out;
1854 }
1855
1822 /* Would be great to copy this in bigger 1856 /* Would be great to copy this in bigger
1823 * chunks. However, for the sake of 1857 * chunks. However, for the sake of
1824 * correctness we need to verify each sector 1858 * correctness we need to verify each sector
@@ -1832,6 +1866,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1832 sector = 0; /* Force wrap */ 1866 sector = 0; /* Force wrap */
1833 1867
1834 start_sec++; 1868 start_sec++;
1869 ei_lba++;
1835 daddr += scsi_debug_sector_size; 1870 daddr += scsi_debug_sector_size;
1836 ppage_offset += sizeof(struct sd_dif_tuple); 1871 ppage_offset += sizeof(struct sd_dif_tuple);
1837 } 1872 }
@@ -1853,7 +1888,8 @@ out:
1853} 1888}
1854 1889
1855static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, 1890static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1856 unsigned int num, struct sdebug_dev_info *devip) 1891 unsigned int num, struct sdebug_dev_info *devip,
1892 u32 ei_lba)
1857{ 1893{
1858 unsigned long iflags; 1894 unsigned long iflags;
1859 int ret; 1895 int ret;
@@ -1864,7 +1900,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1864 1900
1865 /* DIX + T10 DIF */ 1901 /* DIX + T10 DIF */
1866 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 1902 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1867 int prot_ret = prot_verify_write(SCpnt, lba, num); 1903 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
1868 1904
1869 if (prot_ret) { 1905 if (prot_ret) {
1870 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret); 1906 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
@@ -2872,11 +2908,12 @@ static int __init scsi_debug_init(void)
2872 2908
2873 case SD_DIF_TYPE0_PROTECTION: 2909 case SD_DIF_TYPE0_PROTECTION:
2874 case SD_DIF_TYPE1_PROTECTION: 2910 case SD_DIF_TYPE1_PROTECTION:
2911 case SD_DIF_TYPE2_PROTECTION:
2875 case SD_DIF_TYPE3_PROTECTION: 2912 case SD_DIF_TYPE3_PROTECTION:
2876 break; 2913 break;
2877 2914
2878 default: 2915 default:
2879 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1 or 3\n"); 2916 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
2880 return -EINVAL; 2917 return -EINVAL;
2881 } 2918 }
2882 2919
@@ -3121,6 +3158,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3121 int len, k; 3158 int len, k;
3122 unsigned int num; 3159 unsigned int num;
3123 unsigned long long lba; 3160 unsigned long long lba;
3161 u32 ei_lba;
3124 int errsts = 0; 3162 int errsts = 0;
3125 int target = SCpnt->device->id; 3163 int target = SCpnt->device->id;
3126 struct sdebug_dev_info *devip = NULL; 3164 struct sdebug_dev_info *devip = NULL;
@@ -3254,14 +3292,30 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3254 case READ_16: 3292 case READ_16:
3255 case READ_12: 3293 case READ_12:
3256 case READ_10: 3294 case READ_10:
3295 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3296 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3297 cmd[1] & 0xe0) {
3298 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3299 INVALID_COMMAND_OPCODE, 0);
3300 errsts = check_condition_result;
3301 break;
3302 }
3303
3304 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3305 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3306 (cmd[1] & 0xe0) == 0)
3307 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3308
3309 /* fall through */
3257 case READ_6: 3310 case READ_6:
3311read:
3258 errsts = check_readiness(SCpnt, 0, devip); 3312 errsts = check_readiness(SCpnt, 0, devip);
3259 if (errsts) 3313 if (errsts)
3260 break; 3314 break;
3261 if (scsi_debug_fake_rw) 3315 if (scsi_debug_fake_rw)
3262 break; 3316 break;
3263 get_data_transfer_info(cmd, &lba, &num); 3317 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3264 errsts = resp_read(SCpnt, lba, num, devip); 3318 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3265 if (inj_recovered && (0 == errsts)) { 3319 if (inj_recovered && (0 == errsts)) {
3266 mk_sense_buffer(devip, RECOVERED_ERROR, 3320 mk_sense_buffer(devip, RECOVERED_ERROR,
3267 THRESHOLD_EXCEEDED, 0); 3321 THRESHOLD_EXCEEDED, 0);
@@ -3288,14 +3342,30 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3288 case WRITE_16: 3342 case WRITE_16:
3289 case WRITE_12: 3343 case WRITE_12:
3290 case WRITE_10: 3344 case WRITE_10:
3345 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3346 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3347 cmd[1] & 0xe0) {
3348 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3349 INVALID_COMMAND_OPCODE, 0);
3350 errsts = check_condition_result;
3351 break;
3352 }
3353
3354 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3355 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3356 (cmd[1] & 0xe0) == 0)
3357 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3358
3359 /* fall through */
3291 case WRITE_6: 3360 case WRITE_6:
3361write:
3292 errsts = check_readiness(SCpnt, 0, devip); 3362 errsts = check_readiness(SCpnt, 0, devip);
3293 if (errsts) 3363 if (errsts)
3294 break; 3364 break;
3295 if (scsi_debug_fake_rw) 3365 if (scsi_debug_fake_rw)
3296 break; 3366 break;
3297 get_data_transfer_info(cmd, &lba, &num); 3367 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3298 errsts = resp_write(SCpnt, lba, num, devip); 3368 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3299 if (inj_recovered && (0 == errsts)) { 3369 if (inj_recovered && (0 == errsts)) {
3300 mk_sense_buffer(devip, RECOVERED_ERROR, 3370 mk_sense_buffer(devip, RECOVERED_ERROR,
3301 THRESHOLD_EXCEEDED, 0); 3371 THRESHOLD_EXCEEDED, 0);
@@ -3341,15 +3411,38 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3341 break; 3411 break;
3342 if (scsi_debug_fake_rw) 3412 if (scsi_debug_fake_rw)
3343 break; 3413 break;
3344 get_data_transfer_info(cmd, &lba, &num); 3414 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3345 errsts = resp_read(SCpnt, lba, num, devip); 3415 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3346 if (errsts) 3416 if (errsts)
3347 break; 3417 break;
3348 errsts = resp_write(SCpnt, lba, num, devip); 3418 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3349 if (errsts) 3419 if (errsts)
3350 break; 3420 break;
3351 errsts = resp_xdwriteread(SCpnt, lba, num, devip); 3421 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3352 break; 3422 break;
3423 case VARIABLE_LENGTH_CMD:
3424 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3425
3426 if ((cmd[10] & 0xe0) == 0)
3427 printk(KERN_ERR
3428 "Unprotected RD/WR to DIF device\n");
3429
3430 if (cmd[9] == READ_32) {
3431 BUG_ON(SCpnt->cmd_len < 32);
3432 goto read;
3433 }
3434
3435 if (cmd[9] == WRITE_32) {
3436 BUG_ON(SCpnt->cmd_len < 32);
3437 goto write;
3438 }
3439 }
3440
3441 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3442 INVALID_FIELD_IN_CDB, 0);
3443 errsts = check_condition_result;
3444 break;
3445
3353 default: 3446 default:
3354 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3447 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3355 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " 3448 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 877204daf549..1b0060b791e8 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -725,6 +725,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
725 case NEEDS_RETRY: 725 case NEEDS_RETRY:
726 case FAILED: 726 case FAILED:
727 break; 727 break;
728 case ADD_TO_MLQUEUE:
729 rtn = NEEDS_RETRY;
730 break;
728 default: 731 default:
729 rtn = FAILED; 732 rtn = FAILED;
730 break; 733 break;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index c44783801402..0547a7f44d42 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -317,6 +317,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
317out_device_destroy: 317out_device_destroy:
318 scsi_device_set_state(sdev, SDEV_DEL); 318 scsi_device_set_state(sdev, SDEV_DEL);
319 transport_destroy_device(&sdev->sdev_gendev); 319 transport_destroy_device(&sdev->sdev_gendev);
320 put_device(&sdev->sdev_dev);
320 put_device(&sdev->sdev_gendev); 321 put_device(&sdev->sdev_gendev);
321out: 322out:
322 if (display_failure_msg) 323 if (display_failure_msg)
@@ -957,6 +958,7 @@ static inline void scsi_destroy_sdev(struct scsi_device *sdev)
957 if (sdev->host->hostt->slave_destroy) 958 if (sdev->host->hostt->slave_destroy)
958 sdev->host->hostt->slave_destroy(sdev); 959 sdev->host->hostt->slave_destroy(sdev);
959 transport_destroy_device(&sdev->sdev_gendev); 960 transport_destroy_device(&sdev->sdev_gendev);
961 put_device(&sdev->sdev_dev);
960 put_device(&sdev->sdev_gendev); 962 put_device(&sdev->sdev_gendev);
961} 963}
962 964
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index fde54537d715..5c7eb63a19d1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -864,10 +864,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
864 goto clean_device; 864 goto clean_device;
865 } 865 }
866 866
867 /* take a reference for the sdev_dev; this is
868 * released by the sdev_class .release */
869 get_device(&sdev->sdev_gendev);
870
871 /* create queue files, which may be writable, depending on the host */ 867 /* create queue files, which may be writable, depending on the host */
872 if (sdev->host->hostt->change_queue_depth) 868 if (sdev->host->hostt->change_queue_depth)
873 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); 869 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw);
@@ -917,6 +913,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
917 913
918 device_del(&sdev->sdev_gendev); 914 device_del(&sdev->sdev_gendev);
919 transport_destroy_device(&sdev->sdev_gendev); 915 transport_destroy_device(&sdev->sdev_gendev);
916 put_device(&sdev->sdev_dev);
920 put_device(&sdev->sdev_gendev); 917 put_device(&sdev->sdev_gendev);
921 918
922 return error; 919 return error;
@@ -1065,7 +1062,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1065 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1062 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1066 1063
1067 device_initialize(&sdev->sdev_dev); 1064 device_initialize(&sdev->sdev_dev);
1068 sdev->sdev_dev.parent = &sdev->sdev_gendev; 1065 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1069 sdev->sdev_dev.class = &sdev_class; 1066 sdev->sdev_dev.class = &sdev_class;
1070 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d", 1067 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1071 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1068 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b98885de6876..c6f70dae9b2e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3586,6 +3586,7 @@ enum fc_dispatch_result {
3586 3586
3587/** 3587/**
3588 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD 3588 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3589 * @q: fc host request queue
3589 * @shost: scsi host rport attached to 3590 * @shost: scsi host rport attached to
3590 * @job: bsg job to be processed 3591 * @job: bsg job to be processed
3591 */ 3592 */
@@ -3655,6 +3656,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3655fail_host_msg: 3656fail_host_msg:
3656 /* return the errno failure code as the only status */ 3657 /* return the errno failure code as the only status */
3657 BUG_ON(job->reply_len < sizeof(uint32_t)); 3658 BUG_ON(job->reply_len < sizeof(uint32_t));
3659 job->reply->reply_payload_rcv_len = 0;
3658 job->reply->result = ret; 3660 job->reply->result = ret;
3659 job->reply_len = sizeof(uint32_t); 3661 job->reply_len = sizeof(uint32_t);
3660 fc_bsg_jobdone(job); 3662 fc_bsg_jobdone(job);
@@ -3693,6 +3695,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3693 3695
3694/** 3696/**
3695 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD 3697 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3698 * @q: rport request queue
3696 * @shost: scsi host rport attached to 3699 * @shost: scsi host rport attached to
3697 * @rport: rport request destined to 3700 * @rport: rport request destined to
3698 * @job: bsg job to be processed 3701 * @job: bsg job to be processed
@@ -3739,6 +3742,7 @@ check_bidi:
3739fail_rport_msg: 3742fail_rport_msg:
3740 /* return the errno failure code as the only status */ 3743 /* return the errno failure code as the only status */
3741 BUG_ON(job->reply_len < sizeof(uint32_t)); 3744 BUG_ON(job->reply_len < sizeof(uint32_t));
3745 job->reply->reply_payload_rcv_len = 0;
3742 job->reply->result = ret; 3746 job->reply->result = ret;
3743 job->reply_len = sizeof(uint32_t); 3747 job->reply_len = sizeof(uint32_t);
3744 fc_bsg_jobdone(job); 3748 fc_bsg_jobdone(job);
@@ -3795,6 +3799,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3795 /* check if we have the msgcode value at least */ 3799 /* check if we have the msgcode value at least */
3796 if (job->request_len < sizeof(uint32_t)) { 3800 if (job->request_len < sizeof(uint32_t)) {
3797 BUG_ON(job->reply_len < sizeof(uint32_t)); 3801 BUG_ON(job->reply_len < sizeof(uint32_t));
3802 job->reply->reply_payload_rcv_len = 0;
3798 job->reply->result = -ENOMSG; 3803 job->reply->result = -ENOMSG;
3799 job->reply_len = sizeof(uint32_t); 3804 job->reply_len = sizeof(uint32_t);
3800 fc_bsg_jobdone(job); 3805 fc_bsg_jobdone(job);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8dd96dcd716c..9093c7261f33 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -116,6 +116,9 @@ static DEFINE_IDA(sd_index_ida);
116 * object after last put) */ 116 * object after last put) */
117static DEFINE_MUTEX(sd_ref_mutex); 117static DEFINE_MUTEX(sd_ref_mutex);
118 118
119struct kmem_cache *sd_cdb_cache;
120mempool_t *sd_cdb_pool;
121
119static const char *sd_cache_types[] = { 122static const char *sd_cache_types[] = {
120 "write through", "none", "write back", 123 "write through", "none", "write back",
121 "write back, no read (daft)" 124 "write back, no read (daft)"
@@ -370,6 +373,31 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
370 mutex_unlock(&sd_ref_mutex); 373 mutex_unlock(&sd_ref_mutex);
371} 374}
372 375
376static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
377{
378 unsigned int prot_op = SCSI_PROT_NORMAL;
379 unsigned int dix = scsi_prot_sg_count(scmd);
380
381 if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
382 if (dif && dix)
383 prot_op = SCSI_PROT_READ_PASS;
384 else if (dif && !dix)
385 prot_op = SCSI_PROT_READ_STRIP;
386 else if (!dif && dix)
387 prot_op = SCSI_PROT_READ_INSERT;
388 } else {
389 if (dif && dix)
390 prot_op = SCSI_PROT_WRITE_PASS;
391 else if (dif && !dix)
392 prot_op = SCSI_PROT_WRITE_INSERT;
393 else if (!dif && dix)
394 prot_op = SCSI_PROT_WRITE_STRIP;
395 }
396
397 scsi_set_prot_op(scmd, prot_op);
398 scsi_set_prot_type(scmd, dif);
399}
400
373/** 401/**
374 * sd_init_command - build a scsi (read or write) command from 402 * sd_init_command - build a scsi (read or write) command from
375 * information in the request structure. 403 * information in the request structure.
@@ -388,6 +416,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
388 sector_t threshold; 416 sector_t threshold;
389 unsigned int this_count = blk_rq_sectors(rq); 417 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 418 int ret, host_dif;
419 unsigned char protect;
391 420
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 421 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
393 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 422 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
@@ -520,13 +549,49 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 549 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 550 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
522 if (host_dif) 551 if (host_dif)
523 SCpnt->cmnd[1] = 1 << 5; 552 protect = 1 << 5;
524 else 553 else
525 SCpnt->cmnd[1] = 0; 554 protect = 0;
526 555
527 if (block > 0xffffffff) { 556 if (host_dif == SD_DIF_TYPE2_PROTECTION) {
557 SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
558
559 if (unlikely(SCpnt->cmnd == NULL)) {
560 ret = BLKPREP_DEFER;
561 goto out;
562 }
563
564 SCpnt->cmd_len = SD_EXT_CDB_SIZE;
565 memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
566 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
567 SCpnt->cmnd[7] = 0x18;
568 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
569 SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
570
571 /* LBA */
572 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
573 SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
574 SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
575 SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
576 SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
577 SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
578 SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
579 SCpnt->cmnd[19] = (unsigned char) block & 0xff;
580
581 /* Expected Indirect LBA */
582 SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
583 SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
584 SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
585 SCpnt->cmnd[23] = (unsigned char) block & 0xff;
586
587 /* Transfer length */
588 SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
589 SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
590 SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
591 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
592 } else if (block > 0xffffffff) {
528 SCpnt->cmnd[0] += READ_16 - READ_6; 593 SCpnt->cmnd[0] += READ_16 - READ_6;
529 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; 594 SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
530 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; 595 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
531 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; 596 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
532 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; 597 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -547,7 +612,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
547 this_count = 0xffff; 612 this_count = 0xffff;
548 613
549 SCpnt->cmnd[0] += READ_10 - READ_6; 614 SCpnt->cmnd[0] += READ_10 - READ_6;
550 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; 615 SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
551 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 616 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
552 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; 617 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
553 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; 618 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -578,8 +643,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
578 643
579 /* If DIF or DIX is enabled, tell HBA how to handle request */ 644 /* If DIF or DIX is enabled, tell HBA how to handle request */
580 if (host_dif || scsi_prot_sg_count(SCpnt)) 645 if (host_dif || scsi_prot_sg_count(SCpnt))
581 sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt), 646 sd_prot_op(SCpnt, host_dif);
582 sdkp->protection_type);
583 647
584 /* 648 /*
585 * We shouldn't disconnect in the middle of a sector, so with a dumb 649 * We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -1023,6 +1087,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1023 int result = SCpnt->result; 1087 int result = SCpnt->result;
1024 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1088 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
1025 struct scsi_sense_hdr sshdr; 1089 struct scsi_sense_hdr sshdr;
1090 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
1026 int sense_valid = 0; 1091 int sense_valid = 0;
1027 int sense_deferred = 0; 1092 int sense_deferred = 0;
1028 1093
@@ -1084,6 +1149,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1084 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1149 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1085 sd_dif_complete(SCpnt, good_bytes); 1150 sd_dif_complete(SCpnt, good_bytes);
1086 1151
1152 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1153 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd)
1154 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1155
1087 return good_bytes; 1156 return good_bytes;
1088} 1157}
1089 1158
@@ -1238,34 +1307,28 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1238 u8 type; 1307 u8 type;
1239 1308
1240 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) 1309 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
1241 type = 0; 1310 return;
1242 else
1243 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1244 1311
1245 sdkp->protection_type = type; 1312 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1246 1313
1247 switch (type) { 1314 if (type == sdkp->protection_type || !sdkp->first_scan)
1248 case SD_DIF_TYPE0_PROTECTION: 1315 return;
1249 case SD_DIF_TYPE1_PROTECTION:
1250 case SD_DIF_TYPE3_PROTECTION:
1251 break;
1252 1316
1253 case SD_DIF_TYPE2_PROTECTION: 1317 sdkp->protection_type = type;
1254 sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \
1255 "protection which is currently unsupported. " \
1256 "Disabling disk!\n");
1257 goto disable;
1258 1318
1259 default: 1319 if (type > SD_DIF_TYPE3_PROTECTION) {
1260 sd_printk(KERN_ERR, sdkp, "formatted with unknown " \ 1320 sd_printk(KERN_ERR, sdkp, "formatted with unsupported " \
1261 "protection type %d. Disabling disk!\n", type); 1321 "protection type %u. Disabling disk!\n", type);
1262 goto disable; 1322 sdkp->capacity = 0;
1323 return;
1263 } 1324 }
1264 1325
1265 return; 1326 if (scsi_host_dif_capable(sdp->host, type))
1266 1327 sd_printk(KERN_NOTICE, sdkp,
1267disable: 1328 "Enabling DIF Type %u protection\n", type);
1268 sdkp->capacity = 0; 1329 else
1330 sd_printk(KERN_NOTICE, sdkp,
1331 "Disabling DIF Type %u protection\n", type);
1269} 1332}
1270 1333
1271static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 1334static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -2300,8 +2363,24 @@ static int __init init_sd(void)
2300 if (err) 2363 if (err)
2301 goto err_out_class; 2364 goto err_out_class;
2302 2365
2366 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
2367 0, 0, NULL);
2368 if (!sd_cdb_cache) {
2369 printk(KERN_ERR "sd: can't init extended cdb cache\n");
2370 goto err_out_class;
2371 }
2372
2373 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
2374 if (!sd_cdb_pool) {
2375 printk(KERN_ERR "sd: can't init extended cdb pool\n");
2376 goto err_out_cache;
2377 }
2378
2303 return 0; 2379 return 0;
2304 2380
2381err_out_cache:
2382 kmem_cache_destroy(sd_cdb_cache);
2383
2305err_out_class: 2384err_out_class:
2306 class_unregister(&sd_disk_class); 2385 class_unregister(&sd_disk_class);
2307err_out: 2386err_out:
@@ -2321,6 +2400,9 @@ static void __exit exit_sd(void)
2321 2400
2322 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 2401 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
2323 2402
2403 mempool_destroy(sd_cdb_pool);
2404 kmem_cache_destroy(sd_cdb_cache);
2405
2324 scsi_unregister_driver(&sd_template.gendrv); 2406 scsi_unregister_driver(&sd_template.gendrv);
2325 class_unregister(&sd_disk_class); 2407 class_unregister(&sd_disk_class);
2326 2408
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 8474b5bad3fe..e374804d26fb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -37,6 +37,11 @@
37 */ 37 */
38#define SD_LAST_BUGGY_SECTORS 8 38#define SD_LAST_BUGGY_SECTORS 8
39 39
40enum {
41 SD_EXT_CDB_SIZE = 32, /* Extended CDB size */
42 SD_MEMPOOL_SIZE = 2, /* CDB pool size */
43};
44
40struct scsi_disk { 45struct scsi_disk {
41 struct scsi_driver *driver; /* always &sd_template */ 46 struct scsi_driver *driver; /* always &sd_template */
42 struct scsi_device *device; 47 struct scsi_device *device;
@@ -101,16 +106,12 @@ struct sd_dif_tuple {
101 106
102#ifdef CONFIG_BLK_DEV_INTEGRITY 107#ifdef CONFIG_BLK_DEV_INTEGRITY
103 108
104extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int);
105extern void sd_dif_config_host(struct scsi_disk *); 109extern void sd_dif_config_host(struct scsi_disk *);
106extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int); 110extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
107extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); 111extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
108 112
109#else /* CONFIG_BLK_DEV_INTEGRITY */ 113#else /* CONFIG_BLK_DEV_INTEGRITY */
110 114
111static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c)
112{
113}
114static inline void sd_dif_config_host(struct scsi_disk *disk) 115static inline void sd_dif_config_host(struct scsi_disk *disk)
115{ 116{
116} 117}
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 82f14a9482d0..88da97745710 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -320,15 +320,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
320 dif = 0; dix = 1; 320 dif = 0; dix = 1;
321 } 321 }
322 322
323 if (type) {
324 if (dif)
325 sd_printk(KERN_NOTICE, sdkp,
326 "Enabling DIF Type %d protection\n", type);
327 else
328 sd_printk(KERN_NOTICE, sdkp,
329 "Disabling DIF Type %d protection\n", type);
330 }
331
332 if (!dix) 323 if (!dix)
333 return; 324 return;
334 325
@@ -360,62 +351,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
360} 351}
361 352
362/* 353/*
363 * DIF DMA operation magic decoder ring.
364 */
365void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type)
366{
367 int csum_convert, prot_op;
368
369 prot_op = 0;
370
371 /* Convert checksum? */
372 if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
373 csum_convert = 1;
374 else
375 csum_convert = 0;
376
377 BUG_ON(dif && (scmd->cmnd[0] == READ_6 || scmd->cmnd[0] == WRITE_6));
378
379 switch (scmd->cmnd[0]) {
380 case READ_6:
381 case READ_10:
382 case READ_12:
383 case READ_16:
384 if (dif && dix)
385 if (csum_convert)
386 prot_op = SCSI_PROT_READ_CONVERT;
387 else
388 prot_op = SCSI_PROT_READ_PASS;
389 else if (dif && !dix)
390 prot_op = SCSI_PROT_READ_STRIP;
391 else if (!dif && dix)
392 prot_op = SCSI_PROT_READ_INSERT;
393
394 break;
395
396 case WRITE_6:
397 case WRITE_10:
398 case WRITE_12:
399 case WRITE_16:
400 if (dif && dix)
401 if (csum_convert)
402 prot_op = SCSI_PROT_WRITE_CONVERT;
403 else
404 prot_op = SCSI_PROT_WRITE_PASS;
405 else if (dif && !dix)
406 prot_op = SCSI_PROT_WRITE_INSERT;
407 else if (!dif && dix)
408 prot_op = SCSI_PROT_WRITE_STRIP;
409
410 break;
411 }
412
413 scsi_set_prot_op(scmd, prot_op);
414 if (dif)
415 scsi_set_prot_type(scmd, type);
416}
417
418/*
419 * The virtual start sector is the one that was originally submitted 354 * The virtual start sector is the one that was originally submitted
420 * by the block layer. Due to partitioning, MD/DM cloning, etc. the 355 * by the block layer. Due to partitioning, MD/DM cloning, etc. the
421 * actual physical start sector is likely to be different. Remap 356 * actual physical start sector is likely to be different. Remap
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 848b59466850..040f751809ea 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1185,7 +1185,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1185 return VM_FAULT_SIGBUS; 1185 return VM_FAULT_SIGBUS;
1186} 1186}
1187 1187
1188static struct vm_operations_struct sg_mmap_vm_ops = { 1188static const struct vm_operations_struct sg_mmap_vm_ops = {
1189 .fault = sg_vma_fault, 1189 .fault = sg_vma_fault,
1190}; 1190};
1191 1191
@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1317 } 1317 }
1318} 1318}
1319 1319
1320static struct file_operations sg_fops = { 1320static const struct file_operations sg_fops = {
1321 .owner = THIS_MODULE, 1321 .owner = THIS_MODULE,
1322 .read = sg_read, 1322 .read = sg_read,
1323 .write = sg_write, 1323 .write = sg_write,
@@ -1708,11 +1708,6 @@ static int sg_finish_rem_req(Sg_request * srp)
1708 Sg_scatter_hold *req_schp = &srp->data; 1708 Sg_scatter_hold *req_schp = &srp->data;
1709 1709
1710 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); 1710 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1711 if (srp->res_used)
1712 sg_unlink_reserve(sfp, srp);
1713 else
1714 sg_remove_scat(req_schp);
1715
1716 if (srp->rq) { 1711 if (srp->rq) {
1717 if (srp->bio) 1712 if (srp->bio)
1718 ret = blk_rq_unmap_user(srp->bio); 1713 ret = blk_rq_unmap_user(srp->bio);
@@ -1720,6 +1715,11 @@ static int sg_finish_rem_req(Sg_request * srp)
1720 blk_put_request(srp->rq); 1715 blk_put_request(srp->rq);
1721 } 1716 }
1722 1717
1718 if (srp->res_used)
1719 sg_unlink_reserve(sfp, srp);
1720 else
1721 sg_remove_scat(req_schp);
1722
1723 sg_remove_request(sfp, srp); 1723 sg_remove_request(sfp, srp);
1724 1724
1725 return ret; 1725 return ret;
@@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2196 size_t count, loff_t *off); 2196 size_t count, loff_t *off);
2197static struct file_operations adio_fops = { 2197static const struct file_operations adio_fops = {
2198 /* .owner, .read and .llseek added in sg_proc_init() */ 2198 .owner = THIS_MODULE,
2199 .open = sg_proc_single_open_adio, 2199 .open = sg_proc_single_open_adio,
2200 .read = seq_read,
2201 .llseek = seq_lseek,
2200 .write = sg_proc_write_adio, 2202 .write = sg_proc_write_adio,
2201 .release = single_release, 2203 .release = single_release,
2202}; 2204};
@@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = {
2204static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2206static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2205static ssize_t sg_proc_write_dressz(struct file *filp, 2207static ssize_t sg_proc_write_dressz(struct file *filp,
2206 const char __user *buffer, size_t count, loff_t *off); 2208 const char __user *buffer, size_t count, loff_t *off);
2207static struct file_operations dressz_fops = { 2209static const struct file_operations dressz_fops = {
2210 .owner = THIS_MODULE,
2208 .open = sg_proc_single_open_dressz, 2211 .open = sg_proc_single_open_dressz,
2212 .read = seq_read,
2213 .llseek = seq_lseek,
2209 .write = sg_proc_write_dressz, 2214 .write = sg_proc_write_dressz,
2210 .release = single_release, 2215 .release = single_release,
2211}; 2216};
2212 2217
2213static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2218static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2214static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2219static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2215static struct file_operations version_fops = { 2220static const struct file_operations version_fops = {
2221 .owner = THIS_MODULE,
2216 .open = sg_proc_single_open_version, 2222 .open = sg_proc_single_open_version,
2223 .read = seq_read,
2224 .llseek = seq_lseek,
2217 .release = single_release, 2225 .release = single_release,
2218}; 2226};
2219 2227
2220static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2228static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2221static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2229static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2222static struct file_operations devhdr_fops = { 2230static const struct file_operations devhdr_fops = {
2231 .owner = THIS_MODULE,
2223 .open = sg_proc_single_open_devhdr, 2232 .open = sg_proc_single_open_devhdr,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2224 .release = single_release, 2235 .release = single_release,
2225}; 2236};
2226 2237
@@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file);
2229static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2240static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2230static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2241static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2231static void dev_seq_stop(struct seq_file *s, void *v); 2242static void dev_seq_stop(struct seq_file *s, void *v);
2232static struct file_operations dev_fops = { 2243static const struct file_operations dev_fops = {
2244 .owner = THIS_MODULE,
2233 .open = sg_proc_open_dev, 2245 .open = sg_proc_open_dev,
2246 .read = seq_read,
2247 .llseek = seq_lseek,
2234 .release = seq_release, 2248 .release = seq_release,
2235}; 2249};
2236static const struct seq_operations dev_seq_ops = { 2250static const struct seq_operations dev_seq_ops = {
@@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = {
2242 2256
2243static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2257static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2244static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2258static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2245static struct file_operations devstrs_fops = { 2259static const struct file_operations devstrs_fops = {
2260 .owner = THIS_MODULE,
2246 .open = sg_proc_open_devstrs, 2261 .open = sg_proc_open_devstrs,
2262 .read = seq_read,
2263 .llseek = seq_lseek,
2247 .release = seq_release, 2264 .release = seq_release,
2248}; 2265};
2249static const struct seq_operations devstrs_seq_ops = { 2266static const struct seq_operations devstrs_seq_ops = {
@@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = {
2255 2272
2256static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2273static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2257static int sg_proc_open_debug(struct inode *inode, struct file *file); 2274static int sg_proc_open_debug(struct inode *inode, struct file *file);
2258static struct file_operations debug_fops = { 2275static const struct file_operations debug_fops = {
2276 .owner = THIS_MODULE,
2259 .open = sg_proc_open_debug, 2277 .open = sg_proc_open_debug,
2278 .read = seq_read,
2279 .llseek = seq_lseek,
2260 .release = seq_release, 2280 .release = seq_release,
2261}; 2281};
2262static const struct seq_operations debug_seq_ops = { 2282static const struct seq_operations debug_seq_ops = {
@@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = {
2269 2289
2270struct sg_proc_leaf { 2290struct sg_proc_leaf {
2271 const char * name; 2291 const char * name;
2272 struct file_operations * fops; 2292 const struct file_operations * fops;
2273}; 2293};
2274 2294
2275static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2295static struct sg_proc_leaf sg_proc_leaf_arr[] = {
@@ -2295,9 +2315,6 @@ sg_proc_init(void)
2295 for (k = 0; k < num_leaves; ++k) { 2315 for (k = 0; k < num_leaves; ++k) {
2296 leaf = &sg_proc_leaf_arr[k]; 2316 leaf = &sg_proc_leaf_arr[k];
2297 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2317 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2298 leaf->fops->owner = THIS_MODULE;
2299 leaf->fops->read = seq_read;
2300 leaf->fops->llseek = seq_lseek;
2301 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2318 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2302 } 2319 }
2303 return 0; 2320 return 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index eb61f7a70e1d..d6f340f48a3b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -684,14 +684,20 @@ static void get_sectorsize(struct scsi_cd *cd)
684 cd->capacity = 0x1fffff; 684 cd->capacity = 0x1fffff;
685 sector_size = 2048; /* A guess, just in case */ 685 sector_size = 2048; /* A guess, just in case */
686 } else { 686 } else {
687#if 0 687 long last_written;
688 if (cdrom_get_last_written(&cd->cdi, 688
689 &cd->capacity)) 689 cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
690#endif 690 (buffer[2] << 8) | buffer[3]);
691 cd->capacity = 1 + ((buffer[0] << 24) | 691 /*
692 (buffer[1] << 16) | 692 * READ_CAPACITY doesn't return the correct size on
693 (buffer[2] << 8) | 693 * certain UDF media. If last_written is larger, use
694 buffer[3]); 694 * it instead.
695 *
696 * http://bugzilla.kernel.org/show_bug.cgi?id=9668
697 */
698 if (!cdrom_get_last_written(&cd->cdi, &last_written))
699 cd->capacity = max_t(long, cd->capacity, last_written);
700
695 sector_size = (buffer[4] << 24) | 701 sector_size = (buffer[4] << 24) |
696 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; 702 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
697 switch (sector_size) { 703 switch (sector_size) {
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b33d04250bbc..12d58a7ed6bc 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2859,11 +2859,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2859 ioctl_result = st_int_ioctl(STp, MTBSF, 1); 2859 ioctl_result = st_int_ioctl(STp, MTBSF, 1);
2860 2860
2861 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { 2861 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
2862 int old_block_size = STp->block_size;
2863 STp->block_size = arg & MT_ST_BLKSIZE_MASK; 2862 STp->block_size = arg & MT_ST_BLKSIZE_MASK;
2864 if (STp->block_size != 0) { 2863 if (STp->block_size != 0) {
2865 if (old_block_size == 0)
2866 normalize_buffer(STp->buffer);
2867 (STp->buffer)->buffer_blocks = 2864 (STp->buffer)->buffer_blocks =
2868 (STp->buffer)->buffer_size / STp->block_size; 2865 (STp->buffer)->buffer_size / STp->block_size;
2869 } 2866 }
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2209620d2349..737b4c960971 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port)
64 return (serial8250_reg.minor - 64) + port->line; 64 return (serial8250_reg.minor - 64) + port->line;
65} 65}
66 66
67static unsigned int skip_txen_test; /* force skip of txen test at init time */
68
67/* 69/*
68 * Debugging. 70 * Debugging.
69 */ 71 */
@@ -1087,7 +1089,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
1087 if (!up->port.iobase && !up->port.mapbase && !up->port.membase) 1089 if (!up->port.iobase && !up->port.mapbase && !up->port.membase)
1088 return; 1090 return;
1089 1091
1090 DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04x, 0x%p): ", 1092 DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
1091 serial_index(&up->port), up->port.iobase, up->port.membase); 1093 serial_index(&up->port), up->port.iobase, up->port.membase);
1092 1094
1093 /* 1095 /*
@@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port)
2108 is variable. So, let's just don't test if we receive 2110 is variable. So, let's just don't test if we receive
2109 TX irq. This way, we'll never enable UART_BUG_TXEN. 2111 TX irq. This way, we'll never enable UART_BUG_TXEN.
2110 */ 2112 */
2111 if (up->port.flags & UPF_NO_TXEN_TEST) 2113 if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
2112 goto dont_test_tx_en; 2114 goto dont_test_tx_en;
2113 2115
2114 /* 2116 /*
@@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
3248module_param(nr_uarts, uint, 0644); 3250module_param(nr_uarts, uint, 0644);
3249MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); 3251MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
3250 3252
3253module_param(skip_txen_test, uint, 0644);
3254MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
3255
3251#ifdef CONFIG_SERIAL_8250_RSA 3256#ifdef CONFIG_SERIAL_8250_RSA
3252module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); 3257module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
3253MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 3258MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index e7108e75653d..b28af13c45a1 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -1561,11 +1561,16 @@ enum pci_board_num_t {
1561 pbn_exar_XR17C152, 1561 pbn_exar_XR17C152,
1562 pbn_exar_XR17C154, 1562 pbn_exar_XR17C154,
1563 pbn_exar_XR17C158, 1563 pbn_exar_XR17C158,
1564 pbn_exar_ibm_saturn,
1564 pbn_pasemi_1682M, 1565 pbn_pasemi_1682M,
1565 pbn_ni8430_2, 1566 pbn_ni8430_2,
1566 pbn_ni8430_4, 1567 pbn_ni8430_4,
1567 pbn_ni8430_8, 1568 pbn_ni8430_8,
1568 pbn_ni8430_16, 1569 pbn_ni8430_16,
1570 pbn_ADDIDATA_PCIe_1_3906250,
1571 pbn_ADDIDATA_PCIe_2_3906250,
1572 pbn_ADDIDATA_PCIe_4_3906250,
1573 pbn_ADDIDATA_PCIe_8_3906250,
1569}; 1574};
1570 1575
1571/* 1576/*
@@ -2146,6 +2151,13 @@ static struct pciserial_board pci_boards[] __devinitdata = {
2146 .base_baud = 921600, 2151 .base_baud = 921600,
2147 .uart_offset = 0x200, 2152 .uart_offset = 0x200,
2148 }, 2153 },
2154 [pbn_exar_ibm_saturn] = {
2155 .flags = FL_BASE0,
2156 .num_ports = 1,
2157 .base_baud = 921600,
2158 .uart_offset = 0x200,
2159 },
2160
2149 /* 2161 /*
2150 * PA Semi PWRficient PA6T-1682M on-chip UART 2162 * PA Semi PWRficient PA6T-1682M on-chip UART
2151 */ 2163 */
@@ -2185,6 +2197,37 @@ static struct pciserial_board pci_boards[] __devinitdata = {
2185 .uart_offset = 0x10, 2197 .uart_offset = 0x10,
2186 .first_offset = 0x800, 2198 .first_offset = 0x800,
2187 }, 2199 },
2200 /*
2201 * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
2202 */
2203 [pbn_ADDIDATA_PCIe_1_3906250] = {
2204 .flags = FL_BASE0,
2205 .num_ports = 1,
2206 .base_baud = 3906250,
2207 .uart_offset = 0x200,
2208 .first_offset = 0x1000,
2209 },
2210 [pbn_ADDIDATA_PCIe_2_3906250] = {
2211 .flags = FL_BASE0,
2212 .num_ports = 2,
2213 .base_baud = 3906250,
2214 .uart_offset = 0x200,
2215 .first_offset = 0x1000,
2216 },
2217 [pbn_ADDIDATA_PCIe_4_3906250] = {
2218 .flags = FL_BASE0,
2219 .num_ports = 4,
2220 .base_baud = 3906250,
2221 .uart_offset = 0x200,
2222 .first_offset = 0x1000,
2223 },
2224 [pbn_ADDIDATA_PCIe_8_3906250] = {
2225 .flags = FL_BASE0,
2226 .num_ports = 8,
2227 .base_baud = 3906250,
2228 .uart_offset = 0x200,
2229 .first_offset = 0x1000,
2230 },
2188}; 2231};
2189 2232
2190static const struct pci_device_id softmodem_blacklist[] = { 2233static const struct pci_device_id softmodem_blacklist[] = {
@@ -2340,7 +2383,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
2340 break; 2383 break;
2341 2384
2342#ifdef SERIAL_DEBUG_PCI 2385#ifdef SERIAL_DEBUG_PCI
2343 printk(KERN_DEBUG "Setup PCI port: port %x, irq %d, type %d\n", 2386 printk(KERN_DEBUG "Setup PCI port: port %lx, irq %d, type %d\n",
2344 serial_port.iobase, serial_port.irq, serial_port.iotype); 2387 serial_port.iobase, serial_port.irq, serial_port.iotype);
2345#endif 2388#endif
2346 2389
@@ -2649,6 +2692,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2649 PCI_SUBVENDOR_ID_CONNECT_TECH, 2692 PCI_SUBVENDOR_ID_CONNECT_TECH,
2650 PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0, 2693 PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0,
2651 pbn_b0_8_1843200_200 }, 2694 pbn_b0_8_1843200_200 },
2695 { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
2696 PCI_VENDOR_ID_IBM, PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT,
2697 0, 0, pbn_exar_ibm_saturn },
2652 2698
2653 { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, 2699 { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530,
2654 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2700 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
@@ -3093,6 +3139,12 @@ static struct pci_device_id serial_pci_tbl[] = {
3093 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B, 3139 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
3094 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3140 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
3095 pbn_b0_bt_2_115200 }, 3141 pbn_b0_bt_2_115200 },
3142 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
3143 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
3144 pbn_b0_bt_2_115200 },
3145 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
3146 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
3147 pbn_b0_bt_2_115200 },
3096 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, 3148 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
3097 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3149 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
3098 pbn_b0_bt_4_460800 }, 3150 pbn_b0_bt_4_460800 },
@@ -3556,6 +3608,38 @@ static struct pci_device_id serial_pci_tbl[] = {
3556 0, 3608 0,
3557 pbn_b0_8_115200 }, 3609 pbn_b0_8_115200 },
3558 3610
3611 { PCI_VENDOR_ID_ADDIDATA,
3612 PCI_DEVICE_ID_ADDIDATA_APCIe7500,
3613 PCI_ANY_ID,
3614 PCI_ANY_ID,
3615 0,
3616 0,
3617 pbn_ADDIDATA_PCIe_4_3906250 },
3618
3619 { PCI_VENDOR_ID_ADDIDATA,
3620 PCI_DEVICE_ID_ADDIDATA_APCIe7420,
3621 PCI_ANY_ID,
3622 PCI_ANY_ID,
3623 0,
3624 0,
3625 pbn_ADDIDATA_PCIe_2_3906250 },
3626
3627 { PCI_VENDOR_ID_ADDIDATA,
3628 PCI_DEVICE_ID_ADDIDATA_APCIe7300,
3629 PCI_ANY_ID,
3630 PCI_ANY_ID,
3631 0,
3632 0,
3633 pbn_ADDIDATA_PCIe_1_3906250 },
3634
3635 { PCI_VENDOR_ID_ADDIDATA,
3636 PCI_DEVICE_ID_ADDIDATA_APCIe7800,
3637 PCI_ANY_ID,
3638 PCI_ANY_ID,
3639 0,
3640 0,
3641 pbn_ADDIDATA_PCIe_8_3906250 },
3642
3559 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, 3643 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
3560 PCI_VENDOR_ID_IBM, 0x0299, 3644 PCI_VENDOR_ID_IBM, 0x0299,
3561 0, 0, pbn_b0_bt_2_115200 }, 3645 0, 0, pbn_b0_bt_2_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 03422ce878cf..e52257257279 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE
862 862
863config SERIAL_UARTLITE 863config SERIAL_UARTLITE
864 tristate "Xilinx uartlite serial port support" 864 tristate "Xilinx uartlite serial port support"
865 depends on PPC32 || MICROBLAZE 865 depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
866 select SERIAL_CORE 866 select SERIAL_CORE
867 help 867 help
868 Say Y here if you want to use the Xilinx uartlite serial controller. 868 Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1458,4 +1458,23 @@ config SERIAL_TIMBERDALE
1458 ---help--- 1458 ---help---
1459 Add support for UART controller on timberdale. 1459 Add support for UART controller on timberdale.
1460 1460
1461config SERIAL_BCM63XX
1462 tristate "bcm63xx serial port support"
1463 select SERIAL_CORE
1464 depends on BCM63XX
1465 help
1466 If you have a bcm63xx CPU, you can enable its onboard
1467 serial port by enabling this options.
1468
1469 To compile this driver as a module, choose M here: the
1470 module will be called bcm963xx_uart.
1471
1472config SERIAL_BCM63XX_CONSOLE
1473 bool "Console on bcm63xx serial port"
1474 depends on SERIAL_BCM63XX=y
1475 select SERIAL_CORE_CONSOLE
1476 help
1477 If you have enabled the serial port on the bcm63xx CPU
1478 you can make it the console by answering Y to this option.
1479
1461endmenu 1480endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 97f6fcc8b432..d21d5dd5d048 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
34obj-$(CONFIG_SERIAL_PXA) += pxa.o 34obj-$(CONFIG_SERIAL_PXA) += pxa.o
35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o 35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
37obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
37obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o 38obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
38obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o 39obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
39obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o 40obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 3551c5cb7094..9d948bccafaf 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1531,7 +1531,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1531 void *data; 1531 void *data;
1532 int ret; 1532 int ret;
1533 1533
1534 BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); 1534 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
1535 1535
1536 port = &atmel_ports[pdev->id]; 1536 port = &atmel_ports[pdev->id];
1537 port->backup_imr = 0; 1537 port->backup_imr = 0;
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
new file mode 100644
index 000000000000..beddaa6e9069
--- /dev/null
+++ b/drivers/serial/bcm63xx_uart.c
@@ -0,0 +1,890 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Derived from many drivers using generic_serial interface.
7 *
8 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
9 *
10 * Serial driver for BCM63xx integrated UART.
11 *
12 * Hardware flow control was _not_ tested since I only have RX/TX on
13 * my board.
14 */
15
16#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
17#define SUPPORT_SYSRQ
18#endif
19
20#include <linux/kernel.h>
21#include <linux/platform_device.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/module.h>
25#include <linux/console.h>
26#include <linux/clk.h>
27#include <linux/tty.h>
28#include <linux/tty_flip.h>
29#include <linux/sysrq.h>
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32
33#include <bcm63xx_clk.h>
34#include <bcm63xx_irq.h>
35#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h>
37
38#define BCM63XX_NR_UARTS 1
39
40static struct uart_port ports[BCM63XX_NR_UARTS];
41
42/*
43 * rx interrupt mask / stat
44 *
45 * mask:
46 * - rx fifo full
47 * - rx fifo above threshold
48 * - rx fifo not empty for too long
49 */
50#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
51 UART_IR_MASK(UART_IR_RXTHRESH) | \
52 UART_IR_MASK(UART_IR_RXTIMEOUT))
53
54#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
55 UART_IR_STAT(UART_IR_RXTHRESH) | \
56 UART_IR_STAT(UART_IR_RXTIMEOUT))
57
58/*
59 * tx interrupt mask / stat
60 *
61 * mask:
62 * - tx fifo empty
63 * - tx fifo below threshold
64 */
65#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
66 UART_IR_MASK(UART_IR_TXTRESH))
67
68#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
69 UART_IR_STAT(UART_IR_TXTRESH))
70
71/*
72 * external input interrupt
73 *
74 * mask: any edge on CTS, DCD
75 */
76#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
77 UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
78
79/*
80 * handy uart register accessor
81 */
82static inline unsigned int bcm_uart_readl(struct uart_port *port,
83 unsigned int offset)
84{
85 return bcm_readl(port->membase + offset);
86}
87
88static inline void bcm_uart_writel(struct uart_port *port,
89 unsigned int value, unsigned int offset)
90{
91 bcm_writel(value, port->membase + offset);
92}
93
94/*
95 * serial core request to check if uart tx fifo is empty
96 */
97static unsigned int bcm_uart_tx_empty(struct uart_port *port)
98{
99 unsigned int val;
100
101 val = bcm_uart_readl(port, UART_IR_REG);
102 return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
103}
104
105/*
106 * serial core request to set RTS and DTR pin state and loopback mode
107 */
108static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
109{
110 unsigned int val;
111
112 val = bcm_uart_readl(port, UART_MCTL_REG);
113 val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
114 /* invert of written value is reflected on the pin */
115 if (!(mctrl & TIOCM_DTR))
116 val |= UART_MCTL_DTR_MASK;
117 if (!(mctrl & TIOCM_RTS))
118 val |= UART_MCTL_RTS_MASK;
119 bcm_uart_writel(port, val, UART_MCTL_REG);
120
121 val = bcm_uart_readl(port, UART_CTL_REG);
122 if (mctrl & TIOCM_LOOP)
123 val |= UART_CTL_LOOPBACK_MASK;
124 else
125 val &= ~UART_CTL_LOOPBACK_MASK;
126 bcm_uart_writel(port, val, UART_CTL_REG);
127}
128
129/*
130 * serial core request to return RI, CTS, DCD and DSR pin state
131 */
132static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
133{
134 unsigned int val, mctrl;
135
136 mctrl = 0;
137 val = bcm_uart_readl(port, UART_EXTINP_REG);
138 if (val & UART_EXTINP_RI_MASK)
139 mctrl |= TIOCM_RI;
140 if (val & UART_EXTINP_CTS_MASK)
141 mctrl |= TIOCM_CTS;
142 if (val & UART_EXTINP_DCD_MASK)
143 mctrl |= TIOCM_CD;
144 if (val & UART_EXTINP_DSR_MASK)
145 mctrl |= TIOCM_DSR;
146 return mctrl;
147}
148
149/*
150 * serial core request to disable tx ASAP (used for flow control)
151 */
152static void bcm_uart_stop_tx(struct uart_port *port)
153{
154 unsigned int val;
155
156 val = bcm_uart_readl(port, UART_CTL_REG);
157 val &= ~(UART_CTL_TXEN_MASK);
158 bcm_uart_writel(port, val, UART_CTL_REG);
159
160 val = bcm_uart_readl(port, UART_IR_REG);
161 val &= ~UART_TX_INT_MASK;
162 bcm_uart_writel(port, val, UART_IR_REG);
163}
164
165/*
166 * serial core request to (re)enable tx
167 */
168static void bcm_uart_start_tx(struct uart_port *port)
169{
170 unsigned int val;
171
172 val = bcm_uart_readl(port, UART_IR_REG);
173 val |= UART_TX_INT_MASK;
174 bcm_uart_writel(port, val, UART_IR_REG);
175
176 val = bcm_uart_readl(port, UART_CTL_REG);
177 val |= UART_CTL_TXEN_MASK;
178 bcm_uart_writel(port, val, UART_CTL_REG);
179}
180
181/*
182 * serial core request to stop rx, called before port shutdown
183 */
184static void bcm_uart_stop_rx(struct uart_port *port)
185{
186 unsigned int val;
187
188 val = bcm_uart_readl(port, UART_IR_REG);
189 val &= ~UART_RX_INT_MASK;
190 bcm_uart_writel(port, val, UART_IR_REG);
191}
192
193/*
194 * serial core request to enable modem status interrupt reporting
195 */
196static void bcm_uart_enable_ms(struct uart_port *port)
197{
198 unsigned int val;
199
200 val = bcm_uart_readl(port, UART_IR_REG);
201 val |= UART_IR_MASK(UART_IR_EXTIP);
202 bcm_uart_writel(port, val, UART_IR_REG);
203}
204
205/*
206 * serial core request to start/stop emitting break char
207 */
208static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
209{
210 unsigned long flags;
211 unsigned int val;
212
213 spin_lock_irqsave(&port->lock, flags);
214
215 val = bcm_uart_readl(port, UART_CTL_REG);
216 if (ctl)
217 val |= UART_CTL_XMITBRK_MASK;
218 else
219 val &= ~UART_CTL_XMITBRK_MASK;
220 bcm_uart_writel(port, val, UART_CTL_REG);
221
222 spin_unlock_irqrestore(&port->lock, flags);
223}
224
225/*
226 * return port type in string format
227 */
228static const char *bcm_uart_type(struct uart_port *port)
229{
230 return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
231}
232
233/*
234 * read all chars in rx fifo and send them to core
235 */
236static void bcm_uart_do_rx(struct uart_port *port)
237{
238 struct tty_struct *tty;
239 unsigned int max_count;
240
241 /* limit number of char read in interrupt, should not be
242 * higher than fifo size anyway since we're much faster than
243 * serial port */
244 max_count = 32;
245 tty = port->info->port.tty;
246 do {
247 unsigned int iestat, c, cstat;
248 char flag;
249
250 /* get overrun/fifo empty information from ier
251 * register */
252 iestat = bcm_uart_readl(port, UART_IR_REG);
253 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
254 break;
255
256 cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
257 port->icount.rx++;
258 flag = TTY_NORMAL;
259 c &= 0xff;
260
261 if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
262 /* do stats first */
263 if (cstat & UART_FIFO_BRKDET_MASK) {
264 port->icount.brk++;
265 if (uart_handle_break(port))
266 continue;
267 }
268
269 if (cstat & UART_FIFO_PARERR_MASK)
270 port->icount.parity++;
271 if (cstat & UART_FIFO_FRAMEERR_MASK)
272 port->icount.frame++;
273
274 /* update flag wrt read_status_mask */
275 cstat &= port->read_status_mask;
276 if (cstat & UART_FIFO_BRKDET_MASK)
277 flag = TTY_BREAK;
278 if (cstat & UART_FIFO_FRAMEERR_MASK)
279 flag = TTY_FRAME;
280 if (cstat & UART_FIFO_PARERR_MASK)
281 flag = TTY_PARITY;
282 }
283
284 if (uart_handle_sysrq_char(port, c))
285 continue;
286
287 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
288 port->icount.overrun++;
289 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
290 }
291
292 if ((cstat & port->ignore_status_mask) == 0)
293 tty_insert_flip_char(tty, c, flag);
294
295 } while (--max_count);
296
297 tty_flip_buffer_push(tty);
298}
299
300/*
301 * fill tx fifo with chars to send, stop when fifo is about to be full
302 * or when all chars have been sent.
303 */
304static void bcm_uart_do_tx(struct uart_port *port)
305{
306 struct circ_buf *xmit;
307 unsigned int val, max_count;
308
309 if (port->x_char) {
310 bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
311 port->icount.tx++;
312 port->x_char = 0;
313 return;
314 }
315
316 if (uart_tx_stopped(port)) {
317 bcm_uart_stop_tx(port);
318 return;
319 }
320
321 xmit = &port->info->xmit;
322 if (uart_circ_empty(xmit))
323 goto txq_empty;
324
325 val = bcm_uart_readl(port, UART_MCTL_REG);
326 val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
327 max_count = port->fifosize - val;
328
329 while (max_count--) {
330 unsigned int c;
331
332 c = xmit->buf[xmit->tail];
333 bcm_uart_writel(port, c, UART_FIFO_REG);
334 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
335 port->icount.tx++;
336 if (uart_circ_empty(xmit))
337 break;
338 }
339
340 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
341 uart_write_wakeup(port);
342
343 if (uart_circ_empty(xmit))
344 goto txq_empty;
345 return;
346
347txq_empty:
348 /* nothing to send, disable transmit interrupt */
349 val = bcm_uart_readl(port, UART_IR_REG);
350 val &= ~UART_TX_INT_MASK;
351 bcm_uart_writel(port, val, UART_IR_REG);
352 return;
353}
354
355/*
356 * process uart interrupt
357 */
358static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
359{
360 struct uart_port *port;
361 unsigned int irqstat;
362
363 port = dev_id;
364 spin_lock(&port->lock);
365
366 irqstat = bcm_uart_readl(port, UART_IR_REG);
367 if (irqstat & UART_RX_INT_STAT)
368 bcm_uart_do_rx(port);
369
370 if (irqstat & UART_TX_INT_STAT)
371 bcm_uart_do_tx(port);
372
373 if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
374 unsigned int estat;
375
376 estat = bcm_uart_readl(port, UART_EXTINP_REG);
377 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
378 uart_handle_cts_change(port,
379 estat & UART_EXTINP_CTS_MASK);
380 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
381 uart_handle_dcd_change(port,
382 estat & UART_EXTINP_DCD_MASK);
383 }
384
385 spin_unlock(&port->lock);
386 return IRQ_HANDLED;
387}
388
389/*
390 * enable rx & tx operation on uart
391 */
392static void bcm_uart_enable(struct uart_port *port)
393{
394 unsigned int val;
395
396 val = bcm_uart_readl(port, UART_CTL_REG);
397 val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
398 bcm_uart_writel(port, val, UART_CTL_REG);
399}
400
401/*
402 * disable rx & tx operation on uart
403 */
404static void bcm_uart_disable(struct uart_port *port)
405{
406 unsigned int val;
407
408 val = bcm_uart_readl(port, UART_CTL_REG);
409 val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
410 UART_CTL_RXEN_MASK);
411 bcm_uart_writel(port, val, UART_CTL_REG);
412}
413
414/*
415 * clear all unread data in rx fifo and unsent data in tx fifo
416 */
417static void bcm_uart_flush(struct uart_port *port)
418{
419 unsigned int val;
420
421 /* empty rx and tx fifo */
422 val = bcm_uart_readl(port, UART_CTL_REG);
423 val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
424 bcm_uart_writel(port, val, UART_CTL_REG);
425
426 /* read any pending char to make sure all irq status are
427 * cleared */
428 (void)bcm_uart_readl(port, UART_FIFO_REG);
429}
430
431/*
432 * serial core request to initialize uart and start rx operation
433 */
434static int bcm_uart_startup(struct uart_port *port)
435{
436 unsigned int val;
437 int ret;
438
439 /* mask all irq and flush port */
440 bcm_uart_disable(port);
441 bcm_uart_writel(port, 0, UART_IR_REG);
442 bcm_uart_flush(port);
443
444 /* clear any pending external input interrupt */
445 (void)bcm_uart_readl(port, UART_EXTINP_REG);
446
447 /* set rx/tx fifo thresh to fifo half size */
448 val = bcm_uart_readl(port, UART_MCTL_REG);
449 val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
450 val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
451 val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
452 bcm_uart_writel(port, val, UART_MCTL_REG);
453
454 /* set rx fifo timeout to 1 char time */
455 val = bcm_uart_readl(port, UART_CTL_REG);
456 val &= ~UART_CTL_RXTMOUTCNT_MASK;
457 val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
458 bcm_uart_writel(port, val, UART_CTL_REG);
459
460 /* report any edge on dcd and cts */
461 val = UART_EXTINP_INT_MASK;
462 val |= UART_EXTINP_DCD_NOSENSE_MASK;
463 val |= UART_EXTINP_CTS_NOSENSE_MASK;
464 bcm_uart_writel(port, val, UART_EXTINP_REG);
465
466 /* register irq and enable rx interrupts */
467 ret = request_irq(port->irq, bcm_uart_interrupt, 0,
468 bcm_uart_type(port), port);
469 if (ret)
470 return ret;
471 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
472 bcm_uart_enable(port);
473 return 0;
474}
475
476/*
477 * serial core request to flush & disable uart
478 */
479static void bcm_uart_shutdown(struct uart_port *port)
480{
481 unsigned long flags;
482
483 spin_lock_irqsave(&port->lock, flags);
484 bcm_uart_writel(port, 0, UART_IR_REG);
485 spin_unlock_irqrestore(&port->lock, flags);
486
487 bcm_uart_disable(port);
488 bcm_uart_flush(port);
489 free_irq(port->irq, port);
490}
491
492/*
493 * serial core request to change current uart setting
494 */
495static void bcm_uart_set_termios(struct uart_port *port,
496 struct ktermios *new,
497 struct ktermios *old)
498{
499 unsigned int ctl, baud, quot, ier;
500 unsigned long flags;
501
502 spin_lock_irqsave(&port->lock, flags);
503
504 /* disable uart while changing speed */
505 bcm_uart_disable(port);
506 bcm_uart_flush(port);
507
508 /* update Control register */
509 ctl = bcm_uart_readl(port, UART_CTL_REG);
510 ctl &= ~UART_CTL_BITSPERSYM_MASK;
511
512 switch (new->c_cflag & CSIZE) {
513 case CS5:
514 ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
515 break;
516 case CS6:
517 ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
518 break;
519 case CS7:
520 ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
521 break;
522 default:
523 ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
524 break;
525 }
526
527 ctl &= ~UART_CTL_STOPBITS_MASK;
528 if (new->c_cflag & CSTOPB)
529 ctl |= UART_CTL_STOPBITS_2;
530 else
531 ctl |= UART_CTL_STOPBITS_1;
532
533 ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
534 if (new->c_cflag & PARENB)
535 ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
536 ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
537 if (new->c_cflag & PARODD)
538 ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
539 bcm_uart_writel(port, ctl, UART_CTL_REG);
540
541 /* update Baudword register */
542 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
543 quot = uart_get_divisor(port, baud) - 1;
544 bcm_uart_writel(port, quot, UART_BAUD_REG);
545
546 /* update Interrupt register */
547 ier = bcm_uart_readl(port, UART_IR_REG);
548
549 ier &= ~UART_IR_MASK(UART_IR_EXTIP);
550 if (UART_ENABLE_MS(port, new->c_cflag))
551 ier |= UART_IR_MASK(UART_IR_EXTIP);
552
553 bcm_uart_writel(port, ier, UART_IR_REG);
554
555 /* update read/ignore mask */
556 port->read_status_mask = UART_FIFO_VALID_MASK;
557 if (new->c_iflag & INPCK) {
558 port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
559 port->read_status_mask |= UART_FIFO_PARERR_MASK;
560 }
561 if (new->c_iflag & (BRKINT))
562 port->read_status_mask |= UART_FIFO_BRKDET_MASK;
563
564 port->ignore_status_mask = 0;
565 if (new->c_iflag & IGNPAR)
566 port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
567 if (new->c_iflag & IGNBRK)
568 port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
569 if (!(new->c_cflag & CREAD))
570 port->ignore_status_mask |= UART_FIFO_VALID_MASK;
571
572 uart_update_timeout(port, new->c_cflag, baud);
573 bcm_uart_enable(port);
574 spin_unlock_irqrestore(&port->lock, flags);
575}
576
577/*
578 * serial core request to claim uart iomem
579 */
580static int bcm_uart_request_port(struct uart_port *port)
581{
582 unsigned int size;
583
584 size = RSET_UART_SIZE;
585 if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
586 dev_err(port->dev, "Memory region busy\n");
587 return -EBUSY;
588 }
589
590 port->membase = ioremap(port->mapbase, size);
591 if (!port->membase) {
592 dev_err(port->dev, "Unable to map registers\n");
593 release_mem_region(port->mapbase, size);
594 return -EBUSY;
595 }
596 return 0;
597}
598
599/*
600 * serial core request to release uart iomem
601 */
602static void bcm_uart_release_port(struct uart_port *port)
603{
604 release_mem_region(port->mapbase, RSET_UART_SIZE);
605 iounmap(port->membase);
606}
607
608/*
609 * serial core request to do any port required autoconfiguration
610 */
611static void bcm_uart_config_port(struct uart_port *port, int flags)
612{
613 if (flags & UART_CONFIG_TYPE) {
614 if (bcm_uart_request_port(port))
615 return;
616 port->type = PORT_BCM63XX;
617 }
618}
619
620/*
621 * serial core request to check that port information in serinfo are
622 * suitable
623 */
624static int bcm_uart_verify_port(struct uart_port *port,
625 struct serial_struct *serinfo)
626{
627 if (port->type != PORT_BCM63XX)
628 return -EINVAL;
629 if (port->irq != serinfo->irq)
630 return -EINVAL;
631 if (port->iotype != serinfo->io_type)
632 return -EINVAL;
633 if (port->mapbase != (unsigned long)serinfo->iomem_base)
634 return -EINVAL;
635 return 0;
636}
637
/* serial core callbacks: operations the serial core invokes on this
 * driver; see Documentation/serial/driver for the contract of each */
static struct uart_ops bcm_uart_ops = {
	.tx_empty	= bcm_uart_tx_empty,
	.get_mctrl	= bcm_uart_get_mctrl,
	.set_mctrl	= bcm_uart_set_mctrl,
	.start_tx	= bcm_uart_start_tx,
	.stop_tx	= bcm_uart_stop_tx,
	.stop_rx	= bcm_uart_stop_rx,
	.enable_ms	= bcm_uart_enable_ms,
	.break_ctl	= bcm_uart_break_ctl,
	.startup	= bcm_uart_startup,
	.shutdown	= bcm_uart_shutdown,
	.set_termios	= bcm_uart_set_termios,
	.type		= bcm_uart_type,
	.release_port	= bcm_uart_release_port,
	.request_port	= bcm_uart_request_port,
	.config_port	= bcm_uart_config_port,
	.verify_port	= bcm_uart_verify_port,
};
657
658
659
660#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
661static inline void wait_for_xmitr(struct uart_port *port)
662{
663 unsigned int tmout;
664
665 /* Wait up to 10ms for the character(s) to be sent. */
666 tmout = 10000;
667 while (--tmout) {
668 unsigned int val;
669
670 val = bcm_uart_readl(port, UART_IR_REG);
671 if (val & UART_IR_STAT(UART_IR_TXEMPTY))
672 break;
673 udelay(1);
674 }
675
676 /* Wait up to 1s for flow control if necessary */
677 if (port->flags & UPF_CONS_FLOW) {
678 tmout = 1000000;
679 while (--tmout) {
680 unsigned int val;
681
682 val = bcm_uart_readl(port, UART_EXTINP_REG);
683 if (val & UART_EXTINP_CTS_MASK)
684 break;
685 udelay(1);
686 }
687 }
688}
689
690/*
691 * output given char
692 */
693static void bcm_console_putchar(struct uart_port *port, int ch)
694{
695 wait_for_xmitr(port);
696 bcm_uart_writel(port, ch, UART_FIFO_REG);
697}
698
699/*
700 * console core request to output given string
701 */
702static void bcm_console_write(struct console *co, const char *s,
703 unsigned int count)
704{
705 struct uart_port *port;
706 unsigned long flags;
707 int locked;
708
709 port = &ports[co->index];
710
711 local_irq_save(flags);
712 if (port->sysrq) {
713 /* bcm_uart_interrupt() already took the lock */
714 locked = 0;
715 } else if (oops_in_progress) {
716 locked = spin_trylock(&port->lock);
717 } else {
718 spin_lock(&port->lock);
719 locked = 1;
720 }
721
722 /* call helper to deal with \r\n */
723 uart_console_write(port, s, count, bcm_console_putchar);
724
725 /* and wait for char to be transmitted */
726 wait_for_xmitr(port);
727
728 if (locked)
729 spin_unlock(&port->lock);
730 local_irq_restore(flags);
731}
732
733/*
734 * console core request to setup given console, find matching uart
735 * port and setup it.
736 */
737static int bcm_console_setup(struct console *co, char *options)
738{
739 struct uart_port *port;
740 int baud = 9600;
741 int bits = 8;
742 int parity = 'n';
743 int flow = 'n';
744
745 if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
746 return -EINVAL;
747 port = &ports[co->index];
748 if (!port->membase)
749 return -ENODEV;
750 if (options)
751 uart_parse_options(options, &baud, &parity, &bits, &flow);
752
753 return uart_set_options(port, co, baud, parity, bits, flow);
754}
755
/* forward declaration: the console needs the uart driver it is
 * attached to, which is defined below */
static struct uart_driver bcm_uart_driver;

static struct console bcm63xx_console = {
	.name		= "ttyS",
	.write		= bcm_console_write,
	.device		= uart_console_device,
	.setup		= bcm_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,		/* pick index from console= cmdline */
	.data		= &bcm_uart_driver,
};
767
/* make the console available as early as console initcalls run */
static int __init bcm63xx_console_init(void)
{
	register_console(&bcm63xx_console);
	return 0;
}

console_initcall(bcm63xx_console_init);
775
776#define BCM63XX_CONSOLE (&bcm63xx_console)
777#else
778#define BCM63XX_CONSOLE NULL
779#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
780
/* uart driver registered with the serial core; exposes the port as
 * /dev/ttyS0 (major TTY_MAJOR, minor 64) */
static struct uart_driver bcm_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "bcm63xx_uart",
	.dev_name	= "ttyS",
	.major		= TTY_MAJOR,
	.minor		= 64,
	.nr		= 1,
	.cons		= BCM63XX_CONSOLE,
};
790
791/*
792 * platform driver probe/remove callback
793 */
794static int __devinit bcm_uart_probe(struct platform_device *pdev)
795{
796 struct resource *res_mem, *res_irq;
797 struct uart_port *port;
798 struct clk *clk;
799 int ret;
800
801 if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
802 return -EINVAL;
803
804 if (ports[pdev->id].membase)
805 return -EBUSY;
806
807 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
808 if (!res_mem)
809 return -ENODEV;
810
811 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
812 if (!res_irq)
813 return -ENODEV;
814
815 clk = clk_get(&pdev->dev, "periph");
816 if (IS_ERR(clk))
817 return -ENODEV;
818
819 port = &ports[pdev->id];
820 memset(port, 0, sizeof(*port));
821 port->iotype = UPIO_MEM;
822 port->mapbase = res_mem->start;
823 port->irq = res_irq->start;
824 port->ops = &bcm_uart_ops;
825 port->flags = UPF_BOOT_AUTOCONF;
826 port->dev = &pdev->dev;
827 port->fifosize = 16;
828 port->uartclk = clk_get_rate(clk) / 2;
829 clk_put(clk);
830
831 ret = uart_add_one_port(&bcm_uart_driver, port);
832 if (ret) {
833 kfree(port);
834 return ret;
835 }
836 platform_set_drvdata(pdev, port);
837 return 0;
838}
839
840static int __devexit bcm_uart_remove(struct platform_device *pdev)
841{
842 struct uart_port *port;
843
844 port = platform_get_drvdata(pdev);
845 uart_remove_one_port(&bcm_uart_driver, port);
846 platform_set_drvdata(pdev, NULL);
847 /* mark port as free */
848 ports[pdev->id].membase = 0;
849 return 0;
850}
851
852/*
853 * platform driver stuff
854 */
855static struct platform_driver bcm_uart_platform_driver = {
856 .probe = bcm_uart_probe,
857 .remove = __devexit_p(bcm_uart_remove),
858 .driver = {
859 .owner = THIS_MODULE,
860 .name = "bcm63xx_uart",
861 },
862};
863
864static int __init bcm_uart_init(void)
865{
866 int ret;
867
868 ret = uart_register_driver(&bcm_uart_driver);
869 if (ret)
870 return ret;
871
872 ret = platform_driver_register(&bcm_uart_platform_driver);
873 if (ret)
874 uart_unregister_driver(&bcm_uart_driver);
875
876 return ret;
877}
878
static void __exit bcm_uart_exit(void)
{
	/* unbind devices first, then drop the serial core registration */
	platform_driver_unregister(&bcm_uart_platform_driver);
	uart_unregister_driver(&bcm_uart_driver);
}
884
885module_init(bcm_uart_init);
886module_exit(bcm_uart_exit);
887
888MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
889MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
890MODULE_LICENSE("GPL");
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 8d349b23249a..300cea768d74 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -649,7 +649,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
649 u8 *p; 649 u8 *p;
650 int count; 650 int count;
651 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; 651 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
652 struct circ_buf *xmit = &port->info->xmit; 652 struct circ_buf *xmit = &port->state->xmit;
653 653
654 /* Handle xon/xoff */ 654 /* Handle xon/xoff */
655 if (port->x_char) { 655 if (port->x_char) {
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 7be52fe288eb..31f172397af3 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -18,6 +18,7 @@ static char *serial_version = "$Revision: 1.25 $";
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/tty_flip.h> 19#include <linux/tty_flip.h>
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/smp_lock.h>
21#include <linux/string.h> 22#include <linux/string.h>
22#include <linux/fcntl.h> 23#include <linux/fcntl.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 2d7feecaf492..0028b6f89ce6 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port)
307 if (port < 4) { 307 if (port < 4) {
308 temp = readl(stop_proc[port].global_control_reg); 308 temp = readl(stop_proc[port].global_control_reg);
309 temp = 309 temp =
310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; 310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
311 writel(temp, stop_proc[port].global_control_reg); 311 writel(temp, stop_proc[port].global_control_reg);
312 312
313 /* write flush */ 313 /* write flush */
@@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port)
336 if (port < 4) { 336 if (port < 4) {
337 temp = readl(start_proc[port].global_control_reg); 337 temp = readl(start_proc[port].global_control_reg);
338 temp = 338 temp =
339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; 339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
340 writel(temp, start_proc[port].global_control_reg); 340 writel(temp, start_proc[port].global_control_reg);
341 341
342 /* write flush */ 342 /* write flush */
@@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port)
509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n"); 509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n");
510 } 510 }
511 511
512 if (new_page != NULL) 512 if (new_page != NULL)
513 pci_free_consistent(dev, 4096, new_page, temp_pci); 513 pci_free_consistent(dev, 4096, new_page, temp_pci);
514} 514}
515 515
516static int startup(struct icom_port *icom_port) 516static int startup(struct icom_port *icom_port)
@@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev,
1493 const struct pci_device_id *ent) 1493 const struct pci_device_id *ent)
1494{ 1494{
1495 int index; 1495 int index;
1496 unsigned int command_reg; 1496 unsigned int command_reg;
1497 int retval; 1497 int retval;
1498 struct icom_adapter *icom_adapter; 1498 struct icom_adapter *icom_adapter;
1499 struct icom_port *icom_port; 1499 struct icom_port *icom_port;
1500 1500
1501 retval = pci_enable_device(dev); 1501 retval = pci_enable_device(dev);
1502 if (retval) { 1502 if (retval) {
1503 dev_err(&dev->dev, "Device enable FAILED\n"); 1503 dev_err(&dev->dev, "Device enable FAILED\n");
1504 return retval; 1504 return retval;
1505 } 1505 }
1506 1506
1507 if ( (retval = pci_request_regions(dev, "icom"))) { 1507 if ( (retval = pci_request_regions(dev, "icom"))) {
@@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 return retval; 1510 return retval;
1511 } 1511 }
1512 1512
1513 pci_set_master(dev); 1513 pci_set_master(dev);
1514 1514
1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { 1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
1516 dev_err(&dev->dev, "PCI Config read FAILED\n"); 1516 dev_err(&dev->dev, "PCI Config read FAILED\n");
1517 return retval; 1517 return retval;
1518 } 1518 }
1519 1519
1520 pci_write_config_dword(dev, PCI_COMMAND, 1520 pci_write_config_dword(dev, PCI_COMMAND,
1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER 1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1523 1523
1524 if (ent->driver_data == ADAPTER_V1) { 1524 if (ent->driver_data == ADAPTER_V1) {
1525 pci_write_config_dword(dev, 0x44, 0x8300830A); 1525 pci_write_config_dword(dev, 0x44, 0x8300830A);
1526 } else { 1526 } else {
1527 pci_write_config_dword(dev, 0x44, 0x42004200); 1527 pci_write_config_dword(dev, 0x44, 0x42004200);
1528 pci_write_config_dword(dev, 0x48, 0x42004200); 1528 pci_write_config_dword(dev, 0x48, 0x42004200);
1529 } 1529 }
1530 1530
1531 1531
1532 retval = icom_alloc_adapter(&icom_adapter); 1532 retval = icom_alloc_adapter(&icom_adapter);
@@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev,
1536 goto probe_exit0; 1536 goto probe_exit0;
1537 } 1537 }
1538 1538
1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0); 1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
1540 icom_adapter->pci_dev = dev; 1540 icom_adapter->pci_dev = dev;
1541 icom_adapter->version = ent->driver_data; 1541 icom_adapter->version = ent->driver_data;
1542 icom_adapter->subsystem_id = ent->subdevice; 1542 icom_adapter->subsystem_id = ent->subdevice;
1543 1543
1544 1544
1545 retval = icom_init_ports(icom_adapter); 1545 retval = icom_init_ports(icom_adapter);
@@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1548 goto probe_exit1; 1548 goto probe_exit1;
1549 } 1549 }
1550 1550
1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0); 1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
1552 1552
1553 if (!icom_adapter->base_addr) 1553 if (!icom_adapter->base_addr)
1554 goto probe_exit1; 1554 goto probe_exit1;
@@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1562 1562
1563 retval = icom_load_ports(icom_adapter); 1563 retval = icom_load_ports(icom_adapter);
1564 1564
1565 for (index = 0; index < icom_adapter->numb_ports; index++) { 1565 for (index = 0; index < icom_adapter->numb_ports; index++) {
1566 icom_port = &icom_adapter->port_info[index]; 1566 icom_port = &icom_adapter->port_info[index];
1567 1567
1568 if (icom_port->status == ICOM_PORT_ACTIVE) { 1568 if (icom_port->status == ICOM_PORT_ACTIVE) {
@@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1579 icom_port->status = ICOM_PORT_OFF; 1579 icom_port->status = ICOM_PORT_OFF;
1580 dev_err(&dev->dev, "Device add failed\n"); 1580 dev_err(&dev->dev, "Device add failed\n");
1581 } else 1581 } else
1582 dev_info(&dev->dev, "Device added\n"); 1582 dev_info(&dev->dev, "Device added\n");
1583 } 1583 }
1584 } 1584 }
1585 1585
@@ -1595,9 +1595,7 @@ probe_exit0:
1595 pci_release_regions(dev); 1595 pci_release_regions(dev);
1596 pci_disable_device(dev); 1596 pci_disable_device(dev);
1597 1597
1598 return retval; 1598 return retval;
1599
1600
1601} 1599}
1602 1600
1603static void __devexit icom_remove(struct pci_dev *dev) 1601static void __devexit icom_remove(struct pci_dev *dev)
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index d7bcd074d383..7ce9e9f567a3 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -705,7 +705,7 @@ mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
705 return -EINVAL; 705 return -EINVAL;
706 706
707 if ((ser->irq != port->irq) || 707 if ((ser->irq != port->irq) ||
708 (ser->io_type != SERIAL_IO_MEM) || 708 (ser->io_type != UPIO_MEM) ||
709 (ser->baud_base != port->uartclk) || 709 (ser->baud_base != port->uartclk) ||
710 (ser->iomem_base != (void *)port->mapbase) || 710 (ser->iomem_base != (void *)port->mapbase) ||
711 (ser->hub6 != 0)) 711 (ser->hub6 != 0))
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 6443b7ff274a..b8629d74f6a2 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -726,9 +726,10 @@ static struct uart_driver serial_pxa_reg = {
726 .cons = PXA_CONSOLE, 726 .cons = PXA_CONSOLE,
727}; 727};
728 728
729static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state) 729#ifdef CONFIG_PM
730static int serial_pxa_suspend(struct device *dev)
730{ 731{
731 struct uart_pxa_port *sport = platform_get_drvdata(dev); 732 struct uart_pxa_port *sport = dev_get_drvdata(dev);
732 733
733 if (sport) 734 if (sport)
734 uart_suspend_port(&serial_pxa_reg, &sport->port); 735 uart_suspend_port(&serial_pxa_reg, &sport->port);
@@ -736,9 +737,9 @@ static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state)
736 return 0; 737 return 0;
737} 738}
738 739
739static int serial_pxa_resume(struct platform_device *dev) 740static int serial_pxa_resume(struct device *dev)
740{ 741{
741 struct uart_pxa_port *sport = platform_get_drvdata(dev); 742 struct uart_pxa_port *sport = dev_get_drvdata(dev);
742 743
743 if (sport) 744 if (sport)
744 uart_resume_port(&serial_pxa_reg, &sport->port); 745 uart_resume_port(&serial_pxa_reg, &sport->port);
@@ -746,6 +747,12 @@ static int serial_pxa_resume(struct platform_device *dev)
746 return 0; 747 return 0;
747} 748}
748 749
750static struct dev_pm_ops serial_pxa_pm_ops = {
751 .suspend = serial_pxa_suspend,
752 .resume = serial_pxa_resume,
753};
754#endif
755
749static int serial_pxa_probe(struct platform_device *dev) 756static int serial_pxa_probe(struct platform_device *dev)
750{ 757{
751 struct uart_pxa_port *sport; 758 struct uart_pxa_port *sport;
@@ -825,11 +832,12 @@ static struct platform_driver serial_pxa_driver = {
825 .probe = serial_pxa_probe, 832 .probe = serial_pxa_probe,
826 .remove = serial_pxa_remove, 833 .remove = serial_pxa_remove,
827 834
828 .suspend = serial_pxa_suspend,
829 .resume = serial_pxa_resume,
830 .driver = { 835 .driver = {
831 .name = "pxa2xx-uart", 836 .name = "pxa2xx-uart",
832 .owner = THIS_MODULE, 837 .owner = THIS_MODULE,
838#ifdef CONFIG_PM
839 .pm = &serial_pxa_pm_ops,
840#endif
833 }, 841 },
834}; 842};
835 843
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 7f5e26873220..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -638,7 +638,7 @@ static void __init sa1100_init_ports(void)
638 PPSR |= PPC_TXD1 | PPC_TXD3; 638 PPSR |= PPC_TXD1 | PPC_TXD3;
639} 639}
640 640
641void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns) 641void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
642{ 642{
643 if (fns->get_mctrl) 643 if (fns->get_mctrl)
644 sa1100_pops.get_mctrl = fns->get_mctrl; 644 sa1100_pops.get_mctrl = fns->get_mctrl;
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 1689bda1d13b..dcc72444e8e7 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1270,6 +1270,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1270 1270
1271 BUG_ON(!kernel_locked()); 1271 BUG_ON(!kernel_locked());
1272 1272
1273 if (!state)
1274 return;
1275
1273 uport = state->uart_port; 1276 uport = state->uart_port;
1274 port = &state->port; 1277 port = &state->port;
1275 1278
@@ -1316,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1316 */ 1319 */
1317 if (port->flags & ASYNC_INITIALIZED) { 1320 if (port->flags & ASYNC_INITIALIZED) {
1318 unsigned long flags; 1321 unsigned long flags;
1319 spin_lock_irqsave(&port->lock, flags); 1322 spin_lock_irqsave(&uport->lock, flags);
1320 uport->ops->stop_rx(uport); 1323 uport->ops->stop_rx(uport);
1321 spin_unlock_irqrestore(&port->lock, flags); 1324 spin_unlock_irqrestore(&uport->lock, flags);
1322 /* 1325 /*
1323 * Before we drop DTR, make sure the UART transmitter 1326 * Before we drop DTR, make sure the UART transmitter
1324 * has completely drained; this is especially 1327 * has completely drained; this is especially
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 0f7cf4c453e6..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); 221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
222} 222}
223 223
224static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
225{
226 return container_of(port, struct uart_txx9_port, port);
227}
228
224static void serial_txx9_stop_tx(struct uart_port *port) 229static void serial_txx9_stop_tx(struct uart_port *port)
225{ 230{
226 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 231 struct uart_txx9_port *up = to_uart_txx9_port(port);
227 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 232 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
228} 233}
229 234
230static void serial_txx9_start_tx(struct uart_port *port) 235static void serial_txx9_start_tx(struct uart_port *port)
231{ 236{
232 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 237 struct uart_txx9_port *up = to_uart_txx9_port(port);
233 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 238 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
234} 239}
235 240
236static void serial_txx9_stop_rx(struct uart_port *port) 241static void serial_txx9_stop_rx(struct uart_port *port)
237{ 242{
238 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 243 struct uart_txx9_port *up = to_uart_txx9_port(port);
239 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; 244 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
240} 245}
241 246
@@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port)
246 251
247static void serial_txx9_initialize(struct uart_port *port) 252static void serial_txx9_initialize(struct uart_port *port)
248{ 253{
249 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 254 struct uart_txx9_port *up = to_uart_txx9_port(port);
250 unsigned int tmout = 10000; 255 unsigned int tmout = 10000;
251 256
252 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); 257 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
@@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
414 419
415static unsigned int serial_txx9_tx_empty(struct uart_port *port) 420static unsigned int serial_txx9_tx_empty(struct uart_port *port)
416{ 421{
417 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 422 struct uart_txx9_port *up = to_uart_txx9_port(port);
418 unsigned long flags; 423 unsigned long flags;
419 unsigned int ret; 424 unsigned int ret;
420 425
@@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port)
427 432
428static unsigned int serial_txx9_get_mctrl(struct uart_port *port) 433static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
429{ 434{
430 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 435 struct uart_txx9_port *up = to_uart_txx9_port(port);
431 unsigned int ret; 436 unsigned int ret;
432 437
433 /* no modem control lines */ 438 /* no modem control lines */
@@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
440 445
441static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) 446static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
442{ 447{
443 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 448 struct uart_txx9_port *up = to_uart_txx9_port(port);
444 449
445 if (mctrl & TIOCM_RTS) 450 if (mctrl & TIOCM_RTS)
446 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); 451 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
@@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
450 455
451static void serial_txx9_break_ctl(struct uart_port *port, int break_state) 456static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
452{ 457{
453 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 458 struct uart_txx9_port *up = to_uart_txx9_port(port);
454 unsigned long flags; 459 unsigned long flags;
455 460
456 spin_lock_irqsave(&up->port.lock, flags); 461 spin_lock_irqsave(&up->port.lock, flags);
@@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
494{ 499{
495 unsigned int ier; 500 unsigned int ier;
496 unsigned char c; 501 unsigned char c;
497 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 502 struct uart_txx9_port *up = to_uart_txx9_port(port);
498 503
499 /* 504 /*
500 * First save the IER then disable the interrupts 505 * First save the IER then disable the interrupts
@@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
520static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) 525static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
521{ 526{
522 unsigned int ier; 527 unsigned int ier;
523 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 528 struct uart_txx9_port *up = to_uart_txx9_port(port);
524 529
525 /* 530 /*
526 * First save the IER then disable the interrupts 531 * First save the IER then disable the interrupts
@@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
551 556
552static int serial_txx9_startup(struct uart_port *port) 557static int serial_txx9_startup(struct uart_port *port)
553{ 558{
554 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 559 struct uart_txx9_port *up = to_uart_txx9_port(port);
555 unsigned long flags; 560 unsigned long flags;
556 int retval; 561 int retval;
557 562
@@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port)
596 601
597static void serial_txx9_shutdown(struct uart_port *port) 602static void serial_txx9_shutdown(struct uart_port *port)
598{ 603{
599 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 604 struct uart_txx9_port *up = to_uart_txx9_port(port);
600 unsigned long flags; 605 unsigned long flags;
601 606
602 /* 607 /*
@@ -636,7 +641,7 @@ static void
636serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, 641serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
637 struct ktermios *old) 642 struct ktermios *old)
638{ 643{
639 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 644 struct uart_txx9_port *up = to_uart_txx9_port(port);
640 unsigned int cval, fcr = 0; 645 unsigned int cval, fcr = 0;
641 unsigned long flags; 646 unsigned long flags;
642 unsigned int baud, quot; 647 unsigned int baud, quot;
@@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up)
814 819
815static void serial_txx9_release_port(struct uart_port *port) 820static void serial_txx9_release_port(struct uart_port *port)
816{ 821{
817 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 822 struct uart_txx9_port *up = to_uart_txx9_port(port);
818 serial_txx9_release_resource(up); 823 serial_txx9_release_resource(up);
819} 824}
820 825
821static int serial_txx9_request_port(struct uart_port *port) 826static int serial_txx9_request_port(struct uart_port *port)
822{ 827{
823 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 828 struct uart_txx9_port *up = to_uart_txx9_port(port);
824 return serial_txx9_request_resource(up); 829 return serial_txx9_request_resource(up);
825} 830}
826 831
827static void serial_txx9_config_port(struct uart_port *port, int uflags) 832static void serial_txx9_config_port(struct uart_port *port, int uflags)
828{ 833{
829 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 834 struct uart_txx9_port *up = to_uart_txx9_port(port);
830 int ret; 835 int ret;
831 836
832 /* 837 /*
@@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
897 902
898static void serial_txx9_console_putchar(struct uart_port *port, int ch) 903static void serial_txx9_console_putchar(struct uart_port *port, int ch)
899{ 904{
900 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 905 struct uart_txx9_port *up = to_uart_txx9_port(port);
901 906
902 wait_for_xmitr(up); 907 wait_for_xmitr(up);
903 sio_out(up, TXX9_SITFIFO, ch); 908 sio_out(up, TXX9_SITFIFO, ch);
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 85119fb7cb50..6498bd1fb6dd 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1143,7 +1143,7 @@ static void serial_console_write(struct console *co, const char *s,
1143 while ((sci_in(port, SCxSR) & bits) != bits) 1143 while ((sci_in(port, SCxSR) & bits) != bits)
1144 cpu_relax(); 1144 cpu_relax();
1145 1145
1146 if (sci_port->disable); 1146 if (sci_port->disable)
1147 sci_port->disable(port); 1147 sci_port->disable(port);
1148} 1148}
1149 1149
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index d3b496800477..b204a0929139 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly;
90 */ 90 */
91static u32 sfi_use_ioremap __read_mostly; 91static u32 sfi_use_ioremap __read_mostly;
92 92
93static void __iomem *sfi_map_memory(u64 phys, u32 size) 93/*
94 * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function
95 * and introduces section mismatch. So use __ref to make it calm.
96 */
97static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
94{ 98{
95 if (!phys || !size) 99 if (!phys || !size)
96 return NULL; 100 return NULL;
@@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size)
101 return early_ioremap(phys, size); 105 return early_ioremap(phys, size);
102} 106}
103 107
104static void sfi_unmap_memory(void __iomem *virt, u32 size) 108static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
105{ 109{
106 if (!virt || !size) 110 if (!virt || !size)
107 return; 111 return;
@@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa,
125 * sfi_verify_table() 129 * sfi_verify_table()
126 * Sanity check table lengh, calculate checksum 130 * Sanity check table lengh, calculate checksum
127 */ 131 */
128static __init int sfi_verify_table(struct sfi_table_header *table) 132static int sfi_verify_table(struct sfi_table_header *table)
129{ 133{
130 134
131 u8 checksum = 0; 135 u8 checksum = 0;
@@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th,
213 * the mapped virt address will be returned, and the virt space 217 * the mapped virt address will be returned, and the virt space
214 * will be released by call sfi_put_table() later 218 * will be released by call sfi_put_table() later
215 * 219 *
220 * This two cases are from two different functions with two different
221 * sections and causes section mismatch warning. So use __ref to tell
222 * modpost not to make any noise.
223 *
216 * Return value: 224 * Return value:
217 * NULL: when can't find a table matching the key 225 * NULL: when can't find a table matching the key
218 * ERR_PTR(error): error value 226 * ERR_PTR(error): error value
219 * virt table address: when a matched table is found 227 * virt table address: when a matched table is found
220 */ 228 */
221struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) 229struct sfi_table_header *
230 __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
222{ 231{
223 struct sfi_table_header *th; 232 struct sfi_table_header *th;
224 void *ret = NULL; 233 void *ret = NULL;
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6d7a3f82c54b..21a118269cac 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += mxc_spi.o 20obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index c0f950a7cbec..ff5bbb9c43c9 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -532,7 +532,7 @@ static void restore_state(struct pl022 *pl022)
532 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ 532 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
533 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ 533 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
534 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 534 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
535 GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ 535 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
536 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ 536 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
537 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ 537 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
538 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ 538 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
@@ -1247,8 +1247,8 @@ static int verify_controller_parameters(struct pl022 *pl022,
1247 return -EINVAL; 1247 return -EINVAL;
1248 } 1248 }
1249 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { 1249 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1250 if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) 1250 if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
1251 && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { 1251 && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
1252 dev_err(chip_info->dev, 1252 dev_err(chip_info->dev,
1253 "Clock Phase is configured incorrectly\n"); 1253 "Clock Phase is configured incorrectly\n");
1254 return -EINVAL; 1254 return -EINVAL;
@@ -1485,7 +1485,7 @@ static int pl022_setup(struct spi_device *spi)
1485 chip_info->data_size = SSP_DATA_BITS_12; 1485 chip_info->data_size = SSP_DATA_BITS_12;
1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; 1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; 1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1488 chip_info->clk_phase = SSP_CLK_FALLING_EDGE; 1488 chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
1489 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; 1489 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1490 chip_info->ctrl_len = SSP_BITS_8; 1490 chip_info->ctrl_len = SSP_BITS_8;
1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; 1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
@@ -1826,7 +1826,7 @@ static struct amba_id pl022_ids[] = {
1826 * ST Micro derivative, this has 32bit wide 1826 * ST Micro derivative, this has 32bit wide
1827 * and 32 locations deep TX/RX FIFO 1827 * and 32 locations deep TX/RX FIFO
1828 */ 1828 */
1829 .id = 0x00108022, 1829 .id = 0x01080022,
1830 .mask = 0xffffffff, 1830 .mask = 0xffffffff,
1831 .data = &vendor_st, 1831 .data = &vendor_st,
1832 }, 1832 },
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 31dd56f0dae9..c8c2b693ffac 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1668,10 +1668,9 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1668} 1668}
1669 1669
1670#ifdef CONFIG_PM 1670#ifdef CONFIG_PM
1671 1671static int pxa2xx_spi_suspend(struct device *dev)
1672static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1673{ 1672{
1674 struct driver_data *drv_data = platform_get_drvdata(pdev); 1673 struct driver_data *drv_data = dev_get_drvdata(dev);
1675 struct ssp_device *ssp = drv_data->ssp; 1674 struct ssp_device *ssp = drv_data->ssp;
1676 int status = 0; 1675 int status = 0;
1677 1676
@@ -1684,9 +1683,9 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1684 return 0; 1683 return 0;
1685} 1684}
1686 1685
1687static int pxa2xx_spi_resume(struct platform_device *pdev) 1686static int pxa2xx_spi_resume(struct device *dev)
1688{ 1687{
1689 struct driver_data *drv_data = platform_get_drvdata(pdev); 1688 struct driver_data *drv_data = dev_get_drvdata(dev);
1690 struct ssp_device *ssp = drv_data->ssp; 1689 struct ssp_device *ssp = drv_data->ssp;
1691 int status = 0; 1690 int status = 0;
1692 1691
@@ -1703,26 +1702,29 @@ static int pxa2xx_spi_resume(struct platform_device *pdev)
1703 /* Start the queue running */ 1702 /* Start the queue running */
1704 status = start_queue(drv_data); 1703 status = start_queue(drv_data);
1705 if (status != 0) { 1704 if (status != 0) {
1706 dev_err(&pdev->dev, "problem starting queue (%d)\n", status); 1705 dev_err(dev, "problem starting queue (%d)\n", status);
1707 return status; 1706 return status;
1708 } 1707 }
1709 1708
1710 return 0; 1709 return 0;
1711} 1710}
1712#else 1711
1713#define pxa2xx_spi_suspend NULL 1712static struct dev_pm_ops pxa2xx_spi_pm_ops = {
1714#define pxa2xx_spi_resume NULL 1713 .suspend = pxa2xx_spi_suspend,
1715#endif /* CONFIG_PM */ 1714 .resume = pxa2xx_spi_resume,
1715};
1716#endif
1716 1717
1717static struct platform_driver driver = { 1718static struct platform_driver driver = {
1718 .driver = { 1719 .driver = {
1719 .name = "pxa2xx-spi", 1720 .name = "pxa2xx-spi",
1720 .owner = THIS_MODULE, 1721 .owner = THIS_MODULE,
1722#ifdef CONFIG_PM
1723 .pm = &pxa2xx_spi_pm_ops,
1724#endif
1721 }, 1725 },
1722 .remove = pxa2xx_spi_remove, 1726 .remove = pxa2xx_spi_remove,
1723 .shutdown = pxa2xx_spi_shutdown, 1727 .shutdown = pxa2xx_spi_shutdown,
1724 .suspend = pxa2xx_spi_suspend,
1725 .resume = pxa2xx_spi_resume,
1726}; 1728};
1727 1729
1728static int __init pxa2xx_spi_init(void) 1730static int __init pxa2xx_spi_init(void)
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/spi_imx.c
index b1447236ae81..89c22efedfb0 100644
--- a/drivers/spi/mxc_spi.c
+++ b/drivers/spi/spi_imx.c
@@ -48,14 +48,14 @@
48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
50 50
51struct mxc_spi_config { 51struct spi_imx_config {
52 unsigned int speed_hz; 52 unsigned int speed_hz;
53 unsigned int bpw; 53 unsigned int bpw;
54 unsigned int mode; 54 unsigned int mode;
55 int cs; 55 int cs;
56}; 56};
57 57
58struct mxc_spi_data { 58struct spi_imx_data {
59 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
60 60
61 struct completion xfer_done; 61 struct completion xfer_done;
@@ -66,43 +66,43 @@ struct mxc_spi_data {
66 int *chipselect; 66 int *chipselect;
67 67
68 unsigned int count; 68 unsigned int count;
69 void (*tx)(struct mxc_spi_data *); 69 void (*tx)(struct spi_imx_data *);
70 void (*rx)(struct mxc_spi_data *); 70 void (*rx)(struct spi_imx_data *);
71 void *rx_buf; 71 void *rx_buf;
72 const void *tx_buf; 72 const void *tx_buf;
73 unsigned int txfifo; /* number of words pushed in tx FIFO */ 73 unsigned int txfifo; /* number of words pushed in tx FIFO */
74 74
75 /* SoC specific functions */ 75 /* SoC specific functions */
76 void (*intctrl)(struct mxc_spi_data *, int); 76 void (*intctrl)(struct spi_imx_data *, int);
77 int (*config)(struct mxc_spi_data *, struct mxc_spi_config *); 77 int (*config)(struct spi_imx_data *, struct spi_imx_config *);
78 void (*trigger)(struct mxc_spi_data *); 78 void (*trigger)(struct spi_imx_data *);
79 int (*rx_available)(struct mxc_spi_data *); 79 int (*rx_available)(struct spi_imx_data *);
80}; 80};
81 81
82#define MXC_SPI_BUF_RX(type) \ 82#define MXC_SPI_BUF_RX(type) \
83static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \ 83static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
84{ \ 84{ \
85 unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \ 85 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
86 \ 86 \
87 if (mxc_spi->rx_buf) { \ 87 if (spi_imx->rx_buf) { \
88 *(type *)mxc_spi->rx_buf = val; \ 88 *(type *)spi_imx->rx_buf = val; \
89 mxc_spi->rx_buf += sizeof(type); \ 89 spi_imx->rx_buf += sizeof(type); \
90 } \ 90 } \
91} 91}
92 92
93#define MXC_SPI_BUF_TX(type) \ 93#define MXC_SPI_BUF_TX(type) \
94static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \ 94static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
95{ \ 95{ \
96 type val = 0; \ 96 type val = 0; \
97 \ 97 \
98 if (mxc_spi->tx_buf) { \ 98 if (spi_imx->tx_buf) { \
99 val = *(type *)mxc_spi->tx_buf; \ 99 val = *(type *)spi_imx->tx_buf; \
100 mxc_spi->tx_buf += sizeof(type); \ 100 spi_imx->tx_buf += sizeof(type); \
101 } \ 101 } \
102 \ 102 \
103 mxc_spi->count -= sizeof(type); \ 103 spi_imx->count -= sizeof(type); \
104 \ 104 \
105 writel(val, mxc_spi->base + MXC_CSPITXDATA); \ 105 writel(val, spi_imx->base + MXC_CSPITXDATA); \
106} 106}
107 107
108MXC_SPI_BUF_RX(u8) 108MXC_SPI_BUF_RX(u8)
@@ -119,7 +119,7 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
119 256, 384, 512, 768, 1024}; 119 256, 384, 512, 768, 1024};
120 120
121/* MX21, MX27 */ 121/* MX21, MX27 */
122static unsigned int mxc_spi_clkdiv_1(unsigned int fin, 122static unsigned int spi_imx_clkdiv_1(unsigned int fin,
123 unsigned int fspi) 123 unsigned int fspi)
124{ 124{
125 int i, max; 125 int i, max;
@@ -137,7 +137,7 @@ static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
137} 137}
138 138
139/* MX1, MX31, MX35 */ 139/* MX1, MX31, MX35 */
140static unsigned int mxc_spi_clkdiv_2(unsigned int fin, 140static unsigned int spi_imx_clkdiv_2(unsigned int fin,
141 unsigned int fspi) 141 unsigned int fspi)
142{ 142{
143 int i, div = 4; 143 int i, div = 4;
@@ -174,7 +174,7 @@ static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
174 * the i.MX35 has a slightly different register layout for bits 174 * the i.MX35 has a slightly different register layout for bits
175 * we do not use here. 175 * we do not use here.
176 */ 176 */
177static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) 177static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
178{ 178{
179 unsigned int val = 0; 179 unsigned int val = 0;
180 180
@@ -183,24 +183,24 @@ static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
183 if (enable & MXC_INT_RR) 183 if (enable & MXC_INT_RR)
184 val |= MX31_INTREG_RREN; 184 val |= MX31_INTREG_RREN;
185 185
186 writel(val, mxc_spi->base + MXC_CSPIINT); 186 writel(val, spi_imx->base + MXC_CSPIINT);
187} 187}
188 188
189static void mx31_trigger(struct mxc_spi_data *mxc_spi) 189static void mx31_trigger(struct spi_imx_data *spi_imx)
190{ 190{
191 unsigned int reg; 191 unsigned int reg;
192 192
193 reg = readl(mxc_spi->base + MXC_CSPICTRL); 193 reg = readl(spi_imx->base + MXC_CSPICTRL);
194 reg |= MX31_CSPICTRL_XCH; 194 reg |= MX31_CSPICTRL_XCH;
195 writel(reg, mxc_spi->base + MXC_CSPICTRL); 195 writel(reg, spi_imx->base + MXC_CSPICTRL);
196} 196}
197 197
198static int mx31_config(struct mxc_spi_data *mxc_spi, 198static int mx31_config(struct spi_imx_data *spi_imx,
199 struct mxc_spi_config *config) 199 struct spi_imx_config *config)
200{ 200{
201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
202 202
203 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 203 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
204 MX31_CSPICTRL_DR_SHIFT; 204 MX31_CSPICTRL_DR_SHIFT;
205 205
206 if (cpu_is_mx31()) 206 if (cpu_is_mx31())
@@ -223,14 +223,14 @@ static int mx31_config(struct mxc_spi_data *mxc_spi,
223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; 223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
224 } 224 }
225 225
226 writel(reg, mxc_spi->base + MXC_CSPICTRL); 226 writel(reg, spi_imx->base + MXC_CSPICTRL);
227 227
228 return 0; 228 return 0;
229} 229}
230 230
231static int mx31_rx_available(struct mxc_spi_data *mxc_spi) 231static int mx31_rx_available(struct spi_imx_data *spi_imx)
232{ 232{
233 return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 233 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
234} 234}
235 235
236#define MX27_INTREG_RR (1 << 4) 236#define MX27_INTREG_RR (1 << 4)
@@ -246,7 +246,7 @@ static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
246#define MX27_CSPICTRL_DR_SHIFT 14 246#define MX27_CSPICTRL_DR_SHIFT 14
247#define MX27_CSPICTRL_CS_SHIFT 19 247#define MX27_CSPICTRL_CS_SHIFT 19
248 248
249static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) 249static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
250{ 250{
251 unsigned int val = 0; 251 unsigned int val = 0;
252 252
@@ -255,24 +255,24 @@ static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
255 if (enable & MXC_INT_RR) 255 if (enable & MXC_INT_RR)
256 val |= MX27_INTREG_RREN; 256 val |= MX27_INTREG_RREN;
257 257
258 writel(val, mxc_spi->base + MXC_CSPIINT); 258 writel(val, spi_imx->base + MXC_CSPIINT);
259} 259}
260 260
261static void mx27_trigger(struct mxc_spi_data *mxc_spi) 261static void mx27_trigger(struct spi_imx_data *spi_imx)
262{ 262{
263 unsigned int reg; 263 unsigned int reg;
264 264
265 reg = readl(mxc_spi->base + MXC_CSPICTRL); 265 reg = readl(spi_imx->base + MXC_CSPICTRL);
266 reg |= MX27_CSPICTRL_XCH; 266 reg |= MX27_CSPICTRL_XCH;
267 writel(reg, mxc_spi->base + MXC_CSPICTRL); 267 writel(reg, spi_imx->base + MXC_CSPICTRL);
268} 268}
269 269
270static int mx27_config(struct mxc_spi_data *mxc_spi, 270static int mx27_config(struct spi_imx_data *spi_imx,
271 struct mxc_spi_config *config) 271 struct spi_imx_config *config)
272{ 272{
273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; 273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
274 274
275 reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) << 275 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
276 MX27_CSPICTRL_DR_SHIFT; 276 MX27_CSPICTRL_DR_SHIFT;
277 reg |= config->bpw - 1; 277 reg |= config->bpw - 1;
278 278
@@ -285,14 +285,14 @@ static int mx27_config(struct mxc_spi_data *mxc_spi,
285 if (config->cs < 0) 285 if (config->cs < 0)
286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; 286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
287 287
288 writel(reg, mxc_spi->base + MXC_CSPICTRL); 288 writel(reg, spi_imx->base + MXC_CSPICTRL);
289 289
290 return 0; 290 return 0;
291} 291}
292 292
293static int mx27_rx_available(struct mxc_spi_data *mxc_spi) 293static int mx27_rx_available(struct spi_imx_data *spi_imx)
294{ 294{
295 return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR; 295 return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
296} 296}
297 297
298#define MX1_INTREG_RR (1 << 3) 298#define MX1_INTREG_RR (1 << 3)
@@ -306,7 +306,7 @@ static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
306#define MX1_CSPICTRL_MASTER (1 << 10) 306#define MX1_CSPICTRL_MASTER (1 << 10)
307#define MX1_CSPICTRL_DR_SHIFT 13 307#define MX1_CSPICTRL_DR_SHIFT 13
308 308
309static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) 309static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
310{ 310{
311 unsigned int val = 0; 311 unsigned int val = 0;
312 312
@@ -315,24 +315,24 @@ static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
315 if (enable & MXC_INT_RR) 315 if (enable & MXC_INT_RR)
316 val |= MX1_INTREG_RREN; 316 val |= MX1_INTREG_RREN;
317 317
318 writel(val, mxc_spi->base + MXC_CSPIINT); 318 writel(val, spi_imx->base + MXC_CSPIINT);
319} 319}
320 320
321static void mx1_trigger(struct mxc_spi_data *mxc_spi) 321static void mx1_trigger(struct spi_imx_data *spi_imx)
322{ 322{
323 unsigned int reg; 323 unsigned int reg;
324 324
325 reg = readl(mxc_spi->base + MXC_CSPICTRL); 325 reg = readl(spi_imx->base + MXC_CSPICTRL);
326 reg |= MX1_CSPICTRL_XCH; 326 reg |= MX1_CSPICTRL_XCH;
327 writel(reg, mxc_spi->base + MXC_CSPICTRL); 327 writel(reg, spi_imx->base + MXC_CSPICTRL);
328} 328}
329 329
330static int mx1_config(struct mxc_spi_data *mxc_spi, 330static int mx1_config(struct spi_imx_data *spi_imx,
331 struct mxc_spi_config *config) 331 struct spi_imx_config *config)
332{ 332{
333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
334 334
335 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 335 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
336 MX1_CSPICTRL_DR_SHIFT; 336 MX1_CSPICTRL_DR_SHIFT;
337 reg |= config->bpw - 1; 337 reg |= config->bpw - 1;
338 338
@@ -341,156 +341,151 @@ static int mx1_config(struct mxc_spi_data *mxc_spi,
341 if (config->mode & SPI_CPOL) 341 if (config->mode & SPI_CPOL)
342 reg |= MX1_CSPICTRL_POL; 342 reg |= MX1_CSPICTRL_POL;
343 343
344 writel(reg, mxc_spi->base + MXC_CSPICTRL); 344 writel(reg, spi_imx->base + MXC_CSPICTRL);
345 345
346 return 0; 346 return 0;
347} 347}
348 348
349static int mx1_rx_available(struct mxc_spi_data *mxc_spi) 349static int mx1_rx_available(struct spi_imx_data *spi_imx)
350{ 350{
351 return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR; 351 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
352} 352}
353 353
354static void mxc_spi_chipselect(struct spi_device *spi, int is_active) 354static void spi_imx_chipselect(struct spi_device *spi, int is_active)
355{ 355{
356 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 356 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
357 unsigned int cs = 0; 357 int gpio = spi_imx->chipselect[spi->chip_select];
358 int gpio = mxc_spi->chipselect[spi->chip_select]; 358 int active = is_active != BITBANG_CS_INACTIVE;
359 struct mxc_spi_config config; 359 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
360 360
361 if (spi->mode & SPI_CS_HIGH) 361 if (gpio < 0)
362 cs = 1;
363
364 if (is_active == BITBANG_CS_INACTIVE) {
365 if (gpio >= 0)
366 gpio_set_value(gpio, !cs);
367 return; 362 return;
368 }
369
370 config.bpw = spi->bits_per_word;
371 config.speed_hz = spi->max_speed_hz;
372 config.mode = spi->mode;
373 config.cs = mxc_spi->chipselect[spi->chip_select];
374
375 mxc_spi->config(mxc_spi, &config);
376
377 /* Initialize the functions for transfer */
378 if (config.bpw <= 8) {
379 mxc_spi->rx = mxc_spi_buf_rx_u8;
380 mxc_spi->tx = mxc_spi_buf_tx_u8;
381 } else if (config.bpw <= 16) {
382 mxc_spi->rx = mxc_spi_buf_rx_u16;
383 mxc_spi->tx = mxc_spi_buf_tx_u16;
384 } else if (config.bpw <= 32) {
385 mxc_spi->rx = mxc_spi_buf_rx_u32;
386 mxc_spi->tx = mxc_spi_buf_tx_u32;
387 } else
388 BUG();
389 363
390 if (gpio >= 0) 364 gpio_set_value(gpio, dev_is_lowactive ^ active);
391 gpio_set_value(gpio, cs);
392
393 return;
394} 365}
395 366
396static void mxc_spi_push(struct mxc_spi_data *mxc_spi) 367static void spi_imx_push(struct spi_imx_data *spi_imx)
397{ 368{
398 while (mxc_spi->txfifo < 8) { 369 while (spi_imx->txfifo < 8) {
399 if (!mxc_spi->count) 370 if (!spi_imx->count)
400 break; 371 break;
401 mxc_spi->tx(mxc_spi); 372 spi_imx->tx(spi_imx);
402 mxc_spi->txfifo++; 373 spi_imx->txfifo++;
403 } 374 }
404 375
405 mxc_spi->trigger(mxc_spi); 376 spi_imx->trigger(spi_imx);
406} 377}
407 378
408static irqreturn_t mxc_spi_isr(int irq, void *dev_id) 379static irqreturn_t spi_imx_isr(int irq, void *dev_id)
409{ 380{
410 struct mxc_spi_data *mxc_spi = dev_id; 381 struct spi_imx_data *spi_imx = dev_id;
411 382
412 while (mxc_spi->rx_available(mxc_spi)) { 383 while (spi_imx->rx_available(spi_imx)) {
413 mxc_spi->rx(mxc_spi); 384 spi_imx->rx(spi_imx);
414 mxc_spi->txfifo--; 385 spi_imx->txfifo--;
415 } 386 }
416 387
417 if (mxc_spi->count) { 388 if (spi_imx->count) {
418 mxc_spi_push(mxc_spi); 389 spi_imx_push(spi_imx);
419 return IRQ_HANDLED; 390 return IRQ_HANDLED;
420 } 391 }
421 392
422 if (mxc_spi->txfifo) { 393 if (spi_imx->txfifo) {
423 /* No data left to push, but still waiting for rx data, 394 /* No data left to push, but still waiting for rx data,
424 * enable receive data available interrupt. 395 * enable receive data available interrupt.
425 */ 396 */
426 mxc_spi->intctrl(mxc_spi, MXC_INT_RR); 397 spi_imx->intctrl(spi_imx, MXC_INT_RR);
427 return IRQ_HANDLED; 398 return IRQ_HANDLED;
428 } 399 }
429 400
430 mxc_spi->intctrl(mxc_spi, 0); 401 spi_imx->intctrl(spi_imx, 0);
431 complete(&mxc_spi->xfer_done); 402 complete(&spi_imx->xfer_done);
432 403
433 return IRQ_HANDLED; 404 return IRQ_HANDLED;
434} 405}
435 406
436static int mxc_spi_setupxfer(struct spi_device *spi, 407static int spi_imx_setupxfer(struct spi_device *spi,
437 struct spi_transfer *t) 408 struct spi_transfer *t)
438{ 409{
439 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 410 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
440 struct mxc_spi_config config; 411 struct spi_imx_config config;
441 412
442 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 413 config.bpw = t ? t->bits_per_word : spi->bits_per_word;
443 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 414 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
444 config.mode = spi->mode; 415 config.mode = spi->mode;
416 config.cs = spi_imx->chipselect[spi->chip_select];
417
418 if (!config.speed_hz)
419 config.speed_hz = spi->max_speed_hz;
420 if (!config.bpw)
421 config.bpw = spi->bits_per_word;
422 if (!config.speed_hz)
423 config.speed_hz = spi->max_speed_hz;
424
425 /* Initialize the functions for transfer */
426 if (config.bpw <= 8) {
427 spi_imx->rx = spi_imx_buf_rx_u8;
428 spi_imx->tx = spi_imx_buf_tx_u8;
429 } else if (config.bpw <= 16) {
430 spi_imx->rx = spi_imx_buf_rx_u16;
431 spi_imx->tx = spi_imx_buf_tx_u16;
432 } else if (config.bpw <= 32) {
433 spi_imx->rx = spi_imx_buf_rx_u32;
434 spi_imx->tx = spi_imx_buf_tx_u32;
435 } else
436 BUG();
445 437
446 mxc_spi->config(mxc_spi, &config); 438 spi_imx->config(spi_imx, &config);
447 439
448 return 0; 440 return 0;
449} 441}
450 442
451static int mxc_spi_transfer(struct spi_device *spi, 443static int spi_imx_transfer(struct spi_device *spi,
452 struct spi_transfer *transfer) 444 struct spi_transfer *transfer)
453{ 445{
454 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 446 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
455 447
456 mxc_spi->tx_buf = transfer->tx_buf; 448 spi_imx->tx_buf = transfer->tx_buf;
457 mxc_spi->rx_buf = transfer->rx_buf; 449 spi_imx->rx_buf = transfer->rx_buf;
458 mxc_spi->count = transfer->len; 450 spi_imx->count = transfer->len;
459 mxc_spi->txfifo = 0; 451 spi_imx->txfifo = 0;
460 452
461 init_completion(&mxc_spi->xfer_done); 453 init_completion(&spi_imx->xfer_done);
462 454
463 mxc_spi_push(mxc_spi); 455 spi_imx_push(spi_imx);
464 456
465 mxc_spi->intctrl(mxc_spi, MXC_INT_TE); 457 spi_imx->intctrl(spi_imx, MXC_INT_TE);
466 458
467 wait_for_completion(&mxc_spi->xfer_done); 459 wait_for_completion(&spi_imx->xfer_done);
468 460
469 return transfer->len; 461 return transfer->len;
470} 462}
471 463
472static int mxc_spi_setup(struct spi_device *spi) 464static int spi_imx_setup(struct spi_device *spi)
473{ 465{
474 if (!spi->bits_per_word) 466 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
475 spi->bits_per_word = 8; 467 int gpio = spi_imx->chipselect[spi->chip_select];
476 468
477 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 469 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
478 spi->mode, spi->bits_per_word, spi->max_speed_hz); 470 spi->mode, spi->bits_per_word, spi->max_speed_hz);
479 471
480 mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE); 472 if (gpio >= 0)
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
474
475 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
481 476
482 return 0; 477 return 0;
483} 478}
484 479
485static void mxc_spi_cleanup(struct spi_device *spi) 480static void spi_imx_cleanup(struct spi_device *spi)
486{ 481{
487} 482}
488 483
489static int __init mxc_spi_probe(struct platform_device *pdev) 484static int __init spi_imx_probe(struct platform_device *pdev)
490{ 485{
491 struct spi_imx_master *mxc_platform_info; 486 struct spi_imx_master *mxc_platform_info;
492 struct spi_master *master; 487 struct spi_master *master;
493 struct mxc_spi_data *mxc_spi; 488 struct spi_imx_data *spi_imx;
494 struct resource *res; 489 struct resource *res;
495 int i, ret; 490 int i, ret;
496 491
@@ -500,7 +495,7 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
500 return -EINVAL; 495 return -EINVAL;
501 } 496 }
502 497
503 master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data)); 498 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
504 if (!master) 499 if (!master)
505 return -ENOMEM; 500 return -ENOMEM;
506 501
@@ -509,32 +504,32 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
509 master->bus_num = pdev->id; 504 master->bus_num = pdev->id;
510 master->num_chipselect = mxc_platform_info->num_chipselect; 505 master->num_chipselect = mxc_platform_info->num_chipselect;
511 506
512 mxc_spi = spi_master_get_devdata(master); 507 spi_imx = spi_master_get_devdata(master);
513 mxc_spi->bitbang.master = spi_master_get(master); 508 spi_imx->bitbang.master = spi_master_get(master);
514 mxc_spi->chipselect = mxc_platform_info->chipselect; 509 spi_imx->chipselect = mxc_platform_info->chipselect;
515 510
516 for (i = 0; i < master->num_chipselect; i++) { 511 for (i = 0; i < master->num_chipselect; i++) {
517 if (mxc_spi->chipselect[i] < 0) 512 if (spi_imx->chipselect[i] < 0)
518 continue; 513 continue;
519 ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME); 514 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
520 if (ret) { 515 if (ret) {
521 i--; 516 i--;
522 while (i > 0) 517 while (i > 0)
523 if (mxc_spi->chipselect[i] >= 0) 518 if (spi_imx->chipselect[i] >= 0)
524 gpio_free(mxc_spi->chipselect[i--]); 519 gpio_free(spi_imx->chipselect[i--]);
525 dev_err(&pdev->dev, "can't get cs gpios"); 520 dev_err(&pdev->dev, "can't get cs gpios");
526 goto out_master_put; 521 goto out_master_put;
527 } 522 }
528 gpio_direction_output(mxc_spi->chipselect[i], 1);
529 } 523 }
530 524
531 mxc_spi->bitbang.chipselect = mxc_spi_chipselect; 525 spi_imx->bitbang.chipselect = spi_imx_chipselect;
532 mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer; 526 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
533 mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer; 527 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
534 mxc_spi->bitbang.master->setup = mxc_spi_setup; 528 spi_imx->bitbang.master->setup = spi_imx_setup;
535 mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup; 529 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
530 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
536 531
537 init_completion(&mxc_spi->xfer_done); 532 init_completion(&spi_imx->xfer_done);
538 533
539 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 if (!res) { 535 if (!res) {
@@ -549,58 +544,58 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
549 goto out_gpio_free; 544 goto out_gpio_free;
550 } 545 }
551 546
552 mxc_spi->base = ioremap(res->start, resource_size(res)); 547 spi_imx->base = ioremap(res->start, resource_size(res));
553 if (!mxc_spi->base) { 548 if (!spi_imx->base) {
554 ret = -EINVAL; 549 ret = -EINVAL;
555 goto out_release_mem; 550 goto out_release_mem;
556 } 551 }
557 552
558 mxc_spi->irq = platform_get_irq(pdev, 0); 553 spi_imx->irq = platform_get_irq(pdev, 0);
559 if (!mxc_spi->irq) { 554 if (!spi_imx->irq) {
560 ret = -EINVAL; 555 ret = -EINVAL;
561 goto out_iounmap; 556 goto out_iounmap;
562 } 557 }
563 558
564 ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi); 559 ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
565 if (ret) { 560 if (ret) {
566 dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret); 561 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
567 goto out_iounmap; 562 goto out_iounmap;
568 } 563 }
569 564
570 if (cpu_is_mx31() || cpu_is_mx35()) { 565 if (cpu_is_mx31() || cpu_is_mx35()) {
571 mxc_spi->intctrl = mx31_intctrl; 566 spi_imx->intctrl = mx31_intctrl;
572 mxc_spi->config = mx31_config; 567 spi_imx->config = mx31_config;
573 mxc_spi->trigger = mx31_trigger; 568 spi_imx->trigger = mx31_trigger;
574 mxc_spi->rx_available = mx31_rx_available; 569 spi_imx->rx_available = mx31_rx_available;
575 } else if (cpu_is_mx27() || cpu_is_mx21()) { 570 } else if (cpu_is_mx27() || cpu_is_mx21()) {
576 mxc_spi->intctrl = mx27_intctrl; 571 spi_imx->intctrl = mx27_intctrl;
577 mxc_spi->config = mx27_config; 572 spi_imx->config = mx27_config;
578 mxc_spi->trigger = mx27_trigger; 573 spi_imx->trigger = mx27_trigger;
579 mxc_spi->rx_available = mx27_rx_available; 574 spi_imx->rx_available = mx27_rx_available;
580 } else if (cpu_is_mx1()) { 575 } else if (cpu_is_mx1()) {
581 mxc_spi->intctrl = mx1_intctrl; 576 spi_imx->intctrl = mx1_intctrl;
582 mxc_spi->config = mx1_config; 577 spi_imx->config = mx1_config;
583 mxc_spi->trigger = mx1_trigger; 578 spi_imx->trigger = mx1_trigger;
584 mxc_spi->rx_available = mx1_rx_available; 579 spi_imx->rx_available = mx1_rx_available;
585 } else 580 } else
586 BUG(); 581 BUG();
587 582
588 mxc_spi->clk = clk_get(&pdev->dev, NULL); 583 spi_imx->clk = clk_get(&pdev->dev, NULL);
589 if (IS_ERR(mxc_spi->clk)) { 584 if (IS_ERR(spi_imx->clk)) {
590 dev_err(&pdev->dev, "unable to get clock\n"); 585 dev_err(&pdev->dev, "unable to get clock\n");
591 ret = PTR_ERR(mxc_spi->clk); 586 ret = PTR_ERR(spi_imx->clk);
592 goto out_free_irq; 587 goto out_free_irq;
593 } 588 }
594 589
595 clk_enable(mxc_spi->clk); 590 clk_enable(spi_imx->clk);
596 mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk); 591 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
597 592
598 if (!cpu_is_mx31() || !cpu_is_mx35()) 593 if (!cpu_is_mx31() || !cpu_is_mx35())
599 writel(1, mxc_spi->base + MXC_RESET); 594 writel(1, spi_imx->base + MXC_RESET);
600 595
601 mxc_spi->intctrl(mxc_spi, 0); 596 spi_imx->intctrl(spi_imx, 0);
602 597
603 ret = spi_bitbang_start(&mxc_spi->bitbang); 598 ret = spi_bitbang_start(&spi_imx->bitbang);
604 if (ret) { 599 if (ret) {
605 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 600 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
606 goto out_clk_put; 601 goto out_clk_put;
@@ -611,18 +606,18 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
611 return ret; 606 return ret;
612 607
613out_clk_put: 608out_clk_put:
614 clk_disable(mxc_spi->clk); 609 clk_disable(spi_imx->clk);
615 clk_put(mxc_spi->clk); 610 clk_put(spi_imx->clk);
616out_free_irq: 611out_free_irq:
617 free_irq(mxc_spi->irq, mxc_spi); 612 free_irq(spi_imx->irq, spi_imx);
618out_iounmap: 613out_iounmap:
619 iounmap(mxc_spi->base); 614 iounmap(spi_imx->base);
620out_release_mem: 615out_release_mem:
621 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
622out_gpio_free: 617out_gpio_free:
623 for (i = 0; i < master->num_chipselect; i++) 618 for (i = 0; i < master->num_chipselect; i++)
624 if (mxc_spi->chipselect[i] >= 0) 619 if (spi_imx->chipselect[i] >= 0)
625 gpio_free(mxc_spi->chipselect[i]); 620 gpio_free(spi_imx->chipselect[i]);
626out_master_put: 621out_master_put:
627 spi_master_put(master); 622 spi_master_put(master);
628 kfree(master); 623 kfree(master);
@@ -630,24 +625,24 @@ out_master_put:
630 return ret; 625 return ret;
631} 626}
632 627
633static int __exit mxc_spi_remove(struct platform_device *pdev) 628static int __exit spi_imx_remove(struct platform_device *pdev)
634{ 629{
635 struct spi_master *master = platform_get_drvdata(pdev); 630 struct spi_master *master = platform_get_drvdata(pdev);
636 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 631 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
637 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master); 632 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
638 int i; 633 int i;
639 634
640 spi_bitbang_stop(&mxc_spi->bitbang); 635 spi_bitbang_stop(&spi_imx->bitbang);
641 636
642 writel(0, mxc_spi->base + MXC_CSPICTRL); 637 writel(0, spi_imx->base + MXC_CSPICTRL);
643 clk_disable(mxc_spi->clk); 638 clk_disable(spi_imx->clk);
644 clk_put(mxc_spi->clk); 639 clk_put(spi_imx->clk);
645 free_irq(mxc_spi->irq, mxc_spi); 640 free_irq(spi_imx->irq, spi_imx);
646 iounmap(mxc_spi->base); 641 iounmap(spi_imx->base);
647 642
648 for (i = 0; i < master->num_chipselect; i++) 643 for (i = 0; i < master->num_chipselect; i++)
649 if (mxc_spi->chipselect[i] >= 0) 644 if (spi_imx->chipselect[i] >= 0)
650 gpio_free(mxc_spi->chipselect[i]); 645 gpio_free(spi_imx->chipselect[i]);
651 646
652 spi_master_put(master); 647 spi_master_put(master);
653 648
@@ -658,27 +653,27 @@ static int __exit mxc_spi_remove(struct platform_device *pdev)
658 return 0; 653 return 0;
659} 654}
660 655
661static struct platform_driver mxc_spi_driver = { 656static struct platform_driver spi_imx_driver = {
662 .driver = { 657 .driver = {
663 .name = DRIVER_NAME, 658 .name = DRIVER_NAME,
664 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
665 }, 660 },
666 .probe = mxc_spi_probe, 661 .probe = spi_imx_probe,
667 .remove = __exit_p(mxc_spi_remove), 662 .remove = __exit_p(spi_imx_remove),
668}; 663};
669 664
670static int __init mxc_spi_init(void) 665static int __init spi_imx_init(void)
671{ 666{
672 return platform_driver_register(&mxc_spi_driver); 667 return platform_driver_register(&spi_imx_driver);
673} 668}
674 669
675static void __exit mxc_spi_exit(void) 670static void __exit spi_imx_exit(void)
676{ 671{
677 platform_driver_unregister(&mxc_spi_driver); 672 platform_driver_unregister(&spi_imx_driver);
678} 673}
679 674
680module_init(mxc_spi_init); 675module_init(spi_imx_init);
681module_exit(mxc_spi_exit); 676module_exit(spi_imx_exit);
682 677
683MODULE_DESCRIPTION("SPI Master Controller driver"); 678MODULE_DESCRIPTION("SPI Master Controller driver");
684MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 679MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
index d871dc23909c..2552bb364005 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi_stmp.c
@@ -242,7 +242,7 @@ static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
242 wait_for_completion(&ss->done); 242 wait_for_completion(&ss->done);
243 243
244 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)) 244 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
245 status = ETIMEDOUT; 245 status = -ETIMEDOUT;
246 246
247 if (!dma_buf) 247 if (!dma_buf)
248 dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir); 248 dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f921bd1109e1..5d23983f02fc 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
537 return status; 537 return status;
538} 538}
539 539
540static struct file_operations spidev_fops = { 540static const struct file_operations spidev_fops = {
541 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 /* REVISIT switch to aio primitives, so that userspace 542 /* REVISIT switch to aio primitives, so that userspace
543 * gets more complete API coverage. It'll simplify things 543 * gets more complete API coverage. It'll simplify things
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9a4dd5992f65..d21b3469f6d7 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -59,8 +59,6 @@ source "drivers/staging/echo/Kconfig"
59 59
60source "drivers/staging/poch/Kconfig" 60source "drivers/staging/poch/Kconfig"
61 61
62source "drivers/staging/agnx/Kconfig"
63
64source "drivers/staging/otus/Kconfig" 62source "drivers/staging/otus/Kconfig"
65 63
66source "drivers/staging/rt2860/Kconfig" 64source "drivers/staging/rt2860/Kconfig"
@@ -95,8 +93,6 @@ source "drivers/staging/dst/Kconfig"
95 93
96source "drivers/staging/pohmelfs/Kconfig" 94source "drivers/staging/pohmelfs/Kconfig"
97 95
98source "drivers/staging/stlc45xx/Kconfig"
99
100source "drivers/staging/b3dfg/Kconfig" 96source "drivers/staging/b3dfg/Kconfig"
101 97
102source "drivers/staging/phison/Kconfig" 98source "drivers/staging/phison/Kconfig"
@@ -129,7 +125,5 @@ source "drivers/staging/sep/Kconfig"
129 125
130source "drivers/staging/iio/Kconfig" 126source "drivers/staging/iio/Kconfig"
131 127
132source "drivers/staging/cowloop/Kconfig"
133
134endif # !STAGING_EXCLUDE_BUILD 128endif # !STAGING_EXCLUDE_BUILD
135endif # STAGING 129endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 104f2f8897ec..8cbf1aebea2e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_W35UND) += winbond/
12obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 12obj-$(CONFIG_PRISM2_USB) += wlan-ng/
13obj-$(CONFIG_ECHO) += echo/ 13obj-$(CONFIG_ECHO) += echo/
14obj-$(CONFIG_POCH) += poch/ 14obj-$(CONFIG_POCH) += poch/
15obj-$(CONFIG_AGNX) += agnx/
16obj-$(CONFIG_OTUS) += otus/ 15obj-$(CONFIG_OTUS) += otus/
17obj-$(CONFIG_RT2860) += rt2860/ 16obj-$(CONFIG_RT2860) += rt2860/
18obj-$(CONFIG_RT2870) += rt2870/ 17obj-$(CONFIG_RT2870) += rt2870/
@@ -30,7 +29,6 @@ obj-$(CONFIG_ANDROID) += android/
30obj-$(CONFIG_ANDROID) += dream/ 29obj-$(CONFIG_ANDROID) += dream/
31obj-$(CONFIG_DST) += dst/ 30obj-$(CONFIG_DST) += dst/
32obj-$(CONFIG_POHMELFS) += pohmelfs/ 31obj-$(CONFIG_POHMELFS) += pohmelfs/
33obj-$(CONFIG_STLC45XX) += stlc45xx/
34obj-$(CONFIG_B3DFG) += b3dfg/ 32obj-$(CONFIG_B3DFG) += b3dfg/
35obj-$(CONFIG_IDE_PHISON) += phison/ 33obj-$(CONFIG_IDE_PHISON) += phison/
36obj-$(CONFIG_PLAN9AUTH) += p9auth/ 34obj-$(CONFIG_PLAN9AUTH) += p9auth/
@@ -46,4 +44,3 @@ obj-$(CONFIG_VME_BUS) += vme/
46obj-$(CONFIG_RAR_REGISTER) += rar/ 44obj-$(CONFIG_RAR_REGISTER) += rar/
47obj-$(CONFIG_DX_SEP) += sep/ 45obj-$(CONFIG_DX_SEP) += sep/
48obj-$(CONFIG_IIO) += iio/ 46obj-$(CONFIG_IIO) += iio/
49obj-$(CONFIG_COWLOOP) += cowloop/
diff --git a/drivers/staging/agnx/Kconfig b/drivers/staging/agnx/Kconfig
deleted file mode 100644
index 7f43549e36dd..000000000000
--- a/drivers/staging/agnx/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
1config AGNX
2 tristate "Wireless Airgo AGNX support"
3 depends on WLAN_80211 && MAC80211
4 ---help---
5 This is an experimental driver for Airgo AGNX00 wireless chip.
diff --git a/drivers/staging/agnx/Makefile b/drivers/staging/agnx/Makefile
deleted file mode 100644
index 1216564a312d..000000000000
--- a/drivers/staging/agnx/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1obj-$(CONFIG_AGNX) += agnx.o
2
3agnx-objs := rf.o \
4 pci.o \
5 xmit.o \
6 table.o \
7 sta.o \
8 phy.o
diff --git a/drivers/staging/agnx/TODO b/drivers/staging/agnx/TODO
deleted file mode 100644
index 89bec74318aa..000000000000
--- a/drivers/staging/agnx/TODO
+++ /dev/null
@@ -1,22 +0,0 @@
12008 7/18
2
3The RX has can't receive OFDM packet correctly,
4Guess it need be do RX calibrate.
5
6
7before 2008 3/1
8
91: The RX get too much "CRC failed" pakets, it make the card work very unstable,
102: After running a while, the card will get infinity "RX Frame" and "Error"
11interrupt, not know the root reason so far, try to fix it
123: Using two tx queue txd and txm but not only txm.
134: Set the hdr correctly.
145: Try to do recalibrate correvtly
156: To support G mode in future
167: Fix the mac address can't be readed and set correctly in BE machine.
178: Fix include and exclude FCS in promisous mode and manage mode
189: Using sta_notify to notice sta change
1910: Turn on frame reception at the end of start
2011: Guess the card support HW_MULTICAST_FILTER
2112: The tx process should be implment atomic?
2213: Using mac80211 function to control the TX&RX LED.
diff --git a/drivers/staging/agnx/agnx.h b/drivers/staging/agnx/agnx.h
deleted file mode 100644
index 3963d2597a11..000000000000
--- a/drivers/staging/agnx/agnx.h
+++ /dev/null
@@ -1,156 +0,0 @@
1#ifndef AGNX_H_
2#define AGNX_H_
3
4#include <linux/io.h>
5
6#include "xmit.h"
7
8#define PFX KBUILD_MODNAME ": "
9
10static inline u32 agnx_read32(void __iomem *mem_region, u32 offset)
11{
12 return ioread32(mem_region + offset);
13}
14
15static inline void agnx_write32(void __iomem *mem_region, u32 offset, u32 val)
16{
17 iowrite32(val, mem_region + offset);
18}
19
20/* static const struct ieee80211_rate agnx_rates_80211b[] = { */
21/* { .rate = 10, */
22/* .val = 0xa, */
23/* .flags = IEEE80211_RATE_CCK }, */
24/* { .rate = 20, */
25/* .val = 0x14, */
26/* .hw_value = -0x14, */
27/* .flags = IEEE80211_RATE_CCK_2 }, */
28/* { .rate = 55, */
29/* .val = 0x37, */
30/* .val2 = -0x37, */
31/* .flags = IEEE80211_RATE_CCK_2 }, */
32/* { .rate = 110, */
33/* .val = 0x6e, */
34/* .val2 = -0x6e, */
35/* .flags = IEEE80211_RATE_CCK_2 } */
36/* }; */
37
38
39static const struct ieee80211_rate agnx_rates_80211g[] = {
40/* { .bitrate = 10, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
41/* { .bitrate = 20, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
42/* { .bitrate = 55, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
43/* { .bitrate = 110, .hw_value = 4, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
44 { .bitrate = 10, .hw_value = 1, },
45 { .bitrate = 20, .hw_value = 2, },
46 { .bitrate = 55, .hw_value = 3, },
47 { .bitrate = 110, .hw_value = 4,},
48
49 { .bitrate = 60, .hw_value = 0xB, },
50 { .bitrate = 90, .hw_value = 0xF, },
51 { .bitrate = 120, .hw_value = 0xA },
52 { .bitrate = 180, .hw_value = 0xE, },
53/* { .bitrate = 240, .hw_value = 0xd, }, */
54 { .bitrate = 360, .hw_value = 0xD, },
55 { .bitrate = 480, .hw_value = 0x8, },
56 { .bitrate = 540, .hw_value = 0xC, },
57};
58
59static const struct ieee80211_channel agnx_channels[] = {
60 { .center_freq = 2412, .hw_value = 1, },
61 { .center_freq = 2417, .hw_value = 2, },
62 { .center_freq = 2422, .hw_value = 3, },
63 { .center_freq = 2427, .hw_value = 4, },
64 { .center_freq = 2432, .hw_value = 5, },
65 { .center_freq = 2437, .hw_value = 6, },
66 { .center_freq = 2442, .hw_value = 7, },
67 { .center_freq = 2447, .hw_value = 8, },
68 { .center_freq = 2452, .hw_value = 9, },
69 { .center_freq = 2457, .hw_value = 10, },
70 { .center_freq = 2462, .hw_value = 11, },
71 { .center_freq = 2467, .hw_value = 12, },
72 { .center_freq = 2472, .hw_value = 13, },
73 { .center_freq = 2484, .hw_value = 14, },
74};
75
76#define NUM_DRIVE_MODES 2
77/* Agnx operate mode */
78enum {
79 AGNX_MODE_80211A,
80 AGNX_MODE_80211A_OOB,
81 AGNX_MODE_80211A_MIMO,
82 AGNX_MODE_80211B_SHORT,
83 AGNX_MODE_80211B_LONG,
84 AGNX_MODE_80211G,
85 AGNX_MODE_80211G_OOB,
86 AGNX_MODE_80211G_MIMO,
87};
88
89enum {
90 AGNX_UNINIT,
91 AGNX_START,
92 AGNX_STOP,
93};
94
95struct agnx_priv {
96 struct pci_dev *pdev;
97 struct ieee80211_hw *hw;
98
99 spinlock_t lock;
100 struct mutex mutex;
101 unsigned int init_status;
102
103 void __iomem *ctl; /* pointer to base ram address */
104 void __iomem *data; /* pointer to mem region #2 */
105
106 struct agnx_ring rx;
107 struct agnx_ring txm;
108 struct agnx_ring txd;
109
110 /* Need volatile? */
111 u32 irq_status;
112
113 struct delayed_work periodic_work; /* Periodic tasks like recalibrate */
114 struct ieee80211_low_level_stats stats;
115
116 /* unsigned int phymode; */
117 int mode;
118 int channel;
119 u8 bssid[ETH_ALEN];
120
121 u8 mac_addr[ETH_ALEN];
122 u8 revid;
123
124 struct ieee80211_supported_band band;
125};
126
127
128#define AGNX_CHAINS_MAX 6
129#define AGNX_PERIODIC_DELAY 60000 /* unit: ms */
130#define LOCAL_STAID 0 /* the station entry for the card itself */
131#define BSSID_STAID 1 /* the station entry for the bsssid AP */
132#define spi_delay() udelay(40)
133#define eeprom_delay() udelay(40)
134#define routing_table_delay() udelay(50)
135
136/* PDU pool MEM region #2 */
137#define AGNX_PDUPOOL 0x40000 /* PDU pool */
138#define AGNX_PDUPOOL_SIZE 0x8000 /* PDU pool size*/
139#define AGNX_PDU_TX_WQ 0x41000 /* PDU list TX workqueue */
140#define AGNX_PDU_FREE 0x41800 /* Free Pool */
141#define PDU_SIZE 0x80 /* Free Pool node size */
142#define PDU_FREE_CNT 0xd0 /* Free pool node count */
143
144
145/* RF stuffs */
146extern void rf_chips_init(struct agnx_priv *priv);
147extern void spi_rc_write(void __iomem *mem_region, u32 chip_ids, u32 sw);
148extern void calibrate_oscillator(struct agnx_priv *priv);
149extern void do_calibration(struct agnx_priv *priv);
150extern void antenna_calibrate(struct agnx_priv *priv);
151extern void __antenna_calibrate(struct agnx_priv *priv);
152extern void print_offsets(struct agnx_priv *priv);
153extern int agnx_set_channel(struct agnx_priv *priv, unsigned int channel);
154
155
156#endif /* AGNX_H_ */
diff --git a/drivers/staging/agnx/debug.h b/drivers/staging/agnx/debug.h
deleted file mode 100644
index 7947f327a214..000000000000
--- a/drivers/staging/agnx/debug.h
+++ /dev/null
@@ -1,416 +0,0 @@
1#ifndef AGNX_DEBUG_H_
2#define AGNX_DEBUG_H_
3
4#include "agnx.h"
5#include "phy.h"
6#include "sta.h"
7#include "xmit.h"
8
9#define AGNX_TRACE printk(KERN_ERR PFX "function:%s line:%d\n", __func__, __LINE__)
10
11#define PRINTK_LE16(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", le16_to_cpu(var))
12#define PRINTK_LE32(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", le32_to_cpu(var))
13#define PRINTK_U8(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.2x\n", var)
14#define PRINTK_BE16(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", be16_to_cpu(var))
15#define PRINTK_BE32(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", be32_to_cpu(var))
16#define PRINTK_BITS(prefix, field) printk(KERN_DEBUG PFX #prefix ": " #field ": 0x%x\n", (reg & field) >> field##_SHIFT)
17
18static inline void agnx_bug(char *reason)
19{
20 printk(KERN_ERR PFX "%s\n", reason);
21 BUG();
22}
23
24static inline void agnx_print_desc(struct agnx_desc *desc)
25{
26 u32 reg = be32_to_cpu(desc->frag);
27
28 PRINTK_BITS(DESC, PACKET_LEN);
29
30 if (reg & FIRST_FRAG) {
31 PRINTK_BITS(DESC, FIRST_PACKET_MASK);
32 PRINTK_BITS(DESC, FIRST_RESERV2);
33 PRINTK_BITS(DESC, FIRST_TKIP_ERROR);
34 PRINTK_BITS(DESC, FIRST_TKIP_PACKET);
35 PRINTK_BITS(DESC, FIRST_RESERV1);
36 PRINTK_BITS(DESC, FIRST_FRAG_LEN);
37 } else {
38 PRINTK_BITS(DESC, SUB_RESERV2);
39 PRINTK_BITS(DESC, SUB_TKIP_ERROR);
40 PRINTK_BITS(DESC, SUB_TKIP_PACKET);
41 PRINTK_BITS(DESC, SUB_RESERV1);
42 PRINTK_BITS(DESC, SUB_FRAG_LEN);
43 }
44
45 PRINTK_BITS(DESC, FIRST_FRAG);
46 PRINTK_BITS(DESC, LAST_FRAG);
47 PRINTK_BITS(DESC, OWNER);
48}
49
50
51static inline void dump_ieee80211b_phy_hdr(__be32 _11b0, __be32 _11b1)
52{
53
54}
55
56static inline void agnx_print_hdr(struct agnx_hdr *hdr)
57{
58 u32 reg;
59 int i;
60
61 reg = be32_to_cpu(hdr->reg0);
62 PRINTK_BITS(HDR, RTS);
63 PRINTK_BITS(HDR, MULTICAST);
64 PRINTK_BITS(HDR, ACK);
65 PRINTK_BITS(HDR, TM);
66 PRINTK_BITS(HDR, RELAY);
67 PRINTK_BITS(HDR, REVISED_FCS);
68 PRINTK_BITS(HDR, NEXT_BUFFER_ADDR);
69
70 reg = be32_to_cpu(hdr->reg1);
71 PRINTK_BITS(HDR, MAC_HDR_LEN);
72 PRINTK_BITS(HDR, DURATION_OVERIDE);
73 PRINTK_BITS(HDR, PHY_HDR_OVERIDE);
74 PRINTK_BITS(HDR, CRC_FAIL);
75 PRINTK_BITS(HDR, SEQUENCE_NUMBER);
76 PRINTK_BITS(HDR, BUFF_HEAD_ADDR);
77
78 reg = be32_to_cpu(hdr->reg2);
79 PRINTK_BITS(HDR, PDU_COUNT);
80 PRINTK_BITS(HDR, WEP_KEY);
81 PRINTK_BITS(HDR, USES_WEP_KEY);
82 PRINTK_BITS(HDR, KEEP_ALIVE);
83 PRINTK_BITS(HDR, BUFF_TAIL_ADDR);
84
85 reg = be32_to_cpu(hdr->reg3);
86 PRINTK_BITS(HDR, CTS_11G);
87 PRINTK_BITS(HDR, RTS_11G);
88 PRINTK_BITS(HDR, FRAG_SIZE);
89 PRINTK_BITS(HDR, PAYLOAD_LEN);
90 PRINTK_BITS(HDR, FRAG_NUM);
91
92 reg = be32_to_cpu(hdr->reg4);
93 PRINTK_BITS(HDR, RELAY_STAID);
94 PRINTK_BITS(HDR, STATION_ID);
95 PRINTK_BITS(HDR, WORKQUEUE_ID);
96
97 reg = be32_to_cpu(hdr->reg5);
98 /* printf the route flag */
99 PRINTK_BITS(HDR, ROUTE_HOST);
100 PRINTK_BITS(HDR, ROUTE_CARD_CPU);
101 PRINTK_BITS(HDR, ROUTE_ENCRYPTION);
102 PRINTK_BITS(HDR, ROUTE_TX);
103 PRINTK_BITS(HDR, ROUTE_RX1);
104 PRINTK_BITS(HDR, ROUTE_RX2);
105 PRINTK_BITS(HDR, ROUTE_COMPRESSION);
106
107 PRINTK_BE32(HDR, hdr->_11g0);
108 PRINTK_BE32(HDR, hdr->_11g1);
109 PRINTK_BE32(HDR, hdr->_11b0);
110 PRINTK_BE32(HDR, hdr->_11b1);
111
112 dump_ieee80211b_phy_hdr(hdr->_11b0, hdr->_11b1);
113
114 /* Fixme */
115 for (i = 0; i < ARRAY_SIZE(hdr->mac_hdr); i++) {
116 if (i == 0)
117 printk(KERN_DEBUG PFX "IEEE80211 HDR: ");
118 printk("%.2x ", hdr->mac_hdr[i]);
119 if (i + 1 == ARRAY_SIZE(hdr->mac_hdr))
120 printk("\n");
121 }
122
123 PRINTK_BE16(HDR, hdr->rts_duration);
124 PRINTK_BE16(HDR, hdr->last_duration);
125 PRINTK_BE16(HDR, hdr->sec_last_duration);
126 PRINTK_BE16(HDR, hdr->other_duration);
127 PRINTK_BE16(HDR, hdr->tx_other_duration);
128 PRINTK_BE16(HDR, hdr->last_11g_len);
129 PRINTK_BE16(HDR, hdr->other_11g_len);
130 PRINTK_BE16(HDR, hdr->last_11b_len);
131 PRINTK_BE16(HDR, hdr->other_11b_len);
132
133 /* FIXME */
134 reg = be16_to_cpu(hdr->reg6);
135 PRINTK_BITS(HDR, MBF);
136 PRINTK_BITS(HDR, RSVD4);
137
138 PRINTK_BE16(HDR, hdr->rx_frag_stat);
139
140 PRINTK_BE32(HDR, hdr->time_stamp);
141 PRINTK_BE32(HDR, hdr->phy_stats_hi);
142 PRINTK_BE32(HDR, hdr->phy_stats_lo);
143 PRINTK_BE32(HDR, hdr->mic_key0);
144 PRINTK_BE32(HDR, hdr->mic_key1);
145} /* agnx_print_hdr */
146
147
148static inline void agnx_print_rx_hdr(struct agnx_hdr *hdr)
149{
150 agnx_print_hdr(hdr);
151
152 PRINTK_BE16(HDR, hdr->rx.rx_packet_duration);
153 PRINTK_BE16(HDR, hdr->rx.replay_cnt);
154
155 PRINTK_U8(HDR, hdr->rx_channel);
156}
157
158static inline void agnx_print_tx_hdr(struct agnx_hdr *hdr)
159{
160 agnx_print_hdr(hdr);
161
162 PRINTK_U8(HDR, hdr->tx.long_retry_limit);
163 PRINTK_U8(HDR, hdr->tx.short_retry_limit);
164 PRINTK_U8(HDR, hdr->tx.long_retry_cnt);
165 PRINTK_U8(HDR, hdr->tx.short_retry_cnt);
166
167 PRINTK_U8(HDR, hdr->rx_channel);
168}
169
170static inline void
171agnx_print_sta_power(struct agnx_priv *priv, unsigned int sta_idx)
172{
173 struct agnx_sta_power power;
174 u32 reg;
175
176 get_sta_power(priv, &power, sta_idx);
177
178 reg = le32_to_cpu(power.reg);
179 PRINTK_BITS(STA_POWER, SIGNAL);
180 PRINTK_BITS(STA_POWER, RATE);
181 PRINTK_BITS(STA_POWER, TIFS);
182 PRINTK_BITS(STA_POWER, EDCF);
183 PRINTK_BITS(STA_POWER, CHANNEL_BOND);
184 PRINTK_BITS(STA_POWER, PHY_MODE);
185 PRINTK_BITS(STA_POWER, POWER_LEVEL);
186 PRINTK_BITS(STA_POWER, NUM_TRANSMITTERS);
187}
188
189static inline void
190agnx_print_sta_tx_wq(struct agnx_priv *priv, unsigned int sta_idx, unsigned int wq_idx)
191{
192 struct agnx_sta_tx_wq tx_wq;
193 u32 reg;
194
195 get_sta_tx_wq(priv, &tx_wq, sta_idx, wq_idx);
196
197 reg = le32_to_cpu(tx_wq.reg0);
198 PRINTK_BITS(STA_TX_WQ, TAIL_POINTER);
199 PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_LOW);
200
201 reg = le32_to_cpu(tx_wq.reg3);
202 PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_HIGH);
203 PRINTK_BITS(STA_TX_WQ, ACK_POINTER_LOW);
204
205 reg = le32_to_cpu(tx_wq.reg1);
206 PRINTK_BITS(STA_TX_WQ, ACK_POINTER_HIGH);
207 PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_TAIL_PACK_CNT);
208 PRINTK_BITS(STA_TX_WQ, ACK_TIMOUT_TAIL_PACK_CNT);
209
210 reg = le32_to_cpu(tx_wq.reg2);
211 PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_BYTE_CNT);
212 PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_FRAG_CNT);
213 PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_ACK_TYPE);
214 PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_VALID);
215}
216
217static inline void agnx_print_sta_traffic(struct agnx_sta_traffic *traffic)
218{
219 u32 reg;
220
221 reg = le32_to_cpu(traffic->reg0);
222 PRINTK_BITS(STA_TRAFFIC, ACK_TIMOUT_CNT);
223 PRINTK_BITS(STA_TRAFFIC, TRAFFIC_ACK_TYPE);
224 PRINTK_BITS(STA_TRAFFIC, NEW_PACKET);
225 PRINTK_BITS(STA_TRAFFIC, TRAFFIC_VALID);
226 PRINTK_BITS(STA_TRAFFIC, RX_HDR_DESC_POINTER);
227
228 reg = le32_to_cpu(traffic->reg1);
229 PRINTK_BITS(STA_TRAFFIC, RX_PACKET_TIMESTAMP);
230 PRINTK_BITS(STA_TRAFFIC, TRAFFIC_RESERVED);
231 PRINTK_BITS(STA_TRAFFIC, SV);
232 PRINTK_BITS(STA_TRAFFIC, RX_SEQUENCE_NUM);
233
234 PRINTK_LE32(STA_TRAFFIC, traffic->tx_replay_cnt_low);
235
236 PRINTK_LE16(STA_TRAFFIC, traffic->tx_replay_cnt_high);
237 PRINTK_LE16(STA_TRAFFIC, traffic->rx_replay_cnt_high);
238
239 PRINTK_LE32(STA_TRAFFIC, traffic->rx_replay_cnt_low);
240}
241
242static inline void agnx_print_sta(struct agnx_priv *priv, unsigned int sta_idx)
243{
244 struct agnx_sta station;
245 struct agnx_sta *sta = &station;
246 u32 reg;
247 unsigned int i;
248
249 get_sta(priv, sta, sta_idx);
250
251 for (i = 0; i < 4; i++)
252 PRINTK_LE32(STA, sta->tx_session_keys[i]);
253 for (i = 0; i < 4; i++)
254 PRINTK_LE32(STA, sta->rx_session_keys[i]);
255
256 reg = le32_to_cpu(sta->reg);
257 PRINTK_BITS(STA, ID_1);
258 PRINTK_BITS(STA, ID_0);
259 PRINTK_BITS(STA, ENABLE_CONCATENATION);
260 PRINTK_BITS(STA, ENABLE_DECOMPRESSION);
261 PRINTK_BITS(STA, STA_RESERVED);
262 PRINTK_BITS(STA, EAP);
263 PRINTK_BITS(STA, ED_NULL);
264 PRINTK_BITS(STA, ENCRYPTION_POLICY);
265 PRINTK_BITS(STA, DEFINED_KEY_ID);
266 PRINTK_BITS(STA, FIXED_KEY);
267 PRINTK_BITS(STA, KEY_VALID);
268 PRINTK_BITS(STA, STATION_VALID);
269
270 PRINTK_LE32(STA, sta->tx_aes_blks_unicast);
271 PRINTK_LE32(STA, sta->rx_aes_blks_unicast);
272
273 PRINTK_LE16(STA, sta->aes_format_err_unicast_cnt);
274 PRINTK_LE16(STA, sta->aes_replay_unicast);
275
276 PRINTK_LE16(STA, sta->aes_decrypt_err_unicast);
277 PRINTK_LE16(STA, sta->aes_decrypt_err_default);
278
279 PRINTK_LE16(STA, sta->single_retry_packets);
280 PRINTK_LE16(STA, sta->failed_tx_packets);
281
282 PRINTK_LE16(STA, sta->muti_retry_packets);
283 PRINTK_LE16(STA, sta->ack_timeouts);
284
285 PRINTK_LE16(STA, sta->frag_tx_cnt);
286 PRINTK_LE16(STA, sta->rts_brq_sent);
287
288 PRINTK_LE16(STA, sta->tx_packets);
289 PRINTK_LE16(STA, sta->cts_back_timeout);
290
291 PRINTK_LE32(STA, sta->phy_stats_high);
292 PRINTK_LE32(STA, sta->phy_stats_low);
293
294 /* for (i = 0; i < 8; i++) */
295 agnx_print_sta_traffic(sta->traffic + 0);
296
297 PRINTK_LE16(STA, sta->traffic_class0_frag_success);
298 PRINTK_LE16(STA, sta->traffic_class1_frag_success);
299 PRINTK_LE16(STA, sta->traffic_class2_frag_success);
300 PRINTK_LE16(STA, sta->traffic_class3_frag_success);
301 PRINTK_LE16(STA, sta->traffic_class4_frag_success);
302 PRINTK_LE16(STA, sta->traffic_class5_frag_success);
303 PRINTK_LE16(STA, sta->traffic_class6_frag_success);
304 PRINTK_LE16(STA, sta->traffic_class7_frag_success);
305
306 PRINTK_LE16(STA, sta->num_frag_non_prime_rates);
307 PRINTK_LE16(STA, sta->ack_timeout_non_prime_rates);
308}
309
310
311static inline void dump_ieee80211_hdr(struct ieee80211_hdr *hdr, char *tag)
312{
313 u16 fctl;
314 int hdrlen;
315
316 fctl = le16_to_cpu(hdr->frame_control);
317 switch (fctl & IEEE80211_FCTL_FTYPE) {
318 case IEEE80211_FTYPE_DATA:
319 printk(PFX "%s DATA ", tag);
320 break;
321 case IEEE80211_FTYPE_CTL:
322 printk(PFX "%s CTL ", tag);
323 break;
324 case IEEE80211_FTYPE_MGMT:
325 printk(PFX "%s MGMT ", tag);
326 switch (fctl & IEEE80211_FCTL_STYPE) {
327 case IEEE80211_STYPE_ASSOC_REQ:
328 printk("SubType: ASSOC_REQ ");
329 break;
330 case IEEE80211_STYPE_ASSOC_RESP:
331 printk("SubType: ASSOC_RESP ");
332 break;
333 case IEEE80211_STYPE_REASSOC_REQ:
334 printk("SubType: REASSOC_REQ ");
335 break;
336 case IEEE80211_STYPE_REASSOC_RESP:
337 printk("SubType: REASSOC_RESP ");
338 break;
339 case IEEE80211_STYPE_PROBE_REQ:
340 printk("SubType: PROBE_REQ ");
341 break;
342 case IEEE80211_STYPE_PROBE_RESP:
343 printk("SubType: PROBE_RESP ");
344 break;
345 case IEEE80211_STYPE_BEACON:
346 printk("SubType: BEACON ");
347 break;
348 case IEEE80211_STYPE_ATIM:
349 printk("SubType: ATIM ");
350 break;
351 case IEEE80211_STYPE_DISASSOC:
352 printk("SubType: DISASSOC ");
353 break;
354 case IEEE80211_STYPE_AUTH:
355 printk("SubType: AUTH ");
356 break;
357 case IEEE80211_STYPE_DEAUTH:
358 printk("SubType: DEAUTH ");
359 break;
360 case IEEE80211_STYPE_ACTION:
361 printk("SubType: ACTION ");
362 break;
363 default:
364 printk("SubType: Unknow\n");
365 }
366 break;
367 default:
368 printk(PFX "%s Packet type: Unknow\n", tag);
369 }
370
371 hdrlen = ieee80211_hdrlen(fctl);
372
373 if (hdrlen >= 4)
374 printk("FC=0x%04x DUR=0x%04x",
375 fctl, le16_to_cpu(hdr->duration_id));
376 if (hdrlen >= 10)
377 printk(" A1=%pM", hdr->addr1);
378 if (hdrlen >= 16)
379 printk(" A2=%pM", hdr->addr2);
380 if (hdrlen >= 24)
381 printk(" A3=%pM", hdr->addr3);
382 if (hdrlen >= 30)
383 printk(" A4=%pM", hdr->addr4);
384 printk("\n");
385}
386
387static inline void dump_txm_registers(struct agnx_priv *priv)
388{
389 void __iomem *ctl = priv->ctl;
390 int i;
391 for (i = 0; i <= 0x1e8; i += 4)
392 printk(KERN_DEBUG PFX "TXM: %x---> 0x%.8x\n", i, ioread32(ctl + i));
393}
394static inline void dump_rxm_registers(struct agnx_priv *priv)
395{
396 void __iomem *ctl = priv->ctl;
397 int i;
398 for (i = 0; i <= 0x108; i += 4)
399 printk(KERN_DEBUG PFX "RXM: %x---> 0x%.8x\n", i, ioread32(ctl + 0x2000 + i));
400}
401static inline void dump_bm_registers(struct agnx_priv *priv)
402{
403 void __iomem *ctl = priv->ctl;
404 int i;
405 for (i = 0; i <= 0x90; i += 4)
406 printk(KERN_DEBUG PFX "BM: %x---> 0x%.8x\n", i, ioread32(ctl + 0x2c00 + i));
407}
408static inline void dump_cir_registers(struct agnx_priv *priv)
409{
410 void __iomem *ctl = priv->ctl;
411 int i;
412 for (i = 0; i <= 0xb8; i += 4)
413 printk(KERN_DEBUG PFX "CIR: %x---> 0x%.8x\n", i, ioread32(ctl + 0x3000 + i));
414}
415
416#endif /* AGNX_DEBUG_H_ */
diff --git a/drivers/staging/agnx/pci.c b/drivers/staging/agnx/pci.c
deleted file mode 100644
index 32b5489456a8..000000000000
--- a/drivers/staging/agnx/pci.c
+++ /dev/null
@@ -1,635 +0,0 @@
1/**
2 * Airgo MIMO wireless driver
3 *
4 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
5
6 * Thanks for Jeff Williams <angelbane@gmail.com> do reverse engineer
7 * works and published the SPECS at http://airgo.wdwconsulting.net/mymoin
8
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/init.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/delay.h>
18
19#include "agnx.h"
20#include "debug.h"
21#include "xmit.h"
22#include "phy.h"
23
24MODULE_AUTHOR("Li YanBo <dreamfly281@gmail.com>");
25MODULE_DESCRIPTION("Airgo MIMO PCI wireless driver");
26MODULE_LICENSE("GPL");
27
28static struct pci_device_id agnx_pci_id_tbl[] __devinitdata = {
29 { PCI_DEVICE(0x17cb, 0x0001) }, /* Beklin F5d8010, Netgear WGM511 etc */
30 { PCI_DEVICE(0x17cb, 0x0002) }, /* Netgear Wpnt511 */
31 { 0 }
32};
33
34MODULE_DEVICE_TABLE(pci, agnx_pci_id_tbl);
35
36
/*
 * Acknowledge the interrupt causes in @*reason and refine them:
 * a TX cause is split into AGNX_STAT_TXD / AGNX_STAT_TXM depending on
 * which TX control register has its bit 2 set.  Acks are performed by
 * writing the (modified) value back to the same register.
 */
static inline void agnx_interrupt_ack(struct agnx_priv *priv, u32 *reason)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	if (*reason & AGNX_STAT_RX) {
		/* Mark RX complete: set bit 2 in the RX control register. */
		reg = ioread32(ctl + AGNX_CIR_RXCTL);
		reg |= 0x4;
		iowrite32(reg, ctl + AGNX_CIR_RXCTL);
		/* disable Rx interrupt */
	}
	if (*reason & AGNX_STAT_TX) {
		/* Bit 2 set means this TX queue raised the interrupt; writing
		 * the value back acks it (presumably write-to-clear — confirm
		 * against the Airgo SPECS). */
		reg = ioread32(ctl + AGNX_CIR_TXDCTL);
		if (reg & 0x4) {
			iowrite32(reg, ctl + AGNX_CIR_TXDCTL);
			*reason |= AGNX_STAT_TXD;
		}
		reg = ioread32(ctl + AGNX_CIR_TXMCTL);
		if (reg & 0x4) {
			iowrite32(reg, ctl + AGNX_CIR_TXMCTL);
			*reason |= AGNX_STAT_TXM;
		}
	}
#if 0
	if (*reason & AGNX_STAT_X) {
		reg = ioread32(ctl + AGNX_INT_STAT);
		iowrite32(reg, ctl + AGNX_INT_STAT);
		/* FIXME reinit interrupt mask */
		reg = 0xc390bf9 & ~IRQ_TX_BEACON;
		reg &= ~IRQ_TX_DISABLE;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(0x800, ctl + AGNX_CIR_BLKCTL);
	}
#endif
} /* agnx_interrupt_ack */
73
/*
 * Shared PCI interrupt handler.
 *
 * Reads the block-control register to decide whether the interrupt is
 * ours (any of the low three bits), latches the raw interrupt status,
 * acks the causes via agnx_interrupt_ack(), then dispatches to the
 * RX/TXD/TXM/other sub-handlers.  RX interrupts are masked for the
 * duration of the dispatch.  Returns IRQ_NONE when the device is not
 * started or the interrupt was not ours.
 */
static irqreturn_t agnx_interrupt_handler(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct agnx_priv *priv = dev->priv;
	void __iomem *ctl = priv->ctl;
	irqreturn_t ret = IRQ_NONE;
	u32 irq_reason;

	spin_lock(&priv->lock);

/* 	printk(KERN_ERR PFX "Get a interrupt %s\n", __func__); */

	/* Ignore interrupts that arrive before agnx_start() finished. */
	if (priv->init_status != AGNX_START)
		goto out;

	/* FIXME: the original author was unsure whether this read needs
	 * additional locking against the ack path — left as-is. */
	irq_reason = ioread32(ctl + AGNX_CIR_BLKCTL);
	if (!(irq_reason & 0x7))
		goto out;

	ret = IRQ_HANDLED;
	priv->irq_status = ioread32(ctl + AGNX_INT_STAT);

/* 	printk(PFX "Interrupt reason is 0x%x\n", irq_reason); */
	/* Make sure the txm and txd flags don't conflict with other unknown
	   interrupt flag, maybe is not necessary */
	irq_reason &= 0xF;

	disable_rx_interrupt(priv);
	/* TODO Make sure the card finished initialized */
	agnx_interrupt_ack(priv, &irq_reason);

	if (irq_reason & AGNX_STAT_RX)
		handle_rx_irq(priv);
	if (irq_reason & AGNX_STAT_TXD)
		handle_txd_irq(priv);
	if (irq_reason & AGNX_STAT_TXM)
		handle_txm_irq(priv);
	if (irq_reason & AGNX_STAT_X)
		handle_other_irq(priv);

	enable_rx_interrupt(priv);
out:
	spin_unlock(&priv->lock);
	return ret;
} /* agnx_interrupt_handler */
120
121
122/* FIXME */
123static int agnx_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
124{
125 AGNX_TRACE;
126 return _agnx_tx(dev->priv, skb);
127} /* agnx_tx */
128
129
/*
 * Read the permanent MAC address out of hardware into priv->mac_addr.
 *
 * The dummy reads + udelay()s before the real read are a workaround:
 * touching the EEPROM/PM PLL registers directly locks up cardbus
 * (WGM511) cards, so the address is fetched through the 0x3544/0x3548
 * register pair instead.  Falls back to a random address when the
 * hardware returns an invalid one.  Always returns 0.
 */
static int agnx_get_mac_address(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* Attention! directly read the MAC or other date from EEPROM will
	   lead to cardbus(WGM511) lock up when write to PM PLL register */
	reg = agnx_read32(ctl, 0x3544);
	udelay(40);
	reg = agnx_read32(ctl, 0x354c);
	udelay(50);
	/* Get the mac address */
	reg = agnx_read32(ctl, 0x3544);
	udelay(40);

	/* HACK: byte-swap via cpu_to_le32 then pick bytes [2],[3].
	 * NOTE(review): applying cpu_to_le32 to a plain u32 and punning
	 * through a byte pointer is endian-fragile — the result presumably
	 * is only correct on the tested (little-endian) hosts; verify on
	 * big-endian before reuse. */
	reg = cpu_to_le32(reg);
	priv->mac_addr[0] = ((u8 *)&reg)[2];
	priv->mac_addr[1] = ((u8 *)&reg)[3];
	reg = agnx_read32(ctl, 0x3548);
	udelay(50);
	/* NOTE(review): 4-byte store at mac_addr + 2 is unaligned; assumes
	 * the arch tolerates it — confirm or use memcpy. */
	*((u32 *)(priv->mac_addr + 2)) = cpu_to_le32(reg);

	if (!is_valid_ether_addr(priv->mac_addr)) {
		printk(KERN_WARNING PFX "read mac %pM\n", priv->mac_addr);
		printk(KERN_WARNING PFX "Invalid hwaddr! Using random hwaddr\n");
		random_ether_addr(priv->mac_addr);
	}

	return 0;
} /* agnx_get_mac_address */
162
/*
 * Allocate the RX, TXM and TXD rings.
 *
 * One kzalloc'd array holds the per-slot bookkeeping for all three
 * rings, and one coherent DMA region holds all three descriptor arrays;
 * txm/txd are carved out of the rx allocation by offset.  Returns 0 or
 * -ENOMEM (the info array is freed again if the DMA allocation fails).
 */
static int agnx_alloc_rings(struct agnx_priv *priv)
{
	unsigned int len;
	AGNX_TRACE;

	/* Allocate RX/TXM/TXD rings info */
	priv->rx.size = AGNX_RX_RING_SIZE;
	priv->txm.size = AGNX_TXM_RING_SIZE;
	priv->txd.size = AGNX_TXD_RING_SIZE;

	len = priv->rx.size + priv->txm.size + priv->txd.size;

	/* NOTE(review): GFP_ATOMIC here although the only visible caller
	 * (agnx_start) may sleep — GFP_KERNEL would presumably be fine;
	 * confirm no atomic-context caller exists. */
/* 	priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_KERNEL); */
	priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_ATOMIC);
	if (!priv->rx.info)
		return -ENOMEM;
	priv->txm.info = priv->rx.info + priv->rx.size;
	priv->txd.info = priv->txm.info + priv->txm.size;

	/* Allocate RX/TXM/TXD descriptors in a single coherent region. */
	priv->rx.desc = pci_alloc_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
					     &priv->rx.dma);
	if (!priv->rx.desc) {
		kfree(priv->rx.info);
		return -ENOMEM;
	}

	/* txm/txd descriptor pointers and bus addresses are offsets into
	 * the rx region; rings_free() frees only the rx base. */
	priv->txm.desc = priv->rx.desc + priv->rx.size;
	priv->txm.dma = priv->rx.dma + sizeof(struct agnx_desc) * priv->rx.size;
	priv->txd.desc = priv->txm.desc + priv->txm.size;
	priv->txd.dma = priv->txm.dma + sizeof(struct agnx_desc) * priv->txm.size;

	return 0;
} /* agnx_alloc_rings */
197
/*
 * Free the combined ring allocations made by agnx_alloc_rings():
 * the single info array and the single coherent descriptor region.
 * NOTE(review): pci_free_consistent() is called with IRQs disabled
 * here; the DMA API forbids that on some configurations — verify
 * whether the lock is actually needed around the free.
 */
static void rings_free(struct agnx_priv *priv)
{
	unsigned int len = priv->rx.size + priv->txm.size + priv->txd.size;
	unsigned long flags;
	AGNX_TRACE;

	spin_lock_irqsave(&priv->lock, flags);
	kfree(priv->rx.info);
	pci_free_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
			    priv->rx.desc, priv->rx.dma);
	spin_unlock_irqrestore(&priv->lock, flags);
}
210
211#if 0
212static void agnx_periodic_work_handler(struct work_struct *work)
213{
214 struct agnx_priv *priv = container_of(work, struct agnx_priv, periodic_work.work);
215/* unsigned long flags; */
216 unsigned long delay;
217
218 /* fixme: using mutex?? */
219/* spin_lock_irqsave(&priv->lock, flags); */
220
221 /* TODO Recalibrate*/
222/* calibrate_oscillator(priv); */
223/* antenna_calibrate(priv); */
224/* agnx_send_packet(priv, 997); */
225 /* FIXME */
226/* if (debug == 3) */
227/* delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
228/* else */
229 delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY);
230/* delay = round_jiffies(HZ * 15); */
231
232 queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay);
233
234/* spin_unlock_irqrestore(&priv->lock, flags); */
235}
236#endif
237
/*
 * mac80211 .start hook: allocate rings, install the shared IRQ handler,
 * bring the hardware up, and mark the device AGNX_START so the IRQ
 * handler begins processing.  On IRQ registration failure the rings are
 * freed again.  Returns 0 or a negative errno.
 */
static int agnx_start(struct ieee80211_hw *dev)
{
	struct agnx_priv *priv = dev->priv;
	/* unsigned long delay; */
	int err = 0;
	AGNX_TRACE;

	err = agnx_alloc_rings(priv);
	if (err) {
		printk(KERN_ERR PFX "Can't alloc RX/TXM/TXD rings\n");
		goto out;
	}
	err = request_irq(priv->pdev->irq, &agnx_interrupt_handler,
			  IRQF_SHARED, "agnx_pci", dev);
	if (err) {
		printk(KERN_ERR PFX "Failed to register IRQ handler\n");
		rings_free(priv);
		goto out;
	}

/* 	mdelay(500); */

	/* Hardware init may sleep; assert we are in a sleepable context. */
	might_sleep();
	agnx_hw_init(priv);

/* 	mdelay(500); */
	might_sleep();

	priv->init_status = AGNX_START;
/* 	INIT_DELAYED_WORK(&priv->periodic_work, agnx_periodic_work_handler); */
/* 	delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
/* 	queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay); */
out:
	return err;
} /* agnx_start */
273
/*
 * mac80211 .stop hook: mark the device stopped (so the IRQ handler
 * ignores late interrupts), reset the hardware to silence its IRQ line,
 * then release the IRQ and tear down the rings.
 */
static void agnx_stop(struct ieee80211_hw *dev)
{
	struct agnx_priv *priv = dev->priv;
	AGNX_TRACE;

	priv->init_status = AGNX_STOP;
	/* make sure hardware will not generate irq */
	agnx_hw_reset(priv);
	free_irq(priv->pdev->irq, dev);
/* 	flush_workqueue(priv->hw->workqueue); */
/* 	cancel_delayed_work_sync(&priv->periodic_work); */
	unfill_rings(priv);
	rings_free(priv);
}
288
/*
 * mac80211 .config hook: retune the radio when the configured channel
 * changed.  Only the channel is handled; all other config changes are
 * ignored.  NOTE(review): takes priv->lock without disabling IRQs while
 * the interrupt handler takes the same lock — looks deadlock-prone;
 * verify whether spin_lock_irqsave is needed here.
 */
static int agnx_config(struct ieee80211_hw *dev, u32 changed)
{
	struct agnx_priv *priv = dev->priv;
	struct ieee80211_conf *conf = &dev->conf;
	int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
	AGNX_TRACE;

	spin_lock(&priv->lock);
	/* FIXME need priv lock? */
	if (channel != priv->channel) {
		priv->channel = channel;
		agnx_set_channel(priv, priv->channel);
	}

	spin_unlock(&priv->lock);
	return 0;
}
306
/*
 * mac80211 .bss_info_changed hook.  Only BSSID changes are handled:
 * program the new BSSID into the receiver, cache it in priv->bssid,
 * and (re)initialize the BSSID station entry and its hash mapping.
 */
static void agnx_bss_info_changed(struct ieee80211_hw *dev,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *conf,
				  u32 changed)
{
	struct agnx_priv *priv = dev->priv;
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	if (!(changed & BSS_CHANGED_BSSID))
		return;

	spin_lock(&priv->lock);

	/* Only reprogram when the BSSID actually differs. */
	if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
		agnx_set_bssid(priv, conf->bssid);
		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
		hash_write(priv, conf->bssid, BSSID_STAID);
		sta_init(priv, BSSID_STAID);
		/* FIXME needed? */
		sta_power_init(priv, BSSID_STAID);
		agnx_write32(ctl, AGNX_BM_MTSM, 0xff & ~0x1);
	}
	spin_unlock(&priv->lock);
} /* agnx_bss_info_changed */
332
333
/*
 * mac80211 .configure_filter hook.  No hardware RX filters are
 * programmed yet; reporting zero flags makes mac80211 do all the
 * filtering in software.  TODO: program the RXM filter registers.
 */
static void agnx_configure_filter(struct ieee80211_hw *dev,
				  unsigned int changed_flags,
				  unsigned int *total_flags,
				  int mc_count, struct dev_mc_list *mclist)
{
	*total_flags = 0;
}
344
345static int agnx_add_interface(struct ieee80211_hw *dev,
346 struct ieee80211_if_init_conf *conf)
347{
348 struct agnx_priv *priv = dev->priv;
349 AGNX_TRACE;
350
351 spin_lock(&priv->lock);
352 /* FIXME */
353 if (priv->mode != NL80211_IFTYPE_MONITOR)
354 return -EOPNOTSUPP;
355
356 switch (conf->type) {
357 case NL80211_IFTYPE_STATION:
358 priv->mode = conf->type;
359 break;
360 default:
361 return -EOPNOTSUPP;
362 }
363
364 spin_unlock(&priv->lock);
365
366 return 0;
367}
368
369static void agnx_remove_interface(struct ieee80211_hw *dev,
370 struct ieee80211_if_init_conf *conf)
371{
372 struct agnx_priv *priv = dev->priv;
373 AGNX_TRACE;
374
375 /* TODO */
376 priv->mode = NL80211_IFTYPE_MONITOR;
377}
378
/*
 * mac80211 .get_stats hook: copy the cached low-level statistics out
 * under the lock.  The stats themselves are not yet maintained anywhere
 * visible here (TODO in original).  Always returns 0.
 */
static int agnx_get_stats(struct ieee80211_hw *dev,
			  struct ieee80211_low_level_stats *stats)
{
	struct agnx_priv *priv = dev->priv;
	AGNX_TRACE;
	spin_lock(&priv->lock);
	/* TODO !! */
	memcpy(stats, &priv->stats, sizeof(*stats));
	spin_unlock(&priv->lock);

	return 0;
}
391
392static u64 agnx_get_tsft(struct ieee80211_hw *dev)
393{
394 void __iomem *ctl = ((struct agnx_priv *)dev->priv)->ctl;
395 u32 tsftl;
396 u64 tsft;
397 AGNX_TRACE;
398
399 /* FIXME */
400 tsftl = ioread32(ctl + AGNX_TXM_TIMESTAMPLO);
401 tsft = ioread32(ctl + AGNX_TXM_TIMESTAMPHI);
402 tsft <<= 32;
403 tsft |= tsftl;
404
405 return tsft;
406}
407
/*
 * mac80211 .get_tx_stats hook.  Reports queue 0 only, derived from the
 * TXD ring indices (each frame occupies two slots, hence the /2).
 * FIXME (inherited): the TXM queue is not reported.
 */
static int agnx_get_tx_stats(struct ieee80211_hw *dev,
			     struct ieee80211_tx_queue_stats *stats)
{
	struct agnx_priv *priv = dev->priv;
	AGNX_TRACE;

	/* FIXME now we just using txd queue, but should using txm queue too */
	stats[0].len = (priv->txd.idx - priv->txd.idx_sent) / 2;
	stats[0].limit = priv->txd.size - 2;
	stats[0].count = priv->txd.idx / 2;

	return 0;
}
421
422static struct ieee80211_ops agnx_ops = {
423 .tx = agnx_tx,
424 .start = agnx_start,
425 .stop = agnx_stop,
426 .add_interface = agnx_add_interface,
427 .remove_interface = agnx_remove_interface,
428 .config = agnx_config,
429 .bss_info_changed = agnx_bss_info_changed,
430 .configure_filter = agnx_configure_filter,
431 .get_stats = agnx_get_stats,
432 .get_tx_stats = agnx_get_tx_stats,
433 .get_tsf = agnx_get_tsft
434};
435
/*
 * PCI .remove callback: unwind agnx_pci_probe() in reverse order —
 * unregister from mac80211, unmap both BARs, release the PCI regions,
 * disable the device, and free the ieee80211_hw (which owns priv).
 */
static void __devexit agnx_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct agnx_priv *priv;
	AGNX_TRACE;

	if (!dev)
		return;
	priv = dev->priv;
	ieee80211_unregister_hw(dev);
	pci_iounmap(pdev, priv->ctl);
	pci_iounmap(pdev, priv->data);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	ieee80211_free_hw(dev);
}
453
/*
 * PCI .probe callback.
 *
 * Sequence: enable the device, claim its regions, require a 32-bit DMA
 * mask, allocate the ieee80211_hw (priv lives in dev->priv), map BAR 0
 * (control registers, priv->ctl) and BAR 1 (packet memory, priv->data),
 * read the MAC address, describe the 2.4 GHz band, register with
 * mac80211, and finally reset the hardware.  Errors unwind through the
 * goto-cleanup chain in reverse acquisition order.
 *
 * NOTE(review): pci_enable_device() is not undone on the
 * pci_request_regions() failure path — the early `return err` skips
 * pci_disable_device(); confirm and route it through err_free_reg-style
 * cleanup.
 */
static int __devinit agnx_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	struct ieee80211_hw *dev;
	struct agnx_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "can't enable pci device\n");
		return err;
	}

	err = pci_request_regions(pdev, "agnx-pci");
	if (err) {
		dev_err(&pdev->dev, "can't reserve PCI resources\n");
		return err;
	}

	/* The card only does 32-bit DMA. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "no suitable DMA available\n");
		err = -EIO;
		goto err_free_reg;
	}

	pci_set_master(pdev);

	dev = ieee80211_alloc_hw(sizeof(*priv), &agnx_ops);
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}
	priv = dev->priv;
	memset(priv, 0, sizeof(*priv));
	/* Monitor mode doubles as the "no interface" idle state. */
	priv->mode = NL80211_IFTYPE_MONITOR;
	priv->pdev = pdev;
	priv->hw = dev;
	spin_lock_init(&priv->lock);
	priv->init_status = AGNX_UNINIT;

	priv->ctl = pci_iomap(pdev, 0, 0);
/* 	dev_dbg(&pdev->dev, "MEM1 mapped address is 0x%p\n", priv->ctl); */
	if (!priv->ctl) {
		dev_err(&pdev->dev, "can't map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}
	priv->data = pci_iomap(pdev, 1, 0);
	if (!priv->data) {
		dev_err(&pdev->dev, "can't map device memory\n");
		err = -ENOMEM;
		goto err_iounmap2;
	}

	/* Revision gates several register-init quirks elsewhere. */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &priv->revid);

	priv->band.channels = (struct ieee80211_channel *)agnx_channels;
	priv->band.n_channels = ARRAY_SIZE(agnx_channels);
	priv->band.bitrates = (struct ieee80211_rate *)agnx_rates_80211g;
	priv->band.n_bitrates = ARRAY_SIZE(agnx_rates_80211g);

	/* Init ieee802.11 dev */
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->extra_tx_headroom = sizeof(struct agnx_hdr);

	/* FIXME It only include FCS in promious mode but not manage mode */
/* 	dev->flags = IEEE80211_HW_RX_INCLUDES_FCS; */
	dev->channel_change_time = 5000;
	dev->max_signal = 100;
	/* FIXME */
	dev->queues = 1;

	agnx_get_mac_address(priv);

	SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);

/* 	/\* FIXME *\/ */
/* 	for (i = 1; i < NUM_DRIVE_MODES; i++) { */
/* 		err = ieee80211_register_hwmode(dev, &priv->modes[i]); */
/* 		if (err) { */
/* 			printk(KERN_ERR PFX "Can't register hwmode\n"); */
/* 			goto err_iounmap; */
/* 		} */
/* 	} */

	priv->channel = 1;
	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;

	err = ieee80211_register_hw(dev);
	if (err) {
		dev_err(&pdev->dev, "can't register hardware\n");
		goto err_iounmap;
	}

	agnx_hw_reset(priv);

	dev_info(&pdev->dev, "%s: hwaddr %pM, Rev 0x%02x\n",
		 wiphy_name(dev->wiphy),
		 dev->wiphy->perm_addr, priv->revid);
	return 0;

 err_iounmap:
	pci_iounmap(pdev, priv->data);

 err_iounmap2:
	pci_iounmap(pdev, priv->ctl);

 err_free_dev:
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(dev);

 err_free_reg:
	pci_release_regions(pdev);

	pci_disable_device(pdev);
	return err;
} /* agnx_pci_probe*/
574
575#ifdef CONFIG_PM
576
/*
 * Legacy PCI suspend: quiesce mac80211 queues, stop the hardware, then
 * save PCI config space and drop to the requested power state.
 */
static int agnx_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	AGNX_TRACE;

	ieee80211_stop_queues(dev);
	agnx_stop(dev);

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
589
/*
 * Legacy PCI resume: restore power state and config space, restart the
 * hardware, and let mac80211 resume transmission.
 * NOTE(review): agnx_start()'s return value is ignored — a failed
 * resume silently wakes the queues on dead hardware; consider checking.
 */
static int agnx_pci_resume(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	AGNX_TRACE;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	agnx_start(dev);
	ieee80211_wake_queues(dev);

	return 0;
}
603
604#else
605
606#define agnx_pci_suspend NULL
607#define agnx_pci_resume NULL
608
609#endif /* CONFIG_PM */
610
611
612static struct pci_driver agnx_pci_driver = {
613 .name = "agnx-pci",
614 .id_table = agnx_pci_id_tbl,
615 .probe = agnx_pci_probe,
616 .remove = __devexit_p(agnx_pci_remove),
617 .suspend = agnx_pci_suspend,
618 .resume = agnx_pci_resume,
619};
620
621static int __init agnx_pci_init(void)
622{
623 AGNX_TRACE;
624 return pci_register_driver(&agnx_pci_driver);
625}
626
627static void __exit agnx_pci_exit(void)
628{
629 AGNX_TRACE;
630 pci_unregister_driver(&agnx_pci_driver);
631}
632
633
634module_init(agnx_pci_init);
635module_exit(agnx_pci_exit);
diff --git a/drivers/staging/agnx/phy.c b/drivers/staging/agnx/phy.c
deleted file mode 100644
index ec1ca86fa0c4..000000000000
--- a/drivers/staging/agnx/phy.c
+++ /dev/null
@@ -1,960 +0,0 @@
1/**
2 * Airgo MIMO wireless driver
3 *
4 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
5
6 * Thanks for Jeff Williams <angelbane@gmail.com> do reverse engineer
7 * works and published the SPECS at http://airgo.wdwconsulting.net/mymoin
8
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/init.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/delay.h>
18#include "agnx.h"
19#include "debug.h"
20#include "phy.h"
21#include "table.h"
22#include "sta.h"
23#include "xmit.h"
24
/*
 * Read one byte from the card's serial EEPROM at @address via the
 * CIR serial-interface register.
 * NOTE(review): the command struct is pushed through the register by
 * type-punning (*(__le32 *)&cmd); this assumes struct agnx_eeprom is
 * exactly 32 bits and bit-compatible with the register layout — confirm
 * against agnx.h before changing either.
 */
u8 read_from_eeprom(struct agnx_priv *priv, u16 address)
{
	void __iomem *ctl = priv->ctl;
	struct agnx_eeprom cmd;
	u32 reg;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = EEPROM_CMD_READ << AGNX_EEPROM_COMMAND_SHIFT;
	cmd.address = address;
	/* Verify that the Status bit is clear */
	/* Read Command and Address are written to the Serial Interface */
	iowrite32(*(__le32 *)&cmd, ctl + AGNX_CIR_SERIALITF);
	/* Wait for the Status bit to clear again */
	eeprom_delay();
	/* Read from Data */
	reg = ioread32(ctl + AGNX_CIR_SERIALITF);

	cmd = *(struct agnx_eeprom *)&reg;

	return cmd.data;
}
46
/*
 * Issue a full card reset by writing 0x80 to the block-control
 * register.  The surrounding reads of the same register appear to be
 * required flushes/settling reads (their values are discarded).
 * Always returns 0.
 */
static int card_full_reset(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x80);
	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	return 0;
}
58
59inline void enable_power_saving(struct agnx_priv *priv)
60{
61 void __iomem *ctl = priv->ctl;
62 u32 reg;
63
64 reg = agnx_read32(ctl, AGNX_PM_PMCTL);
65 reg &= ~0x8;
66 agnx_write32(ctl, AGNX_PM_PMCTL, reg);
67}
68
69inline void disable_power_saving(struct agnx_priv *priv)
70{
71 void __iomem *ctl = priv->ctl;
72 u32 reg;
73
74 reg = agnx_read32(ctl, AGNX_PM_PMCTL);
75 reg |= 0x8;
76 agnx_write32(ctl, AGNX_PM_PMCTL, reg);
77}
78
79
/*
 * Disable the receiver: clear the gain-control discovery mode, then
 * pulse the gain-control reset line (assert, deassert).
 */
void disable_receiver(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* FIXME Disable the receiver */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);
	/* Set gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	/* Reset gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
}
92
93
/*
 * Enable the receiver: put the gain control into adaptive discovery
 * mode, then pulse the gain-control reset line (assert, deassert).
 * (Original FIXME noted the enable/disable pair may be mislabeled
 * relative to each other — verify against the hardware SPECS.)
 */
void enable_receiver(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* Set adaptive gain control discovery mode */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
	/* Set gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	/* Clear gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
}
107
/*
 * Program priv->mac_addr into the RXM MAC address registers:
 * bytes 0-3 big-endian-packed into MACHI, bytes 4-5 into MACLO.
 */
static void mac_address_set(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u8 *mac_addr = priv->mac_addr;
	u32 reg;

	/* FIXME */
	reg = (mac_addr[0] << 24) | (mac_addr[1] << 16) | mac_addr[2] << 8 | mac_addr[3];
	iowrite32(reg, ctl + AGNX_RXM_MACHI);
	reg = (mac_addr[4] << 8) | mac_addr[5];
	iowrite32(reg, ctl + AGNX_RXM_MACLO);
}
120
/*
 * Program @bssid into the RXM BSSID registers (same packing as the MAC
 * address) and clear the TBTT counters.  The receiver is disabled
 * around the register writes, briefly enabled, then left disabled —
 * the caller is expected to re-enable it when ready.
 */
static void receiver_bssid_set(struct agnx_priv *priv, const u8 *bssid)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	disable_receiver(priv);
	/* FIXME */
	reg = bssid[0] << 24 | (bssid[1] << 16) | (bssid[2] << 8) | bssid[3];
	iowrite32(reg, ctl + AGNX_RXM_BSSIDHI);
	reg = (bssid[4] << 8) | bssid[5];
	iowrite32(reg, ctl + AGNX_RXM_BSSIDLO);

	/* Enable the receiver */
	enable_receiver(priv);

	/* Clear the TSF */
/* 	agnx_write32(ctl, AGNX_TXM_TSFLO, 0x0); */
/* 	agnx_write32(ctl, AGNX_TXM_TSFHI, 0x0); */
	/* Clear the TBTT */
	agnx_write32(ctl, AGNX_TXM_TBTTLO, 0x0);
	agnx_write32(ctl, AGNX_TXM_TBTTHI, 0x0);
	disable_receiver(priv);
} /* receiver_bssid_set */
144
/*
 * Initialize the band-management (BM) block: zero the PDU pool in
 * packet memory, set the per-queue PDU watermark counts, build the
 * free-PDU singly linked list in device memory, start all BM work
 * queues, and finally set the BM enable bit.
 */
static void band_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	int i;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_BM_TXWADDR, AGNX_PDU_TX_WQ);
	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);
	memset_io(data + AGNX_PDUPOOL, 0x0, AGNX_PDUPOOL_SIZE);
	agnx_write32(ctl, AGNX_BM_BMCTL, 0x200);

	/* Per-queue PDU watermark counts. */
	agnx_write32(ctl, AGNX_BM_CIPDUWCNT, 0x40);
	agnx_write32(ctl, AGNX_BM_SPPDUWCNT, 0x2);
	agnx_write32(ctl, AGNX_BM_RFPPDUWCNT, 0x0);
	agnx_write32(ctl, AGNX_BM_RHPPDUWCNT, 0x22);

	/* FIXME Initialize the Free Pool Linked List */
	/* 1. Write the Address of the Next Node ((0x41800 + node*size)/size)
	   to the first word of each node. */
	for (i = 0; i < PDU_FREE_CNT; i++) {
		iowrite32((AGNX_PDU_FREE + (i+1)*PDU_SIZE)/PDU_SIZE,
			  data + AGNX_PDU_FREE + (PDU_SIZE * i));
		/* The last node should be set to 0x0 */
		if ((i + 1) == PDU_FREE_CNT)
			memset_io(data + AGNX_PDU_FREE + (PDU_SIZE * i),
				  0x0, PDU_SIZE);
	}

	/* Head is First Pool address (0x41800) / size (0x80) */
	agnx_write32(ctl, AGNX_BM_FPLHP, AGNX_PDU_FREE/PDU_SIZE);
	/* Tail is Last Pool Address (0x47f80) / size (0x80) */
	agnx_write32(ctl, AGNX_BM_FPLTP, 0x47f80/PDU_SIZE);
	/* Count is Number of Nodes in the Pool (0xd0) */
	agnx_write32(ctl, AGNX_BM_FPCNT, PDU_FREE_CNT);

	/* Start all workqueue */
	agnx_write32(ctl, AGNX_BM_CIWQCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPULWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPUHWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPUTXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPURXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_SPRXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_SPTXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_RFPWCTL, 0x80000);

	/* Enable the Band Management */
	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
} /* band_managment_init */
197
198
/*
 * Initialize the system interface block: GPIO defaults, PHY test
 * register, a revision-0 SYSMODE quirk, and the serial-interface
 * setup value (meaning undocumented — see original comment; it likely
 * differs per card type).
 */
static void system_itf_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x0);
	agnx_write32(ctl, AGNX_PM_TESTPHY, 0x11e143a);

	/* Revision-0 silicon needs extra SYSMODE bits set. */
	if (priv->revid == 0) {
		reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
		reg |= 0x11;
		agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
	}
	/* ??? What is that means? it should difference for differice type
	   of cards */
	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0xfff81006);

	agnx_write32(ctl, AGNX_SYSITF_GPIOIN, 0x1f0000);
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
}
221
/*
 * Initialize the encryption block: clear all four WEP key registers
 * and set the CCM RX control register to its default.
 */
static void encryption_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_ENCRY_WEPKEY0, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY1, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY2, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY3, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_CCMRECTL, 0x8);
}
233
/*
 * Initialize the TX management (TXM) block.
 *
 * Loads the computational-engine lookup table, then programs inter-frame
 * spacings (SIFS/PIFS/TIFS/EIFS/DIFS), timeout limits, contention
 * windows, retry limits, probe delay and beacon-related defaults.  The
 * magic values come from the reverse-engineered Airgo SPECS; keep the
 * exact read-modify-write order — register side effects are not
 * documented.
 */
static void tx_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	AGNX_TRACE;

	/* Fill out the ComputationalEngineLookupTable
	 * starting at memory #2 offset 0x800
	 */
	tx_engine_lookup_tbl_init(priv);
	memset_io(data + 0x1000, 0, 0xfe0);
	/* Enable Transmission Management Functions */
	agnx_write32(ctl, AGNX_TXM_ETMF, 0x3ff);
	/* Write 0x3f to Transmission Template */
	agnx_write32(ctl, AGNX_TXM_TXTEMP, 0x3f);

	/* SIFS/PIFS timing differs between silicon revisions. */
	if (priv->revid >= 2)
		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e140a0b);
	else
		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e190a0b);

	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xff00;
	reg |= 0xb;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xffff00ff;
	reg |= 0xa00;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	/* Enable TIFS */
	agnx_write32(ctl, AGNX_TXM_CTL, 0x40000);

	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xff00ffff;
	reg |= 0x510000;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0xff00ffff;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0x00ffffff;
	reg |= 0x1c000000;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0x00ffffff;
	reg |= 0x01000000;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);

	/* # Set DIF 0-1,2-3,4-5,6-7 to defaults */
	agnx_write32(ctl, AGNX_TXM_DIF01, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF23, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF45, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF67, 0x321d321d);

	/* Max Ack timeout limit */
	agnx_write32(ctl, AGNX_TXM_MAXACKTIM, 0x1e19);
	/* Max RX Data Timeout count, */
	reg = agnx_read32(ctl, AGNX_TXM_MAXRXTIME);
	reg &= 0xffff0000;
	reg |= 0xff;
	agnx_write32(ctl, AGNX_TXM_MAXRXTIME, reg);

	/* CF poll RX Timeout count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff;
	reg |= 0xff0000;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);

	/* Max Timeout Exceeded count, */
	reg = agnx_read32(ctl, AGNX_TXM_MAXTIMOUT);
	reg &= 0xff00ffff;
	reg |= 0x190000;
	agnx_write32(ctl, AGNX_TXM_MAXTIMOUT, reg);

	/* CF ack timeout limit for 11b */
	reg = agnx_read32(ctl, AGNX_TXM_CFACKT11B);
	reg &= 0xff00;
	reg |= 0x1e;
	agnx_write32(ctl, AGNX_TXM_CFACKT11B, reg);

	/* Max CF Poll Timeout Count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff0000;
	reg |= 0x19;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
	/* CF Poll RX Timeout Count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff0000;
	reg |= 0x1e;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);

	/* # write default to */
	/* 1. Schedule Empty Count */
	agnx_write32(ctl, AGNX_TXM_SCHEMPCNT, 0x5);
	/* 2. CFP Period Count */
	agnx_write32(ctl, AGNX_TXM_CFPERCNT, 0x1);
	/* 3. CFP MDV */
	agnx_write32(ctl, AGNX_TXM_CFPMDV, 0x10000);

	/* Probe Delay */
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0xffff0000;
	reg |= 0x400;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);

	/* Max CCA count Slot */
	reg = agnx_read32(ctl, AGNX_TXM_MAXCCACNTSLOT);
	reg &= 0xffff00ff;
	reg |= 0x900;
	agnx_write32(ctl, AGNX_TXM_MAXCCACNTSLOT, reg);

	/* Slot limit/1 msec Limit */
	reg = agnx_read32(ctl, AGNX_TXM_SLOTLIMIT);
	reg &= 0xff00ffff;
	reg |= 0x140077;
	agnx_write32(ctl, AGNX_TXM_SLOTLIMIT, reg);

	/* # Set CW #(0-7) to default */
	agnx_write32(ctl, AGNX_TXM_CW0, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW1, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW2, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW3, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW4, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW5, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW6, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW7, 0xff0007);

	/* # Set Short/Long limit #(0-7) to default */
	agnx_write32(ctl, AGNX_TXM_SLBEALIM0, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM1, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM2, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM3, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM4, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM5, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM6, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM7, 0xa000a);

	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x1400;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
	/* Wait for bit 0 in Control Reg to clear */
	udelay(80);
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	/* Or 0x18000 to Control reg */
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x18000;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
	/* Wait for bit 0 in Control Reg to clear */
	udelay(80);
	reg = agnx_read32(ctl, AGNX_TXM_CTL);

	/* Set Listen Interval Count to default */
	agnx_write32(ctl, AGNX_TXM_LISINTERCNT, 0x1);
	/* Set DTIM period count to default */
	agnx_write32(ctl, AGNX_TXM_DTIMPERICNT, 0x2000);
} /* tx_management_init */
391
/*
 * Initialize the RX management (RXM) block: load the routing table and
 * program revision-dependent delay registers plus the request-rate
 * register (magic values from the reverse-engineered SPECS).
 */
static void rx_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* Initialize the Routing Table */
	routing_table_init(priv);

	/* Rev >= 3 silicon takes tuned delay values at raw offsets. */
	if (priv->revid >= 3) {
		agnx_write32(ctl, 0x2074, 0x1f171710);
		agnx_write32(ctl, 0x2078, 0x10100d0d);
		agnx_write32(ctl, 0x207c, 0x11111010);
	} else {
		agnx_write32(ctl, AGNX_RXM_DELAY11, 0x0);
	}
	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x8195e00);
}
409
410
/*
 * Initialize the timer block.  Timer 1 is deliberately zeroed
 * (disabled) instead of the commented-out 0x249f00/0xe2 periodic
 * setup, and the beacon control register is forced all-ones.
 */
static void agnx_timer_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

/* 	/\* Write 0x249f00 (tick duration?) to Timer 1 *\/ */
/* 	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x249f00); */
/* 	/\* Write 0xe2 to Timer 1 Control *\/ */
/* 	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0xe2); */

	/* Timer 1 disabled: zero tick duration... */
	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x0);
	/* ...and zero control. */
	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0x0);

	iowrite32(0xFFFFFFFF, priv->ctl + AGNX_TXM_BEACON_CTL);
}
428
/*
 * Initialize the power-management block: MAC/RF control defaults, a
 * masked update of the PM control register, and (rev >= 3 only) extra
 * soft-reset bits.
 */
static void power_manage_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_PM_MACMSW, 0x1f);
	agnx_write32(ctl, AGNX_PM_RFCTL, 0x1f);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= 0xf00f;
	reg |= 0xa0;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	if (priv->revid >= 3) {
		reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
		reg |= 0x18;
		agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	}
} /* power_manage_init */
449
450
/*
 * Initialize the gain-control counters, pulse the PM power-save bit
 * (set then clear bit 3), write the initial station descriptors for
 * the local and BSSID station IDs, and allow stations 0 and 1 to
 * transmit.
 */
static void gain_ctlcnt_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_GCR_TRACNT5, 0x119);
	agnx_write32(ctl, AGNX_GCR_TRACNT6, 0x118);
	agnx_write32(ctl, AGNX_GCR_TRACNT7, 0x117);

	/* Pulse PMCTL bit 3: set... */
	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	/* ...then clear. */
	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);

	/* FIXME Write the initial Station Descriptor for the card */
	sta_init(priv, LOCAL_STAID);
	sta_init(priv, BSSID_STAID);

	/* Enable stations 0 and 1 for TX.  Per the original note, setting
	   other bits appears to auto-clear bit 0. */
	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x2 | 0x1);
/* 	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x1); */
} /* gain_ctlcnt_init */
481
482
/*
 * PHY initialization: load the initial gain table, clear several
 * regions of Memory Range #2, program a large revision-dependent set
 * of antenna-calibration and gain-control registers, load the TX FIR
 * coefficient table, and finally release PMCTL bit 3 and enable the
 * PLL.
 */
static void phy_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	AGNX_TRACE;

	/* Load InitialGainTable */
	gain_table_init(priv);

	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x2000000);

	/* Clear the following offsets in Memory Range #2: */
	memset_io(data + 0x5040, 0, 0xa * 4);
	memset_io(data + 0x5080, 0, 0xa * 4);
	memset_io(data + 0x50c0, 0, 0xa * 4);
	memset_io(data + 0x5400, 0, 0x80 * 4);
	memset_io(data + 0x6000, 0, 0x280 * 4);
	memset_io(data + 0x7000, 0, 0x280 * 4);
	memset_io(data + 0x8000, 0, 0x280 * 4);

	/* Initialize the Following Registers According to PCI Revision ID */
	/*
	 * NOTE(review): the final "else if (priv->revid > 1)" branch below
	 * is unreachable — any revid > 1 already matches "revid > 0"
	 * above, so the 0xec18 tweak never executes.  The intended check
	 * order is unclear; confirm against the vendor init sequence
	 * before restructuring.
	 */
	if (priv->revid == 0) {
		/* fixme the part hasn't been update but below has been update
		   based on WGM511 */
		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x1d);
		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x3);
		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
		agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
		agnx_write32(ctl, AGNX_GCR_THD0AL, 0x4b);
		agnx_write32(ctl, AGNX_GCR_THD0B, 0x4b);
		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
		reg = agnx_read32(ctl, AGNX_GCR_CWDETEC);
		reg |= 0x1;
		agnx_write32(ctl, AGNX_GCR_CWDETEC, reg);
		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26);
		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
		agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x0);
		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0x1);
		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
		agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x78);
		agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x1c);
		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x1);
		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
		agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);
		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x14);
		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x30);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x32);
		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
		/* Raw register offsets below are undocumented */
		agnx_write32(ctl, 0x9400, 0x0);
		agnx_write32(ctl, 0x940c, 0x6ff);
		agnx_write32(ctl, 0x9428, 0xa0);
		agnx_write32(ctl, 0x9434, 0x0);
		agnx_write32(ctl, 0x9c04, 0x15);
		agnx_write32(ctl, 0x9c0c, 0x7f);
		agnx_write32(ctl, 0x9c34, 0x0);
		agnx_write32(ctl, 0xc000, 0x38d);
		agnx_write32(ctl, 0x14018, 0x0);
		agnx_write32(ctl, 0x16000, 0x1);
		agnx_write32(ctl, 0x11004, 0x0);
		agnx_write32(ctl, 0xec54, 0xa);
		agnx_write32(ctl, 0xec1c, 0x5);
	} else if (priv->revid > 0) {
		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
		agnx_write32(ctl, AGNX_GCR_CWDETEC, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
/*		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26);*/
		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);

		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x32);
		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1ad);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0xa10);
		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCS, 0x0);
		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x4);
		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x1e);
		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x2a);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);
		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
		agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
		agnx_write32(ctl, AGNX_GCR_WATCHDOG, 0x37);
		/* Raw register offsets below are undocumented */
		agnx_write32(ctl, 0x9400, 0x0);
		agnx_write32(ctl, 0x940c, 0x6ff);
		agnx_write32(ctl, 0x9428, 0xa0);
		agnx_write32(ctl, 0x9434, 0x0);
		agnx_write32(ctl, 0x9c04, 0x15);
		agnx_write32(ctl, 0x9c0c, 0x7f);
		agnx_write32(ctl, 0x9c34, 0x0);
		agnx_write32(ctl, 0xc000, 0x38d);
		agnx_write32(ctl, 0x14014, 0x1000);
		agnx_write32(ctl, 0x14018, 0x0);
		agnx_write32(ctl, 0x16000, 0x1);
		agnx_write32(ctl, 0x11004, 0x0);
		agnx_write32(ctl, 0xec54, 0xa);
		agnx_write32(ctl, 0xec1c, 0x50);
	} else if (priv->revid > 1) {
		/* NOTE(review): dead code — see comment above the chain */
		reg = agnx_read32(ctl, 0xec18);
		reg |= 0x8;
		agnx_write32(ctl, 0xec18, reg);
	}

	/* Write the TX Fir Coefficient Table */
	tx_fir_table_init(priv);

	/* Release PMCTL bit 3 and enable the PLL */
	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

/*	reg = agnx_read32(ctl, 0x1a030); */
/*	reg &= ~0x4; */
/*	agnx_write32(ctl, 0x1a030, reg); */

	agnx_write32(ctl, AGNX_GCR_TRACNT4, 0x113);
} /* phy_init */
650
/*
 * One-time chip initialization (Polaris): bring up band management,
 * the RF chips, the PHY, encryption, TX/RX management, power
 * management and the timers, then program the interrupt mask and
 * the block-control / multicast registers.  Ordering matters.
 */
static void chip_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	band_management_init(priv);

	rf_chips_init(priv);

	/* Set PMCTL bit 3 before PHY init (cleared again inside phy_init) */
	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	/* Initialize the PHY */
	phy_init(priv);

	encryption_init(priv);

	tx_management_init(priv);

	rx_management_init(priv);

	power_manage_init(priv);

	/* Initialize the Timers */
	agnx_timer_init(priv);

	/* Interrupt mask: base value 0xc390bf9 with the TX-beacon and
	   TX-disable interrupts masked off */
	reg = 0xc390bf9 & ~IRQ_TX_BEACON;
	reg &= ~IRQ_TX_DISABLE;
	agnx_write32(ctl, AGNX_INT_MASK, reg);

	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	reg |= 0x800;
	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);

	/* set it when need get multicast enable? */
	agnx_write32(ctl, AGNX_BM_MTSM, 0xff);
} /* chip_init */
691
692
/*
 * Select promiscuous + managed mode (SYSMODE = 0x10 | 0x2).
 * NOTE(review): the same value is written twice — possibly a required
 * hardware double-write, possibly a copy/paste duplicate; confirm
 * before removing either line.
 */
static inline void set_promis_and_managed(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
}
699static inline void set_learn_mode(struct agnx_priv *priv)
700{
701 void __iomem *ctl = priv->ctl;
702 agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x8);
703}
704static inline void set_scan_mode(struct agnx_priv *priv)
705{
706 void __iomem *ctl = priv->ctl;
707 agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x20);
708}
709static inline void set_promiscuous_mode(struct agnx_priv *priv)
710{
711 void __iomem *ctl = priv->ctl;
712 /* agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x210);*/
713 agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10);
714}
715static inline void set_managed_mode(struct agnx_priv *priv)
716{
717 void __iomem *ctl = priv->ctl;
718 agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x2);
719}
720static inline void set_adhoc_mode(struct agnx_priv *priv)
721{
722 void __iomem *ctl = priv->ctl;
723 agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x0);
724}
725
726#if 0
727static void unknow_register_write(struct agnx_priv *priv)
728{
729 void __iomem *ctl = priv->ctl;
730
731 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x0, 0x3e);
732 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4, 0xb2);
733 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x8, 0x140);
734 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0xc, 0x1C0);
735 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x10, 0x1FF);
736 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x14, 0x1DD);
737 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x18, 0x15F);
738 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x1c, 0xA1);
739 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x20, 0x3E7);
740 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x24, 0x36B);
741 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x28, 0x348);
742 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x2c, 0x37D);
743 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x30, 0x3DE);
744 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x34, 0x36);
745 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x38, 0x64);
746 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x3c, 0x57);
747 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x40, 0x23);
748 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x44, 0x3ED);
749 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x48, 0x3C9);
750 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4c, 0x3CA);
751 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x50, 0x3E7);
752 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x54, 0x8);
753 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x58, 0x1F);
754 agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x5c, 0x1a);
755}
756#endif
757
/*
 * Full card-interface bring-up: reset the card, probe endianness,
 * configure and dump the EEPROM, run chip/PHY initialization,
 * calibrate the antennae, set up the DMA rings and interrupts,
 * program receive filtering, and tune to channel 1.
 *
 * Called from agnx_hw_init(); may sleep.  The whole sequence is
 * strictly ordered register I/O — do not reorder.
 */
static void card_interface_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u8 bssid[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u32 reg;
	unsigned int i;
	AGNX_TRACE;

	might_sleep();
	/* Clear RX Control and Enable RX queues */
	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x8);

	might_sleep();
	/* Do a full reset of the card */
	/* NOTE(review): card_full_reset() returns an error code that is
	   ignored here — consider checking it */
	card_full_reset(priv);
	might_sleep();

	/* Check and set Card Endianness */
	reg = ioread32(priv->ctl + AGNX_CIR_ENDIAN);
	/* TODO If not 0xB3B2B1B0 set to 0xB3B2B1B0 */
	printk(KERN_INFO PFX "CIR_ENDIAN is %x\n", reg);


	/* Config the eeprom */
	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x7000086);
	udelay(10);
	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);


	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
	reg = agnx_read32(ctl, 0xec50);
	reg |= 0xf;
	agnx_write32(ctl, 0xec50, reg);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);


	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
	udelay(10);
	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);

	/* Dump the eeprom: read one byte per 0x100 stride of the 1 MiB
	   address space (4096 samples) */
	do {
		/* NOTE(review): 0x100000/0x100 = 4096 bytes of automatic
		   (stack) storage — large for kernel context; consider
		   kmalloc if this code is revived */
		char eeprom[0x100000/0x100];

		for (i = 0; i < 0x100000; i += 0x100) {
			agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x3000000 + i);
			udelay(13);
			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
			udelay(70);
			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
			eeprom[i/0x100] = reg & 0xFF;
			udelay(10);
		}
		print_hex_dump_bytes(PFX "EEPROM: ", DUMP_PREFIX_NONE, eeprom,
				     ARRAY_SIZE(eeprom));
	} while (0);

	spi_rc_write(ctl, RF_CHIP0, 0x26);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	/* Initialize the system interface */
	system_itf_init(priv);

	might_sleep();
	/* Chip Initialization (Polaris) */
	chip_init(priv);
	might_sleep();

	/* Calibrate the antennae */
	antenna_calibrate(priv);

	reg = agnx_read32(ctl, 0xec50);
	reg &= ~0x40;
	agnx_write32(ctl, 0xec50, reg);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);
	agnx_write32(ctl, AGNX_PM_PLLCTL, 0x1);

	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
	reg |= 0x8000;
	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
	enable_receiver(priv);
	reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
	reg |= 0x200;
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
	enable_receiver(priv);

	might_sleep();
	/* Initialize Gain Control Counts */
	gain_ctlcnt_init(priv);

	/* Write Initial Station Power Template for this station(#0) */
	sta_power_init(priv, LOCAL_STAID);

	might_sleep();
	/* Initialize the rx,td,tm rings, for each node in the ring */
	fill_rings(priv);

	might_sleep();


	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
	agnx_write32(ctl, 0xec50, 0xc);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);

	/* FIXME Initialize the transmit control register */
	agnx_write32(ctl, AGNX_TXM_CTL, 0x194c1);

	enable_receiver(priv);

	might_sleep();
	/* FIXME Set the Receive Control Mac Address to card address */
	mac_address_set(priv);
	enable_receiver(priv);
	might_sleep();

	/* Set the receive request rate */
	/* FIXME Enable the request */
	/* Check packet length */
	/* Set maximum packet length */
/*	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x88195e00); */
/*	enable_receiver(priv); */

	/* Set the Receiver BSSID */
	receiver_bssid_set(priv, bssid);

	/* FIXME Set to managed mode */
	set_managed_mode(priv);
/*	set_promiscuous_mode(priv); */
/*	set_scan_mode(priv); */
/*	set_learn_mode(priv); */
/*	set_promis_and_managed(priv); */
/*	set_adhoc_mode(priv); */

	/* Set the receive request rate */
	/* Check packet length */
	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x08000000);
	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
	/* Set maximum packet length */
	reg |= 0x00195e00;
	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);

	/* Configure the RX and TX interrupt */
	reg = ENABLE_RX_INTERRUPT | RX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
	agnx_write32(ctl, AGNX_CIR_RXCFG, reg);
	/* FIXME */
	reg = ENABLE_TX_INTERRUPT | TX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
	agnx_write32(ctl, AGNX_CIR_TXCFG, reg);

	/* Enable RX TX Interrupts */
	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x80);
	agnx_write32(ctl, AGNX_CIR_TXMCTL, 0x80);
	agnx_write32(ctl, AGNX_CIR_TXDCTL, 0x80);

	/* FIXME Set the master control interrupt in block control */
	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x800);

	/* Enable RX and TX queues */
	reg = agnx_read32(ctl, AGNX_CIR_RXCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_RXCTL, reg);
	reg = agnx_read32(ctl, AGNX_CIR_TXMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_TXMCTL, reg);
	reg = agnx_read32(ctl, AGNX_CIR_TXDCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_TXDCTL, reg);

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
	/* FIXME */
	/* unknow_register_write(priv); */
	/* Update local card hash entry */
	hash_write(priv, priv->mac_addr, LOCAL_STAID);

	might_sleep();

	/* FIXME */
	agnx_set_channel(priv, 1);
	might_sleep();
} /* agnx_card_interface_init */
937
938
/*
 * Public entry point for full hardware initialization.
 * May sleep; delegates to card_interface_init().
 */
void agnx_hw_init(struct agnx_priv *priv)
{
	AGNX_TRACE;
	might_sleep();
	card_interface_init(priv);
}
945
/* Reset the whole card; returns the result of card_full_reset(). */
int agnx_hw_reset(struct agnx_priv *priv)
{
	int err;

	err = card_full_reset(priv);
	return err;
}
950
/*
 * Set the SSID.  Currently a stub: the arguments are not used and
 * nothing is programmed into the hardware; always returns 0.
 */
int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len)
{
	AGNX_TRACE;
	return 0;
}
956
/* Program the receiver's BSSID filter with the given BSSID. */
void agnx_set_bssid(struct agnx_priv *priv, const u8 *bssid)
{
	receiver_bssid_set(priv, bssid);
}
diff --git a/drivers/staging/agnx/phy.h b/drivers/staging/agnx/phy.h
deleted file mode 100644
index a955f05361e7..000000000000
--- a/drivers/staging/agnx/phy.h
+++ /dev/null
@@ -1,409 +0,0 @@
1#ifndef AGNX_PHY_H_
2#define AGNX_PHY_H_
3
4#include "agnx.h"
5
6/* Transmission Managment Registers */
7#define AGNX_TXM_BASE 0x0000
8#define AGNX_TXM_CTL 0x0000 /* control register */
9#define AGNX_TXM_ETMF 0x0004 /* enable transmission management functions */
10#define AGNX_TXM_TXTEMP 0x0008 /* transmission template */
11#define AGNX_TXM_RETRYSTAID 0x000c /* Retry Station ID */
12#define AGNX_TXM_TIMESTAMPLO 0x0010 /* Timestamp Lo */
13#define AGNX_TXM_TIMESTAMPHI 0x0014 /* Timestamp Hi */
14#define AGNX_TXM_TXDELAY 0x0018 /* tx delay */
15#define AGNX_TXM_TBTTLO 0x0020 /* tbtt Lo */
16#define AGNX_TXM_TBTTHI 0x0024 /* tbtt Hi */
17#define AGNX_TXM_BEAINTER 0x0028 /* Beacon Interval */
18#define AGNX_TXM_NAV 0x0030 /* NAV */
19#define AGNX_TXM_CFPMDV 0x0034 /* CFP MDV */
20#define AGNX_TXM_CFPERCNT 0x0038 /* CFP period count */
21#define AGNX_TXM_PROBDELAY 0x003c /* probe delay */
22#define AGNX_TXM_LISINTERCNT 0x0040 /* listen interval count */
23#define AGNX_TXM_DTIMPERICNT 0x004c /* DTIM period count */
24
25#define AGNX_TXM_BEACON_CTL 0x005c /* beacon control */
26
27#define AGNX_TXM_SCHEMPCNT 0x007c /* schedule empty count */
28#define AGNX_TXM_MAXTIMOUT 0x0084 /* max timeout exceed count */
29#define AGNX_TXM_MAXCFPTIM 0x0088 /* max CF poll timeout count */
30#define AGNX_TXM_MAXRXTIME 0x008c /* max RX timeout count */
31#define AGNX_TXM_MAXACKTIM 0x0090 /* max ACK timeout count */
32#define AGNX_TXM_DIF01 0x00a0 /* DIF 0-1 */
33#define AGNX_TXM_DIF23 0x00a4 /* DIF 2-3 */
34#define AGNX_TXM_DIF45 0x00a8 /* DIF 4-5 */
35#define AGNX_TXM_DIF67 0x00ac /* DIF 6-7 */
36#define AGNX_TXM_SIFSPIFS 0x00b0 /* SIFS/PIFS */
37#define AGNX_TXM_TIFSEIFS 0x00b4 /* TIFS/EIFS */
38#define AGNX_TXM_MAXCCACNTSLOT 0x00b8 /* max CCA count slot */
39#define AGNX_TXM_SLOTLIMIT 0x00bc /* slot limit/1 msec limit */
40#define AGNX_TXM_CFPOLLRXTIM 0x00f0 /* CF poll RX timeout count */
41#define AGNX_TXM_CFACKT11B 0x00f4 /* CF ack timeout limit for 11b */
42#define AGNX_TXM_CW0 0x0100 /* CW 0 */
43#define AGNX_TXM_SLBEALIM0 0x0108 /* short/long beacon limit 0 */
44#define AGNX_TXM_CW1 0x0120 /* CW 1 */
45#define AGNX_TXM_SLBEALIM1 0x0128 /* short/long beacon limit 1 */
46#define AGNX_TXM_CW2 0x0140 /* CW 2 */
47#define AGNX_TXM_SLBEALIM2 0x0148 /* short/long beacon limit 2 */
48#define AGNX_TXM_CW3 0x0160 /* CW 3 */
49#define AGNX_TXM_SLBEALIM3 0x0168 /* short/long beacon limit 3 */
50#define AGNX_TXM_CW4 0x0180 /* CW 4 */
51#define AGNX_TXM_SLBEALIM4 0x0188 /* short/long beacon limit 4 */
52#define AGNX_TXM_CW5 0x01a0 /* CW 5 */
53#define AGNX_TXM_SLBEALIM5 0x01a8 /* short/long beacon limit 5 */
54#define AGNX_TXM_CW6 0x01c0 /* CW 6 */
55#define AGNX_TXM_SLBEALIM6 0x01c8 /* short/long beacon limit 6 */
56#define AGNX_TXM_CW7 0x01e0 /* CW 7 */
57#define AGNX_TXM_SLBEALIM7 0x01e8 /* short/long beacon limit 7 */
58#define AGNX_TXM_BEACONTEMP 0x1000 /* beacon template */
59#define AGNX_TXM_STAPOWTEMP 0x1a00 /* Station Power Template */
60
61/* Receive Management Control Registers */
62#define AGNX_RXM_BASE 0x2000
63#define AGNX_RXM_REQRATE 0x2000 /* requested rate */
64#define AGNX_RXM_MACHI 0x2004 /* first 4 bytes of mac address */
65#define AGNX_RXM_MACLO 0x2008 /* last 2 bytes of mac address */
66#define AGNX_RXM_BSSIDHI 0x200c /* bssid hi */
67#define AGNX_RXM_BSSIDLO 0x2010 /* bssid lo */
68#define AGNX_RXM_HASH_CMD_FLAG 0x2014 /* Flags for the RX Hash Command Default:0 */
69#define AGNX_RXM_HASH_CMD_HIGH 0x2018 /* The High half of the Hash Command */
70#define AGNX_RXM_HASH_CMD_LOW 0x201c /* The Low half of the Hash Command */
71#define AGNX_RXM_ROUTAB 0x2020 /* routing table */
72#define ROUTAB_SUBTYPE_SHIFT 24
73#define ROUTAB_TYPE_SHIFT 28
74#define ROUTAB_STATUS_SHIFT 30
75#define ROUTAB_RW_SHIFT 31
76#define ROUTAB_ROUTE_DROP 0xf00000 /* Drop */
77#define ROUTAB_ROUTE_CPU 0x400000 /* CPU */
78#define ROUTAB_ROUTE_ENCRY 0x500800 /* Encryption */
79#define ROUTAB_ROUTE_RFP 0x800000 /* RFP */
80
81#define ROUTAB_TYPE_MANAG 0x0 /* Management */
82#define ROUTAB_TYPE_CTL 0x1 /* Control */
83#define ROUTAB_TYPE_DATA 0x2 /* Data */
84
85#define ROUTAB_SUBTYPE_DATA 0x0
86#define ROUTAB_SUBTYPE_DATAACK 0x1
87#define ROUTAB_SUBTYPE_DATAPOLL 0x2
88#define ROUTAB_SUBTYPE_DATAPOLLACK 0x3
89#define ROUTAB_SUBTYPE_NULL 0x4 /* NULL */
90#define ROUTAB_SUBTYPE_NULLACK 0x5
91#define ROUTAB_SUBTYPE_NULLPOLL 0x6
92#define ROUTAB_SUBTYPE_NULLPOLLACK 0x7
93#define ROUTAB_SUBTYPE_QOSDATA 0x8 /* QOS DATA */
94#define ROUTAB_SUBTYPE_QOSDATAACK 0x9
95#define ROUTAB_SUBTYPE_QOSDATAPOLL 0xa
96#define ROUTAB_SUBTYPE_QOSDATAACKPOLL 0xb
97#define ROUTAB_SUBTYPE_QOSNULL 0xc
98#define ROUTAB_SUBTYPE_QOSNULLACK 0xd
99#define ROUTAB_SUBTYPE_QOSNULLPOLL 0xe
100#define ROUTAB_SUBTYPE_QOSNULLPOLLACK 0xf
101#define AGNX_RXM_DELAY11 0x2024 /* delay 11(AB) */
102#define AGNX_RXM_SOF_CNT 0x2028 /* SOF Count */
103#define AGNX_RXM_FRAG_CNT 0x202c /* Fragment Count*/
104#define AGNX_RXM_FCS_CNT 0x2030 /* FCS Count */
105#define AGNX_RXM_BSSID_MISS_CNT 0x2034 /* BSSID Miss Count */
106#define AGNX_RXM_PDU_ERR_CNT 0x2038 /* PDU Error Count */
107#define AGNX_RXM_DEST_MISS_CNT 0x203C /* Destination Miss Count */
108#define AGNX_RXM_DROP_CNT 0x2040 /* Drop Count */
109#define AGNX_RXM_ABORT_CNT 0x2044 /* Abort Count */
110#define AGNX_RXM_RELAY_CNT 0x2048 /* Relay Count */
111#define AGNX_RXM_HASH_MISS_CNT 0x204c /* Hash Miss Count */
112#define AGNX_RXM_SA_HI 0x2050 /* Address of received packet Hi */
113#define AGNX_RXM_SA_LO 0x2054 /* Address of received packet Lo */
114#define AGNX_RXM_HASH_DUMP_LST 0x2100 /* Contains Hash Data */
115#define AGNX_RXM_HASH_DUMP_MST 0x2104 /* Contains Hash Data */
116#define AGNX_RXM_HASH_DUMP_DATA 0x2108 /* The Station ID to dump */
117
118
119/* Encryption Managment */
120#define AGNX_ENCRY_BASE 0x2400
121#define AGNX_ENCRY_WEPKEY0 0x2440 /* wep key #0 */
122#define AGNX_ENCRY_WEPKEY1 0x2444 /* wep key #1 */
123#define AGNX_ENCRY_WEPKEY2 0x2448 /* wep key #2 */
124#define AGNX_ENCRY_WEPKEY3 0x244c /* wep key #3 */
125#define AGNX_ENCRY_CCMRECTL 0x2460 /* ccm replay control */
126
127
128/* Band Management Registers */
129#define AGNX_BM_BASE 0x2c00
130#define AGNX_BM_BMCTL 0x2c00 /* band management control */
131#define AGNX_BM_TXWADDR 0x2c18 /* tx workqueue address start */
132#define AGNX_BM_TXTOPEER 0x2c24 /* transmit to peers */
133#define AGNX_BM_FPLHP 0x2c2c /* free pool list head pointer */
134#define AGNX_BM_FPLTP 0x2c30 /* free pool list tail pointer */
135#define AGNX_BM_FPCNT 0x2c34 /* free pool count */
136#define AGNX_BM_CIPDUWCNT 0x2c38 /* card interface pdu workqueue count */
137#define AGNX_BM_SPPDUWCNT 0x2c3c /* sp pdu workqueue count */
138#define AGNX_BM_RFPPDUWCNT 0x2c40 /* rfp pdu workqueue count */
139#define AGNX_BM_RHPPDUWCNT 0x2c44 /* rhp pdu workqueue count */
140#define AGNX_BM_CIWQCTL 0x2c48 /* Card Interface WorkQueue Control */
141#define AGNX_BM_CPUTXWCTL 0x2c50 /* cpu tx workqueue control */
142#define AGNX_BM_CPURXWCTL 0x2c58 /* cpu rx workqueue control */
143#define AGNX_BM_CPULWCTL 0x2c60 /* cpu low workqueue control */
144#define AGNX_BM_CPUHWCTL 0x2c68 /* cpu high workqueue control */
145#define AGNX_BM_SPTXWCTL 0x2c70 /* sp tx workqueue control */
146#define AGNX_BM_SPRXWCTL 0x2c78 /* sp rx workqueue control */
147#define AGNX_BM_RFPWCTL 0x2c80 /* RFP workqueue control */
148#define AGNX_BM_MTSM 0x2c90 /* Multicast Transmit Station Mask */
149
150/* Card Interface Registers (32bits) */
151#define AGNX_CIR_BASE 0x3000
152#define AGNX_CIR_BLKCTL 0x3000 /* block control*/
153#define AGNX_STAT_TX 0x1
154#define AGNX_STAT_RX 0x2
155#define AGNX_STAT_X 0x4
156/* Below two interrupt flags will be set by our but not CPU or the card */
157#define AGNX_STAT_TXD 0x10
158#define AGNX_STAT_TXM 0x20
159
160#define AGNX_CIR_ADDRWIN 0x3004 /* Addressable Windows*/
161#define AGNX_CIR_ENDIAN 0x3008 /* card endianness */
162#define AGNX_CIR_SERIALITF 0x3020 /* serial interface */
163#define AGNX_CIR_RXCFG 0x3040 /* receive config */
164#define ENABLE_RX_INTERRUPT 0x20
165#define RX_CACHE_LINE 0x8
166/* the RX fragment length */
167#define FRAG_LEN_256 0x0 /* 256B */
168#define FRAG_LEN_512 0x1
169#define FRAG_LEN_1024 0x2
170#define FRAG_LEN_2048 0x3
171#define FRAG_BE 0x10
172#define AGNX_CIR_RXCTL 0x3050 /* receive control */
173/* memory address, chipside */
174#define AGNX_CIR_RXCMSTART 0x3054 /* receive client memory start */
175#define AGNX_CIR_RXCMEND 0x3058 /* receive client memory end */
176/* memory address, pci */
177#define AGNX_CIR_RXHOSTADDR 0x3060 /* receive hostside address */
178/* memory address, chipside */
179#define AGNX_CIR_RXCLIADDR 0x3064 /* receive clientside address */
180#define AGNX_CIR_RXDMACTL 0x3068 /* receive dma control */
181#define AGNX_CIR_TXCFG 0x3080 /* transmit config */
182#define AGNX_CIR_TXMCTL 0x3090 /* Transmit Management Control */
183#define ENABLE_TX_INTERRUPT 0x20
184#define TX_CACHE_LINE 0x8
185#define AGNX_CIR_TXMSTART 0x3094 /* Transmit Management Start */
186#define AGNX_CIR_TXMEND 0x3098 /* Transmit Management End */
187#define AGNX_CIR_TXDCTL 0x30a0 /* transmit data control */
188/* memeory address, chipset */
189#define AGNX_CIR_TXDSTART 0x30a4 /* transmit data start */
190#define AGNX_CIR_TXDEND 0x30a8 /* transmit data end */
191#define AGNX_CIR_TXMHADDR 0x30b0 /* Transmit Management Hostside Address */
192#define AGNX_CIR_TXMCADDR 0x30b4 /* Transmit Management Clientside Address */
193#define AGNX_CIR_TXDMACTL 0x30b8 /* transmit dma control */
194
195
196/* Power Managment Unit */
197#define AGNX_PM_BASE 0x3c00
198#define AGNX_PM_PMCTL 0x3c00 /* PM Control*/
199#define AGNX_PM_MACMSW 0x3c08 /* MAC Manual Slow Work Enable */
200#define AGNX_PM_RFCTL 0x3c0c /* RF Control */
201#define AGNX_PM_PHYMW 0x3c14 /* Phy Mannal Work */
202#define AGNX_PM_SOFTRST 0x3c18 /* PMU Soft Reset */
203#define AGNX_PM_PLLCTL 0x3c1c /* PMU PLL control*/
204#define AGNX_PM_TESTPHY 0x3c24 /* PMU Test Phy */
205
206
207/* Interrupt Control interface */
208#define AGNX_INT_BASE 0x4000
209#define AGNX_INT_STAT 0x4000 /* interrupt status */
210#define AGNX_INT_MASK 0x400c /* interrupt mask */
211/* FIXME */
212#define IRQ_TX_BEACON 0x1 /* TX Beacon */
213#define IRQ_TX_RETRY 0x8 /* TX Retry Interrupt */
214#define IRQ_TX_ACTIVITY 0x10 /* TX Activity */
215#define IRQ_RX_ACTIVITY 0x20 /* RX Activity */
216/* FIXME I guess that instead RX a none exist staion's packet or
217 the station hasn't been init */
218#define IRQ_RX_X 0x40
219#define IRQ_RX_Y 0x80 /* RX ? */
220#define IRQ_RX_HASHHIT 0x100 /* RX Hash Hit */
221#define IRQ_RX_FRAME 0x200 /* RX Frame */
222#define IRQ_ERR_INT 0x400 /* Error Interrupt */
223#define IRQ_TX_QUE_FULL 0x800 /* TX Workqueue Full */
224#define IRQ_BANDMAN_ERR 0x10000 /* Bandwidth Management Error */
225#define IRQ_TX_DISABLE 0x20000 /* TX Disable */
226#define IRQ_RX_IVASESKEY 0x80000 /* RX Invalid Session Key */
227#define IRQ_RX_KEYIDMIS 0x100000 /* RX key ID Mismatch */
228#define IRQ_REP_THHIT 0x200000 /* Replay Threshold Hit */
229#define IRQ_TIMER1 0x4000000 /* Timer1 */
230#define IRQ_TIMER_CNT 0x10000000 /* Timer Count */
231#define IRQ_PHY_FASTINT 0x20000000 /* Phy Fast Interrupt */
232#define IRQ_PHY_SLOWINT 0x40000000 /* Phy Slow Interrupt */
233#define IRQ_OTHER 0x80000000 /* Unknow interrupt */
234#define AGNX_IRQ_ALL 0xffffffff
235
236/* System Interface */
237#define AGNX_SYSITF_BASE 0x4400
238#define AGNX_SYSITF_SYSMODE 0x4400 /* system mode */
239#define AGNX_SYSITF_GPIOIN 0x4410 /* GPIO In */
240/* PIN lines for leds? */
241#define AGNX_SYSITF_GPIOUT 0x4414 /* GPIO Out */
242
243/* Timer Control */
244#define AGNX_TIMCTL_TIMER1 0x4800 /* Timer 1 */
245#define AGNX_TIMCTL_TIM1CTL 0x4808 /* Timer 1 Control */
246
247
248/* Antenna Calibration Interface */
249#define AGNX_ACI_BASE 0x5000
250#define AGNX_ACI_MODE 0x5000 /* Mode */
251#define AGNX_ACI_MEASURE 0x5004 /* Measure */
252#define AGNX_ACI_SELCHAIN 0x5008 /* Select Chain */
253#define AGNX_ACI_LEN 0x500c /* Length */
254#define AGNX_ACI_TIMER1 0x5018 /* Timer 1 */
255#define AGNX_ACI_TIMER2 0x501c /* Timer 2 */
256#define AGNX_ACI_OFFSET 0x5020 /* Offset */
257#define AGNX_ACI_STATUS 0x5030 /* Status */
258#define CALI_IDLE 0x0
259#define CALI_DONE 0x1
260#define CALI_BUSY 0x2
261#define CALI_ERR 0x3
262#define AGNX_ACI_AICCHA0OVE 0x5034 /* AIC Channel 0 Override */
263#define AGNX_ACI_AICCHA1OVE 0x5038 /* AIC Channel 1 Override */
264
265/* Gain Control Registers */
266#define AGNX_GCR_BASE 0x9000
267/* threshold of primary antenna */
268#define AGNX_GCR_THD0A 0x9000 /* threshold? D0 A */
269/* low threshold of primary antenna */
270#define AGNX_GCR_THD0AL 0x9004 /* threshold? D0 A low */
271/* threshold of secondary antenna */
272#define AGNX_GCR_THD0B 0x9008 /* threshold? D0_B */
273#define AGNX_GCR_DUNSAT 0x900c /* d unsaturated */
274#define AGNX_GCR_DSAT 0x9010 /* d saturated */
275#define AGNX_GCR_DFIRCAL 0x9014 /* D Fir/Cal */
276#define AGNX_GCR_DGCTL11A 0x9018 /* d gain control 11a */
277#define AGNX_GCR_DGCTL11B 0x901c /* d gain control 11b */
278/* strength of gain */
279#define AGNX_GCR_GAININIT 0x9020 /* gain initialization */
280#define AGNX_GCR_THNOSIG 0x9024 /* threhold no signal */
281#define AGNX_GCR_COARSTEP 0x9028 /* coarse stepping */
282#define AGNX_GCR_SIFST11A 0x902c /* sifx time 11a */
283#define AGNX_GCR_SIFST11B 0x9030 /* sifx time 11b */
284#define AGNX_GCR_CWDETEC 0x9034 /* cw detection */
285#define AGNX_GCR_0X38 0x9038 /* ???? */
286#define AGNX_GCR_BOACT 0x903c /* BO Active */
287#define AGNX_GCR_BOINACT 0x9040 /* BO Inactive */
288#define AGNX_GCR_BODYNA 0x9044 /* BO dynamic */
289/* 802.11 mode(a,b,g) */
290#define AGNX_GCR_DISCOVMOD 0x9048 /* discovery mode */
291#define AGNX_GCR_NLISTANT 0x904c /* number of listening antenna */
292#define AGNX_GCR_NACTIANT 0x9050 /* number of active antenna */
293#define AGNX_GCR_NMEASANT 0x9054 /* number of measuring antenna */
294#define AGNX_GCR_NCAPTANT 0x9058 /* number of capture antenna */
295#define AGNX_GCR_THCAP11A 0x905c /* threshold capture 11a */
296#define AGNX_GCR_THCAP11B 0x9060 /* threshold capture 11b */
297#define AGNX_GCR_THCAPRX11A 0x9064 /* threshold capture rx 11a */
298#define AGNX_GCR_THCAPRX11B 0x9068 /* threshold capture rx 11b */
299#define AGNX_GCR_THLEVDRO 0x906c /* threshold level drop */
300#define AGNX_GCR_GAINSET0 0x9070 /* Gainset 0 */
301#define AGNX_GCR_GAINSET1 0x9074 /* Gainset 1 */
302#define AGNX_GCR_GAINSET2 0x9078 /* Gainset 2 */
303#define AGNX_GCR_MAXRXTIME11A 0x907c /* maximum rx time 11a */
304#define AGNX_GCR_MAXRXTIME11B 0x9080 /* maximum rx time 11b */
305#define AGNX_GCR_CORRTIME 0x9084 /* correction time */
306/* reset the subsystem, 0 = disable, 1 = enable */
307#define AGNX_GCR_RSTGCTL 0x9088 /* reset gain control */
308/* channel receiving */
309#define AGNX_GCR_RXCHANEL 0x908c /* receive channel */
310#define AGNX_GCR_NOISE0 0x9090 /* Noise 0 */
311#define AGNX_GCR_NOISE1 0x9094 /* Noise 1 */
312#define AGNX_GCR_NOISE2 0x9098 /* Noise 2 */
313#define AGNX_GCR_SIGHTH 0x909c /* Signal High Threshold */
314#define AGNX_GCR_SIGLTH 0x90a0 /* Signal Low Threshold */
315#define AGNX_GCR_CORRDROP 0x90a4 /* correction drop */
316/* threshold of tertiay antenna */
317#define AGNX_GCR_THCD 0x90a8 /* threshold? CD */
318#define AGNX_GCR_THCS 0x90ac /* threshold? CS */
319#define AGNX_GCR_MAXPOWDIFF 0x90b8 /* maximum power difference */
320#define AGNX_GCR_TRACNT4 0x90ec /* Transition Count 4 */
321#define AGNX_GCR_TRACNT5 0x90f0 /* transition count 5 */
322#define AGNX_GCR_TRACNT6 0x90f4 /* transition count 6 */
323#define AGNX_GCR_TRACNT7 0x90f8 /* transition coutn 7 */
324#define AGNX_GCR_TESTBUS 0x911c /* test bus */
325#define AGNX_GCR_CHAINNUM 0x9120 /* Number of Chains */
326#define AGNX_GCR_ANTCFG 0x9124 /* Antenna Config */
327#define AGNX_GCR_THJUMP 0x912c /* threhold jump */
328#define AGNX_GCR_THPOWER 0x9130 /* threshold power */
329#define AGNX_GCR_THPOWCLIP 0x9134 /* threshold power clip*/
330#define AGNX_GCR_FORCECTLCLK 0x9138 /* Force Gain Control Clock */
331#define AGNX_GCR_GAINSETWRITE 0x913c /* Gainset Write */
332#define AGNX_GCR_THD0BTFEST 0x9140 /* threshold d0 b tf estimate */
333#define AGNX_GCR_THRX11BPOWMIN 0x9144 /* threshold rx 11b power minimum */
334#define AGNX_GCR_0X14c 0x914c /* ?? */
335#define AGNX_GCR_0X150 0x9150 /* ?? */
336#define AGNX_GCR_RXOVERIDE 0x9194 /* recieve override */
337#define AGNX_GCR_WATCHDOG 0x91b0 /* watchdog timeout */
338
339
340/* Spi Interface */
341#define AGNX_SPI_BASE 0xdc00
342#define AGNX_SPI_CFG 0xdc00 /* spi configuration */
343/* Only accept 16 bits */
344#define AGNX_SPI_WMSW 0xdc04 /* write most significant word */
345/* Only accept 16 bits */
346#define AGNX_SPI_WLSW 0xdc08 /* write least significant word */
347#define AGNX_SPI_CTL 0xdc0c /* spi control */
348#define AGNX_SPI_RMSW 0xdc10 /* read most significant word */
349#define AGNX_SPI_RLSW 0xdc14 /* read least significant word */
350/* SPI Control Mask */
351#define SPI_READ_CTL 0x4000 /* read control */
352#define SPI_BUSY_CTL 0x8000 /* busy control */
353/* RF and synth chips in spi */
354#define RF_CHIP0 0x400
355#define RF_CHIP1 0x800
356#define RF_CHIP2 0x1000
357#define SYNTH_CHIP 0x2000
358
359/* Unknown register */
360#define AGNX_UNKNOWN_BASE 0x7800
361
362/* FIXME MonitorGain */
363#define AGNX_MONGCR_BASE 0x12000
364
365/* Gain Table */
366#define AGNX_GAIN_TABLE 0x12400
367
368/* The initial FIR coefficient table */
369#define AGNX_FIR_BASE 0x19804
370
371#define AGNX_ENGINE_LOOKUP_TBL 0x800
372
373/* eeprom commands */
374#define EEPROM_CMD_NULL 0x0 /* NULL */
375#define EEPROM_CMD_WRITE 0x2 /* write */
376#define EEPROM_CMD_READ 0x3 /* read */
377#define EEPROM_CMD_STATUSREAD 0x5 /* status register read */
378#define EEPROM_CMD_WRITEENABLE 0x6 /* write enable */
379#define EEPROM_CMD_CONFIGURE 0x7 /* configure */
380
381#define EEPROM_DATAFORCOFIGURE 0x6 /* ??? */
382
383/* eeprom address */
384#define EEPROM_ADDR_SUBVID 0x0 /* Sub Vendor ID */
385#define EEPROM_ADDR_SUBSID 0x2 /* Sub System ID */
386#define EEPROM_ADDR_MACADDR 0x146 /* MAC Address */
387#define EEPROM_ADDR_LOTYPE 0x14f /* LO type */
388
/* EEPROM access descriptor (packed to match the hardware layout). */
struct agnx_eeprom {
	u8 data;	/* data byte (original comment said "date", presumably a typo) */
	u16 address;	/* address in EEPROM */
	u8 cmd;		/* command, unknown, status */
} __attribute__((__packed__));
394
395#define AGNX_EEPROM_COMMAND_SHIFT 5
396#define AGNX_EEPROM_COMMAND_STAT 0x01
397
398void disable_receiver(struct agnx_priv *priv);
399void enable_receiver(struct agnx_priv *priv);
400u8 read_from_eeprom(struct agnx_priv *priv, u16 address);
401void agnx_hw_init(struct agnx_priv *priv);
402int agnx_hw_reset(struct agnx_priv *priv);
403int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len);
404void agnx_set_bssid(struct agnx_priv *priv, const u8 *bssid);
405void enable_power_saving(struct agnx_priv *priv);
406void disable_power_saving(struct agnx_priv *priv);
407void calibrate_antenna_period(unsigned long data);
408
409#endif /* AGNX_PHY_H_ */
diff --git a/drivers/staging/agnx/rf.c b/drivers/staging/agnx/rf.c
deleted file mode 100644
index 42e457a1844f..000000000000
--- a/drivers/staging/agnx/rf.c
+++ /dev/null
@@ -1,893 +0,0 @@
1/**
2 * Airgo MIMO wireless driver
3 *
4 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
5
6 * Thanks for Jeff Williams <angelbane@gmail.com> do reverse engineer
7 * works and published the SPECS at http://airgo.wdwconsulting.net/mymoin
8
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation;
12 */
13
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include "agnx.h"
17#include "debug.h"
18#include "phy.h"
19#include "table.h"
20
21/* FIXME! */
/*
 * spi_write - push one 32-bit word to the selected chip(s) over SPI.
 * @region:   mapped MMIO register base
 * @chip_ids: bitmask of target chips (RF_CHIP0/1/2, SYNTH_CHIP)
 * @sw:       32-bit serial word to transmit
 * @size:     transfer-size field (chip-dependent magic value)
 * @control:  busy/read control bits OR'ed into the control register
 */
static inline void spi_write(void __iomem *region, u32 chip_ids, u32 sw,
			     u16 size, u32 control)
{
	u32 reg;
	u32 lsw = sw & 0xffff;	/* lower 16 bits of sw */
	u32 msw = sw >> 16;	/* upper 16 bits of sw */

	/* Write the two 16-bit halves of the payload to LSW/MSW */
	iowrite32((lsw), region + AGNX_SPI_WLSW);
	iowrite32((msw), region + AGNX_SPI_WMSW);
	reg = chip_ids | size | control;
	/* Write chip id(s), write size and busy control to Control Register */
	iowrite32((reg), region + AGNX_SPI_CTL);
	/* Wait for Busy control to clear */
	spi_delay();
}
39
40/*
41 * Write to SPI Synth register
42 */
43static inline void spi_sy_write(void __iomem *region, u32 chip_ids, u32 sw)
44{
45 /* FIXME the size 0x15 is a magic value*/
46 spi_write(region, chip_ids, sw, 0x15, SPI_BUSY_CTL);
47}
48
49/*
50 * Write to SPI RF register
51 */
52static inline void spi_rf_write(void __iomem *region, u32 chip_ids, u32 sw)
53{
54 /* FIXME the size 0xd is a magic value*/
55 spi_write(region, chip_ids, sw, 0xd, SPI_BUSY_CTL);
56} /* spi_rf_write */
57
58/*
59 * Write to SPI with Read Control bit set
60 */
61inline void spi_rc_write(void __iomem *region, u32 chip_ids, u32 sw)
62{
63 /* FIXME the size 0xe5 is a magic value */
64 spi_write(region, chip_ids, sw, 0xe5, SPI_BUSY_CTL|SPI_READ_CTL);
65}
66
/*
 * get_active_chains - count RF chips that report active (register 0x21 == 1).
 * Queries each of RF_CHIP0..2 individually, then reads register 0x26 of
 * RF_CHIP0 as a sanity check (expected value 0x33).
 * Returns the number of active chains (0..3).
 */
static int get_active_chains(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int num = 0;
	u32 reg;
	AGNX_TRACE;

	spi_rc_write(ctl, RF_CHIP0, 0x21);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (reg == 1)
		num++;

	spi_rc_write(ctl, RF_CHIP1, 0x21);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (reg == 1)
		num++;

	spi_rc_write(ctl, RF_CHIP2, 0x21);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (reg == 1)
		num++;

	/* Sanity check: register 0x26 of chip 0 should read 0x33 */
	spi_rc_write(ctl, RF_CHIP0, 0x26);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x33 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");

	return num;
} /* get_active_chains */
97
/*
 * rf_chips_init - bring up the RF chips: set a GPIO for rev-1 hardware,
 * step the SPI clock (200ns then 50ns), issue the initial chip command,
 * count active chains, and leave the SPI clock field cleared.
 * The register-write order here is hardware-mandated; do not reorder.
 */
void rf_chips_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	int num;
	AGNX_TRACE;

	/* Rev-1 boards need GPIO bit 3 raised first */
	if (priv->revid == 1) {
		reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
		reg |= 0x8;
		agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);
	}

	/* Set SPI clock speed to 200NS */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x3;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Set SPI clock speed to 50NS */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1101);

	num = get_active_chains(priv);
	printk(KERN_INFO PFX "Active chains are %d\n", num);

	/* Clear the SPI clock-speed field */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1908);
} /* rf_chips_init */
134
135
/*
 * Per-channel synthesizer register values for 2.4 GHz channels 1..14.
 * Row index is the channel number (row 0 is an unused placeholder);
 * column 0 repeats the channel number, columns 1..8 are the values
 * written to synth registers 1..8 by channel_tbl_write().
 */
static u32 channel_tbl[15][9] = {
	{0,  0x00, 0x00, 0x00,  0x00, 0x00,  0x00, 0x00, 0x00},
	{1,  0x00, 0x00, 0x624, 0x00, 0x1a4, 0x28, 0x00, 0x1e},
	{2,  0x00, 0x00, 0x615, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{3,  0x00, 0x00, 0x61a, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{4,  0x00, 0x00, 0x61f, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{5,  0x00, 0x00, 0x624, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{6,  0x00, 0x00, 0x61f, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{7,  0x00, 0x00, 0x624, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{8,  0x00, 0x00, 0x629, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{9,  0x00, 0x00, 0x624, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{10, 0x00, 0x00, 0x629, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{11, 0x00, 0x00, 0x62e, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{12, 0x00, 0x00, 0x633, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{13, 0x00, 0x00, 0x628, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{14, 0x00, 0x00, 0x644, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
};
153
154
/*
 * channel_tbl_write - write one channel_tbl entry to the synth chip.
 * The value is shifted left 4 bits and the register number is packed
 * into the low nibble of the serial word.
 */
static inline void
channel_tbl_write(struct agnx_priv *priv, unsigned int channel, unsigned int reg_num)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	reg = channel_tbl[channel][reg_num];
	reg <<= 4;
	reg |= reg_num;	/* low nibble selects the target register */
	spi_sy_write(ctl, SYNTH_CHIP, reg);
}
166
/*
 * synth_freq_set - program the frequency synthesizer for @channel.
 * Sets the SPI clock to 50ns, issues two fixed setup words, then writes
 * synth registers 1, 3, 6, 5 and 8 from channel_tbl in that order
 * (the order follows the hardware bring-up sequence), and finally
 * clears the SPI clock field again.
 */
static void synth_freq_set(struct agnx_priv *priv, unsigned int channel)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);

	/* Set the Clock bits to 50NS */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Write 0x00c0 to LSW and 0x3 to MSW of Synth Chip */
	spi_sy_write(ctl, SYNTH_CHIP, 0x300c0);

	spi_sy_write(ctl, SYNTH_CHIP, 0x32);

	/* Write to Register 1 on the Synth Chip */
	channel_tbl_write(priv, channel, 1);
	/* Write to Register 3 on the Synth Chip */
	channel_tbl_write(priv, channel, 3);
	/* Write to Register 6 on the Synth Chip */
	channel_tbl_write(priv, channel, 6);
	/* Write to Register 5 on the Synth Chip */
	channel_tbl_write(priv, channel, 5);
	/* Write to register 8 on the Synth Chip */
	channel_tbl_write(priv, channel, 8);

	/* Clear the clock bits again */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xf;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);
} /* synth_freq_set */
202
203
204static void antenna_init(struct agnx_priv *priv, int num_antenna)
205{
206 void __iomem *ctl = priv->ctl;
207
208 switch (num_antenna) {
209 case 1:
210 agnx_write32(ctl, AGNX_GCR_NLISTANT, 1);
211 agnx_write32(ctl, AGNX_GCR_NMEASANT, 1);
212 agnx_write32(ctl, AGNX_GCR_NACTIANT, 1);
213 agnx_write32(ctl, AGNX_GCR_NCAPTANT, 1);
214
215 agnx_write32(ctl, AGNX_GCR_ANTCFG, 7);
216 agnx_write32(ctl, AGNX_GCR_BOACT, 34);
217 agnx_write32(ctl, AGNX_GCR_BOINACT, 34);
218 agnx_write32(ctl, AGNX_GCR_BODYNA, 30);
219
220 agnx_write32(ctl, AGNX_GCR_THD0A, 125);
221 agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
222 agnx_write32(ctl, AGNX_GCR_THD0B, 90);
223
224 agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 80);
225 agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
226 agnx_write32(ctl, AGNX_GCR_SIGLTH, 16);
227 break;
228 case 2:
229 agnx_write32(ctl, AGNX_GCR_NLISTANT, 2);
230 agnx_write32(ctl, AGNX_GCR_NMEASANT, 2);
231 agnx_write32(ctl, AGNX_GCR_NACTIANT, 2);
232 agnx_write32(ctl, AGNX_GCR_NCAPTANT, 2);
233 agnx_write32(ctl, AGNX_GCR_ANTCFG, 15);
234 agnx_write32(ctl, AGNX_GCR_BOACT, 36);
235 agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
236 agnx_write32(ctl, AGNX_GCR_BODYNA, 32);
237 agnx_write32(ctl, AGNX_GCR_THD0A, 120);
238 agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
239 agnx_write32(ctl, AGNX_GCR_THD0B, 80);
240 agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);
241 agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
242 agnx_write32(ctl, AGNX_GCR_SIGLTH, 32);
243 break;
244 case 3:
245 agnx_write32(ctl, AGNX_GCR_NLISTANT, 3);
246 agnx_write32(ctl, AGNX_GCR_NMEASANT, 3);
247 agnx_write32(ctl, AGNX_GCR_NACTIANT, 3);
248 agnx_write32(ctl, AGNX_GCR_NCAPTANT, 3);
249 agnx_write32(ctl, AGNX_GCR_ANTCFG, 31);
250 agnx_write32(ctl, AGNX_GCR_BOACT, 36);
251 agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
252 agnx_write32(ctl, AGNX_GCR_BODYNA, 32);
253 agnx_write32(ctl, AGNX_GCR_THD0A, 100);
254 agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
255 agnx_write32(ctl, AGNX_GCR_THD0B, 70);
256 agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);
257 agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
258 agnx_write32(ctl, AGNX_GCR_SIGLTH, 48);
259/* agnx_write32(ctl, AGNX_GCR_SIGLTH, 16); */
260 break;
261 default:
262 printk(KERN_WARNING PFX "Unknow antenna number\n");
263 }
264} /* antenna_init */
265
/*
 * chain_update - reconcile the RF chips' chain state with the requested
 * chain count @chain. Reads chip-0 registers 0x20 and 0x22 and, based
 * on the readback and on @chain, issues per-chip mode words and sets
 * the RX-override register. The branch structure mirrors the
 * reverse-engineered SPECS sequence; do not restructure without
 * hardware to verify against.
 */
static void chain_update(struct agnx_priv *priv, u32 chain)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* First pass: register 0x20 readback decides the 0x1xxx word */
	spi_rc_write(ctl, RF_CHIP0, 0x20);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	if (reg == 0x4)
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
	else if (reg != 0x0)
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
	else {
		/* reg == 0: choose setup by requested chain count */
		if (chain == 3 || chain == 6) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
		} else if (chain == 2 || chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
			spi_rf_write(ctl, RF_CHIP2, 0x1005);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x824);
		} else if (chain == 1) {
			spi_rf_write(ctl, RF_CHIP0, reg|0x1000);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1004);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xc36);
		}
	}

	/* Second pass: register 0x22 readback decides the 0x12xx word */
	spi_rc_write(ctl, RF_CHIP0, 0x22);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	switch (reg) {
	case 0:
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);
		break;
	case 1:
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
		break;
	case 2:
		if (chain == 6 || chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1202);
			spi_rf_write(ctl, RF_CHIP2, 0x1005);
		} else if (chain < 3) {
			spi_rf_write(ctl, RF_CHIP0, 0x1202);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1005);
		}
		break;
	default:
		if (chain == 3) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1201);
		} else if (chain == 2) {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1200);
			spi_rf_write(ctl, RF_CHIP1, 0x1201);
		} else if (chain == 1) {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1200);
		} else if (chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1201);
		} else {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1201);
		}
	}
} /* chain_update */
333
/*
 * antenna_config - disable the TX manager, program a hard-coded
 * 3-antenna setup, then update the chain state for the maximum
 * chain count. Antenna/chain counts should eventually come from
 * the EEPROM (see FIXMEs below).
 */
static void antenna_config(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* Write 0x0 to the TX Management Control Register Enable bit */
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg &= ~0x1;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);

	/* FIXME */
	/* Set initial value based on number of Antennae */
	antenna_init(priv, 3);

	/* FIXME Update Power Templates for current valid Stations */
	/* sta_power_init(priv, 0);*/

	/* FIXME the number of chains should get from eeprom*/
	chain_update(priv, AGNX_CHAINS_MAX);
} /* antenna_config */
355
/*
 * calibrate_oscillator - run the TX local-oscillator calibration
 * sequence: raise the gain-set bit, run residual-DC calibration
 * (ACI mode 0x2) then TX LO calibration (mode 0x4), and finally
 * pulse three undocumented registers under power-saving mode.
 * The do { } while (0) block scopes the saved-register temporaries;
 * note the written values are immediately restored, so the writes
 * appear to act only as a hardware trigger — TODO confirm.
 */
void calibrate_oscillator(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg |= 0x10;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 1);

	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
	/* (Residual DC Calibration) to Calibration Mode */
	agnx_write32(ctl, AGNX_ACI_MODE, 0x2);

	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1004);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
	/* (TX LO Calibration) to Calibration Mode */
	agnx_write32(ctl, AGNX_ACI_MODE, 0x4);

	do {
		u32 reg1, reg2, reg3;
		/* Enable Power Saving Control */
		enable_power_saving(priv);
		/* Save the following registers to restore */
		reg1 = ioread32(ctl + 0x11000);
		reg2 = ioread32(ctl + 0xec50);
		reg3 = ioread32(ctl + 0xec54);
		wmb();

		agnx_write32(ctl, 0x11000, 0xcfdf);
		agnx_write32(ctl, 0xec50, 0x70);
		/* Restore the registers */
		agnx_write32(ctl, 0x11000, reg1);
		agnx_write32(ctl, 0xec50, reg2);
		agnx_write32(ctl, 0xec54, reg3);
		/* Disable Power Saving Control */
		disable_power_saving(priv);
	} while (0);

	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0);
} /* calibrate_oscillator */
404
405
/*
 * radio_channel_set - full channel-change sequence for @channel
 * (1-based index into priv->band.channels). Programs the
 * synthesizer, runs the TX-LO calibration, reconfigures the
 * antennas, then re-enables the radio, the power LED and the TX
 * manager. This is a long hardware-mandated register sequence;
 * the statement order must be preserved.
 */
static void radio_channel_set(struct agnx_priv *priv, unsigned int channel)
{
	void __iomem *ctl = priv->ctl;
	unsigned int freq = priv->band.channels[channel - 1].center_freq;
	u32 reg;
	AGNX_TRACE;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	/* Set SPI Clock to 50 Ns */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Clear the Disable Tx interrupt bit in Interrupt Mask */
/*	reg = agnx_read32(ctl, AGNX_INT_MASK); */
/*	reg &= ~IRQ_TX_DISABLE; */
/*	agnx_write32(ctl, AGNX_INT_MASK, reg); */

	/* Band Selection */
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);

	/* FIXME Set the SiLabs Chip Frequency */
	synth_freq_set(priv, channel);

	/* Hold the relevant units in soft reset while retuning */
	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg |= 0x80100030;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x20009;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1100);

	/* Load the MonitorGain Table */
	monitor_gain_table_init(priv);

	/* Load the TX Fir table */
	tx_fir_table_init(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	/* Readback of chip-0 register 0x22; result currently unused */
	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x22);
	udelay(80);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);


	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);

	reg = agnx_read32(ctl, 0xec50);
	reg |= 0x4f;
	agnx_write32(ctl, 0xec50, reg);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	agnx_write32(ctl, 0x11008, 0x1);
	agnx_write32(ctl, 0x1100c, 0x0);
	agnx_write32(ctl, 0x11008, 0x0);
	agnx_write32(ctl, 0xec50, 0xc);

	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
	agnx_write32(ctl, 0x11010, 0x6e);
	agnx_write32(ctl, 0x11014, 0x6c);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);

	/* Calibrate the Antenna */
	/* antenna_calibrate(priv); */
	/* Calibrate the TxLocalOscillator */
	calibrate_oscillator(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
	agnx_write32(ctl, AGNX_GCR_GAININIT, 0xa);
	agnx_write32(ctl, AGNX_GCR_THCD, 0x0);

	agnx_write32(ctl, 0x11018, 0xb);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);

	/* Write Frequency to Gain Control Channel */
	agnx_write32(ctl, AGNX_GCR_RXCHANEL, freq);
	/* Write 0x140000/Freq to 0x9c08 */
	reg = 0x140000/freq;
	agnx_write32(ctl, 0x9c08, reg);

	/* Release the soft resets / PLL bits set above */
	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg &= ~0x80100030;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);

	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg &= ~0x20009;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x0);

/* FIXME According to Number of Chains: */

/*	1. 1: */
/*	1. Write 0x1203 to RF Chip 0 */
/*	2. Write 0x1200 to RF Chips 1 +2 */
/*	2. 2: */
/*	1. Write 0x1203 to RF Chip 0 */
/*	2. Write 0x1200 to RF Chip 2 */
/*	3. Write 0x1201 to RF Chip 1 */
/*	3. 3: */
/*	1. Write 0x1203 to RF Chip 0 */
/*	2. Write 0x1201 to RF Chip 1 + 2 */
/*	4. 4: */
/*	1. Write 0x1203 to RF Chip 0 + 1 */
/*	2. Write 0x1200 to RF Chip 2 */

/*	5. 6: */
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
	spi_rf_write(ctl, RF_CHIP2, 0x1201);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	/* FIXME Set the Disable Tx interrupt bit in Interrupt Mask
	   (Or 0x20000 to Interrupt Mask) */
/*	reg = agnx_read32(ctl, AGNX_INT_MASK); */
/*	reg |= IRQ_TX_DISABLE; */
/*	agnx_write32(ctl, AGNX_INT_MASK, reg); */

	/* Pulse the gain-control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	/* Configure the Antenna */
	antenna_config(priv);

	/* Write 0x0 to Discovery Mode Enable detect G, B, A packet? */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0);

	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
	reg |= 0x80000000;
	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	/* enable radio on and the power LED */
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
	reg &= ~0x1;
	reg |= 0x2;
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);

	/* Re-enable the TX manager */
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
} /* radio_channel_set */
564
/*
 * base_band_filter_calibrate - kick the baseband-filter calibration on
 * all three RF chips: issue the setup words, drop the forced control
 * clock, read register 0x27 from each chip to trigger calibration,
 * then re-assert the forced clock.
 */
static void base_band_filter_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1001);
	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x0);
	spi_rc_write(ctl, RF_CHIP0, 0x27);
	spi_rc_write(ctl, RF_CHIP1, 0x27);
	spi_rc_write(ctl, RF_CHIP2, 0x27);
	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x1);
}
577
578static void print_offset(struct agnx_priv *priv, u32 chain)
579{
580 void __iomem *ctl = priv->ctl;
581 u32 offset;
582
583 iowrite32((chain), ctl + AGNX_ACI_SELCHAIN);
584 udelay(10);
585 offset = (ioread32(ctl + AGNX_ACI_OFFSET));
586 printk(PFX "Chain is 0x%x, Offset is 0x%x\n", chain, offset);
587}
588
589void print_offsets(struct agnx_priv *priv)
590{
591 print_offset(priv, 0);
592 print_offset(priv, 4);
593 print_offset(priv, 1);
594 print_offset(priv, 5);
595 print_offset(priv, 2);
596 print_offset(priv, 6);
597}
598
599
/* Per-chain calibration state used by do_calibration(). */
struct chains {
	u32 cali;	/* current calibration value being tried */

#define NEED_CALIBRATE		0
#define SUCCESS_CALIBRATE	1
	int status;	/* NEED_CALIBRATE until the offset converges */
};
607
/*
 * chain_calibrate - push chain @num's current calibration value to its
 * RF chip. Chains 0-2 use command base 0x1400, chains 4-6 use 0x1500;
 * chains (0,4) map to chip 0, (1,5) to chip 1, (2,6) to chip 2.
 * Chain 3 does not exist (see do_calibration) and hits BUG().
 */
static void chain_calibrate(struct agnx_priv *priv, struct chains *chains,
			    unsigned int num)
{
	void __iomem *ctl = priv->ctl;
	u32 calibra = chains[num].cali;

	if (num < 3)
		calibra |= 0x1400;
	else
		calibra |= 0x1500;

	switch (num) {
	case 0:
	case 4:
		spi_rf_write(ctl, RF_CHIP0, calibra);
		break;
	case 1:
	case 5:
		spi_rf_write(ctl, RF_CHIP1, calibra);
		break;
	case 2:
	case 6:
		spi_rf_write(ctl, RF_CHIP2, calibra);
		break;
	default:
		BUG();
	}
} /* chain_calibrate */
636
/*
 * get_calibrete_value - read chain @num's offset and adjust its
 * calibration state: offsets below 0xf count as converged; otherwise
 * chains 0-2 decrement their value (wrapping 0 -> 0xff) while chains
 * 4-6 increment, and the chain is marked for another round.
 *
 * NOTE(review): name is a typo of "get_calibrate_value"; kept because
 * the caller in this file depends on it.
 */
static inline void get_calibrete_value(struct agnx_priv *priv, struct chains *chains,
				       unsigned int num)
{
	void __iomem *ctl = priv->ctl;
	u32 offset;

	iowrite32((num), ctl + AGNX_ACI_SELCHAIN);
	/* FIXME: settle delay before readback */
	udelay(10);
	offset = (ioread32(ctl + AGNX_ACI_OFFSET));

	if (offset < 0xf) {
		chains[num].status = SUCCESS_CALIBRATE;
		return;
	}

	/* search downward on chains 0-2, upward on chains 4-6 */
	if (num == 0 || num == 1 || num == 2) {
		if (0 == chains[num].cali)
			chains[num].cali = 0xff;
		else
			chains[num].cali--;
	} else
		chains[num].cali++;

	chains[num].status = NEED_CALIBRATE;
}
663
664static inline void calibra_delay(struct agnx_priv *priv)
665{
666 void __iomem *ctl = priv->ctl;
667 u32 reg;
668 unsigned int i = 100;
669
670 wmb();
671 while (--i) {
672 reg = (ioread32(ctl + AGNX_ACI_STATUS));
673 if (reg == 0x4000)
674 break;
675 udelay(10);
676 }
677 if (!i)
678 printk(PFX "calibration failed\n");
679}
680
681void do_calibration(struct agnx_priv *priv)
682{
683 void __iomem *ctl = priv->ctl;
684 struct chains chains[7];
685 unsigned int i, j;
686 AGNX_TRACE;
687
688 for (i = 0; i < 7; i++) {
689 if (i == 3)
690 continue;
691
692 chains[i].cali = 0x7f;
693 chains[i].status = NEED_CALIBRATE;
694 }
695
696 /* FIXME 0x300 is a magic number */
697 for (j = 0; j < 0x300; j++) {
698 if (chains[0].status == SUCCESS_CALIBRATE &&
699 chains[1].status == SUCCESS_CALIBRATE &&
700 chains[2].status == SUCCESS_CALIBRATE &&
701 chains[4].status == SUCCESS_CALIBRATE &&
702 chains[5].status == SUCCESS_CALIBRATE &&
703 chains[6].status == SUCCESS_CALIBRATE)
704 break;
705
706 /* Attention, there is no chain 3 */
707 for (i = 0; i < 7; i++) {
708 if (i == 3)
709 continue;
710 if (chains[i].status == NEED_CALIBRATE)
711 chain_calibrate(priv, chains, i);
712 }
713 /* Write 0x1 to Calibration Measure */
714 iowrite32((0x1), ctl + AGNX_ACI_MEASURE);
715 calibra_delay(priv);
716
717 for (i = 0; i < 7; i++) {
718 if (i == 3)
719 continue;
720
721 get_calibrete_value(priv, chains, i);
722 }
723 }
724 printk(PFX "Clibrate times is %d\n", j);
725 print_offsets(priv);
726} /* do_calibration */
727
/*
 * antenna_calibrate - full antenna calibration pass: program a
 * 3-antenna gain setup, verify chip-0 registers 0x20/0x22 read zero,
 * run the baseband-filter calibration and the iterative chain
 * calibration, then restore the gain-set registers and disable the
 * receiver. Hardware-ordered sequence; do not reorder statements.
 */
void antenna_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);

	agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);
	agnx_write32(ctl, AGNX_GCR_BOACT, 0x24);
	agnx_write32(ctl, AGNX_GCR_BOINACT, 0x24);
	agnx_write32(ctl, AGNX_GCR_BODYNA, 0x20);
	agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
	agnx_write32(ctl, AGNX_GCR_THD0AL, 0x64);
	agnx_write32(ctl, AGNX_GCR_THD0B, 0x46);
	agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);
	agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x64);
	agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x30);

	spi_rc_write(ctl, RF_CHIP0, 0x20);
	/* Fixme: settle delay before readback */
	udelay(80);
	/* 1. Should read 0x0 */
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x0 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	spi_rc_write(ctl, RF_CHIP0, 0x22);
	udelay(80);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x0 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);

	/* Pulse the gain-control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg |= 0x1c000032;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x0003f07;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	reg = agnx_read32(ctl, 0xec50);
	reg |= 0x40;
	agnx_write32(ctl, 0xec50, reg);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff8);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);

	agnx_write32(ctl, AGNX_GCR_CHAINNUM, 0x6);
	agnx_write32(ctl, 0x19874, 0x0);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);

	/* Calibrate the BaseBandFilter */
	base_band_filter_calibrate(priv);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);

	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);

	/* Measure Calibration */
	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
	calibra_delay(priv);

	/* do calibration */
	do_calibration(priv);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);

	/* Keep only the low nibble of each gain-set register */
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);
	disable_receiver(priv);
} /* antenna_calibrate */
833
/*
 * __antenna_calibrate - abbreviated variant of antenna_calibrate()
 * that skips the register pre-checks and baseband-filter step, and
 * re-enables the receiver at the end instead of disabling it.
 *
 * NOTE(review): the double-underscore prefix is a reserved-identifier
 * namespace in standard C; kernel code uses it by convention for
 * "raw" variants, but the name should still be reconsidered.
 */
void __antenna_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	/* Calibrate the BaseBandFilter */
	/* base_band_filter_calibrate(priv); */
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);


	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);


	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);
	/* Measure Calibration */
	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
	calibra_delay(priv);
	do_calibration(priv);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);

	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);

	/* Keep only the low nibble of each gain-set register */
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);


	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);

	/* Write 0x3 Gain Control Discovery Mode */
	enable_receiver(priv);
}
885
886int agnx_set_channel(struct agnx_priv *priv, unsigned int channel)
887{
888 AGNX_TRACE;
889
890 printk(KERN_ERR PFX "Channel is %d %s\n", channel, __func__);
891 radio_channel_set(priv, channel);
892 return 0;
893}
diff --git a/drivers/staging/agnx/sta.c b/drivers/staging/agnx/sta.c
deleted file mode 100644
index 3e7db5e2811a..000000000000
--- a/drivers/staging/agnx/sta.c
+++ /dev/null
@@ -1,218 +0,0 @@
1#include <linux/delay.h>
2#include <linux/etherdevice.h>
3#include "phy.h"
4#include "sta.h"
5#include "debug.h"
6
/*
 * hash_read - issue a READ command (0x3 in bits 29:28) to the RX
 * hash table for station @sta_id and print the raw command readback.
 * Bit 30 is the busy/status flag; bits 23:16 carry the station id.
 */
void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;

	reglo &= 0xFFFF;
	reglo |= 0x30000000;	/* read command */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);

	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);
}
24
/*
 * hash_write - add @mac_addr to the RX hash table under station id
 * @sta_id. The MAC is packed big-endian-style across the high word
 * and the low 16 bits of the low word; success is signalled by bit 31
 * of the low-word readback.
 */
void hash_write(struct agnx_priv *priv, const u8 *mac_addr, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;
	u32 reghi, reglo;

	if (!is_valid_ether_addr(mac_addr))
		printk(KERN_WARNING PFX "Update hash table: Invalid hwaddr!\n");

	reghi = mac_addr[0] << 24 | mac_addr[1] << 16 | mac_addr[2] << 8 | mac_addr[3];
	reglo = mac_addr[4] << 8 | mac_addr[5];
	reglo |= 0x10000000;	/* Set hash command (write) */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);

	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	if (!(reglo & 0x80000000))
		printk(KERN_WARNING PFX "Update hash table failed\n");
}
47
/*
 * hash_delete - issue a DELETE command (0x2 in bits 29:28) to the RX
 * hash table for station @sta_id and print the command readback.
 */
void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;

	reglo &= 0xFFFF;
	reglo |= 0x20000000;	/* delete command */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);

	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);

}
66
/*
 * hash_dump - debug helper: trigger a hash-table dump for station
 * @sta_id, wait 80us, then print the command, flag and dump registers.
 */
void hash_dump(struct agnx_priv *priv, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;
	u32 reghi, reglo;

	reglo = 0x40000000;	/* status bit */
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
	iowrite32(sta_id << 16, ctl + AGNX_RXM_HASH_DUMP_DATA);

	udelay(80);

	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "hash cmd are : %.8x%.8x\n", reghi, reglo);
	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_FLAG);
	printk(PFX "hash flag is : %.8x\n", reghi);
	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_MST);
	reglo = ioread32(ctl + AGNX_RXM_HASH_DUMP_LST);
	printk(PFX "hash dump mst lst: %.8x%.8x\n", reghi, reglo);
	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_DATA);
	printk(PFX "hash dump data: %.8x\n", reghi);
}
89
90void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
91{
92 void __iomem *ctl = priv->ctl;
93 memcpy_fromio(power, ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
94 sizeof(*power));
95}
96
97inline void
98set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
99{
100 void __iomem *ctl = priv->ctl;
101 /* FIXME 2. Write Template to offset + station number */
102 memcpy_toio(ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
103 power, sizeof(*power));
104}
105
106
107void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
108 unsigned int sta_idx, unsigned int wq_idx)
109{
110 void __iomem *data = priv->data;
111 memcpy_fromio(tx_wq, data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
112 sizeof(*tx_wq) * wq_idx, sizeof(*tx_wq));
113
114}
115
116inline void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
117 unsigned int sta_idx, unsigned int wq_idx)
118{
119 void __iomem *data = priv->data;
120 memcpy_toio(data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
121 sizeof(*tx_wq) * wq_idx, tx_wq, sizeof(*tx_wq));
122}
123
124
125void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
126{
127 void __iomem *data = priv->data;
128
129 memcpy_fromio(sta, data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
130 sizeof(*sta));
131}
132
133inline void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
134{
135 void __iomem *data = priv->data;
136
137 memcpy_toio(data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
138 sta, sizeof(*sta));
139}
140
/*
 * sta_power_init - write a minimal power template (only the EDCF bit
 * set) for station @sta_idx, then wait 40us for the hardware to
 * pick it up. FIXME: template contents are still placeholder.
 */
void sta_power_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	struct agnx_sta_power power;
	u32 reg;
	AGNX_TRACE;

	memset(&power, 0, sizeof(power));
	reg = agnx_set_bits(EDCF, EDCF_SHIFT, 0x1);
	power.reg = cpu_to_le32(reg);
	set_sta_power(priv, &power, sta_idx);
	udelay(40);
} /* add_power_template */
154
155
/*
 * sta_tx_workqueue_init - initialize all STA_TX_WQ_NUM TX workqueue
 * descriptors of station @sta_idx with the same template: workqueue
 * valid and ACK type set in reg2, everything else zero.
 */
static void sta_tx_workqueue_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	struct agnx_sta_tx_wq tx_wq;
	u32 reg;
	unsigned int i;

	memset(&tx_wq, 0, sizeof(tx_wq));

	reg = agnx_set_bits(WORK_QUEUE_VALID, WORK_QUEUE_VALID_SHIFT, 1);
	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 1);
/*	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 0); */
	tx_wq.reg2 |= cpu_to_le32(reg);

	/* Suppose all 8 traffic class are used */
	for (i = 0; i < STA_TX_WQ_NUM; i++)
		set_sta_tx_wq(priv, &tx_wq, sta_idx, i);
} /* sta_tx_workqueue_init */
174
175
/*
 * sta_traffic_init - zero one traffic-class structure, then mark it
 * valid with the new-packet flag set and the RX sequence number
 * preset to 4095 (so the first received frame is never dropped as
 * a duplicate — TODO confirm against RX path).
 */
static void sta_traffic_init(struct agnx_sta_traffic *traffic)
{
	u32 reg;
	memset(traffic, 0, sizeof(*traffic));

	reg = agnx_set_bits(NEW_PACKET, NEW_PACKET_SHIFT, 1);
	reg |= agnx_set_bits(TRAFFIC_VALID, TRAFFIC_VALID_SHIFT, 1);
/*	reg |= agnx_set_bits(TRAFFIC_ACK_TYPE, TRAFFIC_ACK_TYPE_SHIFT, 1); */
	traffic->reg0 = cpu_to_le32(reg);

	/* 3. setting RX Sequence Number to 4095 */
	reg = agnx_set_bits(RX_SEQUENCE_NUM, RX_SEQUENCE_NUM_SHIFT, 4095);
	traffic->reg1 = cpu_to_le32(reg);
}
190
191
/*
 * sta_init - build and write a fresh station descriptor for
 * @sta_idx: valid bit set, concatenation/decompression disabled,
 * all 8 traffic classes initialized, then the TX workqueues.
 */
void sta_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	/* FIXME the length of sta is 256 bytes Is that
	 * dangerous to stack overflow? */
	struct agnx_sta sta;
	u32 reg;
	int i;

	memset(&sta, 0, sizeof(sta));
	/* Set valid to 1 */
	reg = agnx_set_bits(STATION_VALID, STATION_VALID_SHIFT, 1);
	/* Set Enable Concatenation to 0 (?) */
	reg |= agnx_set_bits(ENABLE_CONCATENATION, ENABLE_CONCATENATION_SHIFT, 0);
	/* Set Enable Decompression to 0 (?) */
	reg |= agnx_set_bits(ENABLE_DECOMPRESSION, ENABLE_DECOMPRESSION_SHIFT, 0);
	sta.reg = cpu_to_le32(reg);

	/* Initialize each of the Traffic Class Structures by: */
	for (i = 0; i < 8; i++)
		sta_traffic_init(sta.traffic + i);

	set_sta(priv, &sta, sta_idx);
	sta_tx_workqueue_init(priv, sta_idx);
} /* sta_descriptor_init */
217
218
diff --git a/drivers/staging/agnx/sta.h b/drivers/staging/agnx/sta.h
deleted file mode 100644
index fd504e3f3870..000000000000
--- a/drivers/staging/agnx/sta.h
+++ /dev/null
@@ -1,222 +0,0 @@
#ifndef AGNX_STA_H_
#define AGNX_STA_H_

#define STA_TX_WQ_NUM	8	/* The number of TX workqueues one STA has */

/*
 * Station hash-table command block (big-endian register pair written to
 * the card; see hash_read()/hash_write() prototypes below).
 */
struct agnx_hash_cmd {
	__be32 cmdhi;
#define MACLO		0xFFFF0000
#define MACLO_SHIFT	16
#define STA_ID		0x0000FFF0
#define STA_ID_SHIFT	4
#define CMD		0x0000000C
#define CMD_SHIFT	2
#define STATUS		0x00000002
#define STATUS_SHIFT	1
#define PASS		0x00000001
#define PASS_SHIFT	1	/* NOTE(review): mask is bit 0 but shift is 1,
				 * identical to STATUS_SHIFT -- looks like it
				 * should be 0; confirm against the SPECS. */
	__be32 cmdlo;
} __attribute__((__packed__));


/*
 * Station Power Template
 * FIXME Just for agn100 yet
 */
struct agnx_sta_power {
	__le32 reg;
#define SIGNAL			0x000000FF /* signal */
#define SIGNAL_SHIFT		0
#define RATE			0x00000F00
#define RATE_SHIFT		8
#define TIFS			0x00001000
#define TIFS_SHIFT		12
#define EDCF			0x00002000
#define EDCF_SHIFT		13
#define CHANNEL_BOND		0x00004000
#define CHANNEL_BOND_SHIFT	14
#define PHY_MODE		0x00038000
#define PHY_MODE_SHIFT		15
#define POWER_LEVEL		0x007C0000
#define POWER_LEVEL_SHIFT	18
#define NUM_TRANSMITTERS	0x00800000
#define NUM_TRANSMITTERS_SHIFT	23
} __attribute__((__packed__));

/*
 * TX Workqueue Descriptor
 *
 * NOTE(review): fields are deliberately declared in the order reg0,
 * reg3, reg1, reg2 -- this is the in-memory (hardware) layout of the
 * packed structure; do not "tidy" the ordering.
 */
struct agnx_sta_tx_wq {
	__le32 reg0;
#define HEAD_POINTER_LOW	0xFF000000 /* Head pointer low */
#define HEAD_POINTER_LOW_SHIFT	24
#define TAIL_POINTER		0x00FFFFFF /* Tail pointer */
#define TAIL_POINTER_SHIFT	0

	__le32 reg3;
#define ACK_POINTER_LOW		0xFFFF0000 /* ACK pointer low */
#define ACK_POINTER_LOW_SHIFT	16
#define HEAD_POINTER_HIGH	0x0000FFFF /* Head pointer high */
#define HEAD_POINTER_HIGH_SHIFT	0

	__le32 reg1;
/* ACK timeout tail packet count */
#define ACK_TIMOUT_TAIL_PACK_CNT	0xFFF00000
#define ACK_TIMOUT_TAIL_PACK_CNT_SHIFT	20
/* Head timeout tail packet count */
#define HEAD_TIMOUT_TAIL_PACK_CNT	0x000FFF00
#define HEAD_TIMOUT_TAIL_PACK_CNT_SHIFT	8
#define ACK_POINTER_HIGH	0x000000FF /* ACK pointer high */
#define ACK_POINTER_HIGH_SHIFT	0

	__le32 reg2;
#define WORK_QUEUE_VALID	0x80000000 /* valid */
#define WORK_QUEUE_VALID_SHIFT	31
#define WORK_QUEUE_ACK_TYPE	0x40000000 /* ACK type */
#define WORK_QUEUE_ACK_TYPE_SHIFT	30
/* Head timeout window limit fragmentation count */
#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT	0x3FFF0000
#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT_SHIFT	16
/* Head timeout window limit byte count */
#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT	0x0000FFFF
#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT_SHIFT	0
} __attribute__((__packed__));


/*
 * Traffic Class Structure (one per TC, 8 per station)
 */
struct agnx_sta_traffic {
	__le32 reg0;
#define ACK_TIMOUT_CNT		0xFF800000 /* ACK Timeout Counts */
#define ACK_TIMOUT_CNT_SHIFT	23
#define TRAFFIC_ACK_TYPE	0x00600000 /* ACK Type */
#define TRAFFIC_ACK_TYPE_SHIFT	21
#define NEW_PACKET		0x00100000 /* New Packet */
#define NEW_PACKET_SHIFT	20
#define TRAFFIC_VALID		0x00080000 /* Valid */
#define TRAFFIC_VALID_SHIFT	19
#define RX_HDR_DESC_POINTER	0x0007FFFF /* RX Header Descriptor pointer */
#define RX_HDR_DESC_POINTER_SHIFT	0

	__le32 reg1;
#define RX_PACKET_TIMESTAMP	0xFFFF0000 /* RX Packet Timestamp */
#define RX_PACKET_TIMESTAMP_SHIFT	16
#define TRAFFIC_RESERVED	0x0000E000 /* Reserved */
#define TRAFFIC_RESERVED_SHIFT	13
#define SV			0x00001000 /* sv */
#define SV_SHIFT		12
#define RX_SEQUENCE_NUM		0x00000FFF /* RX Sequence Number */
#define RX_SEQUENCE_NUM_SHIFT	0

	__le32 tx_replay_cnt_low;	/* TX Replay Counter Low */

	__le16 tx_replay_cnt_high;	/* TX Replay Counter High */
	__le16 rx_replay_cnt_high;	/* RX Replay Counter High */

	/* NOTE(review): __be32 while its TX counterpart is __le32 --
	 * presumably intentional per the reverse-engineered layout, but
	 * worth confirming. */
	__be32 rx_replay_cnt_low;	/* RX Replay Counter Low */
} __attribute__((__packed__));

/*
 * Station Descriptor (256 bytes, mirrored in card memory; accessed via
 * get_sta()/set_sta() below)
 */
struct agnx_sta {
	__le32 tx_session_keys[4];	/* Transmit Session Key (0-3) */
	__le32 rx_session_keys[4];	/* Receive Session Key (0-3) */

	__le32 reg;
#define ID_1			0xC0000000 /* id 1 */
#define ID_1_SHIFT		30
#define ID_0			0x30000000 /* id 0 */
#define ID_0_SHIFT		28
#define ENABLE_CONCATENATION	0x0FF00000 /* Enable concatenation */
#define ENABLE_CONCATENATION_SHIFT	20
#define ENABLE_DECOMPRESSION	0x000FF000 /* Enable decompression */
#define ENABLE_DECOMPRESSION_SHIFT	12
#define STA_RESERVED		0x00000C00 /* Reserved */
#define STA_RESERVED_SHIFT	10
#define EAP			0x00000200 /* EAP */
#define EAP_SHIFT		9
#define ED_NULL			0x00000100 /* ED NULL */
#define ED_NULL_SHIFT		8
#define ENCRYPTION_POLICY	0x000000E0 /* Encryption Policy */
#define ENCRYPTION_POLICY_SHIFT	5
#define DEFINED_KEY_ID		0x00000018 /* Defined Key ID */
#define DEFINED_KEY_ID_SHIFT	3
#define FIXED_KEY		0x00000004 /* Fixed Key */
#define FIXED_KEY_SHIFT		2
#define KEY_VALID		0x00000002 /* Key Valid */
#define KEY_VALID_SHIFT		1
#define STATION_VALID		0x00000001 /* Station Valid */
#define STATION_VALID_SHIFT	0

	__le32 tx_aes_blks_unicast;	/* TX AES Blks Unicast */
	__le32 rx_aes_blks_unicast;	/* RX AES Blks Unicast */

	__le16 aes_format_err_unicast_cnt; /* AES Format Error Unicast Counts */
	__le16 aes_replay_unicast;	/* AES Replay Unicast */

	__le16 aes_decrypt_err_unicast;	/* AES Decrypt Error Unicast */
	__le16 aes_decrypt_err_default;	/* AES Decrypt Error default */

	__le16 single_retry_packets;	/* Single Retry Packets */
	__le16 failed_tx_packets;	/* Failed Tx Packets */

	__le16 muti_retry_packets;	/* Multiple Retry Packets */
	__le16 ack_timeouts;		/* ACK Timeouts */

	__le16 frag_tx_cnt;		/* Fragment TX Counts */
	__le16 rts_brq_sent;		/* RTS Brq Sent */

	__le16 tx_packets;		/* TX Packets */
	__le16 cts_back_timeout;	/* CTS Back Timeout */

	__le32 phy_stats_high;		/* PHY Stats High */
	__le32 phy_stats_low;		/* PHY Stats Low */

	struct agnx_sta_traffic traffic[8]; /* Traffic Class Structure (8) */

	__le16 traffic_class0_frag_success; /* Traffic Class 0 Fragment Success */
	__le16 traffic_class1_frag_success; /* Traffic Class 1 Fragment Success */
	__le16 traffic_class2_frag_success; /* Traffic Class 2 Fragment Success */
	__le16 traffic_class3_frag_success; /* Traffic Class 3 Fragment Success */
	__le16 traffic_class4_frag_success; /* Traffic Class 4 Fragment Success */
	__le16 traffic_class5_frag_success; /* Traffic Class 5 Fragment Success */
	__le16 traffic_class6_frag_success; /* Traffic Class 6 Fragment Success */
	__le16 traffic_class7_frag_success; /* Traffic Class 7 Fragment Success */

	__le16 num_frag_non_prime_rates; /* number of Fragments for non-prime rates */
	__le16 ack_timeout_non_prime_rates; /* ACK Timeout for non-prime rates */

} __attribute__((__packed__));


/* Template prepended to beacon frames handed to the card. */
struct agnx_beacon_hdr {
	struct agnx_sta_power power;	/* Tx Station Power Template */
	u8 phy_hdr[6];			/* PHY Hdr */
	u8 frame_len_lo;		/* Frame Length Lo */
	u8 frame_len_hi;		/* Frame Length Hi */
	u8 mac_hdr[24];			/* MAC Header */
	/* FIXME */
	/* 802.11(abg) beacon */
} __attribute__((__packed__));

/* Station hash table access (implemented in sta.c). */
void hash_write(struct agnx_priv *priv, const u8 *mac_addr, u8 sta_id);
void hash_dump(struct agnx_priv *priv, u8 sta_id);
void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);
void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);

/* Read/write the card-side copies of the per-station structures. */
void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx);
void set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power,
		   unsigned int sta_idx);
void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
		   unsigned int sta_idx, unsigned int wq_idx);
void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
		   unsigned int sta_idx, unsigned int wq_idx);
void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);
void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);

void sta_power_init(struct agnx_priv *priv, unsigned int num);
void sta_init(struct agnx_priv *priv, unsigned int num);

#endif /* AGNX_STA_H_ */
diff --git a/drivers/staging/agnx/table.c b/drivers/staging/agnx/table.c
deleted file mode 100644
index b52fef9db0e3..000000000000
--- a/drivers/staging/agnx/table.c
+++ /dev/null
@@ -1,168 +0,0 @@
1#include <linux/pci.h>
2#include <linux/delay.h>
3#include "agnx.h"
4#include "debug.h"
5#include "phy.h"
6
7static const u32
8tx_fir_table[] = { 0x19, 0x5d, 0xce, 0x151, 0x1c3, 0x1ff, 0x1ea, 0x17c, 0xcf,
9 0x19, 0x38e, 0x350, 0x362, 0x3ad, 0x5, 0x44, 0x59, 0x49,
10 0x21, 0x3f7, 0x3e0, 0x3e3, 0x3f3, 0x0 };
11
12void tx_fir_table_init(struct agnx_priv *priv)
13{
14 void __iomem *ctl = priv->ctl;
15 int i;
16
17 for (i = 0; i < ARRAY_SIZE(tx_fir_table); i++)
18 iowrite32(tx_fir_table[i], ctl + AGNX_FIR_BASE + i*4);
19} /* fir_table_setup */
20
21
22static const u32
23gain_table[] = { 0x8, 0x8, 0xf, 0x13, 0x17, 0x1b, 0x1f, 0x23, 0x27, 0x2b,
24 0x2f, 0x33, 0x37, 0x3b, 0x3f, 0x43, 0x47, 0x4b, 0x4f,
25 0x53, 0x57, 0x5b, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
26 0x5f, 0x5f, 0x5f, 0x5f };
27
28void gain_table_init(struct agnx_priv *priv)
29{
30 void __iomem *ctl = priv->ctl;
31 int i;
32
33 for (i = 0; i < ARRAY_SIZE(gain_table); i++) {
34 iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4);
35 iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4 + 0x80);
36 }
37} /* gain_table_init */
38
39void monitor_gain_table_init(struct agnx_priv *priv)
40{
41 void __iomem *ctl = priv->ctl;
42 unsigned int i;
43
44 for (i = 0; i < 0x44; i += 4) {
45 iowrite32(0x61, ctl + AGNX_MONGCR_BASE + i);
46 iowrite32(0x61, ctl + AGNX_MONGCR_BASE + 0x200 + i);
47 }
48 for (i = 0x44; i < 0x64; i += 4) {
49 iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + i);
50 iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + 0x200 + i);
51 }
52 for (i = 0x64; i < 0x94; i += 4) {
53 iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + i);
54 iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + 0x200 + i);
55 }
56 for (i = 0x94; i < 0xdc; i += 4) {
57 iowrite32(0x87, ctl + AGNX_MONGCR_BASE + i);
58 iowrite32(0x87, ctl + AGNX_MONGCR_BASE + 0x200 + i);
59 }
60 for (i = 0xdc; i < 0x148; i += 4) {
61 iowrite32(0x95, ctl + AGNX_MONGCR_BASE + i);
62 iowrite32(0x95, ctl + AGNX_MONGCR_BASE + 0x200 + i);
63 }
64 for (i = 0x148; i < 0x1e8; i += 4) {
65 iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + i);
66 iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + 0x200 + i);
67 }
68 for (i = 0x1e8; i <= 0x1fc; i += 4) {
69 iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + i);
70 iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + 0x200 + i);
71 }
72} /* monitor_gain_table_init */
73
74
75void routing_table_init(struct agnx_priv *priv)
76{
77 void __iomem *ctl = priv->ctl;
78 unsigned int type, subtype;
79 u32 reg;
80
81 disable_receiver(priv);
82
83 for (type = 0; type < 0x3; type++) {
84 for (subtype = 0; subtype < 0x10; subtype++) {
85 /* 1. Set Routing table to R/W and to Return status on Read */
86 reg = (type << ROUTAB_TYPE_SHIFT) |
87 (subtype << ROUTAB_SUBTYPE_SHIFT);
88 reg |= (1 << ROUTAB_RW_SHIFT) | (1 << ROUTAB_STATUS_SHIFT);
89 if (type == ROUTAB_TYPE_DATA) {
90 /* NULL goes to RFP */
91 if (subtype == ROUTAB_SUBTYPE_NULL)
92/* reg |= ROUTAB_ROUTE_RFP; */
93 reg |= ROUTAB_ROUTE_CPU;
94 /* QOS NULL goes to CPU */
95 else if (subtype == ROUTAB_SUBTYPE_QOSNULL)
96 reg |= ROUTAB_ROUTE_CPU;
97 /* All Data and QOS data subtypes go to Encryption */
98 else if ((subtype == ROUTAB_SUBTYPE_DATA) ||
99 (subtype == ROUTAB_SUBTYPE_DATAACK) ||
100 (subtype == ROUTAB_SUBTYPE_DATAPOLL) ||
101 (subtype == ROUTAB_SUBTYPE_DATAPOLLACK) ||
102 (subtype == ROUTAB_SUBTYPE_QOSDATA) ||
103 (subtype == ROUTAB_SUBTYPE_QOSDATAACK) ||
104 (subtype == ROUTAB_SUBTYPE_QOSDATAPOLL) ||
105 (subtype == ROUTAB_SUBTYPE_QOSDATAACKPOLL))
106 reg |= ROUTAB_ROUTE_ENCRY;
107/* reg |= ROUTAB_ROUTE_CPU; */
108 /*Drop NULL and QOS NULL ack, poll and poll ack*/
109 else if ((subtype == ROUTAB_SUBTYPE_NULLACK) ||
110 (subtype == ROUTAB_SUBTYPE_QOSNULLACK) ||
111 (subtype == ROUTAB_SUBTYPE_NULLPOLL) ||
112 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLL) ||
113 (subtype == ROUTAB_SUBTYPE_NULLPOLLACK) ||
114 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLLACK))
115/* reg |= ROUTAB_ROUTE_DROP; */
116 reg |= ROUTAB_ROUTE_CPU;
117 } else {
118 reg |= (ROUTAB_ROUTE_CPU);
119 }
120 iowrite32(reg, ctl + AGNX_RXM_ROUTAB);
121 /* Check to verify that the status bit cleared */
122 routing_table_delay();
123 }
124 }
125 enable_receiver(priv);
126} /* routing_table_init */
127
128void tx_engine_lookup_tbl_init(struct agnx_priv *priv)
129{
130 void __iomem *data = priv->data;
131 unsigned int i;
132
133 for (i = 0; i <= 28; i += 4)
134 iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
135 for (i = 32; i <= 120; i += 8) {
136 iowrite32(0x1e58, data + AGNX_ENGINE_LOOKUP_TBL + i);
137 iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
138 }
139
140 for (i = 128; i <= 156; i += 4)
141 iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
142 for (i = 160; i <= 248; i += 8) {
143 iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i);
144 iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
145 }
146
147 for (i = 256; i <= 284; i += 4)
148 iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
149 for (i = 288; i <= 376; i += 8) {
150 iowrite32(0x1a58, data + AGNX_ENGINE_LOOKUP_TBL + i);
151 iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
152 }
153
154 for (i = 512; i <= 540; i += 4)
155 iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
156 for (i = 544; i <= 632; i += 8) {
157 iowrite32(0x2058, data + AGNX_ENGINE_LOOKUP_TBL + i);
158 iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
159 }
160
161 for (i = 640; i <= 668; i += 4)
162 iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i);
163 for (i = 672; i <= 764; i += 8) {
164 iowrite32(0x2258, data + AGNX_ENGINE_LOOKUP_TBL + i);
165 iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
166 }
167}
168
diff --git a/drivers/staging/agnx/table.h b/drivers/staging/agnx/table.h
deleted file mode 100644
index f0626b5ee86b..000000000000
--- a/drivers/staging/agnx/table.h
+++ /dev/null
@@ -1,10 +0,0 @@
#ifndef AGNX_TABLE_H_
#define AGNX_TABLE_H_

/* One-shot hardware table initialisers (table.c), called during card
 * bring-up. */
void tx_fir_table_init(struct agnx_priv *priv);
void gain_table_init(struct agnx_priv *priv);
void monitor_gain_table_init(struct agnx_priv *priv);
void routing_table_init(struct agnx_priv *priv);
void tx_engine_lookup_tbl_init(struct agnx_priv *priv);

#endif /* AGNX_TABLE_H_ */
diff --git a/drivers/staging/agnx/xmit.c b/drivers/staging/agnx/xmit.c
deleted file mode 100644
index 42db41070cf0..000000000000
--- a/drivers/staging/agnx/xmit.c
+++ /dev/null
@@ -1,836 +0,0 @@
1/**
2 * Airgo MIMO wireless driver
3 *
4 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
5
6 * Thanks for Jeff Williams <angelbane@gmail.com> do reverse engineer
7 * works and published the SPECS at http://airgo.wdwconsulting.net/mymoin
8
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include "agnx.h"
17#include "debug.h"
18#include "phy.h"
19
20unsigned int rx_frame_cnt;
21/* unsigned int local_tx_sent_cnt = 0; */
22
23static inline void disable_rx_engine(struct agnx_priv *priv)
24{
25 void __iomem *ctl = priv->ctl;
26 iowrite32(0x100, ctl + AGNX_CIR_RXCTL);
27 /* Wait for RX Control to have the Disable Rx Interrupt (0x100) set */
28 ioread32(ctl + AGNX_CIR_RXCTL);
29}
30
31static inline void enable_rx_engine(struct agnx_priv *priv)
32{
33 void __iomem *ctl = priv->ctl;
34 iowrite32(0x80, ctl + AGNX_CIR_RXCTL);
35 ioread32(ctl + AGNX_CIR_RXCTL);
36}
37
38inline void disable_rx_interrupt(struct agnx_priv *priv)
39{
40 void __iomem *ctl = priv->ctl;
41 u32 reg;
42
43 disable_rx_engine(priv);
44 reg = ioread32(ctl + AGNX_CIR_RXCFG);
45 reg &= ~0x20;
46 iowrite32(reg, ctl + AGNX_CIR_RXCFG);
47 ioread32(ctl + AGNX_CIR_RXCFG);
48}
49
50inline void enable_rx_interrupt(struct agnx_priv *priv)
51{
52 void __iomem *ctl = priv->ctl;
53 u32 reg;
54
55 reg = ioread32(ctl + AGNX_CIR_RXCFG);
56 reg |= 0x20;
57 iowrite32(reg, ctl + AGNX_CIR_RXCFG);
58 ioread32(ctl + AGNX_CIR_RXCFG);
59 enable_rx_engine(priv);
60}
61
62static inline void rx_desc_init(struct agnx_priv *priv, unsigned int idx)
63{
64 struct agnx_desc *desc = priv->rx.desc + idx;
65 struct agnx_info *info = priv->rx.info + idx;
66
67 memset(info, 0, sizeof(*info));
68
69 info->dma_len = IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct agnx_hdr);
70 info->skb = dev_alloc_skb(info->dma_len);
71 if (info->skb == NULL)
72 agnx_bug("refill err");
73
74 info->mapping = pci_map_single(priv->pdev, skb_tail_pointer(info->skb),
75 info->dma_len, PCI_DMA_FROMDEVICE);
76 memset(desc, 0, sizeof(*desc));
77 desc->dma_addr = cpu_to_be32(info->mapping);
78 /* Set the owner to the card */
79 desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
80}
81
82static inline void rx_desc_reinit(struct agnx_priv *priv, unsigned int idx)
83{
84 struct agnx_info *info = priv->rx.info + idx;
85
86 /* Cause ieee80211 will free the skb buffer, so we needn't to free it again?! */
87 pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
88 rx_desc_init(priv, idx);
89}
90
91static inline void rx_desc_reusing(struct agnx_priv *priv, unsigned int idx)
92{
93 struct agnx_desc *desc = priv->rx.desc + idx;
94 struct agnx_info *info = priv->rx.info + idx;
95
96 memset(desc, 0, sizeof(*desc));
97 desc->dma_addr = cpu_to_be32(info->mapping);
98 /* Set the owner to the card */
99 desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
100}
101
102static void rx_desc_free(struct agnx_priv *priv, unsigned int idx)
103{
104 struct agnx_desc *desc = priv->rx.desc + idx;
105 struct agnx_info *info = priv->rx.info + idx;
106
107 BUG_ON(!desc || !info);
108 if (info->mapping)
109 pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
110 if (info->skb)
111 dev_kfree_skb(info->skb);
112 memset(info, 0, sizeof(*info));
113 memset(desc, 0, sizeof(*desc));
114}
115
116static inline void __tx_desc_free(struct agnx_priv *priv,
117 struct agnx_desc *desc, struct agnx_info *info)
118{
119 BUG_ON(!desc || !info);
120 /* TODO make sure mapping, skb and len are consistency */
121 if (info->mapping)
122 pci_unmap_single(priv->pdev, info->mapping,
123 info->dma_len, PCI_DMA_TODEVICE);
124 if (info->type == PACKET)
125 dev_kfree_skb(info->skb);
126
127 memset(info, 0, sizeof(*info));
128 memset(desc, 0, sizeof(*desc));
129}
130
131static void txm_desc_free(struct agnx_priv *priv, unsigned int idx)
132{
133 struct agnx_desc *desc = priv->txm.desc + idx;
134 struct agnx_info *info = priv->txm.info + idx;
135
136 __tx_desc_free(priv, desc, info);
137}
138
139static void txd_desc_free(struct agnx_priv *priv, unsigned int idx)
140{
141 struct agnx_desc *desc = priv->txd.desc + idx;
142 struct agnx_info *info = priv->txd.info + idx;
143
144 __tx_desc_free(priv, desc, info);
145}
146
/*
 * Arm the RX ring, clear both TX rings, program the ring start/end
 * registers, and release ring control to the card.
 * Always returns 0 (an RX refill failure crashes via agnx_bug()).
 */
int fill_rings(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	unsigned int i;
	u32 reg;
	AGNX_TRACE;

	priv->txd.idx_sent = priv->txm.idx_sent = 0;
	priv->rx.idx = priv->txm.idx = priv->txd.idx = 0;

	for (i = 0; i < priv->rx.size; i++)
		rx_desc_init(priv, i);
	for (i = 0; i < priv->txm.size; i++) {
		memset(priv->txm.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txm.info + i, 0, sizeof(struct agnx_info));
	}
	for (i = 0; i < priv->txd.size; i++) {
		memset(priv->txd.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txd.info + i, 0, sizeof(struct agnx_info));
	}

	/* FIXME Set the card RX TXM and TXD address */
	/* NOTE(review): each ring's END register is loaded with the NEXT
	 * ring's DMA base -- presumably the three rings are allocated
	 * contiguously in DMA memory; confirm before relying on it. */
	agnx_write32(ctl, AGNX_CIR_RXCMSTART, priv->rx.dma);
	agnx_write32(ctl, AGNX_CIR_RXCMEND, priv->txm.dma);

	agnx_write32(ctl, AGNX_CIR_TXMSTART, priv->txm.dma);
	agnx_write32(ctl, AGNX_CIR_TXMEND, priv->txd.dma);

	agnx_write32(ctl, AGNX_CIR_TXDSTART, priv->txd.dma);
	agnx_write32(ctl, AGNX_CIR_TXDEND, priv->txd.dma +
		     sizeof(struct agnx_desc) * priv->txd.size);

	/* FIXME Relinquish control of rings to card */
	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	reg &= ~0x800;
	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);
	return 0;
} /* fill_rings */
185
186void unfill_rings(struct agnx_priv *priv)
187{
188 unsigned long flags;
189 unsigned int i;
190 AGNX_TRACE;
191
192 spin_lock_irqsave(&priv->lock, flags);
193
194 for (i = 0; i < priv->rx.size; i++)
195 rx_desc_free(priv, i);
196 for (i = 0; i < priv->txm.size; i++)
197 txm_desc_free(priv, i);
198 for (i = 0; i < priv->txd.size; i++)
199 txd_desc_free(priv, i);
200
201 spin_unlock_irqrestore(&priv->lock, flags);
202}
203
204/* Extract the bitrate out of a CCK PLCP header.
205 copy from bcm43xx driver */
206static inline u8 agnx_plcp_get_bitrate_cck(__be32 *phyhdr_11b)
207{
208 /* FIXME */
209 switch (*(u8 *)phyhdr_11b) {
210 case 0x0A:
211 return 0;
212 case 0x14:
213 return 1;
214 case 0x37:
215 return 2;
216 case 0x6E:
217 return 3;
218 }
219 agnx_bug("Wrong plcp rate");
220 return 0;
221}
222
223/* FIXME */
224static inline u8 agnx_plcp_get_bitrate_ofdm(__be32 *phyhdr_11g)
225{
226 u8 rate = *(u8 *)phyhdr_11g & 0xF;
227
228 printk(PFX "G mode rate is 0x%x\n", rate);
229 return rate;
230}
231
/* FIXME */
/*
 * Fill an ieee80211_rx_status for a received frame.
 * Signal/noise reporting is partly stubbed: the SNR is hard-wired to 40
 * (the real RSSI-based computation is left commented out below).
 */
static void get_rx_stats(struct agnx_priv *priv, struct agnx_hdr *hdr,
			 struct ieee80211_rx_status *stat)
{
	void __iomem *ctl = priv->ctl;
	u8 *rssi;
	u32 noise;
	/* FIXME just for test */
	int snr = 40;		/* signal-to-noise ratio */

	memset(stat, 0, sizeof(*stat));
	/* RSSI (currently unused -- see commented-out ssi math below). */
	rssi = (u8 *)&hdr->phy_stats_lo;
/*	stat->ssi = (rssi[0] + rssi[1] + rssi[2]) / 3; */
	/* Noise: average of the three per-chain noise registers. */
	noise = ioread32(ctl + AGNX_GCR_NOISE0);
	noise += ioread32(ctl + AGNX_GCR_NOISE1);
	noise += ioread32(ctl + AGNX_GCR_NOISE2);
	stat->noise = noise / 3;
	/* Signal quality: map SNR 0..40 onto 0..100. */
/*	snr = stat->ssi - stat->noise; */
	if (snr >= 0 && snr < 40)
		stat->signal = 5 * snr / 2;
	else if (snr >= 40)
		stat->signal = 100;
	else
		stat->signal = 0;


	/* Rate: exactly one of the 11b/11g PLCP header words is non-zero. */
	if (hdr->_11b0 && !hdr->_11g0) {
		stat->rate_idx = agnx_plcp_get_bitrate_cck(&hdr->_11b0);
	} else if (!hdr->_11b0 && hdr->_11g0) {
		printk(PFX "RX: Found G mode packet\n");
		stat->rate_idx = agnx_plcp_get_bitrate_ofdm(&hdr->_11g0);
	} else
		agnx_bug("Unknown packets type");


	stat->band = IEEE80211_BAND_2GHZ;
	stat->freq = agnx_channels[priv->channel - 1].center_freq;
/*	stat->antenna = 3;
	stat->mactime = be32_to_cpu(hdr->time_stamp);
	stat->channel = priv->channel; */
}
276
277static inline void combine_hdr_frag(struct ieee80211_hdr *ieeehdr,
278 struct sk_buff *skb)
279{
280 u16 fctl;
281 unsigned int hdrlen;
282
283 fctl = le16_to_cpu(ieeehdr->frame_control);
284 hdrlen = ieee80211_hdrlen(fctl);
285 /* FIXME */
286 if (hdrlen < (2+2+6)/*minimum hdr*/ ||
287 hdrlen > sizeof(struct ieee80211_mgmt)) {
288 printk(KERN_ERR PFX "hdr len is %d\n", hdrlen);
289 agnx_bug("Wrong ieee80211 hdr detected");
290 }
291 skb_push(skb, hdrlen);
292 memcpy(skb->data, ieeehdr, hdrlen);
293} /* combine_hdr_frag */
294
295static inline int agnx_packet_check(struct agnx_priv *priv, struct agnx_hdr *agnxhdr,
296 unsigned packet_len)
297{
298 if (agnx_get_bits(CRC_FAIL, CRC_FAIL_SHIFT, be32_to_cpu(agnxhdr->reg1)) == 1) {
299 printk(PFX "RX: CRC check fail\n");
300 goto drop;
301 }
302 if (packet_len > 2048) {
303 printk(PFX "RX: Too long packet detected\n");
304 goto drop;
305 }
306
307 /* FIXME Just usable for Promious Mode, for Manage mode exclude FCS */
308/* if (packet_len - sizeof(*agnxhdr) < FCS_LEN) { */
309/* printk(PFX "RX: Too short packet detected\n"); */
310/* goto drop; */
311/* } */
312 return 0;
313drop:
314 priv->stats.dot11FCSErrorCount++;
315 return -1;
316}
317
/*
 * Drain completed RX descriptors: for every descriptor the card no
 * longer owns, validate the frame, build rx_status, strip the card
 * header, re-attach the 802.11 header, and hand the skb to mac80211.
 */
void handle_rx_irq(struct agnx_priv *priv)
{
	struct ieee80211_rx_status status;
	unsigned int len;
/*	AGNX_TRACE; */

	do {
		struct agnx_desc *desc;
		u32 frag;
		struct agnx_info *info;
		struct agnx_hdr *hdr;
		struct sk_buff *skb;
		unsigned int i = priv->rx.idx % priv->rx.size;

		desc = priv->rx.desc + i;
		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER)	/* card still owns it: nothing more to reap */
			break;

		info = priv->rx.info + i;
		skb = info->skb;
		hdr = (struct agnx_hdr *)(skb->data);

		len = (frag & PACKET_LEN) >> PACKET_LEN_SHIFT;
		if (agnx_packet_check(priv, hdr, len) == -1) {
			/* Bad frame: recycle the buffer in place. */
			rx_desc_reusing(priv, i);
			continue;
		}
		skb_put(skb, len);

		/* Debug dump of every non-beacon header. */
		do {
			u16 fctl;
			fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr)->frame_control);
			if ((fctl & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_BEACON)/* && !(fctl & IEEE80211_STYPE_BEACON)) */
				dump_ieee80211_hdr((struct ieee80211_hdr *)hdr->mac_hdr, "RX");
		} while (0);

		if (hdr->_11b0 && !hdr->_11g0) {
			/* 11b frame */
/*			int j;
			u16 fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr)
					       ->frame_control);
			if ( (fctl & IEEE80211_FCTL_FTYPE) ==  IEEE80211_FTYPE_DATA) {
				agnx_print_rx_hdr(hdr);
				agnx_print_sta(priv, BSSID_STAID);
				for (j = 0; j < 8; j++)
					agnx_print_sta_tx_wq(priv, BSSID_STAID, j);
			} */

			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)hdr->mac_hdr, skb);
		} else if (!hdr->_11b0 && hdr->_11g0) {
			/* 11g frame (debug-heavy path) */
/*			int j; */
			agnx_print_rx_hdr(hdr);
			agnx_print_sta(priv, BSSID_STAID);
/*			for (j = 0; j < 8; j++) */
			agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);

			print_hex_dump_bytes("agnx: RX_PACKET: ", DUMP_PREFIX_NONE,
					     skb->data, skb->len + 8);

/*			if (agnx_plcp_get_bitrate_ofdm(&hdr->_11g0) == 0) */
			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)
					 ((void *)&hdr->mac_hdr), skb);
/*			dump_ieee80211_hdr((struct ieee80211_hdr *)skb->data, "RX G"); */
		} else
			agnx_bug("Unknown packets type");
		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
		ieee80211_rx_irqsafe(priv->hw, skb);
		rx_desc_reinit(priv, i);

	/* NOTE(review): post-increment here means the loop exits after a
	 * single frame whenever rx.idx was 0 on entry -- looks like a bug
	 * (a plain `while (1)` on OWNER seems intended); confirm before
	 * changing, since rx.idx also feeds the ring index above. */
	} while (priv->rx.idx++);
} /* handle_rx_irq */
393
/*
 * Reap completed descriptors from a TX ring (shared by TXM and TXD).
 *
 * Walks from idx_sent (last reaped) toward idx (last queued) and stops
 * at the first descriptor the card still owns.  For PACKET entries the
 * card's agnx_hdr is stripped, the saved 802.11 header restored, and
 * the skb returned to mac80211.
 */
static inline void handle_tx_irq(struct agnx_priv *priv, struct agnx_ring *ring)
{
	struct agnx_desc *desc;
	struct agnx_info *info;
	unsigned int idx;

	for (idx = ring->idx_sent; idx < ring->idx; idx++) {
		unsigned int i = idx % ring->size;
		u32 frag;

		desc = ring->desc + i;
		info = ring->info + i;

		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER) {
			/* Card still owns it: a HEADER entry means we have
			 * caught up; a PACKET entry here is an error. */
			if (info->type == HEADER)
				break;
			else
				agnx_bug("TX error");
		}

		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_TODEVICE);

		/* Dormant debug block (len is computed but unused). */
		do {
/*			int j; */
			size_t len;
			len = info->skb->len - sizeof(struct agnx_hdr) + info->hdr_len;
/*			if (len == 614) { */
/*				agnx_print_desc(desc); */
			if (info->type == PACKET) {
/*				agnx_print_tx_hdr((struct agnx_hdr *)info->skb->data); */
/*				agnx_print_sta_power(priv, LOCAL_STAID); */
/*				agnx_print_sta(priv, LOCAL_STAID); */
/*				for (j = 0; j < 8; j++) */
/*					agnx_print_sta_tx_wq(priv, LOCAL_STAID, 0); */
/*				agnx_print_sta_power(priv, BSSID_STAID); */
/*				agnx_print_sta(priv, BSSID_STAID); */
/*				for (j = 0; j < 8; j++) */
/*					agnx_print_sta_tx_wq(priv, BSSID_STAID, 0); */
			}
/*		} */
		} while (0);

		if (info->type == PACKET) {
/*			dump_txm_registers(priv);
			dump_rxm_registers(priv);
			dump_bm_registers(priv);
			dump_cir_registers(priv); */
		}

		if (info->type == PACKET) {
/*			struct ieee80211_hdr *hdr; */
			struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(info->skb);

			/* Drop the card header, restore the saved 802.11 one. */
			skb_pull(info->skb, sizeof(struct agnx_hdr));
			memcpy(skb_push(info->skb, info->hdr_len), &info->hdr, info->hdr_len);

/*			dump_ieee80211_hdr((struct ieee80211_hdr *)info->skb->data, "TX_HANDLE"); */
/*			print_hex_dump_bytes("agnx: TX_HANDLE: ", DUMP_PREFIX_NONE, */
/*					     info->skb->data, info->skb->len); */

			/* NOTE(review): ACK is reported for every non-NO_ACK
			 * frame without consulting hardware status. */
			if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK))
				txi->flags |= IEEE80211_TX_STAT_ACK;

			ieee80211_tx_status_irqsafe(priv->hw, info->skb);


/*			info->tx_status.queue_number = (ring->size - i) / 2; */
/*			ieee80211_tx_status_irqsafe(priv->hw, info->skb, &(info->tx_status)); */
/*		} else */
/*			dev_kfree_skb_irq(info->skb); */
		}
		memset(desc, 0, sizeof(*desc));
		memset(info, 0, sizeof(*info));
	}

	ring->idx_sent = idx;
	/* TODO fill the priv->low_level_stats */

	/* ieee80211_wake_queue(priv->hw, 0); */
}
475
/* Reap the TX management ring. */
void handle_txm_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txm);
}
480
/* Reap the TX data ring. */
void handle_txd_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txd);
}
485
/*
 * Handle the miscellaneous (non RX/TX-completion) interrupt causes.
 * Most causes are only logged; a few (TX beacon, RX Y/hash-hit/frame,
 * error) are acknowledged in AGNX_INT_STAT, and the RX Y / hash-hit /
 * frame sources are additionally masked off in AGNX_INT_MASK.
 */
void handle_other_irq(struct agnx_priv *priv)
{
/*	void __iomem *ctl = priv->ctl; */
	u32 status = priv->irq_status;	/* snapshot taken by the top-level ISR */
	void __iomem *ctl = priv->ctl;
	u32 reg;

	if (status & IRQ_TX_BEACON) {
		iowrite32(IRQ_TX_BEACON, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: TX Beacon control is 0X%.8X\n", ioread32(ctl + AGNX_TXM_BEACON_CTL));
		printk(PFX "IRQ: TX Beacon rx frame num: %d\n", rx_frame_cnt);
	}
	if (status & IRQ_TX_RETRY) {
		reg = ioread32(ctl + AGNX_TXM_RETRYSTAID);
		printk(PFX "IRQ: TX Retry, RETRY STA ID is %x\n", reg);
	}
	if (status & IRQ_TX_ACTIVITY)
		printk(PFX "IRQ: TX Activity\n");
	if (status & IRQ_RX_ACTIVITY)
		printk(PFX "IRQ: RX Activity\n");
	if (status & IRQ_RX_X)
		printk(PFX "IRQ: RX X\n");
	if (status & IRQ_RX_Y) {
		/* Mask and acknowledge. */
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_Y;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_Y, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Y\n");
	}
	if (status & IRQ_RX_HASHHIT) {
		/* Mask and acknowledge. */
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_HASHHIT;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_HASHHIT, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Hash Hit\n");

	}
	if (status & IRQ_RX_FRAME) {
		/* Mask, acknowledge, and count the frame. */
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_FRAME;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_FRAME, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Frame\n");
		rx_frame_cnt++;
	}
	if (status & IRQ_ERR_INT) {
		iowrite32(IRQ_ERR_INT, ctl + AGNX_INT_STAT);
/*		agnx_hw_reset(priv); */
		printk(PFX "IRQ: Error Interrupt\n");
	}
	if (status & IRQ_TX_QUE_FULL)
		printk(PFX "IRQ: TX Workqueue Full\n");
	if (status & IRQ_BANDMAN_ERR)
		printk(PFX "IRQ: Bandwidth Management Error\n");
	if (status & IRQ_TX_DISABLE)
		printk(PFX "IRQ: TX Disable\n");
	if (status & IRQ_RX_IVASESKEY)
		printk(PFX "IRQ: RX Invalid Session Key\n");
	if (status & IRQ_REP_THHIT)
		printk(PFX "IRQ: Replay Threshold Hit\n");
	if (status & IRQ_TIMER1)
		printk(PFX "IRQ: Timer1\n");
	if (status & IRQ_TIMER_CNT)
		printk(PFX "IRQ: Timer Count\n");
	if (status & IRQ_PHY_FASTINT)
		printk(PFX "IRQ: Phy Fast Interrupt\n");
	if (status & IRQ_PHY_SLOWINT)
		printk(PFX "IRQ: Phy Slow Interrupt\n");
	if (status & IRQ_OTHER)
		printk(PFX "IRQ: 0x80000000\n");
} /* handle_other_irq */
557
558
/* Set the routing flags in the card TX header.
 * NOTE(review): reg5 is stored WITHOUT cpu_to_be32(), unlike the other
 * header fields; the swapped form is left commented out below, so this
 * appears deliberate but is only correct on big-endian hosts -- confirm
 * against working hardware before touching. */
static inline void route_flag_set(struct agnx_hdr *txhdr)
{
/*	u32 reg = 0; */

	/* FIXME */
/*	reg = (0x7 << ROUTE_COMPRESSION_SHIFT) & ROUTE_COMPRESSION; */
/*	txhdr->reg5 = cpu_to_be32(reg); */
	txhdr->reg5 = (0xa << 0x0) | (0x7 << 0x18);
/*	txhdr->reg5 = cpu_to_be32((0xa << 0x0) | (0x7 << 0x18)); */
/*	txhdr->reg5 = cpu_to_be32(0x7 << 0x0); */
}
570
/*
 * Map a bitrate (units of 100 kb/s) to the card's TX power level.
 * Returns 0 if the rate is not recognised, per the original contract.
 * With two active antennas the level is lowered by 3.
 *
 * BUG FIX: power_level was left uninitialized on the default path, so
 * if agnx_bug() returned, the `if (power_level && ...)` test and the
 * return value read an indeterminate value (undefined behavior).  It is
 * now initialized to 0, matching the documented "no match" result.
 */
static inline unsigned int get_power_level(unsigned int rate, unsigned int antennas_num)
{
	unsigned int power_level = 0;	/* 0 == no match */

	switch (rate) {
	case 10:
	case 20:
	case 55:
	case 60:
	case 90:
	case 120:
		power_level = 22;
		break;
	case 180:
		power_level = 19;
		break;
	case 240:
		power_level = 18;
		break;
	case 360:
		power_level = 16;
		break;
	case 480:
		power_level = 15;
		break;
	case 540:
		power_level = 14;
		break;
	default:
		agnx_bug("Error rate setting\n");
		break;
	}

	if (power_level && (antennas_num == 2))
		power_level -= 3;

	return power_level;
}
614
/*
 * fill_agnx_hdr() - build the hardware struct agnx_hdr at the front of a
 * TX skb.
 * @priv:    device private data (unused directly here)
 * @tx_info: per-packet bookkeeping; tx_info->skb->data must already point
 *           at space for a struct agnx_hdr (see skb_push in __agnx_tx)
 *
 * Zeroes the header, then packs reg4 (station/workqueue IDs), reg1
 * (sequence number + MAC header length), the copied-in MAC header, reg0
 * (ACK/multicast/relay/TM flags), the retry limits, reg3 (fragment size +
 * payload length) and finally the routing field via route_flag_set().
 * All regN stores go through cpu_to_be32().
 */
615static inline void fill_agnx_hdr(struct agnx_priv *priv, struct agnx_info *tx_info)
616{
617 struct agnx_hdr *txhdr = (struct agnx_hdr *)tx_info->skb->data;
618 size_t len;
619 u16 fc = le16_to_cpu(*(__le16 *)&tx_info->hdr);
620 u32 reg;
621
622 memset(txhdr, 0, sizeof(*txhdr));
623
624/* reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, LOCAL_STAID); */
625 reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, BSSID_STAID);
626 reg |= agnx_set_bits(WORKQUEUE_ID, WORKQUEUE_ID_SHIFT, 0);
627 txhdr->reg4 = cpu_to_be32(reg);
628
629 /* Set the Hardware Sequence Number to 1? */
630 reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 0);
631/* reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 1); */
632 reg |= agnx_set_bits(MAC_HDR_LEN, MAC_HDR_LEN_SHIFT, tx_info->hdr_len);
633 txhdr->reg1 = cpu_to_be32(reg);
634 /* Set the agnx_hdr's MAC header */
635 memcpy(txhdr->mac_hdr, &tx_info->hdr, tx_info->hdr_len);
636
637 reg = agnx_set_bits(ACK, ACK_SHIFT, 1);
638/* reg = agnx_set_bits(ACK, ACK_SHIFT, 0); */
639 reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 0);
640/* reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 1); */
641 reg |= agnx_set_bits(RELAY, RELAY_SHIFT, 0);
642 reg |= agnx_set_bits(TM, TM_SHIFT, 0);
643 txhdr->reg0 = cpu_to_be32(reg);
644
645 /* Set the long and short retry limits */
646 txhdr->tx.short_retry_limit = tx_info->txi->control.rates[0].count;
647 txhdr->tx.long_retry_limit = tx_info->txi->control.rates[0].count;
648
649 /* FIXME */
 /* NOTE(review): the computed frame length (including FCS and, for
  * protected frames, an extra 8 bytes) is immediately clobbered by
  * the hard-coded 2398 below -- the computation on lines 650-652 is
  * currently dead.  Presumably a debugging workaround; confirm
  * before removing either side. */
650 len = tx_info->skb->len - sizeof(*txhdr) + tx_info->hdr_len + FCS_LEN;
651 if (fc & IEEE80211_FCTL_PROTECTED)
652 len += 8;
653 len = 2398;
654 reg = agnx_set_bits(FRAG_SIZE, FRAG_SIZE_SHIFT, len);
655 len = tx_info->skb->len - sizeof(*txhdr);
656 reg |= agnx_set_bits(PAYLOAD_LEN, PAYLOAD_LEN_SHIFT, len);
657 txhdr->reg3 = cpu_to_be32(reg);
658
659 route_flag_set(txhdr);
660} /* fill_hdr */
661
/*
 * txm_power_set() - program per-station TX power/PHY parameters for the
 * BSSID station entry.
 * @priv: device private data
 * @txi:  mac80211 per-packet TX info; only rates[0].idx is consulted
 *
 * A negative rate index selects 802.11b short-preamble mode, otherwise
 * 802.11g.  SIGNAL and RATE are hard-coded to 0xB and POWER_LEVEL to 20
 * with a single transmitter; the assembled register is handed to
 * set_sta_power() for BSSID_STAID.
 *
 * NOTE(review): power.reg is assigned the host-order value (the
 * cpu_to_le32 variant is commented out) -- confirm the expected byte
 * order of agnx_sta_power.reg.
 */
662static void txm_power_set(struct agnx_priv *priv,
663 struct ieee80211_tx_info *txi)
664{
665 struct agnx_sta_power power;
666 u32 reg;
667
668 /* FIXME */
669 if (txi->control.rates[0].idx < 0) {
670 /* For B mode Short Preamble */
671 reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_SHORT);
672/* control->tx_rate = -control->tx_rate; */
673 } else
674 reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211G);
675/* reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_LONG); */
676 reg |= agnx_set_bits(SIGNAL, SIGNAL_SHIFT, 0xB);
677 reg |= agnx_set_bits(RATE, RATE_SHIFT, 0xB);
678/* reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 15); */
679 reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 20);
680 /* if rate < 11M set it to 0 */
681 reg |= agnx_set_bits(NUM_TRANSMITTERS, NUM_TRANSMITTERS_SHIFT, 1);
682/* reg |= agnx_set_bits(EDCF, EDCF_SHIFT, 1); */
683/* reg |= agnx_set_bits(TIFS, TIFS_SHIFT, 1); */
684
685 power.reg = reg;
686/* power.reg = cpu_to_le32(reg); */
687
688/* set_sta_power(priv, &power, LOCAL_STAID); */
689 set_sta_power(priv, &power, BSSID_STAID);
690}
691
/*
 * tx_packet_check() - sanity-check an outgoing skb before queuing.
 * @skb: frame handed down from mac80211
 *
 * Returns 0 when the frame looks usable, -1 when it should be dropped:
 * either it exceeds 2048 bytes (also triggers agnx_bug) or it consists of
 * nothing but the 802.11 header (zero payload).
 */
692static inline int tx_packet_check(struct sk_buff *skb)
693{
694 unsigned int ieee_len = ieee80211_get_hdrlen_from_skb(skb);
695 if (skb->len > 2048) {
696 printk(KERN_ERR PFX "length is %d\n", skb->len);
697 agnx_bug("Too long TX skb");
698 return -1;
699 }
700 /* FIXME */
701 if (skb->len == ieee_len) {
702 printk(PFX "A strange TX packet\n");
703 return -1;
704 /* tx_faile_irqsafe(); */
705 }
706 return 0;
707}
708
/*
 * __agnx_tx() - queue one frame on a TX ring (txm or txd) and kick the
 * hardware.
 * @priv: device private data
 * @skb:  frame to send; an agnx header is pushed onto its front here
 * @ring: destination ring (&priv->txd or &priv->txm)
 *
 * Under priv->lock: disables RX interrupts, claims two consecutive ring
 * slots -- a HEADER descriptor covering the driver-built struct agnx_hdr
 * and a PACKET descriptor covering the remaining payload -- DMA-maps both
 * with pci_map_single(), then sets bit 3 in the TXM and TXD control
 * registers to start transmission.  Always returns 0.
 *
 * NOTE(review): the pci_map_single() results are never checked, and the
 * ring-full / stop-queue handling is commented out -- confirm callers can
 * tolerate both.
 */
709static int __agnx_tx(struct agnx_priv *priv, struct sk_buff *skb,
710 struct agnx_ring *ring)
711{
712 struct agnx_desc *hdr_desc, *frag_desc;
713 struct agnx_info *hdr_info, *frag_info;
714 struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
715 unsigned long flags;
716 unsigned int i;
717
718 spin_lock_irqsave(&priv->lock, flags);
719
720 /* The RX interrupt need be Disable until this TX packet
721 is handled in the next tx interrupt */
722 disable_rx_interrupt(priv);
723
 /* Claim two consecutive slots: one HEADER, one PACKET. */
724 i = ring->idx;
725 ring->idx += 2;
726/* if (priv->txm_idx - priv->txm_idx_sent == AGNX_TXM_RING_SIZE - 2) */
727/* ieee80211_stop_queue(priv->hw, 0); */
728
729 /* Set agnx header's info and desc */
730 i %= ring->size;
731 hdr_desc = ring->desc + i;
732 hdr_info = ring->info + i;
733 hdr_info->hdr_len = ieee80211_get_hdrlen_from_skb(skb);
734 memcpy(&hdr_info->hdr, skb->data, hdr_info->hdr_len);
735
736 /* Add the agnx header to the front of the SKB */
737 skb_push(skb, sizeof(struct agnx_hdr) - hdr_info->hdr_len);
738
739 hdr_info->txi = txi;
740 hdr_info->dma_len = sizeof(struct agnx_hdr);
741 hdr_info->skb = skb;
742 hdr_info->type = HEADER;
743 fill_agnx_hdr(priv, hdr_info);
744 hdr_info->mapping = pci_map_single(priv->pdev, skb->data,
745 hdr_info->dma_len, PCI_DMA_TODEVICE);
 /* First fragment: marks FIRST_FRAG and hands ownership to the HW. */
746 do {
747 u32 frag = 0;
748 frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 1);
749 frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 0);
750 frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
751 frag |= agnx_set_bits(FIRST_FRAG_LEN, FIRST_FRAG_LEN_SHIFT, 1);
752 frag |= agnx_set_bits(OWNER, OWNER_SHIFT, 1);
753 hdr_desc->frag = cpu_to_be32(frag);
754 } while (0);
755 hdr_desc->dma_addr = cpu_to_be32(hdr_info->mapping);
756
757
758 /* Set Frag's info and desc */
759 i = (i + 1) % ring->size;
760 frag_desc = ring->desc + i;
761 frag_info = ring->info + i;
762 memcpy(frag_info, hdr_info, sizeof(struct agnx_info));
763 frag_info->type = PACKET;
764 frag_info->dma_len = skb->len - hdr_info->dma_len;
765 frag_info->mapping = pci_map_single(priv->pdev, skb->data + hdr_info->dma_len,
766 frag_info->dma_len, PCI_DMA_TODEVICE);
 /* Second fragment: payload after the agnx header, marked LAST_FRAG. */
767 do {
768 u32 frag = 0;
769 frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 0);
770 frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 1);
771 frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
772 frag |= agnx_set_bits(SUB_FRAG_LEN, SUB_FRAG_LEN_SHIFT, frag_info->dma_len);
773 frag_desc->frag = cpu_to_be32(frag);
774 } while (0);
775 frag_desc->dma_addr = cpu_to_be32(frag_info->mapping);
776
777 txm_power_set(priv, txi);
778
779/* do { */
780/* int j; */
781/* size_t len; */
782/* len = skb->len - hdr_info->dma_len + hdr_info->hdr_len; */
783/* if (len == 614) { */
784/* agnx_print_desc(hdr_desc); */
785/* agnx_print_desc(frag_desc); */
786/* agnx_print_tx_hdr((struct agnx_hdr *)skb->data); */
787/* agnx_print_sta_power(priv, LOCAL_STAID); */
788/* agnx_print_sta(priv, LOCAL_STAID); */
789/* for (j = 0; j < 8; j++) */
790/* agnx_print_sta_tx_wq(priv, LOCAL_STAID, j); */
791/* agnx_print_sta_power(priv, BSSID_STAID); */
792/* agnx_print_sta(priv, BSSID_STAID); */
793/* for (j = 0; j < 8; j++) */
794/* agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
795/* } */
796/* } while (0); */
797
798 spin_unlock_irqrestore(&priv->lock, flags);
799
800 /* FIXME ugly code */
801 /* Trigger TXM */
802 do {
803 u32 reg;
804 reg = (ioread32(priv->ctl + AGNX_CIR_TXMCTL));
805 reg |= 0x8;
806 iowrite32((reg), priv->ctl + AGNX_CIR_TXMCTL);
807 } while (0);
808
809 /* Trigger TXD */
810 do {
811 u32 reg;
812 reg = (ioread32(priv->ctl + AGNX_CIR_TXDCTL));
813 reg |= 0x8;
814 iowrite32((reg), priv->ctl + AGNX_CIR_TXDCTL);
815 } while (0);
816
817 return 0;
818}
819
/*
 * _agnx_tx() - TX entry point: validate and dispatch a frame to a ring.
 * @priv: device private data
 * @skb:  frame handed down from mac80211
 *
 * Drops frames rejected by tx_packet_check() (still returning 0).  The
 * frame-control field is read from the first two little-endian bytes of
 * the 802.11 header; data frames go to the txd ring, everything else to
 * the txm ring.  Returns the result of __agnx_tx() (always 0).
 */
820int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb)
821{
822 u16 fctl;
823
824 if (tx_packet_check(skb))
825 return 0;
826
827/* print_hex_dump_bytes("agnx: TX_PACKET: ", DUMP_PREFIX_NONE, */
828/* skb->data, skb->len); */
829
830 fctl = le16_to_cpu(*((__le16 *)skb->data));
831
832 if ((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
833 return __agnx_tx(priv, skb, &priv->txd);
834 else
835 return __agnx_tx(priv, skb, &priv->txm);
836}
diff --git a/drivers/staging/agnx/xmit.h b/drivers/staging/agnx/xmit.h
deleted file mode 100644
index 93ac4157e697..000000000000
--- a/drivers/staging/agnx/xmit.h
+++ /dev/null
@@ -1,250 +0,0 @@
1#ifndef AGNX_XMIT_H_
2#define AGNX_XMIT_H_
3
4#include <net/mac80211.h>
5
6struct agnx_priv;
7
8static inline u32 agnx_set_bits(u32 mask, u8 shift, u32 value)
9{
10 return (value << shift) & mask;
11}
12
13static inline u32 agnx_get_bits(u32 mask, u8 shift, u32 value)
14{
15 return (value & mask) >> shift;
16}
17
18
19struct agnx_rx {
20 __be16 rx_packet_duration; /* RX Packet Duration */
21 __be16 replay_cnt; /* Replay Count */
22} __attribute__((__packed__));
23
24
25struct agnx_tx {
26 u8 long_retry_limit; /* Long Retry Limit */
27 u8 short_retry_limit; /* Short Retry Limit */
28 u8 long_retry_cnt; /* Long Retry Count */
29 u8 short_retry_cnt; /* Short Retry Count */
30} __attribute__((__packed__));
31
32
33/* Copy from bcm43xx */
34#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
35#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
36#define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__, nr_bytes)
37
38#define P4D_BIT3S(magic, nr_bits) __be32 __padding##magic:nr_bits
39#define P4D_BITS(line, nr_bits) P4D_BIT3S(line, nr_bits)
40#define PAD_BITS(nr_bits) P4D_BITS(__LINE__, nr_bits)
41
42
43struct agnx_hdr {
44 __be32 reg0;
45#define RTS 0x80000000 /* RTS */
46#define RTS_SHIFT 31
47#define MULTICAST 0x40000000 /* multicast */
48#define MULTICAST_SHIFT 30
49#define ACK 0x30000000 /* ACK */
50#define ACK_SHIFT 28
51#define TM 0x08000000 /* TM */
52#define TM_SHIFT 27
53#define RELAY 0x04000000 /* Relay */
54#define RELAY_SHIFT 26
55/* PAD_BITS(4); */
56#define REVISED_FCS 0x00380000 /* revised FCS */
57#define REVISED_FCS_SHIFT 19
58#define NEXT_BUFFER_ADDR 0x0007FFFF /* Next Buffer Address */
59#define NEXT_BUFFER_ADDR_SHIFT 0
60
61 __be32 reg1;
62#define MAC_HDR_LEN 0xFC000000 /* MAC Header Length */
63#define MAC_HDR_LEN_SHIFT 26
64#define DURATION_OVERIDE 0x02000000 /* Duration Override */
65#define DURATION_OVERIDE_SHIFT 25
66#define PHY_HDR_OVERIDE 0x01000000 /* PHY Header Override */
67#define PHY_HDR_OVERIDE_SHIFT 24
68#define CRC_FAIL 0x00800000 /* CRC fail */
69#define CRC_FAIL_SHIFT 23
70/* PAD_BITS(1); */
71#define SEQUENCE_NUMBER 0x00200000 /* Sequence Number */
72#define SEQUENCE_NUMBER_SHIFT 21
73/* PAD_BITS(2); */
74#define BUFF_HEAD_ADDR 0x0007FFFF /* Buffer Head Address */
75#define BUFF_HEAD_ADDR_SHIFT 0
76
77 __be32 reg2;
78#define PDU_COUNT 0xFC000000 /* PDU Count */
79#define PDU_COUNT_SHIFT 26
80/* PAD_BITS(3); */
81#define WEP_KEY 0x00600000 /* WEP Key # */
82#define WEP_KEY_SHIFT 21
83#define USES_WEP_KEY 0x00100000 /* Uses WEP Key */
84#define USES_WEP_KEY_SHIFT 20
85#define KEEP_ALIVE 0x00080000 /* Keep alive */
86#define KEEP_ALIVE_SHIFT 19
87#define BUFF_TAIL_ADDR 0x0007FFFF /* Buffer Tail Address */
88#define BUFF_TAIL_ADDR_SHIFT 0
89
90 __be32 reg3;
91#define CTS_11G 0x80000000 /* CTS in 11g */
92#define CTS_11G_SHIFT 31
93#define RTS_11G 0x40000000 /* RTS in 11g */
94#define RTS_11G_SHIFT 30
95/* PAD_BITS(2); */
96#define FRAG_SIZE 0x0FFF0000 /* fragment size */
97#define FRAG_SIZE_SHIFT 16
98#define PAYLOAD_LEN 0x0000FFF0 /* payload length */
99#define PAYLOAD_LEN_SHIFT 4
100#define FRAG_NUM 0x0000000F /* number of frags */
101#define FRAG_NUM_SHIFT 0
102
103 __be32 reg4;
104/* PAD_BITS(4); */
105#define RELAY_STAID 0x0FFF0000 /* relayStald */
106#define RELAY_STAID_SHIFT 16
107#define STATION_ID 0x0000FFF0 /* Station ID */
108#define STATION_ID_SHIFT 4
109#define WORKQUEUE_ID 0x0000000F /* Workqueue ID */
110#define WORKQUEUE_ID_SHIFT 0
111
112 /* FIXME this register maybe is LE? */
113 __be32 reg5;
114/* PAD_BITS(4); */
115#define ROUTE_HOST 0x0F000000
116#define ROUTE_HOST_SHIFT 24
117#define ROUTE_CARD_CPU 0x00F00000
118#define ROUTE_CARD_CPU_SHIFT 20
119#define ROUTE_ENCRYPTION 0x000F0000
120#define ROUTE_ENCRYPTION_SHIFT 16
121#define ROUTE_TX 0x0000F000
122#define ROUTE_TX_SHIFT 12
123#define ROUTE_RX1 0x00000F00
124#define ROUTE_RX1_SHIFT 8
125#define ROUTE_RX2 0x000000F0
126#define ROUTE_RX2_SHIFT 4
127#define ROUTE_COMPRESSION 0x0000000F
128#define ROUTE_COMPRESSION_SHIFT 0
129
130 __be32 _11g0; /* 11g */
131 __be32 _11g1; /* 11g */
132 __be32 _11b0; /* 11b */
133 __be32 _11b1; /* 11b */
134 u8 mac_hdr[32]; /* MAC header */
135
136 __be16 rts_duration; /* RTS duration */
137 __be16 last_duration; /* Last duration */
138 __be16 sec_last_duration; /* Second to Last duration */
139 __be16 other_duration; /* Other duration */
140 __be16 tx_last_duration; /* TX Last duration */
141 __be16 tx_other_duration; /* TX Other Duration */
142 __be16 last_11g_len; /* Length of last 11g */
143 __be16 other_11g_len; /* Length of other 11g */
144
145 __be16 last_11b_len; /* Length of last 11b */
146 __be16 other_11b_len; /* Length of other 11b */
147
148
149 __be16 reg6;
150#define MBF 0xF000 /* mbf */
151#define MBF_SHIFT 12
152#define RSVD4 0x0FFF /* rsvd4 */
153#define RSVD4_SHIFT 0
154
155 __be16 rx_frag_stat; /* RX fragmentation status */
156
157 __be32 time_stamp; /* TimeStamp */
158 __be32 phy_stats_hi; /* PHY stats hi */
159 __be32 phy_stats_lo; /* PHY stats lo */
160 __be32 mic_key0; /* MIC key 0 */
161 __be32 mic_key1; /* MIC key 1 */
162
163 union { /* RX/TX Union */
164 struct agnx_rx rx;
165 struct agnx_tx tx;
166 };
167
168 u8 rx_channel; /* Receive Channel */
169 PAD_BYTES(3);
170
171 u8 reserved[4];
172} __attribute__((__packed__));
173
174
175struct agnx_desc {
176#define PACKET_LEN 0xFFF00000
177#define PACKET_LEN_SHIFT 20
178/* ------------------------------------------------ */
179#define FIRST_PACKET_MASK 0x00080000
180#define FIRST_PACKET_MASK_SHIFT 19
181#define FIRST_RESERV2 0x00040000
182#define FIRST_RESERV2_SHIFT 18
183#define FIRST_TKIP_ERROR 0x00020000
184#define FIRST_TKIP_ERROR_SHIFT 17
185#define FIRST_TKIP_PACKET 0x00010000
186#define FIRST_TKIP_PACKET_SHIFT 16
187#define FIRST_RESERV1 0x0000F000
188#define FIRST_RESERV1_SHIFT 12
189#define FIRST_FRAG_LEN 0x00000FF8
190#define FIRST_FRAG_LEN_SHIFT 3
191/* ------------------------------------------------ */
192#define SUB_RESERV2 0x000c0000
193#define SUB_RESERV2_SHIFT 18
194#define SUB_TKIP_ERROR 0x00020000
195#define SUB_TKIP_ERROR_SHIFT 17
196#define SUB_TKIP_PACKET 0x00010000
197#define SUB_TKIP_PACKET_SHIFT 16
198#define SUB_RESERV1 0x00008000
199#define SUB_RESERV1_SHIFT 15
200#define SUB_FRAG_LEN 0x00007FF8
201#define SUB_FRAG_LEN_SHIFT 3
202/* ------------------------------------------------ */
203#define FIRST_FRAG 0x00000004
204#define FIRST_FRAG_SHIFT 2
205#define LAST_FRAG 0x00000002
206#define LAST_FRAG_SHIFT 1
207#define OWNER 0x00000001
208#define OWNER_SHIFT 0
209 __be32 frag;
210 __be32 dma_addr;
211} __attribute__((__packed__));
212
213enum {HEADER, PACKET};
214
215struct agnx_info {
216 struct sk_buff *skb;
217 dma_addr_t mapping;
218 u32 dma_len; /* dma buffer len */
219 /* Below fields only useful for tx */
220 u32 hdr_len; /* ieee80211 header length */
221 unsigned int type;
222 struct ieee80211_tx_info *txi;
223 struct ieee80211_hdr hdr;
224};
225
226
227struct agnx_ring {
228 struct agnx_desc *desc;
229 dma_addr_t dma;
230 struct agnx_info *info;
231 /* Will lead to overflow when sent packet number enough? */
232 unsigned int idx;
233 unsigned int idx_sent; /* only useful for txd and txm */
234 unsigned int size;
235};
236
237#define AGNX_RX_RING_SIZE 128
238#define AGNX_TXD_RING_SIZE 256
239#define AGNX_TXM_RING_SIZE 128
240
241void disable_rx_interrupt(struct agnx_priv *priv);
242void enable_rx_interrupt(struct agnx_priv *priv);
243int fill_rings(struct agnx_priv *priv);
244void unfill_rings(struct agnx_priv *priv);
245void handle_rx_irq(struct agnx_priv *priv);
246void handle_txd_irq(struct agnx_priv *priv);
247void handle_txm_irq(struct agnx_priv *priv);
248void handle_other_irq(struct agnx_priv *priv);
249int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb);
250#endif /* AGNX_XMIT_H_ */
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 247194992374..eb675635ae60 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -2,6 +2,7 @@ menu "Android"
2 2
3config ANDROID 3config ANDROID
4 bool "Android Drivers" 4 bool "Android Drivers"
5 depends on BROKEN
5 default N 6 default N
6 ---help--- 7 ---help---
7 Enable support for various drivers needed on the Android platform 8 Enable support for various drivers needed on the Android platform
diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
index 94c5d27d24d7..cda26bb493b3 100644
--- a/drivers/staging/b3dfg/b3dfg.c
+++ b/drivers/staging/b3dfg/b3dfg.c
@@ -36,6 +36,7 @@
36#include <linux/wait.h> 36#include <linux/wait.h>
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39#include <linux/sched.h>
39 40
40static unsigned int b3dfg_nbuf = 2; 41static unsigned int b3dfg_nbuf = 2;
41 42
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index af723cb9d08f..d63c889ce557 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,7 +1,7 @@
1config COMEDI 1config COMEDI
2 tristate "Data acquisition support (comedi)" 2 tristate "Data acquisition support (comedi)"
3 default N 3 default N
4 depends on m 4 depends on m && (PCI || PCMCIA || PCCARD || USB)
5 ---help--- 5 ---help---
6 Enable support a wide range of data acquisition devices 6 Enable support a wide range of data acquisition devices
7 for Linux. 7 for Linux.
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index f54bb9b3ee37..aaad76e0a76a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2337,7 +2337,7 @@ static int resize_async_buffer(struct comedi_device *dev,
2337 } 2337 }
2338 2338
2339 DPRINTK("comedi%i subd %d buffer resized to %i bytes\n", 2339 DPRINTK("comedi%i subd %d buffer resized to %i bytes\n",
2340 dev->minor, s - dev->subdevices, async->prealloc_bufsz); 2340 dev->minor, (int)(s - dev->subdevices), async->prealloc_bufsz);
2341 return 0; 2341 return 0;
2342} 2342}
2343 2343
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 12d12b43a6f1..80c0df8656f3 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -744,7 +744,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
744 744
745 /* Initialize the pcmcia_device structure */ 745 /* Initialize the pcmcia_device structure */
746 /* Interrupt setup */ 746 /* Interrupt setup */
747 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 747 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
748 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 748 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
749 link->irq.Handler = NULL; 749 link->irq.Handler = NULL;
750 750
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 4d10bc31d461..09e6e3bdfb3e 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -53,7 +53,8 @@ Passing a zero for an option is the same as leaving it unspecified.
53 * Some drivers use arrays such as this, other do not. 53 * Some drivers use arrays such as this, other do not.
54 */ 54 */
55struct pcidio_board { 55struct pcidio_board {
56 const char *name; /* anme of the board */ 56 const char *name; /* name of the board */
57 int dev_id;
57 int n_8255; /* number of 8255 chips on board */ 58 int n_8255; /* number of 8255 chips on board */
58 59
59 /* indices of base address regions */ 60 /* indices of base address regions */
@@ -64,18 +65,21 @@ struct pcidio_board {
64static const struct pcidio_board pcidio_boards[] = { 65static const struct pcidio_board pcidio_boards[] = {
65 { 66 {
66 .name = "pci-dio24", 67 .name = "pci-dio24",
68 .dev_id = 0x0028,
67 .n_8255 = 1, 69 .n_8255 = 1,
68 .pcicontroler_badrindex = 1, 70 .pcicontroler_badrindex = 1,
69 .dioregs_badrindex = 2, 71 .dioregs_badrindex = 2,
70 }, 72 },
71 { 73 {
72 .name = "pci-dio24h", 74 .name = "pci-dio24h",
75 .dev_id = 0x0014,
73 .n_8255 = 1, 76 .n_8255 = 1,
74 .pcicontroler_badrindex = 1, 77 .pcicontroler_badrindex = 1,
75 .dioregs_badrindex = 2, 78 .dioregs_badrindex = 2,
76 }, 79 },
77 { 80 {
78 .name = "pci-dio48h", 81 .name = "pci-dio48h",
82 .dev_id = 0x000b,
79 .n_8255 = 2, 83 .n_8255 = 2,
80 .pcicontroler_badrindex = 0, 84 .pcicontroler_badrindex = 0,
81 .dioregs_badrindex = 1, 85 .dioregs_badrindex = 1,
@@ -206,7 +210,7 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
206 continue; 210 continue;
207 /* loop through cards supported by this driver */ 211 /* loop through cards supported by this driver */
208 for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) { 212 for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) {
209 if (pcidio_pci_table[index].device != pcidev->device) 213 if (pcidio_boards[index].dev_id != pcidev->device)
210 continue; 214 continue;
211 215
212 /* was a particular bus/slot requested? */ 216 /* was a particular bus/slot requested? */
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 14bf29bf5781..0d2c2eb23b23 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -515,6 +515,7 @@ static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
515{ 515{
516 struct poll_delay_t result = poll_delay_min_max(1000, 2000); 516 struct poll_delay_t result = poll_delay_min_max(1000, 2000);
517 struct jr3_pci_subdev_private *p = s->private; 517 struct jr3_pci_subdev_private *p = s->private;
518 int i;
518 519
519 if (p) { 520 if (p) {
520 volatile struct jr3_channel *channel = p->channel; 521 volatile struct jr3_channel *channel = p->channel;
@@ -570,18 +571,11 @@ static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
570 p->serial_no); 571 p->serial_no);
571 572
572 /* Transformation all zeros */ 573 /* Transformation all zeros */
573 transf.link[0].link_type = 574 for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
574 (enum link_types)0; 575 transf.link[i].link_type =
575 transf.link[0].link_amount = 0; 576 (enum link_types)0;
576 transf.link[1].link_type = 577 transf.link[i].link_amount = 0;
577 (enum link_types)0; 578 }
578 transf.link[1].link_amount = 0;
579 transf.link[2].link_type =
580 (enum link_types)0;
581 transf.link[2].link_amount = 0;
582 transf.link[3].link_type =
583 (enum link_types)0;
584 transf.link[3].link_amount = 0;
585 579
586 set_transforms(channel, transf, 0); 580 set_transforms(channel, transf, 0);
587 use_transform(channel, 0); 581 use_transform(channel, 0);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 2cda7ad1d32f..80e192d2e77e 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -51,6 +51,7 @@ from http://www.comedi.org
51*/ 51*/
52 52
53#include <linux/interrupt.h> 53#include <linux/interrupt.h>
54#include <linux/sched.h>
54#include "../comedidev.h" 55#include "../comedidev.h"
55 56
56#include "comedi_pci.h" 57#include "comedi_pci.h"
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 6b118c15b49e..bbf75eb6d7f2 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -418,15 +418,15 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
418 return -EINVAL; 418 return -EINVAL;
419 base_bitfield_channel = CR_CHAN(insn->chanspec); 419 base_bitfield_channel = CR_CHAN(insn->chanspec);
420 for (j = 0; j < max_ports_per_bitfield; ++j) { 420 for (j = 0; j < max_ports_per_bitfield; ++j) {
421 const unsigned port_offset = ni_65xx_port_by_channel(base_bitfield_channel) + j;
421 const unsigned port = 422 const unsigned port =
422 sprivate(s)->base_port + 423 sprivate(s)->base_port + port_offset;
423 ni_65xx_port_by_channel(base_bitfield_channel) + j;
424 unsigned base_port_channel; 424 unsigned base_port_channel;
425 unsigned port_mask, port_data, port_read_bits; 425 unsigned port_mask, port_data, port_read_bits;
426 int bitshift; 426 int bitshift;
427 if (port >= ni_65xx_total_num_ports(board(dev))) 427 if (port >= ni_65xx_total_num_ports(board(dev)))
428 break; 428 break;
429 base_port_channel = port * ni_65xx_channels_per_port; 429 base_port_channel = port_offset * ni_65xx_channels_per_port;
430 port_mask = data[0]; 430 port_mask = data[0];
431 port_data = data[1]; 431 port_data = data[1];
432 bitshift = base_port_channel - base_bitfield_channel; 432 bitshift = base_port_channel - base_bitfield_channel;
@@ -457,6 +457,12 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
457 port_read_bits = 457 port_read_bits =
458 readb(private(dev)->mite->daq_io_addr + Port_Data(port)); 458 readb(private(dev)->mite->daq_io_addr + Port_Data(port));
459/* printk("read 0x%x from port %i\n", port_read_bits, port); */ 459/* printk("read 0x%x from port %i\n", port_read_bits, port); */
460 if (s->type == COMEDI_SUBD_DO && board(dev)->invert_outputs) {
461 /* Outputs inverted, so invert value read back from
462 * DO subdevice. (Does not apply to boards with DIO
463 * subdevice.) */
464 port_read_bits ^= 0xFF;
465 }
460 if (bitshift > 0) { 466 if (bitshift > 0) {
461 port_read_bits <<= bitshift; 467 port_read_bits <<= bitshift;
462 } else { 468 } else {
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 6a7797604c97..ec31a3970664 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -520,7 +520,7 @@ static int dio700_cs_attach(struct pcmcia_device *link)
520 link->priv = local; 520 link->priv = local;
521 521
522 /* Interrupt setup */ 522 /* Interrupt setup */
523 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 523 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
524 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 524 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
525 link->irq.Handler = NULL; 525 link->irq.Handler = NULL;
526 526
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index b06e81c526e8..0700a8bddd1e 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -271,7 +271,7 @@ static int dio24_cs_attach(struct pcmcia_device *link)
271 link->priv = local; 271 link->priv = local;
272 272
273 /* Interrupt setup */ 273 /* Interrupt setup */
274 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 274 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
275 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 275 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
276 link->irq.Handler = NULL; 276 link->irq.Handler = NULL;
277 277
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 57aecfa883c7..a3053b8da1c6 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -246,7 +246,7 @@ static int labpc_cs_attach(struct pcmcia_device *link)
246 link->priv = local; 246 link->priv = local;
247 247
248 /* Interrupt setup */ 248 /* Interrupt setup */
249 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE; 249 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
250 link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_PULSE_ID; 250 link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_PULSE_ID;
251 link->irq.Handler = NULL; 251 link->irq.Handler = NULL;
252 252
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index e3ffb067ead1..753ee0512342 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -62,6 +62,7 @@
62/* #define DEBUG_STATUS_B */ 62/* #define DEBUG_STATUS_B */
63 63
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include <linux/sched.h>
65#include "8255.h" 66#include "8255.h"
66#include "mite.h" 67#include "mite.h"
67#include "comedi_fc.h" 68#include "comedi_fc.h"
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index b7322963cf78..9aef87fc81dc 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -273,7 +273,7 @@ static int cs_attach(struct pcmcia_device *link)
273{ 273{
274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
275 link->io.NumPorts1 = 16; 275 link->io.NumPorts1 = 16;
276 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 276 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
277 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 277 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
278 link->conf.Attributes = CONF_ENABLE_IRQ; 278 link->conf.Attributes = CONF_ENABLE_IRQ;
279 link->conf.IntType = INT_MEMORY_AND_IO; 279 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 52b2eca9e73d..d544698f2414 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -70,6 +70,7 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
70/* #define DEBUG_FLAGS */ 70/* #define DEBUG_FLAGS */
71 71
72#include <linux/interrupt.h> 72#include <linux/interrupt.h>
73#include <linux/sched.h>
73#include "../comedidev.h" 74#include "../comedidev.h"
74 75
75#include "mite.h" 76#include "mite.h"
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 19d87553d906..24c8b8ed5b4c 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -29,7 +29,7 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
29 PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E, 29 PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
30 PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E, 30 PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
31 PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E, 31 PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
32 PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, 32 PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225,
33 PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259, 33 PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
34 PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289, 34 PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
35 PCI-6711, PXI-6711, PCI-6713, PXI-6713, 35 PCI-6711, PXI-6711, PCI-6713, PXI-6713,
@@ -179,6 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
179 PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 179 PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
180 PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 180 PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
181 PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 181 PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
182 PCI_VENDOR_ID_NATINST, 0x716d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
182 PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 183 PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
183 PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 184 PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
184 PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 185 PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
@@ -953,6 +954,25 @@ static const struct ni_board_struct ni_boards[] = {
953 .has_8255 = 0, 954 .has_8255 = 0,
954 }, 955 },
955 { 956 {
957 .device_id = 0x716d,
958 .name = "pxi-6225",
959 .n_adchan = 80,
960 .adbits = 16,
961 .ai_fifo_depth = 4095,
962 .gainlkup = ai_gain_622x,
963 .ai_speed = 4000,
964 .n_aochan = 2,
965 .aobits = 16,
966 .ao_fifo_depth = 8191,
967 .ao_range_table = &range_ni_M_622x_ao,
968 .reg_type = ni_reg_622x,
969 .ao_unipolar = 0,
970 .ao_speed = 1200,
971 .num_p0_dio_channels = 32,
972 .caldac = {caldac_none},
973 .has_8255 = 0,
974 },
975 {
956 .device_id = 0x70aa, 976 .device_id = 0x70aa,
957 .name = "pci-6229", 977 .name = "pci-6229",
958 .n_adchan = 32, 978 .n_adchan = 32,
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index f63bdc35cffd..344b82353e08 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -1079,7 +1079,7 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1079 link->priv = local; 1079 link->priv = local;
1080 1080
1081 /* Interrupt setup */ 1081 /* Interrupt setup */
1082 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1082 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
1083 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 1083 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1084 link->irq.Handler = daqp_interrupt; 1084 link->irq.Handler = daqp_interrupt;
1085 link->irq.Instance = local; 1085 link->irq.Instance = local;
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index b89e1ec267c5..07c21e686f27 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -43,6 +43,7 @@ comedi_config /dev/comedi0 s526 0x2C0,0x3
43 43
44#include "../comedidev.h" 44#include "../comedidev.h"
45#include <linux/ioport.h> 45#include <linux/ioport.h>
46#include <asm/byteorder.h>
46 47
47#define S526_SIZE 64 48#define S526_SIZE 64
48 49
@@ -113,6 +114,7 @@ static const int s526_ports[] = {
113}; 114};
114 115
115struct counter_mode_register_t { 116struct counter_mode_register_t {
117#if defined (__LITTLE_ENDIAN_BITFIELD)
116 unsigned short coutSource:1; 118 unsigned short coutSource:1;
117 unsigned short coutPolarity:1; 119 unsigned short coutPolarity:1;
118 unsigned short autoLoadResetRcap:3; 120 unsigned short autoLoadResetRcap:3;
@@ -124,12 +126,27 @@ struct counter_mode_register_t {
124 unsigned short outputRegLatchCtrl:1; 126 unsigned short outputRegLatchCtrl:1;
125 unsigned short preloadRegSel:1; 127 unsigned short preloadRegSel:1;
126 unsigned short reserved:1; 128 unsigned short reserved:1;
129 #elif defined(__BIG_ENDIAN_BITFIELD)
130 unsigned short reserved:1;
131 unsigned short preloadRegSel:1;
132 unsigned short outputRegLatchCtrl:1;
133 unsigned short countDirCtrl:1;
134 unsigned short countDir:1;
135 unsigned short clockSource:2;
136 unsigned short ctEnableCtrl:2;
137 unsigned short hwCtEnableSource:2;
138 unsigned short autoLoadResetRcap:3;
139 unsigned short coutPolarity:1;
140 unsigned short coutSource:1;
141#else
142#error Unknown bit field order
143#endif
127}; 144};
128 145
129union { 146union cmReg {
130 struct counter_mode_register_t reg; 147 struct counter_mode_register_t reg;
131 unsigned short value; 148 unsigned short value;
132} cmReg; 149};
133 150
134#define MAX_GPCT_CONFIG_DATA 6 151#define MAX_GPCT_CONFIG_DATA 6
135 152
@@ -285,6 +302,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
285 int i, n; 302 int i, n;
286/* short value; */ 303/* short value; */
287/* int subdev_channel = 0; */ 304/* int subdev_channel = 0; */
305 union cmReg cmReg;
288 306
289 printk("comedi%d: s526: ", dev->minor); 307 printk("comedi%d: s526: ", dev->minor);
290 308
@@ -375,7 +393,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
375 if (thisboard->have_dio) { 393 if (thisboard->have_dio) {
376 s->type = COMEDI_SUBD_DIO; 394 s->type = COMEDI_SUBD_DIO;
377 s->subdev_flags = SDF_READABLE | SDF_WRITABLE; 395 s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
378 s->n_chan = 2; 396 s->n_chan = 8;
379 s->maxdata = 1; 397 s->maxdata = 1;
380 s->range_table = &range_digital; 398 s->range_table = &range_digital;
381 s->insn_bits = s526_dio_insn_bits; 399 s->insn_bits = s526_dio_insn_bits;
@@ -435,11 +453,11 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
435 udelay(1000); 453 udelay(1000);
436 printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n))); 454 printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n)));
437 455
438 /* Load the pre-laod register high word */ 456 /* Load the pre-load register high word */
439/* value = (short) (0x55); */ 457/* value = (short) (0x55); */
440/* outw(value, ADDR_CHAN_REG(REG_C0H, n)); */ 458/* outw(value, ADDR_CHAN_REG(REG_C0H, n)); */
441 459
442 /* Load the pre-laod register low word */ 460 /* Load the pre-load register low word */
443/* value = (short)(0xaa55); */ 461/* value = (short)(0xaa55); */
444/* outw(value, ADDR_CHAN_REG(REG_C0L, n)); */ 462/* outw(value, ADDR_CHAN_REG(REG_C0L, n)); */
445 463
@@ -516,6 +534,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
516 int subdev_channel = CR_CHAN(insn->chanspec); /* Unpack chanspec */ 534 int subdev_channel = CR_CHAN(insn->chanspec); /* Unpack chanspec */
517 int i; 535 int i;
518 short value; 536 short value;
537 union cmReg cmReg;
519 538
520/* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n", subdev_channel); */ 539/* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n", subdev_channel); */
521 540
@@ -568,19 +587,8 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
568 587
569#if 1 588#if 1
570 /* Set Counter Mode Register */ 589 /* Set Counter Mode Register */
571 cmReg.reg.coutSource = 0; /* out RCAP */ 590 cmReg.value = insn->data[1] & 0xFFFF;
572 cmReg.reg.coutPolarity = 0; /* Polarity inverted */
573 cmReg.reg.autoLoadResetRcap = 0; /* Auto load disabled */
574 cmReg.reg.hwCtEnableSource = 2; /* NOT RCAP */
575 cmReg.reg.ctEnableCtrl = 1; /* 1: Software, >1 : Hardware */
576 cmReg.reg.clockSource = 3; /* x4 */
577 cmReg.reg.countDir = 0; /* up */
578 cmReg.reg.countDirCtrl = 0; /* quadrature */
579 cmReg.reg.outputRegLatchCtrl = 0; /* latch on read */
580 cmReg.reg.preloadRegSel = 0; /* PR0 */
581 cmReg.reg.reserved = 0;
582 591
583 /* Set Counter Mode Register */
584/* printk("s526: Counter Mode register=%x\n", cmReg.value); */ 592/* printk("s526: Counter Mode register=%x\n", cmReg.value); */
585 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 593 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
586 594
@@ -615,11 +623,11 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
615 cmReg.value = (short)(insn->data[1] & 0xFFFF); 623 cmReg.value = (short)(insn->data[1] & 0xFFFF);
616 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 624 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
617 625
618 /* Load the pre-laod register high word */ 626 /* Load the pre-load register high word */
619 value = (short)((insn->data[2] >> 16) & 0xFFFF); 627 value = (short)((insn->data[2] >> 16) & 0xFFFF);
620 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); 628 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
621 629
622 /* Load the pre-laod register low word */ 630 /* Load the pre-load register low word */
623 value = (short)(insn->data[2] & 0xFFFF); 631 value = (short)(insn->data[2] & 0xFFFF);
624 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); 632 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
625 633
@@ -653,11 +661,11 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
653 cmReg.reg.preloadRegSel = 0; /* PR0 */ 661 cmReg.reg.preloadRegSel = 0; /* PR0 */
654 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 662 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
655 663
656 /* Load the pre-laod register 0 high word */ 664 /* Load the pre-load register 0 high word */
657 value = (short)((insn->data[2] >> 16) & 0xFFFF); 665 value = (short)((insn->data[2] >> 16) & 0xFFFF);
658 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); 666 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
659 667
660 /* Load the pre-laod register 0 low word */ 668 /* Load the pre-load register 0 low word */
661 value = (short)(insn->data[2] & 0xFFFF); 669 value = (short)(insn->data[2] & 0xFFFF);
662 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); 670 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
663 671
@@ -666,17 +674,17 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
666 cmReg.reg.preloadRegSel = 1; /* PR1 */ 674 cmReg.reg.preloadRegSel = 1; /* PR1 */
667 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 675 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
668 676
669 /* Load the pre-laod register 1 high word */ 677 /* Load the pre-load register 1 high word */
670 value = (short)((insn->data[3] >> 16) & 0xFFFF); 678 value = (short)((insn->data[3] >> 16) & 0xFFFF);
671 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); 679 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
672 680
673 /* Load the pre-laod register 1 low word */ 681 /* Load the pre-load register 1 low word */
674 value = (short)(insn->data[3] & 0xFFFF); 682 value = (short)(insn->data[3] & 0xFFFF);
675 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); 683 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
676 684
677 /* Write the Counter Control Register */ 685 /* Write the Counter Control Register */
678 if (insn->data[3] != 0) { 686 if (insn->data[4] != 0) {
679 value = (short)(insn->data[3] & 0xFFFF); 687 value = (short)(insn->data[4] & 0xFFFF);
680 outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel)); 688 outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
681 } 689 }
682 break; 690 break;
@@ -698,11 +706,11 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
698 cmReg.reg.preloadRegSel = 0; /* PR0 */ 706 cmReg.reg.preloadRegSel = 0; /* PR0 */
699 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 707 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
700 708
701 /* Load the pre-laod register 0 high word */ 709 /* Load the pre-load register 0 high word */
702 value = (short)((insn->data[2] >> 16) & 0xFFFF); 710 value = (short)((insn->data[2] >> 16) & 0xFFFF);
703 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); 711 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
704 712
705 /* Load the pre-laod register 0 low word */ 713 /* Load the pre-load register 0 low word */
706 value = (short)(insn->data[2] & 0xFFFF); 714 value = (short)(insn->data[2] & 0xFFFF);
707 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); 715 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
708 716
@@ -711,17 +719,17 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
711 cmReg.reg.preloadRegSel = 1; /* PR1 */ 719 cmReg.reg.preloadRegSel = 1; /* PR1 */
712 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); 720 outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
713 721
714 /* Load the pre-laod register 1 high word */ 722 /* Load the pre-load register 1 high word */
715 value = (short)((insn->data[3] >> 16) & 0xFFFF); 723 value = (short)((insn->data[3] >> 16) & 0xFFFF);
716 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); 724 outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
717 725
718 /* Load the pre-laod register 1 low word */ 726 /* Load the pre-load register 1 low word */
719 value = (short)(insn->data[3] & 0xFFFF); 727 value = (short)(insn->data[3] & 0xFFFF);
720 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); 728 outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
721 729
722 /* Write the Counter Control Register */ 730 /* Write the Counter Control Register */
723 if (insn->data[3] != 0) { 731 if (insn->data[4] != 0) {
724 value = (short)(insn->data[3] & 0xFFFF); 732 value = (short)(insn->data[4] & 0xFFFF);
725 outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel)); 733 outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
726 } 734 }
727 break; 735 break;
@@ -741,6 +749,7 @@ static int s526_gpct_winsn(struct comedi_device *dev,
741{ 749{
742 int subdev_channel = CR_CHAN(insn->chanspec); /* Unpack chanspec */ 750 int subdev_channel = CR_CHAN(insn->chanspec); /* Unpack chanspec */
743 short value; 751 short value;
752 union cmReg cmReg;
744 753
745 printk("s526: GPCT_INSN_WRITE on channel %d\n", subdev_channel); 754 printk("s526: GPCT_INSN_WRITE on channel %d\n", subdev_channel);
746 cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel)); 755 cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel));
@@ -775,9 +784,8 @@ static int s526_gpct_winsn(struct comedi_device *dev,
775 (devpriv->s526_gpct_config[subdev_channel]).data[1] = 784 (devpriv->s526_gpct_config[subdev_channel]).data[1] =
776 insn->data[1]; 785 insn->data[1];
777 } else { 786 } else {
778 printk("%d \t %d\n", insn->data[1], insn->data[2]); 787 printk("s526: INSN_WRITE: PTG: Problem with Pulse params -> %d %d\n",
779 printk 788 insn->data[0], insn->data[1]);
780 ("s526: INSN_WRITE: PTG: Problem with Pulse params\n");
781 return -EINVAL; 789 return -EINVAL;
782 } 790 }
783 791
@@ -949,7 +957,7 @@ static int s526_dio_insn_bits(struct comedi_device *dev,
949 data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */ 957 data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
950 /* or we could just return the software copy of the output values if 958 /* or we could just return the software copy of the output values if
951 * it was a purely digital output subdevice */ 959 * it was a purely digital output subdevice */
952 /* data[1]=s->state; */ 960 /* data[1]=s->state & 0xFF; */
953 961
954 return 2; 962 return 2;
955} 963}
@@ -959,28 +967,33 @@ static int s526_dio_insn_config(struct comedi_device *dev,
959 struct comedi_insn *insn, unsigned int *data) 967 struct comedi_insn *insn, unsigned int *data)
960{ 968{
961 int chan = CR_CHAN(insn->chanspec); 969 int chan = CR_CHAN(insn->chanspec);
962 short value; 970 int group, mask;
963 971
964 printk("S526 DIO insn_config\n"); 972 printk("S526 DIO insn_config\n");
965 973
966 if (insn->n != 1)
967 return -EINVAL;
968
969 value = inw(ADDR_REG(REG_DIO));
970
971 /* The input or output configuration of each digital line is 974 /* The input or output configuration of each digital line is
972 * configured by a special insn_config instruction. chanspec 975 * configured by a special insn_config instruction. chanspec
973 * contains the channel to be changed, and data[0] contains the 976 * contains the channel to be changed, and data[0] contains the
974 * value COMEDI_INPUT or COMEDI_OUTPUT. */ 977 * value COMEDI_INPUT or COMEDI_OUTPUT. */
975 978
976 if (data[0] == COMEDI_OUTPUT) { 979 group = chan >> 2;
977 value |= 1 << (chan + 10); /* bit 10/11 set the group 1/2's mode */ 980 mask = 0xF << (group << 2);
978 s->io_bits |= (0xF << chan); 981 switch (data[0]) {
979 } else { 982 case INSN_CONFIG_DIO_OUTPUT:
980 value &= ~(1 << (chan + 10)); /* 1 is output, 0 is input. */ 983 s->state |= 1 << (group + 10); // bit 10/11 set the group 1/2's mode
981 s->io_bits &= ~(0xF << chan); 984 s->io_bits |= mask;
985 break;
986 case INSN_CONFIG_DIO_INPUT:
987 s->state &= ~(1 << (group + 10));// 1 is output, 0 is input.
988 s->io_bits &= ~mask;
989 break;
990 case INSN_CONFIG_DIO_QUERY:
991 data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
992 return insn->n;
993 default:
994 return -EINVAL;
982 } 995 }
983 outw(value, ADDR_REG(REG_DIO)); 996 outw(s->state, ADDR_REG(REG_DIO));
984 997
985 return 1; 998 return 1;
986} 999}
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index a21967983942..82aa86e718b2 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -35,6 +35,7 @@ Status: in development
35 35
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/ioport.h> 37#include <linux/ioport.h>
38#include <linux/sched.h>
38 39
39#include <asm/termios.h> 40#include <asm/termios.h>
40#include <asm/ioctls.h> 41#include <asm/ioctls.h>
diff --git a/drivers/staging/cowloop/Kconfig b/drivers/staging/cowloop/Kconfig
deleted file mode 100644
index 58d2a23bd2c1..000000000000
--- a/drivers/staging/cowloop/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
1config COWLOOP
2 tristate "copy-on-write pseudo Block Driver"
3 depends on BLOCK
4 default n
5 ---help---
6 Cowloop is a "copy-on-write" pseudo block driver. It can be
7 stacked on top of a "real" block driver, and catches all write
8 operations on their way from the file systems layer above to
9 the real driver below, effectively shielding the lower driver
10 from those write accesses. The requests are then diverted to
11 an ordinary file, located somewhere else (configurable). Later
12 read requests are checked to see whether they can be serviced
13 by the "real" block driver below, or must be pulled in from
14 the diverted location. More information and userspace tools to
15 use the driver are on the project's website
16 http://www.ATComputing.nl/cowloop/
diff --git a/drivers/staging/cowloop/Makefile b/drivers/staging/cowloop/Makefile
deleted file mode 100644
index 2b6b81a63d21..000000000000
--- a/drivers/staging/cowloop/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_COWLOOP) += cowloop.o
diff --git a/drivers/staging/cowloop/TODO b/drivers/staging/cowloop/TODO
deleted file mode 100644
index 9399d1c16e15..000000000000
--- a/drivers/staging/cowloop/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
1TODO:
2 - checkpatch.pl cleanups
3 - run sparse to ensure clean
4 - fix up 32/64bit ioctl issues
5 - move proc file usage to debugfs
6 - audit ioctls
7 - add documentation
8 - get linux-fsdevel to review it
9
10Please send patches to "H.J. Thomassen" <hjt@ATComputing.nl> and
11Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/cowloop/cowloop.c b/drivers/staging/cowloop/cowloop.c
deleted file mode 100644
index a71c743a1196..000000000000
--- a/drivers/staging/cowloop/cowloop.c
+++ /dev/null
@@ -1,2842 +0,0 @@
1/*
2** COWLOOP block device driver (2.6 kernel compliant)
3** =======================================================================
4** Read-write loop-driver with copy-on-write functionality.
5**
6** Synopsis:
7**
8** modprobe cowloop [maxcows=..] [rdofile=..... cowfile=.... [option=r]]
9**
10** Definition of number of configured cowdevices:
11** maxcows= number of configured cowdevices (default: 16)
12** (do not confuse this with MAXCOWS: absolute maximum as compiled)
13**
14** One pair of filenames can be supplied during insmod/modprobe to open
15** the first cowdevice:
16** rdofile= read-only file (or filesystem)
17** cowfile= storage-space for modified blocks of read-only file(system)
18** option=r repair cowfile automatically if it appears to be dirty
19**
20** Other cowdevices can be activated via the command "cowdev"
21** whenever the cowloop-driver is loaded.
22**
23** The read-only file may be of type 'regular' or 'block-device'.
24**
25** The cowfile must be of type 'regular'.
26** If an existing regular file is used as cowfile, its contents will be
27** used again for the current read-only file. When the cowfile has not been
28** closed properly during a previous session (i.e. rmmod cowloop), the
29** cowloop-driver refuses to open it unless the parameter "option=r" is
30** specified.
31**
32** Layout of cowfile:
33**
34** +-----------------------------+
35** | cow head block | MAPUNIT bytes
36** |-----------------------------|
37** | | MAPUNIT bytes
38** |--- ---|
39** | | MAPUNIT bytes
40** |--- ---|
41** | used-block bitmap | MAPUNIT bytes
42** |-----------------------------|
43** | gap to align start-offset |
44** | to 4K multiple |
45** |-----------------------------| <---- start-offset cow blocks
46** | |
47** | written cow blocks | MAPUNIT bytes
48** | ..... |
49**
50** cowhead block:
51** - contains general info about the rdofile which is related
52** to this cowfile
53**
54** used-block bitmap:
55** - contains one bit per block with a size of MAPUNIT bytes
56** - bit-value '1' = block has been written on cow
57** '0' = block unused on cow
58** - total bitmap rounded to multiples of MAPUNIT
59**
60** ============================================================================
61** Author: Gerlof Langeveld - AT Computing (March 2003)
62** Current maintainer: Hendrik-Jan Thomassen - AT Computing (Summer 2006)
63** Email: hjt@ATComputing.nl
64** ----------------------------------------------------------------------------
65** Copyright (C) 2003-2009 AT Consultancy
66**
67** This program is free software; you can redistribute it and/or modify it
68** under the terms of the GNU General Public License as published by the
69** Free Software Foundation; either version 2, or (at your option) any
70** later version.
71**
72** This program is distributed in the hope that it will be useful, but
73** WITHOUT ANY WARRANTY; without even the implied warranty of
74** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
75** See the GNU General Public License for more details.
76**
77** You should have received a copy of the GNU General Public License
78** along with this program; if not, write to the Free Software
79** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
80** ----------------------------------------------------------------------------
81**
82** Major modifications:
83**
84** 200405 Ported to kernel-version 2.6 Hendrik-Jan Thomassen
85** 200405 Added cowhead to cowfile to garantee
86** consistency with read-only file Gerlof Langeveld
87** 200405 Postponed flushing of bitmaps to improve
88** performance. Gerlof Langeveld
89** 200405 Inline recovery for dirty cowfiles. Gerlof Langeveld
90** 200502 Redesign to support more cowdevices. Gerlof Langeveld
91** 200502 Support devices/file > 2 Gbytes. Gerlof Langeveld
92** 200507 Check for free space to expand cowfile. Gerlof Langeveld
93** 200902 Upgrade for kernel 2.6.28 Hendrik-Jan Thomassen
94**
95** Inspired by
96** loop.c by Theodore Ts'o and
97** cloop.c by Paul `Rusty' Russell & Klaus Knopper.
98**
99** Design-considerations:
100**
101** For the first experiments with the cowloop-driver, the request-queue
102** made use of the do_generic_file_read() which worked fine except
103** in combination with the cloop-driver; that combination
104** resulted in a non-interruptible hangup of the system during
105** heavy load. Other experiments using the `make_request' interface also
106** resulted in unpredictable system hangups (with proper use of spinlocks).
107**
108** To overcome these problems, the cowloop-driver starts a kernel-thread
109** for every active cowdevice.
110** All read- and write-request on the read-only file and copy-on-write file
111** are handled in the context of that thread.
112** A scheme has been designed to wakeup the kernel-thread as
113** soon as I/O-requests are available in the request-queue; this thread
114** handles the requests one-by-one by calling the proper read- or
115** write-function related to the open read-only file or copy-on-write file.
116** When all pending requests have been handled, the kernel-thread goes
117** back to sleep-state.
118** This approach requires some additional context-switches; however the
119** performance loss during heavy I/O is less than 3%.
120**
121** -------------------------------------------------------------------------*/
122/* The following is the cowloop package version number. It must be
123 identical to the content of the include-file "version.h" that is
124 used in all supporting utilities: */
125char revision[] = "$Revision: 3.1 $"; /* cowlo_init_module() has
126 assumptions about this string's format */
127
128/* Note that the following numbers are *not* the cowloop package version
129 numbers, but separate revision history numbers to track the
130 modifications of this particular source file: */
131/* $Log: cowloop.c,v $
132**
133** Revision 1.30 2009/02/08 hjt
134** Integrated earlier fixes
135** Upgraded to kernel 2.6.28 (thanks Jerome Poulin)
136**
137** Revision 1.29 2006/12/03 22:12:00 hjt
138** changed 'cowdevlock' from spinlock to semaphore, to avoid
139** "scheduling while atomic". Contributed by Juergen Christ.
140** Added version.h again
141**
142** Revision 1.28 2006/08/16 16:00:00 hjt
143** malloc each individual cowloopdevice struct separately
144**
145** Revision 1.27 2006/03/14 14:57:03 root
146** Removed include version.h
147**
148** Revision 1.26 2005/08/08 11:22:48 root
149** Implement possibility to close a cow file or reopen a cowfile read-only.
150**
151** Revision 1.25 2005/08/03 14:00:39 root
152** Added modinfo info to driver.
153**
154** Revision 1.24 2005/07/21 06:14:53 root
155** Cosmetic changes source code.
156**
157** Revision 1.23 2005/07/20 13:07:32 root
158** Supply ioctl to write watchdog program to react on lack of cowfile space.
159**
160** Revision 1.22 2005/07/20 07:53:34 root
161** Regular verification of free space in filesystem holding the cowfile
162** (give warnings whenever space is almost exhausted).
163** Terminology change: checksum renamed to fingerprint.
164**
165** Revision 1.21 2005/07/19 09:21:52 root
166** Removing maximum limit of 16 Gb per cowdevice.
167**
168** Revision 1.20 2005/07/19 07:50:33 root
169** Minor bugfixes and cosmetic changes.
170**
171** Revision 1.19 2005/06/10 12:29:55 root
172** Removed lock/unlock operation from cowlo_open().
173**
174** Revision 1.18 2005/05/09 12:56:26 root
175** Allow a cowdevice to be open more than once
176** (needed for support of ReiserFS and XFS).
177**
178** Revision 1.17 2005/03/17 14:36:16 root
179** Fixed some license issues.
180**
181** Revision 1.16 2005/03/07 14:42:05 root
182** Only allow one parallel open per cowdevice.
183**
184** Revision 1.15 2005/02/18 11:52:04 gerlof
185** Redesign to support more than one cowdevice > 2 Gb space.
186**
187** Revision 1.14 2004/08/17 14:19:16 gerlof
188** Modified output of /proc/cowloop.
189**
190** Revision 1.13 2004/08/16 07:21:10 gerlof
191** Separate statistical counter for read on rdofile and cowfile.
192**
193** Revision 1.12 2004/08/11 06:52:11 gerlof
194** Modified messages.
195**
196** Revision 1.11 2004/08/11 06:44:11 gerlof
197** Modified log messages.
198**
199** Revision 1.10 2004/08/10 12:27:27 gerlof
200** Cosmetic changes.
201**
202** Revision 1.9 2004/08/09 11:43:37 gerlof
203** Removed double definition of major number (COWMAJOR).
204**
205** Revision 1.8 2004/08/09 08:03:39 gerlof
206** Cleanup of messages.
207**
208** Revision 1.7 2004/05/27 06:37:33 gerlof
209** Modified /proc message.
210**
211** Revision 1.6 2004/05/26 21:23:28 gerlof
212** Modified /proc output.
213**
214** Revision 1.5 2004/05/26 13:23:34 gerlof
215** Support cowsync to force flushing the bitmaps and cowhead.
216**
217** Revision 1.4 2004/05/26 11:11:10 gerlof
218** Updated the comment to the actual situation.
219**
220** Revision 1.3 2004/05/26 10:50:00 gerlof
221** Implemented recovery-option.
222**
223** Revision 1.2 2004/05/25 15:14:41 gerlof
224** Modified bitmap flushing strategy.
225**
226*/
227
228#define COWMAJOR 241
229
230// #define COWDEBUG
231
232#ifdef COWDEBUG
233#define DEBUGP printk
234#define DCOW KERN_ALERT
235#else
236#define DEBUGP(format, x...)
237#endif
238
239#include <linux/types.h>
240#include <linux/autoconf.h>
241#ifndef AUTOCONF_INCLUDED
242#include <linux/config.h>
243#endif
244#include <linux/module.h>
245#include <linux/version.h>
246#include <linux/moduleparam.h>
247#include <linux/init.h>
248#include <linux/errno.h>
249#include <linux/kernel.h>
250#include <linux/major.h>
251#include <linux/sched.h>
252#include <linux/fs.h>
253#include <linux/file.h>
254#include <linux/stat.h>
255#include <linux/vmalloc.h>
256#include <linux/slab.h>
257#include <linux/semaphore.h>
258#include <asm/uaccess.h>
259#include <linux/proc_fs.h>
260#include <linux/blkdev.h>
261#include <linux/buffer_head.h>
262#include <linux/hdreg.h>
263#include <linux/genhd.h>
264#include <linux/statfs.h>
265
266#include "cowloop.h"
267
268MODULE_LICENSE("GPL");
269/* MODULE_AUTHOR("Gerlof Langeveld <gerlof@ATComputing.nl>"); obsolete address */
270MODULE_AUTHOR("Hendrik-Jan Thomassen <hjt@ATComputing.nl>"); /* current maintainer */
271MODULE_DESCRIPTION("Copy-on-write loop driver");
272MODULE_PARM_DESC(maxcows, " Number of configured cowdevices (default 16)");
273MODULE_PARM_DESC(rdofile, " Read-only file for /dev/cow/0");
274MODULE_PARM_DESC(cowfile, " Cowfile for /dev/cow/0");
275MODULE_PARM_DESC(option, " Repair cowfile if inconsistent: option=r");
276
277#define DEVICE_NAME "cow"
278
279#define DFLCOWS 16 /* default cowloop devices */
280
281static int maxcows = DFLCOWS;
282module_param(maxcows, int, 0);
283static char *rdofile = "";
284module_param(rdofile, charp, 0);
285static char *cowfile = "";
286module_param(cowfile, charp, 0);
287static char *option = "";
288module_param(option, charp, 0);
289
290/*
291** per cowdevice several bitmap chunks are allowed of MAPCHUNKSZ each
292**
293** each bitmap chunk can describe MAPCHUNKSZ * 8 * MAPUNIT bytes of data
294** suppose:
295** MAPCHUNKSZ 4096 and MAPUNIT 1024 --> 4096 * 8 * 1024 = 32 Mb per chunk
296*/
297#define MAPCHUNKSZ 4096 /* #bytes per bitmap chunk (do not change) */
298
299#define SPCMINBLK 100 /* space threshold to give warning messages */
300#define SPCDFLINTVL 16 /* once every SPCDFLINTVL writes to cowfile, */
301 /* available space in filesystem is checked */
302
303#define CALCMAP(x) ((x)/(MAPCHUNKSZ*8))
304#define CALCBYTE(x) (((x)%(MAPCHUNKSZ*8))>>3)
305#define CALCBIT(x) ((x)&7)
306
307#define ALLCOW 1
308#define ALLRDO 2
309#define MIXEDUP 3
310
311static char allzeroes[MAPUNIT];
312
313/*
314** administration per cowdevice (pair of cowfile/rdofile)
315*/
316
317/* bit-values for state */
318#define COWDEVOPEN 0x01 /* cowdevice opened */
319#define COWRWCOWOPEN 0x02 /* cowfile opened read-write */
320#define COWRDCOWOPEN 0x04 /* cowfile opened read-only */
321#define COWWATCHDOG 0x08 /* ioctl for watchdog cowfile space active */
322
323#define COWCOWOPEN (COWRWCOWOPEN|COWRDCOWOPEN)
324
/*
** per-minor administration of one cowdevice: the pairing of a
** read-only backing file (rdofile) with a copy-on-write file (cowfile),
** plus the bitmap that records which MAPUNIT-sized blocks have been
** written (and therefore live in the cowfile instead of the rdofile)
*/
struct cowloop_device
{
	/*
	** current status
	*/
	int state; /* bit-values (see above) */
	int opencnt; /* # opens for cowdevice */

	/*
	** open file pointers
	*/
	struct file *rdofp, *cowfp; /* open file pointers */
	char *rdoname, *cowname; /* file names */

	/*
	** request queue administration
	*/
	struct request_queue *rqueue;
	spinlock_t rqlock;
	struct gendisk *gd;

	/*
	** administration about read-only file
	*/
	unsigned int numblocks; /* # blocks input file in MAPUNIT */
	unsigned int blocksz; /* minimum unit to access this dev */
	unsigned long fingerprint; /* fingerprint of current rdofile */
	struct block_device *belowdev; /* block device below us */
	struct gendisk *belowgd; /* gendisk for blk dev below us */
	struct request_queue *belowq; /* req. queue of blk dev below us */

	/*
	** bitmap administration to register which blocks are modified
	*/
	long int mapsize; /* total size of bitmap (bytes) */
	long int mapremain; /* remaining bytes in last bitmap */
	int mapcount; /* number of bitmaps in use */
	char **mapcache; /* area with pointers to bitmaps */

	char *iobuf; /* databuffer of MAPUNIT bytes */
	struct cowhead *cowhead; /* buffer containing cowhead */

	/*
	** administration for interface with the kernel-thread
	*/
	int pid; /* pid==0: no thread available */
	struct request *req; /* request to be handled now */
	wait_queue_head_t waitq; /* wait-Q: thread waits for work */
	char closedown; /* boolean: thread exit required */
	char qfilled; /* boolean: I/O request pending */
	char iobusy; /* boolean: req under treatment */

	/*
	** administration to keep track of free space in cowfile filesystem
	** (refreshed from vfs_statfs in cowlo_writecow)
	*/
	unsigned long blksize; /* block size of fs (bytes) */
	unsigned long blktotal; /* recent total space in fs (blocks) */
	unsigned long blkavail; /* recent free space in fs (blocks) */

	wait_queue_head_t watchq; /* wait-Q: watcher awaits threshold */
	unsigned long watchthresh; /* threshold of watcher (blocks) */

	/*
	** statistical counters
	*/
	unsigned long rdoreads; /* number of read-actions rdo */
	unsigned long cowreads; /* number of read-actions cow */
	unsigned long cowwrites; /* number of write-actions */
	unsigned long nrcowblocks; /* number of blocks in use on cow */
};
395
static struct cowloop_device **cowdevall; /* ptr to ptrs to all cowdevices */
static struct semaphore cowdevlock; /* generic lock for cowdevs
                                       (held around cowlo_sync) */

static struct gendisk *cowctlgd; /* gendisk control channel */
static spinlock_t cowctlrqlock; /* for req.q. of ctrl. channel */

/*
** private directory /proc/cow
** (not static - presumably referenced from another translation
** unit; TODO confirm before narrowing linkage)
*/
struct proc_dir_entry *cowlo_procdir;
406
407/*
408** function prototypes
409*/
410static long int cowlo_do_request (struct request *req);
411static void cowlo_sync (void);
412static int cowlo_checkio (struct cowloop_device *, int, loff_t);
413static int cowlo_readmix (struct cowloop_device *, void *, int, loff_t);
414static int cowlo_writemix (struct cowloop_device *, void *, int, loff_t);
415static long int cowlo_readrdo (struct cowloop_device *, void *, int, loff_t);
416static long int cowlo_readcow (struct cowloop_device *, void *, int, loff_t);
417static long int cowlo_readcowraw (struct cowloop_device *, void *, int, loff_t);
418static long int cowlo_writecow (struct cowloop_device *, void *, int, loff_t);
419static long int cowlo_writecowraw(struct cowloop_device *, void *, int, loff_t);
420static int cowlo_ioctl (struct block_device *, fmode_t,
421 unsigned int, unsigned long);
422static int cowlo_makepair (struct cowpair __user *);
423static int cowlo_removepair (unsigned long __user *);
424static int cowlo_watch (struct cowpair __user *);
425static int cowlo_cowctl (unsigned long __user *, int);
426static int cowlo_openpair (char *, char *, int, int);
427static int cowlo_closepair (struct cowloop_device *);
428static int cowlo_openrdo (struct cowloop_device *, char *);
429static int cowlo_opencow (struct cowloop_device *, char *, int);
430static void cowlo_undo_openrdo(struct cowloop_device *);
431static void cowlo_undo_opencow(struct cowloop_device *);
432
433/*****************************************************************************/
434/* System call handling */
435/*****************************************************************************/
436
437/*
438** handle system call open()/mount()
439**
440** returns:
441** 0 - okay
442** < 0 - error value
443*/
444static int cowlo_open(struct block_device *bdev, fmode_t mode)
445{
446 struct inode *inode = bdev->bd_inode;
447
448 if (!inode)
449 return -EINVAL;
450
451 if (imajor(inode) != COWMAJOR) {
452 printk(KERN_WARNING
453 "cowloop - unexpected major %d\n", imajor(inode));
454 return -ENODEV;
455 }
456
457 switch (iminor(inode)) {
458 case COWCTL:
459 DEBUGP(DCOW"cowloop - open %d control\n", COWCTL);
460 break;
461
462 default:
463 DEBUGP(DCOW"cowloop - open minor %d\n", iminor(inode));
464
465 if ( iminor(inode) >= maxcows )
466 return -ENODEV;
467
468 if ( !((cowdevall[iminor(inode)])->state & COWDEVOPEN) )
469 return -ENODEV;
470
471 (cowdevall[iminor(inode)])->opencnt++;
472 }
473
474 return 0;
475}
476
477/*
478** handle system call close()/umount()
479**
480** returns:
481** 0 - okay
482*/
483static int cowlo_release(struct gendisk *gd, fmode_t mode)
484{
485 struct block_device *bdev;
486 struct inode *inode;
487
488 bdev = bdget_disk(gd, 0);
489 inode = bdev->bd_inode;
490 if (!inode)
491 return 0;
492
493 DEBUGP(DCOW"cowloop - release (close) minor %d\n", iminor(inode));
494
495 if ( iminor(inode) != COWCTL)
496 (cowdevall[iminor(inode)])->opencnt--;
497
498 return 0;
499}
500
501/*
502** handle system call ioctl()
503**
504** returns:
505** 0 - okay
506** < 0 - error value
507*/
508static int cowlo_ioctl(struct block_device *bdev, fmode_t mode,
509 unsigned int cmd, unsigned long arg)
510{
511 struct hd_geometry geo;
512 struct inode *inode = bdev->bd_inode;
513
514 DEBUGP(DCOW "cowloop - ioctl cmd %x\n", cmd);
515
516 switch ( iminor(inode) ) {
517
518 /*
519 ** allowed via control device only
520 */
521 case COWCTL:
522 switch (cmd) {
523 /*
524 ** write all bitmap chunks and cowheaders to cowfiles
525 */
526 case COWSYNC:
527 down(&cowdevlock);
528 cowlo_sync();
529 up(&cowdevlock);
530 return 0;
531
532 /*
533 ** open a new cowdevice (pair of rdofile/cowfile)
534 */
535 case COWMKPAIR:
536 return cowlo_makepair((void __user *)arg);
537
538 /*
539 ** close a cowdevice (pair of rdofile/cowfile)
540 */
541 case COWRMPAIR:
542 return cowlo_removepair((void __user *)arg);
543
544 /*
545 ** watch free space of filesystem containing cowfile
546 */
547 case COWWATCH:
548 return cowlo_watch((void __user *)arg);
549
550 /*
551 ** close cowfile for active device
552 */
553 case COWCLOSE:
554 return cowlo_cowctl((void __user *)arg, COWCLOSE);
555
556 /*
557 ** reopen cowfile read-only for active device
558 */
559 case COWRDOPEN:
560 return cowlo_cowctl((void __user *)arg, COWRDOPEN);
561
562 default:
563 return -EINVAL;
564 } /* end of switch on command */
565
566 /*
567 ** allowed for any other cowdevice
568 */
569 default:
570 switch (cmd) {
571 /*
572 ** HDIO_GETGEO must be supported for fdisk, etc
573 */
574 case HDIO_GETGEO:
575 geo.cylinders = 0;
576 geo.heads = 0;
577 geo.sectors = 0;
578
579 if (copy_to_user((void __user *)arg, &geo, sizeof geo))
580 return -EFAULT;
581 return 0;
582
583 default:
584 return -EINVAL;
585 } /* end of switch on ioctl-cmd code parameter */
586 } /* end of switch on minor number */
587}
588
/*
** block-device entry points registered for every cowdevice
** (no getgeo callback: HDIO_GETGEO is served inside cowlo_ioctl)
*/
static struct block_device_operations cowlo_fops =
{
	.owner = THIS_MODULE,
	.open = cowlo_open, /* called upon open */
	.release = cowlo_release, /* called upon close */
	.ioctl = cowlo_ioctl, /* called upon ioctl */
};
596
597/*
598** handle ioctl-command COWMKPAIR:
599** open a new cowdevice (pair of rdofile/cowfile) on-the-fly
600**
601** returns:
602** 0 - okay
603** < 0 - error value
604*/
/*
** handle ioctl-command COWMKPAIR:
** open a new cowdevice (pair of rdofile/cowfile) on-the-fly
**
** copies the cowpair descriptor and both pathname strings in from
** user space, then either picks the first unused minor (device ==
** ANYDEV, reported back to the caller) or uses the minor requested
** by the caller
**
** NOTE(review): on the success paths the kmalloc'ed rdopath/cowpath
** buffers are not freed here - presumably cowlo_openpair() takes
** ownership of them; confirm that before changing the cleanup logic
** (the copy_to_user failure path frees them AFTER a successful open,
** which would then be a double release)
**
** returns:
** 0 - okay
** < 0 - error value
*/
static int
cowlo_makepair(struct cowpair __user *arg)
{
	int i, rv=0;
	struct cowpair cowpair;
	unsigned char *cowpath;
	unsigned char *rdopath;

	/*
	** retrieve info about pathnames
	*/
	if ( copy_from_user(&cowpair, arg, sizeof cowpair) )
		return -EFAULT;

	/* either a specific COWMAJOR device or the ANYDEV wildcard */
	if ( (MAJOR(cowpair.device) != COWMAJOR) && (cowpair.device != ANYDEV) )
		return -EINVAL;

	if ( (MINOR(cowpair.device) >= maxcows) && (cowpair.device != ANYDEV) )
		return -EINVAL;

	/*
	** retrieve pathname strings (length-checked, copied into
	** freshly allocated NUL-terminated kernel buffers)
	*/
	if ( (cowpair.cowflen > PATH_MAX) || (cowpair.rdoflen > PATH_MAX) )
		return -ENAMETOOLONG;

	if ( !(cowpath = kmalloc(cowpair.cowflen+1, GFP_KERNEL)) )
		return -ENOMEM;

	if ( copy_from_user(cowpath, (void __user *)cowpair.cowfile,
						cowpair.cowflen) ) {
		kfree(cowpath);
		return -EFAULT;
	}
	*(cowpath+cowpair.cowflen) = 0;

	if ( !(rdopath = kmalloc(cowpair.rdoflen+1, GFP_KERNEL)) ) {
		kfree(cowpath);
		return -ENOMEM;
	}

	if ( copy_from_user(rdopath, (void __user *)cowpair.rdofile,
						cowpair.rdoflen) ) {
		kfree(rdopath);
		kfree(cowpath);
		return -EFAULT;
	}
	*(rdopath+cowpair.rdoflen) = 0;

	/*
	** open new cowdevice
	*/
	if ( cowpair.device == ANYDEV) {
		/*
		** search first unused minor
		*/
		for (i=0, rv=-EBUSY; i < maxcows; i++) {
			if ( !((cowdevall[i])->state & COWDEVOPEN) ) {
				rv = cowlo_openpair(rdopath, cowpath, 0, i);
				break;
			}
		}

		if (rv) { /* open failed? */
			kfree(rdopath);
			kfree(cowpath);
			return rv;
		}

		/*
		** return newly allocated cowdevice to user space
		*/
		cowpair.device = MKDEV(COWMAJOR, i);

		if ( copy_to_user(arg, &cowpair, sizeof cowpair)) {
			kfree(rdopath);
			kfree(cowpath);
			return -EFAULT;
		}
	} else { /* specific minor requested */
		if ( (rv = cowlo_openpair(rdopath, cowpath, 0,
					MINOR(cowpair.device)))) {
			kfree(rdopath);
			kfree(cowpath);
			return rv;
		}
	}

	return 0;
}
695
696/*
697** handle ioctl-command COWRMPAIR:
698** deactivate an existing cowdevice (pair of rdofile/cowfile) on-the-fly
699**
700** returns:
701** 0 - okay
702** < 0 - error value
703*/
704static int
705cowlo_removepair(unsigned long __user *arg)
706{
707 unsigned long cowdevice;
708 struct cowloop_device *cowdev;
709
710 /*
711 ** retrieve info about device to be removed
712 */
713 if ( copy_from_user(&cowdevice, arg, sizeof cowdevice))
714 return -EFAULT;
715
716 /*
717 ** verify major-minor number
718 */
719 if ( MAJOR(cowdevice) != COWMAJOR)
720 return -EINVAL;
721
722 if ( MINOR(cowdevice) >= maxcows)
723 return -EINVAL;
724
725 cowdev = cowdevall[MINOR(cowdevice)];
726
727 if ( !(cowdev->state & COWDEVOPEN) )
728 return -ENODEV;
729
730 /*
731 ** synchronize bitmaps and close cowdevice
732 */
733 if (cowdev->state & COWRWCOWOPEN) {
734 down(&cowdevlock);
735 cowlo_sync();
736 up(&cowdevlock);
737 }
738
739 return cowlo_closepair(cowdev);
740}
741
742/*
743** handle ioctl-command COWWATCH:
744** watch the free space of the filesystem containing a cowfile
745** of an open cowdevice
746**
747** returns:
748** 0 - okay
749** < 0 - error value
750*/
751static int
752cowlo_watch(struct cowpair __user *arg)
753{
754 struct cowloop_device *cowdev;
755 struct cowwatch cowwatch;
756
757 /*
758 ** retrieve structure holding info
759 */
760 if ( copy_from_user(&cowwatch, arg, sizeof cowwatch))
761 return -EFAULT;
762
763 /*
764 ** verify if cowdevice exists and is currently open
765 */
766 if ( MINOR(cowwatch.device) >= maxcows)
767 return -EINVAL;
768
769 cowdev = cowdevall[MINOR(cowwatch.device)];
770
771 if ( !(cowdev->state & COWDEVOPEN) )
772 return -ENODEV;
773
774 /*
775 ** if the WATCHWAIT-option is set, wait until the indicated
776 ** threshold is reached (only one waiter allowed)
777 */
778 if (cowwatch.flags & WATCHWAIT) {
779 /*
780 ** check if already another waiter active
781 ** for this cowdevice
782 */
783 if (cowdev->state & COWWATCHDOG)
784 return -EAGAIN;
785
786 cowdev->state |= COWWATCHDOG;
787
788 cowdev->watchthresh = (unsigned long long)
789 cowwatch.threshold /
790 (cowdev->blksize / 1024);
791
792 if (wait_event_interruptible(cowdev->watchq,
793 cowdev->watchthresh >= cowdev->blkavail)) {
794 cowdev->state &= ~COWWATCHDOG;
795 return EINTR;
796 }
797
798 cowdev->state &= ~COWWATCHDOG;
799 }
800
801 cowwatch.totalkb = (unsigned long long)cowdev->blktotal *
802 cowdev->blksize / 1024;
803 cowwatch.availkb = (unsigned long long)cowdev->blkavail *
804 cowdev->blksize / 1024;
805
806 if ( copy_to_user(arg, &cowwatch, sizeof cowwatch))
807 return -EFAULT;
808
809 return 0;
810}
811
812/*
813** handle ioctl-commands COWCLOSE and COWRDOPEN:
814** COWCLOSE - close the cowfile while the cowdevice remains open;
815** this allows an unmount of the filesystem on which
816** the cowfile resides
817** COWRDOPEN - close the cowfile and reopen it for read-only;
818** this allows a remount read-ony of the filesystem
819** on which the cowfile resides
820**
821** returns:
822** 0 - okay
823** < 0 - error value
824*/
825static int
826cowlo_cowctl(unsigned long __user *arg, int cmd)
827{
828 struct cowloop_device *cowdev;
829 unsigned long cowdevice;
830
831 /*
832 ** retrieve info about device to be removed
833 */
834 if ( copy_from_user(&cowdevice, arg, sizeof cowdevice))
835 return -EFAULT;
836
837 /*
838 ** verify major-minor number
839 */
840 if ( MAJOR(cowdevice) != COWMAJOR)
841 return -EINVAL;
842
843 if ( MINOR(cowdevice) >= maxcows)
844 return -EINVAL;
845
846 cowdev = cowdevall[MINOR(cowdevice)];
847
848 if ( !(cowdev->state & COWDEVOPEN) )
849 return -ENODEV;
850
851 /*
852 ** synchronize bitmaps and close cowfile
853 */
854 if (cowdev->state & COWRWCOWOPEN) {
855 down(&cowdevlock);
856 cowlo_sync();
857 up(&cowdevlock);
858 }
859
860 /*
861 ** handle specific ioctl-command
862 */
863 switch (cmd) {
864 case COWRDOPEN:
865 /*
866 ** if the cowfile is still opened read-write
867 */
868 if (cowdev->state & COWRWCOWOPEN) {
869 /*
870 ** close the cowfile
871 */
872 if (cowdev->cowfp)
873 filp_close(cowdev->cowfp, 0);
874
875 cowdev->state &= ~COWRWCOWOPEN;
876
877 /*
878 ** open again for read-only
879 */
880 cowdev->cowfp = filp_open(cowdev->cowname,
881 O_RDONLY|O_LARGEFILE, 0600);
882
883 if ( (cowdev->cowfp == NULL) || IS_ERR(cowdev->cowfp) ) {
884 printk(KERN_ERR
885 "cowloop - failed to reopen cowfile %s\n",
886 cowdev->cowname);
887 return -EINVAL;
888 }
889
890 /*
891 ** mark cowfile open for read-only
892 */
893 cowdev->state |= COWRDCOWOPEN;
894 } else {
895 return -EINVAL;
896 }
897 break;
898
899 case COWCLOSE:
900 /*
901 ** if the cowfile is still open
902 */
903 if (cowdev->state & COWCOWOPEN) {
904 /*
905 ** close the cowfile
906 */
907 if (cowdev->cowfp)
908 filp_close(cowdev->cowfp, 0);
909
910 cowdev->state &= ~COWCOWOPEN;
911 }
912 }
913
914 return 0;
915}
916
917
918/*****************************************************************************/
919/* Handling of I/O-requests for a cowdevice */
920/*****************************************************************************/
921
922/*
923** function to be called by core-kernel to handle the I/O-requests
924** in the queue
925*/
/*
** function to be called by core-kernel to handle the I/O-requests
** in the queue
**
** hands at most one request at a time to the per-device kernel-thread
** (cowdev->iobusy serializes this) and returns while that request is
** being serviced; cowlo_daemon() re-invokes this function when done
**
** NOTE(review): requests are only peeked, never started/dequeued
** here - verify against the block-layer API version in use that
** __blk_end_request_cur() may be applied to a request in this state
*/
static void cowlo_request(struct request_queue *q)
{
	struct request *req;
	struct cowloop_device *cowdev;

	DEBUGP(DCOW "cowloop - request function called....\n");

	while((req = blk_peek_request(q)) != NULL) {
		DEBUGP(DCOW "cowloop - got next request\n");

		if (! blk_fs_request(req)) {
			/* this is not a normal file system request */
			__blk_end_request_cur(req, -EIO);
			continue;
		}
		cowdev = req->rq_disk->private_data;

		/* one request in flight per device; the daemon resets
		   iobusy when it has completed the current one */
		if (cowdev->iobusy)
			return;
		else
			cowdev->iobusy = 1;

		/*
		** when no kernel-thread is available, the request will
		** produce an I/O-error
		*/
		if (!cowdev->pid) {
			printk(KERN_ERR"cowloop - no thread available\n");
			__blk_end_request_cur(req, -EIO); /* request failed */
			cowdev->iobusy = 0;
			continue;
		}

		/*
		** handle I/O-request in the context of the kernel-thread
		*/
		cowdev->req = req;
		cowdev->qfilled = 1;

		wake_up_interruptible_sync(&cowdev->waitq);

		/*
		** get out of this function now while the I/O-request is
		** under treatment of the kernel-thread; this function
		** will be called again after the current I/O-request has
		** been finished by the thread
		*/
		return;
	}
}
976
977/*
978** daemon-process (kernel-thread) executes this function
979*/
980static int
981cowlo_daemon(struct cowloop_device *cowdev)
982{
983 int rv;
984 int minor;
985 char myname[16];
986
987 for (minor = 0; minor < maxcows; minor++) {
988 if (cowdev == cowdevall[minor]) break;
989 }
990 sprintf(myname, "cowloopd%d", minor);
991
992 daemonize(myname);
993
994 while (!cowdev->closedown) {
995 /*
996 ** sleep while waiting for an I/O request;
997 ** note that no non-interruptible wait has been used
998 ** because the non-interruptible version of
999 ** a *synchronous* wake_up does not exist (any more)
1000 */
1001 if (wait_event_interruptible(cowdev->waitq, cowdev->qfilled)){
1002 flush_signals(current); /* ignore signal-based wakeup */
1003 continue;
1004 }
1005
1006 if (cowdev->closedown) /* module will be unloaded ? */{
1007 cowdev->pid = 0;
1008 return 0;
1009 }
1010
1011 /*
1012 ** woken up by the I/O-request handler: treat requested I/O
1013 */
1014 cowdev->qfilled = 0;
1015
1016 rv = cowlo_do_request(cowdev->req);
1017
1018 /*
1019 ** reacquire the queue-spinlock for manipulating
1020 ** the request-queue and dequeue the request
1021 */
1022 spin_lock_irq(&cowdev->rqlock);
1023
1024 __blk_end_request_cur(cowdev->req, rv);
1025 cowdev->iobusy = 0;
1026
1027 /*
1028 ** initiate the next request from the queue
1029 */
1030 cowlo_request(cowdev->rqueue);
1031
1032 spin_unlock_irq(&cowdev->rqlock);
1033 }
1034 return 0;
1035}
1036
1037/*
1038** function to be called in the context of the kernel thread
1039** to handle the queued I/O-requests
1040**
1041** returns:
1042** 0 - fail
1043** 1 - success
1044*/
/*
** function to be called in the context of the kernel thread
** to handle the queued I/O-requests
**
** classifies the current segment via cowlo_checkio() and dispatches
** to the plain rdo/cow readers/writers or to the split (mixed)
** handlers
**
** returns:
** 0 - fail
** 1 - success
*/
static long int
cowlo_do_request(struct request *req)
{
	unsigned long len;
	long int rv;
	loff_t offset;
	struct cowloop_device *cowdev = req->rq_disk->private_data;

	/*
	** calculate some variables which are needed later on
	** (current segment only: sectors of 512 bytes each)
	*/
	len = blk_rq_cur_sectors(req) << 9;
	offset = (loff_t) blk_rq_pos(req) << 9;

	DEBUGP(DCOW"cowloop - req cmd=%d offset=%lld len=%lu addr=%p\n",
	       *(req->cmd), offset, len, req->buffer);

	/*
	** handle READ- or WRITE-request
	*/
	switch (rq_data_dir(req)) {
	/**********************************************************/
	case READ:
		switch ( cowlo_checkio(cowdev, len, offset) ) {
		case ALLCOW:
			rv = cowlo_readcow(cowdev, req->buffer, len, offset);
			break;

		case ALLRDO:
			rv = cowlo_readrdo(cowdev, req->buffer, len, offset);
			break;

		case MIXEDUP:
			rv = cowlo_readmix(cowdev, req->buffer, len, offset);
			break;

		default:
			rv = 0; /* never happens */
		}
		break;

	/**********************************************************/
	case WRITE:
		switch ( cowlo_checkio(cowdev, len, offset) ) {
		case ALLCOW:
			/*
			** straight-forward write will do...
			*/
			DEBUGP(DCOW"cowloop - write straight ");

			rv = cowlo_writecow(cowdev, req->buffer, len, offset);
			break; /* from switch */

		case ALLRDO:
			if ( (len & MUMASK) == 0) {
				DEBUGP(DCOW"cowloop - write straight ");

				rv = cowlo_writecow(cowdev, req->buffer,
							len, offset);
				break;
			}
			/* fallthrough: partial block on the rdofile
			   needs the read-modify-write path below */

		case MIXEDUP:
			rv = cowlo_writemix(cowdev, req->buffer, len, offset);
			break;

		default:
			rv = 0; /* never happens */
		}
		break;

	default:
		printk(KERN_ERR
		       "cowloop - unrecognized command %d\n", *(req->cmd));
		rv = 0;
	}

	return (rv <= 0 ? 0 : 1);
}
1124
1125/*
1126** check for a given I/O-request if all underlying blocks
1127** (with size MAPUNIT) are either in the read-only file or in
1128** the cowfile (or a combination of the two)
1129**
1130** returns:
1131** ALLRDO - all underlying blocks in rdofile
1132** ALLCOW - all underlying blocks in cowfile
1133** MIXEDUP - underlying blocks partly in rdofile and partly in cowfile
1134*/
1135static int
1136cowlo_checkio(struct cowloop_device *cowdev, int len, loff_t offset)
1137{
1138 unsigned long mapnum, bytenum, bitnum, blocknr, partlen;
1139 long int totcnt, cowcnt;
1140 char *mc;
1141
1142 /*
1143 ** notice that the requested block might cross
1144 ** a blocksize boundary while one of the concerned
1145 ** blocks resides in the read-only file and another
1146 ** one in the copy-on-write file; in that case the
1147 ** request will be broken up into pieces
1148 */
1149 if ( (len <= MAPUNIT) &&
1150 (MAPUNIT - (offset & MUMASK) <= len) ) {
1151 /*
1152 ** easy situation:
1153 ** requested data-block entirely fits within
1154 ** the mapunit used for the bitmap
1155 ** check if that block is located in rdofile or
1156 ** cowfile
1157 */
1158 blocknr = offset >> MUSHIFT;
1159
1160 mapnum = CALCMAP (blocknr);
1161 bytenum = CALCBYTE(blocknr);
1162 bitnum = CALCBIT (blocknr);
1163
1164 if (*(*(cowdev->mapcache+mapnum)+bytenum)&(1<<bitnum))
1165 return ALLCOW;
1166 else
1167 return ALLRDO;
1168 }
1169
1170 /*
1171 ** less easy situation:
1172 ** the requested data-block does not fit within the mapunit
1173 ** used for the bitmap
1174 ** check if *all* underlying blocks involved reside on the rdofile
1175 ** or the cowfile (so still no breakup required)
1176 */
1177 for (cowcnt=totcnt=0; len > 0; len-=partlen, offset+=partlen, totcnt++){
1178 /*
1179 ** calculate blocknr of involved block
1180 */
1181 blocknr = offset >> MUSHIFT;
1182
1183 /*
1184 ** calculate partial length for this transfer
1185 */
1186 partlen = MAPUNIT - (offset & MUMASK);
1187 if (partlen > len)
1188 partlen = len;
1189
1190 /*
1191 ** is this block located in the cowfile
1192 */
1193 mapnum = CALCMAP (blocknr);
1194 bytenum = CALCBYTE(blocknr);
1195 bitnum = CALCBIT (blocknr);
1196
1197 mc = *(cowdev->mapcache+mapnum);
1198
1199 if (*(mc+bytenum)&(1<<bitnum))
1200 cowcnt++;;
1201
1202 DEBUGP(DCOW
1203 "cowloop - check %lu - map %lu, byte %lu, bit %lu, "
1204 "cowcnt %ld, totcnt %ld %02x %p\n",
1205 blocknr, mapnum, bytenum, bitnum, cowcnt, totcnt,
1206 *(mc+bytenum), mc);
1207 }
1208
1209 if (cowcnt == 0) /* all involved blocks on rdofile? */
1210 return ALLRDO;
1211
1212 if (cowcnt == totcnt) /* all involved blocks on cowfile? */
1213 return ALLCOW;
1214
1215 /*
1216 ** situation somewhat more complicated:
1217 ** involved underlying blocks spread over both files
1218 */
1219 return MIXEDUP;
1220}
1221
1222/*
1223** read requested chunk partly from rdofile and partly from cowfile
1224**
1225** returns:
1226** 0 - fail
1227** 1 - success
1228*/
1229static int
1230cowlo_readmix(struct cowloop_device *cowdev, void *buf, int len, loff_t offset)
1231{
1232 unsigned long mapnum, bytenum, bitnum, blocknr, partlen;
1233 long int rv;
1234 char *mc;
1235
1236 /*
1237 ** complicated approach: breakup required of read-request
1238 */
1239 for (rv=1; len > 0; len-=partlen, buf+=partlen, offset+=partlen) {
1240 /*
1241 ** calculate blocknr of entire block
1242 */
1243 blocknr = offset >> MUSHIFT;
1244
1245 /*
1246 ** calculate partial length for this transfer
1247 */
1248 partlen = MAPUNIT - (offset & MUMASK);
1249 if (partlen > len)
1250 partlen = len;
1251
1252 /*
1253 ** is this block located in the cowfile
1254 */
1255 mapnum = CALCMAP (blocknr);
1256 bytenum = CALCBYTE(blocknr);
1257 bitnum = CALCBIT (blocknr);
1258 mc = *(cowdev->mapcache+mapnum);
1259
1260 if (*(mc+bytenum)&(1<<bitnum)) {
1261 /*
1262 ** read (partial) block from cowfile
1263 */
1264 DEBUGP(DCOW"cowloop - split read "
1265 "cow partlen=%ld off=%lld\n", partlen, offset);
1266
1267 if (cowlo_readcow(cowdev, buf, partlen, offset) <= 0)
1268 rv = 0;
1269 } else {
1270 /*
1271 ** read (partial) block from rdofile
1272 */
1273 DEBUGP(DCOW"cowloop - split read "
1274 "rdo partlen=%ld off=%lld\n", partlen, offset);
1275
1276 if (cowlo_readrdo(cowdev, buf, partlen, offset) <= 0)
1277 rv = 0;
1278 }
1279 }
1280
1281 return rv;
1282}
1283
1284/*
1285** chunk to be written to the cowfile needs pieces to be
1286** read from the rdofile
1287**
1288** returns:
1289** 0 - fail
1290** 1 - success
1291*/
/*
** chunk to be written to the cowfile needs pieces to be
** read from the rdofile
**
** every sub-block that was never written before and is not written
** in full is first read from the rdofile into cowdev->iobuf, patched
** with the new data, and then written to the cowfile as a full block
** (read-modify-write); sub-blocks already in the cowfile are written
** straight through
**
** NOTE(review): a failed cowlo_readrdo() only clears the return flag
** but the (partly undefined) buffer is still written to the cowfile
**
** returns:
** 0 - fail
** 1 - success
*/
static int
cowlo_writemix(struct cowloop_device *cowdev, void *buf, int len, loff_t offset)
{
	unsigned long mapnum, bytenum, bitnum, blocknr, partlen;
	long int rv;
	char *mc;

	/*
	** somewhat more complicated stuff is required:
	** if the request is larger than one underlying
	** block or is spread over two underlying blocks,
	** split the request into pieces; if a block does not
	** start at a block boundary, take care that
	** surrounding data is read first (if needed),
	** fit the new data in and write it as a full block
	*/
	for (rv=1; len > 0; len-=partlen, buf+=partlen, offset+=partlen) {
		/*
		** calculate partial length for this transfer
		*/
		partlen = MAPUNIT - (offset & MUMASK);
		if (partlen > len)
			partlen = len;

		/*
		** calculate blocknr of entire block
		*/
		blocknr = offset >> MUSHIFT;

		/*
		** has this block been written before?
		*/
		mapnum = CALCMAP (blocknr);
		bytenum = CALCBYTE(blocknr);
		bitnum = CALCBIT (blocknr);
		mc = *(cowdev->mapcache+mapnum);

		if (*(mc+bytenum)&(1<<bitnum)) {
			/*
			** block has been written before;
			** write transparantly to cowfile
			*/
			DEBUGP(DCOW
			       "cowloop - splitwr transp\n");

			if (cowlo_writecow(cowdev, buf, partlen, offset) <= 0)
				rv = 0;
		} else {
			/*
			** block has never been written before,
			** so read entire block from
			** read-only file first, unless
			** a full block is requested to
			** be written
			*/
			if (partlen < MAPUNIT) {
				if (cowlo_readrdo(cowdev, cowdev->iobuf,
				    MAPUNIT, (loff_t)blocknr << MUSHIFT) <= 0)
					rv = 0;
			}

			/*
			** transfer modified part into
			** the block just read
			*/
			memcpy(cowdev->iobuf + (offset & MUMASK), buf, partlen);

			/*
			** write entire block to cowfile
			*/
			DEBUGP(DCOW"cowloop - split "
			       "partlen=%ld off=%lld\n",
			       partlen, (loff_t)blocknr << MUSHIFT);

			if (cowlo_writecow(cowdev, cowdev->iobuf, MAPUNIT,
					(loff_t)blocknr << MUSHIFT) <= 0)
				rv = 0;
		}
	}

	return rv;
}
1374
1375/*****************************************************************************/
1376/* I/O-support for read-only file and copy-on-write file */
1377/*****************************************************************************/
1378
1379/*
1380** read data from the read-only file
1381**
1382** return-value: similar to user-mode read
1383*/
1384static long int
1385cowlo_readrdo(struct cowloop_device *cowdev, void *buf, int len, loff_t offset)
1386{
1387 long int rv;
1388 mm_segment_t old_fs;
1389 loff_t saveoffset = offset;
1390
1391 DEBUGP(DCOW"cowloop - readrdo called\n");
1392
1393 old_fs = get_fs();
1394 set_fs( get_ds() );
1395 rv = cowdev->rdofp->f_op->read(cowdev->rdofp, buf, len, &offset);
1396 set_fs(old_fs);
1397
1398 if (rv < len) {
1399 printk(KERN_WARNING "cowloop - read-failure %ld on rdofile"
1400 "- offset=%lld len=%d\n",
1401 rv, saveoffset, len);
1402 }
1403
1404 cowdev->rdoreads++;
1405 return rv;
1406}
1407
1408/*
1409** read cowfile from a modified offset, i.e. skipping the bitmap and cowhead
1410**
1411** return-value: similar to user-mode read
1412*/
1413static long int
1414cowlo_readcow(struct cowloop_device *cowdev, void *buf, int len, loff_t offset)
1415{
1416 DEBUGP(DCOW"cowloop - readcow called\n");
1417
1418 offset += cowdev->cowhead->doffset;
1419
1420 return cowlo_readcowraw(cowdev, buf, len, offset);
1421}
1422
1423/*
1424** read cowfile from an absolute offset
1425**
1426** return-value: similar to user-mode read
1427*/
1428static long int
1429cowlo_readcowraw(struct cowloop_device *cowdev,
1430 void *buf, int len, loff_t offset)
1431{
1432 long int rv;
1433 mm_segment_t old_fs;
1434 loff_t saveoffset = offset;
1435
1436 DEBUGP(DCOW"cowloop - readcowraw called\n");
1437
1438 /*
1439 ** be sure that cowfile is opened for read-write
1440 */
1441 if ( !(cowdev->state & COWCOWOPEN) ) {
1442 printk(KERN_WARNING
1443 "cowloop - read request from cowfile refused\n");
1444
1445 return -EBADF;
1446 }
1447
1448 /*
1449 ** issue low level read
1450 */
1451 old_fs = get_fs();
1452 set_fs( get_ds() );
1453 rv = cowdev->cowfp->f_op->read(cowdev->cowfp, buf, len, &offset);
1454 set_fs(old_fs);
1455
1456 if (rv < len) {
1457 printk(KERN_WARNING
1458 "cowloop - read-failure %ld on cowfile"
1459 "- offset=%lld len=%d\n", rv, saveoffset, len);
1460 }
1461
1462 cowdev->cowreads++;
1463 return rv;
1464}
1465
1466/*
1467** write cowfile from a modified offset, i.e. skipping the bitmap and cowhead
1468**
1469** if a block is written for the first time while its contents consists
1470** of binary zeroes only, the concerning bitmap is flushed to the cowfile
1471**
1472** return-value: similar to user-mode write
1473*/
1474static long int
1475cowlo_writecow(struct cowloop_device *cowdev, void *buf, int len, loff_t offset)
1476{
1477 long int rv;
1478 unsigned long mapnum=0, mapbyte=0, mapbit=0, cowblock=0, partlen;
1479 char *tmpptr, *mapptr = NULL;
1480 loff_t tmpoffset, mapoffset = 0;
1481
1482 DEBUGP(DCOW"cowloop - writecow called\n");
1483
1484 /*
1485 ** be sure that cowfile is opened for read-write
1486 */
1487 if ( !(cowdev->state & COWRWCOWOPEN) ) {
1488 printk(KERN_WARNING
1489 "cowloop - Write request to cowfile refused\n");
1490
1491 return -EBADF;
1492 }
1493
1494 /*
1495 ** write the entire block to the cowfile
1496 */
1497 tmpoffset = offset + cowdev->cowhead->doffset;
1498
1499 rv = cowlo_writecowraw(cowdev, buf, len, tmpoffset);
1500
1501 /*
1502 ** verify if enough space available on filesystem holding
1503 ** the cowfile
1504 ** - when the last write failed (might be caused by lack of space)
1505 ** - when a watcher is active (to react adequatly)
1506 ** - when the previous check indicated fs was almost full
1507 ** - with regular intervals
1508 */
1509 if ( (rv <= 0) ||
1510 (cowdev->state & COWWATCHDOG) ||
1511 (cowdev->blkavail / 2 < SPCDFLINTVL) ||
1512 (cowdev->cowwrites % SPCDFLINTVL == 0) ) {
1513 struct kstatfs ks;
1514
1515 if (vfs_statfs(cowdev->cowfp->f_dentry, &ks)==0){
1516 if (ks.f_bavail <= SPCMINBLK) {
1517 switch (ks.f_bavail) {
1518 case 0:
1519 case 1:
1520 case 2:
1521 case 3:
1522 printk(KERN_ALERT
1523 "cowloop - "
1524 "ALERT: cowfile full!\n");
1525 break;
1526
1527 default:
1528 printk(KERN_WARNING
1529 "cowloop - cowfile almost "
1530 "full (only %llu Kb free)\n",
1531 (unsigned long long)
1532 ks.f_bsize * ks.f_bavail /1024);
1533 }
1534 }
1535
1536 cowdev->blktotal = ks.f_blocks;
1537 cowdev->blkavail = ks.f_bavail;
1538
1539 /*
1540 ** wakeup watcher if threshold has been reached
1541 */
1542 if ( (cowdev->state & COWWATCHDOG) &&
1543 (cowdev->watchthresh >= cowdev->blkavail) ) {
1544 wake_up_interruptible(&cowdev->watchq);
1545 }
1546 }
1547 }
1548
1549 if (rv <= 0)
1550 return rv;
1551
1552 DEBUGP(DCOW"cowloop - block written\n");
1553
1554 /*
1555 ** check if block(s) is/are written to the cowfile
1556 ** for the first time; if so, adapt the bitmap
1557 */
1558 for (; len > 0; len-=partlen, offset+=partlen, buf+=partlen) {
1559 /*
1560 ** calculate partial length for this transfer
1561 */
1562 partlen = MAPUNIT - (offset & MUMASK);
1563 if (partlen > len)
1564 partlen = len;
1565
1566 /*
1567 ** calculate bitnr of written chunk of cowblock
1568 */
1569 cowblock = offset >> MUSHIFT;
1570
1571 mapnum = CALCMAP (cowblock);
1572 mapbyte = CALCBYTE(cowblock);
1573 mapbit = CALCBIT (cowblock);
1574
1575 if (*(*(cowdev->mapcache+mapnum)+mapbyte) & (1<<mapbit))
1576 continue; /* already written before */
1577
1578 /*
1579 ** if the block is written for the first time,
1580 ** the corresponding bit should be set in the bitmap
1581 */
1582 *(*(cowdev->mapcache+mapnum)+mapbyte) |= (1<<mapbit);
1583
1584 cowdev->nrcowblocks++;
1585
1586 DEBUGP(DCOW"cowloop - bitupdate blk=%ld map=%ld "
1587 "byte=%ld bit=%ld\n",
1588 cowblock, mapnum, mapbyte, mapbit);
1589
1590 /*
1591 ** check if the cowhead in the cowfile is currently
1592 ** marked clean; if so, mark it dirty and flush it
1593 */
1594 if ( !(cowdev->cowhead->flags &= COWDIRTY)) {
1595 cowdev->cowhead->flags |= COWDIRTY;
1596
1597 cowlo_writecowraw(cowdev, cowdev->cowhead,
1598 MAPUNIT, (loff_t)0);
1599 }
1600
1601 /*
1602 ** if the written datablock contained binary zeroes,
1603 ** the bitmap block should be marked to be flushed to disk
1604 ** (blocks containing all zeroes cannot be recovered by
1605 ** the cowrepair-program later on if cowloop is not properly
1606 ** removed via rmmod)
1607 */
1608 if ( memcmp(buf, allzeroes, partlen) ) /* not all zeroes? */
1609 continue; /* no flush needed */
1610
1611 /*
1612 ** calculate positions of bitmap block to be flushed
1613 ** - pointer of bitmap block in memory
1614 ** - offset of bitmap block in cowfile
1615 */
1616 tmpptr = *(cowdev->mapcache+mapnum) + (mapbyte & (~MUMASK));
1617 tmpoffset = (loff_t) MAPUNIT + mapnum * MAPCHUNKSZ +
1618 (mapbyte & (~MUMASK));
1619
1620 /*
1621 ** flush a bitmap block at the moment that all bits have
1622 ** been set in that block, i.e. at the moment that we
1623 ** switch to another bitmap block
1624 */
1625 if ( (mapoffset != 0) && (mapoffset != tmpoffset) ) {
1626 if (cowlo_writecowraw(cowdev, mapptr, MAPUNIT,
1627 mapoffset) < 0) {
1628 printk(KERN_WARNING
1629 "cowloop - write-failure on bitmap - "
1630 "blk=%ld map=%ld byte=%ld bit=%ld\n",
1631 cowblock, mapnum, mapbyte, mapbit);
1632 }
1633
1634 DEBUGP(DCOW"cowloop - bitmap blk written %lld\n",
1635 mapoffset);
1636 }
1637
1638 /*
1639 ** remember offset in cowfile and offset in memory
1640 ** for bitmap to be flushed; flushing will be done
1641 ** as soon as all updates in this bitmap block have
1642 ** been done
1643 */
1644 mapoffset = tmpoffset;
1645 mapptr = tmpptr;
1646 }
1647
1648 /*
1649 ** any new block written containing binary zeroes?
1650 */
1651 if (mapoffset) {
1652 if (cowlo_writecowraw(cowdev, mapptr, MAPUNIT, mapoffset) < 0) {
1653 printk(KERN_WARNING
1654 "cowloop - write-failure on bitmap - "
1655 "blk=%ld map=%ld byte=%ld bit=%ld\n",
1656 cowblock, mapnum, mapbyte, mapbit);
1657 }
1658
1659 DEBUGP(DCOW"cowloop - bitmap block written %lld\n", mapoffset);
1660 }
1661
1662 return rv;
1663}
1664
1665/*
1666** write cowfile from an absolute offset
1667**
1668** return-value: similar to user-mode write
1669*/
1670static long int
1671cowlo_writecowraw(struct cowloop_device *cowdev,
1672 void *buf, int len, loff_t offset)
1673{
1674 long int rv;
1675 mm_segment_t old_fs;
1676 loff_t saveoffset = offset;
1677
1678 DEBUGP(DCOW"cowloop - writecowraw called\n");
1679
1680 /*
1681 ** be sure that cowfile is opened for read-write
1682 */
1683 if ( !(cowdev->state & COWRWCOWOPEN) ) {
1684 printk(KERN_WARNING
1685 "cowloop - write request to cowfile refused\n");
1686
1687 return -EBADF;
1688 }
1689
1690 /*
1691 ** issue low level write
1692 */
1693 old_fs = get_fs();
1694 set_fs( get_ds() );
1695 rv = cowdev->cowfp->f_op->write(cowdev->cowfp, buf, len, &offset);
1696 set_fs(old_fs);
1697
1698 if (rv < len) {
1699 printk(KERN_WARNING
1700 "cowloop - write-failure %ld on cowfile"
1701 "- offset=%lld len=%d\n", rv, saveoffset, len);
1702 }
1703
1704 cowdev->cowwrites++;
1705 return rv;
1706}
1707
1708
1709/*
1710** readproc-function: called when the corresponding /proc-file is read
1711*/
static int
cowlo_readproc(char *buf, char **start, off_t pos, int cnt, int *eof, void *p)
{
	/* 'p' is the per-device admin passed to create_proc_read_entry() */
	struct cowloop_device *cowdev = p;

	/* strip the trailing " $" of the RCS/CVS revision keyword */
	revision[sizeof revision - 3] = '\0';

	/*
	** format the full status report into the proc page buffer;
	** NOTE(review): the legacy proc_read contract assumes the
	** output fits in one page and *eof is not set here — confirm
	** acceptable for this driver's report size
	*/
	return sprintf(buf,
		" cowloop version: %9s\n\n"
		" device state: %s%s%s%s\n"
		" number of opens: %9d\n"
		" pid of thread: %9d\n\n"
		" read-only file: %9s\n"
		" rdoreads: %9lu\n\n"
		"copy-on-write file: %9s\n"
		" state cowfile: %9s\n"
		" bitmap-blocks: %9lu (of %d bytes)\n"
		" cowblocks in use: %9lu (of %d bytes)\n"
		" cowreads: %9lu\n"
		" cowwrites: %9lu\n",
		&revision[11],

		cowdev->state & COWDEVOPEN ? "devopen " : "",
		cowdev->state & COWRWCOWOPEN ? "cowopenrw " : "",
		cowdev->state & COWRDCOWOPEN ? "cowopenro " : "",
		cowdev->state & COWWATCHDOG ? "watchdog " : "",

		cowdev->opencnt,
		cowdev->pid,
		cowdev->rdoname,
		cowdev->rdoreads,
		cowdev->cowname,
		cowdev->cowhead->flags & COWDIRTY ? "dirty":"clean",
		cowdev->mapsize >> MUSHIFT, MAPUNIT,
		cowdev->nrcowblocks, MAPUNIT,
		cowdev->cowreads,
		cowdev->cowwrites);
}
1750
1751/*****************************************************************************/
1752/* Setup and destroy cowdevices */
1753/*****************************************************************************/
1754
1755/*
1756** open and prepare a cowdevice (rdofile and cowfile) and allocate bitmaps
1757**
1758** returns:
1759** 0 - okay
1760** < 0 - error value
1761*/
1762static int
1763cowlo_openpair(char *rdof, char *cowf, int autorecover, int minor)
1764{
1765 long int rv;
1766 struct cowloop_device *cowdev = cowdevall[minor];
1767 struct kstatfs ks;
1768
1769 down(&cowdevlock);
1770
1771 /*
1772 ** requested device exists?
1773 */
1774 if (minor >= maxcows) {
1775 up(&cowdevlock);
1776 return -ENODEV;
1777 }
1778
1779 /*
1780 ** requested device already assigned to cowdevice?
1781 */
1782 if (cowdev->state & COWDEVOPEN) {
1783 up(&cowdevlock);
1784 return -EBUSY;
1785 }
1786
1787 /*
1788 ** initialize administration
1789 */
1790 memset(cowdev, 0, sizeof *cowdev);
1791
1792 spin_lock_init (&cowdev->rqlock);
1793 init_waitqueue_head(&cowdev->waitq);
1794 init_waitqueue_head(&cowdev->watchq);
1795
1796 /*
1797 ** open the read-only file
1798 */
1799 DEBUGP(DCOW"cowloop - call openrdo....\n");
1800
1801 if ( (rv = cowlo_openrdo(cowdev, rdof)) ) {
1802 cowlo_undo_openrdo(cowdev);
1803 up(&cowdevlock);
1804 return rv;
1805 }
1806
1807 /*
1808 ** open the cowfile
1809 */
1810 DEBUGP(DCOW"cowloop - call opencow....\n");
1811
1812 if ( (rv = cowlo_opencow(cowdev, cowf, autorecover)) ) {
1813 cowlo_undo_openrdo(cowdev);
1814 cowlo_undo_opencow(cowdev);
1815 up(&cowdevlock);
1816 return rv;
1817 }
1818
1819 /*
1820 ** administer total and available size of filesystem holding cowfile
1821 */
1822 if (vfs_statfs(cowdev->cowfp->f_dentry, &ks)==0) {
1823 cowdev->blksize = ks.f_bsize;
1824 cowdev->blktotal = ks.f_blocks;
1825 cowdev->blkavail = ks.f_bavail;
1826 } else {
1827 cowdev->blksize = 1024; /* avoid division by zero */
1828 }
1829
1830 /*
1831 ** flush the (recovered) bitmaps and cowhead to the cowfile
1832 */
1833 DEBUGP(DCOW"cowloop - call cowsync....\n");
1834
1835 cowlo_sync();
1836
1837 /*
1838 ** allocate gendisk for the cow device
1839 */
1840 DEBUGP(DCOW"cowloop - alloc disk....\n");
1841
1842 if ((cowdev->gd = alloc_disk(1)) == NULL) {
1843 printk(KERN_WARNING
1844 "cowloop - unable to alloc_disk for cowloop\n");
1845
1846 cowlo_undo_openrdo(cowdev);
1847 cowlo_undo_opencow(cowdev);
1848 up(&cowdevlock);
1849 return -ENOMEM;
1850 }
1851
1852 cowdev->gd->major = COWMAJOR;
1853 cowdev->gd->first_minor = minor;
1854 cowdev->gd->minors = 1;
1855 cowdev->gd->fops = &cowlo_fops;
1856 cowdev->gd->private_data = cowdev;
1857 sprintf(cowdev->gd->disk_name, "%s%d", DEVICE_NAME, minor);
1858
1859 /* in .5 Kb units */
1860 set_capacity(cowdev->gd, (cowdev->numblocks*(MAPUNIT/512)));
1861
1862 DEBUGP(DCOW"cowloop - init request queue....\n");
1863
1864 if ((cowdev->rqueue = blk_init_queue(cowlo_request, &cowdev->rqlock))
1865 == NULL) {
1866 printk(KERN_WARNING
1867 "cowloop - unable to get request queue for cowloop\n");
1868
1869 del_gendisk(cowdev->gd);
1870 cowlo_undo_openrdo(cowdev);
1871 cowlo_undo_opencow(cowdev);
1872 up(&cowdevlock);
1873 return -EINVAL;
1874 }
1875
1876 blk_queue_logical_block_size(cowdev->rqueue, cowdev->blocksz);
1877 cowdev->gd->queue = cowdev->rqueue;
1878
1879 /*
1880 ** start kernel thread to handle requests
1881 */
1882 DEBUGP(DCOW"cowloop - kickoff daemon....\n");
1883
1884 cowdev->pid = kernel_thread((int (*)(void *))cowlo_daemon, cowdev, 0);
1885
1886 /*
1887 ** create a file below directory /proc/cow for this new cowdevice
1888 */
1889 if (cowlo_procdir) {
1890 char tmpname[64];
1891
1892 sprintf(tmpname, "%d", minor);
1893
1894 create_proc_read_entry(tmpname, 0 , cowlo_procdir,
1895 cowlo_readproc, cowdev);
1896 }
1897
1898 cowdev->state |= COWDEVOPEN;
1899
1900 cowdev->rdoname = rdof;
1901 cowdev->cowname = cowf;
1902
1903 /*
1904 ** enable the new disk; this triggers the first request!
1905 */
1906 DEBUGP(DCOW"cowloop - call add_disk....\n");
1907
1908 add_disk(cowdev->gd);
1909
1910 up(&cowdevlock);
1911 return 0;
1912}
1913
1914/*
1915** close a cowdevice (pair of rdofile/cowfile) and release memory
1916**
1917** returns:
1918** 0 - okay
1919** < 0 - error value
1920*/
static int
cowlo_closepair(struct cowloop_device *cowdev)
{
	int minor;

	down(&cowdevlock);

	/*
	** if cowdevice is not activated at all, refuse
	*/
	if ( !(cowdev->state & COWDEVOPEN) ) {
		up(&cowdevlock);
		return -ENODEV;
	}

	/*
	** if this cowdevice is still open, refuse
	*/
	if (cowdev->opencnt > 0) {
		up(&cowdevlock);
		return -EBUSY;
	}

	up(&cowdevlock);

	/*
	** wakeup watcher (if any)
	*/
	if (cowdev->state & COWWATCHDOG) {
		cowdev->watchthresh = cowdev->blkavail;
		wake_up_interruptible(&cowdev->watchq);
	}

	/*
	** wakeup kernel-thread to be able to exit
	** and wait until it has exited
	*/
	cowdev->closedown = 1;
	cowdev->qfilled = 1;
	wake_up_interruptible(&cowdev->waitq);

	/* busy-wait: the thread clears cowdev->pid when it exits */
	while (cowdev->pid)
		schedule();

	del_gendisk(cowdev->gd); /* revert the add_disk()   */
	put_disk(cowdev->gd);    /* revert the alloc_disk() */

	if (cowlo_procdir) {
		char tmpname[64];

		/* recover our minor number to name the /proc/cow entry */
		for (minor = 0; minor < maxcows; minor++) {
			if (cowdev == cowdevall[minor]) break;
		}
		sprintf(tmpname, "%d", minor);

		remove_proc_entry(tmpname, cowlo_procdir);
	}

	blk_cleanup_queue(cowdev->rqueue);

	/*
	** release memory for filenames if these names have
	** been allocated dynamically
	*/
	if ( (cowdev->cowname) && (cowdev->cowname != cowfile))
		kfree(cowdev->cowname);

	if ( (cowdev->rdoname) && (cowdev->rdoname != rdofile))
		kfree(cowdev->rdoname);

	cowlo_undo_openrdo(cowdev);
	cowlo_undo_opencow(cowdev);

	cowdev->state &= ~COWDEVOPEN;

	return 0;
}
1998
1999/*
2000** open the read-only file
2001**
2002** returns:
2003** 0 - okay
2004** < 0 - error value
2005*/
static int
cowlo_openrdo(struct cowloop_device *cowdev, char *rdof)
{
	struct file *f;
	struct inode *inode;
	long int i, nrval;

	DEBUGP(DCOW"cowloop - openrdo called\n");

	/*
	** open the read-only file
	*/
	if(*rdof == '\0') {
		printk(KERN_ERR
		       "cowloop - specify name for read-only file\n\n");
		return -EINVAL;
	}

	f = filp_open(rdof, O_RDONLY|O_LARGEFILE, 0);

	if ( (f == NULL) || IS_ERR(f) ) {
		printk(KERN_ERR
		       "cowloop - open of rdofile %s failed\n", rdof);
		return -EINVAL;
	}

	cowdev->rdofp = f;

	inode = f->f_dentry->d_inode;

	/* only regular files and block devices can back a cowdevice */
	if ( !S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode) ) {
		printk(KERN_ERR
		       "cowloop - %s not regular file or blockdev\n", rdof);
		return -EINVAL;
	}

	DEBUGP(DCOW"cowloop - determine size rdo....\n");

	/*
	** determine block-size and total size of read-only file
	*/
	if (S_ISREG(inode->i_mode)) {
		/*
		** read-only file is a regular file
		*/
		cowdev->blocksz = 512;	/* other value fails */
		cowdev->numblocks = inode->i_size >> MUSHIFT;

		/* sizes are handled in MAPUNIT chunks; a trailing partial
		** chunk is silently ignored */
		if (inode->i_size & MUMASK) {
			printk(KERN_WARNING
			       "cowloop - rdofile %s truncated to multiple "
			       "of %d bytes\n", rdof, MAPUNIT);
		}

		DEBUGP(DCOW"cowloop - RO=regular: numblocks=%d, blocksz=%d\n",
					cowdev->numblocks, cowdev->blocksz);
	} else {
		/*
		** read-only file is a block device
		*/
		cowdev->belowdev = inode->i_bdev;
		cowdev->belowgd = cowdev->belowdev->bd_disk; /* gendisk */

		/* prefer the partition size; fall back to whole-disk
		** capacity if no partition info is available */
		if (cowdev->belowdev->bd_part) {
			cowdev->numblocks = cowdev->belowdev->bd_part->nr_sects
							/ (MAPUNIT/512);
		}

		if (cowdev->belowgd) {
			cowdev->belowq = cowdev->belowgd->queue;

			if (cowdev->numblocks == 0) {
				cowdev->numblocks = get_capacity(cowdev->belowgd)
							/ (MAPUNIT/512);
			}
		}


		if (cowdev->belowq)
			cowdev->blocksz = queue_logical_block_size(cowdev->belowq);

		if (cowdev->blocksz == 0)
			cowdev->blocksz = BLOCK_SIZE;	/* default 2^10 */

		DEBUGP(DCOW"cowloop - numblocks=%d, "
		       "blocksz=%d, belowgd=%p, belowq=%p\n",
		       cowdev->numblocks, cowdev->blocksz,
		       cowdev->belowgd, cowdev->belowq);

		DEBUGP(DCOW"cowloop - belowdev.bd_block_size=%d\n",
		       cowdev->belowdev->bd_block_size);
	}

	if (cowdev->numblocks == 0) {
		printk(KERN_ERR "cowloop - %s has no contents\n", rdof);
		return -EINVAL;
	}

	/*
	** reserve space in memory as generic I/O buffer
	*/
	cowdev->iobuf = kmalloc(MAPUNIT, GFP_KERNEL);

	if (!cowdev->iobuf) {
		printk(KERN_ERR
		       "cowloop - cannot get space for buffer %d\n", MAPUNIT);
		return -ENOMEM;
	}

	DEBUGP(DCOW"cowloop - determine fingerprint rdo....\n");

	/*
	** determine fingerprint for read-only file
	** calculate fingerprint from first four datablocks
	** which do not contain binary zeroes
	*/
	for (i=0, cowdev->fingerprint=0, nrval=0;
	     (nrval < 4)&&(i < cowdev->numblocks); i++) {
		int j;
		unsigned char cs;

		/*
		** read next block
		*/
		if (cowlo_readrdo(cowdev, cowdev->iobuf, MAPUNIT,
						(loff_t)i << MUSHIFT) < 1)
			break;

		/*
		** calculate fingerprint by adding all byte-values
		*/
		for (j=0, cs=0; j < MAPUNIT; j++)
			cs += *(cowdev->iobuf+j);

		if (cs == 0)	/* block probably contained zeroes */
			continue;

		/*
		** shift byte-value to proper place in final fingerprint;
		** one byte of checksum per qualifying block, four in total
		*/
		cowdev->fingerprint |= cs << (nrval*8);
		nrval++;
	}

	return 0;
}
2152
2153/*
2154** undo memory allocs and file opens issued so far
2155** related to the read-only file
2156*/
2157static void
2158cowlo_undo_openrdo(struct cowloop_device *cowdev)
2159{
2160 if(cowdev->iobuf);
2161 kfree(cowdev->iobuf);
2162
2163 if (cowdev->rdofp)
2164 filp_close(cowdev->rdofp, 0);
2165}
2166
2167/*
2168** open the cowfile
2169**
2170** returns:
2171** 0 - okay
2172** < 0 - error value
2173*/
static int
cowlo_opencow(struct cowloop_device *cowdev, char *cowf, int autorecover)
{
	long int i, rv;
	int minor;
	unsigned long nb;
	struct file *f;
	struct inode *inode;
	loff_t offset;
	struct cowloop_device *cowtmp;

	DEBUGP(DCOW"cowloop - opencow called\n");

	/*
	** open copy-on-write file (read-write)
	*/
	if (cowf[0] == '\0') {
		printk(KERN_ERR
		       "cowloop - specify name of copy-on-write file\n\n");
		return -EINVAL;
	}

	f = filp_open(cowf, O_RDWR|O_LARGEFILE, 0600);

	if ( (f == NULL) || IS_ERR(f) ) {
		/*
		** non-existing cowfile: try to create
		*/
		f = filp_open(cowf, O_RDWR|O_CREAT|O_LARGEFILE, 0600);

		if ( (f == NULL) || IS_ERR(f) ) {
			printk(KERN_ERR
			"cowloop - failed to open file %s for read-write\n\n",
			cowf);
			return -EINVAL;
		}
	}

	cowdev->cowfp = f;

	inode = f->f_dentry->d_inode;

	if (!S_ISREG(inode->i_mode)) {
		printk(KERN_ERR "cowloop - %s is not regular file\n", cowf);
		return -EINVAL;
	}

	/*
	** check if this cowfile is already in use for another cowdevice
	** (compared by inode so different paths to one file are caught)
	*/
	for (minor = 0; minor < maxcows; minor++) {

		cowtmp = cowdevall[minor];

		if ( !(cowtmp->state & COWDEVOPEN) )
			continue;

		if (cowtmp == cowdev)
			continue;

		if (cowtmp->cowfp->f_dentry->d_inode == f->f_dentry->d_inode) {
			printk(KERN_ERR
			       "cowloop - %s: already in use as cow\n", cowf);
			return -EBUSY;
		}
	}

	/*
	** mark cowfile open for read-write
	*/
	cowdev->state |= COWRWCOWOPEN;

	/*
	** calculate size (in bytes) for total bitmap in cowfile;
	** when the size of the cowhead block is added, the start-offset
	** for the modified data blocks can be found
	*/
	nb = cowdev->numblocks;

	if (nb%8)		/* transform #bits to #bytes */
		nb+=8;		/* rounded if necessary */
	nb /= 8;

	if (nb & MUMASK)	/* round up #bytes to MAPUNIT chunks */
		cowdev->mapsize = ( (nb>>MUSHIFT) +1) << MUSHIFT;
	else
		cowdev->mapsize = nb;

	/*
	** reserve space in memory for the cowhead
	*/
	cowdev->cowhead = kmalloc(MAPUNIT, GFP_KERNEL);

	if (!cowdev->cowhead) {
		printk(KERN_ERR "cowloop - cannot get space for cowhead %d\n",
								MAPUNIT);
		return -ENOMEM;
	}

	memset(cowdev->cowhead, 0, MAPUNIT);

	DEBUGP(DCOW"cowloop - prepare cowhead....\n");

	/*
	** check if the cowfile exists or should be created
	*/
	if (inode->i_size != 0) {
		/*
		** existing cowfile: read the cow head
		*/
		if (inode->i_size < MAPUNIT) {
			printk(KERN_ERR
			       "cowloop - existing cowfile %s too small\n",
			       cowf);
			return -EINVAL;
		}

		cowlo_readcowraw(cowdev, cowdev->cowhead, MAPUNIT, (loff_t) 0);

		/*
		** verify if the existing file is really a cowfile
		*/
		if (cowdev->cowhead->magic != COWMAGIC) {
			printk(KERN_ERR
			       "cowloop - cowfile %s has incorrect format\n",
			       cowf);
			return -EINVAL;
		}

		/*
		** verify the cowhead version of the cowfile
		*/
		if (cowdev->cowhead->version > COWVERSION) {
			printk(KERN_ERR
			       "cowloop - cowfile %s newer than this driver\n",
			       cowf);
			return -EINVAL;
		}

		/*
		** make sure that this is not a packed cowfile
		*/
		if (cowdev->cowhead->flags & COWPACKED) {
			printk(KERN_ERR
			"cowloop - packed cowfile %s not accepted\n", cowf);
			return -EINVAL;
		}

		/*
		** verify if the cowfile has been properly closed
		*/
		if (cowdev->cowhead->flags & COWDIRTY) {
			/*
			** cowfile was not properly closed;
			** check if automatic recovery is required
			** (actual recovery will be done later on)
			*/
			if (!autorecover) {
				printk(KERN_ERR
				       "cowloop - cowfile %s is dirty "
				       "(not properly closed by rmmod?)\n",
				       cowf);
				printk(KERN_ERR
				       "cowloop - run cowrepair or specify "
				       "'option=r' to recover\n");
				return -EINVAL;
			}
		}

		/*
		** verify if the cowfile is really related to this rdofile
		** (size check first, fingerprint check next)
		*/
		if (cowdev->cowhead->rdoblocks != cowdev->numblocks) {
			printk(KERN_ERR
			       "cowloop - cowfile %s (size %lld) not related "
			       "to rdofile (size %lld)\n",
			       cowf,
			       (long long)cowdev->cowhead->rdoblocks <<MUSHIFT,
			       (long long)cowdev->numblocks <<MUSHIFT);
			return -EINVAL;
		}

		if (cowdev->cowhead->rdofingerprint != cowdev->fingerprint) {
			printk(KERN_ERR
			       "cowloop - cowfile %s not related to rdofile "
			       " (fingerprint err - rdofile modified?)\n", cowf);
			return -EINVAL;
		}
	} else {
		/*
		** new cowfile: determine the minimal size (cowhead+bitmap)
		** by writing one byte at the last position, creating a
		** sparse file of the required length
		*/
		offset = (loff_t) MAPUNIT + cowdev->mapsize - 1;

		if ( cowlo_writecowraw(cowdev, "", 1, offset) < 1) {
			printk(KERN_ERR
			       "cowloop - cannot set cowfile to size %lld\n",
			       offset+1);
			return -EINVAL;
		}

		/*
		** prepare new cowhead
		*/
		cowdev->cowhead->magic = COWMAGIC;
		cowdev->cowhead->version = COWVERSION;
		cowdev->cowhead->mapunit = MAPUNIT;
		cowdev->cowhead->mapsize = cowdev->mapsize;
		cowdev->cowhead->rdoblocks = cowdev->numblocks;
		cowdev->cowhead->rdofingerprint = cowdev->fingerprint;
		cowdev->cowhead->cowused = 0;

		/*
		** calculate start offset of data in cowfile,
		** rounded up to multiple of 4K to avoid
		** unnecessary disk-usage for written datablocks in
		** the sparsed cowfile on e.g. 4K filesystems
		*/
		cowdev->cowhead->doffset =
			((MAPUNIT+cowdev->mapsize+4095)>>12)<<12;
	}

	/* flags are reset here; COWDIRTY is re-raised on first write */
	cowdev->cowhead->flags = 0;

	DEBUGP(DCOW"cowloop - reserve space bitmap....\n");

	/*
	** reserve space in memory for the entire bitmap and
	** fill it with the bitmap-data from disk; the entire
	** bitmap is allocated in several chunks because kmalloc
	** has restrictions regarding the allowed size per kmalloc
	*/
	cowdev->mapcount = (cowdev->mapsize+MAPCHUNKSZ-1)/MAPCHUNKSZ;

	/*
	** the size of every bitmap chunk will be MAPCHUNKSZ bytes, except for
	** the last bitmap chunk: calculate remaining size for this chunk
	*/
	if (cowdev->mapsize % MAPCHUNKSZ == 0)
		cowdev->mapremain = MAPCHUNKSZ;
	else
		cowdev->mapremain = cowdev->mapsize % MAPCHUNKSZ;

	/*
	** allocate space to store all pointers for the bitmap-chunks
	** (initialize area with zeroes to allow proper undo)
	*/
	cowdev->mapcache = kmalloc(cowdev->mapcount * sizeof(char *),
							GFP_KERNEL);
	if (!cowdev->mapcache) {
		printk(KERN_ERR
		       "cowloop - can not allocate space for bitmap ptrs\n");
		return -ENOMEM;
	}

	memset(cowdev->mapcache, 0, cowdev->mapcount * sizeof(char *));

	/*
	** allocate space to store the bitmap-chunks themselves
	*/
	for (i=0; i < cowdev->mapcount; i++) {
		if (i < (cowdev->mapcount-1))
			*(cowdev->mapcache+i) = kmalloc(MAPCHUNKSZ, GFP_KERNEL);
		else
			*(cowdev->mapcache+i) = kmalloc(cowdev->mapremain,
							GFP_KERNEL);

		if (*(cowdev->mapcache+i) == NULL) {
			printk(KERN_ERR "cowloop - no space for bitmapchunk %ld"
			       " totmapsz=%ld, mapcnt=%d mapunit=%d\n",
			       i, cowdev->mapsize, cowdev->mapcount,
			       MAPUNIT);
			return -ENOMEM;
		}
	}

	DEBUGP(DCOW"cowloop - read bitmap from cow....\n");

	/*
	** read the entire bitmap from the cowfile into the in-memory cache;
	** count the number of blocks that are in use already
	** (statistical purposes)
	*/
	for (i=0, offset=MAPUNIT; i < cowdev->mapcount;
					i++, offset+=MAPCHUNKSZ) {
		unsigned long numbytes;

		if (i < (cowdev->mapcount-1))
			/*
			** full bitmap chunk
			*/
			numbytes = MAPCHUNKSZ;
		else
			/*
			** last bitmap chunk: might be partly filled
			*/
			numbytes = cowdev->mapremain;

		cowlo_readcowraw(cowdev, *(cowdev->mapcache+i),
						numbytes, offset);
	}

	/*
	** if the cowfile was dirty and automatic recovery is required,
	** reconstruct a proper bitmap in memory now
	*/
	if (cowdev->cowhead->flags & COWDIRTY) {
		unsigned long long blocknum;
		char databuf[MAPUNIT];
		unsigned long mapnum, mapbyte, mapbit;

		printk(KERN_NOTICE "cowloop - recover dirty cowfile %s....\n",
								cowf);

		/*
		** read all data blocks; a short read terminates the scan
		*/
		for (blocknum=0, rv=1, offset=0;
		     cowlo_readcow(cowdev, databuf, MAPUNIT, offset) > 0;
		     blocknum++, offset += MAPUNIT) {

			/*
			** if this datablock contains real data (not binary
			** zeroes), set the corresponding bit in the bitmap
			*/
			if ( memcmp(databuf, allzeroes, MAPUNIT) == 0)
				continue;

			mapnum = CALCMAP (blocknum);
			mapbyte = CALCBYTE(blocknum);
			mapbit = CALCBIT (blocknum);

			*(*(cowdev->mapcache+mapnum)+mapbyte) |= (1<<mapbit);
		}

		printk(KERN_NOTICE "cowloop - cowfile recovery completed\n");
	}

	/*
	** count all bits set in the bitmaps for statistical purposes
	*/
	for (i=0, cowdev->nrcowblocks = 0; i < cowdev->mapcount; i++) {
		long numbytes;
		char *p;

		if (i < (cowdev->mapcount-1))
			numbytes = MAPCHUNKSZ;
		else
			numbytes = cowdev->mapremain;

		p = *(cowdev->mapcache+i);

		for (numbytes--; numbytes >= 0; numbytes--, p++) {
			/*
			** for only eight checks the following construction
			** is faster than a loop-construction
			*/
			if ((*p) & 0x01) cowdev->nrcowblocks++;
			if ((*p) & 0x02) cowdev->nrcowblocks++;
			if ((*p) & 0x04) cowdev->nrcowblocks++;
			if ((*p) & 0x08) cowdev->nrcowblocks++;
			if ((*p) & 0x10) cowdev->nrcowblocks++;
			if ((*p) & 0x20) cowdev->nrcowblocks++;
			if ((*p) & 0x40) cowdev->nrcowblocks++;
			if ((*p) & 0x80) cowdev->nrcowblocks++;
		}
	}

	/*
	** consistency-check for number of bits set in bitmap
	** (skipped for a dirty cowfile: the stored count is stale then)
	*/
	if ( !(cowdev->cowhead->flags & COWDIRTY) &&
	     (cowdev->cowhead->cowused != cowdev->nrcowblocks) ) {
		printk(KERN_ERR "cowloop - inconsistent cowfile admi\n");
		return -EINVAL;
	}

	return 0;
}
2553
2554/*
2555** undo memory allocs and file opens issued so far
2556** related to the cowfile
2557*/
2558static void
2559cowlo_undo_opencow(struct cowloop_device *cowdev)
2560{
2561 int i;
2562
2563 if (cowdev->mapcache) {
2564 for (i=0; i < cowdev->mapcount; i++) {
2565 if (*(cowdev->mapcache+i) != NULL)
2566 kfree( *(cowdev->mapcache+i) );
2567 }
2568
2569 kfree(cowdev->mapcache);
2570 }
2571
2572 if (cowdev->cowhead)
2573 kfree(cowdev->cowhead);
2574
2575 if ( (cowdev->state & COWCOWOPEN) && (cowdev->cowfp) )
2576 filp_close(cowdev->cowfp, 0);
2577
2578 /*
2579 ** mark cowfile closed
2580 */
2581 cowdev->state &= ~COWCOWOPEN;
2582}
2583
2584/*
2585** flush the entire bitmap and the cowhead (clean) to the cowfile
2586**
2587** must be called with the cowdevices-lock set
2588*/
2589static void
2590cowlo_sync(void)
2591{
2592 int i, minor;
2593 loff_t offset;
2594 struct cowloop_device *cowdev;
2595
2596 for (minor=0; minor < maxcows; minor++) {
2597 cowdev = cowdevall[minor];
2598 if ( ! (cowdev->state & COWRWCOWOPEN) )
2599 continue;
2600
2601 for (i=0, offset=MAPUNIT; i < cowdev->mapcount;
2602 i++, offset += MAPCHUNKSZ) {
2603 unsigned long numbytes;
2604
2605 if (i < (cowdev->mapcount-1))
2606 /*
2607 ** full bitmap chunk
2608 */
2609 numbytes = MAPCHUNKSZ;
2610 else
2611 /*
2612 ** last bitmap chunk: might be partly filled
2613 */
2614 numbytes = cowdev->mapremain;
2615
2616 DEBUGP(DCOW
2617 "cowloop - flushing bitmap %2d (%3ld Kb)\n",
2618 i, numbytes/1024);
2619
2620 if (cowlo_writecowraw(cowdev, *(cowdev->mapcache+i),
2621 numbytes, offset) < numbytes) {
2622 break;
2623 }
2624 }
2625
2626 /*
2627 ** flush clean up-to-date cowhead to cowfile
2628 */
2629 cowdev->cowhead->cowused = cowdev->nrcowblocks;
2630 cowdev->cowhead->flags &= ~COWDIRTY;
2631
2632 DEBUGP(DCOW "cowloop - flushing cowhead (%3d Kb)\n",
2633 MAPUNIT/1024);
2634
2635 cowlo_writecowraw(cowdev, cowdev->cowhead, MAPUNIT, (loff_t) 0);
2636 }
2637}
2638
2639/*****************************************************************************/
2640/* Module loading/unloading */
2641/*****************************************************************************/
2642
2643/*
2644** called during insmod/modprobe
2645*/
2646static int __init
2647cowlo_init_module(void)
2648{
2649 int rv;
2650 int minor, uptocows;
2651
2652 revision[sizeof revision - 3] = '\0';
2653
2654 printk(KERN_NOTICE "cowloop - (C) 2009 ATComputing.nl - version: %s\n", &revision[11]);
2655 printk(KERN_NOTICE "cowloop - info: www.ATComputing.nl/cowloop\n");
2656
2657 memset(allzeroes, 0, MAPUNIT);
2658
2659 /*
2660 ** Setup administration for all possible cowdevices.
2661 ** Note that their minor numbers go from 0 to MAXCOWS-1 inclusive
2662 ** and minor == MAXCOWS-1 is reserved for the control device.
2663 */
2664 if ((maxcows < 1) || (maxcows > MAXCOWS)) {
2665 printk(KERN_WARNING
2666 "cowloop - maxcows exceeds maximum of %d\n", MAXCOWS);
2667
2668 maxcows = DFLCOWS;
2669 }
2670
2671 /* allocate room for a table with a pointer to each cowloop_device: */
2672 if ( (cowdevall = kmalloc(maxcows * sizeof(struct cowloop_device *),
2673 GFP_KERNEL)) == NULL) {
2674 printk(KERN_WARNING
2675 "cowloop - can not alloc table for %d devs\n", maxcows);
2676 uptocows = 0;
2677 rv = -ENOMEM;
2678 goto error_out;
2679 }
2680 memset(cowdevall, 0, maxcows * sizeof(struct cowloop_device *));
2681 /* then hook an actual cowloop_device struct to each pointer: */
2682 for (minor=0; minor < maxcows; minor++) {
2683 if ((cowdevall[minor] = kmalloc(sizeof(struct cowloop_device),
2684 GFP_KERNEL)) == NULL) {
2685 printk(KERN_WARNING
2686 "cowloop - can not alloc admin-struct for dev no %d\n", minor);
2687
2688 uptocows = minor; /* this is how far we got.... */
2689 rv = -ENOMEM;
2690 goto error_out;
2691 }
2692 memset(cowdevall[minor], 0, sizeof(struct cowloop_device));
2693 }
2694 uptocows = maxcows; /* we got all devices */
2695
2696 sema_init(&cowdevlock, 1);
2697
2698 /*
2699 ** register cowloop module
2700 */
2701 if ( register_blkdev(COWMAJOR, DEVICE_NAME) < 0) {
2702 printk(KERN_WARNING
2703 "cowloop - unable to get major %d for cowloop\n", COWMAJOR);
2704 rv = -EIO;
2705 goto error_out;
2706 }
2707
2708 /*
2709 ** create a directory below /proc to allocate a file
2710 ** for each cowdevice that is allocated later on
2711 */
2712 cowlo_procdir = proc_mkdir("cow", NULL);
2713
2714 /*
2715 ** check if a cowdevice has to be opened during insmod/modprobe;
2716 ** two parameters should be specified then: rdofile= and cowfile=
2717 */
2718 if( (rdofile[0] != '\0') && (cowfile[0] != '\0') ) {
2719 char *po = option;
2720 int wantrecover = 0;
2721
2722 /*
2723 ** check if automatic recovery is wanted
2724 */
2725 while (*po) {
2726 if (*po == 'r') {
2727 wantrecover = 1;
2728 break;
2729 }
2730 po++;
2731 }
2732
2733 /*
2734 ** open new cowdevice with minor number 0
2735 */
2736 if ( (rv = cowlo_openpair(rdofile, cowfile, wantrecover, 0))) {
2737 remove_proc_entry("cow", NULL);
2738 unregister_blkdev(COWMAJOR, DEVICE_NAME);
2739 goto error_out;
2740 }
2741 } else {
2742 /*
2743 ** check if only one parameter has been specified
2744 */
2745 if( (rdofile[0] != '\0') || (cowfile[0] != '\0') ) {
2746 printk(KERN_ERR
2747 "cowloop - only one filename specified\n");
2748 remove_proc_entry("cow", NULL);
2749 unregister_blkdev(COWMAJOR, DEVICE_NAME);
2750 rv = -EINVAL;
2751 goto error_out;
2752 }
2753 }
2754
2755 /*
2756 ** allocate fake disk as control channel to handle the requests
2757 ** to activate and deactivate cowdevices dynamically
2758 */
2759 if (!(cowctlgd = alloc_disk(1))) {
2760 printk(KERN_WARNING
2761 "cowloop - unable to alloc_disk for cowctl\n");
2762
2763 remove_proc_entry("cow", NULL);
2764 (void) cowlo_closepair(cowdevall[0]);
2765 unregister_blkdev(COWMAJOR, DEVICE_NAME);
2766 rv = -ENOMEM;
2767 goto error_out;
2768 }
2769
2770 spin_lock_init(&cowctlrqlock);
2771 cowctlgd->major = COWMAJOR;
2772 cowctlgd->first_minor = COWCTL;
2773 cowctlgd->minors = 1;
2774 cowctlgd->fops = &cowlo_fops;
2775 cowctlgd->private_data = NULL;
2776 /* the device has capacity 0, so there will be no q-requests */
2777 cowctlgd->queue = blk_init_queue(NULL, &cowctlrqlock);
2778 sprintf(cowctlgd->disk_name, "cowctl");
2779 set_capacity(cowctlgd, 0);
2780
2781 add_disk(cowctlgd);
2782
2783 printk(KERN_NOTICE "cowloop - number of configured cowdevices: %d\n",
2784 maxcows);
2785 if (rdofile[0] != '\0') {
2786 printk(KERN_NOTICE "cowloop - initialized on rdofile=%s\n",
2787 rdofile);
2788 } else {
2789 printk(KERN_NOTICE "cowloop - initialized without rdofile yet\n");
2790 }
2791 return 0;
2792
2793error_out:
2794 for (minor=0; minor < uptocows ; minor++) {
2795 kfree(cowdevall[minor]);
2796 }
2797 kfree(cowdevall);
2798 return rv;
2799}
2800
2801/*
2802** called during rmmod
2803*/
static void __exit
cowlo_cleanup_module(void)
{
	int minor;

	/*
	** flush bitmaps and cowheads to the cowfiles
	** (cowlo_sync must run with the cowdevices-lock held)
	*/
	down(&cowdevlock);
	cowlo_sync();
	up(&cowdevlock);

	/*
	** close all cowdevices
	*/
	for (minor=0; minor < maxcows; minor++)
		(void) cowlo_closepair(cowdevall[minor]);

	unregister_blkdev(COWMAJOR, DEVICE_NAME);

	/*
	** get rid of /proc/cow and unregister the driver
	*/
	remove_proc_entry("cow", NULL);

	/* release the per-device admin structs and the pointer table */
	for (minor = 0; minor < maxcows; minor++) {
		kfree(cowdevall[minor]);
	}
	kfree(cowdevall);

	/* NOTE(review): these comments look swapped — del_gendisk()
	** reverts add_disk() and put_disk() reverts alloc_disk() */
	del_gendisk(cowctlgd);  /* revert the alloc_disk() */
	put_disk   (cowctlgd);  /* revert the add_disk()   */
	blk_cleanup_queue(cowctlgd->queue); /* cleanup the empty queue */

	printk(KERN_NOTICE "cowloop - unloaded\n");
}
2840
2841module_init(cowlo_init_module);
2842module_exit(cowlo_cleanup_module);
diff --git a/drivers/staging/cowloop/cowloop.h b/drivers/staging/cowloop/cowloop.h
deleted file mode 100644
index bbd4a35ac667..000000000000
--- a/drivers/staging/cowloop/cowloop.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2** DO NOT MODIFY THESE VALUES (would make old cowfiles unusable)
3*/
4#define MAPUNIT 1024 /* blocksize for bit in bitmap */
5#define MUSHIFT 10 /* bitshift for bit in bitmap */
6#define MUMASK 0x3ff /* bitmask for bit in bitmap */
7
8#define COWMAGIC 0x574f437f /* byte-swapped '7f C O W' */
9#define COWDIRTY 0x01
10#define COWPACKED 0x02
11#define COWVERSION 1
12
13struct cowhead
14{
15 int magic; /* identifies a cowfile */
16 short version; /* version of cowhead */
17 short flags; /* flags indicating status */
18 unsigned long mapunit; /* blocksize per bit in bitmap */
19 unsigned long mapsize; /* total size of bitmap (bytes) */
20 unsigned long doffset; /* start-offset datablocks in cow */
21 unsigned long rdoblocks; /* size of related read-only file */
22 unsigned long rdofingerprint; /* fingerprint of read-only file */
23 unsigned long cowused; /* number of datablocks used in cow */
24};
25
26#define COWDEVDIR "/dev/cow/"
27#define COWDEVICE COWDEVDIR "%ld"
28#define COWCONTROL COWDEVDIR "ctl"
29
30#define MAXCOWS 1024
31#define COWCTL (MAXCOWS-1) /* minor number of /dev/cow/ctl */
32
33#define COWPROCDIR "/proc/cow/"
34#define COWPROCFILE COWPROCDIR "%d"
35
36/*
37** ioctl related stuff
38*/
39#define ANYDEV ((unsigned long)-1)
40
41struct cowpair
42{
43 unsigned char *rdofile; /* pathname of the rdofile */
44 unsigned char *cowfile; /* pathname of the cowfile */
45 unsigned short rdoflen; /* length of rdofile pathname */
46 unsigned short cowflen; /* length of cowfile pathname */
47 unsigned long device; /* requested/returned device number */
48};
49
50struct cowwatch
51{
52 int flags; /* request flags */
53 unsigned long device; /* requested device number */
54 unsigned long threshold; /* continue if free Kb < threshold */
55 unsigned long totalkb; /* ret: total filesystem size (Kb) */
56 unsigned long availkb; /* ret: free filesystem size (Kb) */
57};
58
59#define WATCHWAIT 0x01 /* block until threshold reached */
60
61#define COWSYNC _IO ('C', 1)
62#define COWMKPAIR _IOW ('C', 2, struct cowpair)
63#define COWRMPAIR _IOW ('C', 3, unsigned long)
64#define COWWATCH _IOW ('C', 4, struct cowwatch)
65#define COWCLOSE _IOW ('C', 5, unsigned long)
66#define COWRDOPEN _IOW ('C', 6, unsigned long)
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index ee1601026fb0..c24e4e0367a2 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio)
102 struct dst_node *n = q->queuedata; 102 struct dst_node *n = q->queuedata;
103 int err = -EIO; 103 int err = -EIO;
104 104
105 if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { 105 if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
106 /* 106 /*
107 * This is a dirty^Wnice hack, but if we complete this 107 * This is a dirty^Wnice hack, but if we complete this
108 * operation with -EOPNOTSUPP like intended, XFS 108 * operation with -EOPNOTSUPP like intended, XFS
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
index 6294d3814e72..2c3d65a622a7 100644
--- a/drivers/staging/et131x/et1310_address_map.h
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -223,7 +223,7 @@ typedef union _TXDMA_PR_NUM_DES_t {
223 223
224extern inline void add_10bit(u32 *v, int n) 224extern inline void add_10bit(u32 *v, int n)
225{ 225{
226 *v = INDEX10(*v + n); 226 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
227} 227}
228 228
229/* 229/*
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index 8f2e91fa0a86..10e21db57ac3 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -1177,12 +1177,20 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1177 1177
1178static inline u32 bump_fbr(u32 *fbr, u32 limit) 1178static inline u32 bump_fbr(u32 *fbr, u32 limit)
1179{ 1179{
1180 u32 v = *fbr; 1180 u32 v = *fbr;
1181 add_10bit(&v, 1); 1181 v++;
1182 if (v > limit) 1182 /* This works for all cases where limit < 1024. The 1023 case
1183 v = (*fbr & ~ET_DMA10_MASK) ^ ET_DMA10_WRAP; 1183 works because 1023++ is 1024 which means the if condition is not
1184 *fbr = v; 1184 taken but the carry of the bit into the wrap bit toggles the wrap
1185 return v; 1185 value correctly */
1186 if ((v & ET_DMA10_MASK) > limit) {
1187 v &= ~ET_DMA10_MASK;
1188 v ^= ET_DMA10_WRAP;
1189 }
1190 /* For the 1023 case */
1191 v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1192 *fbr = v;
1193 return v;
1186} 1194}
1187 1195
1188/** 1196/**
diff --git a/drivers/staging/hv/ChannelMgmt.h b/drivers/staging/hv/ChannelMgmt.h
index a839d8fe6cec..fa973d86b624 100644
--- a/drivers/staging/hv/ChannelMgmt.h
+++ b/drivers/staging/hv/ChannelMgmt.h
@@ -26,6 +26,7 @@
26#define _CHANNEL_MGMT_H_ 26#define _CHANNEL_MGMT_H_
27 27
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/timer.h>
29#include "RingBuffer.h" 30#include "RingBuffer.h"
30#include "VmbusChannelInterface.h" 31#include "VmbusChannelInterface.h"
31#include "VmbusPacketFormat.h" 32#include "VmbusPacketFormat.h"
@@ -54,7 +55,7 @@ enum vmbus_channel_message_type {
54 ChannelMessageViewRangeRemove = 18, 55 ChannelMessageViewRangeRemove = 18,
55#endif 56#endif
56 ChannelMessageCount 57 ChannelMessageCount
57} __attribute__((packed)); 58};
58 59
59struct vmbus_channel_message_header { 60struct vmbus_channel_message_header {
60 enum vmbus_channel_message_type MessageType; 61 enum vmbus_channel_message_type MessageType;
diff --git a/drivers/staging/hv/NetVsc.c b/drivers/staging/hv/NetVsc.c
index 1610b845198f..d384c0ddf069 100644
--- a/drivers/staging/hv/NetVsc.c
+++ b/drivers/staging/hv/NetVsc.c
@@ -1052,7 +1052,7 @@ static void NetVscOnReceive(struct hv_device *Device,
1052 */ 1052 */
1053 spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags); 1053 spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
1054 while (!list_empty(&netDevice->ReceivePacketList)) { 1054 while (!list_empty(&netDevice->ReceivePacketList)) {
1055 list_move_tail(&netDevice->ReceivePacketList, &listHead); 1055 list_move_tail(netDevice->ReceivePacketList.next, &listHead);
1056 if (++count == vmxferpagePacket->RangeCount + 1) 1056 if (++count == vmxferpagePacket->RangeCount + 1)
1057 break; 1057 break;
1058 } 1058 }
@@ -1071,7 +1071,7 @@ static void NetVscOnReceive(struct hv_device *Device,
1071 /* Return it to the freelist */ 1071 /* Return it to the freelist */
1072 spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags); 1072 spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
1073 for (i = count; i != 0; i--) { 1073 for (i = count; i != 0; i--) {
1074 list_move_tail(&listHead, 1074 list_move_tail(listHead.next,
1075 &netDevice->ReceivePacketList); 1075 &netDevice->ReceivePacketList);
1076 } 1076 }
1077 spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, 1077 spin_unlock_irqrestore(&netDevice->receive_packet_list_lock,
@@ -1085,8 +1085,7 @@ static void NetVscOnReceive(struct hv_device *Device,
1085 } 1085 }
1086 1086
1087 /* Remove the 1st packet to represent the xfer page packet itself */ 1087 /* Remove the 1st packet to represent the xfer page packet itself */
1088 xferpagePacket = list_entry(&listHead, struct xferpage_packet, 1088 xferpagePacket = (struct xferpage_packet*)listHead.next;
1089 ListEntry);
1090 list_del(&xferpagePacket->ListEntry); 1089 list_del(&xferpagePacket->ListEntry);
1091 1090
1092 /* This is how much we can satisfy */ 1091 /* This is how much we can satisfy */
@@ -1102,8 +1101,7 @@ static void NetVscOnReceive(struct hv_device *Device,
1102 1101
1103 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ 1102 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1104 for (i = 0; i < (count - 1); i++) { 1103 for (i = 0; i < (count - 1); i++) {
1105 netvscPacket = list_entry(&listHead, struct hv_netvsc_packet, 1104 netvscPacket = (struct hv_netvsc_packet*)listHead.next;
1106 ListEntry);
1107 list_del(&netvscPacket->ListEntry); 1105 list_del(&netvscPacket->ListEntry);
1108 1106
1109 /* Initialize the netvsc packet */ 1107 /* Initialize the netvsc packet */
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index 4d390b237742..dbfbde937a66 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,11 +1,17 @@
1TODO: 1TODO:
2 - fix remaining checkpatch warnings and errors 2 - fix remaining checkpatch warnings and errors
3 - use of /** when it is not a kerneldoc header
3 - remove RingBuffer.c to us in-kernel ringbuffer functions instead. 4 - remove RingBuffer.c to us in-kernel ringbuffer functions instead.
4 - audit the vmbus to verify it is working properly with the 5 - audit the vmbus to verify it is working properly with the
5 driver model 6 driver model
7 - convert vmbus driver interface function pointer tables
8 to constant, a.k.a vmbus_ops
6 - see if the vmbus can be merged with the other virtual busses 9 - see if the vmbus can be merged with the other virtual busses
7 in the kernel 10 in the kernel
8 - audit the network driver 11 - audit the network driver
12 - use existing net_device_stats struct in network device
13 - checking for carrier inside open is wrong, network device API
14 confusion??
9 - audit the block driver 15 - audit the block driver
10 - audit the scsi driver 16 - audit the scsi driver
11 17
diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c
index 8fe543bd9910..3a4793a0fd05 100644
--- a/drivers/staging/hv/osd.c
+++ b/drivers/staging/hv/osd.c
@@ -30,6 +30,7 @@
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/sched.h>
33#include <linux/wait.h> 34#include <linux/wait.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/workqueue.h> 36#include <linux/workqueue.h>
diff --git a/drivers/staging/hv/osd.h b/drivers/staging/hv/osd.h
index 9504604c72bd..ce064e8ea644 100644
--- a/drivers/staging/hv/osd.h
+++ b/drivers/staging/hv/osd.h
@@ -25,6 +25,7 @@
25#ifndef _OSD_H_ 25#ifndef _OSD_H_
26#define _OSD_H_ 26#define _OSD_H_
27 27
28#include <linux/workqueue.h>
28 29
29/* Defines */ 30/* Defines */
30#define ALIGN_UP(value, align) (((value) & (align-1)) ? \ 31#define ALIGN_UP(value, align) (((value) & (align-1)) ? \
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index 582318f10222..894eecfc63ca 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -507,12 +507,12 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
507 507
508 child_device_obj = &child_device_ctx->device_obj; 508 child_device_obj = &child_device_ctx->device_obj;
509 child_device_obj->context = context; 509 child_device_obj->context = context;
510 memcpy(&child_device_obj->deviceType, &type, sizeof(struct hv_guid)); 510 memcpy(&child_device_obj->deviceType, type, sizeof(struct hv_guid));
511 memcpy(&child_device_obj->deviceInstance, &instance, 511 memcpy(&child_device_obj->deviceInstance, instance,
512 sizeof(struct hv_guid)); 512 sizeof(struct hv_guid));
513 513
514 memcpy(&child_device_ctx->class_id, &type, sizeof(struct hv_guid)); 514 memcpy(&child_device_ctx->class_id, type, sizeof(struct hv_guid));
515 memcpy(&child_device_ctx->device_id, &instance, sizeof(struct hv_guid)); 515 memcpy(&child_device_ctx->device_id, instance, sizeof(struct hv_guid));
516 516
517 DPRINT_EXIT(VMBUS_DRV); 517 DPRINT_EXIT(VMBUS_DRV);
518 518
@@ -537,18 +537,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
537 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering", 537 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
538 child_device_ctx); 538 child_device_ctx);
539 539
540 /* Make sure we are not registered already */ 540 /* Set the device name. Otherwise, device_register() will fail. */
541 if (strlen(dev_name(&child_device_ctx->device)) != 0) {
542 DPRINT_ERR(VMBUS_DRV,
543 "child device (%p) already registered - busid %s",
544 child_device_ctx,
545 dev_name(&child_device_ctx->device));
546
547 ret = -1;
548 goto Cleanup;
549 }
550
551 /* Set the device bus id. Otherwise, device_register()will fail. */
552 dev_set_name(&child_device_ctx->device, "vmbus_0_%d", 541 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
553 atomic_inc_return(&device_num)); 542 atomic_inc_return(&device_num));
554 543
@@ -573,7 +562,6 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
573 DPRINT_INFO(VMBUS_DRV, "child device (%p) registered", 562 DPRINT_INFO(VMBUS_DRV, "child device (%p) registered",
574 &child_device_ctx->device); 563 &child_device_ctx->device);
575 564
576Cleanup:
577 DPRINT_EXIT(VMBUS_DRV); 565 DPRINT_EXIT(VMBUS_DRV);
578 566
579 return ret; 567 return ret;
@@ -623,8 +611,6 @@ static void vmbus_child_device_destroy(struct hv_device *device_obj)
623static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) 611static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
624{ 612{
625 struct device_context *device_ctx = device_to_device_context(device); 613 struct device_context *device_ctx = device_to_device_context(device);
626 int i = 0;
627 int len = 0;
628 int ret; 614 int ret;
629 615
630 DPRINT_ENTER(VMBUS_DRV); 616 DPRINT_ENTER(VMBUS_DRV);
@@ -644,8 +630,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
644 device_ctx->class_id.data[14], 630 device_ctx->class_id.data[14],
645 device_ctx->class_id.data[15]); 631 device_ctx->class_id.data[15]);
646 632
647 env->envp_idx = i;
648 env->buflen = len;
649 ret = add_uevent_var(env, "VMBUS_DEVICE_CLASS_GUID={" 633 ret = add_uevent_var(env, "VMBUS_DEVICE_CLASS_GUID={"
650 "%02x%02x%02x%02x-%02x%02x-%02x%02x-" 634 "%02x%02x%02x%02x-%02x%02x-%02x%02x-"
651 "%02x%02x%02x%02x%02x%02x%02x%02x}", 635 "%02x%02x%02x%02x%02x%02x%02x%02x}",
@@ -691,8 +675,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
691 if (ret) 675 if (ret)
692 return ret; 676 return ret;
693 677
694 env->envp[env->envp_idx] = NULL;
695
696 DPRINT_EXIT(VMBUS_DRV); 678 DPRINT_EXIT(VMBUS_DRV);
697 679
698 return 0; 680 return 0;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index beb99a547f09..4586650d65c3 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig IIO 5menuconfig IIO
6 tristate "Industrial I/O support" 6 tristate "Industrial I/O support"
7 depends on !S390
7 ---help--- 8 ---help---
8 The industrial I/O subsystem provides a unified framework for 9 The industrial I/O subsystem provides a unified framework for
9 drivers for many different types of embedded sensors using a 10 drivers for many different types of embedded sensors using a
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 1fa18f255814..768f44894d08 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -18,6 +18,8 @@
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/poll.h> 20#include <linux/poll.h>
21#include <linux/sched.h>
22#include <linux/wait.h>
21#include <linux/cdev.h> 23#include <linux/cdev.h>
22#include "iio.h" 24#include "iio.h"
23#include "trigger_consumer.h" 25#include "trigger_consumer.h"
diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c
index ea8a5efc19bc..fc2107f4c049 100644
--- a/drivers/staging/iio/light/tsl2561.c
+++ b/drivers/staging/iio/light/tsl2561.c
@@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client)
239 return tsl2561_powerdown(client); 239 return tsl2561_powerdown(client);
240} 240}
241 241
242static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END };
243
244I2C_CLIENT_INSMOD;
245
246static const struct i2c_device_id tsl2561_id[] = { 242static const struct i2c_device_id tsl2561_id[] = {
247 { "tsl2561", 0 }, 243 { "tsl2561", 0 },
248 { } 244 { }
diff --git a/drivers/staging/otus/Kconfig b/drivers/staging/otus/Kconfig
index d549d08fd495..f6cc2625e341 100644
--- a/drivers/staging/otus/Kconfig
+++ b/drivers/staging/otus/Kconfig
@@ -1,6 +1,6 @@
1config OTUS 1config OTUS
2 tristate "Atheros OTUS 802.11n USB wireless support" 2 tristate "Atheros OTUS 802.11n USB wireless support"
3 depends on USB && WLAN_80211 && MAC80211 3 depends on USB && WLAN && MAC80211
4 default N 4 default N
5 ---help--- 5 ---help---
6 Enable support for Atheros 802.11n USB hardware: 6 Enable support for Atheros 802.11n USB hardware:
diff --git a/drivers/staging/p9auth/p9auth.c b/drivers/staging/p9auth/p9auth.c
index 9111dcba37a1..8ccfff723eec 100644
--- a/drivers/staging/p9auth/p9auth.c
+++ b/drivers/staging/p9auth/p9auth.c
@@ -183,7 +183,7 @@ static ssize_t cap_write(struct file *filp, const char __user *buf,
183 user_buf_running = NULL; 183 user_buf_running = NULL;
184 hash_str = NULL; 184 hash_str = NULL;
185 node_ptr = kmalloc(sizeof(struct cap_node), GFP_KERNEL); 185 node_ptr = kmalloc(sizeof(struct cap_node), GFP_KERNEL);
186 user_buf = kzalloc(count, GFP_KERNEL); 186 user_buf = kzalloc(count+1, GFP_KERNEL);
187 if (!node_ptr || !user_buf) 187 if (!node_ptr || !user_buf)
188 goto out; 188 goto out;
189 189
@@ -207,6 +207,7 @@ static ssize_t cap_write(struct file *filp, const char __user *buf,
207 list_add(&(node_ptr->list), &(dev->head->list)); 207 list_add(&(node_ptr->list), &(dev->head->list));
208 node_ptr = NULL; 208 node_ptr = NULL;
209 } else { 209 } else {
210 char *tmpu;
210 if (!cap_devices[0].head || 211 if (!cap_devices[0].head ||
211 list_empty(&(cap_devices[0].head->list))) { 212 list_empty(&(cap_devices[0].head->list))) {
212 retval = -EINVAL; 213 retval = -EINVAL;
@@ -218,10 +219,10 @@ static ssize_t cap_write(struct file *filp, const char __user *buf,
218 * need to split it and hash 'user1@user2' using 'randomstring' 219 * need to split it and hash 'user1@user2' using 'randomstring'
219 * as the key. 220 * as the key.
220 */ 221 */
221 user_buf_running = kstrdup(user_buf, GFP_KERNEL); 222 tmpu = user_buf_running = kstrdup(user_buf, GFP_KERNEL);
222 source_user = strsep(&user_buf_running, "@"); 223 source_user = strsep(&tmpu, "@");
223 target_user = strsep(&user_buf_running, "@"); 224 target_user = strsep(&tmpu, "@");
224 rand_str = strsep(&user_buf_running, "@"); 225 rand_str = tmpu;
225 if (!source_user || !target_user || !rand_str) { 226 if (!source_user || !target_user || !rand_str) {
226 retval = -EINVAL; 227 retval = -EINVAL;
227 goto out; 228 goto out;
@@ -229,7 +230,8 @@ static ssize_t cap_write(struct file *filp, const char __user *buf,
229 230
230 /* hash the string user1@user2 with rand_str as the key */ 231 /* hash the string user1@user2 with rand_str as the key */
231 len = strlen(source_user) + strlen(target_user) + 1; 232 len = strlen(source_user) + strlen(target_user) + 1;
232 hash_str = kzalloc(len, GFP_KERNEL); 233 /* src, @, len, \0 */
234 hash_str = kzalloc(len+1, GFP_KERNEL);
233 strcat(hash_str, source_user); 235 strcat(hash_str, source_user);
234 strcat(hash_str, "@"); 236 strcat(hash_str, "@");
235 strcat(hash_str, target_user); 237 strcat(hash_str, target_user);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index dd7d3fde9699..4ce399b6d237 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -2071,11 +2071,15 @@ static void panel_detach(struct parport *port)
2071 return; 2071 return;
2072 } 2072 }
2073 2073
2074 if (keypad_enabled && keypad_initialized) 2074 if (keypad_enabled && keypad_initialized) {
2075 misc_deregister(&keypad_dev); 2075 misc_deregister(&keypad_dev);
2076 keypad_initialized = 0;
2077 }
2076 2078
2077 if (lcd_enabled && lcd_initialized) 2079 if (lcd_enabled && lcd_initialized) {
2078 misc_deregister(&lcd_dev); 2080 misc_deregister(&lcd_dev);
2081 lcd_initialized = 0;
2082 }
2079 2083
2080 parport_release(pprt); 2084 parport_release(pprt);
2081 parport_unregister_device(pprt); 2085 parport_unregister_device(pprt);
@@ -2211,13 +2215,16 @@ static void __exit panel_cleanup_module(void)
2211 del_timer(&scan_timer); 2215 del_timer(&scan_timer);
2212 2216
2213 if (pprt != NULL) { 2217 if (pprt != NULL) {
2214 if (keypad_enabled) 2218 if (keypad_enabled) {
2215 misc_deregister(&keypad_dev); 2219 misc_deregister(&keypad_dev);
2220 keypad_initialized = 0;
2221 }
2216 2222
2217 if (lcd_enabled) { 2223 if (lcd_enabled) {
2218 panel_lcd_print("\x0cLCD driver " PANEL_VERSION 2224 panel_lcd_print("\x0cLCD driver " PANEL_VERSION
2219 "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); 2225 "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
2220 misc_deregister(&lcd_dev); 2226 misc_deregister(&lcd_dev);
2227 lcd_initialized = 0;
2221 } 2228 }
2222 2229
2223 /* TODO: free all input signals */ 2230 /* TODO: free all input signals */
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
index 0d111ddfabb2..2eb8e3d43c4d 100644
--- a/drivers/staging/poch/poch.c
+++ b/drivers/staging/poch/poch.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/ioctl.h> 21#include <linux/ioctl.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/sched.h>
23 24
24#include "poch.h" 25#include "poch.h"
25 26
diff --git a/drivers/staging/rt2860/Kconfig b/drivers/staging/rt2860/Kconfig
index 7f44e5e72463..efe38e25c5ed 100644
--- a/drivers/staging/rt2860/Kconfig
+++ b/drivers/staging/rt2860/Kconfig
@@ -1,5 +1,5 @@
1config RT2860 1config RT2860
2 tristate "Ralink 2860 wireless support" 2 tristate "Ralink 2860 wireless support"
3 depends on PCI && X86 && WLAN_80211 3 depends on PCI && X86 && WLAN
4 ---help--- 4 ---help---
5 This is an experimental driver for the Ralink 2860 wireless chip. 5 This is an experimental driver for the Ralink 2860 wireless chip.
diff --git a/drivers/staging/rt2860/common/cmm_data_2860.c b/drivers/staging/rt2860/common/cmm_data_2860.c
index fb1735533b74..857ff450b6c9 100644
--- a/drivers/staging/rt2860/common/cmm_data_2860.c
+++ b/drivers/staging/rt2860/common/cmm_data_2860.c
@@ -363,6 +363,8 @@ int RtmpPCIMgmtKickOut(
363 ULONG SwIdx = pAd->MgmtRing.TxCpuIdx; 363 ULONG SwIdx = pAd->MgmtRing.TxCpuIdx;
364 364
365 pTxD = (PTXD_STRUC) pAd->MgmtRing.Cell[SwIdx].AllocVa; 365 pTxD = (PTXD_STRUC) pAd->MgmtRing.Cell[SwIdx].AllocVa;
366 if (!pTxD)
367 return 0;
366 368
367 pAd->MgmtRing.Cell[SwIdx].pNdisPacket = pPacket; 369 pAd->MgmtRing.Cell[SwIdx].pNdisPacket = pPacket;
368 pAd->MgmtRing.Cell[SwIdx].pNextNdisPacket = NULL; 370 pAd->MgmtRing.Cell[SwIdx].pNextNdisPacket = NULL;
diff --git a/drivers/staging/rt2860/common/cmm_info.c b/drivers/staging/rt2860/common/cmm_info.c
index 9d589c240ed0..019cc4474ce8 100644
--- a/drivers/staging/rt2860/common/cmm_info.c
+++ b/drivers/staging/rt2860/common/cmm_info.c
@@ -25,6 +25,7 @@
25 ************************************************************************* 25 *************************************************************************
26*/ 26*/
27 27
28#include <linux/sched.h>
28#include "../rt_config.h" 29#include "../rt_config.h"
29 30
30INT Show_SSID_Proc( 31INT Show_SSID_Proc(
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index b396a9b570e2..ed27b8545a1b 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -25,6 +25,7 @@
25 ************************************************************************* 25 *************************************************************************
26 */ 26 */
27 27
28#include <linux/sched.h>
28#include "rt_config.h" 29#include "rt_config.h"
29 30
30ULONG RTDebugLevel = RT_DEBUG_ERROR; 31ULONG RTDebugLevel = RT_DEBUG_ERROR;
diff --git a/drivers/staging/rt2870/Kconfig b/drivers/staging/rt2870/Kconfig
index 76841f6dea93..aea5c8221810 100644
--- a/drivers/staging/rt2870/Kconfig
+++ b/drivers/staging/rt2870/Kconfig
@@ -1,5 +1,5 @@
1config RT2870 1config RT2870
2 tristate "Ralink 2870/3070 wireless support" 2 tristate "Ralink 2870/3070 wireless support"
3 depends on USB && X86 && WLAN_80211 3 depends on USB && X86 && WLAN
4 ---help--- 4 ---help---
5 This is an experimental driver for the Ralink xx70 wireless chips. 5 This is an experimental driver for the Ralink xx70 wireless chips.
diff --git a/drivers/staging/rt3090/Kconfig b/drivers/staging/rt3090/Kconfig
index 255e8eaa4836..2b3f745d72b7 100644
--- a/drivers/staging/rt3090/Kconfig
+++ b/drivers/staging/rt3090/Kconfig
@@ -1,5 +1,5 @@
1config RT3090 1config RT3090
2 tristate "Ralink 3090 wireless support" 2 tristate "Ralink 3090 wireless support"
3 depends on PCI && X86 && WLAN_80211 3 depends on PCI && X86 && WLAN
4 ---help--- 4 ---help---
5 This is an experimental driver for the Ralink 3090 wireless chip. 5 This is an experimental driver for the Ralink 3090 wireless chip.
diff --git a/drivers/staging/rt3090/common/cmm_info.c b/drivers/staging/rt3090/common/cmm_info.c
index 5be0714666cb..3e51e98b474c 100644
--- a/drivers/staging/rt3090/common/cmm_info.c
+++ b/drivers/staging/rt3090/common/cmm_info.c
@@ -34,6 +34,7 @@
34 --------- ---------- ---------------------------------------------- 34 --------- ---------- ----------------------------------------------
35 */ 35 */
36 36
37#include <linux/sched.h>
37#include "../rt_config.h" 38#include "../rt_config.h"
38 39
39 40
diff --git a/drivers/staging/rt3090/rt_linux.c b/drivers/staging/rt3090/rt_linux.c
index d2241ecdf583..9b94aa6eb904 100644
--- a/drivers/staging/rt3090/rt_linux.c
+++ b/drivers/staging/rt3090/rt_linux.c
@@ -25,6 +25,7 @@
25 ************************************************************************* 25 *************************************************************************
26 */ 26 */
27 27
28#include <linux/sched.h>
28#include "rt_config.h" 29#include "rt_config.h"
29 30
30ULONG RTDebugLevel = RT_DEBUG_ERROR; 31ULONG RTDebugLevel = RT_DEBUG_ERROR;
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 236e42725447..203c79b8180f 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,6 +1,6 @@
1config RTL8187SE 1config RTL8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver" 2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI && WLAN
4 depends on WIRELESS_EXT 4 depends on WIRELESS_EXT
5 default N 5 default N
6 ---help--- 6 ---help---
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index 013c3e19ae25..4c5d63fd5833 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -53,10 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
53 53
54 list_del(ptr); 54 list_del(ptr);
55 55
56 if (entry->ops) { 56 if (entry->ops)
57 entry->ops->deinit(entry->priv); 57 entry->ops->deinit(entry->priv);
58 module_put(entry->ops->owner);
59 }
60 kfree(entry); 58 kfree(entry);
61 } 59 }
62} 60}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
index 6fbe4890cb66..18392fce487d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
@@ -189,10 +189,8 @@ void free_ieee80211(struct net_device *dev)
189 for (i = 0; i < WEP_KEYS; i++) { 189 for (i = 0; i < WEP_KEYS; i++) {
190 struct ieee80211_crypt_data *crypt = ieee->crypt[i]; 190 struct ieee80211_crypt_data *crypt = ieee->crypt[i];
191 if (crypt) { 191 if (crypt) {
192 if (crypt->ops) { 192 if (crypt->ops)
193 crypt->ops->deinit(crypt->priv); 193 crypt->ops->deinit(crypt->priv);
194 module_put(crypt->ops->owner);
195 }
196 kfree(crypt); 194 kfree(crypt);
197 ieee->crypt[i] = NULL; 195 ieee->crypt[i] = NULL;
198 } 196 }
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 59b2ab48cdcf..334e4c7ec61b 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -2839,16 +2839,12 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
2839 goto skip_host_crypt; 2839 goto skip_host_crypt;
2840 2840
2841 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 2841 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2842 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { 2842 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0)
2843 request_module("ieee80211_crypt_wep");
2844 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 2843 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2845 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { 2844 else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0)
2846 request_module("ieee80211_crypt_tkip");
2847 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 2845 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2848 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { 2846 else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0)
2849 request_module("ieee80211_crypt_ccmp");
2850 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 2847 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2851 }
2852 if (ops == NULL) { 2848 if (ops == NULL) {
2853 printk("unknown crypto alg '%s'\n", param->u.crypt.alg); 2849 printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
2854 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; 2850 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
@@ -2869,7 +2865,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
2869 } 2865 }
2870 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 2866 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
2871 new_crypt->ops = ops; 2867 new_crypt->ops = ops;
2872 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 2868 if (new_crypt->ops)
2873 new_crypt->priv = 2869 new_crypt->priv =
2874 new_crypt->ops->init(param->u.crypt.idx); 2870 new_crypt->ops->init(param->u.crypt.idx);
2875 2871
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index 8d8bdd0a130e..a08b97a09512 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -331,12 +331,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
331 return -ENOMEM; 331 return -ENOMEM;
332 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 332 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
333 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 333 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
334 if (!new_crypt->ops) { 334 if (!new_crypt->ops)
335 request_module("ieee80211_crypt_wep");
336 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 335 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
337 }
338 336
339 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 337 if (new_crypt->ops)
340 new_crypt->priv = new_crypt->ops->init(key); 338 new_crypt->priv = new_crypt->ops->init(key);
341 339
342 if (!new_crypt->ops || !new_crypt->priv) { 340 if (!new_crypt->ops || !new_crypt->priv) {
@@ -483,7 +481,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
483 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 481 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
484 int i, idx, ret = 0; 482 int i, idx, ret = 0;
485 int group_key = 0; 483 int group_key = 0;
486 const char *alg, *module; 484 const char *alg;
487 struct ieee80211_crypto_ops *ops; 485 struct ieee80211_crypto_ops *ops;
488 struct ieee80211_crypt_data **crypt; 486 struct ieee80211_crypt_data **crypt;
489 487
@@ -539,15 +537,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
539 switch (ext->alg) { 537 switch (ext->alg) {
540 case IW_ENCODE_ALG_WEP: 538 case IW_ENCODE_ALG_WEP:
541 alg = "WEP"; 539 alg = "WEP";
542 module = "ieee80211_crypt_wep";
543 break; 540 break;
544 case IW_ENCODE_ALG_TKIP: 541 case IW_ENCODE_ALG_TKIP:
545 alg = "TKIP"; 542 alg = "TKIP";
546 module = "ieee80211_crypt_tkip";
547 break; 543 break;
548 case IW_ENCODE_ALG_CCMP: 544 case IW_ENCODE_ALG_CCMP:
549 alg = "CCMP"; 545 alg = "CCMP";
550 module = "ieee80211_crypt_ccmp";
551 break; 546 break;
552 default: 547 default:
553 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 548 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -558,10 +553,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
558// printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg); 553// printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);
559 554
560 ops = ieee80211_get_crypto_ops(alg); 555 ops = ieee80211_get_crypto_ops(alg);
561 if (ops == NULL) { 556 if (ops == NULL)
562 request_module(module);
563 ops = ieee80211_get_crypto_ops(alg); 557 ops = ieee80211_get_crypto_ops(alg);
564 }
565 if (ops == NULL) { 558 if (ops == NULL) {
566 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 559 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
567 dev->name, ext->alg); 560 dev->name, ext->alg);
@@ -581,7 +574,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
581 goto done; 574 goto done;
582 } 575 }
583 new_crypt->ops = ops; 576 new_crypt->ops = ops;
584 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 577 if (new_crypt->ops)
585 new_crypt->priv = new_crypt->ops->init(idx); 578 new_crypt->priv = new_crypt->ops->init(idx);
586 if (new_crypt->priv == NULL) { 579 if (new_crypt->priv == NULL) {
587 kfree(new_crypt); 580 kfree(new_crypt);
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 3100aa58c940..37e4fde45073 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192E 1config RTL8192E
2 tristate "RealTek RTL8192E Wireless LAN NIC driver" 2 tristate "RealTek RTL8192E Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI && WLAN
4 depends on WIRELESS_EXT 4 depends on WIRELESS_EXT
5 default N 5 default N
6 ---help--- 6 ---help---
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
index 1a8ea8a40c3c..b1c54932da3e 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
@@ -53,14 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
53 53
54 list_del(ptr); 54 list_del(ptr);
55 55
56 if (entry->ops) { 56 if (entry->ops)
57 entry->ops->deinit(entry->priv); 57 entry->ops->deinit(entry->priv);
58#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
59 module_put(entry->ops->owner);
60#else
61 __MOD_DEC_USE_COUNT(entry->ops->owner);
62#endif
63 }
64 kfree(entry); 58 kfree(entry);
65 } 59 }
66} 60}
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index 16256a31f993..12c2a18e1fa2 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -242,14 +242,8 @@ void free_ieee80211(struct net_device *dev)
242 for (i = 0; i < WEP_KEYS; i++) { 242 for (i = 0; i < WEP_KEYS; i++) {
243 struct ieee80211_crypt_data *crypt = ieee->crypt[i]; 243 struct ieee80211_crypt_data *crypt = ieee->crypt[i];
244 if (crypt) { 244 if (crypt) {
245 if (crypt->ops) { 245 if (crypt->ops)
246 crypt->ops->deinit(crypt->priv); 246 crypt->ops->deinit(crypt->priv);
247#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
248 module_put(crypt->ops->owner);
249#else
250 __MOD_DEC_USE_COUNT(crypt->ops->owner);
251#endif
252 }
253 kfree(crypt); 247 kfree(crypt);
254 ieee->crypt[i] = NULL; 248 ieee->crypt[i] = NULL;
255 } 249 }
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index 2fc04df872ca..eae7c4579a68 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -3284,17 +3284,14 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
3284 goto skip_host_crypt; 3284 goto skip_host_crypt;
3285 3285
3286 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3286 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3287 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { 3287 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0)
3288 request_module("ieee80211_crypt_wep");
3289 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3288 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3290 //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place 3289 /* set WEP40 first, it will be modified according to WEP104 or
3291 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { 3290 * WEP40 at other place */
3292 request_module("ieee80211_crypt_tkip"); 3291 else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0)
3293 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3292 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3294 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { 3293 else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0)
3295 request_module("ieee80211_crypt_ccmp");
3296 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3294 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3297 }
3298 if (ops == NULL) { 3295 if (ops == NULL) {
3299 printk("unknown crypto alg '%s'\n", param->u.crypt.alg); 3296 printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
3300 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; 3297 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
@@ -3315,11 +3312,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
3315 } 3312 }
3316 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 3313 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
3317 new_crypt->ops = ops; 3314 new_crypt->ops = ops;
3318#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 3315 if (new_crypt->ops)
3319 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
3320#else
3321 if (new_crypt->ops && try_inc_mod_count(new_crypt->ops->owner))
3322#endif
3323 new_crypt->priv = 3316 new_crypt->priv =
3324 new_crypt->ops->init(param->u.crypt.idx); 3317 new_crypt->ops->init(param->u.crypt.idx);
3325 3318
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
index 223483126b0e..4e34a1f4c66b 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
@@ -482,15 +482,9 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
482 return -ENOMEM; 482 return -ENOMEM;
483 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 483 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
484 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 484 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
485 if (!new_crypt->ops) { 485 if (!new_crypt->ops)
486 request_module("ieee80211_crypt_wep");
487 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 486 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
488 } 487 if (new_crypt->ops)
489#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
490 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
491#else
492 if (new_crypt->ops && try_inc_mod_count(new_crypt->ops->owner))
493#endif
494 new_crypt->priv = new_crypt->ops->init(key); 488 new_crypt->priv = new_crypt->ops->init(key);
495 489
496 if (!new_crypt->ops || !new_crypt->priv) { 490 if (!new_crypt->ops || !new_crypt->priv) {
@@ -644,7 +638,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
644 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 638 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
645 int i, idx; 639 int i, idx;
646 int group_key = 0; 640 int group_key = 0;
647 const char *alg, *module; 641 const char *alg;
648 struct ieee80211_crypto_ops *ops; 642 struct ieee80211_crypto_ops *ops;
649 struct ieee80211_crypt_data **crypt; 643 struct ieee80211_crypt_data **crypt;
650 644
@@ -711,15 +705,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
711 switch (ext->alg) { 705 switch (ext->alg) {
712 case IW_ENCODE_ALG_WEP: 706 case IW_ENCODE_ALG_WEP:
713 alg = "WEP"; 707 alg = "WEP";
714 module = "ieee80211_crypt_wep";
715 break; 708 break;
716 case IW_ENCODE_ALG_TKIP: 709 case IW_ENCODE_ALG_TKIP:
717 alg = "TKIP"; 710 alg = "TKIP";
718 module = "ieee80211_crypt_tkip";
719 break; 711 break;
720 case IW_ENCODE_ALG_CCMP: 712 case IW_ENCODE_ALG_CCMP:
721 alg = "CCMP"; 713 alg = "CCMP";
722 module = "ieee80211_crypt_ccmp";
723 break; 714 break;
724 default: 715 default:
725 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 716 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -730,10 +721,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
730 printk("alg name:%s\n",alg); 721 printk("alg name:%s\n",alg);
731 722
732 ops = ieee80211_get_crypto_ops(alg); 723 ops = ieee80211_get_crypto_ops(alg);
733 if (ops == NULL) { 724 if (ops == NULL)
734 request_module(module);
735 ops = ieee80211_get_crypto_ops(alg); 725 ops = ieee80211_get_crypto_ops(alg);
736 }
737 if (ops == NULL) { 726 if (ops == NULL) {
738 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 727 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
739 dev->name, ext->alg); 728 dev->name, ext->alg);
@@ -758,7 +747,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
758 goto done; 747 goto done;
759 } 748 }
760 new_crypt->ops = ops; 749 new_crypt->ops = ops;
761 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 750 if (new_crypt->ops)
762 new_crypt->priv = new_crypt->ops->init(idx); 751 new_crypt->priv = new_crypt->ops->init(idx);
763 if (new_crypt->priv == NULL) { 752 if (new_crypt->priv == NULL) {
764 kfree(new_crypt); 753 kfree(new_crypt);
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index d4fa65489655..b0802a7aeb5f 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -46,6 +46,7 @@
46#undef DEBUG_TX_DESC 46#undef DEBUG_TX_DESC
47 47
48//#define CONFIG_RTL8192_IO_MAP 48//#define CONFIG_RTL8192_IO_MAP
49#include <linux/vmalloc.h>
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
50#include "r8192E_hw.h" 51#include "r8192E_hw.h"
51#include "r8192E.h" 52#include "r8192E.h"
diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig
index 770f41280f21..b8c95f942069 100644
--- a/drivers/staging/rtl8192su/Kconfig
+++ b/drivers/staging/rtl8192su/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192SU 1config RTL8192SU
2 tristate "RealTek RTL8192SU Wireless LAN NIC driver" 2 tristate "RealTek RTL8192SU Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI && WLAN
4 depends on WIRELESS_EXT 4 depends on WIRELESS_EXT
5 default N 5 default N
6 ---help--- 6 ---help---
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
index d76a54d59d2f..521e7b989934 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
@@ -53,10 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
53 53
54 list_del(ptr); 54 list_del(ptr);
55 55
56 if (entry->ops) { 56 if (entry->ops)
57 entry->ops->deinit(entry->priv); 57 entry->ops->deinit(entry->priv);
58 module_put(entry->ops->owner);
59 }
60 kfree(entry); 58 kfree(entry);
61 } 59 }
62} 60}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
index 68dc8fa094cc..c3383bb8b760 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
@@ -216,10 +216,8 @@ void free_ieee80211(struct net_device *dev)
216 for (i = 0; i < WEP_KEYS; i++) { 216 for (i = 0; i < WEP_KEYS; i++) {
217 struct ieee80211_crypt_data *crypt = ieee->crypt[i]; 217 struct ieee80211_crypt_data *crypt = ieee->crypt[i];
218 if (crypt) { 218 if (crypt) {
219 if (crypt->ops) { 219 if (crypt->ops)
220 crypt->ops->deinit(crypt->priv); 220 crypt->ops->deinit(crypt->priv);
221 module_put(crypt->ops->owner);
222 }
223 kfree(crypt); 221 kfree(crypt);
224 ieee->crypt[i] = NULL; 222 ieee->crypt[i] = NULL;
225 } 223 }
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index c64ae03f68a0..fd8e11252f1b 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -3026,17 +3026,14 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
3026 goto skip_host_crypt; 3026 goto skip_host_crypt;
3027 3027
3028 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3028 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3029 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { 3029 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0)
3030 request_module("ieee80211_crypt_wep");
3031 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3030 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3032 //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place 3031 /* set WEP40 first, it will be modified according to WEP104 or
3033 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { 3032 * WEP40 at other place */
3034 request_module("ieee80211_crypt_tkip"); 3033 else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0)
3035 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3034 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3036 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { 3035 else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0)
3037 request_module("ieee80211_crypt_ccmp");
3038 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3036 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3039 }
3040 if (ops == NULL) { 3037 if (ops == NULL) {
3041 printk("unknown crypto alg '%s'\n", param->u.crypt.alg); 3038 printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
3042 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; 3039 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
@@ -3058,7 +3055,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
3058 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 3055 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
3059 new_crypt->ops = ops; 3056 new_crypt->ops = ops;
3060 3057
3061 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 3058 if (new_crypt->ops)
3062 new_crypt->priv = 3059 new_crypt->priv =
3063 new_crypt->ops->init(param->u.crypt.idx); 3060 new_crypt->ops->init(param->u.crypt.idx);
3064 3061
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
index 107759024335..6146c6435dde 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
@@ -358,11 +358,9 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
358 return -ENOMEM; 358 return -ENOMEM;
359 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 359 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
360 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 360 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
361 if (!new_crypt->ops) { 361 if (!new_crypt->ops)
362 request_module("ieee80211_crypt_wep");
363 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 362 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
364 } 363 if (new_crypt->ops)
365 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
366 new_crypt->priv = new_crypt->ops->init(key); 364 new_crypt->priv = new_crypt->ops->init(key);
367 365
368 if (!new_crypt->ops || !new_crypt->priv) { 366 if (!new_crypt->ops || !new_crypt->priv) {
@@ -507,7 +505,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
507 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
508 int i, idx; 506 int i, idx;
509 int group_key = 0; 507 int group_key = 0;
510 const char *alg, *module; 508 const char *alg;
511 struct ieee80211_crypto_ops *ops; 509 struct ieee80211_crypto_ops *ops;
512 struct ieee80211_crypt_data **crypt; 510 struct ieee80211_crypt_data **crypt;
513 511
@@ -570,15 +568,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
570 switch (ext->alg) { 568 switch (ext->alg) {
571 case IW_ENCODE_ALG_WEP: 569 case IW_ENCODE_ALG_WEP:
572 alg = "WEP"; 570 alg = "WEP";
573 module = "ieee80211_crypt_wep";
574 break; 571 break;
575 case IW_ENCODE_ALG_TKIP: 572 case IW_ENCODE_ALG_TKIP:
576 alg = "TKIP"; 573 alg = "TKIP";
577 module = "ieee80211_crypt_tkip";
578 break; 574 break;
579 case IW_ENCODE_ALG_CCMP: 575 case IW_ENCODE_ALG_CCMP:
580 alg = "CCMP"; 576 alg = "CCMP";
581 module = "ieee80211_crypt_ccmp";
582 break; 577 break;
583 default: 578 default:
584 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 579 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -589,10 +584,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
589 printk("alg name:%s\n",alg); 584 printk("alg name:%s\n",alg);
590 585
591 ops = ieee80211_get_crypto_ops(alg); 586 ops = ieee80211_get_crypto_ops(alg);
592 if (ops == NULL) { 587 if (ops == NULL)
593 request_module("%s", module);
594 ops = ieee80211_get_crypto_ops(alg); 588 ops = ieee80211_get_crypto_ops(alg);
595 }
596 if (ops == NULL) { 589 if (ops == NULL) {
597 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", 590 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
598 dev->name, ext->alg); 591 dev->name, ext->alg);
@@ -612,7 +605,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
612 goto done; 605 goto done;
613 } 606 }
614 new_crypt->ops = ops; 607 new_crypt->ops = ops;
615 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 608 if (new_crypt->ops)
616 new_crypt->priv = new_crypt->ops->init(idx); 609 new_crypt->priv = new_crypt->ops->init(idx);
617 if (new_crypt->priv == NULL) { 610 if (new_crypt->priv == NULL) {
618 kfree(new_crypt); 611 kfree(new_crypt);
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 87f8a1192762..f890a16096c0 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -38,6 +38,7 @@
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/poll.h> 39#include <linux/poll.h>
40#include <linux/wait.h> 40#include <linux/wait.h>
41#include <linux/sched.h>
41#include <linux/pci.h> 42#include <linux/pci.h>
42#include <linux/firmware.h> 43#include <linux/firmware.h>
43#include <asm/ioctl.h> 44#include <asm/ioctl.h>
diff --git a/drivers/staging/stlc45xx/Kconfig b/drivers/staging/stlc45xx/Kconfig
deleted file mode 100644
index 947fb75a9c68..000000000000
--- a/drivers/staging/stlc45xx/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
1config STLC45XX
2 tristate "stlc4550/4560 support"
3 depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS
4 ---help---
5 This is a driver for stlc4550 and stlc4560 chipsets.
6
7 To compile this driver as a module, choose M here: the module will be
8 called stlc45xx. If unsure, say N.
diff --git a/drivers/staging/stlc45xx/Makefile b/drivers/staging/stlc45xx/Makefile
deleted file mode 100644
index 7ee32903055a..000000000000
--- a/drivers/staging/stlc45xx/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_STLC45XX) += stlc45xx.o
diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c
deleted file mode 100644
index be99eb33d817..000000000000
--- a/drivers/staging/stlc45xx/stlc45xx.c
+++ /dev/null
@@ -1,2594 +0,0 @@
1/*
2 * This file is part of stlc45xx
3 *
4 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include "stlc45xx.h"
25
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29#include <linux/firmware.h>
30#include <linux/delay.h>
31#include <linux/irq.h>
32#include <linux/spi/spi.h>
33#include <linux/etherdevice.h>
34#include <linux/gpio.h>
35#include <linux/moduleparam.h>
36
37#include "stlc45xx_lmac.h"
38
39/*
40 * gpios should be handled in board files and provided via platform data,
41 * but because it's currently impossible for stlc45xx to have a header file
42 * in include/linux, let's use module paramaters for now
43 */
44static int stlc45xx_gpio_power = 97;
45module_param(stlc45xx_gpio_power, int, 0444);
46MODULE_PARM_DESC(stlc45xx_gpio_power, "stlc45xx gpio number for power line");
47
48static int stlc45xx_gpio_irq = 87;
49module_param(stlc45xx_gpio_irq, int, 0444);
50MODULE_PARM_DESC(stlc45xx_gpio_irq, "stlc45xx gpio number for irq line");
51
52static const u8 default_cal_channels[] = {
53 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
54 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x09,
55 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10,
56 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xe0, 0x00, 0xe0, 0x00,
57 0xe0, 0x00, 0xe0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0,
58 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42,
59 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00,
60 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9,
61 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00,
62 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
63 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0,
64 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
65 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d,
66 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa,
67 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00,
68 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17,
69 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00,
70 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00,
71 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00,
72 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
73 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, 0x71, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8,
82 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
83 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0,
84 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6,
85 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00,
86 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
87 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0,
88 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
89 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca,
90 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4,
91 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00,
92 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21,
93 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00,
94 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00,
95 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00,
96 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
97 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0,
98 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
99 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
100 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
105 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80,
106 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76,
108 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01,
109 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0,
110 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
111 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0,
112 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
113 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37,
114 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc,
115 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00,
116 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b,
117 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00,
118 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00,
119 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00,
120 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
121 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0,
122 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
123 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a,
124 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d,
125 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
126 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x09, 0x00, 0x00, 0xc9, 0xff,
135 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10,
136 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
137 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab,
138 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb,
139 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00,
140 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33,
141 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00,
142 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00,
143 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00,
144 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
145 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0,
146 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
147 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7,
148 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17,
149 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00,
150 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d,
151 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00,
152 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
158 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80,
159 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
161 0x80, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00,
162 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00,
163 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0,
164 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42,
165 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00,
166 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01,
167 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00,
168 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
169 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0,
170 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
171 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0,
172 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21,
173 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00,
174 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17,
175 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00,
176 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
177 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0,
178 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
179 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x09, 0x00, 0x00, 0xc9,
188 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01,
189 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0,
190 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01,
191 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00,
192 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
193 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0,
194 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
195 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb,
196 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b,
197 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00,
198 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21,
199 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00,
200 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
201 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0,
202 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
203 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0,
204 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96,
205 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
206 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
212 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x8a, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00,
215 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0,
216 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
217 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0,
218 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
219 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22,
220 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33,
221 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00,
222 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b,
223 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00,
224 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
225 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0,
226 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
227 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0,
228 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0,
229 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00,
230 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d,
231 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
232 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x09, 0x00, 0x00,
241 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10,
242 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00,
243 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54,
244 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42,
245 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00,
246 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33,
247 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00,
248 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
249 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0,
250 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
251 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0,
252 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa,
253 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00,
254 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17,
255 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00,
256 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c,
257 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00,
258 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
259 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
265 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x94, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00,
268 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01,
269 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0,
270 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42,
271 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00,
272 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
273 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0,
274 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc,
275 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0,
276 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4,
277 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00,
278 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21,
279 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00,
280 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d,
281 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00,
282 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
283 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0,
284 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96,
285 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x09, 0x00,
294 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01,
295 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0,
296 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00,
297 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0,
298 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb,
299 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0,
300 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc,
301 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00,
302 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b,
303 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00,
304 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54,
305 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00,
306 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
307 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0,
308 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0,
309 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06,
310 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96,
311 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
312 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x9e, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff,
321 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10,
322 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00,
323 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0,
324 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb,
325 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00,
326 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33,
327 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00,
328 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79,
329 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00,
330 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4,
331 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0,
332 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
333 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9,
334 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0,
335 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00,
336 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d,
337 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00,
338 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00,
345 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
358 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
398 0x00 };
399
400static const u8 default_cal_rssi[] = {
401 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72,
402 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00,
403 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a,
404 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe,
405 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00,
406 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01,
407 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a,
408 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x00, 0x00 };
411
412static void stlc45xx_tx_edcf(struct stlc45xx *stlc);
413static void stlc45xx_tx_setup(struct stlc45xx *stlc);
414static void stlc45xx_tx_scan(struct stlc45xx *stlc);
415static void stlc45xx_tx_psm(struct stlc45xx *stlc, bool enable);
416static int stlc45xx_tx_nullfunc(struct stlc45xx *stlc, bool powersave);
417static int stlc45xx_tx_pspoll(struct stlc45xx *stlc, bool powersave);
418
419static ssize_t stlc45xx_sysfs_show_cal_rssi(struct device *dev,
420 struct device_attribute *attr,
421 char *buf)
422{
423 struct stlc45xx *stlc = dev_get_drvdata(dev);
424 ssize_t len;
425
426 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
427
428 len = PAGE_SIZE;
429
430 mutex_lock(&stlc->mutex);
431
432 if (stlc->cal_rssi)
433 hex_dump_to_buffer(stlc->cal_rssi, RSSI_CAL_ARRAY_LEN, 16,
434 2, buf, len, 0);
435 mutex_unlock(&stlc->mutex);
436
437 len = strlen(buf);
438
439 return len;
440}
441
442static ssize_t stlc45xx_sysfs_store_cal_rssi(struct device *dev,
443 struct device_attribute *attr,
444 const char *buf, size_t count)
445{
446 struct stlc45xx *stlc = dev_get_drvdata(dev);
447
448 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
449
450 mutex_lock(&stlc->mutex);
451
452 if (count != RSSI_CAL_ARRAY_LEN) {
453 stlc45xx_error("invalid cal_rssi length: %zu", count);
454 count = 0;
455 goto out_unlock;
456 }
457
458 kfree(stlc->cal_rssi);
459
460 stlc->cal_rssi = kmemdup(buf, RSSI_CAL_ARRAY_LEN, GFP_KERNEL);
461
462 if (!stlc->cal_rssi) {
463 stlc45xx_error("failed to allocate memory for cal_rssi");
464 count = 0;
465 goto out_unlock;
466 }
467
468 out_unlock:
469 mutex_unlock(&stlc->mutex);
470
471 return count;
472}
473
474static ssize_t stlc45xx_sysfs_show_cal_channels(struct device *dev,
475 struct device_attribute *attr,
476 char *buf)
477{
478 struct stlc45xx *stlc = dev_get_drvdata(dev);
479 ssize_t len;
480
481 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
482
483 len = PAGE_SIZE;
484
485 mutex_lock(&stlc->mutex);
486
487 if (stlc->cal_channels)
488 hex_dump_to_buffer(stlc->cal_channels, CHANNEL_CAL_ARRAY_LEN,
489 16, 2, buf, len, 0);
490
491 mutex_unlock(&stlc->mutex);
492
493 len = strlen(buf);
494
495 return len;
496}
497
498static ssize_t stlc45xx_sysfs_store_cal_channels(struct device *dev,
499 struct device_attribute *attr,
500 const char *buf, size_t count)
501{
502 struct stlc45xx *stlc = dev_get_drvdata(dev);
503
504 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
505
506 mutex_lock(&stlc->mutex);
507
508 if (count != CHANNEL_CAL_ARRAY_LEN) {
509 stlc45xx_error("invalid cal_channels size: %zu ", count);
510 count = 0;
511 goto out_unlock;
512 }
513
514 kfree(stlc->cal_channels);
515
516 stlc->cal_channels = kmemdup(buf, count, GFP_KERNEL);
517
518 if (!stlc->cal_channels) {
519 stlc45xx_error("failed to allocate memory for cal_channels");
520 count = 0;
521 goto out_unlock;
522 }
523
524out_unlock:
525 mutex_unlock(&stlc->mutex);
526
527 return count;
528}
529
530static ssize_t stlc45xx_sysfs_show_tx_buf(struct device *dev,
531 struct device_attribute *attr,
532 char *buf)
533{
534 struct stlc45xx *stlc = dev_get_drvdata(dev);
535 struct txbuffer *entry;
536 ssize_t len = 0;
537
538 stlc45xx_debug(DEBUG_FUNC, "%s()", __func__);
539
540 mutex_lock(&stlc->mutex);
541
542 list_for_each_entry(entry, &stlc->tx_sent, tx_list) {
543 len += sprintf(buf + len, "0x%x: 0x%x-0x%x\n",
544 entry->handle, entry->start,
545 entry->end);
546 }
547
548 mutex_unlock(&stlc->mutex);
549
550 return len;
551}
552
/*
 * sysfs attributes: cal_rssi and cal_channels are readable and writable
 * from userspace (root-writable, S_IWUSR); tx_buf is a read-only view of
 * the regions currently allocated in the firmware tx buffer.
 */
static DEVICE_ATTR(cal_rssi, S_IRUGO | S_IWUSR,
		   stlc45xx_sysfs_show_cal_rssi,
		   stlc45xx_sysfs_store_cal_rssi);
static DEVICE_ATTR(cal_channels, S_IRUGO | S_IWUSR,
		   stlc45xx_sysfs_show_cal_channels,
		   stlc45xx_sysfs_store_cal_channels);
static DEVICE_ATTR(tx_buf, S_IRUGO, stlc45xx_sysfs_show_tx_buf, NULL);
560
/*
 * Read 'len' bytes from device address 'addr' into 'buf' over SPI.
 * The transaction is one message of two transfers: a 2-byte address
 * word with the read bit set, followed by the data read.
 */
static void stlc45xx_spi_read(struct stlc45xx *stlc, unsigned long addr,
			      void *buf, size_t len)
{
	struct spi_transfer t[2];
	struct spi_message m;

	/* We first push the address */
	addr = (addr << 8) | ADDR_READ_BIT_15;

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	/*
	 * NOTE(review): only the first 2 bytes of 'addr' (an unsigned long
	 * on the stack) go on the wire, so the framing depends on host
	 * endianness — presumably targets are little-endian ARM; confirm.
	 */
	t[0].tx_buf = &addr;
	t[0].len = 2;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	/* synchronous: returns after the whole transaction completes */
	spi_sync(stlc->spi, &m);
}
583
584
/*
 * Write 'len' bytes from 'buf' to device address 'addr' over SPI.
 * One message of two (or three) transfers: a 2-byte address word
 * (read bit clear), the payload, and — for odd lengths — the final
 * byte resent widened to a 16-bit word so the device always receives
 * a whole number of 16-bit units.
 */
static void stlc45xx_spi_write(struct stlc45xx *stlc, unsigned long addr,
			       void *buf, size_t len)
{
	struct spi_transfer t[3];
	struct spi_message m;
	u16 last_word;

	/* We first push the address */
	addr = addr << 8;

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	/* see NOTE in stlc45xx_spi_read about endianness of this word */
	t[0].tx_buf = &addr;
	t[0].len = 2;
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	if (len % 2) {
		/* pad odd-length payloads: last byte again, as a u16.
		 * last_word stays valid until spi_sync() below returns. */
		last_word = ((u8 *)buf)[len - 1];

		t[2].tx_buf = &last_word;
		t[2].len = 2;
		spi_message_add_tail(&t[2], &m);
	}

	spi_sync(stlc->spi, &m);
}
616
617static u16 stlc45xx_read16(struct stlc45xx *stlc, unsigned long addr)
618{
619 u16 val;
620
621 stlc45xx_spi_read(stlc, addr, &val, sizeof(val));
622
623 return val;
624}
625
626static u32 stlc45xx_read32(struct stlc45xx *stlc, unsigned long addr)
627{
628 u32 val;
629
630 stlc45xx_spi_read(stlc, addr, &val, sizeof(val));
631
632 return val;
633}
634
/* Write a single 16-bit value to device address 'addr'. */
static void stlc45xx_write16(struct stlc45xx *stlc, unsigned long addr, u16 val)
{
	stlc45xx_spi_write(stlc, addr, &val, sizeof(val));
}
639
/* Write a single 32-bit value to device address 'addr'. */
static void stlc45xx_write32(struct stlc45xx *stlc, unsigned long addr, u32 val)
{
	stlc45xx_spi_write(stlc, addr, &val, sizeof(val));
}
644
/*
 * Descriptor for a named SPI register — presumably used for debug
 * register dumps; no user is visible in this part of the file.
 */
struct stlc45xx_spi_reg {
	u16 address;	/* device register address */
	u16 length;	/* register width in bytes */
	char *name;	/* human-readable register name */
};
650
651/* caller must hold tx_lock */
652static void stlc45xx_txbuffer_dump(struct stlc45xx *stlc)
653{
654 struct txbuffer *txbuffer;
655 char *buf, *pos;
656 int buf_len, l, count;
657
658 if (!(DEBUG_LEVEL & DEBUG_TXBUFFER))
659 return;
660
661 stlc45xx_debug(DEBUG_FUNC, "%s()", __func__);
662
663 buf_len = 500;
664 buf = kmalloc(buf_len, GFP_ATOMIC);
665 if (!buf)
666 return;
667
668 pos = buf;
669 count = 0;
670
671 list_for_each_entry(txbuffer, &stlc->txbuffer, buffer_list) {
672 l = snprintf(pos, buf_len, "0x%x-0x%x,",
673 txbuffer->start, txbuffer->end);
674 /* drop the null byte */
675 pos += l;
676 buf_len -= l;
677 count++;
678 }
679
680 if (count == 0)
681 *pos = '\0';
682 else
683 *--pos = '\0';
684
685 stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: in buffer %d regions: %s",
686 count, buf);
687
688 kfree(buf);
689}
690
691/* caller must hold tx_lock */
692static int stlc45xx_txbuffer_find(struct stlc45xx *stlc, size_t len)
693{
694 struct txbuffer *txbuffer;
695 int pos;
696
697 stlc45xx_debug(DEBUG_FUNC, "%s()", __func__);
698
699 pos = FIRMWARE_TXBUFFER_START;
700
701 if (list_empty(&stlc->txbuffer))
702 goto out;
703
704 /*
705 * the entries in txbuffer must be in the same order as they are in
706 * the real buffer
707 */
708 list_for_each_entry(txbuffer, &stlc->txbuffer, buffer_list) {
709 if (pos + len < txbuffer->start)
710 break;
711 pos = ALIGN(txbuffer->end + 1, 4);
712 }
713
714 if (pos + len > FIRMWARE_TXBUFFER_END)
715 /* not enough room */
716 pos = -1;
717
718 stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: find %zu B: 0x%x", len, pos);
719
720out:
721 return pos;
722}
723
/*
 * Insert 'txbuffer' into stlc->txbuffer, keeping the list sorted by
 * buffer start address (the invariant stlc45xx_txbuffer_find relies on).
 * Called from stlc45xx_txbuffer_alloc, so the caller holds tx_lock.
 * Always returns 0.
 */
static int stlc45xx_txbuffer_add(struct stlc45xx *stlc,
				 struct txbuffer *txbuffer)
{
	struct txbuffer *r, *prev = NULL;

	/* empty list: this entry becomes the only element */
	if (list_empty(&stlc->txbuffer)) {
		list_add(&txbuffer->buffer_list, &stlc->txbuffer);
		return 0;
	}

	r = list_first_entry(&stlc->txbuffer, struct txbuffer, buffer_list);

	if (txbuffer->start < r->start) {
		/* add to the beginning of the list */
		list_add(&txbuffer->buffer_list, &stlc->txbuffer);
		return 0;
	}

	/* walk pairs (prev, r) looking for the gap that contains us */
	prev = NULL;
	list_for_each_entry(r, &stlc->txbuffer, buffer_list) {
		/* skip first entry, we checked for that above */
		if (!prev) {
			prev = r;
			continue;
		}

		/* double-check overlaps */
		WARN_ON_ONCE(txbuffer->start >= r->start &&
			     txbuffer->start <= r->end);
		WARN_ON_ONCE(txbuffer->end >= r->start &&
			     txbuffer->end <= r->end);

		if (prev->end < txbuffer->start &&
		    txbuffer->end < r->start) {
			/* insert at this spot (before r) */
			list_add_tail(&txbuffer->buffer_list, &r->buffer_list);
			return 0;
		}

		prev = r;
	}

	/* no interior gap found: entry belongs at the end */
	list_add_tail(&txbuffer->buffer_list, &stlc->txbuffer);

	return 0;

}
772
773/* caller must hold tx_lock */
774static struct txbuffer *stlc45xx_txbuffer_alloc(struct stlc45xx *stlc,
775 size_t frame_len)
776{
777 struct txbuffer *entry = NULL;
778 size_t len;
779 int pos;
780
781 stlc45xx_debug(DEBUG_FUNC, "%s()", __func__);
782
783 len = FIRMWARE_TXBUFFER_HEADER + frame_len + FIRMWARE_TXBUFFER_TRAILER;
784 pos = stlc45xx_txbuffer_find(stlc, len);
785
786 if (pos < 0)
787 return NULL;
788
789 WARN_ON_ONCE(pos + len > FIRMWARE_TXBUFFER_END);
790 WARN_ON_ONCE(pos < FIRMWARE_TXBUFFER_START);
791
792 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
793 entry->start = pos;
794 entry->frame_start = pos + FIRMWARE_TXBUFFER_HEADER;
795 entry->end = entry->start + len - 1;
796
797 stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: allocated 0x%x-0x%x",
798 entry->start, entry->end);
799
800 stlc45xx_txbuffer_add(stlc, entry);
801
802 stlc45xx_txbuffer_dump(stlc);
803
804 return entry;
805}
806
/* caller must hold tx_lock */
/*
 * Release a tx buffer region: unlink it from the txbuffer list and
 * free the entry.  'txbuffer' must not be used after this returns.
 */
static void stlc45xx_txbuffer_free(struct stlc45xx *stlc,
				   struct txbuffer *txbuffer)
{
	stlc45xx_debug(DEBUG_FUNC, "%s()", __func__);

	stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: freed 0x%x-0x%x",
		       txbuffer->start, txbuffer->end);

	list_del(&txbuffer->buffer_list);
	kfree(txbuffer);
}
819
820
821static int stlc45xx_wait_bit(struct stlc45xx *stlc, u16 reg, u32 mask,
822 u32 expected)
823{
824 int i;
825 char buffer[4];
826
827 for (i = 0; i < 2000; i++) {
828 stlc45xx_spi_read(stlc, reg, buffer, sizeof(buffer));
829 if (((*(u32 *)buffer) & mask) == expected)
830 return 1;
831 msleep(1);
832 }
833
834 return 0;
835}
836
837static int stlc45xx_request_firmware(struct stlc45xx *stlc)
838{
839 const struct firmware *fw;
840 int ret;
841
842 /* FIXME: should driver use it's own struct device? */
843 ret = request_firmware(&fw, "3826.arm", &stlc->spi->dev);
844
845 if (ret < 0) {
846 stlc45xx_error("request_firmware() failed: %d", ret);
847 return ret;
848 }
849
850 if (fw->size % 4) {
851 stlc45xx_error("firmware size is not multiple of 32bit: %zu",
852 fw->size);
853 return -EILSEQ; /* Illegal byte sequence */;
854 }
855
856 if (fw->size < 1000) {
857 stlc45xx_error("firmware is too small: %zu", fw->size);
858 return -EILSEQ;
859 }
860
861 stlc->fw = kmemdup(fw->data, fw->size, GFP_KERNEL);
862 if (!stlc->fw) {
863 stlc45xx_error("could not allocate memory for firmware");
864 return -ENOMEM;
865 }
866
867 stlc->fw_len = fw->size;
868
869 release_firmware(fw);
870
871 return 0;
872}
873
874static int stlc45xx_upload_firmware(struct stlc45xx *stlc)
875{
876 struct s_dma_regs dma_regs;
877 unsigned long fw_len, fw_addr;
878 long _fw_len;
879 int ret;
880
881 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
882
883 if (!stlc->fw) {
884 ret = stlc45xx_request_firmware(stlc);
885 if (ret < 0)
886 return ret;
887 }
888
889 /* stop the device */
890 stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT,
891 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET
892 | SPI_CTRL_STAT_START_HALTED);
893
894 msleep(TARGET_BOOT_SLEEP);
895
896 stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT,
897 SPI_CTRL_STAT_HOST_OVERRIDE
898 | SPI_CTRL_STAT_START_HALTED);
899
900 msleep(TARGET_BOOT_SLEEP);
901
902 fw_addr = FIRMWARE_ADDRESS;
903 fw_len = stlc->fw_len;
904
905 while (fw_len > 0) {
906 _fw_len = (fw_len > SPI_MAX_PACKET_SIZE)
907 ? SPI_MAX_PACKET_SIZE : fw_len;
908 dma_regs.cmd = SPI_DMA_WRITE_CTRL_ENABLE;
909 dma_regs.len = cpu_to_le16(_fw_len);
910 dma_regs.addr = cpu_to_le32(fw_addr);
911
912 fw_len -= _fw_len;
913 fw_addr += _fw_len;
914
915 stlc45xx_write16(stlc, SPI_ADRS_DMA_WRITE_CTRL, dma_regs.cmd);
916
917 if (stlc45xx_wait_bit(stlc, SPI_ADRS_DMA_WRITE_CTRL,
918 HOST_ALLOWED, HOST_ALLOWED) == 0) {
919 stlc45xx_error("fw_upload not allowed to DMA write");
920 return -EAGAIN;
921 }
922
923 stlc45xx_write16(stlc, SPI_ADRS_DMA_WRITE_LEN, dma_regs.len);
924 stlc45xx_write32(stlc, SPI_ADRS_DMA_WRITE_BASE, dma_regs.addr);
925
926 stlc45xx_spi_write(stlc, SPI_ADRS_DMA_DATA, stlc->fw, _fw_len);
927
928 /* FIXME: I think this doesn't work if firmware is large,
929 * this loop goes to second round. fw->data is not
930 * increased at all! */
931 }
932
933 BUG_ON(fw_len != 0);
934
935 /* enable host interrupts */
936 stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_EN, SPI_HOST_INTS_DEFAULT);
937
938 /* boot the device */
939 stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT,
940 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET
941 | SPI_CTRL_STAT_RAM_BOOT);
942
943 msleep(TARGET_BOOT_SLEEP);
944
945 stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT,
946 SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT);
947 msleep(TARGET_BOOT_SLEEP);
948
949 return 0;
950}
951
/* caller must hold tx_lock */
/*
 * Expire frames on the tx_sent list whose lifetime has passed: report
 * them to mac80211 and release their tx buffer region.  Uses the _safe
 * iterator because entries are deleted while walking.
 */
static void stlc45xx_check_txsent(struct stlc45xx *stlc)
{
	struct txbuffer *entry, *n;

	list_for_each_entry_safe(entry, n, &stlc->tx_sent, tx_list) {
		if (time_after(jiffies, entry->lifetime)) {
			if (net_ratelimit())
				stlc45xx_warning("frame 0x%x lifetime exceeded",
						 entry->start);
			/* unlink first, then hand the skb back to the stack
			 * (minus the driver-added header) and free the
			 * buffer region */
			list_del(&entry->tx_list);
			skb_pull(entry->skb, entry->header_len);
			ieee80211_tx_status(stlc->hw, entry->skb);
			stlc45xx_txbuffer_free(stlc, entry);
		}
	}
}
969
/* Cut chip power: mask its IRQ first, then drop the power GPIO. */
static void stlc45xx_power_off(struct stlc45xx *stlc)
{
	disable_irq(gpio_to_irq(stlc45xx_gpio_irq));
	gpio_set_value(stlc45xx_gpio_power, 0);
}
975
/* Raise the power GPIO and re-enable the chip IRQ, then let it settle. */
static void stlc45xx_power_on(struct stlc45xx *stlc)
{
	gpio_set_value(stlc45xx_gpio_power, 1);
	enable_irq(gpio_to_irq(stlc45xx_gpio_irq));

	/*
	 * need to wait a while before device can be accessed, the length
	 * is just a guess
	 */
	msleep(10);
}
987
988/* caller must hold tx_lock */
989static void stlc45xx_flush_queues(struct stlc45xx *stlc)
990{
991 struct txbuffer *entry;
992
993 while (!list_empty(&stlc->tx_sent)) {
994 entry = list_first_entry(&stlc->tx_sent,
995 struct txbuffer, tx_list);
996 list_del(&entry->tx_list);
997 dev_kfree_skb(entry->skb);
998 stlc45xx_txbuffer_free(stlc, entry);
999 }
1000
1001 WARN_ON(!list_empty(&stlc->tx_sent));
1002
1003 while (!list_empty(&stlc->tx_pending)) {
1004 entry = list_first_entry(&stlc->tx_pending,
1005 struct txbuffer, tx_list);
1006 list_del(&entry->tx_list);
1007 dev_kfree_skb(entry->skb);
1008 stlc45xx_txbuffer_free(stlc, entry);
1009 }
1010
1011 WARN_ON(!list_empty(&stlc->tx_pending));
1012 WARN_ON(!list_empty(&stlc->txbuffer));
1013}
1014
/*
 * Workqueue handler completing a firmware reset requested by
 * stlc45xx_reset(): power the chip down, flush all tx state, then power
 * it back up and re-upload the firmware.  The mutex is deliberately
 * dropped around cancel_work_sync() to avoid deadlocking against the
 * other work item.
 */
static void stlc45xx_work_reset(struct work_struct *work)
{
	struct stlc45xx *stlc = container_of(work, struct stlc45xx,
					     work_reset);

	mutex_lock(&stlc->mutex);

	/* bail out if someone already moved us past the RESET state */
	if (stlc->fw_state != FW_STATE_RESET)
		goto out;

	stlc45xx_power_off(stlc);

	mutex_unlock(&stlc->mutex);

	/* wait that all work_structs have finished, we can't hold
	 * stlc->mutex to avoid deadlock */
	cancel_work_sync(&stlc->work);

	/* FIXME: find out good value to wait for chip power down */
	msleep(100);

	mutex_lock(&stlc->mutex);

	/* FIXME: we should gracefully handle if the state has changed
	 * after re-acquiring mutex */
	WARN_ON(stlc->fw_state != FW_STATE_RESET);

	spin_lock_bh(&stlc->tx_lock);
	stlc45xx_flush_queues(stlc);
	spin_unlock_bh(&stlc->tx_lock);

	stlc->fw_state = FW_STATE_RESETTING;

	stlc45xx_power_on(stlc);
	stlc45xx_upload_firmware(stlc);

out:
	mutex_unlock(&stlc->mutex);
}
1054
/* caller must hold mutex */
/*
 * Kick off a firmware reset: mark the state, stop mac80211 queues and
 * schedule work_reset, which does the actual power-cycle and re-upload.
 */
static void stlc45xx_reset(struct stlc45xx *stlc)
{
	stlc45xx_warning("resetting firmware");
	stlc->fw_state = FW_STATE_RESET;
	ieee80211_stop_queues(stlc->hw);
	queue_work(stlc->hw->workqueue, &stlc->work_reset);
}
1063
1064static void stlc45xx_work_tx_timeout(struct work_struct *work)
1065{
1066 struct stlc45xx *stlc = container_of(work, struct stlc45xx,
1067 work_tx_timeout.work);
1068
1069 stlc45xx_warning("tx timeout");
1070
1071 mutex_lock(&stlc->mutex);
1072
1073 if (stlc->fw_state != FW_STATE_READY)
1074 goto out;
1075
1076 stlc45xx_reset(stlc);
1077
1078out:
1079 mutex_unlock(&stlc->mutex);
1080}
1081
/* Acknowledge host interrupt bits 'val' to the device. */
static void stlc45xx_int_ack(struct stlc45xx *stlc, u32 val)
{
	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_ACK, val);
}
1088
1089static void stlc45xx_wakeup(struct stlc45xx *stlc)
1090{
1091 unsigned long timeout;
1092 u32 ints;
1093
1094 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1095
1096 /* wake the chip */
1097 stlc45xx_write32(stlc, SPI_ADRS_ARM_INTERRUPTS, SPI_TARGET_INT_WAKEUP);
1098
1099 /* And wait for the READY interrupt */
1100 timeout = jiffies + HZ;
1101
1102 ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
1103 while (!(ints & SPI_HOST_INT_READY)) {
1104 if (time_after(jiffies, timeout))
1105 goto out;
1106 ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
1107 }
1108
1109 stlc45xx_int_ack(stlc, SPI_HOST_INT_READY);
1110
1111out:
1112 return;
1113}
1114
/* Allow the chip to drop back into its low-power state. */
static void stlc45xx_sleep(struct stlc45xx *stlc)
{
	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	stlc45xx_write32(stlc, SPI_ADRS_ARM_INTERRUPTS, SPI_TARGET_INT_SLEEP);
}
1121
1122static void stlc45xx_int_ready(struct stlc45xx *stlc)
1123{
1124 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1125
1126 stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_EN,
1127 SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE);
1128
1129 switch (stlc->fw_state) {
1130 case FW_STATE_BOOTING:
1131 stlc->fw_state = FW_STATE_READY;
1132 complete(&stlc->fw_comp);
1133 break;
1134 case FW_STATE_RESETTING:
1135 stlc->fw_state = FW_STATE_READY;
1136
1137 stlc45xx_tx_scan(stlc);
1138 stlc45xx_tx_setup(stlc);
1139 stlc45xx_tx_edcf(stlc);
1140
1141 ieee80211_wake_queues(stlc->hw);
1142 break;
1143 default:
1144 break;
1145 }
1146}
1147
/*
 * Handle a tx acknowledgement (LM_OID_TX) from the firmware.
 *
 * Looks up the entry on the tx_sent list whose handle matches the one
 * in the control header, reports tx status to mac80211 when the frame
 * asked for it, and releases the firmware tx-buffer slot. Returns 0 on
 * success or -1 when no matching frame is found (the caller chain
 * treats that as a firmware error and resets).
 */
static int stlc45xx_rx_txack(struct stlc45xx *stlc, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	struct s_lm_control *control;
	struct s_lmo_tx *tx;
	struct txbuffer *entry;
	int found = 0;

	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	/* the tx status payload follows the control header */
	control = (struct s_lm_control *) skb->data;
	tx = (struct s_lmo_tx *) (control + 1);

	if (list_empty(&stlc->tx_sent)) {
		if (net_ratelimit())
			stlc45xx_warning("no frames waiting for "
					 "acknowledgement");
		return -1;
	}

	/* find the sent frame this ack refers to */
	list_for_each_entry(entry, &stlc->tx_sent, tx_list) {
		if (control->handle == entry->handle) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (net_ratelimit())
			stlc45xx_warning("couldn't find frame for tx ack 0x%x",
					 control->handle);
		return -1;
	}

	stlc45xx_debug(DEBUG_TX, "TX ACK 0x%x", entry->handle);

	if (entry->status_needed) {
		info = IEEE80211_SKB_CB(entry->skb);

		if (!(tx->flags & LM_TX_FAILED)) {
			/* frame was acked */
			info->flags |= IEEE80211_TX_STAT_ACK;
			/* rcpi-to-dBm conversion used throughout the driver */
			info->status.ack_signal = tx->rcpi / 2 - 110;
		}

		/* strip the firmware header before handing back to mac80211 */
		skb_pull(entry->skb, entry->header_len);

		ieee80211_tx_status(stlc->hw, entry->skb);
	}

	list_del(&entry->tx_list);

	stlc45xx_check_txsent(stlc);
	if (list_empty(&stlc->tx_sent))
		/* there are no pending frames, we can stop the tx timeout
		 * timer */
		cancel_delayed_work(&stlc->work_tx_timeout);

	spin_lock_bh(&stlc->tx_lock);

	stlc45xx_txbuffer_free(stlc, entry);

	/* freed buffer space may allow stopped queues to run again */
	if (stlc->tx_queue_stopped &&
	    stlc45xx_txbuffer_find(stlc, MAX_FRAME_LEN) != -1) {
		stlc45xx_debug(DEBUG_QUEUE, "room in tx buffer, waking queues");
		ieee80211_wake_queues(stlc->hw);
		stlc->tx_queue_stopped = 0;
	}

	spin_unlock_bh(&stlc->tx_lock);

	return 0;
}
1221
1222static int stlc45xx_rx_control(struct stlc45xx *stlc, struct sk_buff *skb)
1223{
1224 struct s_lm_control *control;
1225 int ret = 0;
1226
1227 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1228
1229 control = (struct s_lm_control *) skb->data;
1230
1231 switch (control->oid) {
1232 case LM_OID_TX:
1233 ret = stlc45xx_rx_txack(stlc, skb);
1234 break;
1235 case LM_OID_SETUP:
1236 case LM_OID_SCAN:
1237 case LM_OID_TRAP:
1238 case LM_OID_EDCF:
1239 case LM_OID_KEYCACHE:
1240 case LM_OID_PSM:
1241 case LM_OID_STATS:
1242 case LM_OID_LED:
1243 default:
1244 stlc45xx_warning("unhandled rx control oid %d\n",
1245 control->oid);
1246 break;
1247 }
1248
1249 dev_kfree_skb(skb);
1250
1251 return ret;
1252}
1253
1254/* copied from mac80211 */
1255static void stlc45xx_parse_elems(u8 *start, size_t len,
1256 struct stlc45xx_ie_tim **tim,
1257 size_t *tim_len)
1258{
1259 size_t left = len;
1260 u8 *pos = start;
1261
1262 while (left >= 2) {
1263 u8 id, elen;
1264
1265 id = *pos++;
1266 elen = *pos++;
1267 left -= 2;
1268
1269 if (elen > left)
1270 return;
1271
1272 switch (id) {
1273 case WLAN_EID_TIM:
1274 *tim = (struct stlc45xx_ie_tim *) pos;
1275 *tim_len = elen;
1276 break;
1277 default:
1278 break;
1279 }
1280
1281 left -= elen;
1282 pos += elen;
1283 }
1284}
1285
1286/*
1287 * mac80211 doesn't have support for asking frames with PS-Poll, so let's
1288 * implement in the driver for now. We have to add support to mac80211
1289 * later.
1290 */
1291static int stlc45xx_check_more_data(struct stlc45xx *stlc, struct sk_buff *skb)
1292{
1293 struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data;
1294 struct ieee80211_hdr *hdr;
1295 size_t len;
1296 u16 fc;
1297
1298 hdr = (void *) skb->data + sizeof(*data);
1299 len = skb->len - sizeof(*data);
1300
1301 /* minimum frame length is the null frame length 24 bytes */
1302 if (len < 24) {
1303 stlc45xx_warning("invalid frame length when checking for "
1304 "more data");
1305 return -EINVAL;
1306 }
1307
1308 fc = le16_to_cpu(hdr->frame_control);
1309 if (!(fc & IEEE80211_FCTL_FROMDS))
1310 /* this is not from DS */
1311 return 0;
1312
1313 if (compare_ether_addr(hdr->addr1, stlc->mac_addr) != 0)
1314 /* the frame was not for us */
1315 return 0;
1316
1317 if (!(fc & IEEE80211_FCTL_MOREDATA)) {
1318 /* AP has no more frames buffered for us */
1319 stlc45xx_debug(DEBUG_PSM, "all buffered frames retrieved");
1320 stlc->pspolling = false;
1321 return 0;
1322 }
1323
1324 /* MOREDATA bit is set, let's ask for a new frame from the AP */
1325 stlc45xx_tx_pspoll(stlc, stlc->psm);
1326
1327 return 0;
1328}
1329
1330/*
1331 * mac80211 cannot read TIM from beacons, so let's add a hack to the
1332 * driver. We have to add support to mac80211 later.
1333 */
1334static int stlc45xx_rx_data_beacon(struct stlc45xx *stlc, struct sk_buff *skb)
1335{
1336 struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data;
1337 size_t len = skb->len, tim_len = 0, baselen, pvbmap_len;
1338 struct ieee80211_mgmt *mgmt;
1339 struct stlc45xx_ie_tim *tim = NULL;
1340 int bmap_offset, index, aid_bit;
1341
1342 mgmt = (void *) skb->data + sizeof(*data);
1343
1344 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
1345 if (baselen > len) {
1346 stlc45xx_warning("invalid baselen in beacon");
1347 return -EINVAL;
1348 }
1349
1350 stlc45xx_parse_elems(mgmt->u.beacon.variable, len - baselen, &tim,
1351 &tim_len);
1352
1353 if (!tim) {
1354 stlc45xx_warning("didn't find tim from a beacon");
1355 return -EINVAL;
1356 }
1357
1358 bmap_offset = tim->bmap_control & 0xfe;
1359 index = stlc->aid / 8 - bmap_offset;
1360
1361 pvbmap_len = tim_len - 3;
1362 if (index > pvbmap_len)
1363 return -EINVAL;
1364
1365 aid_bit = !!(tim->pvbmap[index] & (1 << stlc->aid % 8));
1366
1367 stlc45xx_debug(DEBUG_PSM, "fc 0x%x duration %d seq %d dtim %u "
1368 "bmap_control 0x%x aid_bit %d",
1369 mgmt->frame_control, mgmt->duration, mgmt->seq_ctrl >> 4,
1370 tim->dtim_count, tim->bmap_control, aid_bit);
1371
1372 if (!aid_bit)
1373 return 0;
1374
1375 stlc->pspolling = true;
1376 stlc45xx_tx_pspoll(stlc, stlc->psm);
1377
1378 return 0;
1379}
1380
/*
 * Handle a data frame received from the firmware: run the powersave
 * hooks (TIM parsing / MOREDATA handling), fill in rx status from the
 * firmware descriptor, strip the descriptor and optional alignment
 * padding, and hand the frame to mac80211. Always returns 0; the skb
 * is consumed by ieee80211_rx().
 */
static int stlc45xx_rx_data(struct stlc45xx *stlc, struct sk_buff *skb)
{
	struct ieee80211_rx_status status;
	struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data;
	int align = 0;
	u8 *p, align_len;
	u16 len;

	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	if (stlc->psm) {
		if (data->flags & LM_IN_BEACON)
			stlc45xx_rx_data_beacon(stlc, skb);
		else if (stlc->pspolling && (data->flags & LM_IN_DATA))
			stlc45xx_check_more_data(stlc, skb);
	}

	memset(&status, 0, sizeof(status));

	status.freq = data->frequency;
	/* rcpi-to-dBm conversion used throughout the driver */
	status.signal = data->rcpi / 2 - 110;

	/* let's assume that maximum rcpi value is 140 (= 35 dBm) */
	status.qual = data->rcpi * 100 / 140;

	status.band = IEEE80211_BAND_2GHZ;

	/*
	 * FIXME: this gives warning from __ieee80211_rx()
	 *
	 * status.rate_idx = data->rate;
	 */

	len = data->length;

	if (data->flags & LM_FLAG_ALIGN)
		align = 1;

	/* drop the firmware rx descriptor */
	skb_pull(skb, sizeof(*data));

	if (align) {
		/* first payload byte is the padding length; NOTE(review):
		 * align_len is trusted from the device — verify it cannot
		 * exceed skb->len */
		p = skb->data;
		align_len = *p;
		skb_pull(skb, align_len);
	}

	skb_trim(skb, len);

	stlc45xx_debug(DEBUG_RX, "rx data 0x%p %d B", skb->data, skb->len);
	stlc45xx_dump(DEBUG_RX_CONTENT, skb->data, skb->len);

	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
	ieee80211_rx(stlc->hw, skb);

	return 0;
}
1437
1438
1439
1440static int stlc45xx_rx(struct stlc45xx *stlc)
1441{
1442 struct s_lm_control *control;
1443 struct sk_buff *skb;
1444 int ret;
1445 u16 len;
1446
1447 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1448
1449 stlc45xx_wakeup(stlc);
1450
1451 /* dummy read to flush SPI DMA controller bug */
1452 stlc45xx_read16(stlc, SPI_ADRS_GEN_PURP_1);
1453
1454 len = stlc45xx_read16(stlc, SPI_ADRS_DMA_DATA);
1455
1456 if (len == 0) {
1457 stlc45xx_warning("rx request of zero bytes");
1458 return 0;
1459 }
1460
1461 skb = dev_alloc_skb(len);
1462 if (!skb) {
1463 stlc45xx_warning("could not alloc skb");
1464 return 0;
1465 }
1466
1467 stlc45xx_spi_read(stlc, SPI_ADRS_DMA_DATA, skb_put(skb, len), len);
1468
1469 stlc45xx_sleep(stlc);
1470
1471 stlc45xx_debug(DEBUG_RX, "rx frame 0x%p %d B", skb->data, skb->len);
1472 stlc45xx_dump(DEBUG_RX_CONTENT, skb->data, skb->len);
1473
1474 control = (struct s_lm_control *) skb->data;
1475
1476 if (control->flags & LM_FLAG_CONTROL)
1477 ret = stlc45xx_rx_control(stlc, skb);
1478 else
1479 ret = stlc45xx_rx_data(stlc, skb);
1480
1481 return ret;
1482}
1483
1484
/*
 * Hard IRQ handler: SPI transfers cannot be done in interrupt context,
 * so just kick the work item which does the actual register reads.
 */
static irqreturn_t stlc45xx_interrupt(int irq, void *config)
{
	struct spi_device *spi = config;
	struct stlc45xx *stlc = dev_get_drvdata(&spi->dev);

	stlc45xx_debug(DEBUG_IRQ, "IRQ");

	queue_work(stlc->hw->workqueue, &stlc->work);

	return IRQ_HANDLED;
}
1496
/*
 * Push @len bytes at @buf into the firmware tx buffer at @address via
 * SPI DMA: wake the chip, program the DMA write control registers,
 * stream the payload, then wait up to two seconds for the WR_READY
 * interrupt before acknowledging it and letting the chip sleep again.
 *
 * Returns 0 on success, -1 on WR_READY timeout (chip is left awake in
 * that case, matching the original behaviour).
 */
static int stlc45xx_tx_frame(struct stlc45xx *stlc, u32 address,
			     void *buf, size_t len)
{
	struct s_dma_regs dma_regs;
	unsigned long timeout;
	int ret = 0;
	u32 ints;

	stlc->tx_frames++;

	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	stlc45xx_debug(DEBUG_TX, "tx frame 0x%p %zu B", buf, len);
	stlc45xx_dump(DEBUG_TX_CONTENT, buf, len);

	stlc45xx_wakeup(stlc);

	/* NOTE(review): len/addr are byteswapped but cmd is not — confirm
	 * whether SPI_DMA_WRITE_CTRL_ENABLE needs cpu_to_le16() too */
	dma_regs.cmd = SPI_DMA_WRITE_CTRL_ENABLE;
	dma_regs.len = cpu_to_le16(len);
	dma_regs.addr = cpu_to_le32(address);

	stlc45xx_spi_write(stlc, SPI_ADRS_DMA_WRITE_CTRL, &dma_regs,
			   sizeof(dma_regs));

	stlc45xx_spi_write(stlc, SPI_ADRS_DMA_DATA, buf, len);

	/* wait for the firmware to accept the transfer */
	timeout = jiffies + 2 * HZ;
	ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
	while (!(ints & SPI_HOST_INT_WR_READY)) {
		if (time_after(jiffies, timeout)) {
			stlc45xx_warning("WR_READY timeout");
			ret = -1;
			goto out;
		}
		ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
	}

	stlc45xx_int_ack(stlc, SPI_HOST_INT_WR_READY);

	stlc45xx_sleep(stlc);

out:
	return ret;
}
1541
/*
 * Drain the tx_pending list: transfer each queued frame to the
 * firmware buffer and move it to tx_sent, (re)arming the tx timeout.
 *
 * tx_lock protects the lists but must be dropped around
 * stlc45xx_tx_frame(), which sleeps on SPI I/O; the entry is unlinked
 * first so no one else can touch it while the lock is released.
 * Returns 0, or the negative tx_frame result on transfer failure
 * (the caller resets the firmware in that case).
 */
static int stlc45xx_wq_tx(struct stlc45xx *stlc)
{
	struct txbuffer *entry;
	int ret = 0;

	spin_lock_bh(&stlc->tx_lock);

	while (!list_empty(&stlc->tx_pending)) {
		entry = list_entry(stlc->tx_pending.next,
				   struct txbuffer, tx_list);

		list_del_init(&entry->tx_list);

		/* tx_frame sleeps: can't hold a spinlock across it */
		spin_unlock_bh(&stlc->tx_lock);

		ret = stlc45xx_tx_frame(stlc, entry->frame_start,
					entry->skb->data, entry->skb->len);

		spin_lock_bh(&stlc->tx_lock);

		if (ret < 0) {
			/* frame transfer to firmware buffer failed */
			/* FIXME: report this to mac80211 */
			dev_kfree_skb(entry->skb);
			stlc45xx_txbuffer_free(stlc, entry);
			goto out;
		}

		list_add(&entry->tx_list, &stlc->tx_sent);
		queue_delayed_work(stlc->hw->workqueue,
				   &stlc->work_tx_timeout,
				   msecs_to_jiffies(TX_TIMEOUT));
	}

out:
	spin_unlock_bh(&stlc->tx_lock);
	return ret;
}
1580
1581static void stlc45xx_work(struct work_struct *work)
1582{
1583 struct stlc45xx *stlc = container_of(work, struct stlc45xx, work);
1584 u32 ints;
1585 int ret;
1586
1587 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1588
1589 mutex_lock(&stlc->mutex);
1590
1591 if (stlc->fw_state == FW_STATE_OFF &&
1592 stlc->fw_state == FW_STATE_RESET)
1593 goto out;
1594
1595 ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
1596 stlc45xx_debug(DEBUG_BH, "begin host_ints 0x%08x", ints);
1597
1598 if (ints & SPI_HOST_INT_READY) {
1599 stlc45xx_int_ready(stlc);
1600 stlc45xx_int_ack(stlc, SPI_HOST_INT_READY);
1601 }
1602
1603 if (stlc->fw_state != FW_STATE_READY)
1604 goto out;
1605
1606 if (ints & SPI_HOST_INT_UPDATE) {
1607 stlc45xx_int_ack(stlc, SPI_HOST_INT_UPDATE);
1608 ret = stlc45xx_rx(stlc);
1609 if (ret < 0) {
1610 stlc45xx_reset(stlc);
1611 goto out;
1612 }
1613 }
1614 if (ints & SPI_HOST_INT_SW_UPDATE) {
1615 stlc45xx_int_ack(stlc, SPI_HOST_INT_SW_UPDATE);
1616 ret = stlc45xx_rx(stlc);
1617 if (ret < 0) {
1618 stlc45xx_reset(stlc);
1619 goto out;
1620 }
1621 }
1622
1623 ret = stlc45xx_wq_tx(stlc);
1624 if (ret < 0) {
1625 stlc45xx_reset(stlc);
1626 goto out;
1627 }
1628
1629 ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS);
1630 stlc45xx_debug(DEBUG_BH, "end host_ints 0x%08x", ints);
1631
1632out:
1633 mutex_unlock(&stlc->mutex);
1634}
1635
1636static void stlc45xx_tx_edcf(struct stlc45xx *stlc)
1637{
1638 struct s_lm_control *control;
1639 struct s_lmo_edcf *edcf;
1640 size_t len, edcf_len;
1641
1642 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1643
1644 edcf_len = sizeof(*edcf);
1645 len = sizeof(*control) + edcf_len;
1646 control = kzalloc(len, GFP_KERNEL);
1647 edcf = (struct s_lmo_edcf *) (control + 1);
1648
1649 control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET;
1650 control->length = edcf_len;
1651 control->oid = LM_OID_EDCF;
1652
1653 edcf->slottime = 0x14;
1654 edcf->sifs = 10;
1655 edcf->eofpad = 6;
1656 edcf->maxburst = 1500;
1657
1658 edcf->queues[0].aifs = 2;
1659 edcf->queues[0].pad0 = 1;
1660 edcf->queues[0].cwmin = 3;
1661 edcf->queues[0].cwmax = 7;
1662 edcf->queues[0].txop = 47;
1663 edcf->queues[1].aifs = 2;
1664 edcf->queues[1].pad0 = 0;
1665 edcf->queues[1].cwmin = 7;
1666 edcf->queues[1].cwmax = 15;
1667 edcf->queues[1].txop = 94;
1668 edcf->queues[2].aifs = 3;
1669 edcf->queues[2].pad0 = 0;
1670 edcf->queues[2].cwmin = 15;
1671 edcf->queues[2].cwmax = 1023;
1672 edcf->queues[2].txop = 0;
1673 edcf->queues[3].aifs = 7;
1674 edcf->queues[3].pad0 = 0;
1675 edcf->queues[3].cwmin = 15;
1676 edcf->queues[3].cwmax = 1023;
1677 edcf->queues[3].txop = 0;
1678 edcf->queues[4].aifs = 13;
1679 edcf->queues[4].pad0 = 99;
1680 edcf->queues[4].cwmin = 3437;
1681 edcf->queues[4].cwmax = 512;
1682 edcf->queues[4].txop = 12;
1683 edcf->queues[5].aifs = 142;
1684 edcf->queues[5].pad0 = 109;
1685 edcf->queues[5].cwmin = 8756;
1686 edcf->queues[5].cwmax = 6;
1687 edcf->queues[5].txop = 0;
1688 edcf->queues[6].aifs = 4;
1689 edcf->queues[6].pad0 = 0;
1690 edcf->queues[6].cwmin = 0;
1691 edcf->queues[6].cwmax = 58705;
1692 edcf->queues[6].txop = 25716;
1693 edcf->queues[7].aifs = 0;
1694 edcf->queues[7].pad0 = 0;
1695 edcf->queues[7].cwmin = 0;
1696 edcf->queues[7].cwmax = 0;
1697 edcf->queues[7].txop = 0;
1698
1699 stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len);
1700
1701 kfree(control);
1702}
1703
1704static void stlc45xx_tx_setup(struct stlc45xx *stlc)
1705{
1706 struct s_lm_control *control;
1707 struct s_lmo_setup *setup;
1708 size_t len, setup_len;
1709
1710 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1711
1712 setup_len = sizeof(*setup);
1713 len = sizeof(*control) + setup_len;
1714 control = kzalloc(len, GFP_KERNEL);
1715 setup = (struct s_lmo_setup *) (control + 1);
1716
1717 control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET;
1718 control->length = setup_len;
1719 control->oid = LM_OID_SETUP;
1720
1721 setup->flags = LM_SETUP_INFRA;
1722 setup->antenna = 2;
1723 setup->rx_align = 0;
1724 setup->rx_buffer = FIRMWARE_RXBUFFER_START;
1725 setup->rx_mtu = FIRMWARE_MTU;
1726 setup->frontend = 5;
1727 setup->timeout = 0;
1728 setup->truncate = 48896;
1729 setup->bratemask = 0xffffffff;
1730 setup->ref_clock = 644245094;
1731 setup->lpf_bandwidth = 65535;
1732 setup->osc_start_delay = 65535;
1733
1734 memcpy(setup->macaddr, stlc->mac_addr, ETH_ALEN);
1735 memcpy(setup->bssid, stlc->bssid, ETH_ALEN);
1736
1737 stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len);
1738
1739 kfree(control);
1740}
1741
1742static void stlc45xx_tx_scan(struct stlc45xx *stlc)
1743{
1744 struct s_lm_control *control;
1745 struct s_lmo_scan *scan;
1746 size_t len, scan_len;
1747
1748 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
1749
1750 scan_len = sizeof(*scan);
1751 len = sizeof(*control) + scan_len;
1752 control = kzalloc(len, GFP_KERNEL);
1753 scan = (struct s_lmo_scan *) (control + 1);
1754
1755 control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET;
1756 control->length = scan_len;
1757 control->oid = LM_OID_SCAN;
1758
1759 scan->flags = LM_SCAN_EXIT;
1760 scan->bratemask = 0x15f;
1761 scan->aloft[0] = 3;
1762 scan->aloft[1] = 3;
1763 scan->aloft[2] = 1;
1764 scan->aloft[3] = 0;
1765 scan->aloft[4] = 0;
1766 scan->aloft[5] = 0;
1767 scan->aloft[6] = 0;
1768 scan->aloft[7] = 0;
1769
1770 memcpy(&scan->rssical, &stlc->cal_rssi[(stlc->channel - 1) *
1771 RSSI_CAL_LEN],
1772 RSSI_CAL_LEN);
1773 memcpy(&scan->channel, &stlc->cal_channels[(stlc->channel - 1) *
1774 CHANNEL_CAL_LEN],
1775 CHANNEL_CAL_LEN);
1776
1777 stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len);
1778
1779 kfree(control);
1780}
1781
1782/*
1783 * caller must hold mutex
1784 */
1785static int stlc45xx_tx_pspoll(struct stlc45xx *stlc, bool powersave)
1786{
1787 struct ieee80211_hdr *pspoll;
1788 int payload_len, padding, i;
1789 struct s_lm_data_out *data;
1790 struct txbuffer *entry;
1791 struct sk_buff *skb;
1792 char *payload;
1793 u16 fc;
1794
1795 skb = dev_alloc_skb(stlc->hw->extra_tx_headroom + 16);
1796 if (!skb) {
1797 stlc45xx_warning("failed to allocate pspoll frame");
1798 return -ENOMEM;
1799 }
1800 skb_reserve(skb, stlc->hw->extra_tx_headroom);
1801
1802 pspoll = (struct ieee80211_hdr *) skb_put(skb, 16);
1803 memset(pspoll, 0, 16);
1804 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL;
1805 if (powersave)
1806 fc |= IEEE80211_FCTL_PM;
1807 pspoll->frame_control = cpu_to_le16(fc);
1808 pspoll->duration_id = cpu_to_le16(stlc->aid);
1809
1810 /* aid in PS-Poll has its two MSBs each set to 1 */
1811 pspoll->duration_id |= cpu_to_le16(1 << 15) | cpu_to_le16(1 << 14);
1812
1813 memcpy(pspoll->addr1, stlc->bssid, ETH_ALEN);
1814 memcpy(pspoll->addr2, stlc->mac_addr, ETH_ALEN);
1815
1816 stlc45xx_debug(DEBUG_PSM, "sending PS-Poll frame to %pM (powersave %d, "
1817 "fc 0x%x, aid %d)", pspoll->addr1,
1818 powersave, fc, stlc->aid);
1819
1820 spin_lock_bh(&stlc->tx_lock);
1821
1822 entry = stlc45xx_txbuffer_alloc(stlc, skb->len);
1823
1824 spin_unlock_bh(&stlc->tx_lock);
1825
1826 if (!entry) {
1827 /*
1828 * The queue should be stopped before the firmware buffer
1829 * is full, so firmware buffer should always have enough
1830 * space.
1831 *
1832 * But I'm too lazy and omit it for now.
1833 */
1834 if (net_ratelimit())
1835 stlc45xx_warning("firmware tx buffer full is full "
1836 "for null frame");
1837 return -ENOSPC;
1838 }
1839
1840 payload = skb->data;
1841 payload_len = skb->len;
1842 padding = (int) (skb->data - sizeof(*data)) & 3;
1843 entry->header_len = sizeof(*data) + padding;
1844
1845 entry->skb = skb;
1846 entry->status_needed = false;
1847 entry->handle = (u32) skb;
1848 entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME);
1849
1850 stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B "
1851 "padding %d header_len %d)",
1852 entry->handle, payload, payload_len, padding,
1853 entry->header_len);
1854 stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len);
1855
1856 data = (struct s_lm_data_out *) skb_push(skb, entry->header_len);
1857
1858 memset(data, 0, entry->header_len);
1859
1860 if (padding)
1861 data->flags = LM_FLAG_ALIGN;
1862
1863 data->flags = LM_OUT_BURST;
1864 data->length = payload_len;
1865 data->handle = entry->handle;
1866 data->aid = 1;
1867 data->rts_retries = 7;
1868 data->retries = 7;
1869 data->aloft_ctrl = 0;
1870 data->crypt_offset = 58;
1871 data->keytype = 0;
1872 data->keylen = 0;
1873 data->queue = LM_QUEUE_DATA3;
1874 data->backlog = 32;
1875 data->antenna = 2;
1876 data->cts = 3;
1877 data->power = 127;
1878
1879 for (i = 0; i < 8; i++)
1880 data->aloft[i] = 0;
1881
1882 /*
1883 * check if there's enough space in tx buffer
1884 *
1885 * FIXME: ignored for now
1886 */
1887
1888 stlc45xx_tx_frame(stlc, entry->start, skb->data, skb->len);
1889
1890 list_add(&entry->tx_list, &stlc->tx_sent);
1891
1892 return 0;
1893}
1894
1895/*
1896 * caller must hold mutex
1897 *
1898 * shamelessly stolen from mac80211/ieee80211_send_nullfunc
1899 */
1900static int stlc45xx_tx_nullfunc(struct stlc45xx *stlc, bool powersave)
1901{
1902 struct ieee80211_hdr *nullfunc;
1903 int payload_len, padding, i;
1904 struct s_lm_data_out *data;
1905 struct txbuffer *entry;
1906 struct sk_buff *skb;
1907 char *payload;
1908 u16 fc;
1909
1910 skb = dev_alloc_skb(stlc->hw->extra_tx_headroom + 24);
1911 if (!skb) {
1912 stlc45xx_warning("failed to allocate buffer for null frame\n");
1913 return -ENOMEM;
1914 }
1915 skb_reserve(skb, stlc->hw->extra_tx_headroom);
1916
1917 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
1918 memset(nullfunc, 0, 24);
1919 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
1920 IEEE80211_FCTL_TODS;
1921
1922 if (powersave)
1923 fc |= IEEE80211_FCTL_PM;
1924
1925 nullfunc->frame_control = cpu_to_le16(fc);
1926 memcpy(nullfunc->addr1, stlc->bssid, ETH_ALEN);
1927 memcpy(nullfunc->addr2, stlc->mac_addr, ETH_ALEN);
1928 memcpy(nullfunc->addr3, stlc->bssid, ETH_ALEN);
1929
1930 stlc45xx_debug(DEBUG_PSM, "sending Null frame to %pM (powersave %d, "
1931 "fc 0x%x)", nullfunc->addr1, powersave, fc);
1932
1933 spin_lock_bh(&stlc->tx_lock);
1934
1935 entry = stlc45xx_txbuffer_alloc(stlc, skb->len);
1936
1937 spin_unlock_bh(&stlc->tx_lock);
1938
1939 if (!entry) {
1940 /*
1941 * The queue should be stopped before the firmware buffer
1942 * is full, so firmware buffer should always have enough
1943 * space.
1944 *
1945 * But I'm too lazy and omit it for now.
1946 */
1947 if (net_ratelimit())
1948 stlc45xx_warning("firmware tx buffer full is full "
1949 "for null frame");
1950 return -ENOSPC;
1951 }
1952
1953 payload = skb->data;
1954 payload_len = skb->len;
1955 padding = (int) (skb->data - sizeof(*data)) & 3;
1956 entry->header_len = sizeof(*data) + padding;
1957
1958 entry->skb = skb;
1959 entry->status_needed = false;
1960 entry->handle = (u32) skb;
1961 entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME);
1962
1963 stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B "
1964 "padding %d header_len %d)",
1965 entry->handle, payload, payload_len, padding,
1966 entry->header_len);
1967 stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len);
1968
1969 data = (struct s_lm_data_out *) skb_push(skb, entry->header_len);
1970
1971 memset(data, 0, entry->header_len);
1972
1973 if (padding)
1974 data->flags = LM_FLAG_ALIGN;
1975
1976 data->flags = LM_OUT_BURST;
1977 data->length = payload_len;
1978 data->handle = entry->handle;
1979 data->aid = 1;
1980 data->rts_retries = 7;
1981 data->retries = 7;
1982 data->aloft_ctrl = 0;
1983 data->crypt_offset = 58;
1984 data->keytype = 0;
1985 data->keylen = 0;
1986 data->queue = LM_QUEUE_DATA3;
1987 data->backlog = 32;
1988 data->antenna = 2;
1989 data->cts = 3;
1990 data->power = 127;
1991
1992 for (i = 0; i < 8; i++)
1993 data->aloft[i] = 0;
1994
1995 /*
1996 * check if there's enough space in tx buffer
1997 *
1998 * FIXME: ignored for now
1999 */
2000
2001 stlc45xx_tx_frame(stlc, entry->start, skb->data, skb->len);
2002
2003 list_add(&entry->tx_list, &stlc->tx_sent);
2004
2005 return 0;
2006}
2007
2008/* caller must hold mutex */
2009static void stlc45xx_tx_psm(struct stlc45xx *stlc, bool enable)
2010{
2011 struct s_lm_control *control;
2012 struct s_lmo_psm *psm;
2013 size_t len, psm_len;
2014
2015 WARN_ON(!stlc->associated);
2016 WARN_ON(stlc->aid < 1);
2017 WARN_ON(stlc->aid > 2007);
2018
2019 psm_len = sizeof(*psm);
2020 len = sizeof(*control) + psm_len;
2021 control = kzalloc(len, GFP_KERNEL);
2022 psm = (struct s_lmo_psm *) (control + 1);
2023
2024 control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET;
2025 control->length = psm_len;
2026 control->oid = LM_OID_PSM;
2027
2028 if (enable)
2029 psm->flags |= LM_PSM;
2030
2031 psm->aid = stlc->aid;
2032
2033 psm->beacon_rcpi_skip_max = 60;
2034
2035 psm->intervals[0].interval = 1;
2036 psm->intervals[0].periods = 1;
2037 psm->intervals[1].interval = 1;
2038 psm->intervals[1].periods = 1;
2039 psm->intervals[2].interval = 1;
2040 psm->intervals[2].periods = 1;
2041 psm->intervals[3].interval = 1;
2042 psm->intervals[3].periods = 1;
2043
2044 psm->nr = 0;
2045 psm->exclude[0] = 0;
2046
2047 stlc45xx_debug(DEBUG_PSM, "sending LM_OID_PSM (aid %d, interval %d)",
2048 psm->aid, psm->intervals[0].interval);
2049
2050 stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len);
2051
2052 kfree(control);
2053}
2054
2055static int stlc45xx_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2056{
2057 struct stlc45xx *stlc = hw->priv;
2058 struct ieee80211_tx_info *info;
2059 struct ieee80211_rate *rate;
2060 int payload_len, padding, i;
2061 struct s_lm_data_out *data;
2062 struct txbuffer *entry;
2063 char *payload;
2064
2065 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2066
2067 spin_lock_bh(&stlc->tx_lock);
2068
2069 entry = stlc45xx_txbuffer_alloc(stlc, skb->len);
2070 if (!entry) {
2071 /* the queue should be stopped before the firmware buffer
2072 * is full, so firmware buffer should always have enough
2073 * space */
2074 if (net_ratelimit())
2075 stlc45xx_warning("firmware buffer full");
2076 spin_unlock_bh(&stlc->tx_lock);
2077 return NETDEV_TX_BUSY;
2078 }
2079
2080 info = IEEE80211_SKB_CB(skb);
2081
2082 payload = skb->data;
2083 payload_len = skb->len;
2084 padding = (int) (skb->data - sizeof(*data)) & 3;
2085 entry->header_len = sizeof(*data) + padding;
2086
2087 entry->skb = skb;
2088 entry->status_needed = true;
2089 entry->handle = (u32) skb;
2090 entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME);
2091
2092 stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B "
2093 "padding %d header_len %d)",
2094 entry->handle, payload, payload_len, padding,
2095 entry->header_len);
2096 stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len);
2097
2098 data = (struct s_lm_data_out *) skb_push(skb, entry->header_len);
2099
2100 memset(data, 0, entry->header_len);
2101
2102 if (padding)
2103 data->flags = LM_FLAG_ALIGN;
2104
2105 data->flags = LM_OUT_BURST;
2106 data->length = payload_len;
2107 data->handle = entry->handle;
2108 data->aid = 1;
2109 data->rts_retries = 7;
2110 data->retries = 7;
2111 data->aloft_ctrl = 0;
2112 data->crypt_offset = 58;
2113 data->keytype = 0;
2114 data->keylen = 0;
2115 data->queue = 2;
2116 data->backlog = 32;
2117 data->antenna = 2;
2118 data->cts = 3;
2119 data->power = 127;
2120
2121 for (i = 0; i < 8; i++) {
2122 rate = ieee80211_get_tx_rate(stlc->hw, info);
2123 data->aloft[i] = rate->hw_value;
2124 }
2125
2126 list_add_tail(&entry->tx_list, &stlc->tx_pending);
2127
2128 /* check if there's enough space in tx buffer */
2129 if (stlc45xx_txbuffer_find(stlc, MAX_FRAME_LEN) == -1) {
2130 stlc45xx_debug(DEBUG_QUEUE, "tx buffer full, stopping queues");
2131 stlc->tx_queue_stopped = 1;
2132 ieee80211_stop_queues(stlc->hw);
2133 }
2134
2135 queue_work(stlc->hw->workqueue, &stlc->work);
2136
2137 spin_unlock_bh(&stlc->tx_lock);
2138
2139 return NETDEV_TX_OK;
2140}
2141
2142static int stlc45xx_op_start(struct ieee80211_hw *hw)
2143{
2144 struct stlc45xx *stlc = hw->priv;
2145 unsigned long timeout;
2146 int ret = 0;
2147
2148 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2149
2150 mutex_lock(&stlc->mutex);
2151
2152 stlc->fw_state = FW_STATE_BOOTING;
2153 stlc->channel = 1;
2154
2155 stlc45xx_power_on(stlc);
2156
2157 ret = stlc45xx_upload_firmware(stlc);
2158 if (ret < 0) {
2159 stlc45xx_power_off(stlc);
2160 goto out_unlock;
2161 }
2162
2163 stlc->tx_queue_stopped = 0;
2164
2165 mutex_unlock(&stlc->mutex);
2166
2167 timeout = msecs_to_jiffies(2000);
2168 timeout = wait_for_completion_interruptible_timeout(&stlc->fw_comp,
2169 timeout);
2170 if (!timeout) {
2171 stlc45xx_error("firmware boot failed");
2172 stlc45xx_power_off(stlc);
2173 ret = -1;
2174 goto out;
2175 }
2176
2177 stlc45xx_debug(DEBUG_BOOT, "firmware booted");
2178
2179 /* FIXME: should we take mutex just after wait_for_completion()? */
2180 mutex_lock(&stlc->mutex);
2181
2182 WARN_ON(stlc->fw_state != FW_STATE_READY);
2183
2184out_unlock:
2185 mutex_unlock(&stlc->mutex);
2186
2187out:
2188 return ret;
2189}
2190
/*
 * mac80211 stop callback: power the chip off, drop any frames still
 * queued in the driver and mark the firmware as off.
 */
static void stlc45xx_op_stop(struct ieee80211_hw *hw)
{
	struct stlc45xx *stlc = hw->priv;

	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	mutex_lock(&stlc->mutex);

	WARN_ON(stlc->fw_state != FW_STATE_READY);

	stlc45xx_power_off(stlc);

	/* FIXME: make sure that all work_structs have completed */

	spin_lock_bh(&stlc->tx_lock);
	stlc45xx_flush_queues(stlc);
	spin_unlock_bh(&stlc->tx_lock);

	stlc->fw_state = FW_STATE_OFF;

	mutex_unlock(&stlc->mutex);
}
2213
2214static int stlc45xx_op_add_interface(struct ieee80211_hw *hw,
2215 struct ieee80211_if_init_conf *conf)
2216{
2217 struct stlc45xx *stlc = hw->priv;
2218
2219 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2220
2221 switch (conf->type) {
2222 case NL80211_IFTYPE_STATION:
2223 break;
2224 default:
2225 return -EOPNOTSUPP;
2226 }
2227
2228 memcpy(stlc->mac_addr, conf->mac_addr, ETH_ALEN);
2229
2230 return 0;
2231}
2232
/* mac80211 remove_interface callback: nothing to tear down. */
static void stlc45xx_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_if_init_conf *conf)
{
	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
}
2238
2239static int stlc45xx_op_config(struct ieee80211_hw *hw, u32 changed)
2240{
2241 struct stlc45xx *stlc = hw->priv;
2242
2243 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2244
2245 mutex_lock(&stlc->mutex);
2246
2247 stlc->channel = hw->conf.channel->hw_value;
2248 stlc45xx_tx_scan(stlc);
2249 stlc45xx_tx_setup(stlc);
2250 stlc45xx_tx_edcf(stlc);
2251
2252 if ((hw->conf.flags & IEEE80211_CONF_PS) != stlc->psm) {
2253 stlc->psm = hw->conf.flags & IEEE80211_CONF_PS;
2254 if (stlc->associated) {
2255 stlc45xx_tx_psm(stlc, stlc->psm);
2256 stlc45xx_tx_nullfunc(stlc, stlc->psm);
2257 }
2258 }
2259
2260 mutex_unlock(&stlc->mutex);
2261
2262 return 0;
2263}
2264
/*
 * mac80211 configure_filter callback: the hardware offers no rx
 * filtering control, so report that no filter flags are honoured.
 */
static void stlc45xx_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *total_flags,
					 int mc_count,
					 struct dev_addr_list *mc_list)
{
	*total_flags = 0;
}
2273
2274static void stlc45xx_op_bss_info_changed(struct ieee80211_hw *hw,
2275 struct ieee80211_vif *vif,
2276 struct ieee80211_bss_conf *info,
2277 u32 changed)
2278{
2279 struct stlc45xx *stlc = hw->priv;
2280
2281 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2282 mutex_lock(&stlc->mutex);
2283
2284 memcpy(stlc->bssid, info->bssid, ETH_ALEN);
2285 stlc45xx_tx_setup(stlc);
2286
2287 mutex_unlock(&stlc->mutex);
2288
2289 if (changed & BSS_CHANGED_ASSOC) {
2290 stlc->associated = info->assoc;
2291 if (info->assoc)
2292 stlc->aid = info->aid;
2293 else
2294 stlc->aid = -1;
2295
2296 if (stlc->psm) {
2297 stlc45xx_tx_psm(stlc, stlc->psm);
2298 stlc45xx_tx_nullfunc(stlc, stlc->psm);
2299 }
2300 }
2301}
2302
2303
/*
 * Supported bitrates (units of 100 kbps): the four 802.11b CCK rates
 * followed by the eight 802.11g OFDM rates; hw_value is the index the
 * firmware uses in the tx descriptor aloft[] fields.
 *
 * Can't be const, mac80211 writes to this.
 */
static struct ieee80211_rate stlc45xx_rates[] = {
	{ .bitrate = 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 20, .hw_value = 1, .hw_value_short = 1, },
	{ .bitrate = 55, .hw_value = 2, .hw_value_short = 2, },
	{ .bitrate = 110, .hw_value = 3, .hw_value_short = 3, },
	{ .bitrate = 60, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 90, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 120, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 180, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 240, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 360, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 480, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 540, .hw_value = 11, .hw_value_short = 11, },
};
2319
/* 2.4 GHz channels 1-13 (frequencies in MHz); matches
 * SUPPORTED_CHANNELS in stlc45xx.h.
 * Can't be const, mac80211 writes to this. */
static struct ieee80211_channel stlc45xx_channels[] = {
	{ .hw_value = 1, .center_freq = 2412},
	{ .hw_value = 2, .center_freq = 2417},
	{ .hw_value = 3, .center_freq = 2422},
	{ .hw_value = 4, .center_freq = 2427},
	{ .hw_value = 5, .center_freq = 2432},
	{ .hw_value = 6, .center_freq = 2437},
	{ .hw_value = 7, .center_freq = 2442},
	{ .hw_value = 8, .center_freq = 2447},
	{ .hw_value = 9, .center_freq = 2452},
	{ .hw_value = 10, .center_freq = 2457},
	{ .hw_value = 11, .center_freq = 2462},
	{ .hw_value = 12, .center_freq = 2467},
	{ .hw_value = 13, .center_freq = 2472},
};
2336
/* The single supported band descriptor (2.4 GHz only), registered
 * with mac80211 in the probe path.
 * Can't be const, mac80211 writes to this. */
static struct ieee80211_supported_band stlc45xx_band_2ghz = {
	.channels = stlc45xx_channels,
	.n_channels = ARRAY_SIZE(stlc45xx_channels),
	.bitrates = stlc45xx_rates,
	.n_bitrates = ARRAY_SIZE(stlc45xx_rates),
};
2344
/* mac80211 callbacks implemented by this driver; only station-mode
 * basics are provided (no hw scan, no hw crypto offload). */
static const struct ieee80211_ops stlc45xx_ops = {
	.start = stlc45xx_op_start,
	.stop = stlc45xx_op_stop,
	.add_interface = stlc45xx_op_add_interface,
	.remove_interface = stlc45xx_op_remove_interface,
	.config = stlc45xx_op_config,
	.configure_filter = stlc45xx_op_configure_filter,
	.tx = stlc45xx_op_tx,
	.bss_info_changed = stlc45xx_op_bss_info_changed,
};
2355
2356static int stlc45xx_register_mac80211(struct stlc45xx *stlc)
2357{
2358 /* FIXME: SET_IEEE80211_PERM_ADDR() requires default_mac_addr
2359 to be non-const for some strange reason */
2360 static u8 default_mac_addr[ETH_ALEN] = {
2361 0x00, 0x02, 0xee, 0xc0, 0xff, 0xee
2362 };
2363 int ret;
2364
2365 SET_IEEE80211_PERM_ADDR(stlc->hw, default_mac_addr);
2366
2367 ret = ieee80211_register_hw(stlc->hw);
2368 if (ret) {
2369 stlc45xx_error("unable to register mac80211 hw: %d", ret);
2370 return ret;
2371 }
2372
2373 return 0;
2374}
2375
/* Empty release handler: the platform device below is static, so
 * there is nothing to free, but the driver core warns if .release is
 * missing. */
static void stlc45xx_device_release(struct device *dev)
{

}
2380
/* Statically allocated platform device used as the anchor for the
 * driver's sysfs attribute files (cal_rssi, cal_channels, tx_buf);
 * id -1 means a single, unnumbered instance. */
static struct platform_device stlc45xx_device = {
	.name = "stlc45xx",
	.id = -1,

	/* device model insists to have a release function */
	.dev = {
		.release = stlc45xx_device_release,
	},
};
2390
2391static int __devinit stlc45xx_probe(struct spi_device *spi)
2392{
2393 struct stlc45xx *stlc;
2394 struct ieee80211_hw *hw;
2395 int ret;
2396
2397 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2398
2399 /* mac80211 alloc */
2400 hw = ieee80211_alloc_hw(sizeof(*stlc), &stlc45xx_ops);
2401 if (!hw) {
2402 stlc45xx_error("could not alloc ieee80211_hw");
2403 ret = -ENOMEM;
2404 goto out;
2405 }
2406
2407 /* mac80211 clears hw->priv */
2408 stlc = hw->priv;
2409
2410 stlc->hw = hw;
2411 dev_set_drvdata(&spi->dev, stlc);
2412 stlc->spi = spi;
2413
2414 spi->bits_per_word = 16;
2415 spi->max_speed_hz = 24000000;
2416
2417 ret = spi_setup(spi);
2418 if (ret < 0)
2419 stlc45xx_error("spi_setup failed");
2420
2421 ret = gpio_request(stlc45xx_gpio_power, "stlc45xx power");
2422 if (ret < 0) {
2423 stlc45xx_error("power GPIO request failed: %d", ret);
2424 return ret;
2425 }
2426
2427 ret = gpio_request(stlc45xx_gpio_irq, "stlc45xx irq");
2428 if (ret < 0) {
2429 stlc45xx_error("irq GPIO request failed: %d", ret);
2430 goto out;
2431 }
2432
2433 gpio_direction_output(stlc45xx_gpio_power, 0);
2434 gpio_direction_input(stlc45xx_gpio_irq);
2435
2436 ret = request_irq(gpio_to_irq(stlc45xx_gpio_irq),
2437 stlc45xx_interrupt, IRQF_DISABLED, "stlc45xx",
2438 stlc->spi);
2439 if (ret < 0)
2440 /* FIXME: handle the error */
2441 stlc45xx_error("request_irq() failed");
2442
2443 set_irq_type(gpio_to_irq(stlc45xx_gpio_irq),
2444 IRQ_TYPE_EDGE_RISING);
2445
2446 disable_irq(gpio_to_irq(stlc45xx_gpio_irq));
2447
2448 ret = platform_device_register(&stlc45xx_device);
2449 if (ret) {
2450 stlc45xx_error("Couldn't register wlan_omap device.");
2451 return ret;
2452 }
2453 dev_set_drvdata(&stlc45xx_device.dev, stlc);
2454
2455 INIT_WORK(&stlc->work, stlc45xx_work);
2456 INIT_WORK(&stlc->work_reset, stlc45xx_work_reset);
2457 INIT_DELAYED_WORK(&stlc->work_tx_timeout, stlc45xx_work_tx_timeout);
2458 mutex_init(&stlc->mutex);
2459 init_completion(&stlc->fw_comp);
2460 spin_lock_init(&stlc->tx_lock);
2461 INIT_LIST_HEAD(&stlc->txbuffer);
2462 INIT_LIST_HEAD(&stlc->tx_pending);
2463 INIT_LIST_HEAD(&stlc->tx_sent);
2464
2465 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2466 IEEE80211_HW_SIGNAL_DBM |
2467 IEEE80211_HW_NOISE_DBM;
2468 /* four bytes for padding */
2469 hw->extra_tx_headroom = sizeof(struct s_lm_data_out) + 4;
2470
2471 /* unit us */
2472 hw->channel_change_time = 1000;
2473
2474 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
2475 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &stlc45xx_band_2ghz;
2476
2477 SET_IEEE80211_DEV(hw, &spi->dev);
2478
2479 BUILD_BUG_ON(sizeof(default_cal_rssi) != RSSI_CAL_ARRAY_LEN);
2480 BUILD_BUG_ON(sizeof(default_cal_channels) != CHANNEL_CAL_ARRAY_LEN);
2481
2482 stlc->cal_rssi = kmemdup(default_cal_rssi, RSSI_CAL_ARRAY_LEN,
2483 GFP_KERNEL);
2484 stlc->cal_channels = kmemdup(default_cal_channels,
2485 CHANNEL_CAL_ARRAY_LEN,
2486 GFP_KERNEL);
2487
2488 ret = device_create_file(&stlc45xx_device.dev, &dev_attr_cal_rssi);
2489 if (ret < 0) {
2490 stlc45xx_error("failed to create sysfs file cal_rssi");
2491 goto out;
2492 }
2493
2494 ret = device_create_file(&stlc45xx_device.dev, &dev_attr_cal_channels);
2495 if (ret < 0) {
2496 stlc45xx_error("failed to create sysfs file cal_channels");
2497 goto out;
2498 }
2499
2500 ret = device_create_file(&stlc45xx_device.dev, &dev_attr_tx_buf);
2501 if (ret < 0) {
2502 stlc45xx_error("failed to create sysfs file tx_buf");
2503 goto out;
2504 }
2505
2506 ret = stlc45xx_register_mac80211(stlc);
2507 if (ret < 0)
2508 goto out;
2509
2510 stlc45xx_info("v" DRIVER_VERSION " loaded");
2511
2512 stlc45xx_info("config buffer 0x%x-0x%x",
2513 FIRMWARE_CONFIG_START, FIRMWARE_CONFIG_END);
2514 stlc45xx_info("tx 0x%x-0x%x, rx 0x%x-0x%x",
2515 FIRMWARE_TXBUFFER_START, FIRMWARE_TXBUFFER_END,
2516 FIRMWARE_RXBUFFER_START, FIRMWARE_RXBUFFER_END);
2517
2518out:
2519 return ret;
2520}
2521
2522static int __devexit stlc45xx_remove(struct spi_device *spi)
2523{
2524 struct stlc45xx *stlc = dev_get_drvdata(&spi->dev);
2525
2526 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2527
2528 platform_device_unregister(&stlc45xx_device);
2529
2530 ieee80211_unregister_hw(stlc->hw);
2531
2532 free_irq(gpio_to_irq(stlc45xx_gpio_irq), spi);
2533
2534 gpio_free(stlc45xx_gpio_power);
2535 gpio_free(stlc45xx_gpio_irq);
2536
2537 /* FIXME: free cal_channels and cal_rssi? */
2538
2539 kfree(stlc->fw);
2540
2541 mutex_destroy(&stlc->mutex);
2542
2543 /* frees also stlc */
2544 ieee80211_free_hw(stlc->hw);
2545 stlc = NULL;
2546
2547 return 0;
2548}
2549
2550
/* SPI driver binding; probe/remove above handle the full device
 * lifecycle. */
static struct spi_driver stlc45xx_spi_driver = {
	.driver = {
		/* use cx3110x name because board-n800.c uses that for the
		 * SPI port */
		.name = "cx3110x",
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},

	.probe = stlc45xx_probe,
	.remove = __devexit_p(stlc45xx_remove),
};
2563
2564static int __init stlc45xx_init(void)
2565{
2566 int ret;
2567
2568 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2569
2570 ret = spi_register_driver(&stlc45xx_spi_driver);
2571 if (ret < 0) {
2572 stlc45xx_error("failed to register SPI driver: %d", ret);
2573 goto out;
2574 }
2575
2576out:
2577 return ret;
2578}
2579
/* Module exit point: unregisters the SPI driver, which in turn
 * triggers stlc45xx_remove() for any bound device. */
static void __exit stlc45xx_exit(void)
{
	stlc45xx_debug(DEBUG_FUNC, "%s", __func__);

	spi_unregister_driver(&stlc45xx_spi_driver);

	stlc45xx_info("unloaded");
}
2588
/* Module entry/exit registration and metadata. */
module_init(stlc45xx_init);
module_exit(stlc45xx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
/* autoload when an SPI device named "cx3110x" appears */
MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/staging/stlc45xx/stlc45xx.h b/drivers/staging/stlc45xx/stlc45xx.h
deleted file mode 100644
index ac96bbbde79f..000000000000
--- a/drivers/staging/stlc45xx/stlc45xx.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/*
2 * This file is part of stlc45xx
3 *
4 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/mutex.h>
25#include <linux/list.h>
26#include <net/mac80211.h>
27
28#include "stlc45xx_lmac.h"
29
30#define DRIVER_NAME "stlc45xx"
31#define DRIVER_VERSION "0.1.3"
32
33#define DRIVER_PREFIX DRIVER_NAME ": "
34
/* Debug message categories; OR the wanted bits into DEBUG_LEVEL to
 * enable them. Bits 4 and 7 are unused — presumably leftovers from
 * removed categories (TODO confirm against driver history). */
enum {
	DEBUG_NONE = 0,
	DEBUG_FUNC = 1 << 0,	/* function entry tracing */
	DEBUG_IRQ = 1 << 1,
	DEBUG_BH = 1 << 2,
	DEBUG_RX = 1 << 3,
	DEBUG_RX_CONTENT = 1 << 5,	/* hex dumps of received frames */
	DEBUG_TX = 1 << 6,
	DEBUG_TX_CONTENT = 1 << 8,	/* hex dumps of transmitted frames */
	DEBUG_TXBUFFER = 1 << 9,
	DEBUG_QUEUE = 1 << 10,
	DEBUG_BOOT = 1 << 11,
	DEBUG_PSM = 1 << 12,
	DEBUG_ALL = ~0,
};
50
51#define DEBUG_LEVEL DEBUG_NONE
52/* #define DEBUG_LEVEL DEBUG_ALL */
53/* #define DEBUG_LEVEL (DEBUG_TX | DEBUG_RX | DEBUG_IRQ) */
54/* #define DEBUG_LEVEL (DEBUG_TX | DEBUG_MEMREGION | DEBUG_QUEUE) */
55/* #define DEBUG_LEVEL (DEBUG_MEMREGION | DEBUG_QUEUE) */
56
57#define stlc45xx_error(fmt, arg...) \
58 printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
59
60#define stlc45xx_warning(fmt, arg...) \
61 printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
62
63#define stlc45xx_info(fmt, arg...) \
64 printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg)
65
66#define stlc45xx_debug(level, fmt, arg...) \
67 do { \
68 if (level & DEBUG_LEVEL) \
69 printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \
70 } while (0)
71
72#define stlc45xx_dump(level, buf, len) \
73 do { \
74 if (level & DEBUG_LEVEL) \
75 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \
76 16, 1, buf, len, 1); \
77 } while (0)
78
/* Expand a 6-byte MAC address into six printf arguments matching
 * MACSTR. The original wrapped the expansion in parentheses, turning
 * it into a single comma expression: printk(MACSTR, MAC2STR(a)) then
 * passed one argument to six %02x conversions (undefined behavior).
 * The expansion must deliberately stay unparenthesized. */
#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
81
82/* Bit 15 is read/write bit; ON = READ, OFF = WRITE */
83#define ADDR_READ_BIT_15 0x8000
84
85#define SPI_ADRS_ARM_INTERRUPTS 0x00
86#define SPI_ADRS_ARM_INT_EN 0x04
87
88#define SPI_ADRS_HOST_INTERRUPTS 0x08
89#define SPI_ADRS_HOST_INT_EN 0x0c
90#define SPI_ADRS_HOST_INT_ACK 0x10
91
92#define SPI_ADRS_GEN_PURP_1 0x14
93#define SPI_ADRS_GEN_PURP_2 0x18
94
95/* high word */
96#define SPI_ADRS_DEV_CTRL_STAT 0x26
97
98#define SPI_ADRS_DMA_DATA 0x28
99
100#define SPI_ADRS_DMA_WRITE_CTRL 0x2c
101#define SPI_ADRS_DMA_WRITE_LEN 0x2e
102#define SPI_ADRS_DMA_WRITE_BASE 0x30
103
104#define SPI_ADRS_DMA_READ_CTRL 0x34
105#define SPI_ADRS_DMA_READ_LEN 0x36
106#define SPI_ADRS_DMA_READ_BASE 0x38
107
108#define SPI_CTRL_STAT_HOST_OVERRIDE 0x8000
109#define SPI_CTRL_STAT_START_HALTED 0x4000
110#define SPI_CTRL_STAT_RAM_BOOT 0x2000
111#define SPI_CTRL_STAT_HOST_RESET 0x1000
112#define SPI_CTRL_STAT_HOST_CPU_EN 0x0800
113
114#define SPI_DMA_WRITE_CTRL_ENABLE 0x0001
115#define SPI_DMA_READ_CTRL_ENABLE 0x0001
116#define HOST_ALLOWED (1 << 7)
117
118#define FIRMWARE_ADDRESS 0x20000
119
120#define SPI_TIMEOUT 100 /* msec */
121
122#define SPI_MAX_TX_PACKETS 32
123
124#define SPI_MAX_PACKET_SIZE 32767
125
126#define SPI_TARGET_INT_WAKEUP 0x00000001
127#define SPI_TARGET_INT_SLEEP 0x00000002
128#define SPI_TARGET_INT_RDDONE 0x00000004
129
130#define SPI_TARGET_INT_CTS 0x00004000
131#define SPI_TARGET_INT_DR 0x00008000
132
133#define SPI_HOST_INT_READY 0x00000001
134#define SPI_HOST_INT_WR_READY 0x00000002
135#define SPI_HOST_INT_SW_UPDATE 0x00000004
136#define SPI_HOST_INT_UPDATE 0x10000000
137
138/* clear to send */
139#define SPI_HOST_INT_CTS 0x00004000
140
141/* data ready */
142#define SPI_HOST_INT_DR 0x00008000
143
144#define SPI_HOST_INTS_DEFAULT \
145 (SPI_HOST_INT_READY | SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE)
146
147#define TARGET_BOOT_SLEEP 50
148
149/* The firmware buffer is divided into three areas:
150 *
151 * o config area (for control commands)
152 * o tx buffer
153 * o rx buffer
154 */
155#define FIRMWARE_BUFFER_START 0x20200
156#define FIRMWARE_BUFFER_END 0x27c60
157#define FIRMWARE_BUFFER_LEN (FIRMWARE_BUFFER_END - FIRMWARE_BUFFER_START)
158#define FIRMWARE_MTU 3240
159#define FIRMWARE_CONFIG_PAYLOAD_LEN 1024
160#define FIRMWARE_CONFIG_START FIRMWARE_BUFFER_START
161#define FIRMWARE_CONFIG_LEN (sizeof(struct s_lm_control) + \
162 FIRMWARE_CONFIG_PAYLOAD_LEN)
163#define FIRMWARE_CONFIG_END (FIRMWARE_CONFIG_START + FIRMWARE_CONFIG_LEN - 1)
164#define FIRMWARE_RXBUFFER_LEN (5 * FIRMWARE_MTU + 1024)
165#define FIRMWARE_RXBUFFER_START (FIRMWARE_BUFFER_END - FIRMWARE_RXBUFFER_LEN)
166#define FIRMWARE_RXBUFFER_END (FIRMWARE_RXBUFFER_START + \
167 FIRMWARE_RXBUFFER_LEN - 1)
168#define FIRMWARE_TXBUFFER_START (FIRMWARE_BUFFER_START + FIRMWARE_CONFIG_LEN)
169#define FIRMWARE_TXBUFFER_LEN (FIRMWARE_BUFFER_LEN - FIRMWARE_CONFIG_LEN - \
170 FIRMWARE_RXBUFFER_LEN)
171#define FIRMWARE_TXBUFFER_END (FIRMWARE_TXBUFFER_START + \
172 FIRMWARE_TXBUFFER_LEN - 1)
173
174#define FIRMWARE_TXBUFFER_HEADER 100
175#define FIRMWARE_TXBUFFER_TRAILER 4
176
177/* FIXME: come up with a proper value */
178#define MAX_FRAME_LEN 2500
179
180/* unit is ms */
181#define TX_FRAME_LIFETIME 2000
182#define TX_TIMEOUT 4000
183
184#define SUPPORTED_CHANNELS 13
185
186/* FIXME */
187/* #define CHANNEL_CAL_LEN offsetof(struct s_lmo_scan, bratemask) - \ */
188/* offsetof(struct s_lmo_scan, channel) */
189#define CHANNEL_CAL_LEN 292
190#define CHANNEL_CAL_ARRAY_LEN (SUPPORTED_CHANNELS * CHANNEL_CAL_LEN)
191/* FIXME */
192/* #define RSSI_CAL_LEN sizeof(struct s_lmo_scan) - \ */
193/* offsetof(struct s_lmo_scan, rssical) */
194#define RSSI_CAL_LEN 8
195#define RSSI_CAL_ARRAY_LEN (SUPPORTED_CHANNELS * RSSI_CAL_LEN)
196
197struct s_dma_regs {
198 unsigned short cmd;
199 unsigned short len;
200 unsigned long addr;
201};
202
203struct stlc45xx_ie_tim {
204 u8 dtim_count;
205 u8 dtim_period;
206 u8 bmap_control;
207 u8 pvbmap[251];
208};
209
/* Bookkeeping for one frame placed in the device's firmware tx
 * buffer window. */
struct txbuffer {
	/* can be removed when switched to skb queue */
	struct list_head tx_list;

	struct list_head buffer_list;

	/* offsets into the firmware tx buffer region */
	int start;
	int frame_start;
	int end;

	struct sk_buff *skb;	/* the frame being transmitted */
	u32 handle;		/* firmware handle identifying this frame */

	/* whether mac80211 expects a tx status report for this frame */
	bool status_needed;

	int header_len;

	/* expiry time, unit jiffies */
	unsigned long lifetime;
};
230
/* Firmware/boot state machine of the device. */
enum fw_state {
	FW_STATE_OFF,		/* powered down */
	FW_STATE_BOOTING,	/* firmware upload in progress */
	FW_STATE_READY,		/* firmware running normally */
	FW_STATE_RESET,		/* reset requested */
	FW_STATE_RESETTING,	/* reset in progress */
};
238
/* Per-device driver state, allocated as the ieee80211_hw private
 * area in probe. */
struct stlc45xx {
	struct ieee80211_hw *hw;	/* mac80211 handle */
	struct spi_device *spi;		/* underlying SPI transport */
	struct work_struct work;
	struct work_struct work_reset;
	struct delayed_work work_tx_timeout;
	struct mutex mutex;		/* serializes configuration state */
	struct completion fw_comp;	/* firmware boot completion */


	u8 bssid[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int channel;		/* current channel (hw_value) */

	u8 *cal_rssi;		/* RSSI calibration data, kmemdup'd */
	u8 *cal_channels;	/* per-channel calibration data, kmemdup'd */

	enum fw_state fw_state;

	spinlock_t tx_lock;

	/* protected by tx_lock */
	struct list_head txbuffer;

	/* protected by tx_lock */
	struct list_head tx_pending;

	/* protected by tx_lock */
	int tx_queue_stopped;

	/* protected by mutex */
	struct list_head tx_sent;

	int tx_frames;

	u8 *fw;			/* firmware image */
	int fw_len;

	bool psm;		/* power-save mode enabled */
	bool associated;
	int aid;		/* association ID, -1 when unassociated */
	bool pspolling;
};
282
283
diff --git a/drivers/staging/stlc45xx/stlc45xx_lmac.h b/drivers/staging/stlc45xx/stlc45xx_lmac.h
deleted file mode 100644
index af5db801347f..000000000000
--- a/drivers/staging/stlc45xx/stlc45xx_lmac.h
+++ /dev/null
@@ -1,434 +0,0 @@
1/************************************************************************
2* This is the LMAC API interface header file for STLC4560. *
3* Copyright (C) 2007 Conexant Systems, Inc. *
4* This program is free software; you can redistribute it and/or *
5* modify it under the terms of the GNU General Public License *
6* as published by the Free Software Foundation; either version 2 *
7* of the License, or (at your option) any later version. *
8* *
9* This program is distributed in the hope that it will be useful, *
10* but WITHOUT ANY WARRANTY; without even the implied warranty of *
11* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12* GNU General Public License for more details. *
13* *
14* You should have received a copy of the GNU General Public License *
15* along with this program. If not, see <http://www.gnu.org/licenses/>.*
16*************************************************************************/
17
18#ifndef __lmac_h__
19#define __lmac_h__
20
21#define LM_TOP_VARIANT 0x0506
22#define LM_BOTTOM_VARIANT 0x0506
23
24/*
25 * LMAC - UMAC interface definition:
26 */
27
28#define LM_FLAG_CONTROL 0x8000
29#define LM_FLAG_ALIGN 0x4000
30
31#define LM_CTRL_OPSET 0x0001
32
33#define LM_OUT_PROMISC 0x0001
34#define LM_OUT_TIMESTAMP 0x0002
35#define LM_OUT_SEQNR 0x0004
36#define LM_OUT_BURST 0x0010
37#define LM_OUT_NOCANCEL 0x0020
38#define LM_OUT_CLEARTIM 0x0040
39#define LM_OUT_HITCHHIKE 0x0080
40#define LM_OUT_COMPRESS 0x0100
41#define LM_OUT_CONCAT 0x0200
42#define LM_OUT_PCS_ACCEPT 0x0400
43#define LM_OUT_WAITEOSP 0x0800
44
45
46#define LM_ALOFT_SP 0x10
47#define LM_ALOFT_CTS 0x20
48#define LM_ALOFT_RTS 0x40
49#define LM_ALOFT_MASK 0x1f
50#define LM_ALOFT_RATE 0x0f
51
52#define LM_IN_FCS_GOOD 0x0001
53#define LM_IN_MATCH_MAC 0x0002
54#define LM_IN_MCBC 0x0004
55#define LM_IN_BEACON 0x0008
56#define LM_IN_MATCH_BSS 0x0010
57#define LM_IN_BCAST_BSS 0x0020
58#define LM_IN_DATA 0x0040
59#define LM_IN_TRUNCATED 0x0080
60
61#define LM_IN_TRANSPARENT 0x0200
62
63#define LM_QUEUE_BEACON 0
64#define LM_QUEUE_SCAN 1
65#define LM_QUEUE_MGT 2
66#define LM_QUEUE_MCBC 3
67#define LM_QUEUE_DATA 4
68#define LM_QUEUE_DATA0 4
69#define LM_QUEUE_DATA1 5
70#define LM_QUEUE_DATA2 6
71#define LM_QUEUE_DATA3 7
72
73#define LM_SETUP_INFRA 0x0001
74#define LM_SETUP_IBSS 0x0002
75#define LM_SETUP_TRANSPARENT 0x0008
76#define LM_SETUP_PROMISCUOUS 0x0010
77#define LM_SETUP_HIBERNATE 0x0020
78#define LM_SETUP_NOACK 0x0040
79#define LM_SETUP_RX_DISABLED 0x0080
80
81#define LM_ANTENNA_0 0
82#define LM_ANTENNA_1 1
83#define LM_ANTENNA_DIVERSITY 2
84
85#define LM_TX_FAILED 0x0001
86#define LM_TX_PSM 0x0002
87#define LM_TX_PSM_CANCELLED 0x0004
88
89#define LM_SCAN_EXIT 0x0001
90#define LM_SCAN_TRAP 0x0002
91#define LM_SCAN_ACTIVE 0x0004
92#define LM_SCAN_FILTER 0x0008
93
94#define LM_PSM 0x0001
95#define LM_PSM_DTIM 0x0002
96#define LM_PSM_MCBC 0x0004
97#define LM_PSM_CHECKSUM 0x0008
98#define LM_PSM_SKIP_MORE_DATA 0x0010
99#define LM_PSM_BEACON_TIMEOUT 0x0020
100#define LM_PSM_HFOSLEEP 0x0040
101#define LM_PSM_AUTOSWITCH_SLEEP 0x0080
102#define LM_PSM_LPIT 0x0100
103#define LM_PSM_BF_UCAST_SKIP 0x0200
104#define LM_PSM_BF_MCAST_SKIP 0x0400
105
106/* hfosleep */
107#define LM_PSM_SLEEP_OPTION_MASK (LM_PSM_AUTOSWITCH_SLEEP | LM_PSM_HFOSLEEP)
108#define LM_PSM_SLEEP_OPTION_SHIFT 6
109/* hfosleepend */
110#define LM_PSM_BF_OPTION_MASK (LM_PSM_BF_MCAST_SKIP | LM_PSM_BF_UCAST_SKIP)
111#define LM_PSM_BF_OPTION_SHIFT 9
112
113
114#define LM_PRIVACC_WEP 0x01
115#define LM_PRIVACC_TKIP 0x02
116#define LM_PRIVACC_MICHAEL 0x04
117#define LM_PRIVACC_CCX_KP 0x08
118#define LM_PRIVACC_CCX_MIC 0x10
119#define LM_PRIVACC_AES_CCMP 0x20
120
121/* size of s_lm_descr in words */
122#define LM_DESCR_SIZE_WORDS 11
123
124#ifndef __ASSEMBLER__
125
126enum {
127 LM_MODE_CLIENT = 0,
128 LM_MODE_AP
129};
130
/* Firmware descriptor exchanged with the LMAC; field layout is wire
 * format and must not be changed. */
struct s_lm_descr {
	uint16_t modes;
	uint16_t flags;
	uint32_t buffer_start;	/* device buffer window start */
	uint32_t buffer_end;	/* device buffer window end */
	uint8_t header;
	uint8_t trailer;
	uint8_t tx_queues;
	uint8_t tx_depth;
	uint8_t privacy;
	uint8_t rx_keycache;
	uint8_t tim_size;
	uint8_t pad1;		/* alignment padding */
	uint8_t rates[16];
	uint32_t link;
	uint16_t mtu;
};
148
149
/* Header of an LMAC control message; the payload identified by `oid'
 * follows immediately after the structure (wire format — do not
 * change layout). */
struct s_lm_control {
	uint16_t flags;
	uint16_t length;	/* total message length */
	uint32_t handle;
	uint16_t oid;		/* one of the LM_OID_* values */
	uint16_t pad;		/* alignment padding */
	/* uint8_t data[]; */
};
158
159enum {
160 LM_PRIV_NONE = 0,
161 LM_PRIV_WEP,
162 LM_PRIV_TKIP,
163 LM_PRIV_TKIPMICHAEL,
164 LM_PRIV_CCX_WEPMIC,
165 LM_PRIV_CCX_KPMIC,
166 LM_PRIV_CCX_KP,
167 LM_PRIV_AES_CCMP
168};
169
170enum {
171 LM_DECRYPT_NONE,
172 LM_DECRYPT_OK,
173 LM_DECRYPT_NOKEY,
174 LM_DECRYPT_NOMICHAEL,
175 LM_DECRYPT_NOCKIPMIC,
176 LM_DECRYPT_FAIL_WEP,
177 LM_DECRYPT_FAIL_TKIP,
178 LM_DECRYPT_FAIL_MICHAEL,
179 LM_DECRYPT_FAIL_CKIPKP,
180 LM_DECRYPT_FAIL_CKIPMIC,
181 LM_DECRYPT_FAIL_AESCCMP
182};
183
/* Per-frame transmit header prepended to outgoing frames (wire
 * format — do not change layout). */
struct s_lm_data_out {
	uint16_t flags;
	uint16_t length;
	uint32_t handle;	/* echoed back in the tx status */
	uint16_t aid;
	uint8_t rts_retries;
	uint8_t retries;
	uint8_t aloft[8];	/* per-attempt rate entries */
	uint8_t aloft_ctrl;
	uint8_t crypt_offset;
	uint8_t keytype;
	uint8_t keylen;
	uint8_t key[16];
	uint8_t queue;		/* one of the LM_QUEUE_* values */
	uint8_t backlog;
	uint16_t durations[4];
	uint8_t antenna;
	uint8_t cts;
	int16_t power;
	uint8_t pad[2];		/* alignment padding */
	/*uint8_t data[];*/
};
206
207#define LM_RCPI_INVALID (0xff)
208
/* Per-frame receive header preceding incoming frames (wire format —
 * do not change layout). */
struct s_lm_data_in {
	uint16_t flags;		/* LM_IN_* status bits */
	uint16_t length;
	uint16_t frequency;	/* rx frequency in MHz, presumably — verify */
	uint8_t antenna;
	uint8_t rate;
	uint8_t rcpi;		/* LM_RCPI_INVALID when unavailable */
	uint8_t sq;		/* signal quality */
	uint8_t decrypt;	/* one of the LM_DECRYPT_* values */
	uint8_t rssi_raw;
	uint32_t clock[2];	/* device timestamp */
	/*uint8_t data[];*/
};
222
223union u_lm_data {
224 struct s_lm_data_out out;
225 struct s_lm_data_in in;
226};
227
228enum {
229 LM_OID_SETUP = 0,
230 LM_OID_SCAN = 1,
231 LM_OID_TRAP = 2,
232 LM_OID_EDCF = 3,
233 LM_OID_KEYCACHE = 4,
234 LM_OID_PSM = 6,
235 LM_OID_TXCANCEL = 7,
236 LM_OID_TX = 8,
237 LM_OID_BURST = 9,
238 LM_OID_STATS = 10,
239 LM_OID_LED = 13,
240 LM_OID_TIMER = 15,
241 LM_OID_NAV = 20,
242 LM_OID_PCS = 22,
243 LM_OID_BT_BALANCER = 28,
244 LM_OID_GROUP_ADDRESS_TABLE = 30,
245 LM_OID_ARPTABLE = 31,
246 LM_OID_BT_OPTIONS = 35
247};
248
249enum {
250 LM_FRONTEND_UNKNOWN = 0,
251 LM_FRONTEND_DUETTE3,
252 LM_FRONTEND_DUETTE2,
253 LM_FRONTEND_FRISBEE,
254 LM_FRONTEND_CROSSBOW,
255 LM_FRONTEND_LONGBOW
256};
257
258
259#define INVALID_LPF_BANDWIDTH 0xffff
260#define INVALID_OSC_START_DELAY 0xffff
261
262struct s_lmo_setup {
263 uint16_t flags;
264 uint8_t macaddr[6];
265 uint8_t bssid[6];
266 uint8_t antenna;
267 uint8_t rx_align;
268 uint32_t rx_buffer;
269 uint16_t rx_mtu;
270 uint16_t frontend;
271 uint16_t timeout;
272 uint16_t truncate;
273 uint32_t bratemask;
274 uint8_t sbss_offset;
275 uint8_t mcast_window;
276 uint8_t rx_rssi_threshold;
277 uint8_t rx_ed_threshold;
278 uint32_t ref_clock;
279 uint16_t lpf_bandwidth;
280 uint16_t osc_start_delay;
281};
282
283
/* LM_OID_SCAN payload: channel tuning plus calibration blobs. The
 * 292/8 byte sizes match CHANNEL_CAL_LEN and RSSI_CAL_LEN in
 * stlc45xx.h (wire format — do not change layout). */
struct s_lmo_scan {
	uint16_t flags;		/* LM_SCAN_* bits */
	uint16_t dwell;
	uint8_t channel[292];	/* per-channel calibration record */
	uint32_t bratemask;
	uint8_t aloft[8];
	uint8_t rssical[8];	/* RSSI calibration record */
};
292
293
294enum {
295 LM_TRAP_SCAN = 0,
296 LM_TRAP_TIMER,
297 LM_TRAP_BEACON_TX,
298 LM_TRAP_FAA_RADIO_ON,
299 LM_TRAP_FAA_RADIO_OFF,
300 LM_TRAP_RADAR,
301 LM_TRAP_NO_BEACON,
302 LM_TRAP_TBTT,
303 LM_TRAP_SCO_ENTER,
304 LM_TRAP_SCO_EXIT
305};
306
307struct s_lmo_trap {
308 uint16_t event;
309 uint16_t frequency;
310};
311
312struct s_lmo_timer {
313 uint32_t interval;
314};
315
316struct s_lmo_nav {
317 uint32_t period;
318};
319
320
321struct s_lmo_edcf_queue;
322
323struct s_lmo_edcf {
324 uint8_t flags;
325 uint8_t slottime;
326 uint8_t sifs;
327 uint8_t eofpad;
328 struct s_lmo_edcf_queue {
329 uint8_t aifs;
330 uint8_t pad0;
331 uint16_t cwmin;
332 uint16_t cwmax;
333 uint16_t txop;
334 } queues[8];
335 uint8_t mapping[4];
336 uint16_t maxburst;
337 uint16_t round_trip_delay;
338};
339
340struct s_lmo_keycache {
341 uint8_t entry;
342 uint8_t keyid;
343 uint8_t address[6];
344 uint8_t pad[2];
345 uint8_t keytype;
346 uint8_t keylen;
347 uint8_t key[24];
348};
349
350
351struct s_lm_interval;
352
353struct s_lmo_psm {
354 uint16_t flags;
355 uint16_t aid;
356 struct s_lm_interval {
357 uint16_t interval;
358 uint16_t periods;
359 } intervals[4];
360 /* uint16_t pad; */
361 uint8_t beacon_rcpi_skip_max;
362 uint8_t rcpi_delta_threshold;
363 uint8_t nr;
364 uint8_t exclude[1];
365};
366
367#define MC_FILTER_ADDRESS_NUM 4
368
369struct s_lmo_group_address_table {
370 uint16_t filter_enable;
371 uint16_t num_address;
372 uint8_t macaddr_list[MC_FILTER_ADDRESS_NUM][6];
373};
374
375struct s_lmo_txcancel {
376 uint32_t address[1];
377};
378
379
380struct s_lmo_tx {
381 uint8_t flags;
382 uint8_t retries;
383 uint8_t rcpi;
384 uint8_t sq;
385 uint16_t seqctrl;
386 uint8_t antenna;
387 uint8_t pad;
388};
389
390struct s_lmo_burst {
391 uint8_t flags;
392 uint8_t queue;
393 uint8_t backlog;
394 uint8_t pad;
395 uint16_t durations[32];
396};
397
398struct s_lmo_stats {
399 uint32_t valid;
400 uint32_t fcs;
401 uint32_t abort;
402 uint32_t phyabort;
403 uint32_t rts_success;
404 uint32_t rts_fail;
405 uint32_t timestamp;
406 uint32_t time_tx;
407 uint32_t noisefloor;
408 uint32_t sample_noise[8];
409 uint32_t sample_cca;
410 uint32_t sample_tx;
411};
412
413
414struct s_lmo_led {
415 uint16_t flags;
416 uint16_t mask[2];
417 uint16_t delay/*[2]*/;
418};
419
420
421struct s_lmo_bt_balancer {
422 uint16_t prio_thresh;
423 uint16_t acl_thresh;
424};
425
426
427struct s_lmo_arp_table {
428 uint16_t filter_enable;
429 uint32_t ipaddr;
430};
431
432#endif /* __ASSEMBLER__ */
433
434#endif /* __lmac_h__ */
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 3d2a84c45829..e139eaeaa174 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -25,6 +25,7 @@
25#include <linux/poll.h> 25#include <linux/poll.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/sched.h>
28#include <asm/time.h> 29#include <asm/time.h>
29#include <asm/io.h> 30#include <asm/io.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 8960fa9ee7aa..00fe0803c21c 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/sched.h>
28#include <asm/time.h> 29#include <asm/time.h>
29#include <asm/io.h> 30#include <asm/io.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 7f96bcaf1c60..05186110c029 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1332,7 +1332,6 @@ device_release_WPADEV(pDevice);
1332 free_netdev(pDevice->dev); 1332 free_netdev(pDevice->dev);
1333 } 1333 }
1334 1334
1335 kfree(pDevice);
1336 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect3.. \n"); 1335 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect3.. \n");
1337} 1336}
1338 1337
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index 940460c39f36..132671d96d0d 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -1,6 +1,6 @@
1config W35UND 1config W35UND
2 tristate "IS89C35 WLAN USB driver" 2 tristate "IS89C35 WLAN USB driver"
3 depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL 3 depends on MAC80211 && WLAN && USB && EXPERIMENTAL
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is highly experimental driver for Winbond WIFI card. 6 This is highly experimental driver for Winbond WIFI card.
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 8950724f168e..067082a7d759 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -51,10 +51,26 @@ static struct ieee80211_supported_band wbsoft_band_2GHz = {
51 .n_bitrates = ARRAY_SIZE(wbsoft_rates), 51 .n_bitrates = ARRAY_SIZE(wbsoft_rates),
52}; 52};
53 53
54static void hal_set_beacon_period(struct hw_data *pHwData, u16 beacon_period)
55{
56 u32 tmp;
57
58 if (pHwData->SurpriseRemove)
59 return;
60
61 pHwData->BeaconPeriod = beacon_period;
62 tmp = pHwData->BeaconPeriod << 16;
63 tmp |= pHwData->ProbeDelay;
64 Wb35Reg_Write(pHwData, 0x0848, tmp);
65}
66
54static int wbsoft_add_interface(struct ieee80211_hw *dev, 67static int wbsoft_add_interface(struct ieee80211_hw *dev,
55 struct ieee80211_if_init_conf *conf) 68 struct ieee80211_if_init_conf *conf)
56{ 69{
57 printk("wbsoft_add interface called\n"); 70 struct wbsoft_priv *priv = dev->priv;
71
72 hal_set_beacon_period(&priv->sHwData, conf->vif->bss_conf.beacon_int);
73
58 return 0; 74 return 0;
59} 75}
60 76
@@ -83,10 +99,16 @@ static int wbsoft_get_tx_stats(struct ieee80211_hw *hw,
83 return 0; 99 return 0;
84} 100}
85 101
102static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
103 struct dev_addr_list *mc_list)
104{
105 return mc_count;
106}
107
86static void wbsoft_configure_filter(struct ieee80211_hw *dev, 108static void wbsoft_configure_filter(struct ieee80211_hw *dev,
87 unsigned int changed_flags, 109 unsigned int changed_flags,
88 unsigned int *total_flags, 110 unsigned int *total_flags,
89 int mc_count, struct dev_mc_list *mclist) 111 u64 multicast)
90{ 112{
91 unsigned int new_flags; 113 unsigned int new_flags;
92 114
@@ -94,7 +116,7 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
94 116
95 if (*total_flags & FIF_PROMISC_IN_BSS) 117 if (*total_flags & FIF_PROMISC_IN_BSS)
96 new_flags |= FIF_PROMISC_IN_BSS; 118 new_flags |= FIF_PROMISC_IN_BSS;
97 else if ((*total_flags & FIF_ALLMULTI) || (mc_count > 32)) 119 else if ((*total_flags & FIF_ALLMULTI) || (multicast > 32))
98 new_flags |= FIF_ALLMULTI; 120 new_flags |= FIF_ALLMULTI;
99 121
100 dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; 122 dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
@@ -138,19 +160,6 @@ static void hal_set_radio_mode(struct hw_data *pHwData, unsigned char radio_off)
138 Wb35Reg_Write(pHwData, 0x0824, reg->M24_MacControl); 160 Wb35Reg_Write(pHwData, 0x0824, reg->M24_MacControl);
139} 161}
140 162
141static void hal_set_beacon_period(struct hw_data *pHwData, u16 beacon_period)
142{
143 u32 tmp;
144
145 if (pHwData->SurpriseRemove)
146 return;
147
148 pHwData->BeaconPeriod = beacon_period;
149 tmp = pHwData->BeaconPeriod << 16;
150 tmp |= pHwData->ProbeDelay;
151 Wb35Reg_Write(pHwData, 0x0848, tmp);
152}
153
154static void 163static void
155hal_set_current_channel_ex(struct hw_data *pHwData, ChanInfo channel) 164hal_set_current_channel_ex(struct hw_data *pHwData, ChanInfo channel)
156{ 165{
@@ -244,7 +253,6 @@ static void hal_set_accept_beacon(struct hw_data *pHwData, u8 enable)
244static int wbsoft_config(struct ieee80211_hw *dev, u32 changed) 253static int wbsoft_config(struct ieee80211_hw *dev, u32 changed)
245{ 254{
246 struct wbsoft_priv *priv = dev->priv; 255 struct wbsoft_priv *priv = dev->priv;
247 struct ieee80211_conf *conf = &dev->conf;
248 ChanInfo ch; 256 ChanInfo ch;
249 257
250 printk("wbsoft_config called\n"); 258 printk("wbsoft_config called\n");
@@ -254,7 +262,6 @@ static int wbsoft_config(struct ieee80211_hw *dev, u32 changed)
254 ch.ChanNo = 1; 262 ch.ChanNo = 1;
255 263
256 hal_set_current_channel(&priv->sHwData, ch); 264 hal_set_current_channel(&priv->sHwData, ch);
257 hal_set_beacon_period(&priv->sHwData, conf->beacon_int);
258 hal_set_accept_broadcast(&priv->sHwData, 1); 265 hal_set_accept_broadcast(&priv->sHwData, 1);
259 hal_set_accept_promiscuous(&priv->sHwData, 1); 266 hal_set_accept_promiscuous(&priv->sHwData, 1);
260 hal_set_accept_multicast(&priv->sHwData, 1); 267 hal_set_accept_multicast(&priv->sHwData, 1);
@@ -277,6 +284,7 @@ static const struct ieee80211_ops wbsoft_ops = {
277 .add_interface = wbsoft_add_interface, 284 .add_interface = wbsoft_add_interface,
278 .remove_interface = wbsoft_remove_interface, 285 .remove_interface = wbsoft_remove_interface,
279 .config = wbsoft_config, 286 .config = wbsoft_config,
287 .prepare_multicast = wbsoft_prepare_multicast,
280 .configure_filter = wbsoft_configure_filter, 288 .configure_filter = wbsoft_configure_filter,
281 .get_stats = wbsoft_get_stats, 289 .get_stats = wbsoft_get_stats,
282 .get_tx_stats = wbsoft_get_tx_stats, 290 .get_tx_stats = wbsoft_get_tx_stats,
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 9959b658c8cf..f44294b0d8dc 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
1config PRISM2_USB 1config PRISM2_USB
2 tristate "Prism2.5/3 USB driver" 2 tristate "Prism2.5/3 USB driver"
3 depends on WLAN_80211 && USB && WIRELESS_EXT 3 depends on WLAN && USB && WIRELESS_EXT
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is the wlan-ng prism 2.5/3 USB driver for a wide range of 6 This is the wlan-ng prism 2.5/3 USB driver for a wide range of
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 4e83c297ec9e..6f8d8f971212 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -180,15 +180,15 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
180 180
181 switch (type) { 181 switch (type) {
182 case THERMAL_TRIP_CRITICAL: 182 case THERMAL_TRIP_CRITICAL:
183 return sprintf(buf, "critical"); 183 return sprintf(buf, "critical\n");
184 case THERMAL_TRIP_HOT: 184 case THERMAL_TRIP_HOT:
185 return sprintf(buf, "hot"); 185 return sprintf(buf, "hot\n");
186 case THERMAL_TRIP_PASSIVE: 186 case THERMAL_TRIP_PASSIVE:
187 return sprintf(buf, "passive"); 187 return sprintf(buf, "passive\n");
188 case THERMAL_TRIP_ACTIVE: 188 case THERMAL_TRIP_ACTIVE:
189 return sprintf(buf, "active"); 189 return sprintf(buf, "active\n");
190 default: 190 default:
191 return sprintf(buf, "unknown"); 191 return sprintf(buf, "unknown\n");
192 } 192 }
193} 193}
194 194
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 03efb065455f..e941367dd28f 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -19,6 +19,7 @@
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/idr.h> 21#include <linux/idr.h>
22#include <linux/sched.h>
22#include <linux/string.h> 23#include <linux/string.h>
23#include <linux/kobject.h> 24#include <linux/kobject.h>
24#include <linux/uio_driver.h> 25#include <linux/uio_driver.h>
@@ -658,7 +659,7 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
658 return 0; 659 return 0;
659} 660}
660 661
661static struct vm_operations_struct uio_vm_ops = { 662static const struct vm_operations_struct uio_vm_ops = {
662 .open = uio_vma_open, 663 .open = uio_vma_open,
663 .close = uio_vma_close, 664 .close = uio_vma_close,
664 .fault = uio_vma_fault, 665 .fault = uio_vma_fault,
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 02347c57357d..aa53db9f2e88 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -178,6 +178,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
178 return 0; 178 return 0;
179 bad1: 179 bad1:
180 kfree(priv); 180 kfree(priv);
181 pm_runtime_disable(&pdev->dev);
181 bad0: 182 bad0:
182 return ret; 183 return ret;
183} 184}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e3861b21e776..e4eca7810bcf 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -609,9 +609,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
609 609
610 acm->throttle = 0; 610 acm->throttle = 0;
611 611
612 tasklet_schedule(&acm->urb_task);
613 set_bit(ASYNCB_INITIALIZED, &acm->port.flags); 612 set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
614 rv = tty_port_block_til_ready(&acm->port, tty, filp); 613 rv = tty_port_block_til_ready(&acm->port, tty, filp);
614 tasklet_schedule(&acm->urb_task);
615done: 615done:
616 mutex_unlock(&acm->mutex); 616 mutex_unlock(&acm->mutex);
617err_out: 617err_out:
@@ -686,15 +686,21 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
686 686
687 /* Perform the closing process and see if we need to do the hardware 687 /* Perform the closing process and see if we need to do the hardware
688 shutdown */ 688 shutdown */
689 if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0) 689 if (!acm)
690 return;
691 if (tty_port_close_start(&acm->port, tty, filp) == 0) {
692 mutex_lock(&open_mutex);
693 if (!acm->dev) {
694 tty_port_tty_set(&acm->port, NULL);
695 acm_tty_unregister(acm);
696 tty->driver_data = NULL;
697 }
698 mutex_unlock(&open_mutex);
690 return; 699 return;
700 }
691 acm_port_down(acm, 0); 701 acm_port_down(acm, 0);
692 tty_port_close_end(&acm->port, tty); 702 tty_port_close_end(&acm->port, tty);
693 mutex_lock(&open_mutex);
694 tty_port_tty_set(&acm->port, NULL); 703 tty_port_tty_set(&acm->port, NULL);
695 if (!acm->dev)
696 acm_tty_unregister(acm);
697 mutex_unlock(&open_mutex);
698} 704}
699 705
700static int acm_tty_write(struct tty_struct *tty, 706static int acm_tty_write(struct tty_struct *tty,
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 333ee02e7b2b..2473cf0c6b1d 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -39,7 +39,7 @@
39#define USBTMC_SIZE_IOBUFFER 2048 39#define USBTMC_SIZE_IOBUFFER 2048
40 40
41/* Default USB timeout (in milliseconds) */ 41/* Default USB timeout (in milliseconds) */
42#define USBTMC_TIMEOUT 10 42#define USBTMC_TIMEOUT 5000
43 43
44/* 44/*
45 * Maximum number of read cycles to empty bulk in endpoint during CLEAR and 45 * Maximum number of read cycles to empty bulk in endpoint during CLEAR and
@@ -993,7 +993,7 @@ skip_io_on_zombie:
993 return retval; 993 return retval;
994} 994}
995 995
996static struct file_operations fops = { 996static const struct file_operations fops = {
997 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
998 .read = usbtmc_read, 998 .read = usbtmc_read,
999 .write = usbtmc_write, 999 .write = usbtmc_write,
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 33351312327f..a18e3c5dd82e 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -223,6 +223,7 @@ config USB_OTG
223config USB_GADGET_PXA25X 223config USB_GADGET_PXA25X
224 boolean "PXA 25x or IXP 4xx" 224 boolean "PXA 25x or IXP 4xx"
225 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX 225 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
226 select USB_OTG_UTILS
226 help 227 help
227 Intel's PXA 25x series XScale ARM-5TE processors include 228 Intel's PXA 25x series XScale ARM-5TE processors include
228 an integrated full speed USB 1.1 device controller. The 229 an integrated full speed USB 1.1 device controller. The
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index f37de283d0ab..167cb2a8ecef 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -61,11 +61,6 @@
61 * simpler, Microsoft pushes their own approach: RNDIS. The published 61 * simpler, Microsoft pushes their own approach: RNDIS. The published
62 * RNDIS specs are ambiguous and appear to be incomplete, and are also 62 * RNDIS specs are ambiguous and appear to be incomplete, and are also
63 * needlessly complex. They borrow more from CDC ACM than CDC ECM. 63 * needlessly complex. They borrow more from CDC ACM than CDC ECM.
64 *
65 * While CDC ECM, CDC Subset, and RNDIS are designed to extend the ethernet
66 * interface to the target, CDC EEM was designed to use ethernet over the USB
67 * link between the host and target. CDC EEM is implemented as an alternative
68 * to those other protocols when that communication model is more appropriate
69 */ 64 */
70 65
71#define DRIVER_DESC "Ethernet Gadget" 66#define DRIVER_DESC "Ethernet Gadget"
@@ -157,8 +152,8 @@ static inline bool has_rndis(void)
157#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ 152#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
158 153
159/* For EEM gadgets */ 154/* For EEM gadgets */
160#define EEM_VENDOR_NUM 0x0525 /* INVALID - NEEDS TO BE ALLOCATED */ 155#define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */
161#define EEM_PRODUCT_NUM 0xa4a1 /* INVALID - NEEDS TO BE ALLOCATED */ 156#define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */
162 157
163/*-------------------------------------------------------------------------*/ 158/*-------------------------------------------------------------------------*/
164 159
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 42a74b8a0bb8..fa3d142ba64d 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2139,7 +2139,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
2139static void fsl_udc_release(struct device *dev) 2139static void fsl_udc_release(struct device *dev)
2140{ 2140{
2141 complete(udc_controller->done); 2141 complete(udc_controller->done);
2142 dma_free_coherent(dev, udc_controller->ep_qh_size, 2142 dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
2143 udc_controller->ep_qh, udc_controller->ep_qh_dma); 2143 udc_controller->ep_qh, udc_controller->ep_qh_dma);
2144 kfree(udc_controller); 2144 kfree(udc_controller);
2145} 2145}
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index c52a681f376c..01ee0b9bc957 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1402,7 +1402,8 @@ static int __init imx_udc_probe(struct platform_device *pdev)
1402 struct clk *clk; 1402 struct clk *clk;
1403 void __iomem *base; 1403 void __iomem *base;
1404 int ret = 0; 1404 int ret = 0;
1405 int i, res_size; 1405 int i;
1406 resource_size_t res_size;
1406 1407
1407 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1408 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1408 if (!res) { 1409 if (!res) {
@@ -1416,7 +1417,7 @@ static int __init imx_udc_probe(struct platform_device *pdev)
1416 return -ENODEV; 1417 return -ENODEV;
1417 } 1418 }
1418 1419
1419 res_size = res->end - res->start + 1; 1420 res_size = resource_size(res);
1420 if (!request_mem_region(res->start, res_size, res->name)) { 1421 if (!request_mem_region(res->start, res_size, res->name)) {
1421 dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n", 1422 dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
1422 res_size, res->start); 1423 res_size, res->start);
@@ -1527,8 +1528,7 @@ static int __exit imx_udc_remove(struct platform_device *pdev)
1527 clk_disable(imx_usb->clk); 1528 clk_disable(imx_usb->clk);
1528 iounmap(imx_usb->base); 1529 iounmap(imx_usb->base);
1529 1530
1530 release_mem_region(imx_usb->res->start, 1531 release_mem_region(imx_usb->res->start, resource_size(imx_usb->res));
1531 imx_usb->res->end - imx_usb->res->start + 1);
1532 1532
1533 if (pdata->exit) 1533 if (pdata->exit)
1534 pdata->exit(&pdev->dev); 1534 pdata->exit(&pdev->dev);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index c44367fea185..bf0f6520c6df 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -30,6 +30,7 @@
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/compiler.h> 31#include <linux/compiler.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
35#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 29500154d00c..2d867fd22413 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
875} 875}
876 876
877/* used after endpoint configuration */ 877/* used after endpoint configuration */
878static struct file_operations printer_io_operations = { 878static const struct file_operations printer_io_operations = {
879 .owner = THIS_MODULE, 879 .owner = THIS_MODULE,
880 .open = printer_open, 880 .open = printer_open,
881 .read = printer_read, 881 .read = printer_read,
diff --git a/drivers/usb/gadget/r8a66597-udc.h b/drivers/usb/gadget/r8a66597-udc.h
index 03087e7b9190..9a537aa07968 100644
--- a/drivers/usb/gadget/r8a66597-udc.h
+++ b/drivers/usb/gadget/r8a66597-udc.h
@@ -131,31 +131,48 @@ static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
131} 131}
132 132
133static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597, 133static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
134 unsigned long offset, u16 *buf, 134 unsigned long offset,
135 unsigned char *buf,
135 int len) 136 int len)
136{ 137{
138 unsigned long fifoaddr = r8a66597->reg + offset;
139 unsigned int data;
140 int i;
141
137 if (r8a66597->pdata->on_chip) { 142 if (r8a66597->pdata->on_chip) {
138 unsigned long fifoaddr = r8a66597->reg + offset; 143 /* 32-bit accesses for on_chip controllers */
139 unsigned long count; 144
140 union { 145 /* aligned buf case */
141 unsigned long dword; 146 if (len >= 4 && !((unsigned long)buf & 0x03)) {
142 unsigned char byte[4]; 147 insl(fifoaddr, buf, len / 4);
143 } data; 148 buf += len & ~0x03;
144 unsigned char *pb; 149 len &= 0x03;
145 int i; 150 }
146 151
147 count = len / 4; 152 /* unaligned buf case */
148 insl(fifoaddr, buf, count); 153 for (i = 0; i < len; i++) {
149 154 if (!(i & 0x03))
150 if (len & 0x00000003) { 155 data = inl(fifoaddr);
151 data.dword = inl(fifoaddr); 156
152 pb = (unsigned char *)buf + count * 4; 157 buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
153 for (i = 0; i < (len & 0x00000003); i++)
154 pb[i] = data.byte[i];
155 } 158 }
156 } else { 159 } else {
157 len = (len + 1) / 2; 160 /* 16-bit accesses for external controllers */
158 insw(r8a66597->reg + offset, buf, len); 161
162 /* aligned buf case */
163 if (len >= 2 && !((unsigned long)buf & 0x01)) {
164 insw(fifoaddr, buf, len / 2);
165 buf += len & ~0x01;
166 len &= 0x01;
167 }
168
169 /* unaligned buf case */
170 for (i = 0; i < len; i++) {
171 if (!(i & 0x01))
172 data = inw(fifoaddr);
173
174 buf[i] = (data >> ((i & 0x01) * 8)) & 0xff;
175 }
159 } 176 }
160} 177}
161 178
@@ -166,38 +183,40 @@ static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
166} 183}
167 184
168static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, 185static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
169 unsigned long offset, u16 *buf, 186 unsigned long offset,
187 unsigned char *buf,
170 int len) 188 int len)
171{ 189{
172 unsigned long fifoaddr = r8a66597->reg + offset; 190 unsigned long fifoaddr = r8a66597->reg + offset;
191 int adj = 0;
192 int i;
173 193
174 if (r8a66597->pdata->on_chip) { 194 if (r8a66597->pdata->on_chip) {
175 unsigned long count; 195 /* 32-bit access only if buf is 32-bit aligned */
176 unsigned char *pb; 196 if (len >= 4 && !((unsigned long)buf & 0x03)) {
177 int i; 197 outsl(fifoaddr, buf, len / 4);
178 198 buf += len & ~0x03;
179 count = len / 4; 199 len &= 0x03;
180 outsl(fifoaddr, buf, count);
181
182 if (len & 0x00000003) {
183 pb = (unsigned char *)buf + count * 4;
184 for (i = 0; i < (len & 0x00000003); i++) {
185 if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND)
186 outb(pb[i], fifoaddr + i);
187 else
188 outb(pb[i], fifoaddr + 3 - i);
189 }
190 } 200 }
191 } else { 201 } else {
192 int odd = len & 0x0001; 202 /* 16-bit access only if buf is 16-bit aligned */
193 203 if (len >= 2 && !((unsigned long)buf & 0x01)) {
194 len = len / 2; 204 outsw(fifoaddr, buf, len / 2);
195 outsw(fifoaddr, buf, len); 205 buf += len & ~0x01;
196 if (unlikely(odd)) { 206 len &= 0x01;
197 buf = &buf[len];
198 outb((unsigned char)*buf, fifoaddr);
199 } 207 }
200 } 208 }
209
210 /* adjust fifo address in the little endian case */
211 if (!(r8a66597_read(r8a66597, CFIFOSEL) & BIGEND)) {
212 if (r8a66597->pdata->on_chip)
213 adj = 0x03; /* 32-bit wide */
214 else
215 adj = 0x01; /* 16-bit wide */
216 }
217
218 for (i = 0; i < len; i++)
219 outb(buf[i], fifoaddr + adj - (i & adj));
201} 220}
202 221
203static inline void r8a66597_mdfy(struct r8a66597 *r8a66597, 222static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 3ea05936851f..b25cdea93a1f 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1400,6 +1400,10 @@ iso_stream_schedule (
1400 goto fail; 1400 goto fail;
1401 } 1401 }
1402 1402
1403 period = urb->interval;
1404 if (!stream->highspeed)
1405 period <<= 3;
1406
1403 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; 1407 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
1404 1408
1405 /* when's the last uframe this urb could start? */ 1409 /* when's the last uframe this urb could start? */
@@ -1417,14 +1421,15 @@ iso_stream_schedule (
1417 1421
1418 /* Fell behind (by up to twice the slop amount)? */ 1422 /* Fell behind (by up to twice the slop amount)? */
1419 if (start >= max - 2 * 8 * SCHEDULE_SLOP) 1423 if (start >= max - 2 * 8 * SCHEDULE_SLOP)
1420 start += stream->interval * DIV_ROUND_UP( 1424 start += period * DIV_ROUND_UP(
1421 max - start, stream->interval) - mod; 1425 max - start, period) - mod;
1422 1426
1423 /* Tried to schedule too far into the future? */ 1427 /* Tried to schedule too far into the future? */
1424 if (unlikely((start + sched->span) >= max)) { 1428 if (unlikely((start + sched->span) >= max)) {
1425 status = -EFBIG; 1429 status = -EFBIG;
1426 goto fail; 1430 goto fail;
1427 } 1431 }
1432 stream->next_uframe = start;
1428 goto ready; 1433 goto ready;
1429 } 1434 }
1430 1435
@@ -1440,10 +1445,6 @@ iso_stream_schedule (
1440 1445
1441 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ 1446 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
1442 1447
1443 period = urb->interval;
1444 if (!stream->highspeed)
1445 period <<= 3;
1446
1447 /* find a uframe slot with enough bandwidth */ 1448 /* find a uframe slot with enough bandwidth */
1448 for (; start < (stream->next_uframe + period); start++) { 1449 for (; start < (stream->next_uframe + period); start++) {
1449 int enough_space; 1450 int enough_space;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index e35d82808bab..5c774ab98252 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2284,10 +2284,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
2284 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); 2284 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2285 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", 2285 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2286 istl_size / 2, istl_size, 0, istl_size / 2); 2286 istl_size / 2, istl_size, 0, istl_size / 2);
2287 dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n", 2287 dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n",
2288 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, 2288 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2289 intl_size, istl_size); 2289 intl_size, istl_size);
2290 dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n", 2290 dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n",
2291 atl_buffers, atl_blksize - PTD_HEADER_SIZE, 2291 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2292 atl_size, istl_size + intl_size); 2292 atl_size, istl_size + intl_size);
2293 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, 2293 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
@@ -2677,12 +2677,12 @@ static int __devexit isp1362_remove(struct platform_device *pdev)
2677 DBG(0, "%s: Removing HCD\n", __func__); 2677 DBG(0, "%s: Removing HCD\n", __func__);
2678 usb_remove_hcd(hcd); 2678 usb_remove_hcd(hcd);
2679 2679
2680 DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, 2680 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2681 (u32)isp1362_hcd->data_reg); 2681 isp1362_hcd->data_reg);
2682 iounmap(isp1362_hcd->data_reg); 2682 iounmap(isp1362_hcd->data_reg);
2683 2683
2684 DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, 2684 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2685 (u32)isp1362_hcd->addr_reg); 2685 isp1362_hcd->addr_reg);
2686 iounmap(isp1362_hcd->addr_reg); 2686 iounmap(isp1362_hcd->addr_reg);
2687 2687
2688 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2688 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -2810,16 +2810,16 @@ static int __init isp1362_probe(struct platform_device *pdev)
2810 return 0; 2810 return 0;
2811 2811
2812 err6: 2812 err6:
2813 DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd); 2813 DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
2814 usb_put_hcd(hcd); 2814 usb_put_hcd(hcd);
2815 err5: 2815 err5:
2816 DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg); 2816 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
2817 iounmap(data_reg); 2817 iounmap(data_reg);
2818 err4: 2818 err4:
2819 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); 2819 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2820 release_mem_region(data->start, resource_len(data)); 2820 release_mem_region(data->start, resource_len(data));
2821 err3: 2821 err3:
2822 DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg); 2822 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2823 iounmap(addr_reg); 2823 iounmap(addr_reg);
2824 err2: 2824 err2:
2825 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); 2825 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index fe60f62a32f3..1a253ebf7e50 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -580,7 +580,7 @@ static inline const char *ISP1362_INT_NAME(int n)
580 580
581static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr) 581static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr)
582{ 582{
583 unsigned p = (unsigned)ptr; 583 unsigned long p = (unsigned long)ptr;
584 if (!(p & 0xf)) 584 if (!(p & 0xf))
585 isp1362_hcd->stat16++; 585 isp1362_hcd->stat16++;
586 else if (!(p & 0x7)) 586 else if (!(p & 0x7))
@@ -770,7 +770,7 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
770 if (!len) 770 if (!len)
771 return; 771 return;
772 772
773 if ((unsigned)dp & 0x1) { 773 if ((unsigned long)dp & 0x1) {
774 /* not aligned */ 774 /* not aligned */
775 for (; len > 1; len -= 2) { 775 for (; len > 1; len -= 2) {
776 data = *dp++; 776 data = *dp++;
@@ -962,8 +962,8 @@ static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16
962 962
963 isp1362_write_diraddr(isp1362_hcd, offset, len); 963 isp1362_write_diraddr(isp1362_hcd, offset, len);
964 964
965 DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %08x\n", __func__, 965 DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n",
966 len, offset, (u32)buf); 966 __func__, len, offset, buf);
967 967
968 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); 968 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
969 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); 969 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
@@ -982,8 +982,8 @@ static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16
982 982
983 isp1362_write_diraddr(isp1362_hcd, offset, len); 983 isp1362_write_diraddr(isp1362_hcd, offset, len);
984 984
985 DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %08x\n", __func__, 985 DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n",
986 len, offset, (u32)buf); 986 __func__, len, offset, buf);
987 987
988 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); 988 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
989 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); 989 _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 78bb7710f36d..24eb74781919 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -87,6 +87,7 @@ static int ohci_restart (struct ohci_hcd *ohci);
87#ifdef CONFIG_PCI 87#ifdef CONFIG_PCI
88static void quirk_amd_pll(int state); 88static void quirk_amd_pll(int state);
89static void amd_iso_dev_put(void); 89static void amd_iso_dev_put(void);
90static void sb800_prefetch(struct ohci_hcd *ohci, int on);
90#else 91#else
91static inline void quirk_amd_pll(int state) 92static inline void quirk_amd_pll(int state)
92{ 93{
@@ -96,6 +97,10 @@ static inline void amd_iso_dev_put(void)
96{ 97{
97 return; 98 return;
98} 99}
100static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
101{
102 return;
103}
99#endif 104#endif
100 105
101 106
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index d2ba04dd785e..b8a1148f248e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -177,6 +177,13 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
177 return 0; 177 return 0;
178 178
179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); 179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
180
181 /* SB800 needs pre-fetch fix */
182 if ((rev >= 0x40) && (rev <= 0x4f)) {
183 ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
184 ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
185 }
186
180 if ((rev > 0x3b) || (rev < 0x30)) { 187 if ((rev > 0x3b) || (rev < 0x30)) {
181 pci_dev_put(amd_smbus_dev); 188 pci_dev_put(amd_smbus_dev);
182 amd_smbus_dev = NULL; 189 amd_smbus_dev = NULL;
@@ -262,6 +269,19 @@ static void amd_iso_dev_put(void)
262 269
263} 270}
264 271
272static void sb800_prefetch(struct ohci_hcd *ohci, int on)
273{
274 struct pci_dev *pdev;
275 u16 misc;
276
277 pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller);
278 pci_read_config_word(pdev, 0x50, &misc);
279 if (on == 0)
280 pci_write_config_word(pdev, 0x50, misc & 0xfcff);
281 else
282 pci_write_config_word(pdev, 0x50, misc | 0x0300);
283}
284
265/* List of quirks for OHCI */ 285/* List of quirks for OHCI */
266static const struct pci_device_id ohci_pci_quirks[] = { 286static const struct pci_device_id ohci_pci_quirks[] = {
267 { 287 {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index b5294a9344de..f1c06202fdf2 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -481,38 +481,47 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
481 return 0; 481 return 0;
482} 482}
483 483
484#ifdef CONFIG_PM 484#ifdef CONFIG_PM
485static int ohci_hcd_pxa27x_drv_suspend(struct platform_device *pdev, pm_message_t state) 485static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
486{ 486{
487 struct usb_hcd *hcd = platform_get_drvdata(pdev); 487 struct usb_hcd *hcd = dev_get_drvdata(dev);
488 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); 488 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
489 489
490 if (time_before(jiffies, ohci->ohci.next_statechange)) 490 if (time_before(jiffies, ohci->ohci.next_statechange))
491 msleep(5); 491 msleep(5);
492 ohci->ohci.next_statechange = jiffies; 492 ohci->ohci.next_statechange = jiffies;
493 493
494 pxa27x_stop_hc(ohci, &pdev->dev); 494 pxa27x_stop_hc(ohci, dev);
495 hcd->state = HC_STATE_SUSPENDED; 495 hcd->state = HC_STATE_SUSPENDED;
496 496
497 return 0; 497 return 0;
498} 498}
499 499
500static int ohci_hcd_pxa27x_drv_resume(struct platform_device *pdev) 500static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
501{ 501{
502 struct usb_hcd *hcd = platform_get_drvdata(pdev); 502 struct usb_hcd *hcd = dev_get_drvdata(dev);
503 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); 503 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
504 struct pxaohci_platform_data *inf = dev->platform_data;
504 int status; 505 int status;
505 506
506 if (time_before(jiffies, ohci->ohci.next_statechange)) 507 if (time_before(jiffies, ohci->ohci.next_statechange))
507 msleep(5); 508 msleep(5);
508 ohci->ohci.next_statechange = jiffies; 509 ohci->ohci.next_statechange = jiffies;
509 510
510 if ((status = pxa27x_start_hc(ohci, &pdev->dev)) < 0) 511 if ((status = pxa27x_start_hc(ohci, dev)) < 0)
511 return status; 512 return status;
512 513
514 /* Select Power Management Mode */
515 pxa27x_ohci_select_pmm(ohci, inf->port_mode);
516
513 ohci_finish_controller_resume(hcd); 517 ohci_finish_controller_resume(hcd);
514 return 0; 518 return 0;
515} 519}
520
521static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
522 .suspend = ohci_hcd_pxa27x_drv_suspend,
523 .resume = ohci_hcd_pxa27x_drv_resume,
524};
516#endif 525#endif
517 526
518/* work with hotplug and coldplug */ 527/* work with hotplug and coldplug */
@@ -522,13 +531,12 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
522 .probe = ohci_hcd_pxa27x_drv_probe, 531 .probe = ohci_hcd_pxa27x_drv_probe,
523 .remove = ohci_hcd_pxa27x_drv_remove, 532 .remove = ohci_hcd_pxa27x_drv_remove,
524 .shutdown = usb_hcd_platform_shutdown, 533 .shutdown = usb_hcd_platform_shutdown,
525#ifdef CONFIG_PM
526 .suspend = ohci_hcd_pxa27x_drv_suspend,
527 .resume = ohci_hcd_pxa27x_drv_resume,
528#endif
529 .driver = { 534 .driver = {
530 .name = "pxa27x-ohci", 535 .name = "pxa27x-ohci",
531 .owner = THIS_MODULE, 536 .owner = THIS_MODULE,
537#ifdef CONFIG_PM
538 .pm = &ohci_hcd_pxa27x_pm_ops,
539#endif
532 }, 540 },
533}; 541};
534 542
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 16fecb8ecc39..35288bcae0db 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,9 +49,12 @@ __acquires(ohci->lock)
49 switch (usb_pipetype (urb->pipe)) { 49 switch (usb_pipetype (urb->pipe)) {
50 case PIPE_ISOCHRONOUS: 50 case PIPE_ISOCHRONOUS:
51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; 51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
53 && quirk_amdiso(ohci)) 53 if (quirk_amdiso(ohci))
54 quirk_amd_pll(1); 54 quirk_amd_pll(1);
55 if (quirk_amdprefetch(ohci))
56 sb800_prefetch(ohci, 0);
57 }
55 break; 58 break;
56 case PIPE_INTERRUPT: 59 case PIPE_INTERRUPT:
57 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; 60 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -680,9 +683,12 @@ static void td_submit_urb (
680 data + urb->iso_frame_desc [cnt].offset, 683 data + urb->iso_frame_desc [cnt].offset,
681 urb->iso_frame_desc [cnt].length, urb, cnt); 684 urb->iso_frame_desc [cnt].length, urb, cnt);
682 } 685 }
683 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 686 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
684 && quirk_amdiso(ohci)) 687 if (quirk_amdiso(ohci))
685 quirk_amd_pll(0); 688 quirk_amd_pll(0);
689 if (quirk_amdprefetch(ohci))
690 sb800_prefetch(ohci, 1);
691 }
686 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 692 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
687 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; 693 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
688 break; 694 break;
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 222011f6172c..5bf15fed0d9f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -402,6 +402,7 @@ struct ohci_hcd {
402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ 402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ 404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
405 // there are also chip quirks/bugs in init logic 406 // there are also chip quirks/bugs in init logic
406 407
407 struct work_struct nec_work; /* Worker for NEC quirk */ 408 struct work_struct nec_work; /* Worker for NEC quirk */
@@ -433,6 +434,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
433{ 434{
434 return ohci->flags & OHCI_QUIRK_AMD_ISO; 435 return ohci->flags & OHCI_QUIRK_AMD_ISO;
435} 436}
437static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
438{
439 return ohci->flags & OHCI_QUIRK_AMD_PREFETCH;
440}
436#else 441#else
437static inline int quirk_nec(struct ohci_hcd *ohci) 442static inline int quirk_nec(struct ohci_hcd *ohci)
438{ 443{
@@ -446,6 +451,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
446{ 451{
447 return 0; 452 return 0;
448} 453}
454static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
455{
456 return 0;
457}
449#endif 458#endif
450 459
451/* convert between an hcd pointer and the corresponding ohci_hcd */ 460/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 23cf3bde4762..83b5f9cea85a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
475 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) 475 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
476 quirk_usb_handoff_xhci(pdev); 476 quirk_usb_handoff_xhci(pdev);
477} 477}
478DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); 478DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 749b53742828..e33d36256350 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1003,19 +1003,20 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1003 if (syssts == SE0) { 1003 if (syssts == SE0) {
1004 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1004 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
1005 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 1005 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
1006 return; 1006 } else {
1007 } 1007 if (syssts == FS_JSTS)
1008 r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port));
1009 else if (syssts == LS_JSTS)
1010 r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
1008 1011
1009 if (syssts == FS_JSTS) 1012 r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port));
1010 r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); 1013 r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port));
1011 else if (syssts == LS_JSTS)
1012 r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
1013 1014
1014 r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); 1015 if (r8a66597->bus_suspended)
1015 r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); 1016 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1017 }
1016 1018
1017 if (r8a66597->bus_suspended) 1019 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1018 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1019} 1020}
1020 1021
1021/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
@@ -1024,6 +1025,8 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
1024 u16 speed = get_rh_usb_speed(r8a66597, port); 1025 u16 speed = get_rh_usb_speed(r8a66597, port);
1025 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; 1026 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
1026 1027
1028 rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) |
1029 (1 << USB_PORT_FEAT_LOWSPEED));
1027 if (speed == HSMODE) 1030 if (speed == HSMODE)
1028 rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); 1031 rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED);
1029 else if (speed == LSMODE) 1032 else if (speed == LSMODE)
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index c632437c7649..562eba108816 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -115,6 +115,10 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
115 if (status & QTD_STS_HALTED) { 115 if (status & QTD_STS_HALTED) {
116 /* Ug, an error. */ 116 /* Ug, an error. */
117 process_halted_qtd(whc, qset, td); 117 process_halted_qtd(whc, qset, td);
118 /* A halted qTD always triggers an update
119 because the qset was either removed or
120 reactivated. */
121 update |= WHC_UPDATE_UPDATED;
118 goto done; 122 goto done;
119 } 123 }
120 124
@@ -305,6 +309,7 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
305 struct whc_urb *wurb = urb->hcpriv; 309 struct whc_urb *wurb = urb->hcpriv;
306 struct whc_qset *qset = wurb->qset; 310 struct whc_qset *qset = wurb->qset;
307 struct whc_std *std, *t; 311 struct whc_std *std, *t;
312 bool has_qtd = false;
308 int ret; 313 int ret;
309 unsigned long flags; 314 unsigned long flags;
310 315
@@ -315,17 +320,21 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
315 goto out; 320 goto out;
316 321
317 list_for_each_entry_safe(std, t, &qset->stds, list_node) { 322 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
318 if (std->urb == urb) 323 if (std->urb == urb) {
324 if (std->qtd)
325 has_qtd = true;
319 qset_free_std(whc, std); 326 qset_free_std(whc, std);
320 else 327 } else
321 std->qtd = NULL; /* so this std is re-added when the qset is */ 328 std->qtd = NULL; /* so this std is re-added when the qset is */
322 } 329 }
323 330
324 asl_qset_remove(whc, qset); 331 if (has_qtd) {
325 wurb->status = status; 332 asl_qset_remove(whc, qset);
326 wurb->is_async = true; 333 wurb->status = status;
327 queue_work(whc->workqueue, &wurb->dequeue_work); 334 wurb->is_async = true;
328 335 queue_work(whc->workqueue, &wurb->dequeue_work);
336 } else
337 qset_remove_urb(whc, qset, urb, status);
329out: 338out:
330 spin_unlock_irqrestore(&whc->lock, flags); 339 spin_unlock_irqrestore(&whc->lock, flags);
331 340
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index cf2d45946c57..2273c815941f 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file)
134 return single_open(file, pzl_print, inode->i_private); 134 return single_open(file, pzl_print, inode->i_private);
135} 135}
136 136
137static struct file_operations di_fops = { 137static const struct file_operations di_fops = {
138 .open = di_open, 138 .open = di_open,
139 .read = seq_read, 139 .read = seq_read,
140 .llseek = seq_lseek, 140 .llseek = seq_lseek,
@@ -142,7 +142,7 @@ static struct file_operations di_fops = {
142 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
143}; 143};
144 144
145static struct file_operations asl_fops = { 145static const struct file_operations asl_fops = {
146 .open = asl_open, 146 .open = asl_open,
147 .read = seq_read, 147 .read = seq_read,
148 .llseek = seq_lseek, 148 .llseek = seq_lseek,
@@ -150,7 +150,7 @@ static struct file_operations asl_fops = {
150 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
151}; 151};
152 152
153static struct file_operations pzl_fops = { 153static const struct file_operations pzl_fops = {
154 .open = pzl_open, 154 .open = pzl_open,
155 .read = seq_read, 155 .read = seq_read,
156 .llseek = seq_lseek, 156 .llseek = seq_lseek,
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index a9e05bac6646..0db3fb2dc03a 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -121,6 +121,10 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
121 if (status & QTD_STS_HALTED) { 121 if (status & QTD_STS_HALTED) {
122 /* Ug, an error. */ 122 /* Ug, an error. */
123 process_halted_qtd(whc, qset, td); 123 process_halted_qtd(whc, qset, td);
124 /* A halted qTD always triggers an update
125 because the qset was either removed or
126 reactivated. */
127 update |= WHC_UPDATE_UPDATED;
124 goto done; 128 goto done;
125 } 129 }
126 130
@@ -333,6 +337,7 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
333 struct whc_urb *wurb = urb->hcpriv; 337 struct whc_urb *wurb = urb->hcpriv;
334 struct whc_qset *qset = wurb->qset; 338 struct whc_qset *qset = wurb->qset;
335 struct whc_std *std, *t; 339 struct whc_std *std, *t;
340 bool has_qtd = false;
336 int ret; 341 int ret;
337 unsigned long flags; 342 unsigned long flags;
338 343
@@ -343,17 +348,22 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
343 goto out; 348 goto out;
344 349
345 list_for_each_entry_safe(std, t, &qset->stds, list_node) { 350 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
346 if (std->urb == urb) 351 if (std->urb == urb) {
352 if (std->qtd)
353 has_qtd = true;
347 qset_free_std(whc, std); 354 qset_free_std(whc, std);
348 else 355 } else
349 std->qtd = NULL; /* so this std is re-added when the qset is */ 356 std->qtd = NULL; /* so this std is re-added when the qset is */
350 } 357 }
351 358
352 pzl_qset_remove(whc, qset); 359 if (has_qtd) {
353 wurb->status = status; 360 pzl_qset_remove(whc, qset);
354 wurb->is_async = false; 361 update_pzl_hw_view(whc);
355 queue_work(whc->workqueue, &wurb->dequeue_work); 362 wurb->status = status;
356 363 wurb->is_async = false;
364 queue_work(whc->workqueue, &wurb->dequeue_work);
365 } else
366 qset_remove_urb(whc, qset, urb, status);
357out: 367out:
358 spin_unlock_irqrestore(&whc->lock, flags); 368 spin_unlock_irqrestore(&whc->lock, flags);
359 369
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 99911e727e0b..932f99938481 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -335,6 +335,12 @@ void xhci_event_ring_work(unsigned long arg)
335 spin_lock_irqsave(&xhci->lock, flags); 335 spin_lock_irqsave(&xhci->lock, flags);
336 temp = xhci_readl(xhci, &xhci->op_regs->status); 336 temp = xhci_readl(xhci, &xhci->op_regs->status);
337 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 337 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
338 if (temp == 0xffffffff) {
339 xhci_dbg(xhci, "HW died, polling stopped.\n");
340 spin_unlock_irqrestore(&xhci->lock, flags);
341 return;
342 }
343
338 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 344 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
339 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); 345 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
340 xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled); 346 xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
@@ -776,6 +782,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
776{ 782{
777 unsigned long flags; 783 unsigned long flags;
778 int ret; 784 int ret;
785 u32 temp;
779 struct xhci_hcd *xhci; 786 struct xhci_hcd *xhci;
780 struct xhci_td *td; 787 struct xhci_td *td;
781 unsigned int ep_index; 788 unsigned int ep_index;
@@ -788,6 +795,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
788 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 795 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
789 if (ret || !urb->hcpriv) 796 if (ret || !urb->hcpriv)
790 goto done; 797 goto done;
798 temp = xhci_readl(xhci, &xhci->op_regs->status);
799 if (temp == 0xffffffff) {
800 xhci_dbg(xhci, "HW died, freeing TD.\n");
801 td = (struct xhci_td *) urb->hcpriv;
802
803 usb_hcd_unlink_urb_from_ep(hcd, urb);
804 spin_unlock_irqrestore(&xhci->lock, flags);
805 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
806 kfree(td);
807 return ret;
808 }
791 809
792 xhci_dbg(xhci, "Cancel URB %p\n", urb); 810 xhci_dbg(xhci, "Cancel URB %p\n", urb);
793 xhci_dbg(xhci, "Event ring:\n"); 811 xhci_dbg(xhci, "Event ring:\n");
@@ -877,7 +895,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
877 ctrl_ctx->drop_flags |= drop_flag; 895 ctrl_ctx->drop_flags |= drop_flag;
878 new_drop_flags = ctrl_ctx->drop_flags; 896 new_drop_flags = ctrl_ctx->drop_flags;
879 897
880 ctrl_ctx->add_flags = ~drop_flag; 898 ctrl_ctx->add_flags &= ~drop_flag;
881 new_add_flags = ctrl_ctx->add_flags; 899 new_add_flags = ctrl_ctx->add_flags;
882 900
883 last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); 901 last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
@@ -1410,11 +1428,20 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1410{ 1428{
1411 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1429 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1412 unsigned long flags; 1430 unsigned long flags;
1431 u32 state;
1413 1432
1414 if (udev->slot_id == 0) 1433 if (udev->slot_id == 0)
1415 return; 1434 return;
1416 1435
1417 spin_lock_irqsave(&xhci->lock, flags); 1436 spin_lock_irqsave(&xhci->lock, flags);
1437 /* Don't disable the slot if the host controller is dead. */
1438 state = xhci_readl(xhci, &xhci->op_regs->status);
1439 if (state == 0xffffffff) {
1440 xhci_free_virt_device(xhci, udev->slot_id);
1441 spin_unlock_irqrestore(&xhci->lock, flags);
1442 return;
1443 }
1444
1418 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 1445 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
1419 spin_unlock_irqrestore(&xhci->lock, flags); 1446 spin_unlock_irqrestore(&xhci->lock, flags);
1420 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 1447 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1db4fea8c170..b8fd270a8b0d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -802,9 +802,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
802 int i; 802 int i;
803 803
804 /* Free the Event Ring Segment Table and the actual Event Ring */ 804 /* Free the Event Ring Segment Table and the actual Event Ring */
805 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 805 if (xhci->ir_set) {
806 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); 806 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
807 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); 807 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
808 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
809 }
808 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 810 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
809 if (xhci->erst.entries) 811 if (xhci->erst.entries)
810 pci_free_consistent(pdev, size, 812 pci_free_consistent(pdev, size,
@@ -841,9 +843,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
841 xhci->dcbaa, xhci->dcbaa->dma); 843 xhci->dcbaa, xhci->dcbaa->dma);
842 xhci->dcbaa = NULL; 844 xhci->dcbaa = NULL;
843 845
846 scratchpad_free(xhci);
844 xhci->page_size = 0; 847 xhci->page_size = 0;
845 xhci->page_shift = 0; 848 xhci->page_shift = 0;
846 scratchpad_free(xhci);
847} 849}
848 850
849int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 851int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 173c39c76489..821b7b4709de 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,9 +864,11 @@ static struct xhci_segment *trb_in_td(
864 cur_seg = start_seg; 864 cur_seg = start_seg;
865 865
866 do { 866 do {
867 if (start_dma == 0)
868 return 0;
867 /* We may get an event for a Link TRB in the middle of a TD */ 869 /* We may get an event for a Link TRB in the middle of a TD */
868 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 870 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
869 &start_seg->trbs[TRBS_PER_SEGMENT - 1]); 871 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
870 /* If the end TRB isn't in this segment, this is set to 0 */ 872 /* If the end TRB isn't in this segment, this is set to 0 */
871 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 873 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
872 874
@@ -893,8 +895,9 @@ static struct xhci_segment *trb_in_td(
893 } 895 }
894 cur_seg = cur_seg->next; 896 cur_seg = cur_seg->next;
895 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 897 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
896 } while (1); 898 } while (cur_seg != start_seg);
897 899
900 return 0;
898} 901}
899 902
900/* 903/*
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index d645f3899fe1..32d0199d0c32 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
429 return read_count; 429 return read_count;
430} 430}
431 431
432static struct 432static const struct file_operations usb_rio_fops = {
433file_operations usb_rio_fops = {
434 .owner = THIS_MODULE, 433 .owner = THIS_MODULE,
435 .read = read_rio, 434 .read = read_rio,
436 .write = write_rio, 435 .write = write_rio,
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 273de5d0934e..0ab990744830 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -43,7 +43,6 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/spinlock.h> 45#include <linux/spinlock.h>
46#include <linux/kref.h>
47 46
48#include "sisusb.h" 47#include "sisusb.h"
49 48
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 29092b8e59ce..4fb120357c55 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -313,7 +313,8 @@ static int lcd_probe(struct usb_interface *interface, const struct usb_device_id
313 313
314 if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) { 314 if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) {
315 dev_warn(&interface->dev, "USBLCD model not supported.\n"); 315 dev_warn(&interface->dev, "USBLCD model not supported.\n");
316 return -ENODEV; 316 retval = -ENODEV;
317 goto error;
317 } 318 }
318 319
319 /* set up the endpoint information */ 320 /* set up the endpoint information */
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index dfdc43e2e00d..10f3205798e8 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -348,12 +348,12 @@ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
348 348
349/* 349/*
350 * Return a few (kilo-)bytes to the head of the buffer. 350 * Return a few (kilo-)bytes to the head of the buffer.
351 * This is used if a DMA fetch fails. 351 * This is used if a data fetch fails.
352 */ 352 */
353static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) 353static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
354{ 354{
355 355
356 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); 356 /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */
357 rp->b_cnt -= size; 357 rp->b_cnt -= size;
358 if (rp->b_in < size) 358 if (rp->b_in < size)
359 rp->b_in += rp->b_size; 359 rp->b_in += rp->b_size;
@@ -433,6 +433,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
433 unsigned int urb_length; 433 unsigned int urb_length;
434 unsigned int offset; 434 unsigned int offset;
435 unsigned int length; 435 unsigned int length;
436 unsigned int delta;
436 unsigned int ndesc, lendesc; 437 unsigned int ndesc, lendesc;
437 unsigned char dir; 438 unsigned char dir;
438 struct mon_bin_hdr *ep; 439 struct mon_bin_hdr *ep;
@@ -537,8 +538,10 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
537 if (length != 0) { 538 if (length != 0) {
538 ep->flag_data = mon_bin_get_data(rp, offset, urb, length); 539 ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
539 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */ 540 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
540 ep->len_cap = 0; 541 delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
541 mon_buff_area_shrink(rp, length); 542 ep->len_cap -= length;
543 delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
544 mon_buff_area_shrink(rp, delta);
542 } 545 }
543 } else { 546 } else {
544 ep->flag_data = data_tag; 547 ep->flag_data = data_tag;
@@ -1174,7 +1177,7 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1174 return 0; 1177 return 0;
1175} 1178}
1176 1179
1177static struct vm_operations_struct mon_bin_vm_ops = { 1180static const struct vm_operations_struct mon_bin_vm_ops = {
1178 .open = mon_bin_vma_open, 1181 .open = mon_bin_vma_open,
1179 .close = mon_bin_vma_close, 1182 .close = mon_bin_vma_close,
1180 .fault = mon_bin_vma_fault, 1183 .fault = mon_bin_vma_fault,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 803adcb5ac1d..b84abd8ee8a5 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -8,8 +8,8 @@ comment "Enable Host or Gadget support to see Inventra options"
8 8
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller 9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC 10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK 11 depends on (USB || USB_GADGET)
12 depends on !SUPERH 12 depends on (ARM || BLACKFIN)
13 select NOP_USB_XCEIV if ARCH_DAVINCI 13 select NOP_USB_XCEIV if ARCH_DAVINCI
14 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
15 select NOP_USB_XCEIV if MACH_OMAP3EVM 15 select NOP_USB_XCEIV if MACH_OMAP3EVM
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index f2f66ebc7362..fcec87ea709e 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -14,7 +14,6 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/clk.h>
18#include <linux/gpio.h> 17#include <linux/gpio.h>
19#include <linux/io.h> 18#include <linux/io.h>
20 19
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 381d648a36b8..6aa5f22e5274 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -95,6 +95,13 @@ struct musb_ep;
95#endif 95#endif
96#endif /* need MUSB gadget selection */ 96#endif /* need MUSB gadget selection */
97 97
98#ifndef CONFIG_HAVE_CLK
99/* Dummy stub for clk framework */
100#define clk_get(dev, id) NULL
101#define clk_put(clock) do {} while (0)
102#define clk_enable(clock) do {} while (0)
103#define clk_disable(clock) do {} while (0)
104#endif
98 105
99#ifdef CONFIG_PROC_FS 106#ifdef CONFIG_PROC_FS
100#include <linux/fs.h> 107#include <linux/fs.h>
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index fbfd3fd9ce1f..cc1d71b57d3c 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -439,15 +439,6 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
439/* Not implemented - HW has seperate Tx/Rx FIFO */ 439/* Not implemented - HW has seperate Tx/Rx FIFO */
440#define MUSB_TXCSR_MODE 0x0000 440#define MUSB_TXCSR_MODE 0x0000
441 441
442/*
443 * Dummy stub for clk framework, it will be removed
444 * until Blackfin supports clk framework
445 */
446#define clk_get(dev, id) NULL
447#define clk_put(clock) do {} while (0)
448#define clk_enable(clock) do {} while (0)
449#define clk_disable(clock) do {} while (0)
450
451static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size) 442static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
452{ 443{
453} 444}
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 2cbfab3716e5..b10ac8409411 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -554,13 +554,12 @@ static void aircable_throttle(struct tty_struct *tty)
554{ 554{
555 struct usb_serial_port *port = tty->driver_data; 555 struct usb_serial_port *port = tty->driver_data;
556 struct aircable_private *priv = usb_get_serial_port_data(port); 556 struct aircable_private *priv = usb_get_serial_port_data(port);
557 unsigned long flags;
558 557
559 dbg("%s - port %d", __func__, port->number); 558 dbg("%s - port %d", __func__, port->number);
560 559
561 spin_lock_irqsave(&priv->rx_lock, flags); 560 spin_lock_irq(&priv->rx_lock);
562 priv->rx_flags |= THROTTLED; 561 priv->rx_flags |= THROTTLED;
563 spin_unlock_irqrestore(&priv->rx_lock, flags); 562 spin_unlock_irq(&priv->rx_lock);
564} 563}
565 564
566/* Based on ftdi_sio.c unthrottle */ 565/* Based on ftdi_sio.c unthrottle */
@@ -569,14 +568,13 @@ static void aircable_unthrottle(struct tty_struct *tty)
569 struct usb_serial_port *port = tty->driver_data; 568 struct usb_serial_port *port = tty->driver_data;
570 struct aircable_private *priv = usb_get_serial_port_data(port); 569 struct aircable_private *priv = usb_get_serial_port_data(port);
571 int actually_throttled; 570 int actually_throttled;
572 unsigned long flags;
573 571
574 dbg("%s - port %d", __func__, port->number); 572 dbg("%s - port %d", __func__, port->number);
575 573
576 spin_lock_irqsave(&priv->rx_lock, flags); 574 spin_lock_irq(&priv->rx_lock);
577 actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED; 575 actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED;
578 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 576 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
579 spin_unlock_irqrestore(&priv->rx_lock, flags); 577 spin_unlock_irq(&priv->rx_lock);
580 578
581 if (actually_throttled) 579 if (actually_throttled)
582 schedule_work(&priv->rx_work); 580 schedule_work(&priv->rx_work);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 4a208fe85bc9..bd254ec97d14 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -50,6 +50,8 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
50static void cp210x_break_ctl(struct tty_struct *, int); 50static void cp210x_break_ctl(struct tty_struct *, int);
51static int cp210x_startup(struct usb_serial *); 51static int cp210x_startup(struct usb_serial *);
52static void cp210x_disconnect(struct usb_serial *); 52static void cp210x_disconnect(struct usb_serial *);
53static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
54static int cp210x_carrier_raised(struct usb_serial_port *p);
53 55
54static int debug; 56static int debug;
55 57
@@ -113,6 +115,7 @@ static struct usb_device_id id_table [] = {
113 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ 115 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
114 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 116 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
115 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 117 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
118 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
116 { } /* Terminating Entry */ 119 { } /* Terminating Entry */
117}; 120};
118 121
@@ -142,6 +145,8 @@ static struct usb_serial_driver cp210x_device = {
142 .tiocmset = cp210x_tiocmset, 145 .tiocmset = cp210x_tiocmset,
143 .attach = cp210x_startup, 146 .attach = cp210x_startup,
144 .disconnect = cp210x_disconnect, 147 .disconnect = cp210x_disconnect,
148 .dtr_rts = cp210x_dtr_rts,
149 .carrier_raised = cp210x_carrier_raised
145}; 150};
146 151
147/* Config request types */ 152/* Config request types */
@@ -745,6 +750,14 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
745 return cp210x_set_config(port, CP210X_SET_MHS, &control, 2); 750 return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
746} 751}
747 752
753static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
754{
755 if (on)
756 cp210x_tiocmset_port(p, NULL, TIOCM_DTR|TIOCM_RTS, 0);
757 else
758 cp210x_tiocmset_port(p, NULL, 0, TIOCM_DTR|TIOCM_RTS);
759}
760
748static int cp210x_tiocmget (struct tty_struct *tty, struct file *file) 761static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
749{ 762{
750 struct usb_serial_port *port = tty->driver_data; 763 struct usb_serial_port *port = tty->driver_data;
@@ -767,6 +780,15 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
767 return result; 780 return result;
768} 781}
769 782
783static int cp210x_carrier_raised(struct usb_serial_port *p)
784{
785 unsigned int control;
786 cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
787 if (control & CONTROL_DCD)
788 return 1;
789 return 0;
790}
791
770static void cp210x_break_ctl (struct tty_struct *tty, int break_state) 792static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
771{ 793{
772 struct usb_serial_port *port = tty->driver_data; 794 struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e0a8b715f2f2..a591ebec0f89 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1155,13 +1155,12 @@ static void cypress_throttle(struct tty_struct *tty)
1155{ 1155{
1156 struct usb_serial_port *port = tty->driver_data; 1156 struct usb_serial_port *port = tty->driver_data;
1157 struct cypress_private *priv = usb_get_serial_port_data(port); 1157 struct cypress_private *priv = usb_get_serial_port_data(port);
1158 unsigned long flags;
1159 1158
1160 dbg("%s - port %d", __func__, port->number); 1159 dbg("%s - port %d", __func__, port->number);
1161 1160
1162 spin_lock_irqsave(&priv->lock, flags); 1161 spin_lock_irq(&priv->lock);
1163 priv->rx_flags = THROTTLED; 1162 priv->rx_flags = THROTTLED;
1164 spin_unlock_irqrestore(&priv->lock, flags); 1163 spin_unlock_irq(&priv->lock);
1165} 1164}
1166 1165
1167 1166
@@ -1170,14 +1169,13 @@ static void cypress_unthrottle(struct tty_struct *tty)
1170 struct usb_serial_port *port = tty->driver_data; 1169 struct usb_serial_port *port = tty->driver_data;
1171 struct cypress_private *priv = usb_get_serial_port_data(port); 1170 struct cypress_private *priv = usb_get_serial_port_data(port);
1172 int actually_throttled, result; 1171 int actually_throttled, result;
1173 unsigned long flags;
1174 1172
1175 dbg("%s - port %d", __func__, port->number); 1173 dbg("%s - port %d", __func__, port->number);
1176 1174
1177 spin_lock_irqsave(&priv->lock, flags); 1175 spin_lock_irq(&priv->lock);
1178 actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED; 1176 actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED;
1179 priv->rx_flags = 0; 1177 priv->rx_flags = 0;
1180 spin_unlock_irqrestore(&priv->lock, flags); 1178 spin_unlock_irq(&priv->lock);
1181 1179
1182 if (!priv->comm_is_ok) 1180 if (!priv->comm_is_ok)
1183 return; 1181 return;
@@ -1185,7 +1183,7 @@ static void cypress_unthrottle(struct tty_struct *tty)
1185 if (actually_throttled) { 1183 if (actually_throttled) {
1186 port->interrupt_in_urb->dev = port->serial->dev; 1184 port->interrupt_in_urb->dev = port->serial->dev;
1187 1185
1188 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 1186 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
1189 if (result) { 1187 if (result) {
1190 dev_err(&port->dev, "%s - failed submitting read urb, " 1188 dev_err(&port->dev, "%s - failed submitting read urb, "
1191 "error %d\n", __func__, result); 1189 "error %d\n", __func__, result);
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index ab3dd991586b..68e80be6b9e1 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -898,16 +898,16 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
898 898
899 spin_lock_irqsave(&priv->dp_port_lock, flags); 899 spin_lock_irqsave(&priv->dp_port_lock, flags);
900 900
901 /* turn throttle off */
902 priv->dp_throttled = 0;
903 priv->dp_throttle_restart = 0;
904
905 /* restart read chain */ 901 /* restart read chain */
906 if (priv->dp_throttle_restart) { 902 if (priv->dp_throttle_restart) {
907 port->read_urb->dev = port->serial->dev; 903 port->read_urb->dev = port->serial->dev;
908 ret = usb_submit_urb(port->read_urb, GFP_ATOMIC); 904 ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
909 } 905 }
910 906
907 /* turn throttle off */
908 priv->dp_throttled = 0;
909 priv->dp_throttle_restart = 0;
910
911 spin_unlock_irqrestore(&priv->dp_port_lock, flags); 911 spin_unlock_irqrestore(&priv->dp_port_lock, flags);
912 912
913 if (ret) 913 if (ret)
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 33c9e9cf9eb2..7dd0e3eadbe6 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -391,7 +391,7 @@ static void empeg_unthrottle(struct tty_struct *tty)
391 dbg("%s - port %d", __func__, port->number); 391 dbg("%s - port %d", __func__, port->number);
392 392
393 port->read_urb->dev = port->serial->dev; 393 port->read_urb->dev = port->serial->dev;
394 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 394 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
395 if (result) 395 if (result)
396 dev_err(&port->dev, 396 dev_err(&port->dev,
397 "%s - failed submitting read urb, error %d\n", 397 "%s - failed submitting read urb, error %d\n",
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4f883b1773d0..9c60d6d4908a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -76,13 +76,7 @@ struct ftdi_private {
76 unsigned long last_dtr_rts; /* saved modem control outputs */ 76 unsigned long last_dtr_rts; /* saved modem control outputs */
77 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ 77 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
78 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 78 char prev_status, diff_status; /* Used for TIOCMIWAIT */
79 __u8 rx_flags; /* receive state flags (throttling) */
80 spinlock_t rx_lock; /* spinlock for receive state */
81 struct delayed_work rx_work;
82 struct usb_serial_port *port; 79 struct usb_serial_port *port;
83 int rx_processed;
84 unsigned long rx_bytes;
85
86 __u16 interface; /* FT2232C, FT2232H or FT4232H port interface 80 __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
87 (0 for FT232/245) */ 81 (0 for FT232/245) */
88 82
@@ -737,10 +731,6 @@ static const char *ftdi_chip_name[] = {
737/* Constants for read urb and write urb */ 731/* Constants for read urb and write urb */
738#define BUFSZ 512 732#define BUFSZ 512
739 733
740/* rx_flags */
741#define THROTTLED 0x01
742#define ACTUALLY_THROTTLED 0x02
743
744/* Used for TIOCMIWAIT */ 734/* Used for TIOCMIWAIT */
745#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD) 735#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
746#define FTDI_STATUS_B1_MASK (FTDI_RS_BI) 736#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
@@ -763,7 +753,7 @@ static int ftdi_write_room(struct tty_struct *tty);
763static int ftdi_chars_in_buffer(struct tty_struct *tty); 753static int ftdi_chars_in_buffer(struct tty_struct *tty);
764static void ftdi_write_bulk_callback(struct urb *urb); 754static void ftdi_write_bulk_callback(struct urb *urb);
765static void ftdi_read_bulk_callback(struct urb *urb); 755static void ftdi_read_bulk_callback(struct urb *urb);
766static void ftdi_process_read(struct work_struct *work); 756static void ftdi_process_read(struct usb_serial_port *port);
767static void ftdi_set_termios(struct tty_struct *tty, 757static void ftdi_set_termios(struct tty_struct *tty,
768 struct usb_serial_port *port, struct ktermios *old); 758 struct usb_serial_port *port, struct ktermios *old);
769static int ftdi_tiocmget(struct tty_struct *tty, struct file *file); 759static int ftdi_tiocmget(struct tty_struct *tty, struct file *file);
@@ -1234,7 +1224,6 @@ static int set_serial_info(struct tty_struct *tty,
1234 (new_serial.flags & ASYNC_FLAGS)); 1224 (new_serial.flags & ASYNC_FLAGS));
1235 priv->custom_divisor = new_serial.custom_divisor; 1225 priv->custom_divisor = new_serial.custom_divisor;
1236 1226
1237 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1238 write_latency_timer(port); 1227 write_latency_timer(port);
1239 1228
1240check_and_exit: 1229check_and_exit:
@@ -1527,7 +1516,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1527 } 1516 }
1528 1517
1529 kref_init(&priv->kref); 1518 kref_init(&priv->kref);
1530 spin_lock_init(&priv->rx_lock);
1531 spin_lock_init(&priv->tx_lock); 1519 spin_lock_init(&priv->tx_lock);
1532 init_waitqueue_head(&priv->delta_msr_wait); 1520 init_waitqueue_head(&priv->delta_msr_wait);
1533 /* This will push the characters through immediately rather 1521 /* This will push the characters through immediately rather
@@ -1549,7 +1537,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1549 port->read_urb->transfer_buffer_length = BUFSZ; 1537 port->read_urb->transfer_buffer_length = BUFSZ;
1550 } 1538 }
1551 1539
1552 INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
1553 priv->port = port; 1540 priv->port = port;
1554 1541
1555 /* Free port's existing write urb and transfer buffer. */ 1542 /* Free port's existing write urb and transfer buffer. */
@@ -1686,6 +1673,26 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
1686 return 0; 1673 return 0;
1687} 1674}
1688 1675
1676static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
1677{
1678 struct urb *urb = port->read_urb;
1679 struct usb_serial *serial = port->serial;
1680 int result;
1681
1682 usb_fill_bulk_urb(urb, serial->dev,
1683 usb_rcvbulkpipe(serial->dev,
1684 port->bulk_in_endpointAddress),
1685 urb->transfer_buffer,
1686 urb->transfer_buffer_length,
1687 ftdi_read_bulk_callback, port);
1688 result = usb_submit_urb(urb, mem_flags);
1689 if (result)
1690 dev_err(&port->dev,
1691 "%s - failed submitting read urb, error %d\n",
1692 __func__, result);
1693 return result;
1694}
1695
1689static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) 1696static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1690{ /* ftdi_open */ 1697{ /* ftdi_open */
1691 struct usb_device *dev = port->serial->dev; 1698 struct usb_device *dev = port->serial->dev;
@@ -1700,12 +1707,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1700 spin_lock_irqsave(&priv->tx_lock, flags); 1707 spin_lock_irqsave(&priv->tx_lock, flags);
1701 priv->tx_bytes = 0; 1708 priv->tx_bytes = 0;
1702 spin_unlock_irqrestore(&priv->tx_lock, flags); 1709 spin_unlock_irqrestore(&priv->tx_lock, flags);
1703 spin_lock_irqsave(&priv->rx_lock, flags);
1704 priv->rx_bytes = 0;
1705 spin_unlock_irqrestore(&priv->rx_lock, flags);
1706
1707 if (tty)
1708 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1709 1710
1710 write_latency_timer(port); 1711 write_latency_timer(port);
1711 1712
@@ -1725,23 +1726,14 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1725 ftdi_set_termios(tty, port, tty->termios); 1726 ftdi_set_termios(tty, port, tty->termios);
1726 1727
1727 /* Not throttled */ 1728 /* Not throttled */
1728 spin_lock_irqsave(&priv->rx_lock, flags); 1729 spin_lock_irqsave(&port->lock, flags);
1729 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 1730 port->throttled = 0;
1730 spin_unlock_irqrestore(&priv->rx_lock, flags); 1731 port->throttle_req = 0;
1732 spin_unlock_irqrestore(&port->lock, flags);
1731 1733
1732 /* Start reading from the device */ 1734 /* Start reading from the device */
1733 priv->rx_processed = 0; 1735 result = ftdi_submit_read_urb(port, GFP_KERNEL);
1734 usb_fill_bulk_urb(port->read_urb, dev, 1736 if (!result)
1735 usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress),
1736 port->read_urb->transfer_buffer,
1737 port->read_urb->transfer_buffer_length,
1738 ftdi_read_bulk_callback, port);
1739 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
1740 if (result)
1741 dev_err(&port->dev,
1742 "%s - failed submitting read urb, error %d\n",
1743 __func__, result);
1744 else
1745 kref_get(&priv->kref); 1737 kref_get(&priv->kref);
1746 1738
1747 return result; 1739 return result;
@@ -1787,10 +1779,6 @@ static void ftdi_close(struct usb_serial_port *port)
1787 1779
1788 dbg("%s", __func__); 1780 dbg("%s", __func__);
1789 1781
1790
1791 /* cancel any scheduled reading */
1792 cancel_delayed_work_sync(&priv->rx_work);
1793
1794 /* shutdown our bulk read */ 1782 /* shutdown our bulk read */
1795 usb_kill_urb(port->read_urb); 1783 usb_kill_urb(port->read_urb);
1796 kref_put(&priv->kref, ftdi_sio_priv_release); 1784 kref_put(&priv->kref, ftdi_sio_priv_release);
@@ -2013,271 +2001,121 @@ static int ftdi_chars_in_buffer(struct tty_struct *tty)
2013 return buffered; 2001 return buffered;
2014} 2002}
2015 2003
2016static void ftdi_read_bulk_callback(struct urb *urb) 2004static int ftdi_process_packet(struct tty_struct *tty,
2005 struct usb_serial_port *port, struct ftdi_private *priv,
2006 char *packet, int len)
2017{ 2007{
2018 struct usb_serial_port *port = urb->context; 2008 int i;
2019 struct tty_struct *tty; 2009 char status;
2020 struct ftdi_private *priv; 2010 char flag;
2021 unsigned long countread; 2011 char *ch;
2022 unsigned long flags;
2023 int status = urb->status;
2024
2025 if (urb->number_of_packets > 0) {
2026 dev_err(&port->dev, "%s transfer_buffer_length %d "
2027 "actual_length %d number of packets %d\n", __func__,
2028 urb->transfer_buffer_length,
2029 urb->actual_length, urb->number_of_packets);
2030 dev_err(&port->dev, "%s transfer_flags %x\n", __func__,
2031 urb->transfer_flags);
2032 }
2033 2012
2034 dbg("%s - port %d", __func__, port->number); 2013 dbg("%s - port %d", __func__, port->number);
2035 2014
2036 if (port->port.count <= 0) 2015 if (len < 2) {
2037 return; 2016 dbg("malformed packet");
2038 2017 return 0;
2039 tty = tty_port_tty_get(&port->port);
2040 if (!tty) {
2041 dbg("%s - bad tty pointer - exiting", __func__);
2042 return;
2043 } 2018 }
2044 2019
2045 priv = usb_get_serial_port_data(port); 2020 /* Compare new line status to the old one, signal if different/
2046 if (!priv) { 2021 N.B. packet may be processed more than once, but differences
2047 dbg("%s - bad port private data pointer - exiting", __func__); 2022 are only processed once. */
2048 goto out; 2023 status = packet[0] & FTDI_STATUS_B0_MASK;
2024 if (status != priv->prev_status) {
2025 priv->diff_status |= status ^ priv->prev_status;
2026 wake_up_interruptible(&priv->delta_msr_wait);
2027 priv->prev_status = status;
2049 } 2028 }
2050 2029
2051 if (urb != port->read_urb) 2030 /*
2052 dev_err(&port->dev, "%s - Not my urb!\n", __func__); 2031 * Although the device uses a bitmask and hence can have multiple
2053 2032 * errors on a packet - the order here sets the priority the error is
2054 if (status) { 2033 * returned to the tty layer.
2055 /* This will happen at close every time so it is a dbg not an 2034 */
2056 err */ 2035 flag = TTY_NORMAL;
2057 dbg("(this is ok on close) nonzero read bulk status received: %d", status); 2036 if (packet[1] & FTDI_RS_OE) {
2058 goto out; 2037 flag = TTY_OVERRUN;
2038 dbg("OVERRRUN error");
2039 }
2040 if (packet[1] & FTDI_RS_BI) {
2041 flag = TTY_BREAK;
2042 dbg("BREAK received");
2043 usb_serial_handle_break(port);
2044 }
2045 if (packet[1] & FTDI_RS_PE) {
2046 flag = TTY_PARITY;
2047 dbg("PARITY error");
2048 }
2049 if (packet[1] & FTDI_RS_FE) {
2050 flag = TTY_FRAME;
2051 dbg("FRAMING error");
2059 } 2052 }
2060 2053
2061 /* count data bytes, but not status bytes */ 2054 len -= 2;
2062 countread = urb->actual_length; 2055 if (!len)
2063 countread -= 2 * DIV_ROUND_UP(countread, priv->max_packet_size); 2056 return 0; /* status only */
2064 spin_lock_irqsave(&priv->rx_lock, flags); 2057 ch = packet + 2;
2065 priv->rx_bytes += countread; 2058
2066 spin_unlock_irqrestore(&priv->rx_lock, flags); 2059 if (!(port->console && port->sysrq) && flag == TTY_NORMAL)
2067 2060 tty_insert_flip_string(tty, ch, len);
2068 ftdi_process_read(&priv->rx_work.work); 2061 else {
2069out: 2062 for (i = 0; i < len; i++, ch++) {
2070 tty_kref_put(tty); 2063 if (!usb_serial_handle_sysrq_char(tty, port, *ch))
2071} /* ftdi_read_bulk_callback */ 2064 tty_insert_flip_char(tty, *ch, flag);
2072 2065 }
2066 }
2067 return len;
2068}
2073 2069
2074static void ftdi_process_read(struct work_struct *work) 2070static void ftdi_process_read(struct usb_serial_port *port)
2075{ /* ftdi_process_read */ 2071{
2076 struct ftdi_private *priv = 2072 struct urb *urb = port->read_urb;
2077 container_of(work, struct ftdi_private, rx_work.work);
2078 struct usb_serial_port *port = priv->port;
2079 struct urb *urb;
2080 struct tty_struct *tty; 2073 struct tty_struct *tty;
2081 char error_flag; 2074 struct ftdi_private *priv = usb_get_serial_port_data(port);
2082 unsigned char *data; 2075 char *data = (char *)urb->transfer_buffer;
2083
2084 int i; 2076 int i;
2085 int result; 2077 int len;
2086 int need_flip; 2078 int count = 0;
2087 int packet_offset;
2088 unsigned long flags;
2089
2090 dbg("%s - port %d", __func__, port->number);
2091
2092 if (port->port.count <= 0)
2093 return;
2094 2079
2095 tty = tty_port_tty_get(&port->port); 2080 tty = tty_port_tty_get(&port->port);
2096 if (!tty) { 2081 if (!tty)
2097 dbg("%s - bad tty pointer - exiting", __func__);
2098 return; 2082 return;
2099 }
2100
2101 priv = usb_get_serial_port_data(port);
2102 if (!priv) {
2103 dbg("%s - bad port private data pointer - exiting", __func__);
2104 goto out;
2105 }
2106
2107 urb = port->read_urb;
2108 if (!urb) {
2109 dbg("%s - bad read_urb pointer - exiting", __func__);
2110 goto out;
2111 }
2112
2113 data = urb->transfer_buffer;
2114 2083
2115 if (priv->rx_processed) { 2084 for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
2116 dbg("%s - already processed: %d bytes, %d remain", __func__, 2085 len = min_t(int, urb->actual_length - i, priv->max_packet_size);
2117 priv->rx_processed, 2086 count += ftdi_process_packet(tty, port, priv, &data[i], len);
2118 urb->actual_length - priv->rx_processed);
2119 } else {
2120 /* The first two bytes of every read packet are status */
2121 if (urb->actual_length > 2)
2122 usb_serial_debug_data(debug, &port->dev, __func__,
2123 urb->actual_length, data);
2124 else
2125 dbg("Status only: %03oo %03oo", data[0], data[1]);
2126 } 2087 }
2127 2088
2128 2089 if (count)
2129 /* TO DO -- check for hung up line and handle appropriately: */
2130 /* send hangup */
2131 /* See acm.c - you do a tty_hangup - eg tty_hangup(tty) */
2132 /* if CD is dropped and the line is not CLOCAL then we should hangup */
2133
2134 need_flip = 0;
2135 for (packet_offset = priv->rx_processed;
2136 packet_offset < urb->actual_length; packet_offset += priv->max_packet_size) {
2137 int length;
2138
2139 /* Compare new line status to the old one, signal if different/
2140 N.B. packet may be processed more than once, but differences
2141 are only processed once. */
2142 char new_status = data[packet_offset + 0] &
2143 FTDI_STATUS_B0_MASK;
2144 if (new_status != priv->prev_status) {
2145 priv->diff_status |=
2146 new_status ^ priv->prev_status;
2147 wake_up_interruptible(&priv->delta_msr_wait);
2148 priv->prev_status = new_status;
2149 }
2150
2151 length = min_t(u32, priv->max_packet_size, urb->actual_length-packet_offset)-2;
2152 if (length < 0) {
2153 dev_err(&port->dev, "%s - bad packet length: %d\n",
2154 __func__, length+2);
2155 length = 0;
2156 }
2157
2158 if (priv->rx_flags & THROTTLED) {
2159 dbg("%s - throttled", __func__);
2160 break;
2161 }
2162 if (tty_buffer_request_room(tty, length) < length) {
2163 /* break out & wait for throttling/unthrottling to
2164 happen */
2165 dbg("%s - receive room low", __func__);
2166 break;
2167 }
2168
2169 /* Handle errors and break */
2170 error_flag = TTY_NORMAL;
2171 /* Although the device uses a bitmask and hence can have
2172 multiple errors on a packet - the order here sets the
2173 priority the error is returned to the tty layer */
2174
2175 if (data[packet_offset+1] & FTDI_RS_OE) {
2176 error_flag = TTY_OVERRUN;
2177 dbg("OVERRRUN error");
2178 }
2179 if (data[packet_offset+1] & FTDI_RS_BI) {
2180 error_flag = TTY_BREAK;
2181 dbg("BREAK received");
2182 usb_serial_handle_break(port);
2183 }
2184 if (data[packet_offset+1] & FTDI_RS_PE) {
2185 error_flag = TTY_PARITY;
2186 dbg("PARITY error");
2187 }
2188 if (data[packet_offset+1] & FTDI_RS_FE) {
2189 error_flag = TTY_FRAME;
2190 dbg("FRAMING error");
2191 }
2192 if (length > 0) {
2193 for (i = 2; i < length+2; i++) {
2194 /* Note that the error flag is duplicated for
2195 every character received since we don't know
2196 which character it applied to */
2197 if (!usb_serial_handle_sysrq_char(tty, port,
2198 data[packet_offset + i]))
2199 tty_insert_flip_char(tty,
2200 data[packet_offset + i],
2201 error_flag);
2202 }
2203 need_flip = 1;
2204 }
2205
2206#ifdef NOT_CORRECT_BUT_KEEPING_IT_FOR_NOW
2207 /* if a parity error is detected you get status packets forever
2208 until a character is sent without a parity error.
2209 This doesn't work well since the application receives a
2210 never ending stream of bad data - even though new data
2211 hasn't been sent. Therefore I (bill) have taken this out.
2212 However - this might make sense for framing errors and so on
2213 so I am leaving the code in for now.
2214 */
2215 else {
2216 if (error_flag != TTY_NORMAL) {
2217 dbg("error_flag is not normal");
2218 /* In this case it is just status - if that is
2219 an error send a bad character */
2220 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
2221 tty_flip_buffer_push(tty);
2222 tty_insert_flip_char(tty, 0xff, error_flag);
2223 need_flip = 1;
2224 }
2225 }
2226#endif
2227 } /* "for(packet_offset=0..." */
2228
2229 /* Low latency */
2230 if (need_flip)
2231 tty_flip_buffer_push(tty); 2090 tty_flip_buffer_push(tty);
2091 tty_kref_put(tty);
2092}
2232 2093
2233 if (packet_offset < urb->actual_length) { 2094static void ftdi_read_bulk_callback(struct urb *urb)
2234 /* not completely processed - record progress */ 2095{
2235 priv->rx_processed = packet_offset; 2096 struct usb_serial_port *port = urb->context;
2236 dbg("%s - incomplete, %d bytes processed, %d remain", 2097 unsigned long flags;
2237 __func__, packet_offset,
2238 urb->actual_length - packet_offset);
2239 /* check if we were throttled while processing */
2240 spin_lock_irqsave(&priv->rx_lock, flags);
2241 if (priv->rx_flags & THROTTLED) {
2242 priv->rx_flags |= ACTUALLY_THROTTLED;
2243 spin_unlock_irqrestore(&priv->rx_lock, flags);
2244 dbg("%s - deferring remainder until unthrottled",
2245 __func__);
2246 goto out;
2247 }
2248 spin_unlock_irqrestore(&priv->rx_lock, flags);
2249 /* if the port is closed stop trying to read */
2250 if (port->port.count > 0)
2251 /* delay processing of remainder */
2252 schedule_delayed_work(&priv->rx_work, 1);
2253 else
2254 dbg("%s - port is closed", __func__);
2255 goto out;
2256 }
2257
2258 /* urb is completely processed */
2259 priv->rx_processed = 0;
2260 2098
2261 /* if the port is closed stop trying to read */ 2099 dbg("%s - port %d", __func__, port->number);
2262 if (port->port.count > 0) {
2263 /* Continue trying to always read */
2264 usb_fill_bulk_urb(port->read_urb, port->serial->dev,
2265 usb_rcvbulkpipe(port->serial->dev,
2266 port->bulk_in_endpointAddress),
2267 port->read_urb->transfer_buffer,
2268 port->read_urb->transfer_buffer_length,
2269 ftdi_read_bulk_callback, port);
2270 2100
2271 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 2101 if (urb->status) {
2272 if (result) 2102 dbg("%s - nonzero read bulk status received: %d",
2273 dev_err(&port->dev, 2103 __func__, urb->status);
2274 "%s - failed resubmitting read urb, error %d\n", 2104 return;
2275 __func__, result);
2276 } 2105 }
2277out:
2278 tty_kref_put(tty);
2279} /* ftdi_process_read */
2280 2106
2107 usb_serial_debug_data(debug, &port->dev, __func__,
2108 urb->actual_length, urb->transfer_buffer);
2109 ftdi_process_read(port);
2110
2111 spin_lock_irqsave(&port->lock, flags);
2112 port->throttled = port->throttle_req;
2113 if (!port->throttled) {
2114 spin_unlock_irqrestore(&port->lock, flags);
2115 ftdi_submit_read_urb(port, GFP_ATOMIC);
2116 } else
2117 spin_unlock_irqrestore(&port->lock, flags);
2118}
2281 2119
2282static void ftdi_break_ctl(struct tty_struct *tty, int break_state) 2120static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2283{ 2121{
@@ -2609,33 +2447,31 @@ static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
2609static void ftdi_throttle(struct tty_struct *tty) 2447static void ftdi_throttle(struct tty_struct *tty)
2610{ 2448{
2611 struct usb_serial_port *port = tty->driver_data; 2449 struct usb_serial_port *port = tty->driver_data;
2612 struct ftdi_private *priv = usb_get_serial_port_data(port);
2613 unsigned long flags; 2450 unsigned long flags;
2614 2451
2615 dbg("%s - port %d", __func__, port->number); 2452 dbg("%s - port %d", __func__, port->number);
2616 2453
2617 spin_lock_irqsave(&priv->rx_lock, flags); 2454 spin_lock_irqsave(&port->lock, flags);
2618 priv->rx_flags |= THROTTLED; 2455 port->throttle_req = 1;
2619 spin_unlock_irqrestore(&priv->rx_lock, flags); 2456 spin_unlock_irqrestore(&port->lock, flags);
2620} 2457}
2621 2458
2622 2459void ftdi_unthrottle(struct tty_struct *tty)
2623static void ftdi_unthrottle(struct tty_struct *tty)
2624{ 2460{
2625 struct usb_serial_port *port = tty->driver_data; 2461 struct usb_serial_port *port = tty->driver_data;
2626 struct ftdi_private *priv = usb_get_serial_port_data(port); 2462 int was_throttled;
2627 int actually_throttled;
2628 unsigned long flags; 2463 unsigned long flags;
2629 2464
2630 dbg("%s - port %d", __func__, port->number); 2465 dbg("%s - port %d", __func__, port->number);
2631 2466
2632 spin_lock_irqsave(&priv->rx_lock, flags); 2467 spin_lock_irqsave(&port->lock, flags);
2633 actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED; 2468 was_throttled = port->throttled;
2634 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 2469 port->throttled = port->throttle_req = 0;
2635 spin_unlock_irqrestore(&priv->rx_lock, flags); 2470 spin_unlock_irqrestore(&port->lock, flags);
2636 2471
2637 if (actually_throttled) 2472 /* Resubmit urb if throttled and open. */
2638 schedule_delayed_work(&priv->rx_work, 0); 2473 if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
2474 ftdi_submit_read_urb(port, GFP_KERNEL);
2639} 2475}
2640 2476
2641static int __init ftdi_init(void) 2477static int __init ftdi_init(void)
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 20432d345529..5ac900e5a50e 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1390,14 +1390,13 @@ static void garmin_throttle(struct tty_struct *tty)
1390{ 1390{
1391 struct usb_serial_port *port = tty->driver_data; 1391 struct usb_serial_port *port = tty->driver_data;
1392 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 1392 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
1393 unsigned long flags;
1394 1393
1395 dbg("%s - port %d", __func__, port->number); 1394 dbg("%s - port %d", __func__, port->number);
1396 /* set flag, data received will be put into a queue 1395 /* set flag, data received will be put into a queue
1397 for later processing */ 1396 for later processing */
1398 spin_lock_irqsave(&garmin_data_p->lock, flags); 1397 spin_lock_irq(&garmin_data_p->lock);
1399 garmin_data_p->flags |= FLAGS_QUEUING|FLAGS_THROTTLED; 1398 garmin_data_p->flags |= FLAGS_QUEUING|FLAGS_THROTTLED;
1400 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 1399 spin_unlock_irq(&garmin_data_p->lock);
1401} 1400}
1402 1401
1403 1402
@@ -1405,13 +1404,12 @@ static void garmin_unthrottle(struct tty_struct *tty)
1405{ 1404{
1406 struct usb_serial_port *port = tty->driver_data; 1405 struct usb_serial_port *port = tty->driver_data;
1407 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 1406 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
1408 unsigned long flags;
1409 int status; 1407 int status;
1410 1408
1411 dbg("%s - port %d", __func__, port->number); 1409 dbg("%s - port %d", __func__, port->number);
1412 spin_lock_irqsave(&garmin_data_p->lock, flags); 1410 spin_lock_irq(&garmin_data_p->lock);
1413 garmin_data_p->flags &= ~FLAGS_THROTTLED; 1411 garmin_data_p->flags &= ~FLAGS_THROTTLED;
1414 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 1412 spin_unlock_irq(&garmin_data_p->lock);
1415 1413
1416 /* in native mode send queued data to tty, in 1414 /* in native mode send queued data to tty, in
1417 serial mode nothing needs to be done here */ 1415 serial mode nothing needs to be done here */
@@ -1419,7 +1417,7 @@ static void garmin_unthrottle(struct tty_struct *tty)
1419 garmin_flush_queue(garmin_data_p); 1417 garmin_flush_queue(garmin_data_p);
1420 1418
1421 if (0 != (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) { 1419 if (0 != (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
1422 status = usb_submit_urb(port->read_urb, GFP_ATOMIC); 1420 status = usb_submit_urb(port->read_urb, GFP_KERNEL);
1423 if (status) 1421 if (status)
1424 dev_err(&port->dev, 1422 dev_err(&port->dev,
1425 "%s - failed resubmitting read urb, error %d\n", 1423 "%s - failed resubmitting read urb, error %d\n",
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index deba08c7a015..bbe005cefcfb 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -546,7 +546,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
546 546
547 if (was_throttled) { 547 if (was_throttled) {
548 /* Resume reading from device */ 548 /* Resume reading from device */
549 usb_serial_generic_resubmit_read_urb(port, GFP_KERNEL); 549 flush_and_resubmit_read_urb(port);
550 } 550 }
551} 551}
552 552
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 24fcc64b837d..d6231c38813e 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -966,6 +966,15 @@ static int ipaq_calc_num_ports(struct usb_serial *serial)
966static int ipaq_startup(struct usb_serial *serial) 966static int ipaq_startup(struct usb_serial *serial)
967{ 967{
968 dbg("%s", __func__); 968 dbg("%s", __func__);
969
970 /* Some of the devices in ipaq_id_table[] are composite, and we
971 * shouldn't bind to all the interfaces. This test will rule out
972 * some obviously invalid possibilities.
973 */
974 if (serial->num_bulk_in < serial->num_ports ||
975 serial->num_bulk_out < serial->num_ports)
976 return -ENODEV;
977
969 if (serial->dev->actconfig->desc.bConfigurationValue != 1) { 978 if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
970 /* 979 /*
971 * FIXME: HP iPaq rx3715, possibly others, have 1 config that 980 * FIXME: HP iPaq rx3715, possibly others, have 1 config that
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 257c16cc6b2a..1296a097f5c3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -290,7 +290,7 @@ static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
290 /* just restart the receive interrupt URB */ 290 /* just restart the receive interrupt URB */
291 dbg("keyspan_pda_rx_unthrottle port %d", port->number); 291 dbg("keyspan_pda_rx_unthrottle port %d", port->number);
292 port->interrupt_in_urb->dev = port->serial->dev; 292 port->interrupt_in_urb->dev = port->serial->dev;
293 if (usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC)) 293 if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
294 dbg(" usb_submit_urb(read urb) failed"); 294 dbg(" usb_submit_urb(read urb) failed");
295 return; 295 return;
296} 296}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index f7373371b137..3a7873806f46 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -951,7 +951,7 @@ static void klsi_105_unthrottle(struct tty_struct *tty)
951 dbg("%s - port %d", __func__, port->number); 951 dbg("%s - port %d", __func__, port->number);
952 952
953 port->read_urb->dev = port->serial->dev; 953 port->read_urb->dev = port->serial->dev;
954 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 954 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
955 if (result) 955 if (result)
956 dev_err(&port->dev, 956 dev_err(&port->dev,
957 "%s - failed submitting read urb, error %d\n", 957 "%s - failed submitting read urb, error %d\n",
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ad4998bbf16f..cd009cb280a5 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -777,20 +777,19 @@ static void mct_u232_throttle(struct tty_struct *tty)
777{ 777{
778 struct usb_serial_port *port = tty->driver_data; 778 struct usb_serial_port *port = tty->driver_data;
779 struct mct_u232_private *priv = usb_get_serial_port_data(port); 779 struct mct_u232_private *priv = usb_get_serial_port_data(port);
780 unsigned long flags;
781 unsigned int control_state; 780 unsigned int control_state;
782 781
783 dbg("%s - port %d", __func__, port->number); 782 dbg("%s - port %d", __func__, port->number);
784 783
785 spin_lock_irqsave(&priv->lock, flags); 784 spin_lock_irq(&priv->lock);
786 priv->rx_flags |= THROTTLED; 785 priv->rx_flags |= THROTTLED;
787 if (C_CRTSCTS(tty)) { 786 if (C_CRTSCTS(tty)) {
788 priv->control_state &= ~TIOCM_RTS; 787 priv->control_state &= ~TIOCM_RTS;
789 control_state = priv->control_state; 788 control_state = priv->control_state;
790 spin_unlock_irqrestore(&priv->lock, flags); 789 spin_unlock_irq(&priv->lock);
791 (void) mct_u232_set_modem_ctrl(port->serial, control_state); 790 (void) mct_u232_set_modem_ctrl(port->serial, control_state);
792 } else { 791 } else {
793 spin_unlock_irqrestore(&priv->lock, flags); 792 spin_unlock_irq(&priv->lock);
794 } 793 }
795} 794}
796 795
@@ -799,20 +798,19 @@ static void mct_u232_unthrottle(struct tty_struct *tty)
799{ 798{
800 struct usb_serial_port *port = tty->driver_data; 799 struct usb_serial_port *port = tty->driver_data;
801 struct mct_u232_private *priv = usb_get_serial_port_data(port); 800 struct mct_u232_private *priv = usb_get_serial_port_data(port);
802 unsigned long flags;
803 unsigned int control_state; 801 unsigned int control_state;
804 802
805 dbg("%s - port %d", __func__, port->number); 803 dbg("%s - port %d", __func__, port->number);
806 804
807 spin_lock_irqsave(&priv->lock, flags); 805 spin_lock_irq(&priv->lock);
808 if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) { 806 if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) {
809 priv->rx_flags &= ~THROTTLED; 807 priv->rx_flags &= ~THROTTLED;
810 priv->control_state |= TIOCM_RTS; 808 priv->control_state |= TIOCM_RTS;
811 control_state = priv->control_state; 809 control_state = priv->control_state;
812 spin_unlock_irqrestore(&priv->lock, flags); 810 spin_unlock_irq(&priv->lock);
813 (void) mct_u232_set_modem_ctrl(port->serial, control_state); 811 (void) mct_u232_set_modem_ctrl(port->serial, control_state);
814 } else { 812 } else {
815 spin_unlock_irqrestore(&priv->lock, flags); 813 spin_unlock_irq(&priv->lock);
816 } 814 }
817} 815}
818 816
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 1085a577c5c1..80f59b6350cb 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -314,21 +314,24 @@ static void opticon_unthrottle(struct tty_struct *tty)
314 struct usb_serial_port *port = tty->driver_data; 314 struct usb_serial_port *port = tty->driver_data;
315 struct opticon_private *priv = usb_get_serial_data(port->serial); 315 struct opticon_private *priv = usb_get_serial_data(port->serial);
316 unsigned long flags; 316 unsigned long flags;
317 int result; 317 int result, was_throttled;
318 318
319 dbg("%s - port %d", __func__, port->number); 319 dbg("%s - port %d", __func__, port->number);
320 320
321 spin_lock_irqsave(&priv->lock, flags); 321 spin_lock_irqsave(&priv->lock, flags);
322 priv->throttled = false; 322 priv->throttled = false;
323 was_throttled = priv->actually_throttled;
323 priv->actually_throttled = false; 324 priv->actually_throttled = false;
324 spin_unlock_irqrestore(&priv->lock, flags); 325 spin_unlock_irqrestore(&priv->lock, flags);
325 326
326 priv->bulk_read_urb->dev = port->serial->dev; 327 priv->bulk_read_urb->dev = port->serial->dev;
327 result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC); 328 if (was_throttled) {
328 if (result) 329 result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
329 dev_err(&port->dev, 330 if (result)
330 "%s - failed submitting read urb, error %d\n", 331 dev_err(&port->dev,
332 "%s - failed submitting read urb, error %d\n",
331 __func__, result); 333 __func__, result);
334 }
332} 335}
333 336
334static int opticon_tiocmget(struct tty_struct *tty, struct file *file) 337static int opticon_tiocmget(struct tty_struct *tty, struct file *file)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f66e39883218..319aaf9725b3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -165,6 +165,7 @@ static int option_resume(struct usb_serial *serial);
165#define HUAWEI_PRODUCT_E143D 0x143D 165#define HUAWEI_PRODUCT_E143D 0x143D
166#define HUAWEI_PRODUCT_E143E 0x143E 166#define HUAWEI_PRODUCT_E143E 0x143E
167#define HUAWEI_PRODUCT_E143F 0x143F 167#define HUAWEI_PRODUCT_E143F 0x143F
168#define HUAWEI_PRODUCT_E14AC 0x14AC
168 169
169#define QUANTA_VENDOR_ID 0x0408 170#define QUANTA_VENDOR_ID 0x0408
170#define QUANTA_PRODUCT_Q101 0xEA02 171#define QUANTA_PRODUCT_Q101 0xEA02
@@ -307,6 +308,7 @@ static int option_resume(struct usb_serial *serial);
307 308
308#define DLINK_VENDOR_ID 0x1186 309#define DLINK_VENDOR_ID 0x1186
309#define DLINK_PRODUCT_DWM_652 0x3e04 310#define DLINK_PRODUCT_DWM_652 0x3e04
311#define DLINK_PRODUCT_DWM_652_U5 0xce16
310 312
311#define QISDA_VENDOR_ID 0x1da5 313#define QISDA_VENDOR_ID 0x1da5
312#define QISDA_PRODUCT_H21_4512 0x4512 314#define QISDA_PRODUCT_H21_4512 0x4512
@@ -314,10 +316,14 @@ static int option_resume(struct usb_serial *serial);
314#define QISDA_PRODUCT_H20_4515 0x4515 316#define QISDA_PRODUCT_H20_4515 0x4515
315#define QISDA_PRODUCT_H20_4519 0x4519 317#define QISDA_PRODUCT_H20_4519 0x4519
316 318
319/* TLAYTECH PRODUCTS */
320#define TLAYTECH_VENDOR_ID 0x20B9
321#define TLAYTECH_PRODUCT_TEU800 0x1682
317 322
318/* TOSHIBA PRODUCTS */ 323/* TOSHIBA PRODUCTS */
319#define TOSHIBA_VENDOR_ID 0x0930 324#define TOSHIBA_VENDOR_ID 0x0930
320#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302 325#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
326#define TOSHIBA_PRODUCT_G450 0x0d45
321 327
322#define ALINK_VENDOR_ID 0x1e0e 328#define ALINK_VENDOR_ID 0x1e0e
323#define ALINK_PRODUCT_3GU 0x9200 329#define ALINK_PRODUCT_3GU 0x9200
@@ -326,6 +332,9 @@ static int option_resume(struct usb_serial *serial);
326#define ALCATEL_VENDOR_ID 0x1bbb 332#define ALCATEL_VENDOR_ID 0x1bbb
327#define ALCATEL_PRODUCT_X060S 0x0000 333#define ALCATEL_PRODUCT_X060S 0x0000
328 334
335/* Airplus products */
336#define AIRPLUS_VENDOR_ID 0x1011
337#define AIRPLUS_PRODUCT_MCD650 0x3198
329 338
330static struct usb_device_id option_ids[] = { 339static struct usb_device_id option_ids[] = {
331 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 340 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -424,6 +433,7 @@ static struct usb_device_id option_ids[] = {
424 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, 433 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
425 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, 434 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
426 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, 435 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
436 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
427 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, 437 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
428 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ 438 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
429 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ 439 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -577,14 +587,18 @@ static struct usb_device_id option_ids[] = {
577 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 587 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
578 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 588 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
579 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 589 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
590 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
580 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, 591 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
581 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, 592 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
582 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, 593 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
583 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, 594 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
595 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
584 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ 596 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
585 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 597 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
586 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 598 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
587 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 599 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
600 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
601 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
588 { } /* Terminating entry */ 602 { } /* Terminating entry */
589}; 603};
590MODULE_DEVICE_TABLE(usb, option_ids); 604MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 0f4a70ce3823..c644e26394b4 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -288,7 +288,7 @@ static void setup_line(struct work_struct *work)
288 288
289 dbg("%s(): submitting interrupt urb", __func__); 289 dbg("%s(): submitting interrupt urb", __func__);
290 port->interrupt_in_urb->dev = port->serial->dev; 290 port->interrupt_in_urb->dev = port->serial->dev;
291 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 291 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
292 if (result != 0) { 292 if (result != 0) {
293 dev_err(&port->dev, "%s(): usb_submit_urb() failed" 293 dev_err(&port->dev, "%s(): usb_submit_urb() failed"
294 " with error %d\n", __func__, result); 294 " with error %d\n", __func__, result);
@@ -335,7 +335,7 @@ void send_data(struct work_struct *work)
335 335
336 dbg("%s(): submitting interrupt urb", __func__); 336 dbg("%s(): submitting interrupt urb", __func__);
337 port->interrupt_in_urb->dev = port->serial->dev; 337 port->interrupt_in_urb->dev = port->serial->dev;
338 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 338 result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
339 if (result != 0) { 339 if (result != 0) {
340 dev_err(&port->dev, "%s(): usb_submit_urb() failed" 340 dev_err(&port->dev, "%s(): usb_submit_urb() failed"
341 " with error %d\n", __func__, result); 341 " with error %d\n", __func__, result);
@@ -349,7 +349,7 @@ void send_data(struct work_struct *work)
349 349
350 port->write_urb->transfer_buffer_length = count; 350 port->write_urb->transfer_buffer_length = count;
351 port->write_urb->dev = port->serial->dev; 351 port->write_urb->dev = port->serial->dev;
352 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 352 result = usb_submit_urb(port->write_urb, GFP_NOIO);
353 if (result != 0) { 353 if (result != 0) {
354 dev_err(&port->dev, "%s(): usb_submit_urb() failed" 354 dev_err(&port->dev, "%s(): usb_submit_urb() failed"
355 " with error %d\n", __func__, result); 355 " with error %d\n", __func__, result);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1128e01525b1..9ec1a49e2362 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1046,13 +1046,15 @@ static void pl2303_push_data(struct tty_struct *tty,
1046 /* overrun is special, not associated with a char */ 1046 /* overrun is special, not associated with a char */
1047 if (line_status & UART_OVERRUN_ERROR) 1047 if (line_status & UART_OVERRUN_ERROR)
1048 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1048 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1049 if (port->console && port->sysrq) { 1049
1050 if (tty_flag == TTY_NORMAL && !(port->console && port->sysrq))
1051 tty_insert_flip_string(tty, data, urb->actual_length);
1052 else {
1050 int i; 1053 int i;
1051 for (i = 0; i < urb->actual_length; ++i) 1054 for (i = 0; i < urb->actual_length; ++i)
1052 if (!usb_serial_handle_sysrq_char(tty, port, data[i])) 1055 if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
1053 tty_insert_flip_char(tty, data[i], tty_flag); 1056 tty_insert_flip_char(tty, data[i], tty_flag);
1054 } else 1057 }
1055 tty_insert_flip_string(tty, data, urb->actual_length);
1056 tty_flip_buffer_push(tty); 1058 tty_flip_buffer_push(tty);
1057} 1059}
1058 1060
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 8c075b2416bb..5019325ba25d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -17,7 +17,7 @@
17 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 17 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
18*/ 18*/
19 19
20#define DRIVER_VERSION "v.1.3.7" 20#define DRIVER_VERSION "v.1.3.8"
21#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer" 21#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
22#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 22#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
23 23
@@ -296,7 +296,6 @@ struct sierra_port_private {
296 int dsr_state; 296 int dsr_state;
297 int dcd_state; 297 int dcd_state;
298 int ri_state; 298 int ri_state;
299
300 unsigned int opened:1; 299 unsigned int opened:1;
301}; 300};
302 301
@@ -306,6 +305,8 @@ static int sierra_send_setup(struct usb_serial_port *port)
306 struct sierra_port_private *portdata; 305 struct sierra_port_private *portdata;
307 __u16 interface = 0; 306 __u16 interface = 0;
308 int val = 0; 307 int val = 0;
308 int do_send = 0;
309 int retval;
309 310
310 dev_dbg(&port->dev, "%s\n", __func__); 311 dev_dbg(&port->dev, "%s\n", __func__);
311 312
@@ -324,10 +325,7 @@ static int sierra_send_setup(struct usb_serial_port *port)
324 */ 325 */
325 if (port->interrupt_in_urb) { 326 if (port->interrupt_in_urb) {
326 /* send control message */ 327 /* send control message */
327 return usb_control_msg(serial->dev, 328 do_send = 1;
328 usb_rcvctrlpipe(serial->dev, 0),
329 0x22, 0x21, val, interface,
330 NULL, 0, USB_CTRL_SET_TIMEOUT);
331 } 329 }
332 } 330 }
333 331
@@ -339,12 +337,18 @@ static int sierra_send_setup(struct usb_serial_port *port)
339 interface = 1; 337 interface = 1;
340 else if (port->bulk_out_endpointAddress == 5) 338 else if (port->bulk_out_endpointAddress == 5)
341 interface = 2; 339 interface = 2;
342 return usb_control_msg(serial->dev, 340
343 usb_rcvctrlpipe(serial->dev, 0), 341 do_send = 1;
344 0x22, 0x21, val, interface,
345 NULL, 0, USB_CTRL_SET_TIMEOUT);
346 } 342 }
347 return 0; 343 if (!do_send)
344 return 0;
345
346 usb_autopm_get_interface(serial->interface);
347 retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
348 0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT);
349 usb_autopm_put_interface(serial->interface);
350
351 return retval;
348} 352}
349 353
350static void sierra_set_termios(struct tty_struct *tty, 354static void sierra_set_termios(struct tty_struct *tty,
@@ -773,8 +777,11 @@ static void sierra_close(struct usb_serial_port *port)
773 777
774 if (serial->dev) { 778 if (serial->dev) {
775 mutex_lock(&serial->disc_mutex); 779 mutex_lock(&serial->disc_mutex);
776 if (!serial->disconnected) 780 if (!serial->disconnected) {
781 serial->interface->needs_remote_wakeup = 0;
782 usb_autopm_get_interface(serial->interface);
777 sierra_send_setup(port); 783 sierra_send_setup(port);
784 }
778 mutex_unlock(&serial->disc_mutex); 785 mutex_unlock(&serial->disc_mutex);
779 spin_lock_irq(&intfdata->susp_lock); 786 spin_lock_irq(&intfdata->susp_lock);
780 portdata->opened = 0; 787 portdata->opened = 0;
@@ -788,8 +795,6 @@ static void sierra_close(struct usb_serial_port *port)
788 sierra_release_urb(portdata->in_urbs[i]); 795 sierra_release_urb(portdata->in_urbs[i]);
789 portdata->in_urbs[i] = NULL; 796 portdata->in_urbs[i] = NULL;
790 } 797 }
791 usb_autopm_get_interface(serial->interface);
792 serial->interface->needs_remote_wakeup = 0;
793 } 798 }
794} 799}
795 800
@@ -827,6 +832,8 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
827 if (err) { 832 if (err) {
828 /* get rid of everything as in close */ 833 /* get rid of everything as in close */
829 sierra_close(port); 834 sierra_close(port);
835 /* restore balance for autopm */
836 usb_autopm_put_interface(serial->interface);
830 return err; 837 return err;
831 } 838 }
832 sierra_send_setup(port); 839 sierra_send_setup(port);
@@ -915,7 +922,7 @@ static void sierra_release(struct usb_serial *serial)
915#ifdef CONFIG_PM 922#ifdef CONFIG_PM
916static void stop_read_write_urbs(struct usb_serial *serial) 923static void stop_read_write_urbs(struct usb_serial *serial)
917{ 924{
918 int i, j; 925 int i;
919 struct usb_serial_port *port; 926 struct usb_serial_port *port;
920 struct sierra_port_private *portdata; 927 struct sierra_port_private *portdata;
921 928
@@ -923,8 +930,7 @@ static void stop_read_write_urbs(struct usb_serial *serial)
923 for (i = 0; i < serial->num_ports; ++i) { 930 for (i = 0; i < serial->num_ports; ++i) {
924 port = serial->port[i]; 931 port = serial->port[i];
925 portdata = usb_get_serial_port_data(port); 932 portdata = usb_get_serial_port_data(port);
926 for (j = 0; j < N_IN_URB; j++) 933 sierra_stop_rx_urbs(port);
927 usb_kill_urb(portdata->in_urbs[j]);
928 usb_kill_anchored_urbs(&portdata->active); 934 usb_kill_anchored_urbs(&portdata->active);
929 } 935 }
930} 936}
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index cb7e95f9fcbf..b282c0f2d8e5 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -165,34 +165,36 @@ static void symbol_throttle(struct tty_struct *tty)
165{ 165{
166 struct usb_serial_port *port = tty->driver_data; 166 struct usb_serial_port *port = tty->driver_data;
167 struct symbol_private *priv = usb_get_serial_data(port->serial); 167 struct symbol_private *priv = usb_get_serial_data(port->serial);
168 unsigned long flags;
169 168
170 dbg("%s - port %d", __func__, port->number); 169 dbg("%s - port %d", __func__, port->number);
171 spin_lock_irqsave(&priv->lock, flags); 170 spin_lock_irq(&priv->lock);
172 priv->throttled = true; 171 priv->throttled = true;
173 spin_unlock_irqrestore(&priv->lock, flags); 172 spin_unlock_irq(&priv->lock);
174} 173}
175 174
176static void symbol_unthrottle(struct tty_struct *tty) 175static void symbol_unthrottle(struct tty_struct *tty)
177{ 176{
178 struct usb_serial_port *port = tty->driver_data; 177 struct usb_serial_port *port = tty->driver_data;
179 struct symbol_private *priv = usb_get_serial_data(port->serial); 178 struct symbol_private *priv = usb_get_serial_data(port->serial);
180 unsigned long flags;
181 int result; 179 int result;
180 bool was_throttled;
182 181
183 dbg("%s - port %d", __func__, port->number); 182 dbg("%s - port %d", __func__, port->number);
184 183
185 spin_lock_irqsave(&priv->lock, flags); 184 spin_lock_irq(&priv->lock);
186 priv->throttled = false; 185 priv->throttled = false;
186 was_throttled = priv->actually_throttled;
187 priv->actually_throttled = false; 187 priv->actually_throttled = false;
188 spin_unlock_irqrestore(&priv->lock, flags); 188 spin_unlock_irq(&priv->lock);
189 189
190 priv->int_urb->dev = port->serial->dev; 190 priv->int_urb->dev = port->serial->dev;
191 result = usb_submit_urb(priv->int_urb, GFP_ATOMIC); 191 if (was_throttled) {
192 if (result) 192 result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
193 dev_err(&port->dev, 193 if (result)
194 "%s - failed submitting read urb, error %d\n", 194 dev_err(&port->dev,
195 "%s - failed submitting read urb, error %d\n",
195 __func__, result); 196 __func__, result);
197 }
196} 198}
197 199
198static int symbol_startup(struct usb_serial *serial) 200static int symbol_startup(struct usb_serial *serial)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index ff75a3589e7e..bd3fa7ff15b1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -156,7 +156,8 @@ static void destroy_serial(struct kref *kref)
156 if (serial->minor != SERIAL_TTY_NO_MINOR) 156 if (serial->minor != SERIAL_TTY_NO_MINOR)
157 return_serial(serial); 157 return_serial(serial);
158 158
159 serial->type->release(serial); 159 if (serial->attached)
160 serial->type->release(serial);
160 161
161 /* Now that nothing is using the ports, they can be freed */ 162 /* Now that nothing is using the ports, they can be freed */
162 for (i = 0; i < serial->num_port_pointers; ++i) { 163 for (i = 0; i < serial->num_port_pointers; ++i) {
@@ -192,7 +193,7 @@ void usb_serial_put(struct usb_serial *serial)
192 * This is the first place a new tty gets used. Hence this is where we 193 * This is the first place a new tty gets used. Hence this is where we
193 * acquire references to the usb_serial structure and the driver module, 194 * acquire references to the usb_serial structure and the driver module,
194 * where we store a pointer to the port, and where we do an autoresume. 195 * where we store a pointer to the port, and where we do an autoresume.
195 * All these actions are reversed in serial_release(). 196 * All these actions are reversed in serial_cleanup().
196 */ 197 */
197static int serial_install(struct tty_driver *driver, struct tty_struct *tty) 198static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
198{ 199{
@@ -339,15 +340,16 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
339} 340}
340 341
341/** 342/**
342 * serial_release - free resources post close/hangup 343 * serial_cleanup - free resources post close/hangup
343 * @port: port to free up 344 * @port: port to free up
344 * 345 *
345 * Do the resource freeing and refcount dropping for the port. 346 * Do the resource freeing and refcount dropping for the port.
346 * Avoid freeing the console. 347 * Avoid freeing the console.
347 * 348 *
348 * Called when the last tty kref is dropped. 349 * Called asynchronously after the last tty kref is dropped,
350 * and the tty layer has already done the tty_shutdown(tty);
349 */ 351 */
350static void serial_release(struct tty_struct *tty) 352static void serial_cleanup(struct tty_struct *tty)
351{ 353{
352 struct usb_serial_port *port = tty->driver_data; 354 struct usb_serial_port *port = tty->driver_data;
353 struct usb_serial *serial; 355 struct usb_serial *serial;
@@ -361,9 +363,6 @@ static void serial_release(struct tty_struct *tty)
361 363
362 dbg("%s - port %d", __func__, port->number); 364 dbg("%s - port %d", __func__, port->number);
363 365
364 /* Standard shutdown processing */
365 tty_shutdown(tty);
366
367 tty->driver_data = NULL; 366 tty->driver_data = NULL;
368 367
369 serial = port->serial; 368 serial = port->serial;
@@ -1061,12 +1060,15 @@ int usb_serial_probe(struct usb_interface *interface,
1061 module_put(type->driver.owner); 1060 module_put(type->driver.owner);
1062 if (retval < 0) 1061 if (retval < 0)
1063 goto probe_error; 1062 goto probe_error;
1063 serial->attached = 1;
1064 if (retval > 0) { 1064 if (retval > 0) {
1065 /* quietly accept this device, but don't bind to a 1065 /* quietly accept this device, but don't bind to a
1066 serial port as it's about to disappear */ 1066 serial port as it's about to disappear */
1067 serial->num_ports = 0; 1067 serial->num_ports = 0;
1068 goto exit; 1068 goto exit;
1069 } 1069 }
1070 } else {
1071 serial->attached = 1;
1070 } 1072 }
1071 1073
1072 if (get_free_serial(serial, num_ports, &minor) == NULL) { 1074 if (get_free_serial(serial, num_ports, &minor) == NULL) {
@@ -1166,8 +1168,10 @@ int usb_serial_suspend(struct usb_interface *intf, pm_message_t message)
1166 1168
1167 if (serial->type->suspend) { 1169 if (serial->type->suspend) {
1168 r = serial->type->suspend(serial, message); 1170 r = serial->type->suspend(serial, message);
1169 if (r < 0) 1171 if (r < 0) {
1172 serial->suspending = 0;
1170 goto err_out; 1173 goto err_out;
1174 }
1171 } 1175 }
1172 1176
1173 for (i = 0; i < serial->num_ports; ++i) { 1177 for (i = 0; i < serial->num_ports; ++i) {
@@ -1210,7 +1214,7 @@ static const struct tty_operations serial_ops = {
1210 .chars_in_buffer = serial_chars_in_buffer, 1214 .chars_in_buffer = serial_chars_in_buffer,
1211 .tiocmget = serial_tiocmget, 1215 .tiocmget = serial_tiocmget,
1212 .tiocmset = serial_tiocmset, 1216 .tiocmset = serial_tiocmset,
1213 .shutdown = serial_release, 1217 .cleanup = serial_cleanup,
1214 .install = serial_install, 1218 .install = serial_install,
1215 .proc_fops = &serial_proc_fops, 1219 .proc_fops = &serial_proc_fops,
1216}; 1220};
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 1aa5d20a5d99..ad1f9232292d 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -513,7 +513,8 @@ static void visor_read_bulk_callback(struct urb *urb)
513 tty_kref_put(tty); 513 tty_kref_put(tty);
514 } 514 }
515 spin_lock(&priv->lock); 515 spin_lock(&priv->lock);
516 priv->bytes_in += available_room; 516 if (tty)
517 priv->bytes_in += available_room;
517 518
518 } else { 519 } else {
519 spin_lock(&priv->lock); 520 spin_lock(&priv->lock);
@@ -582,12 +583,11 @@ static void visor_throttle(struct tty_struct *tty)
582{ 583{
583 struct usb_serial_port *port = tty->driver_data; 584 struct usb_serial_port *port = tty->driver_data;
584 struct visor_private *priv = usb_get_serial_port_data(port); 585 struct visor_private *priv = usb_get_serial_port_data(port);
585 unsigned long flags;
586 586
587 dbg("%s - port %d", __func__, port->number); 587 dbg("%s - port %d", __func__, port->number);
588 spin_lock_irqsave(&priv->lock, flags); 588 spin_lock_irq(&priv->lock);
589 priv->throttled = 1; 589 priv->throttled = 1;
590 spin_unlock_irqrestore(&priv->lock, flags); 590 spin_unlock_irq(&priv->lock);
591} 591}
592 592
593 593
@@ -595,21 +595,23 @@ static void visor_unthrottle(struct tty_struct *tty)
595{ 595{
596 struct usb_serial_port *port = tty->driver_data; 596 struct usb_serial_port *port = tty->driver_data;
597 struct visor_private *priv = usb_get_serial_port_data(port); 597 struct visor_private *priv = usb_get_serial_port_data(port);
598 unsigned long flags; 598 int result, was_throttled;
599 int result;
600 599
601 dbg("%s - port %d", __func__, port->number); 600 dbg("%s - port %d", __func__, port->number);
602 spin_lock_irqsave(&priv->lock, flags); 601 spin_lock_irq(&priv->lock);
603 priv->throttled = 0; 602 priv->throttled = 0;
603 was_throttled = priv->actually_throttled;
604 priv->actually_throttled = 0; 604 priv->actually_throttled = 0;
605 spin_unlock_irqrestore(&priv->lock, flags); 605 spin_unlock_irq(&priv->lock);
606 606
607 port->read_urb->dev = port->serial->dev; 607 if (was_throttled) {
608 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 608 port->read_urb->dev = port->serial->dev;
609 if (result) 609 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
610 dev_err(&port->dev, 610 if (result)
611 "%s - failed submitting read urb, error %d\n", 611 dev_err(&port->dev,
612 "%s - failed submitting read urb, error %d\n",
612 __func__, result); 613 __func__, result);
614 }
613} 615}
614 616
615static int palm_os_3_probe(struct usb_serial *serial, 617static int palm_os_3_probe(struct usb_serial *serial,
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 62424eec33ec..1093d2eb046a 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -949,13 +949,12 @@ static void whiteheat_throttle(struct tty_struct *tty)
949{ 949{
950 struct usb_serial_port *port = tty->driver_data; 950 struct usb_serial_port *port = tty->driver_data;
951 struct whiteheat_private *info = usb_get_serial_port_data(port); 951 struct whiteheat_private *info = usb_get_serial_port_data(port);
952 unsigned long flags;
953 952
954 dbg("%s - port %d", __func__, port->number); 953 dbg("%s - port %d", __func__, port->number);
955 954
956 spin_lock_irqsave(&info->lock, flags); 955 spin_lock_irq(&info->lock);
957 info->flags |= THROTTLED; 956 info->flags |= THROTTLED;
958 spin_unlock_irqrestore(&info->lock, flags); 957 spin_unlock_irq(&info->lock);
959 958
960 return; 959 return;
961} 960}
@@ -966,14 +965,13 @@ static void whiteheat_unthrottle(struct tty_struct *tty)
966 struct usb_serial_port *port = tty->driver_data; 965 struct usb_serial_port *port = tty->driver_data;
967 struct whiteheat_private *info = usb_get_serial_port_data(port); 966 struct whiteheat_private *info = usb_get_serial_port_data(port);
968 int actually_throttled; 967 int actually_throttled;
969 unsigned long flags;
970 968
971 dbg("%s - port %d", __func__, port->number); 969 dbg("%s - port %d", __func__, port->number);
972 970
973 spin_lock_irqsave(&info->lock, flags); 971 spin_lock_irq(&info->lock);
974 actually_throttled = info->flags & ACTUALLY_THROTTLED; 972 actually_throttled = info->flags & ACTUALLY_THROTTLED;
975 info->flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 973 info->flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
976 spin_unlock_irqrestore(&info->lock, flags); 974 spin_unlock_irq(&info->lock);
977 975
978 if (actually_throttled) 976 if (actually_throttled)
979 rx_data_softint(&info->rx_work); 977 rx_data_softint(&info->rx_work);
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index e20dc525d177..589f6b4404f0 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -696,7 +696,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
696 /* device supports and needs bigger sense buffer */ 696 /* device supports and needs bigger sense buffer */
697 if (us->fflags & US_FL_SANE_SENSE) 697 if (us->fflags & US_FL_SANE_SENSE)
698 sense_size = ~0; 698 sense_size = ~0;
699 699Retry_Sense:
700 US_DEBUGP("Issuing auto-REQUEST_SENSE\n"); 700 US_DEBUGP("Issuing auto-REQUEST_SENSE\n");
701 701
702 scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size); 702 scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);
@@ -720,6 +720,21 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
720 srb->result = DID_ABORT << 16; 720 srb->result = DID_ABORT << 16;
721 goto Handle_Errors; 721 goto Handle_Errors;
722 } 722 }
723
724 /* Some devices claim to support larger sense but fail when
725 * trying to request it. When a transport failure happens
726 * using US_FS_SANE_SENSE, we always retry with a standard
727 * (small) sense request. This fixes some USB GSM modems
728 */
729 if (temp_result == USB_STOR_TRANSPORT_FAILED &&
730 (us->fflags & US_FL_SANE_SENSE) &&
731 sense_size != US_SENSE_SIZE) {
732 US_DEBUGP("-- auto-sense failure, retry small sense\n");
733 sense_size = US_SENSE_SIZE;
734 goto Retry_Sense;
735 }
736
737 /* Other failures */
723 if (temp_result != USB_STOR_TRANSPORT_GOOD) { 738 if (temp_result != USB_STOR_TRANSPORT_GOOD) {
724 US_DEBUGP("-- auto-sense failure\n"); 739 US_DEBUGP("-- auto-sense failure\n");
725 740
@@ -768,17 +783,32 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
768 /* set the result so the higher layers expect this data */ 783 /* set the result so the higher layers expect this data */
769 srb->result = SAM_STAT_CHECK_CONDITION; 784 srb->result = SAM_STAT_CHECK_CONDITION;
770 785
771 /* If things are really okay, then let's show that. Zero 786 /* We often get empty sense data. This could indicate that
772 * out the sense buffer so the higher layers won't realize 787 * everything worked or that there was an unspecified
773 * we did an unsolicited auto-sense. */ 788 * problem. We have to decide which.
774 if (result == USB_STOR_TRANSPORT_GOOD && 789 */
775 /* Filemark 0, ignore EOM, ILI 0, no sense */ 790 if ( /* Filemark 0, ignore EOM, ILI 0, no sense */
776 (srb->sense_buffer[2] & 0xaf) == 0 && 791 (srb->sense_buffer[2] & 0xaf) == 0 &&
777 /* No ASC or ASCQ */ 792 /* No ASC or ASCQ */
778 srb->sense_buffer[12] == 0 && 793 srb->sense_buffer[12] == 0 &&
779 srb->sense_buffer[13] == 0) { 794 srb->sense_buffer[13] == 0) {
780 srb->result = SAM_STAT_GOOD; 795
781 srb->sense_buffer[0] = 0x0; 796 /* If things are really okay, then let's show that.
797 * Zero out the sense buffer so the higher layers
798 * won't realize we did an unsolicited auto-sense.
799 */
800 if (result == USB_STOR_TRANSPORT_GOOD) {
801 srb->result = SAM_STAT_GOOD;
802 srb->sense_buffer[0] = 0x0;
803
804 /* If there was a problem, report an unspecified
805 * hardware error to prevent the higher layers from
806 * entering an infinite retry loop.
807 */
808 } else {
809 srb->result = DID_ERROR << 16;
810 srb->sense_buffer[2] = HARDWARE_ERROR;
811 }
782 } 812 }
783 } 813 }
784 814
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 079ae0f7bec1..d4f034ebaa8a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1823,6 +1823,13 @@ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1823 US_SC_DEVICE, US_PR_DEVICE, NULL, 1823 US_SC_DEVICE, US_PR_DEVICE, NULL,
1824 US_FL_IGNORE_RESIDUE ), 1824 US_FL_IGNORE_RESIDUE ),
1825 1825
1826/* Reported by Sergey Pinaev <dfo@antex.ru> */
1827UNUSUAL_DEV( 0x4102, 0x1059, 0x0000, 0x0000,
1828 "iRiver",
1829 "P7K",
1830 US_SC_DEVICE, US_PR_DEVICE, NULL,
1831 US_FL_MAX_SECTORS_64 ),
1832
1826/* 1833/*
1827 * David Härdeman <david@2gen.com> 1834 * David Härdeman <david@2gen.com>
1828 * The key makes the SCSI stack print confusing (but harmless) messages 1835 * The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index b2f149fedcc5..4516c36436e6 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -200,35 +200,40 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
200{ 200{
201 int result, bytes, secd_size; 201 int result, bytes, secd_size;
202 struct device *dev = &usb_dev->dev; 202 struct device *dev = &usb_dev->dev;
203 struct usb_security_descriptor secd; 203 struct usb_security_descriptor *secd;
204 const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; 204 const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
205 void *secd_buf;
206 const void *itr, *top; 205 const void *itr, *top;
207 char buf[64]; 206 char buf[64];
208 207
208 secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL);
209 if (secd == NULL) {
210 result = -ENOMEM;
211 goto out;
212 }
213
209 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 214 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
210 0, &secd, sizeof(secd)); 215 0, secd, sizeof(struct usb_security_descriptor));
211 if (result < sizeof(secd)) { 216 if (result < sizeof(secd)) {
212 dev_err(dev, "Can't read security descriptor or " 217 dev_err(dev, "Can't read security descriptor or "
213 "not enough data: %d\n", result); 218 "not enough data: %d\n", result);
214 goto error_secd; 219 goto out;
215 } 220 }
216 secd_size = le16_to_cpu(secd.wTotalLength); 221 secd_size = le16_to_cpu(secd->wTotalLength);
217 secd_buf = kmalloc(secd_size, GFP_KERNEL); 222 secd = krealloc(secd, secd_size, GFP_KERNEL);
218 if (secd_buf == NULL) { 223 if (secd == NULL) {
219 dev_err(dev, "Can't allocate space for security descriptors\n"); 224 dev_err(dev, "Can't allocate space for security descriptors\n");
220 goto error_secd_alloc; 225 goto out;
221 } 226 }
222 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 227 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
223 0, secd_buf, secd_size); 228 0, secd, secd_size);
224 if (result < secd_size) { 229 if (result < secd_size) {
225 dev_err(dev, "Can't read security descriptor or " 230 dev_err(dev, "Can't read security descriptor or "
226 "not enough data: %d\n", result); 231 "not enough data: %d\n", result);
227 goto error_secd_all; 232 goto out;
228 } 233 }
229 bytes = 0; 234 bytes = 0;
230 itr = secd_buf + sizeof(secd); 235 itr = &secd[1];
231 top = secd_buf + result; 236 top = (void *)secd + result;
232 while (itr < top) { 237 while (itr < top) {
233 etd = itr; 238 etd = itr;
234 if (top - itr < sizeof(*etd)) { 239 if (top - itr < sizeof(*etd)) {
@@ -259,24 +264,16 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
259 dev_err(dev, "WUSB device doesn't support CCM1 encryption, " 264 dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
260 "can't use!\n"); 265 "can't use!\n");
261 result = -EINVAL; 266 result = -EINVAL;
262 goto error_no_ccm1; 267 goto out;
263 } 268 }
264 wusb_dev->ccm1_etd = *ccm1_etd; 269 wusb_dev->ccm1_etd = *ccm1_etd;
265 dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", 270 dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
266 buf, wusb_et_name(ccm1_etd->bEncryptionType), 271 buf, wusb_et_name(ccm1_etd->bEncryptionType),
267 ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); 272 ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
268 result = 0; 273 result = 0;
269 kfree(secd_buf);
270out: 274out:
275 kfree(secd);
271 return result; 276 return result;
272
273
274error_no_ccm1:
275error_secd_all:
276 kfree(secd_buf);
277error_secd_alloc:
278error_secd:
279 goto out;
280} 277}
281 278
282void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) 279void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 4a42993700c1..2eecec0c13c9 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
205 return ret < 0 ? ret : len; 205 return ret < 0 ? ret : len;
206} 206}
207 207
208static struct file_operations command_fops = { 208static const struct file_operations command_fops = {
209 .open = command_open, 209 .open = command_open,
210 .write = command_write, 210 .write = command_write,
211 .read = NULL, 211 .read = NULL,
@@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file)
255 return single_open(file, reservations_print, inode->i_private); 255 return single_open(file, reservations_print, inode->i_private);
256} 256}
257 257
258static struct file_operations reservations_fops = { 258static const struct file_operations reservations_fops = {
259 .open = reservations_open, 259 .open = reservations_open,
260 .read = seq_read, 260 .read = seq_read,
261 .llseek = seq_lseek, 261 .llseek = seq_lseek,
@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file)
283 return single_open(file, drp_avail_print, inode->i_private); 283 return single_open(file, drp_avail_print, inode->i_private);
284} 284}
285 285
286static struct file_operations drp_avail_fops = { 286static const struct file_operations drp_avail_fops = {
287 .open = drp_avail_open, 287 .open = drp_avail_open,
288 .read = seq_read, 288 .read = seq_read,
289 .llseek = seq_lseek, 289 .llseek = seq_lseek,
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
index 1d9a6f54658e..01950c62dc8d 100644
--- a/drivers/uwb/whc-rc.c
+++ b/drivers/uwb/whc-rc.c
@@ -42,6 +42,7 @@
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/pci.h> 44#include <linux/pci.h>
45#include <linux/sched.h>
45#include <linux/dma-mapping.h> 46#include <linux/dma-mapping.h>
46#include <linux/interrupt.h> 47#include <linux/interrupt.h>
47#include <linux/workqueue.h> 48#include <linux/workqueue.h>
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9bbb2855ea91..188e1ba3b69f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2121,7 +2121,7 @@ config FB_EP93XX
2121 2121
2122config FB_PRE_INIT_FB 2122config FB_PRE_INIT_FB
2123 bool "Don't reinitialize, use bootloader's GDC/Display configuration" 2123 bool "Don't reinitialize, use bootloader's GDC/Display configuration"
2124 depends on FB_MB862XX_LIME 2124 depends on FB && FB_MB862XX_LIME
2125 ---help--- 2125 ---help---
2126 Select this option if display contents should be inherited as set by 2126 Select this option if display contents should be inherited as set by
2127 the bootloader. 2127 the bootloader.
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 8cd279be74e5..37624f74e88b 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -329,12 +329,6 @@ extern unsigned char fontdata_8x16[];
329 * 329 *
330 * * perform fb specific mmap * 330 * * perform fb specific mmap *
331 * int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); 331 * int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
332 *
333 * * save current hardware state *
334 * void (*fb_save_state)(struct fb_info *info);
335 *
336 * * restore saved state *
337 * void (*fb_restore_state)(struct fb_info *info);
338 * } ; 332 * } ;
339 */ 333 */
340 334
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 2830ffd72976..d5e801076d33 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -484,6 +484,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
484 unsigned long value; 484 unsigned long value;
485 unsigned long clk_value_khz; 485 unsigned long clk_value_khz;
486 unsigned long bits_per_line; 486 unsigned long bits_per_line;
487 unsigned long pix_factor = 2;
487 488
488 might_sleep(); 489 might_sleep();
489 490
@@ -516,20 +517,24 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
516 /* Now, the LCDC core... */ 517 /* Now, the LCDC core... */
517 518
518 /* Set pixel clock */ 519 /* Set pixel clock */
520 if (cpu_is_at91sam9g45() && !cpu_is_at91sam9g45es())
521 pix_factor = 1;
522
519 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000; 523 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
520 524
521 value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock)); 525 value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock));
522 526
523 if (value < 2) { 527 if (value < pix_factor) {
524 dev_notice(info->device, "Bypassing pixel clock divider\n"); 528 dev_notice(info->device, "Bypassing pixel clock divider\n");
525 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS); 529 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS);
526 } else { 530 } else {
527 value = (value / 2) - 1; 531 value = (value / pix_factor) - 1;
528 dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n", 532 dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n",
529 value); 533 value);
530 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, 534 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1,
531 value << ATMEL_LCDC_CLKVAL_OFFSET); 535 value << ATMEL_LCDC_CLKVAL_OFFSET);
532 info->var.pixclock = KHZ2PICOS(clk_value_khz / (2 * (value + 1))); 536 info->var.pixclock =
537 KHZ2PICOS(clk_value_khz / (pix_factor * (value + 1)));
533 dev_dbg(info->device, " updated pixclk: %lu KHz\n", 538 dev_dbg(info->device, " updated pixclk: %lu KHz\n",
534 PICOS2KHZ(info->var.pixclock)); 539 PICOS2KHZ(info->var.pixclock));
535 } 540 }
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 90861cd93165..09bfa9662e4d 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@ config LCD_CORGI
31 Say y here to support the LCD panels usually found on SHARP 31 Say y here to support the LCD panels usually found on SHARP
32 corgi (C7x0) and spitz (Cxx00) models. 32 corgi (C7x0) and spitz (Cxx00) models.
33 33
34config LCD_LMS283GF05
35 tristate "Samsung LMS283GF05 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
37 help
38 SPI driver for Samsung LMS283GF05. This provides basic support
39 for powering the LCD up/down through a sysfs interface.
40
34config LCD_LTV350QV 41config LCD_LTV350QV
35 tristate "Samsung LTV350QV LCD Panel" 42 tristate "Samsung LTV350QV LCD Panel"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER 43 depends on LCD_CLASS_DEVICE && SPI_MASTER
@@ -229,3 +236,29 @@ config BACKLIGHT_SAHARA
229 help 236 help
230 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the 237 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
231 backlight driver. 238 backlight driver.
239
240config BACKLIGHT_WM831X
241 tristate "WM831x PMIC Backlight Driver"
242 depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
243 help
244 If you have a backlight driven by the ISINK and DCDC of a
245 WM831x PMIC say y to enable the backlight driver for it.
246
247config BACKLIGHT_ADX
248 tristate "Avionic Design Xanthos Backlight Driver"
249 depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
250 default y
251 help
252 Say Y to enable the backlight driver on Avionic Design Xanthos-based
253 boards.
254
255config BACKLIGHT_ADP5520
256 tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
257 depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
258 help
259 If you have a LCD backlight connected to the BST/BL_SNK output of
260 ADP5520 or ADP5501, say Y here to enable this driver.
261
262 To compile this driver as a module, choose M here: the module will
263 be called adp5520_bl.
264
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4eb178c1d684..9a405548874c 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o 4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o 5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
6obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
6obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 7obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
7obj-$(CONFIG_LCD_ILI9320) += ili9320.o 8obj-$(CONFIG_LCD_ILI9320) += ili9320.o
8obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o 9obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
@@ -24,4 +25,7 @@ obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
24obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 25obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
25obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 26obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
26obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 27obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
28obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
29obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
30obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
27 31
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
new file mode 100644
index 000000000000..ad05da5ba3c7
--- /dev/null
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -0,0 +1,377 @@
1/*
2 * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/fb.h>
13#include <linux/backlight.h>
14#include <linux/mfd/adp5520.h>
15
16struct adp5520_bl {
17 struct device *master;
18 struct adp5520_backlight_platfrom_data *pdata;
19 struct mutex lock;
20 unsigned long cached_daylight_max;
21 int id;
22 int current_brightness;
23};
24
25static int adp5520_bl_set(struct backlight_device *bl, int brightness)
26{
27 struct adp5520_bl *data = bl_get_data(bl);
28 struct device *master = data->master;
29 int ret = 0;
30
31 if (data->pdata->en_ambl_sens) {
32 if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
33 /* Disable Ambient Light auto adjust */
34 ret |= adp5520_clr_bits(master, BL_CONTROL,
35 BL_AUTO_ADJ);
36 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
37 } else {
38 /*
39 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
40 * restore daylight l3 sysfs brightness
41 */
42 ret |= adp5520_write(master, DAYLIGHT_MAX,
43 data->cached_daylight_max);
44 ret |= adp5520_set_bits(master, BL_CONTROL,
45 BL_AUTO_ADJ);
46 }
47 } else {
48 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
49 }
50
51 if (data->current_brightness && brightness == 0)
52 ret |= adp5520_set_bits(master,
53 MODE_STATUS, DIM_EN);
54 else if (data->current_brightness == 0 && brightness)
55 ret |= adp5520_clr_bits(master,
56 MODE_STATUS, DIM_EN);
57
58 if (!ret)
59 data->current_brightness = brightness;
60
61 return ret;
62}
63
64static int adp5520_bl_update_status(struct backlight_device *bl)
65{
66 int brightness = bl->props.brightness;
67 if (bl->props.power != FB_BLANK_UNBLANK)
68 brightness = 0;
69
70 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
71 brightness = 0;
72
73 return adp5520_bl_set(bl, brightness);
74}
75
76static int adp5520_bl_get_brightness(struct backlight_device *bl)
77{
78 struct adp5520_bl *data = bl_get_data(bl);
79 int error;
80 uint8_t reg_val;
81
82 error = adp5520_read(data->master, BL_VALUE, &reg_val);
83
84 return error ? data->current_brightness : reg_val;
85}
86
87static struct backlight_ops adp5520_bl_ops = {
88 .update_status = adp5520_bl_update_status,
89 .get_brightness = adp5520_bl_get_brightness,
90};
91
92static int adp5520_bl_setup(struct backlight_device *bl)
93{
94 struct adp5520_bl *data = bl_get_data(bl);
95 struct device *master = data->master;
96 struct adp5520_backlight_platfrom_data *pdata = data->pdata;
97 int ret = 0;
98
99 ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
100 ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
101
102 if (pdata->en_ambl_sens) {
103 data->cached_daylight_max = pdata->l1_daylight_max;
104 ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
105 ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
106 ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
107 ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
108 ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
109 ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
110 ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
111 ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
112 ret |= adp5520_write(master, ALS_CMPR_CFG,
113 ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
114 }
115
116 ret |= adp5520_write(master, BL_CONTROL,
117 BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
118
119 ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
120 pdata->fade_out));
121
122 ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
123
124 return ret;
125}
126
127static ssize_t adp5520_show(struct device *dev, char *buf, int reg)
128{
129 struct adp5520_bl *data = dev_get_drvdata(dev);
130 int error;
131 uint8_t reg_val;
132
133 mutex_lock(&data->lock);
134 error = adp5520_read(data->master, reg, &reg_val);
135 mutex_unlock(&data->lock);
136
137 return sprintf(buf, "%u\n", reg_val);
138}
139
140static ssize_t adp5520_store(struct device *dev, const char *buf,
141 size_t count, int reg)
142{
143 struct adp5520_bl *data = dev_get_drvdata(dev);
144 unsigned long val;
145 int ret;
146
147 ret = strict_strtoul(buf, 10, &val);
148 if (ret)
149 return ret;
150
151 mutex_lock(&data->lock);
152 adp5520_write(data->master, reg, val);
153 mutex_unlock(&data->lock);
154
155 return count;
156}
157
158static ssize_t adp5520_bl_dark_max_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 return adp5520_show(dev, buf, DARK_MAX);
162}
163
164static ssize_t adp5520_bl_dark_max_store(struct device *dev,
165 struct device_attribute *attr, const char *buf, size_t count)
166{
167 return adp5520_store(dev, buf, count, DARK_MAX);
168}
169static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
170 adp5520_bl_dark_max_store);
171
172static ssize_t adp5520_bl_office_max_show(struct device *dev,
173 struct device_attribute *attr, char *buf)
174{
175 return adp5520_show(dev, buf, OFFICE_MAX);
176}
177
178static ssize_t adp5520_bl_office_max_store(struct device *dev,
179 struct device_attribute *attr, const char *buf, size_t count)
180{
181 return adp5520_store(dev, buf, count, OFFICE_MAX);
182}
183static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
184 adp5520_bl_office_max_store);
185
186static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 return adp5520_show(dev, buf, DAYLIGHT_MAX);
190}
191
192static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
193 struct device_attribute *attr, const char *buf, size_t count)
194{
195 struct adp5520_bl *data = dev_get_drvdata(dev);
196
197 strict_strtoul(buf, 10, &data->cached_daylight_max);
198 return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
199}
200static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
201 adp5520_bl_daylight_max_store);
202
203static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205{
206 return adp5520_show(dev, buf, DARK_DIM);
207}
208
209static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
210 struct device_attribute *attr,
211 const char *buf, size_t count)
212{
213 return adp5520_store(dev, buf, count, DARK_DIM);
214}
215static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
216 adp5520_bl_dark_dim_store);
217
218static ssize_t adp5520_bl_office_dim_show(struct device *dev,
219 struct device_attribute *attr, char *buf)
220{
221 return adp5520_show(dev, buf, OFFICE_DIM);
222}
223
224static ssize_t adp5520_bl_office_dim_store(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count)
227{
228 return adp5520_store(dev, buf, count, OFFICE_DIM);
229}
230static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
231 adp5520_bl_office_dim_store);
232
233static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 return adp5520_show(dev, buf, DAYLIGHT_DIM);
237}
238
239static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
240 struct device_attribute *attr,
241 const char *buf, size_t count)
242{
243 return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
244}
245static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
246 adp5520_bl_daylight_dim_store);
247
248static struct attribute *adp5520_bl_attributes[] = {
249 &dev_attr_dark_max.attr,
250 &dev_attr_dark_dim.attr,
251 &dev_attr_office_max.attr,
252 &dev_attr_office_dim.attr,
253 &dev_attr_daylight_max.attr,
254 &dev_attr_daylight_dim.attr,
255 NULL
256};
257
258static const struct attribute_group adp5520_bl_attr_group = {
259 .attrs = adp5520_bl_attributes,
260};
261
262static int __devinit adp5520_bl_probe(struct platform_device *pdev)
263{
264 struct backlight_device *bl;
265 struct adp5520_bl *data;
266 int ret = 0;
267
268 data = kzalloc(sizeof(*data), GFP_KERNEL);
269 if (data == NULL)
270 return -ENOMEM;
271
272 data->master = pdev->dev.parent;
273 data->pdata = pdev->dev.platform_data;
274
275 if (data->pdata == NULL) {
276 dev_err(&pdev->dev, "missing platform data\n");
277 kfree(data);
278 return -ENODEV;
279 }
280
281 data->id = pdev->id;
282 data->current_brightness = 0;
283
284 mutex_init(&data->lock);
285
286 bl = backlight_device_register(pdev->name, data->master,
287 data, &adp5520_bl_ops);
288 if (IS_ERR(bl)) {
289 dev_err(&pdev->dev, "failed to register backlight\n");
290 kfree(data);
291 return PTR_ERR(bl);
292 }
293
294 bl->props.max_brightness =
295 bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
296
297 if (data->pdata->en_ambl_sens)
298 ret = sysfs_create_group(&bl->dev.kobj,
299 &adp5520_bl_attr_group);
300
301 if (ret) {
302 dev_err(&pdev->dev, "failed to register sysfs\n");
303 backlight_device_unregister(bl);
304 kfree(data);
305 }
306
307 platform_set_drvdata(pdev, bl);
308 ret |= adp5520_bl_setup(bl);
309 backlight_update_status(bl);
310
311 return ret;
312}
313
314static int __devexit adp5520_bl_remove(struct platform_device *pdev)
315{
316 struct backlight_device *bl = platform_get_drvdata(pdev);
317 struct adp5520_bl *data = bl_get_data(bl);
318
319 adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);
320
321 if (data->pdata->en_ambl_sens)
322 sysfs_remove_group(&bl->dev.kobj,
323 &adp5520_bl_attr_group);
324
325 backlight_device_unregister(bl);
326 kfree(data);
327
328 return 0;
329}
330
331#ifdef CONFIG_PM
332static int adp5520_bl_suspend(struct platform_device *pdev,
333 pm_message_t state)
334{
335 struct backlight_device *bl = platform_get_drvdata(pdev);
336 return adp5520_bl_set(bl, 0);
337}
338
339static int adp5520_bl_resume(struct platform_device *pdev)
340{
341 struct backlight_device *bl = platform_get_drvdata(pdev);
342
343 backlight_update_status(bl);
344 return 0;
345}
346#else
347#define adp5520_bl_suspend NULL
348#define adp5520_bl_resume NULL
349#endif
350
351static struct platform_driver adp5520_bl_driver = {
352 .driver = {
353 .name = "adp5520-backlight",
354 .owner = THIS_MODULE,
355 },
356 .probe = adp5520_bl_probe,
357 .remove = __devexit_p(adp5520_bl_remove),
358 .suspend = adp5520_bl_suspend,
359 .resume = adp5520_bl_resume,
360};
361
362static int __init adp5520_bl_init(void)
363{
364 return platform_driver_register(&adp5520_bl_driver);
365}
366module_init(adp5520_bl_init);
367
368static void __exit adp5520_bl_exit(void)
369{
370 platform_driver_unregister(&adp5520_bl_driver);
371}
372module_exit(adp5520_bl_exit);
373
374MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
375MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
376MODULE_LICENSE("GPL");
377MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
new file mode 100644
index 000000000000..2c3bdfc620b7
--- /dev/null
+++ b/drivers/video/backlight/adx_bl.c
@@ -0,0 +1,178 @@
1/*
2 * linux/drivers/video/backlight/adx.c
3 *
4 * Copyright (C) 2009 Avionic Design GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by Thierry Reding <thierry.reding@avionic-design.de>
11 */
12
13#include <linux/backlight.h>
14#include <linux/fb.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18
19/* register definitions */
20#define ADX_BACKLIGHT_CONTROL 0x00
21#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
22#define ADX_BACKLIGHT_BRIGHTNESS 0x08
23#define ADX_BACKLIGHT_STATUS 0x10
24#define ADX_BACKLIGHT_ERROR 0x18
25
26struct adxbl {
27 void __iomem *base;
28};
29
30static int adx_backlight_update_status(struct backlight_device *bldev)
31{
32 struct adxbl *bl = bl_get_data(bldev);
33 u32 value;
34
35 value = bldev->props.brightness;
36 writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
37
38 value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
39
40 if (bldev->props.state & BL_CORE_FBBLANK)
41 value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
42 else
43 value |= ADX_BACKLIGHT_CONTROL_ENABLE;
44
45 writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
46
47 return 0;
48}
49
50static int adx_backlight_get_brightness(struct backlight_device *bldev)
51{
52 struct adxbl *bl = bl_get_data(bldev);
53 u32 brightness;
54
55 brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
56 return brightness & 0xff;
57}
58
59static int adx_backlight_check_fb(struct fb_info *fb)
60{
61 return 1;
62}
63
64static struct backlight_ops adx_backlight_ops = {
65 .options = 0,
66 .update_status = adx_backlight_update_status,
67 .get_brightness = adx_backlight_get_brightness,
68 .check_fb = adx_backlight_check_fb,
69};
70
71static int __devinit adx_backlight_probe(struct platform_device *pdev)
72{
73 struct backlight_device *bldev;
74 struct resource *res;
75 struct adxbl *bl;
76 int ret = 0;
77
78 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!res) {
80 ret = -ENXIO;
81 goto out;
82 }
83
84 res = devm_request_mem_region(&pdev->dev, res->start,
85 resource_size(res), res->name);
86 if (!res) {
87 ret = -ENXIO;
88 goto out;
89 }
90
91 bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
92 if (!bl) {
93 ret = -ENOMEM;
94 goto out;
95 }
96
97 bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
98 resource_size(res));
99 if (!bl->base) {
100 ret = -ENXIO;
101 goto out;
102 }
103
104 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl,
105 &adx_backlight_ops);
106 if (!bldev) {
107 ret = -ENOMEM;
108 goto out;
109 }
110
111 bldev->props.max_brightness = 0xff;
112 bldev->props.brightness = 0xff;
113 bldev->props.power = FB_BLANK_UNBLANK;
114
115 platform_set_drvdata(pdev, bldev);
116
117out:
118 return ret;
119}
120
121static int __devexit adx_backlight_remove(struct platform_device *pdev)
122{
123 struct backlight_device *bldev;
124 int ret = 0;
125
126 bldev = platform_get_drvdata(pdev);
127 bldev->props.power = FB_BLANK_UNBLANK;
128 bldev->props.brightness = 0xff;
129 backlight_update_status(bldev);
130 backlight_device_unregister(bldev);
131 platform_set_drvdata(pdev, NULL);
132
133 return ret;
134}
135
136#ifdef CONFIG_PM
137static int adx_backlight_suspend(struct platform_device *pdev,
138 pm_message_t state)
139{
140 return 0;
141}
142
143static int adx_backlight_resume(struct platform_device *pdev)
144{
145 return 0;
146}
147#else
148#define adx_backlight_suspend NULL
149#define adx_backlight_resume NULL
150#endif
151
152static struct platform_driver adx_backlight_driver = {
153 .probe = adx_backlight_probe,
154 .remove = __devexit_p(adx_backlight_remove),
155 .suspend = adx_backlight_suspend,
156 .resume = adx_backlight_resume,
157 .driver = {
158 .name = "adx-backlight",
159 .owner = THIS_MODULE,
160 },
161};
162
163static int __init adx_backlight_init(void)
164{
165 return platform_driver_register(&adx_backlight_driver);
166}
167
168static void __exit adx_backlight_exit(void)
169{
170 platform_driver_unregister(&adx_backlight_driver);
171}
172
173module_init(adx_backlight_init);
174module_exit(adx_backlight_exit);
175
176MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
177MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
178MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 157057c79ca3..6615ac7fa60a 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -73,6 +73,27 @@ static inline void backlight_unregister_fb(struct backlight_device *bd)
73} 73}
74#endif /* CONFIG_FB */ 74#endif /* CONFIG_FB */
75 75
76static void backlight_generate_event(struct backlight_device *bd,
77 enum backlight_update_reason reason)
78{
79 char *envp[2];
80
81 switch (reason) {
82 case BACKLIGHT_UPDATE_SYSFS:
83 envp[0] = "SOURCE=sysfs";
84 break;
85 case BACKLIGHT_UPDATE_HOTKEY:
86 envp[0] = "SOURCE=hotkey";
87 break;
88 default:
89 envp[0] = "SOURCE=unknown";
90 break;
91 }
92 envp[1] = NULL;
93 kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp);
94 sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
95}
96
76static ssize_t backlight_show_power(struct device *dev, 97static ssize_t backlight_show_power(struct device *dev,
77 struct device_attribute *attr,char *buf) 98 struct device_attribute *attr,char *buf)
78{ 99{
@@ -142,6 +163,8 @@ static ssize_t backlight_store_brightness(struct device *dev,
142 } 163 }
143 mutex_unlock(&bd->ops_lock); 164 mutex_unlock(&bd->ops_lock);
144 165
166 backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
167
145 return rc; 168 return rc;
146} 169}
147 170
@@ -214,6 +237,25 @@ static struct device_attribute bl_device_attributes[] = {
214}; 237};
215 238
216/** 239/**
240 * backlight_force_update - tell the backlight subsystem that hardware state
241 * has changed
242 * @bd: the backlight device to update
243 *
244 * Updates the internal state of the backlight in response to a hardware event,
245 * and generate a uevent to notify userspace
246 */
247void backlight_force_update(struct backlight_device *bd,
248 enum backlight_update_reason reason)
249{
250 mutex_lock(&bd->ops_lock);
251 if (bd->ops && bd->ops->get_brightness)
252 bd->props.brightness = bd->ops->get_brightness(bd);
253 mutex_unlock(&bd->ops_lock);
254 backlight_generate_event(bd, reason);
255}
256EXPORT_SYMBOL(backlight_force_update);
257
258/**
217 * backlight_device_register - create and register a new object of 259 * backlight_device_register - create and register a new object of
218 * backlight_device class. 260 * backlight_device class.
219 * @name: the name of the new object(must be the same as the name of the 261 * @name: the name of the new object(must be the same as the name of the
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 2211a852af9c..96774949cd30 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -433,8 +433,9 @@ static int corgi_bl_update_status(struct backlight_device *bd)
433 433
434 if (corgibl_flags & CORGIBL_SUSPENDED) 434 if (corgibl_flags & CORGIBL_SUSPENDED)
435 intensity = 0; 435 intensity = 0;
436 if (corgibl_flags & CORGIBL_BATTLOW) 436
437 intensity &= lcd->limit_mask; 437 if ((corgibl_flags & CORGIBL_BATTLOW) && intensity > lcd->limit_mask)
438 intensity = lcd->limit_mask;
438 439
439 return corgi_bl_set_intensity(lcd, intensity); 440 return corgi_bl_set_intensity(lcd, intensity);
440} 441}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 93bb4340cc64..701a1081e199 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -154,34 +154,38 @@ static int da903x_backlight_remove(struct platform_device *pdev)
154} 154}
155 155
156#ifdef CONFIG_PM 156#ifdef CONFIG_PM
157static int da903x_backlight_suspend(struct platform_device *pdev, 157static int da903x_backlight_suspend(struct device *dev)
158 pm_message_t state)
159{ 158{
159 struct platform_device *pdev = to_platform_device(dev);
160 struct backlight_device *bl = platform_get_drvdata(pdev); 160 struct backlight_device *bl = platform_get_drvdata(pdev);
161 return da903x_backlight_set(bl, 0); 161 return da903x_backlight_set(bl, 0);
162} 162}
163 163
164static int da903x_backlight_resume(struct platform_device *pdev) 164static int da903x_backlight_resume(struct device *dev)
165{ 165{
166 struct platform_device *pdev = to_platform_device(dev);
166 struct backlight_device *bl = platform_get_drvdata(pdev); 167 struct backlight_device *bl = platform_get_drvdata(pdev);
167 168
168 backlight_update_status(bl); 169 backlight_update_status(bl);
169 return 0; 170 return 0;
170} 171}
171#else 172
172#define da903x_backlight_suspend NULL 173static struct dev_pm_ops da903x_backlight_pm_ops = {
173#define da903x_backlight_resume NULL 174 .suspend = da903x_backlight_suspend,
175 .resume = da903x_backlight_resume,
176};
174#endif 177#endif
175 178
176static struct platform_driver da903x_backlight_driver = { 179static struct platform_driver da903x_backlight_driver = {
177 .driver = { 180 .driver = {
178 .name = "da903x-backlight", 181 .name = "da903x-backlight",
179 .owner = THIS_MODULE, 182 .owner = THIS_MODULE,
183#ifdef CONFIG_PM
184 .pm = &da903x_backlight_pm_ops,
185#endif
180 }, 186 },
181 .probe = da903x_backlight_probe, 187 .probe = da903x_backlight_probe,
182 .remove = da903x_backlight_remove, 188 .remove = da903x_backlight_remove,
183 .suspend = da903x_backlight_suspend,
184 .resume = da903x_backlight_resume,
185}; 189};
186 190
187static int __init da903x_backlight_init(void) 191static int __init da903x_backlight_init(void)
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 5be55a20d8c7..7fb4eefff80d 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -103,7 +103,7 @@ static struct backlight_ops hp680bl_ops = {
103 .update_status = hp680bl_set_intensity, 103 .update_status = hp680bl_set_intensity,
104}; 104};
105 105
106static int __init hp680bl_probe(struct platform_device *pdev) 106static int __devinit hp680bl_probe(struct platform_device *pdev)
107{ 107{
108 struct backlight_device *bd; 108 struct backlight_device *bd;
109 109
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index b6449470106c..a482dd7b0311 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -56,7 +56,7 @@ static int fb_notifier_callback(struct notifier_block *self,
56 56
57static int lcd_register_fb(struct lcd_device *ld) 57static int lcd_register_fb(struct lcd_device *ld)
58{ 58{
59 memset(&ld->fb_notif, 0, sizeof(&ld->fb_notif)); 59 memset(&ld->fb_notif, 0, sizeof(ld->fb_notif));
60 ld->fb_notif.notifier_call = fb_notifier_callback; 60 ld->fb_notif.notifier_call = fb_notifier_callback;
61 return fb_register_client(&ld->fb_notif); 61 return fb_register_client(&ld->fb_notif);
62} 62}
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
new file mode 100644
index 000000000000..447b542a20ca
--- /dev/null
+++ b/drivers/video/backlight/lms283gf05.c
@@ -0,0 +1,242 @@
1/*
2 * lms283gf05.c -- support for Samsung LMS283GF05 LCD
3 *
4 * Copyright (c) 2009 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/device.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/lcd.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/lms283gf05.h>
19
20struct lms283gf05_state {
21 struct spi_device *spi;
22 struct lcd_device *ld;
23};
24
25struct lms283gf05_seq {
26 unsigned char reg;
27 unsigned short value;
28 unsigned char delay;
29};
30
31/* Magic sequences supplied by manufacturer, for details refer to datasheet */
32static struct lms283gf05_seq disp_initseq[] = {
33 /* REG, VALUE, DELAY */
34 { 0x07, 0x0000, 0 },
35 { 0x13, 0x0000, 10 },
36
37 { 0x11, 0x3004, 0 },
38 { 0x14, 0x200F, 0 },
39 { 0x10, 0x1a20, 0 },
40 { 0x13, 0x0040, 50 },
41
42 { 0x13, 0x0060, 0 },
43 { 0x13, 0x0070, 200 },
44
45 { 0x01, 0x0127, 0 },
46 { 0x02, 0x0700, 0 },
47 { 0x03, 0x1030, 0 },
48 { 0x08, 0x0208, 0 },
49 { 0x0B, 0x0620, 0 },
50 { 0x0C, 0x0110, 0 },
51 { 0x30, 0x0120, 0 },
52 { 0x31, 0x0127, 0 },
53 { 0x32, 0x0000, 0 },
54 { 0x33, 0x0503, 0 },
55 { 0x34, 0x0727, 0 },
56 { 0x35, 0x0124, 0 },
57 { 0x36, 0x0706, 0 },
58 { 0x37, 0x0701, 0 },
59 { 0x38, 0x0F00, 0 },
60 { 0x39, 0x0F00, 0 },
61 { 0x40, 0x0000, 0 },
62 { 0x41, 0x0000, 0 },
63 { 0x42, 0x013f, 0 },
64 { 0x43, 0x0000, 0 },
65 { 0x44, 0x013f, 0 },
66 { 0x45, 0x0000, 0 },
67 { 0x46, 0xef00, 0 },
68 { 0x47, 0x013f, 0 },
69 { 0x48, 0x0000, 0 },
70 { 0x07, 0x0015, 30 },
71
72 { 0x07, 0x0017, 0 },
73
74 { 0x20, 0x0000, 0 },
75 { 0x21, 0x0000, 0 },
76 { 0x22, 0x0000, 0 }
77};
78
79static struct lms283gf05_seq disp_pdwnseq[] = {
80 { 0x07, 0x0016, 30 },
81
82 { 0x07, 0x0004, 0 },
83 { 0x10, 0x0220, 20 },
84
85 { 0x13, 0x0060, 50 },
86
87 { 0x13, 0x0040, 50 },
88
89 { 0x13, 0x0000, 0 },
90 { 0x10, 0x0000, 0 }
91};
92
93
94static void lms283gf05_reset(unsigned long gpio, bool inverted)
95{
96 gpio_set_value(gpio, !inverted);
97 mdelay(100);
98 gpio_set_value(gpio, inverted);
99 mdelay(20);
100 gpio_set_value(gpio, !inverted);
101 mdelay(20);
102}
103
104static void lms283gf05_toggle(struct spi_device *spi,
105 struct lms283gf05_seq *seq, int sz)
106{
107 char buf[3];
108 int i;
109
110 for (i = 0; i < sz; i++) {
111 buf[0] = 0x74;
112 buf[1] = 0x00;
113 buf[2] = seq[i].reg;
114 spi_write(spi, buf, 3);
115
116 buf[0] = 0x76;
117 buf[1] = seq[i].value >> 8;
118 buf[2] = seq[i].value & 0xff;
119 spi_write(spi, buf, 3);
120
121 mdelay(seq[i].delay);
122 }
123}
124
125static int lms283gf05_power_set(struct lcd_device *ld, int power)
126{
127 struct lms283gf05_state *st = lcd_get_data(ld);
128 struct spi_device *spi = st->spi;
129 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
130
131 if (power) {
132 if (pdata)
133 lms283gf05_reset(pdata->reset_gpio,
134 pdata->reset_inverted);
135 lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
136 } else {
137 lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
138 if (pdata)
139 gpio_set_value(pdata->reset_gpio,
140 pdata->reset_inverted);
141 }
142
143 return 0;
144}
145
146static struct lcd_ops lms_ops = {
147 .set_power = lms283gf05_power_set,
148 .get_power = NULL,
149};
150
151static int __devinit lms283gf05_probe(struct spi_device *spi)
152{
153 struct lms283gf05_state *st;
154 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
155 struct lcd_device *ld;
156 int ret = 0;
157
158 if (pdata != NULL) {
159 ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET");
160 if (ret)
161 return ret;
162
163 ret = gpio_direction_output(pdata->reset_gpio,
164 !pdata->reset_inverted);
165 if (ret)
166 goto err;
167 }
168
169 st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
170 if (st == NULL) {
171 dev_err(&spi->dev, "No memory for device state\n");
172 ret = -ENOMEM;
173 goto err;
174 }
175
176 ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
177 if (IS_ERR(ld)) {
178 ret = PTR_ERR(ld);
179 goto err2;
180 }
181
182 st->spi = spi;
183 st->ld = ld;
184
185 dev_set_drvdata(&spi->dev, st);
186
187 /* kick in the LCD */
188 if (pdata)
189 lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
190 lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
191
192 return 0;
193
194err2:
195 kfree(st);
196err:
197 if (pdata != NULL)
198 gpio_free(pdata->reset_gpio);
199
200 return ret;
201}
202
203static int __devexit lms283gf05_remove(struct spi_device *spi)
204{
205 struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
206 struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;
207
208 lcd_device_unregister(st->ld);
209
210 if (pdata != NULL)
211 gpio_free(pdata->reset_gpio);
212
213 kfree(st);
214
215 return 0;
216}
217
218static struct spi_driver lms283gf05_driver = {
219 .driver = {
220 .name = "lms283gf05",
221 .owner = THIS_MODULE,
222 },
223 .probe = lms283gf05_probe,
224 .remove = __devexit_p(lms283gf05_remove),
225};
226
227static __init int lms283gf05_init(void)
228{
229 return spi_register_driver(&lms283gf05_driver);
230}
231
232static __exit void lms283gf05_exit(void)
233{
234 spi_unregister_driver(&lms283gf05_driver);
235}
236
237module_init(lms283gf05_init);
238module_exit(lms283gf05_exit);
239
240MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
241MODULE_DESCRIPTION("LCD283GF05 LCD");
242MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 3bb4c0a50c62..9edb8d7c295f 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -166,6 +166,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
166 }, 166 },
167 { 167 {
168 .callback = mbp_dmi_match, 168 .callback = mbp_dmi_match,
169 .ident = "MacBookAir 1,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
169 .ident = "MacBook 5,1", 178 .ident = "MacBook 5,1",
170 .matches = { 179 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -175,6 +184,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
175 }, 184 },
176 { 185 {
177 .callback = mbp_dmi_match, 186 .callback = mbp_dmi_match,
187 .ident = "MacBook 5,2",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
191 },
192 .driver_data = (void *)&nvidia_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
178 .ident = "MacBookAir 2,1", 196 .ident = "MacBookAir 2,1",
179 .matches = { 197 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -191,6 +209,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
191 }, 209 },
192 .driver_data = (void *)&nvidia_chipset_data, 210 .driver_data = (void *)&nvidia_chipset_data,
193 }, 211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 5,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
218 },
219 .driver_data = (void *)&nvidia_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 5,5",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
227 },
228 .driver_data = (void *)&nvidia_chipset_data,
229 },
194 { } 230 { }
195}; 231};
196 232
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
new file mode 100644
index 000000000000..467bdb7efb23
--- /dev/null
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -0,0 +1,250 @@
1/*
2 * Backlight driver for Wolfson Microelectronics WM831x PMICs
3 *
4 * Copyright 2009 Wolfson Microelectonics plc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/fb.h>
15#include <linux/backlight.h>
16
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/regulator.h>
20
21struct wm831x_backlight_data {
22 struct wm831x *wm831x;
23 int isink_reg;
24 int current_brightness;
25};
26
27static int wm831x_backlight_set(struct backlight_device *bl, int brightness)
28{
29 struct wm831x_backlight_data *data = bl_get_data(bl);
30 struct wm831x *wm831x = data->wm831x;
31 int power_up = !data->current_brightness && brightness;
32 int power_down = data->current_brightness && !brightness;
33 int ret;
34
35 if (power_up) {
36 /* Enable the ISINK */
37 ret = wm831x_set_bits(wm831x, data->isink_reg,
38 WM831X_CS1_ENA, WM831X_CS1_ENA);
39 if (ret < 0)
40 goto err;
41
42 /* Enable the DC-DC */
43 ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
44 WM831X_DC4_ENA, WM831X_DC4_ENA);
45 if (ret < 0)
46 goto err;
47 }
48
49 if (power_down) {
50 /* DCDC first */
51 ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
52 WM831X_DC4_ENA, 0);
53 if (ret < 0)
54 goto err;
55
56 /* ISINK */
57 ret = wm831x_set_bits(wm831x, data->isink_reg,
58 WM831X_CS1_DRIVE | WM831X_CS1_ENA, 0);
59 if (ret < 0)
60 goto err;
61 }
62
63 /* Set the new brightness */
64 ret = wm831x_set_bits(wm831x, data->isink_reg,
65 WM831X_CS1_ISEL_MASK, brightness);
66 if (ret < 0)
67 goto err;
68
69 if (power_up) {
70 /* Drive current through the ISINK */
71 ret = wm831x_set_bits(wm831x, data->isink_reg,
72 WM831X_CS1_DRIVE, WM831X_CS1_DRIVE);
73 if (ret < 0)
74 return ret;
75 }
76
77 data->current_brightness = brightness;
78
79 return 0;
80
81err:
82 /* If we were in the middle of a power transition always shut down
83 * for safety.
84 */
85 if (power_up || power_down) {
86 wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
87 wm831x_set_bits(wm831x, data->isink_reg, WM831X_CS1_ENA, 0);
88 }
89
90 return ret;
91}
92
93static int wm831x_backlight_update_status(struct backlight_device *bl)
94{
95 int brightness = bl->props.brightness;
96
97 if (bl->props.power != FB_BLANK_UNBLANK)
98 brightness = 0;
99
100 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
101 brightness = 0;
102
103 if (bl->props.state & BL_CORE_SUSPENDED)
104 brightness = 0;
105
106 return wm831x_backlight_set(bl, brightness);
107}
108
109static int wm831x_backlight_get_brightness(struct backlight_device *bl)
110{
111 struct wm831x_backlight_data *data = bl_get_data(bl);
112 return data->current_brightness;
113}
114
115static struct backlight_ops wm831x_backlight_ops = {
116 .options = BL_CORE_SUSPENDRESUME,
117 .update_status = wm831x_backlight_update_status,
118 .get_brightness = wm831x_backlight_get_brightness,
119};
120
121static int wm831x_backlight_probe(struct platform_device *pdev)
122{
123 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
124 struct wm831x_pdata *wm831x_pdata;
125 struct wm831x_backlight_pdata *pdata;
126 struct wm831x_backlight_data *data;
127 struct backlight_device *bl;
128 int ret, i, max_isel, isink_reg, dcdc_cfg;
129
130 /* We need platform data */
131 if (pdev->dev.parent->platform_data) {
132 wm831x_pdata = pdev->dev.parent->platform_data;
133 pdata = wm831x_pdata->backlight;
134 } else {
135 pdata = NULL;
136 }
137
138 if (!pdata) {
139 dev_err(&pdev->dev, "No platform data supplied\n");
140 return -EINVAL;
141 }
142
143 /* Figure out the maximum current we can use */
144 for (i = 0; i < WM831X_ISINK_MAX_ISEL; i++) {
145 if (wm831x_isinkv_values[i] > pdata->max_uA)
146 break;
147 }
148
149 if (i == 0) {
150 dev_err(&pdev->dev, "Invalid max_uA: %duA\n", pdata->max_uA);
151 return -EINVAL;
152 }
153 max_isel = i - 1;
154
155 if (pdata->max_uA != wm831x_isinkv_values[max_isel])
156 dev_warn(&pdev->dev,
157 "Maximum current is %duA not %duA as requested\n",
158 wm831x_isinkv_values[max_isel], pdata->max_uA);
159
160 switch (pdata->isink) {
161 case 1:
162 isink_reg = WM831X_CURRENT_SINK_1;
163 dcdc_cfg = 0;
164 break;
165 case 2:
166 isink_reg = WM831X_CURRENT_SINK_2;
167 dcdc_cfg = WM831X_DC4_FBSRC;
168 break;
169 default:
170 dev_err(&pdev->dev, "Invalid ISINK %d\n", pdata->isink);
171 return -EINVAL;
172 }
173
174 /* Configure the ISINK to use for feedback */
175 ret = wm831x_reg_unlock(wm831x);
176 if (ret < 0)
177 return ret;
178
179 ret = wm831x_set_bits(wm831x, WM831X_DC4_CONTROL, WM831X_DC4_FBSRC,
180 dcdc_cfg);
181
182 wm831x_reg_lock(wm831x);
183 if (ret < 0)
184 return ret;
185
186 data = kzalloc(sizeof(*data), GFP_KERNEL);
187 if (data == NULL)
188 return -ENOMEM;
189
190 data->wm831x = wm831x;
191 data->current_brightness = 0;
192 data->isink_reg = isink_reg;
193
194 bl = backlight_device_register("wm831x", &pdev->dev,
195 data, &wm831x_backlight_ops);
196 if (IS_ERR(bl)) {
197 dev_err(&pdev->dev, "failed to register backlight\n");
198 kfree(data);
199 return PTR_ERR(bl);
200 }
201
202 bl->props.max_brightness = max_isel;
203 bl->props.brightness = max_isel;
204
205 platform_set_drvdata(pdev, bl);
206
207 /* Disable the DCDC if it was started so we can bootstrap */
208 wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
209
210
211 backlight_update_status(bl);
212
213 return 0;
214}
215
216static int wm831x_backlight_remove(struct platform_device *pdev)
217{
218 struct backlight_device *bl = platform_get_drvdata(pdev);
219 struct wm831x_backlight_data *data = bl_get_data(bl);
220
221 backlight_device_unregister(bl);
222 kfree(data);
223 return 0;
224}
225
226static struct platform_driver wm831x_backlight_driver = {
227 .driver = {
228 .name = "wm831x-backlight",
229 .owner = THIS_MODULE,
230 },
231 .probe = wm831x_backlight_probe,
232 .remove = wm831x_backlight_remove,
233};
234
235static int __init wm831x_backlight_init(void)
236{
237 return platform_driver_register(&wm831x_backlight_driver);
238}
239module_init(wm831x_backlight_init);
240
241static void __exit wm831x_backlight_exit(void)
242{
243 platform_driver_unregister(&wm831x_backlight_driver);
244}
245module_exit(wm831x_backlight_exit);
246
247MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
248MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
249MODULE_LICENSE("GPL");
250MODULE_ALIAS("platform:wm831x-backlight");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 5a686cea23f4..3681c6a88212 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2311,14 +2311,11 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
2311 ops->graphics = 1; 2311 ops->graphics = 1;
2312 2312
2313 if (!blank) { 2313 if (!blank) {
2314 if (info->fbops->fb_save_state)
2315 info->fbops->fb_save_state(info);
2316 var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; 2314 var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
2317 fb_set_var(info, &var); 2315 fb_set_var(info, &var);
2318 ops->graphics = 0; 2316 ops->graphics = 0;
2319 ops->var = info->var; 2317 ops->var = info->var;
2320 } else if (info->fbops->fb_restore_state) 2318 }
2321 info->fbops->fb_restore_state(info);
2322 } 2319 }
2323 2320
2324 if (!fbcon_is_inactive(vc, info)) { 2321 if (!fbcon_is_inactive(vc, info)) {
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 42e1005e2916..035d56835b75 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/device.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
31#include <linux/clk.h> 30#include <linux/clk.h>
32#include <video/da8xx-fb.h> 31#include <video/da8xx-fb.h>
@@ -705,7 +704,7 @@ static int __init fb_probe(struct platform_device *device)
705 704
706 if (i == ARRAY_SIZE(known_lcd_panels)) { 705 if (i == ARRAY_SIZE(known_lcd_panels)) {
707 dev_err(&device->dev, "GLCD: No valid panel found\n"); 706 dev_err(&device->dev, "GLCD: No valid panel found\n");
708 ret = ENODEV; 707 ret = -ENODEV;
709 goto err_clk_disable; 708 goto err_clk_disable;
710 } else 709 } else
711 dev_info(&device->dev, "GLCD: Found %s panel\n", 710 dev_info(&device->dev, "GLCD: Found %s panel\n",
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 0a7a6679ee6e..c27ab1ed9604 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -125,7 +125,7 @@ page_already_added:
125 return 0; 125 return 0;
126} 126}
127 127
128static struct vm_operations_struct fb_deferred_io_vm_ops = { 128static const struct vm_operations_struct fb_deferred_io_vm_ops = {
129 .fault = fb_deferred_io_fault, 129 .fault = fb_deferred_io_fault,
130 .page_mkwrite = fb_deferred_io_mkwrite, 130 .page_mkwrite = fb_deferred_io_mkwrite,
131}; 131};
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a1f2e7ce730b..99bbd282ce63 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1800,7 +1800,7 @@ static int __init video_setup(char *options)
1800 global = 1; 1800 global = 1;
1801 } 1801 }
1802 1802
1803 if (!global && !strstr(options, "fb:")) { 1803 if (!global && !strchr(options, ':')) {
1804 fb_mode_option = options; 1804 fb_mode_option = options;
1805 global = 1; 1805 global = 1;
1806 } 1806 }
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index f2de5a1acd6d..474421fe79a6 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -24,11 +24,10 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/sched.h>
27#include <mach/msm_iomap.h> 28#include <mach/msm_iomap.h>
28#include <mach/irqs.h> 29#include <mach/irqs.h>
29#include <mach/board.h> 30#include <mach/board.h>
30#include <linux/delay.h>
31
32#include <mach/msm_fb.h> 31#include <mach/msm_fb.h>
33#include "mddi_hw.h" 32#include "mddi_hw.h"
34 33
diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/msm/mddi_client_nt35399.c
index 9c78050ac799..c9e9349451cb 100644
--- a/drivers/video/msm/mddi_client_nt35399.c
+++ b/drivers/video/msm/mddi_client_nt35399.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/sched.h>
22#include <linux/gpio.h> 23#include <linux/gpio.h>
23#include <mach/msm_fb.h> 24#include <mach/msm_fb.h>
24 25
diff --git a/drivers/video/msm/mddi_client_toshiba.c b/drivers/video/msm/mddi_client_toshiba.c
index 80d0f5fdf0b1..71048e78f7f0 100644
--- a/drivers/video/msm/mddi_client_toshiba.c
+++ b/drivers/video/msm/mddi_client_toshiba.c
@@ -20,6 +20,7 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/sched.h>
23#include <mach/msm_fb.h> 24#include <mach/msm_fb.h>
24 25
25 26
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 99636a2b20f2..6c519e2fa2b7 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -22,9 +22,6 @@
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/file.h> 24#include <linux/file.h>
25#ifdef CONFIG_ANDROID_PMEM
26#include <linux/android_pmem.h>
27#endif
28#include <linux/major.h> 25#include <linux/major.h>
29 26
30#include <mach/msm_iomap.h> 27#include <mach/msm_iomap.h>
@@ -262,11 +259,6 @@ int get_img(struct mdp_img *img, struct fb_info *info,
262 struct file *file; 259 struct file *file;
263 unsigned long vstart; 260 unsigned long vstart;
264 261
265#ifdef CONFIG_ANDROID_PMEM
266 if (!get_pmem_file(img->memory_id, start, &vstart, len, filep))
267 return 0;
268#endif
269
270 file = fget_light(img->memory_id, &put_needed); 262 file = fget_light(img->memory_id, &put_needed);
271 if (file == NULL) 263 if (file == NULL)
272 return -1; 264 return -1;
@@ -283,12 +275,6 @@ int get_img(struct mdp_img *img, struct fb_info *info,
283 275
284void put_img(struct file *src_file, struct file *dst_file) 276void put_img(struct file *src_file, struct file *dst_file)
285{ 277{
286#ifdef CONFIG_ANDROID_PMEM
287 if (src_file)
288 put_pmem_file(src_file);
289 if (dst_file)
290 put_pmem_file(dst_file);
291#endif
292} 278}
293 279
294int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb, 280int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
@@ -320,9 +306,6 @@ int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
320 if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) { 306 if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
321 printk(KERN_ERR "mpd_ppp: could not retrieve dst image from " 307 printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
322 "memory\n"); 308 "memory\n");
323#ifdef CONFIG_ANDROID_PMEM
324 put_pmem_file(src_file);
325#endif
326 return -EINVAL; 309 return -EINVAL;
327 } 310 }
328 mutex_lock(&mdp_mutex); 311 mutex_lock(&mdp_mutex);
@@ -499,7 +482,6 @@ int mdp_probe(struct platform_device *pdev)
499 /* register mdp device */ 482 /* register mdp device */
500 mdp->mdp_dev.dev.parent = &pdev->dev; 483 mdp->mdp_dev.dev.parent = &pdev->dev;
501 mdp->mdp_dev.dev.class = mdp_class; 484 mdp->mdp_dev.dev.class = mdp_class;
502 snprintf(mdp->mdp_dev.dev.bus_id, BUS_ID_SIZE, "mdp%d", pdev->id);
503 485
504 /* if you can remove the platform device you'd have to implement 486 /* if you can remove the platform device you'd have to implement
505 * this: 487 * this:
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index ba2c4673b648..4ff001f4cbbd 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -16,7 +16,6 @@
16#include <linux/file.h> 16#include <linux/file.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/msm_mdp.h> 18#include <linux/msm_mdp.h>
19#include <linux/android_pmem.h>
20#include <mach/msm_fb.h> 19#include <mach/msm_fb.h>
21 20
22#include "mdp_hw.h" 21#include "mdp_hw.h"
@@ -579,25 +578,6 @@ static int valid_src_dst(unsigned long src_start, unsigned long src_len,
579static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs, 578static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs,
580 struct file *src_file, struct file *dst_file) 579 struct file *src_file, struct file *dst_file)
581{ 580{
582#ifdef CONFIG_ANDROID_PMEM
583 uint32_t src0_len, src1_len, dst0_len, dst1_len;
584
585 /* flush src images to memory before dma to mdp */
586 get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len,
587 &src1_len);
588 flush_pmem_file(src_file, req->src.offset, src0_len);
589 if (IS_PSEUDOPLNR(req->src.format))
590 flush_pmem_file(src_file, req->src.offset + src0_len,
591 src1_len);
592
593 /* flush dst images */
594 get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len,
595 &dst1_len);
596 flush_pmem_file(dst_file, req->dst.offset, dst0_len);
597 if (IS_PSEUDOPLNR(req->dst.format))
598 flush_pmem_file(dst_file, req->dst.offset + dst0_len,
599 dst1_len);
600#endif
601} 581}
602 582
603static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect, 583static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect,
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index d5e59556f9e2..70dadf9d2334 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -93,7 +93,7 @@ struct blizzard_reg_list {
93}; 93};
94 94
95/* These need to be saved / restored separately from the rest. */ 95/* These need to be saved / restored separately from the rest. */
96static struct blizzard_reg_list blizzard_pll_regs[] = { 96static const struct blizzard_reg_list blizzard_pll_regs[] = {
97 { 97 {
98 .start = 0x04, /* Don't save PLL ctrl (0x0C) */ 98 .start = 0x04, /* Don't save PLL ctrl (0x0C) */
99 .end = 0x0a, 99 .end = 0x0a,
@@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = {
104 }, 104 },
105}; 105};
106 106
107static struct blizzard_reg_list blizzard_gen_regs[] = { 107static const struct blizzard_reg_list blizzard_gen_regs[] = {
108 { 108 {
109 .start = 0x18, /* SDRAM control */ 109 .start = 0x18, /* SDRAM control */
110 .end = 0x20, 110 .end = 0x20,
@@ -191,7 +191,7 @@ struct blizzard_struct {
191 191
192 struct omapfb_device *fbdev; 192 struct omapfb_device *fbdev;
193 struct lcd_ctrl_extif *extif; 193 struct lcd_ctrl_extif *extif;
194 struct lcd_ctrl *int_ctrl; 194 const struct lcd_ctrl *int_ctrl;
195 195
196 void (*power_up)(struct device *dev); 196 void (*power_up)(struct device *dev);
197 void (*power_down)(struct device *dev); 197 void (*power_down)(struct device *dev);
@@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
1372 (1 << OMAPFB_COLOR_YUV420); 1372 (1 << OMAPFB_COLOR_YUV420);
1373} 1373}
1374 1374
1375static void _save_regs(struct blizzard_reg_list *list, int cnt) 1375static void _save_regs(const struct blizzard_reg_list *list, int cnt)
1376{ 1376{
1377 int i; 1377 int i;
1378 1378
@@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt)
1383 } 1383 }
1384} 1384}
1385 1385
1386static void _restore_regs(struct blizzard_reg_list *list, int cnt) 1386static void _restore_regs(const struct blizzard_reg_list *list, int cnt)
1387{ 1387{
1388 int i; 1388 int i;
1389 1389
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 80a11d078df4..f16e42154229 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -1035,7 +1035,7 @@ static void mmap_user_close(struct vm_area_struct *vma)
1035 atomic_dec(&dispc.map_count[plane]); 1035 atomic_dec(&dispc.map_count[plane]);
1036} 1036}
1037 1037
1038static struct vm_operations_struct mmap_user_ops = { 1038static const struct vm_operations_struct mmap_user_ops = {
1039 .open = mmap_user_open, 1039 .open = mmap_user_open,
1040 .close = mmap_user_close, 1040 .close = mmap_user_close,
1041}; 1041};
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 125e605b8c68..0d0c8c8b9b56 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi)
393 * Set fb_info.fix fields and also updates fbdev. 393 * Set fb_info.fix fields and also updates fbdev.
394 * When calling this fb_info.var must be set up already. 394 * When calling this fb_info.var must be set up already.
395 */ 395 */
396static void set_fb_fix(struct fb_info *fbi) 396static void set_fb_fix(struct fb_info *fbi, int from_init)
397{ 397{
398 struct fb_fix_screeninfo *fix = &fbi->fix; 398 struct fb_fix_screeninfo *fix = &fbi->fix;
399 struct fb_var_screeninfo *var = &fbi->var; 399 struct fb_var_screeninfo *var = &fbi->var;
@@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi)
403 403
404 rg = &plane->fbdev->mem_desc.region[plane->idx]; 404 rg = &plane->fbdev->mem_desc.region[plane->idx];
405 fbi->screen_base = rg->vaddr; 405 fbi->screen_base = rg->vaddr;
406 mutex_lock(&fbi->mm_lock); 406
407 fix->smem_start = rg->paddr; 407 if (!from_init) {
408 fix->smem_len = rg->size; 408 mutex_lock(&fbi->mm_lock);
409 mutex_unlock(&fbi->mm_lock); 409 fix->smem_start = rg->paddr;
410 fix->smem_len = rg->size;
411 mutex_unlock(&fbi->mm_lock);
412 } else {
413 fix->smem_start = rg->paddr;
414 fix->smem_len = rg->size;
415 }
410 416
411 fix->type = FB_TYPE_PACKED_PIXELS; 417 fix->type = FB_TYPE_PACKED_PIXELS;
412 bpp = var->bits_per_pixel; 418 bpp = var->bits_per_pixel;
@@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi)
704 int r = 0; 710 int r = 0;
705 711
706 omapfb_rqueue_lock(fbdev); 712 omapfb_rqueue_lock(fbdev);
707 set_fb_fix(fbi); 713 set_fb_fix(fbi, 0);
708 r = ctrl_change_mode(fbi); 714 r = ctrl_change_mode(fbi);
709 omapfb_rqueue_unlock(fbdev); 715 omapfb_rqueue_unlock(fbdev);
710 716
@@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
904 if (old_size != size) { 910 if (old_size != size) {
905 if (size) { 911 if (size) {
906 memcpy(&fbi->var, new_var, sizeof(fbi->var)); 912 memcpy(&fbi->var, new_var, sizeof(fbi->var));
907 set_fb_fix(fbi); 913 set_fb_fix(fbi, 0);
908 } else { 914 } else {
909 /* 915 /*
910 * Set these explicitly to indicate that the 916 * Set these explicitly to indicate that the
@@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
1504 var->bits_per_pixel = fbdev->panel->bpp; 1510 var->bits_per_pixel = fbdev->panel->bpp;
1505 1511
1506 set_fb_var(info, var); 1512 set_fb_var(info, var);
1507 set_fb_fix(info); 1513 set_fb_fix(info, 1);
1508 1514
1509 r = fb_alloc_cmap(&info->cmap, 16, 0); 1515 r = fb_alloc_cmap(&info->cmap, 16, 0);
1510 if (r != 0) 1516 if (r != 0)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 6506117c134b..1820c4a24434 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1638,24 +1638,26 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1638 * Power management hooks. Note that we won't be called from IRQ context, 1638 * Power management hooks. Note that we won't be called from IRQ context,
1639 * unlike the blank functions above, so we may sleep. 1639 * unlike the blank functions above, so we may sleep.
1640 */ 1640 */
1641static int pxafb_suspend(struct platform_device *dev, pm_message_t state) 1641static int pxafb_suspend(struct device *dev)
1642{ 1642{
1643 struct pxafb_info *fbi = platform_get_drvdata(dev); 1643 struct pxafb_info *fbi = dev_get_drvdata(dev);
1644 1644
1645 set_ctrlr_state(fbi, C_DISABLE_PM); 1645 set_ctrlr_state(fbi, C_DISABLE_PM);
1646 return 0; 1646 return 0;
1647} 1647}
1648 1648
1649static int pxafb_resume(struct platform_device *dev) 1649static int pxafb_resume(struct device *dev)
1650{ 1650{
1651 struct pxafb_info *fbi = platform_get_drvdata(dev); 1651 struct pxafb_info *fbi = dev_get_drvdata(dev);
1652 1652
1653 set_ctrlr_state(fbi, C_ENABLE_PM); 1653 set_ctrlr_state(fbi, C_ENABLE_PM);
1654 return 0; 1654 return 0;
1655} 1655}
1656#else 1656
1657#define pxafb_suspend NULL 1657static struct dev_pm_ops pxafb_pm_ops = {
1658#define pxafb_resume NULL 1658 .suspend = pxafb_suspend,
1659 .resume = pxafb_resume,
1660};
1659#endif 1661#endif
1660 1662
1661static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi) 1663static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
@@ -2081,6 +2083,9 @@ static int __devinit pxafb_probe(struct platform_device *dev)
2081 goto failed; 2083 goto failed;
2082 } 2084 }
2083 2085
2086 if (cpu_is_pxa3xx() && inf->acceleration_enabled)
2087 fbi->fb.fix.accel = FB_ACCEL_PXA3XX;
2088
2084 fbi->backlight_power = inf->pxafb_backlight_power; 2089 fbi->backlight_power = inf->pxafb_backlight_power;
2085 fbi->lcd_power = inf->pxafb_lcd_power; 2090 fbi->lcd_power = inf->pxafb_lcd_power;
2086 2091
@@ -2091,14 +2096,14 @@ static int __devinit pxafb_probe(struct platform_device *dev)
2091 goto failed_fbi; 2096 goto failed_fbi;
2092 } 2097 }
2093 2098
2094 r = request_mem_region(r->start, r->end - r->start + 1, dev->name); 2099 r = request_mem_region(r->start, resource_size(r), dev->name);
2095 if (r == NULL) { 2100 if (r == NULL) {
2096 dev_err(&dev->dev, "failed to request I/O memory\n"); 2101 dev_err(&dev->dev, "failed to request I/O memory\n");
2097 ret = -EBUSY; 2102 ret = -EBUSY;
2098 goto failed_fbi; 2103 goto failed_fbi;
2099 } 2104 }
2100 2105
2101 fbi->mmio_base = ioremap(r->start, r->end - r->start + 1); 2106 fbi->mmio_base = ioremap(r->start, resource_size(r));
2102 if (fbi->mmio_base == NULL) { 2107 if (fbi->mmio_base == NULL) {
2103 dev_err(&dev->dev, "failed to map I/O memory\n"); 2108 dev_err(&dev->dev, "failed to map I/O memory\n");
2104 ret = -EBUSY; 2109 ret = -EBUSY;
@@ -2197,7 +2202,7 @@ failed_free_dma:
2197failed_free_io: 2202failed_free_io:
2198 iounmap(fbi->mmio_base); 2203 iounmap(fbi->mmio_base);
2199failed_free_res: 2204failed_free_res:
2200 release_mem_region(r->start, r->end - r->start + 1); 2205 release_mem_region(r->start, resource_size(r));
2201failed_fbi: 2206failed_fbi:
2202 clk_put(fbi->clk); 2207 clk_put(fbi->clk);
2203 platform_set_drvdata(dev, NULL); 2208 platform_set_drvdata(dev, NULL);
@@ -2237,7 +2242,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
2237 iounmap(fbi->mmio_base); 2242 iounmap(fbi->mmio_base);
2238 2243
2239 r = platform_get_resource(dev, IORESOURCE_MEM, 0); 2244 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
2240 release_mem_region(r->start, r->end - r->start + 1); 2245 release_mem_region(r->start, resource_size(r));
2241 2246
2242 clk_put(fbi->clk); 2247 clk_put(fbi->clk);
2243 kfree(fbi); 2248 kfree(fbi);
@@ -2248,11 +2253,12 @@ static int __devexit pxafb_remove(struct platform_device *dev)
2248static struct platform_driver pxafb_driver = { 2253static struct platform_driver pxafb_driver = {
2249 .probe = pxafb_probe, 2254 .probe = pxafb_probe,
2250 .remove = __devexit_p(pxafb_remove), 2255 .remove = __devexit_p(pxafb_remove),
2251 .suspend = pxafb_suspend,
2252 .resume = pxafb_resume,
2253 .driver = { 2256 .driver = {
2254 .owner = THIS_MODULE, 2257 .owner = THIS_MODULE,
2255 .name = "pxa2xx-fb", 2258 .name = "pxa2xx-fb",
2259#ifdef CONFIG_PM
2260 .pm = &pxafb_pm_ops,
2261#endif
2256 }, 2262 },
2257}; 2263};
2258 2264
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 37b135d5d12e..842d157e1025 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1565,7 +1565,7 @@ static int savagefb_blank(int blank, struct fb_info *info)
1565 vga_out8(0x3c5, sr8, par); 1565 vga_out8(0x3c5, sr8, par);
1566 vga_out8(0x3c4, 0x0d, par); 1566 vga_out8(0x3c4, 0x0d, par);
1567 srd = vga_in8(0x3c5, par); 1567 srd = vga_in8(0x3c5, par);
1568 srd &= 0x03; 1568 srd &= 0x50;
1569 1569
1570 switch (blank) { 1570 switch (blank) {
1571 case FB_BLANK_UNBLANK: 1571 case FB_BLANK_UNBLANK:
@@ -1606,22 +1606,6 @@ static int savagefb_blank(int blank, struct fb_info *info)
1606 return (blank == FB_BLANK_NORMAL) ? 1 : 0; 1606 return (blank == FB_BLANK_NORMAL) ? 1 : 0;
1607} 1607}
1608 1608
1609static void savagefb_save_state(struct fb_info *info)
1610{
1611 struct savagefb_par *par = info->par;
1612
1613 savage_get_default_par(par, &par->save);
1614}
1615
1616static void savagefb_restore_state(struct fb_info *info)
1617{
1618 struct savagefb_par *par = info->par;
1619
1620 savagefb_blank(FB_BLANK_POWERDOWN, info);
1621 savage_set_default_par(par, &par->save);
1622 savagefb_blank(FB_BLANK_UNBLANK, info);
1623}
1624
1625static int savagefb_open(struct fb_info *info, int user) 1609static int savagefb_open(struct fb_info *info, int user)
1626{ 1610{
1627 struct savagefb_par *par = info->par; 1611 struct savagefb_par *par = info->par;
@@ -1667,8 +1651,6 @@ static struct fb_ops savagefb_ops = {
1667 .fb_setcolreg = savagefb_setcolreg, 1651 .fb_setcolreg = savagefb_setcolreg,
1668 .fb_pan_display = savagefb_pan_display, 1652 .fb_pan_display = savagefb_pan_display,
1669 .fb_blank = savagefb_blank, 1653 .fb_blank = savagefb_blank,
1670 .fb_save_state = savagefb_save_state,
1671 .fb_restore_state = savagefb_restore_state,
1672#if defined(CONFIG_FB_SAVAGE_ACCEL) 1654#if defined(CONFIG_FB_SAVAGE_ACCEL)
1673 .fb_fillrect = savagefb_fillrect, 1655 .fb_fillrect = savagefb_fillrect,
1674 .fb_copyarea = savagefb_copyarea, 1656 .fb_copyarea = savagefb_copyarea,
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e35232a18571..54fbb2995a5f 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1411,23 +1411,6 @@ static int uvesafb_check_var(struct fb_var_screeninfo *var,
1411 return 0; 1411 return 0;
1412} 1412}
1413 1413
1414static void uvesafb_save_state(struct fb_info *info)
1415{
1416 struct uvesafb_par *par = info->par;
1417
1418 if (par->vbe_state_saved)
1419 kfree(par->vbe_state_saved);
1420
1421 par->vbe_state_saved = uvesafb_vbe_state_save(par);
1422}
1423
1424static void uvesafb_restore_state(struct fb_info *info)
1425{
1426 struct uvesafb_par *par = info->par;
1427
1428 uvesafb_vbe_state_restore(par, par->vbe_state_saved);
1429}
1430
1431static struct fb_ops uvesafb_ops = { 1414static struct fb_ops uvesafb_ops = {
1432 .owner = THIS_MODULE, 1415 .owner = THIS_MODULE,
1433 .fb_open = uvesafb_open, 1416 .fb_open = uvesafb_open,
@@ -1441,8 +1424,6 @@ static struct fb_ops uvesafb_ops = {
1441 .fb_imageblit = cfb_imageblit, 1424 .fb_imageblit = cfb_imageblit,
1442 .fb_check_var = uvesafb_check_var, 1425 .fb_check_var = uvesafb_check_var,
1443 .fb_set_par = uvesafb_set_par, 1426 .fb_set_par = uvesafb_set_par,
1444 .fb_save_state = uvesafb_save_state,
1445 .fb_restore_state = uvesafb_restore_state,
1446}; 1427};
1447 1428
1448static void __devinit uvesafb_init_info(struct fb_info *info, 1429static void __devinit uvesafb_init_info(struct fb_info *info,
@@ -1459,15 +1440,6 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
1459 info->fix.ypanstep = par->ypan ? 1 : 0; 1440 info->fix.ypanstep = par->ypan ? 1 : 0;
1460 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0; 1441 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
1461 1442
1462 /*
1463 * If we were unable to get the state buffer size, disable
1464 * functions for saving and restoring the hardware state.
1465 */
1466 if (par->vbe_state_size == 0) {
1467 info->fbops->fb_save_state = NULL;
1468 info->fbops->fb_restore_state = NULL;
1469 }
1470
1471 /* Disable blanking if the user requested so. */ 1443 /* Disable blanking if the user requested so. */
1472 if (!blank) 1444 if (!blank)
1473 info->fbops->fb_blank = NULL; 1445 info->fbops->fb_blank = NULL;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 200c22f55130..9dd588042880 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -19,7 +19,6 @@
19 */ 19 */
20//#define DEBUG 20//#define DEBUG
21#include <linux/virtio.h> 21#include <linux/virtio.h>
22#include <linux/virtio_ids.h>
23#include <linux/virtio_balloon.h> 22#include <linux/virtio_balloon.h>
24#include <linux/swap.h> 23#include <linux/swap.h>
25#include <linux/kthread.h> 24#include <linux/kthread.h>
@@ -248,7 +247,7 @@ out:
248 return err; 247 return err;
249} 248}
250 249
251static void virtballoon_remove(struct virtio_device *vdev) 250static void __devexit virtballoon_remove(struct virtio_device *vdev)
252{ 251{
253 struct virtio_balloon *vb = vdev->priv; 252 struct virtio_balloon *vb = vdev->priv;
254 253
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4a1f1ebff7bf..28d9cf7cf72f 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -530,19 +530,22 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
530 err = PTR_ERR(vqs[i]); 530 err = PTR_ERR(vqs[i]);
531 goto error_find; 531 goto error_find;
532 } 532 }
533
534 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
535 continue;
536
533 /* allocate per-vq irq if available and necessary */ 537 /* allocate per-vq irq if available and necessary */
534 if (vp_dev->per_vq_vectors) { 538 snprintf(vp_dev->msix_names[msix_vec],
535 snprintf(vp_dev->msix_names[msix_vec], 539 sizeof *vp_dev->msix_names,
536 sizeof *vp_dev->msix_names, 540 "%s-%s",
537 "%s-%s", 541 dev_name(&vp_dev->vdev.dev), names[i]);
538 dev_name(&vp_dev->vdev.dev), names[i]); 542 err = request_irq(vp_dev->msix_entries[msix_vec].vector,
539 err = request_irq(msix_vec, vring_interrupt, 0, 543 vring_interrupt, 0,
540 vp_dev->msix_names[msix_vec], 544 vp_dev->msix_names[msix_vec],
541 vqs[i]); 545 vqs[i]);
542 if (err) { 546 if (err) {
543 vp_del_vq(vqs[i]); 547 vp_del_vq(vqs[i]);
544 goto error_find; 548 goto error_find;
545 }
546 } 549 }
547 } 550 }
548 return 0; 551 return 0;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f53600580726..fbd2ecde93e4 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -285,6 +285,9 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
285 return NULL; 285 return NULL;
286 } 286 }
287 287
288 /* Only get used array entries after they have been exposed by host. */
289 rmb();
290
288 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; 291 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
289 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; 292 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
290 293
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index df52cb355f7d..406caa6a71cb 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -24,19 +24,6 @@
24#include "../w1_int.h" 24#include "../w1_int.h"
25 25
26/** 26/**
27 * Address is selected using 2 pins, resulting in 4 possible addresses.
28 * 0x18, 0x19, 0x1a, 0x1b
29 * However, the chip cannot be detected without doing an i2c write,
30 * so use the force module parameter.
31 */
32static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
33
34/**
35 * Insmod parameters
36 */
37I2C_CLIENT_INSMOD_1(ds2482);
38
39/**
40 * The DS2482 registers - there are 3 registers that are addressed by a read 27 * The DS2482 registers - there are 3 registers that are addressed by a read
41 * pointer. The read pointer is set by the last command executed. 28 * pointer. The read pointer is set by the last command executed.
42 * 29 *
@@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] =
96 83
97static int ds2482_probe(struct i2c_client *client, 84static int ds2482_probe(struct i2c_client *client,
98 const struct i2c_device_id *id); 85 const struct i2c_device_id *id);
99static int ds2482_detect(struct i2c_client *client, int kind,
100 struct i2c_board_info *info);
101static int ds2482_remove(struct i2c_client *client); 86static int ds2482_remove(struct i2c_client *client);
102 87
103 88
@@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = {
117 .probe = ds2482_probe, 102 .probe = ds2482_probe,
118 .remove = ds2482_remove, 103 .remove = ds2482_remove,
119 .id_table = ds2482_id, 104 .id_table = ds2482_id,
120 .detect = ds2482_detect,
121 .address_data = &addr_data,
122}; 105};
123 106
124/* 107/*
@@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data)
425} 408}
426 409
427 410
428static int ds2482_detect(struct i2c_client *client, int kind,
429 struct i2c_board_info *info)
430{
431 if (!i2c_check_functionality(client->adapter,
432 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
433 I2C_FUNC_SMBUS_BYTE))
434 return -ENODEV;
435
436 strlcpy(info->type, "ds2482", I2C_NAME_SIZE);
437
438 return 0;
439}
440
441static int ds2482_probe(struct i2c_client *client, 411static int ds2482_probe(struct i2c_client *client,
442 const struct i2c_device_id *id) 412 const struct i2c_device_id *id)
443{ 413{
@@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client,
446 int temp1; 416 int temp1;
447 int idx; 417 int idx;
448 418
419 if (!i2c_check_functionality(client->adapter,
420 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
421 I2C_FUNC_SMBUS_BYTE))
422 return -ENODEV;
423
449 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { 424 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
450 err = -ENOMEM; 425 err = -ENOMEM;
451 goto exit; 426 goto exit;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ff3eb8ff6bd7..3711b888d482 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -282,6 +282,13 @@ config NUC900_WATCHDOG
282 To compile this driver as a module, choose M here: the 282 To compile this driver as a module, choose M here: the
283 module will be called nuc900_wdt. 283 module will be called nuc900_wdt.
284 284
285config ADX_WATCHDOG
286 tristate "Avionic Design Xanthos watchdog"
287 depends on ARCH_PXA_ADX
288 help
289 Say Y here if you want support for the watchdog timer on Avionic
290 Design Xanthos boards.
291
285# AVR32 Architecture 292# AVR32 Architecture
286 293
287config AT32AP700X_WDT 294config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 348b3b862c99..699199b1baa6 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
45obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 45obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
46obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o 46obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
47obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 47obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
48obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
48 49
49# AVR32 Architecture 50# AVR32 Architecture
50obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 51obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
new file mode 100644
index 000000000000..77afb0acc500
--- /dev/null
+++ b/drivers/watchdog/adx_wdt.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (C) 2008-2009 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/fs.h>
10#include <linux/io.h>
11#include <linux/miscdevice.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/types.h>
15#include <linux/uaccess.h>
16#include <linux/watchdog.h>
17
18#define WATCHDOG_NAME "adx-wdt"
19
20/* register offsets */
21#define ADX_WDT_CONTROL 0x00
22#define ADX_WDT_CONTROL_ENABLE (1 << 0)
23#define ADX_WDT_CONTROL_nRESET (1 << 1)
24#define ADX_WDT_TIMEOUT 0x08
25
26static struct platform_device *adx_wdt_dev;
27static unsigned long driver_open;
28
29#define WDT_STATE_STOP 0
30#define WDT_STATE_START 1
31
32struct adx_wdt {
33 void __iomem *base;
34 unsigned long timeout;
35 unsigned int state;
36 unsigned int wake;
37 spinlock_t lock;
38};
39
40static struct watchdog_info adx_wdt_info = {
41 .identity = "Avionic Design Xanthos Watchdog",
42 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
43};
44
45static void adx_wdt_start_locked(struct adx_wdt *wdt)
46{
47 u32 ctrl;
48
49 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
50 ctrl |= ADX_WDT_CONTROL_ENABLE;
51 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
52 wdt->state = WDT_STATE_START;
53}
54
55static void adx_wdt_start(struct adx_wdt *wdt)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&wdt->lock, flags);
60 adx_wdt_start_locked(wdt);
61 spin_unlock_irqrestore(&wdt->lock, flags);
62}
63
64static void adx_wdt_stop_locked(struct adx_wdt *wdt)
65{
66 u32 ctrl;
67
68 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
69 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
70 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
71 wdt->state = WDT_STATE_STOP;
72}
73
74static void adx_wdt_stop(struct adx_wdt *wdt)
75{
76 unsigned long flags;
77
78 spin_lock_irqsave(&wdt->lock, flags);
79 adx_wdt_stop_locked(wdt);
80 spin_unlock_irqrestore(&wdt->lock, flags);
81}
82
83static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
84{
85 unsigned long timeout = seconds * 1000;
86 unsigned long flags;
87 unsigned int state;
88
89 spin_lock_irqsave(&wdt->lock, flags);
90 state = wdt->state;
91 adx_wdt_stop_locked(wdt);
92 writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
93
94 if (state == WDT_STATE_START)
95 adx_wdt_start_locked(wdt);
96
97 wdt->timeout = timeout;
98 spin_unlock_irqrestore(&wdt->lock, flags);
99}
100
101static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
102{
103 *seconds = wdt->timeout / 1000;
104}
105
106static void adx_wdt_keepalive(struct adx_wdt *wdt)
107{
108 unsigned long flags;
109
110 spin_lock_irqsave(&wdt->lock, flags);
111 writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
112 spin_unlock_irqrestore(&wdt->lock, flags);
113}
114
115static int adx_wdt_open(struct inode *inode, struct file *file)
116{
117 struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
118
119 if (test_and_set_bit(0, &driver_open))
120 return -EBUSY;
121
122 file->private_data = wdt;
123 adx_wdt_set_timeout(wdt, 30);
124 adx_wdt_start(wdt);
125
126 return nonseekable_open(inode, file);
127}
128
129static int adx_wdt_release(struct inode *inode, struct file *file)
130{
131 struct adx_wdt *wdt = file->private_data;
132
133 adx_wdt_stop(wdt);
134 clear_bit(0, &driver_open);
135
136 return 0;
137}
138
139static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
140{
141 struct adx_wdt *wdt = file->private_data;
142 void __user *argp = (void __user *)arg;
143 unsigned long __user *p = argp;
144 unsigned long seconds = 0;
145 unsigned int options;
146 long ret = -EINVAL;
147
148 switch (cmd) {
149 case WDIOC_GETSUPPORT:
150 if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
151 return -EFAULT;
152 else
153 return 0;
154
155 case WDIOC_GETSTATUS:
156 case WDIOC_GETBOOTSTATUS:
157 return put_user(0, p);
158
159 case WDIOC_KEEPALIVE:
160 adx_wdt_keepalive(wdt);
161 return 0;
162
163 case WDIOC_SETTIMEOUT:
164 if (get_user(seconds, p))
165 return -EFAULT;
166
167 adx_wdt_set_timeout(wdt, seconds);
168
169 /* fallthrough */
170 case WDIOC_GETTIMEOUT:
171 adx_wdt_get_timeout(wdt, &seconds);
172 return put_user(seconds, p);
173
174 case WDIOC_SETOPTIONS:
175 if (copy_from_user(&options, argp, sizeof(options)))
176 return -EFAULT;
177
178 if (options & WDIOS_DISABLECARD) {
179 adx_wdt_stop(wdt);
180 ret = 0;
181 }
182
183 if (options & WDIOS_ENABLECARD) {
184 adx_wdt_start(wdt);
185 ret = 0;
186 }
187
188 return ret;
189
190 default:
191 break;
192 }
193
194 return -ENOTTY;
195}
196
197static ssize_t adx_wdt_write(struct file *file, const char __user *data,
198 size_t len, loff_t *ppos)
199{
200 struct adx_wdt *wdt = file->private_data;
201
202 if (len)
203 adx_wdt_keepalive(wdt);
204
205 return len;
206}
207
208static const struct file_operations adx_wdt_fops = {
209 .owner = THIS_MODULE,
210 .llseek = no_llseek,
211 .open = adx_wdt_open,
212 .release = adx_wdt_release,
213 .unlocked_ioctl = adx_wdt_ioctl,
214 .write = adx_wdt_write,
215};
216
217static struct miscdevice adx_wdt_miscdev = {
218 .minor = WATCHDOG_MINOR,
219 .name = "watchdog",
220 .fops = &adx_wdt_fops,
221};
222
223static int __devinit adx_wdt_probe(struct platform_device *pdev)
224{
225 struct resource *res;
226 struct adx_wdt *wdt;
227 int ret = 0;
228 u32 ctrl;
229
230 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
231 if (!wdt) {
232 dev_err(&pdev->dev, "cannot allocate WDT structure\n");
233 return -ENOMEM;
234 }
235
236 spin_lock_init(&wdt->lock);
237
238 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
239 if (!res) {
240 dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
241 return -ENXIO;
242 }
243
244 res = devm_request_mem_region(&pdev->dev, res->start,
245 res->end - res->start + 1, res->name);
246 if (!res) {
247 dev_err(&pdev->dev, "cannot request I/O memory region\n");
248 return -ENXIO;
249 }
250
251 wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
252 res->end - res->start + 1);
253 if (!wdt->base) {
254 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
255 return -ENXIO;
256 }
257
258 /* disable watchdog and reboot on timeout */
259 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
260 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
261 ctrl &= ~ADX_WDT_CONTROL_nRESET;
262 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
263
264 platform_set_drvdata(pdev, wdt);
265 adx_wdt_dev = pdev;
266
267 ret = misc_register(&adx_wdt_miscdev);
268 if (ret) {
269 dev_err(&pdev->dev, "cannot register miscdev on minor %d "
270 "(err=%d)\n", WATCHDOG_MINOR, ret);
271 return ret;
272 }
273
274 return 0;
275}
276
277static int __devexit adx_wdt_remove(struct platform_device *pdev)
278{
279 struct adx_wdt *wdt = platform_get_drvdata(pdev);
280
281 misc_deregister(&adx_wdt_miscdev);
282 adx_wdt_stop(wdt);
283 platform_set_drvdata(pdev, NULL);
284
285 return 0;
286}
287
288static void adx_wdt_shutdown(struct platform_device *pdev)
289{
290 struct adx_wdt *wdt = platform_get_drvdata(pdev);
291 adx_wdt_stop(wdt);
292}
293
294#ifdef CONFIG_PM
295static int adx_wdt_suspend(struct device *dev)
296{
297 struct platform_device *pdev = to_platform_device(dev);
298 struct adx_wdt *wdt = platform_get_drvdata(pdev);
299
300 wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
301 adx_wdt_stop(wdt);
302
303 return 0;
304}
305
306static int adx_wdt_resume(struct device *dev)
307{
308 struct platform_device *pdev = to_platform_device(dev);
309 struct adx_wdt *wdt = platform_get_drvdata(pdev);
310
311 if (wdt->wake)
312 adx_wdt_start(wdt);
313
314 return 0;
315}
316
317static struct dev_pm_ops adx_wdt_pm_ops = {
318 .suspend = adx_wdt_suspend,
319 .resume = adx_wdt_resume,
320};
321
322# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
323#else
324# define ADX_WDT_PM_OPS NULL
325#endif
326
327static struct platform_driver adx_wdt_driver = {
328 .probe = adx_wdt_probe,
329 .remove = __devexit_p(adx_wdt_remove),
330 .shutdown = adx_wdt_shutdown,
331 .driver = {
332 .name = WATCHDOG_NAME,
333 .owner = THIS_MODULE,
334 .pm = ADX_WDT_PM_OPS,
335 },
336};
337
338static int __init adx_wdt_init(void)
339{
340 return platform_driver_register(&adx_wdt_driver);
341}
342
343static void __exit adx_wdt_exit(void)
344{
345 platform_driver_unregister(&adx_wdt_driver);
346}
347
348module_init(adx_wdt_init);
349module_exit(adx_wdt_exit);
350
351MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
352MODULE_LICENSE("GPL v2");
353MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 1e8f02f440e6..d3c824dc2358 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -206,7 +206,7 @@ static int __devinit riowd_probe(struct of_device *op,
206 206
207 dev_set_drvdata(&op->dev, p); 207 dev_set_drvdata(&op->dev, p);
208 riowd_device = p; 208 riowd_device = p;
209 err = 0; 209 return 0;
210 210
211out_iounmap: 211out_iounmap:
212 of_iounmap(&op->resource[0], p->regs, 2); 212 of_iounmap(&op->resource[0], p->regs, 2);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 852ca1977917..91430a89107c 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -227,7 +227,7 @@ static int __init fitpc2_wdt_init(void)
227 } 227 }
228 228
229 err = misc_register(&fitpc2_wdt_miscdev); 229 err = misc_register(&fitpc2_wdt_miscdev);
230 if (!err) { 230 if (err) {
231 pr_err("cannot register miscdev on minor=%d (err=%d)\n", 231 pr_err("cannot register miscdev on minor=%d (err=%d)\n",
232 WATCHDOG_MINOR, err); 232 WATCHDOG_MINOR, err);
233 goto err_margin; 233 goto err_margin;
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index a9592d981b10..6c4269b836b7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -43,6 +43,7 @@
43#include <linux/fs.h> 43#include <linux/fs.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/sched.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <linux/mount.h> 48#include <linux/mount.h>
48#include <linux/pagemap.h> 49#include <linux/pagemap.h>