author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 14:35:00 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 14:35:00 -0500
commit    72cca7baf4fba777b8ab770b902cf2e08941773f
tree      c5cdbcd65ac166946f54f1dd6ef3693eea29d791
parent    5266e70335dac35c35b5ca9cea4251c1389d4a68
parent    3e0f9b2ca8e4839335e4d64ec0a75f4fd5111c4b
Merge tag 'staging-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging/IIO updates from Greg KH:
 "Here's the "big" staging/iio pull request for 4.10-rc1.

  Not as big as 4.9 was, but still just over a thousand changes. We
  almost broke even on lines added vs. removed, as the slicoss driver
  was removed (we got a "clean" driver for the same hardware through the
  netdev tree), and some iio drivers were also dropped, but I think we
  ended up adding a few thousand lines to the source tree in the end.

  Other than that, it's a lot of minor fixes all over the place, nothing
  major stands out at all.

  All of these have been in linux-next for a while. There will be a
  merge conflict with Al's vfs tree in the lustre code, but the
  resolution for that should be pretty simple; that too has been in
  linux-next"

* tag 'staging-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (1002 commits)
  staging: comedi: comedidev.h: Document usage of 'detach' handler
  staging: fsl-mc: remove unnecessary info prints from bus driver
  staging: fsl-mc: add sysfs ABI doc
  staging/lustre/o2iblnd: Fix misspelled attemps->attempts
  staging/lustre/o2iblnd: Fix misspelling intialized->initialized
  staging/lustre: Convert all bare unsigned to unsigned int
  staging/lustre/socklnd: Fix whitespace problem
  staging/lustre/o2iblnd: Add missing space
  staging/lustre/lnetselftest: Fix potential integer overflow
  staging: greybus: audio_module: remove redundant OOM message
  staging: dgnc: Fix lines longer than 80 characters
  staging: dgnc: fix blank line after '{' warnings.
  staging/android: remove Sync Framework tasks from TODO
  staging/lustre/osc: Revert erroneous list_for_each_entry_safe use
  staging: slicoss: remove the staging driver
  staging: lustre: libcfs: remove lnet upcall code
  staging: lustre: remove set but unused variables
  staging: lustre: osc: set lock data for readahead lock
  staging: lustre: import: don't reconnect during connect interpret
  staging: lustre: clio: remove mtime check in vvp_io_fault_start()
  ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fsl-mc21
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector36
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8125
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-cros-ec18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-isl2901819
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 (renamed from drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583)14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp45318
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt7
-rw-r--r--Documentation/devicetree/bindings/iio/adc/envelope-detector.txt54
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt83
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/dpot-dac.txt41
-rw-r--r--Documentation/devicetree/bindings/iio/dac/mcp4725.txt35
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt46
-rw-r--r--Documentation/devicetree/bindings/iio/humidity/hts221.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/light/isl29018.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2583.txt26
-rw-r--r--Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt30
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--MAINTAINERS52
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/Kconfig45
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/da280.c183
-rw-r--r--drivers/iio/accel/da311.c305
-rw-r--r--drivers/iio/accel/dmard10.c266
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/mma8452.c79
-rw-r--r--drivers/iio/accel/sca3000.c1576
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c605
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/Kconfig46
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7766.c330
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/envelope-detector.c422
-rw-r--r--drivers/iio/adc/max1027.c17
-rw-r--r--drivers/iio/adc/stm32-adc-core.c303
-rw-r--r--drivers/iio/adc/stm32-adc-core.h52
-rw-r--r--drivers/iio/adc/stm32-adc.c518
-rw-r--r--drivers/iio/adc/ti-adc0832.c106
-rw-r--r--drivers/iio/adc/ti-adc161s626.c55
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c148
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig22
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c322
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c450
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h175
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c5
-rw-r--r--drivers/iio/counter/104-quad-8.c593
-rw-r--r--drivers/iio/counter/Kconfig24
-rw-r--r--drivers/iio/counter/Makefile7
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5592r.c2
-rw-r--r--drivers/iio/dac/dpot-dac.c266
-rw-r--r--drivers/iio/dac/mcp4725.c176
-rw-r--r--drivers/iio/gyro/Kconfig18
-rw-r--r--drivers/iio/gyro/Makefile5
-rw-r--r--drivers/iio/gyro/mpu3050-core.c1306
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c124
-rw-r--r--drivers/iio/gyro/mpu3050.h96
-rw-r--r--drivers/iio/gyro/st_gyro_core.c205
-rw-r--r--drivers/iio/humidity/Kconfig24
-rw-r--r--drivers/iio/humidity/Makefile7
-rw-r--r--drivers/iio/humidity/hdc100x.c130
-rw-r--r--drivers/iio/humidity/hts221.h73
-rw-r--r--drivers/iio/humidity/hts221_buffer.c168
-rw-r--r--drivers/iio/humidity/hts221_core.c687
-rw-r--r--drivers/iio/humidity/hts221_i2c.c110
-rw-r--r--drivers/iio/humidity/hts221_spi.c125
-rw-r--r--drivers/iio/humidity/si7020.c11
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/industrialio-buffer.c7
-rw-r--r--drivers/iio/industrialio-core.c261
-rw-r--r--drivers/iio/industrialio-trigger.c21
-rw-r--r--drivers/iio/inkern.c123
-rw-r--r--drivers/iio/light/Kconfig19
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/isl29018.c (renamed from drivers/staging/iio/light/isl29018.c)159
-rw-r--r--drivers/iio/light/ltr501.c111
-rw-r--r--drivers/iio/light/max44000.c5
-rw-r--r--drivers/iio/light/tsl2583.c913
-rw-r--r--drivers/iio/magnetometer/ak8974.c8
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c147
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c376
-rw-r--r--drivers/iio/potentiometer/mcp4531.c104
-rw-r--r--drivers/iio/potentiostat/Kconfig22
-rw-r--r--drivers/iio/potentiostat/Makefile6
-rw-r--r--drivers/iio/potentiostat/lmp91000.c446
-rw-r--r--drivers/iio/pressure/Kconfig10
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c276
-rw-r--r--drivers/iio/pressure/mpl3115.c26
-rw-r--r--drivers/iio/pressure/ms5611_core.c19
-rw-r--r--drivers/iio/pressure/st_pressure_core.c257
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c1
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c1
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c159
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/TODO8
-rw-r--r--drivers/staging/android/ashmem.c40
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/uapi/ion_test.h1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/staging/comedi/comedi.h55
-rw-r--r--drivers/staging/comedi/comedidev.h12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c4
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c172
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h14
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c16
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c182
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c12
-rw-r--r--drivers/staging/dgnc/Makefile3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c44
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c558
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h189
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c6
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c111
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h58
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c703
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h40
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c362
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/digi.h107
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c69
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c68
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c19
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c2
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c33
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fbtft/fbtft_device.c12
-rw-r--r--drivers/staging/fbtft/flexfb.c373
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig24
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp-cmd.h (renamed from drivers/staging/fsl-mc/include/dpbp-cmd.h)61
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c74
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h (renamed from drivers/staging/fsl-mc/include/dpcon-cmd.h)4
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h49
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c70
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h141
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h14
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c37
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h90
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c23
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c69
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c78
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c66
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h3
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c12
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h169
-rw-r--r--drivers/staging/fsl-mc/include/dpmng.h18
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h402
-rw-r--r--drivers/staging/fsl-mc/include/mc-bus.h6
-rw-r--r--drivers/staging/fsl-mc/include/mc-cmd.h44
-rw-r--r--drivers/staging/fsl-mc/include/mc-sys.h3
-rw-r--r--drivers/staging/fsl-mc/include/mc.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c6
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h1
-rw-r--r--drivers/staging/gdm724x/netlink_k.h3
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c8
-rw-r--r--drivers/staging/greybus/arche-platform.c3
-rw-r--r--drivers/staging/greybus/audio_codec.c5
-rw-r--r--drivers/staging/greybus/audio_codec.h1
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c35
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c16
-rw-r--r--drivers/staging/greybus/audio_module.c7
-rw-r--r--drivers/staging/greybus/audio_topology.c8
-rw-r--r--drivers/staging/greybus/camera.c7
-rw-r--r--drivers/staging/greybus/es2.c5
-rw-r--r--drivers/staging/greybus/log.c6
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/timesync.c6
-rw-r--r--drivers/staging/greybus/uart.c36
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c1
-rw-r--r--drivers/staging/i4l/act2000/capi.c7
-rw-r--r--drivers/staging/i4l/act2000/module.c24
-rw-r--r--drivers/staging/i4l/icn/icn.c3
-rw-r--r--drivers/staging/i4l/icn/icn.h5
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c2
-rw-r--r--drivers/staging/i4l/pcbit/capi.c5
-rw-r--r--drivers/staging/i4l/pcbit/drv.c5
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c2
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl25836
-rw-r--r--drivers/staging/iio/TODO70
-rw-r--r--drivers/staging/iio/accel/Kconfig10
-rw-r--r--drivers/staging/iio/accel/Makefile3
-rw-r--r--drivers/staging/iio/accel/sca3000.h279
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1210
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c350
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c127
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606.c (renamed from drivers/staging/iio/adc/ad7606_core.c)436
-rw-r--r--drivers/staging/iio/adc/ad7606.h58
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c23
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c102
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c19
-rw-r--r--drivers/staging/iio/adc/ad7780.c22
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c140
-rw-r--r--drivers/staging/iio/cdc/ad7746.c151
-rw-r--r--drivers/staging/iio/frequency/ad9832.c66
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.c19
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c21
-rw-r--r--drivers/staging/iio/light/Kconfig19
-rw-r--r--drivers/staging/iio/light/Makefile2
-rw-r--r--drivers/staging/iio/light/tsl2583.c963
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c86
-rw-r--r--drivers/staging/iio/ring_hw.h27
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c37
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h22
-rw-r--r--drivers/staging/ks7010/ks_hostif.c124
-rw-r--r--drivers/staging/ks7010/ks_wlan.h143
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c436
-rw-r--r--drivers/staging/ks7010/michael_mic.c29
-rw-r--r--drivers/staging/ks7010/michael_mic.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h53
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h185
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h24
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h8
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c26
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c44
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c36
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c5
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c143
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c22
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c58
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c187
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c32
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c62
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c10
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c108
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c50
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c17
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c73
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c36
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h23
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c29
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h25
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c22
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h30
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c7
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c8
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h379
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h65
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h438
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h898
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h717
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h70
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h102
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h291
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h264
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h15
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h199
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c22
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h88
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c316
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c26
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c132
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c195
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c968
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c139
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c47
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c395
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h121
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c342
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c69
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c68
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c302
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c65
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c185
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c122
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c360
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c88
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c11
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c407
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c52
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c208
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h100
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c116
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c50
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c720
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c698
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c293
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c292
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c61
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c22
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c98
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c80
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c60
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c107
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c317
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c167
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h23
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c290
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c452
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c68
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c228
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c139
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c27
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c84
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c70
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c7
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c65
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c65
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h50
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h43
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c330
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c143
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c171
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c186
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c52
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c669
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c338
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c95
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c66
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c98
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c15
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c106
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c410
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c9
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c1
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c4
-rw-r--r--drivers/staging/most/aim-network/networking.c53
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c5
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c230
-rw-r--r--drivers/staging/most/mostcore/core.c55
-rw-r--r--drivers/staging/netlogic/xlr_net.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c32
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c46
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c69
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c70
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c51
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c20
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c25
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c21
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h11
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h6
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h20
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h13
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h2
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c58
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8712/osdep_service.h9
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h12
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h18
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c13
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c50
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h6
-rw-r--r--drivers/staging/rts5208/ms.c393
-rw-r--r--drivers/staging/rts5208/ms.h4
-rw-r--r--drivers/staging/rts5208/rtsx.c55
-rw-r--r--drivers/staging/rts5208/rtsx.h2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c94
-rw-r--r--drivers/staging/rts5208/rtsx_card.h16
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c17
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h137
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c319
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h4
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h4
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h30
-rw-r--r--drivers/staging/rts5208/sd.c813
-rw-r--r--drivers/staging/rts5208/sd.h5
-rw-r--r--drivers/staging/rts5208/spi.c144
-rw-r--r--drivers/staging/rts5208/xd.c461
-rw-r--r--drivers/staging/rts5208/xd.h2
-rw-r--r--drivers/staging/skein/skein_api.c26
-rw-r--r--drivers/staging/skein/threefish_block.c16
-rw-r--r--drivers/staging/slicoss/Kconfig14
-rw-r--r--drivers/staging/slicoss/Makefile1
-rw-r--r--drivers/staging/slicoss/README7
-rw-r--r--drivers/staging/slicoss/TODO36
-rw-r--r--drivers/staging/slicoss/slic.h573
-rw-r--r--drivers/staging/slicoss/slichw.h652
-rw-r--r--drivers/staging/slicoss/slicoss.c3131
-rw-r--r--drivers/staging/sm750fb/Makefile2
-rw-r--r--drivers/staging/sm750fb/ddk750.h23
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c100
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h89
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c75
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h30
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_help.c17
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h21
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c37
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c74
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h22
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c31
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h24
-rw-r--r--drivers/staging/sm750fb/sm750.c52
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c52
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h10
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h14
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c40
-rw-r--r--drivers/staging/speakup/TODO2
-rw-r--r--drivers/staging/speakup/main.c42
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup_soft.c46
-rw-r--r--drivers/staging/speakup/speakup_spkout.c31
-rw-r--r--drivers/staging/speakup/speakup_txprt.c29
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h148
-rw-r--r--drivers/staging/speakup/spk_types.h16
-rw-r--r--drivers/staging/speakup/synth.c22
-rw-r--r--drivers/staging/speakup/thread.c5
-rw-r--r--drivers/staging/speakup/varhandlers.c6
-rw-r--r--drivers/staging/unisys/include/iochannel.h335
-rw-r--r--drivers/staging/unisys/include/visorbus.h2
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h225
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c231
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h4
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c44
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c653
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h185
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/vc04_services/Kconfig7
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO50
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h25
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c324
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c202
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c659
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h9
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c17
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c138
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h13
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c6
-rw-r--r--drivers/staging/vt6655/baseband.c58
-rw-r--r--drivers/staging/vt6655/baseband.h11
-rw-r--r--drivers/staging/vt6655/card.c45
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/channel.h4
-rw-r--r--drivers/staging/vt6655/desc.h4
-rw-r--r--drivers/staging/vt6655/device.h16
-rw-r--r--drivers/staging/vt6655/device_cfg.h4
-rw-r--r--drivers/staging/vt6655/device_main.c9
-rw-r--r--drivers/staging/vt6655/dpc.c4
-rw-r--r--drivers/staging/vt6655/dpc.h4
-rw-r--r--drivers/staging/vt6655/key.c5
-rw-r--r--drivers/staging/vt6655/key.h5
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h327
-rw-r--r--drivers/staging/vt6655/power.c6
-rw-r--r--drivers/staging/vt6655/power.h5
-rw-r--r--drivers/staging/vt6655/rf.c718
-rw-r--r--drivers/staging/vt6655/rf.h5
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/rxtx.h4
-rw-r--r--drivers/staging/vt6655/srom.c36
-rw-r--r--drivers/staging/vt6655/srom.h11
-rw-r--r--drivers/staging/vt6655/tmacro.h4
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/baseband.h20
-rw-r--r--drivers/staging/vt6656/card.c15
-rw-r--r--drivers/staging/vt6656/mac.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/vt6656/rf.c10
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c12
-rw-r--r--drivers/staging/wilc1000/host_interface.h1
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c51
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c6
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c3
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c24
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h128
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2479
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c322
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h100
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h118
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h120
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h88
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h194
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h90
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c637
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h102
-rw-r--r--drivers/staging/wlan-ng/p80211req.c189
-rw-r--r--drivers/staging/wlan-ng/p80211req.h90
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c100
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c559
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c544
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h125
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c102
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c210
-rw-r--r--drivers/staging/xgifb/XGI_main.h54
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c196
-rw-r--r--drivers/staging/xgifb/vb_init.c56
-rw-r--r--drivers/staging/xgifb/vb_setmode.c667
-rw-r--r--drivers/staging/xgifb/vb_table.h9
-rw-r--r--drivers/staging/xgifb/vb_util.h4
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/consumer.h41
-rw-r--r--include/linux/iio/dac/mcp4725.h12
-rw-r--r--include/linux/iio/iio.h48
-rw-r--r--include/linux/iio/sysfs.h24
-rw-r--r--include/linux/iio/trigger.h2
-rw-r--r--include/linux/iio/types.h5
-rw-r--r--include/linux/mfd/cros_ec.h10
-rw-r--r--include/linux/mfd/cros_ec_commands.h183
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h8
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h21
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--tools/iio/iio_generic_buffer.c18
685 files changed, 33601 insertions, 31456 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fsl-mc b/Documentation/ABI/testing/sysfs-bus-fsl-mc
new file mode 100644
index 000000000000..80256b8b4f26
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsl-mc
@@ -0,0 +1,21 @@
+What:		/sys/bus/fsl-mc/drivers/.../bind
+Date:		December 2016
+Contact:	stuart.yoder@nxp.com
+Description:
+		Writing a device location to this file will cause
+		the driver to attempt to bind to the device found at
+		this location. The format for the location is Object.Id
+		and is the same as found in /sys/bus/fsl-mc/devices/.
+		For example:
+		# echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind
+
+What:		/sys/bus/fsl-mc/drivers/.../unbind
+Date:		December 2016
+Contact:	stuart.yoder@nxp.com
+Description:
+		Writing a device location to this file will cause the
+		driver to attempt to unbind from the device found at
+		this location. The format for the location is Object.Id
+		and is the same as found in /sys/bus/fsl-mc/devices/.
+		For example:
+		# echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index fee35c00cc4e..b8f220f978dd 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -329,6 +329,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_scale
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -1579,3 +1580,20 @@ Contact: linux-iio@vger.kernel.org
 Description:
 		Raw (unscaled no offset etc.) electric conductivity reading that
 		can be processed to siemens per meter.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_raw
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw counter device counts from channel Y. For quadrature
+		counters, multiplication by an available [Y]_scale results in
+		the counts of a single quadrature signal phase from channel Y.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_raw
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw counter device index value from channel Y. This attribute
+		provides an absolute positional reference (e.g. a pulse once per
+		revolution) which may be used to home positional systems as
+		required.
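
As a rough usage sketch of the new count attributes (the device node and channel number below are illustrative assumptions, not taken from the patch), the raw count is scaled in user space:

	# cat /sys/bus/iio/devices/iio:device0/in_count0_raw
	# cat /sys/bus/iio/devices/iio:device0/in_count0_scale
	# multiply the two values to obtain the counts of a single
	# quadrature signal phase, as described above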
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
new file mode 100644
index 000000000000..2071f9bcfaa5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
@@ -0,0 +1,36 @@
+What:		/sys/bus/iio/devices/iio:deviceX/in_altvoltageY_invert
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The DAC is used to find the peak level of an alternating
+		voltage input signal by a binary search using the output
+		of a comparator wired to an interrupt pin. Like so:
+		                           _
+		                          | \
+		     input +------>-------|+ \
+		                          |   \
+		  .-------.               |    }---.
+		  |       |               |   /    |
+		  |    dac|-->------------|-  /    |
+		  |       |               |_/      |
+		  |       |                        |
+		  |       |                        |
+		  |    irq|------<-----------------'
+		  |       |
+		  '-------'
+		The boolean invert attribute (0/1) should be set when the
+		input signal is centered around the maximum value of the
+		dac instead of zero. The envelope detector will search
+		from below in this case and will also invert the result.
+		The edge/level of the interrupt is also switched to its
+		opposite value.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_altvoltageY_compare_interval
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		Number of milliseconds to wait for the comparator in each
+		step of the binary search for the input peak level. Needs
+		to relate to the frequency of the input signal.
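
A minimal usage sketch for the two attributes above (the device and channel numbers are assumptions for illustration; only the attributes documented in this file are used):

	# echo 1 > /sys/bus/iio/devices/iio:device0/in_altvoltage0_invert
	# echo 50 > /sys/bus/iio/devices/iio:device0/in_altvoltage0_compare_interval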
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
new file mode 100644
index 000000000000..ba676520b953
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -0,0 +1,125 @@
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Discrete set of available values for the respective counter
+		configuration is listed in this file.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Read-only attribute that indicates whether the counter for
+		channel Y is counting up or down.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Count mode for channel Y. Four count modes are available:
+		normal, range limit, non-recycle, and modulo-n. The preset value
+		for channel Y is used by the count mode where required.
+
+		Normal:
+			Counting is continuous in either direction.
+
+		Range Limit:
+			An upper or lower limit is set, mimicking limit switches
+			in the mechanical counterpart. The upper limit is set to
+			the preset value, while the lower limit is set to 0. The
+			counter freezes at count = preset when counting up, and
+			at count = 0 when counting down. At either of these
+			limits, the counting is resumed only when the count
+			direction is reversed.
+
+		Non-recycle:
+			Counter is disabled whenever a 24-bit count overflow or
+			underflow takes place. The counter is re-enabled when a
+			new count value is loaded to the counter via a preset
+			operation or write to raw.
+
+		Modulo-N:
+			A count boundary is set between 0 and the preset value.
+			The counter is reset to 0 at count = preset when
+			counting up, while the counter is set to the preset
+			value at count = 0 when counting down; the counter does
+			not freeze at the boundary points, but counts
+			continuously throughout.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Read-only attribute that indicates whether excessive noise is
+		present at the channel Y count inputs in quadrature clock mode;
+		irrelevant in non-quadrature clock mode.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_preset
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		If the counter device supports preset registers, the preset
+		count for channel Y is provided by this attribute.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Configure channel Y counter for non-quadrature or quadrature
+		clock mode. Selecting non-quadrature clock mode will disable
+		synchronous load mode. In quadrature clock mode, the channel Y
+		scale attribute selects the encoder phase division (scale of 1
+		selects full-cycle, scale of 0.5 selects half-cycle, scale of
+		0.25 selects quarter-cycle) processed by the channel Y counter.
+
+		Non-quadrature:
+			The filter and decoder circuit are bypassed. Encoder A
+			input serves as the count input and B as the UP/DOWN
+			direction control input, with B = 1 selecting UP Count
+			mode and B = 0 selecting Down Count mode.
+
+		Quadrature:
+			Encoder A and B inputs are digitally filtered and
+			decoded for UP/DN clock.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Whether to set channel Y counter with channel Y preset value
+		when channel Y index input is active, or continuously count.
+		Valid attribute values are boolean.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Active level of channel Y index input; irrelevant in
+		non-synchronous load mode.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Configure channel Y counter for non-synchronous or synchronous
+		load mode. Synchronous load mode cannot be selected in
+		non-quadrature clock mode.
+
+		Non-synchronous:
+			A logic low level is the active level at this index
+			input. The index function (as enabled via
+			set_to_preset_on_index) is performed directly on the
+			active level of the index input.
+
+		Synchronous:
+			Intended for interfacing with encoder Index output in
+			quadrature clock mode. The active level is configured
+			via index_polarity. The index function (as enabled via
+			set_to_preset_on_index) is performed synchronously with
+			the quadrature clock on the active level of the index
+			input.
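
An illustrative configuration sketch only: the device and channel numbers are assumptions, and the exact mode strings should be confirmed from the _available file first rather than taken from this sketch:

	# cat /sys/bus/iio/devices/iio:device0/in_count_count_mode_available
	# echo "modulo-n" > /sys/bus/iio/devices/iio:device0/in_count0_count_mode
	# echo 3599 > /sys/bus/iio/devices/iio:device0/in_count0_preset
	# echo 1 > /sys/bus/iio/devices/iio:device0/in_count0_set_to_preset_on_index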
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
new file mode 100644
index 000000000000..297b9720f024
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
@@ -0,0 +1,18 @@
+What:		/sys/bus/iio/devices/iio:deviceX/calibrate
+Date:		July 2015
+KernelVersion:	4.7
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Writing '1' will perform a FOC (Fast Online Calibration). The
+		corresponding calibration offsets can be read from *_calibbias
+		entries.
+
+What:		/sys/bus/iio/devices/iio:deviceX/location
+Date:		July 2015
+KernelVersion:	4.7
+Contact:	linux-iio@vger.kernel.org
+Description:
+		This attribute returns a string with the physical location where
+		the motion sensor is placed. For example, in a laptop a motion
+		sensor can be located on the base or on the lid. Current valid
+		values are 'base' and 'lid'.
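
A minimal usage sketch, assuming (purely for illustration) that the cros-ec sensor is enumerated as iio:device0:

	# echo 1 > /sys/bus/iio/devices/iio:device0/calibrate
	# cat /sys/bus/iio/devices/iio:device0/location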
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
new file mode 100644
index 000000000000..580e93f373f6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
@@ -0,0 +1,8 @@
+What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_raw_available
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The range of available values represented as the minimum value,
+		the step and the maximum value, all enclosed in square brackets.
+		Example: [0 1 256]
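
For illustration (device and channel assumed), the "[min step max]" triple can be read before writing a raw value within that range:

	# cat /sys/bus/iio/devices/iio:device0/out_voltage0_raw_available
	# echo 128 > /sys/bus/iio/devices/iio:device0/out_voltage0_raw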
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
new file mode 100644
index 000000000000..f0ce0a0476ea
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
@@ -0,0 +1,19 @@
+What:		/sys/bus/iio/devices/iio:deviceX/proximity_on_chip_ambient_infrared_suppression
+Date:		January 2011
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+		infrared suppression:
+
+		Scheme 0, makes full n (4, 8, 12, 16) bits (unsigned) proximity
+		detection. The range of Scheme 0 proximity count is from 0 to
+		2^n. Logic 1 of this bit, Scheme 1, makes n-1 (3, 7, 11, 15)
+		bits (2's complementary) proximity_less_ambient detection. The
+		range of Scheme 1 proximity count is from -2^(n-1) to 2^(n-1).
+		The sign bit is extended for resolutions less than 16. While
+		Scheme 0 has wider dynamic range, Scheme 1 proximity detection
+		is less affected by the ambient IR noise variation.
+
+		0 Sensing IR from LED and ambient
+		1 Sensing IR from LED with ambient IR rejection
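
Illustrative only (device number assumed): the attribute is a plain boolean sysfs file, so switching between the two schemes is a simple write and read-back:

	# echo 1 > /sys/bus/iio/devices/iio:device0/proximity_on_chip_ambient_infrared_suppression
	# cat /sys/bus/iio/devices/iio:device0/proximity_on_chip_ambient_infrared_suppression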
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
index 660781df409f..a2e19964e87e 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
@@ -1,18 +1,18 @@
-What:		/sys/bus/iio/devices/device[n]/lux_table
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_calibrate
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		This property gets/sets the table of coefficients
-		used in calculating illuminance in lux.
+		This property causes an internal calibration of the als gain trim
+		value which is later used in calculating illuminance in lux.
 
-What:		/sys/bus/iio/devices/device[n]/illuminance0_calibrate
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_lux_table
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		This property causes an internal calibration of the als gain trim
-		value which is later used in calculating illuminance in lux.
+		This property gets/sets the table of coefficients
+		used in calculating illuminance in lux.
 
-What:		/sys/bus/iio/devices/device[n]/illuminance0_input_target
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_input_target
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
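
As a sketch of the renamed interface (the device index is assumed, following the device[n] pattern used in this file):

	# echo 1 > /sys/bus/iio/devices/device0/in_illuminance_calibrate
	# cat /sys/bus/iio/devices/device0/in_illuminance_lux_table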
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
new file mode 100644
index 000000000000..2a91fbe394fc
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
@@ -0,0 +1,8 @@
+What:		/sys/bus/iio/devices/iio:deviceX/out_resistance_raw_available
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The range of available values represented as the minimum value,
+		the step and the maximum value, all enclosed in square brackets.
+		Example: [0 1 256]
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index fbbad6446741..03349ad5abfa 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -39,11 +39,13 @@ dallas,ds75 Digital Thermometer and Thermostat
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
 dlg,da9063		DA9063: system PMIC for quad-core application processors
 domintech,dmard09	DMARD09: 3-axis Accelerometer
+domintech,dmard10	DMARD10: 3-axis Accelerometer
 epson,rx8010		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
 fsl,mc13892		MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma7660		MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
 fsl,mma8450		MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
 fsl,mpl3115		MPL3115: Absolute Digital Pressure Sensor
 fsl,mpr121		MPR121: Proximity Capacitive Touch Sensor Controller
@@ -57,6 +59,7 @@ maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
 mc,rv3029c2		Real Time Clock Module with I2C-Bus
 mcube,mc3230		mCube 3-axis 8-bit digital accelerometer
+memsic,mxc6225		MEMSIC 2-axis 8-bit digital accelerometer
 microchip,mcp4531-502	Microchip 7-bit Single I2C Digital Potentiometer (5k)
 microchip,mcp4531-103	Microchip 7-bit Single I2C Digital Potentiometer (10k)
 microchip,mcp4531-503	Microchip 7-bit Single I2C Digital Potentiometer (50k)
@@ -121,6 +124,9 @@ microchip,mcp4662-502 Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem
 microchip,mcp4662-103	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
 microchip,mcp4662-503	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
 microchip,mcp4662-104	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+miramems,da226		MiraMEMS DA226 2-axis 14-bit digital accelerometer
+miramems,da280		MiraMEMS DA280 3-axis 14-bit digital accelerometer
+miramems,da311		MiraMEMS DA311 3-axis 12-bit digital accelerometer
 national,lm63		Temperature sensor with integrated fan control
 national,lm75		I2C TEMP SENSOR
 national,lm80		Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
@@ -146,6 +152,7 @@ ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
146samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power) 152samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
147sgx,vz89x SGX Sensortech VZ89X Sensors 153sgx,vz89x SGX Sensortech VZ89X Sensors
148sii,s35390a 2-wire CMOS real-time clock 154sii,s35390a 2-wire CMOS real-time clock
155silabs,si7020 Relative Humidity and Temperature Sensors
149skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply 156skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
150st,24c256 i2c serial eeprom (24cxx) 157st,24c256 i2c serial eeprom (24cxx)
151st,m41t00 Serial real-time clock (RTC) 158st,m41t00 Serial real-time clock (RTC)
diff --git a/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
new file mode 100644
index 000000000000..27544bdd4478
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
@@ -0,0 +1,54 @@
1Bindings for ADC envelope detector using a DAC and a comparator
2
3The DAC is used to find the peak level of an alternating voltage input
4signal by a binary search using the output of a comparator wired to
5an interrupt pin. Like so:
6 _
7 | \
8 input +------>-------|+ \
9 | \
10 .-------. | }---.
11 | | | / |
12 | dac|-->--|- / |
13 | | |_/ |
14 | | |
15 | | |
16 | irq|------<-------'
17 | |
18 '-------'
19
20Required properties:
21- compatible: Should be "axentia,tse850-envelope-detector"
22- io-channels: Channel node of the dac to be used for comparator input.
23- io-channel-names: Should be "dac".
24- interrupts: Specification for one client interrupt,
25 see ../../interrupt-controller/interrupts.txt for details.
26- interrupt-names: Should be "comp".
27
28Example:
29
30 &i2c {
31 dpot: mcp4651-104@28 {
32 compatible = "microchip,mcp4651-104";
33 reg = <0x28>;
34 #io-channel-cells = <1>;
35 };
36 };
37
38 dac: dac {
39 compatible = "dpot-dac";
40 vref-supply = <&reg_3v3>;
41 io-channels = <&dpot 0>;
42 io-channel-names = "dpot";
43 #io-channel-cells = <1>;
44 };
45
46 envelope-detector {
47 compatible = "axentia,tse850-envelope-detector";
48 io-channels = <&dac 0>;
49 io-channel-names = "dac";
50
51 interrupt-parent = <&gpio>;
52 interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
53 interrupt-names = "comp";
54 };
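To make the binary search described above concrete, here is a minimal C sketch (not the in-tree drivers/iio/adc/envelope-detector.c implementation); set_level() and comparator_tripped() are hypothetical stand-ins for programming the DAC and for checking whether the "comp" interrupt fired within the settling window:

/*
 * Illustrative only: binary search for the peak level of the input.
 * set_level() programs the DAC; comparator_tripped() reports whether
 * the input exceeded the DAC level (i.e. the comparator fired).
 */
static int find_envelope(int dac_max,
			 void (*set_level)(int),
			 int (*comparator_tripped)(void))
{
	int lo = 0, hi = dac_max;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		set_level(mid);
		if (comparator_tripped())
			lo = mid + 1;	/* input peak is above this level */
		else
			hi = mid;	/* this level already covers the peak */
	}

	return lo;	/* lowest DAC level the input never exceeds */
}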
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
new file mode 100644
index 000000000000..49ed82e89870
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -0,0 +1,83 @@
1STMicroelectronics STM32 ADC device driver
2
3STM32 ADC is a successive approximation analog-to-digital converter.
4It has several multiplexed input channels. Conversions can be performed
5in single, continuous, scan or discontinuous mode. Result of the ADC is
6stored in a left-aligned or right-aligned 32-bit data register.
7Conversions can be launched in software or using hardware triggers.
8
9The analog watchdog feature allows the application to detect if the input
10voltage goes beyond the user-defined higher or lower thresholds.
11
12Each STM32 ADC block can have up to 3 ADC instances.
13
14Each instance supports two contexts to manage conversions, each one has its
15own configurable sequence and trigger:
16- regular conversion can be done in sequence, running in background
17- injected conversions have higher priority, and so have the ability to
18  interrupt the regular conversion sequence (triggered either in SW or HW).
19  The regular sequence is resumed if it has been interrupted.
20
21Contents of a stm32 adc root node:
22-----------------------------------
23Required properties:
24- compatible: Should be "st,stm32f4-adc-core".
25- reg: Offset and length of the ADC block register set.
26- interrupts: Must contain the interrupt for ADC block.
27- clocks: Clock for the analog circuitry (common to all ADCs).
28- clock-names: Must be "adc".
29- interrupt-controller: Identifies the controller node as interrupt-parent
30- vref-supply: Phandle to the vref input analog reference voltage.
31- #interrupt-cells = <1>;
32- #address-cells = <1>;
33- #size-cells = <0>;
34
35Optional properties:
36- A pinctrl state named "default" for each ADC channel may be defined to
37  configure the inX ADC pins for analog input on the external pins.
38
39Contents of a stm32 adc child node:
40-----------------------------------
41An ADC block node should contain at least one subnode, representing an
42ADC instance available on the machine.
43
44Required properties:
45- compatible: Should be "st,stm32f4-adc".
46- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
47- clocks: Input clock private to this ADC instance.
48- interrupt-parent: Phandle to the parent interrupt controller.
49- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
50 2 for adc@200).
51- st,adc-channels: List of single-ended channels muxed for this ADC.
52  It can have up to 16 channels, numbered from 0 to 15 (corresponding to in0..in15).
53- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
54 Documentation/devicetree/bindings/iio/iio-bindings.txt
55
56Example:
57 adc: adc@40012000 {
58 compatible = "st,stm32f4-adc-core";
59 reg = <0x40012000 0x400>;
60 interrupts = <18>;
61 clocks = <&rcc 0 168>;
62 clock-names = "adc";
63 vref-supply = <&reg_vref>;
64 interrupt-controller;
65 pinctrl-names = "default";
66 pinctrl-0 = <&adc3_in8_pin>;
67
68 #interrupt-cells = <1>;
69 #address-cells = <1>;
70 #size-cells = <0>;
71
72 adc@0 {
73 compatible = "st,stm32f4-adc";
74 #io-channel-cells = <1>;
75 reg = <0x0>;
76 clocks = <&rcc 0 168>;
77 interrupt-parent = <&adc>;
78 interrupts = <0>;
79 st,adc-channels = <8>;
80 };
81 ...
82 other adc child nodes follow...
83 };
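The "#io-channel-cells" property exposes each ADC instance as an IIO provider, so other drivers can consume its channels through the in-kernel interface declared in <linux/iio/consumer.h>. A minimal consumer sketch, assuming the consumer's node carries io-channel-names = "vsense" pointing at one of these channels:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/iio/consumer.h>

/* Sketch of an IIO consumer reading one ADC channel; "vsense" is an
 * assumed io-channel-names entry, not something this binding defines. */
static int read_vsense_raw(struct device *dev, int *raw)
{
	struct iio_channel *chan;
	int ret;

	chan = iio_channel_get(dev, "vsense");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, raw);
	iio_channel_put(chan);

	return ret < 0 ? ret : 0;
}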
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
index 9ed2315781e4..3d25011f0c99 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
@@ -3,6 +3,7 @@
3Required properties: 3Required properties:
4 - compatible: Should be "ti,adc141s626" or "ti,adc161s626" 4 - compatible: Should be "ti,adc141s626" or "ti,adc161s626"
5 - reg: spi chip select number for the device 5 - reg: spi chip select number for the device
6 - vdda-supply: supply voltage to VDDA pin
6 7
7Recommended properties: 8Recommended properties:
8 - spi-max-frequency: Definition as per 9 - spi-max-frequency: Definition as per
@@ -11,6 +12,7 @@ Recommended properties:
11Example: 12Example:
12adc@0 { 13adc@0 {
13 compatible = "ti,adc161s626"; 14 compatible = "ti,adc161s626";
15 vdda-supply = <&vdda_fixed>;
14 reg = <0>; 16 reg = <0>;
15 spi-max-frequency = <4300000>; 17 spi-max-frequency = <4300000>;
16}; 18};
diff --git a/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
new file mode 100644
index 000000000000..fdf47a01bfef
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
@@ -0,0 +1,41 @@
1Bindings for DAC emulation using a digital potentiometer
2
3It is assumed that the dpot is used as a voltage divider between the
4current dpot wiper setting and the maximum resistance of the dpot. The
5voltage to be divided is supplied by a vref regulator.
6
7 .------.
8 .-----------. | |
9 | vref |--' .---.
10 | regulator |--. | |
11 '-----------' | | d |
12 | | p |
13 | | o | wiper
14 | | t |<---------+
15 | | |
16 | '---' dac output voltage
17 | |
18 '------+------------+
19
20Required properties:
21- compatible: Should be "dpot-dac"
22- vref-supply: The regulator supplying the voltage divider.
23- io-channels: Channel node of the dpot to be used for the voltage division.
24- io-channel-names: Should be "dpot".
25
26Example:
27
28 &i2c {
29 dpot: mcp4651-503@28 {
30 compatible = "microchip,mcp4651-503";
31 reg = <0x28>;
32 #io-channel-cells = <1>;
33 };
34 };
35
36 dac {
37 compatible = "dpot-dac";
38 vref-supply = <&reg_3v3>;
39 io-channels = <&dpot 0>;
40 io-channel-names = "dpot";
41 };
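The resulting "DAC" output is simply the divider ratio applied to the reference: Vout = Vref * wiper / wiper_max, where wiper/wiper_max is the raw dpot position over its full scale. A small sketch of that arithmetic (microvolt units assumed so it stays in integer math):

/* Sketch: divider output voltage for a given dpot wiper position.
 * vref_uv is the regulator voltage in microvolts; wiper_raw/wiper_max
 * is the raw dpot setting over its full-scale value. */
static unsigned long long dpot_dac_output_uv(unsigned long vref_uv,
					     unsigned int wiper_raw,
					     unsigned int wiper_max)
{
	/* Vout = Vref * wiper / wiper_max */
	return (unsigned long long)vref_uv * wiper_raw / wiper_max;
}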
diff --git a/Documentation/devicetree/bindings/iio/dac/mcp4725.txt b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
new file mode 100644
index 000000000000..1bc6c093fbfe
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
@@ -0,0 +1,35 @@
1Microchip mcp4725 and mcp4726 DAC device driver
2
3Required properties:
4 - compatible: Must be "microchip,mcp4725" or "microchip,mcp4726"
5 - reg: Should contain the DAC I2C address
6 - vdd-supply: Phandle to the Vdd power supply. This supply is used as a
7 voltage reference on mcp4725. It is used as a voltage reference on
8 mcp4726 if there is no vref-supply specified.
9
10Optional properties (valid only for mcp4726):
11 - vref-supply: Optional phandle to the Vref power supply. Vref pin is
12 used as a voltage reference when this supply is specified.
13 - microchip,vref-buffered: Boolean to enable buffering of the external
14 Vref pin. This boolean is not valid without the vref-supply. Quoting
15 the datasheet: This is offered in cases where the reference voltage
16 does not have the current capability not to drop its voltage when
17 connected to the internal resistor ladder circuit.
18
19Examples:
20
21 /* simple mcp4725 */
22 mcp4725@60 {
23 compatible = "microchip,mcp4725";
24 reg = <0x60>;
25 vdd-supply = <&vdac_vdd>;
26 };
27
28 /* mcp4726 with the buffered external reference voltage */
29 mcp4726@60 {
30 compatible = "microchip,mcp4726";
31 reg = <0x60>;
32 vdd-supply = <&vdac_vdd>;
33 vref-supply = <&vdac_vref>;
34 microchip,vref-buffered;
35 };
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
new file mode 100644
index 000000000000..b0d3b59966bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -0,0 +1,46 @@
1Invensense MPU-3050 Gyroscope device tree bindings
2
3Required properties:
4 - compatible : should be "invensense,mpu3050"
5 - reg : the I2C address of the sensor
6
7Optional properties:
8 - interrupt-parent : should be the phandle for the interrupt controller
9 - interrupts : interrupt mapping for the trigger interrupt from the
10 internal oscillator. The following IRQ modes are supported:
11 IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
12 IRQ_TYPE_LEVEL_LOW. The driver should detect and configure the hardware
13 for the desired interrupt type.
14 - vdd-supply : supply regulator for the main power voltage.
15 - vlogic-supply : supply regulator for the signal voltage.
16 - mount-matrix : see iio/mount-matrix.txt
17
18Optional subnodes:
19 - The MPU-3050 will pass through and forward the I2C signals from the
20   incoming I2C bus, or alternatively drive traffic to a slave device (usually
21   an accelerometer) on its own initiative. Therefore it supports an i2c-gate
22   subnode. For details see: i2c/i2c-gate.txt
23
24Example:
25
26mpu3050@68 {
27 compatible = "invensense,mpu3050";
28 reg = <0x68>;
29 interrupt-parent = <&foo>;
30 interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
31 vdd-supply = <&bar>;
32 vlogic-supply = <&baz>;
33
34 /* External I2C interface */
35 i2c-gate {
36 #address-cells = <1>;
37 #size-cells = <0>;
38
39 fnord@18 {
40 compatible = "fnord";
41 reg = <0x18>;
42 interrupt-parent = <&foo>;
43 interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
44 };
45 };
46};
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
new file mode 100644
index 000000000000..b20ab9c12080
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -0,0 +1,22 @@
1* HTS221 STM humidity + temperature sensor
2
3Required properties:
4- compatible: should be "st,hts221"
5- reg: i2c address of the sensor / spi cs line
6
7Optional properties:
8- interrupt-parent: should be the phandle for the interrupt controller
9- interrupts: interrupt mapping for IRQ. It should be configured with
10 flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
11
12 Refer to interrupt-controller/interrupts.txt for generic interrupt
13 client node bindings.
14
15Example:
16
17hts221@5f {
18 compatible = "st,hts221";
19 reg = <0x5f>;
20 interrupt-parent = <&gpio0>;
21 interrupts = <0 IRQ_TYPE_EDGE_RISING>;
22};
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
new file mode 100644
index 000000000000..425ab459e209
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -0,0 +1,28 @@
1* ISL 29018/29023/29035 I2C ALS, Proximity, and Infrared sensor
2
3Required properties:
4
5 - compatible: Should be one of
6 "isil,isl29018"
7 "isil,isl29023"
8 "isil,isl29035"
9 - reg: the I2C address of the device
10
11Optional properties:
12
13 - interrupt-parent: should be the phandle for the interrupt controller
14 - interrupts: the sole interrupt generated by the device
15
16 Refer to interrupt-controller/interrupts.txt for generic interrupt client
17 node bindings.
18
19 - vcc-supply: phandle to the regulator that provides power to the sensor.
20
21Example:
22
23isl29018@44 {
24 compatible = "isil,isl29018";
25 reg = <0x44>;
26 interrupt-parent = <&gpio>;
27 interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_LEVEL_HIGH>;
28};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
new file mode 100644
index 000000000000..8e2066c83f70
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -0,0 +1,26 @@
1* TAOS TSL 2580/2581/2583 ALS sensor
2
3Required properties:
4
5 - compatible: Should be one of
6 "amstaos,tsl2580"
7 "amstaos,tsl2581"
8 "amstaos,tsl2583"
9 - reg: the I2C address of the device
10
11Optional properties:
12
13 - interrupt-parent: should be the phandle for the interrupt controller
14 - interrupts: the sole interrupt generated by the device
15
16 Refer to interrupt-controller/interrupts.txt for generic interrupt client
17 node bindings.
18
19 - vcc-supply: phandle to the regulator that provides power to the sensor.
20
21Example:
22
23tsl2581@29 {
24 compatible = "amstaos,tsl2581";
25 reg = <0x29>;
26};
diff --git a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
new file mode 100644
index 000000000000..b9b621e94cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
@@ -0,0 +1,30 @@
1* Texas Instruments LMP91000 potentiostat
2
3http://www.ti.com/lit/ds/symlink/lmp91000.pdf
4
5Required properties:
6
7 - compatible: should be "ti,lmp91000"
8 - reg: the I2C address of the device
9 - io-channels: the phandle of the iio provider
10
11 - ti,external-tia-resistor: if the property ti,tia-gain-ohm is not defined, this
12   needs to be set to signal that an external resistor value is being used.
13
14Optional properties:
15
16 - ti,tia-gain-ohm: ohm value of the internal resistor for the transimpedance
17 amplifier. Must be 2750, 3500, 7000, 14000, 35000, 120000, or 350000 ohms.
18
19 - ti,rload-ohm: ohm value of the internal resistor load applied to the gas
20 sensor. Must be 10, 33, 50, or 100 (default) ohms.
21
22Example:
23
24lmp91000@48 {
25 compatible = "ti,lmp91000";
26 reg = <0x48>;
27	ti,tia-gain-ohm = <7000>;
28	ti,rload-ohm = <100>;
29 io-channels = <&adc>;
30};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index e41fe340162b..c040c9ad1889 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -42,6 +42,7 @@ Accelerometers:
42- st,lsm303agr-accel 42- st,lsm303agr-accel
43- st,lis2dh12-accel 43- st,lis2dh12-accel
44- st,h3lis331dl-accel 44- st,h3lis331dl-accel
45- st,lng2dm-accel
45 46
46Gyroscopes: 47Gyroscopes:
47- st,l3g4200d-gyro 48- st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d9c51d7f4aac..56a257b7c4d7 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -39,6 +39,7 @@ auo AU Optronics Corporation
39auvidea Auvidea GmbH 39auvidea Auvidea GmbH
40avago Avago Technologies 40avago Avago Technologies
41avic Shanghai AVIC Optoelectronics Co., Ltd. 41avic Shanghai AVIC Optoelectronics Co., Ltd.
42axentia Axentia Technologies AB
42axis Axis Communications AB 43axis Axis Communications AB
43boe BOE Technology Group Co., Ltd. 44boe BOE Technology Group Co., Ltd.
44bosch Bosch Sensortec GmbH 45bosch Bosch Sensortec GmbH
@@ -160,16 +161,19 @@ lltc Linear Technology Corporation
160lsi LSI Corp. (LSI Logic) 161lsi LSI Corp. (LSI Logic)
161marvell Marvell Technology Group Ltd. 162marvell Marvell Technology Group Ltd.
162maxim Maxim Integrated Products 163maxim Maxim Integrated Products
164mcube mCube
163meas Measurement Specialties 165meas Measurement Specialties
164mediatek MediaTek Inc. 166mediatek MediaTek Inc.
165melexis Melexis N.V. 167melexis Melexis N.V.
166melfas MELFAS Inc. 168melfas MELFAS Inc.
169memsic MEMSIC Inc.
167merrii Merrii Technology Co., Ltd. 170merrii Merrii Technology Co., Ltd.
168micrel Micrel Inc. 171micrel Micrel Inc.
169microchip Microchip Technology Inc. 172microchip Microchip Technology Inc.
170microcrystal Micro Crystal AG 173microcrystal Micro Crystal AG
171micron Micron Technology Inc. 174micron Micron Technology Inc.
172minix MINIX Technology Ltd. 175minix MINIX Technology Ltd.
176miramems MiraMEMS Sensing Technology Co., Ltd.
173mitsubishi Mitsubishi Electric Corporation 177mitsubishi Mitsubishi Electric Corporation
174mosaixtech Mosaix Technologies, Inc. 178mosaixtech Mosaix Technologies, Inc.
175moxa Moxa 179moxa Moxa
diff --git a/MAINTAINERS b/MAINTAINERS
index 34ef63763566..116168194d0e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -260,6 +260,12 @@ L: linux-gpio@vger.kernel.org
260S: Maintained 260S: Maintained
261F: drivers/gpio/gpio-104-idio-16.c 261F: drivers/gpio/gpio-104-idio-16.c
262 262
263ACCES 104-QUAD-8 IIO DRIVER
264M: William Breathitt Gray <vilhelm.gray@gmail.com>
265L: linux-iio@vger.kernel.org
266S: Maintained
267F: drivers/iio/counter/104-quad-8.c
268
263ACENIC DRIVER 269ACENIC DRIVER
264M: Jes Sorensen <jes@trained-monkey.org> 270M: Jes Sorensen <jes@trained-monkey.org>
265L: linux-acenic@sunsite.dk 271L: linux-acenic@sunsite.dk
@@ -803,7 +809,7 @@ S: Supported
803F: drivers/iio/*/ad* 809F: drivers/iio/*/ad*
804X: drivers/iio/*/adjd* 810X: drivers/iio/*/adjd*
805F: drivers/staging/iio/*/ad* 811F: drivers/staging/iio/*/ad*
806F: staging/iio/trigger/iio-trig-bfin-timer.c 812F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
807 813
808ANALOG DEVICES INC DMA DRIVERS 814ANALOG DEVICES INC DMA DRIVERS
809M: Lars-Peter Clausen <lars@metafoo.de> 815M: Lars-Peter Clausen <lars@metafoo.de>
@@ -2612,6 +2618,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
2612T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git 2618T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
2613S: Maintained 2619S: Maintained
2614N: bcm2835 2620N: bcm2835
2621F: drivers/staging/vc04_services
2615 2622
2616BROADCOM BCM47XX MIPS ARCHITECTURE 2623BROADCOM BCM47XX MIPS ARCHITECTURE
2617M: Hauke Mehrtens <hauke@hauke-m.de> 2624M: Hauke Mehrtens <hauke@hauke-m.de>
@@ -5192,13 +5199,6 @@ F: sound/soc/fsl/fsl*
5192F: sound/soc/fsl/imx* 5199F: sound/soc/fsl/imx*
5193F: sound/soc/fsl/mpc8610_hpcd.c 5200F: sound/soc/fsl/mpc8610_hpcd.c
5194 5201
5195FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
5196M: "J. German Rivera" <German.Rivera@freescale.com>
5197M: Stuart Yoder <stuart.yoder@nxp.com>
5198L: linux-kernel@vger.kernel.org
5199S: Maintained
5200F: drivers/staging/fsl-mc/
5201
5202FREEVXFS FILESYSTEM 5202FREEVXFS FILESYSTEM
5203M: Christoph Hellwig <hch@infradead.org> 5203M: Christoph Hellwig <hch@infradead.org>
5204W: ftp://ftp.openlinux.org/pub/people/hch/vxfs 5204W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@ -6215,6 +6215,22 @@ L: linux-media@vger.kernel.org
6215S: Maintained 6215S: Maintained
6216F: drivers/media/rc/iguanair.c 6216F: drivers/media/rc/iguanair.c
6217 6217
6218IIO DIGITAL POTENTIOMETER DAC
6219M: Peter Rosin <peda@axentia.se>
6220L: linux-iio@vger.kernel.org
6221S: Maintained
6222F: Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
6223F: Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
6224F: drivers/iio/dac/dpot-dac.c
6225
6226IIO ENVELOPE DETECTOR
6227M: Peter Rosin <peda@axentia.se>
6228L: linux-iio@vger.kernel.org
6229S: Maintained
6230F: Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
6231F: Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
6232F: drivers/iio/adc/envelope-detector.c
6233
6218IIO SUBSYSTEM AND DRIVERS 6234IIO SUBSYSTEM AND DRIVERS
6219M: Jonathan Cameron <jic23@kernel.org> 6235M: Jonathan Cameron <jic23@kernel.org>
6220R: Hartmut Knaack <knaack.h@gmx.de> 6236R: Hartmut Knaack <knaack.h@gmx.de>
@@ -6596,6 +6612,13 @@ S: Maintained
6596F: arch/x86/include/asm/pmc_core.h 6612F: arch/x86/include/asm/pmc_core.h
6597F: drivers/platform/x86/intel_pmc_core* 6613F: drivers/platform/x86/intel_pmc_core*
6598 6614
6615INVENSENSE MPU-3050 GYROSCOPE DRIVER
6616M: Linus Walleij <linus.walleij@linaro.org>
6617L: linux-iio@vger.kernel.org
6618S: Maintained
6619F: drivers/iio/gyro/mpu3050*
6620F: Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
6621
6599IOC3 ETHERNET DRIVER 6622IOC3 ETHERNET DRIVER
6600M: Ralf Baechle <ralf@linux-mips.org> 6623M: Ralf Baechle <ralf@linux-mips.org>
6601L: linux-mips@linux-mips.org 6624L: linux-mips@linux-mips.org
@@ -7803,6 +7826,7 @@ MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVER
7803M: Peter Rosin <peda@axentia.se> 7826M: Peter Rosin <peda@axentia.se>
7804L: linux-iio@vger.kernel.org 7827L: linux-iio@vger.kernel.org
7805S: Maintained 7828S: Maintained
7829F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
7806F: drivers/iio/potentiometer/mcp4531.c 7830F: drivers/iio/potentiometer/mcp4531.c
7807 7831
7808MEASUREMENT COMPUTING CIO-DAC IIO DRIVER 7832MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@ -10056,6 +10080,12 @@ F: fs/qnx4/
10056F: include/uapi/linux/qnx4_fs.h 10080F: include/uapi/linux/qnx4_fs.h
10057F: include/uapi/linux/qnxtypes.h 10081F: include/uapi/linux/qnxtypes.h
10058 10082
10083QORIQ DPAA2 FSL-MC BUS DRIVER
10084M: Stuart Yoder <stuart.yoder@nxp.com>
10085L: linux-kernel@vger.kernel.org
10086S: Maintained
10087F: drivers/staging/fsl-mc/
10088
10059QT1010 MEDIA DRIVER 10089QT1010 MEDIA DRIVER
10060M: Antti Palosaari <crope@iki.fi> 10090M: Antti Palosaari <crope@iki.fi>
10061L: linux-media@vger.kernel.org 10091L: linux-media@vger.kernel.org
@@ -11647,12 +11677,6 @@ L: linux-fbdev@vger.kernel.org
11647S: Maintained 11677S: Maintained
11648F: drivers/staging/sm750fb/ 11678F: drivers/staging/sm750fb/
11649 11679
11650STAGING - SLICOSS
11651M: Lior Dotan <liodot@gmail.com>
11652M: Christopher Harrer <charrer@alacritech.com>
11653S: Odd Fixes
11654F: drivers/staging/slicoss/
11655
11656STAGING - SPEAKUP CONSOLE SPEECH DRIVER 11680STAGING - SPEAKUP CONSOLE SPEECH DRIVER
11657M: William Hubbs <w.d.hubbs@gmail.com> 11681M: William Hubbs <w.d.hubbs@gmail.com>
11658M: Chris Brannon <chris@the-brannons.com> 11682M: Chris Brannon <chris@the-brannons.com>
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6743b18194fb..a918270d6f54 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -73,6 +73,7 @@ source "drivers/iio/adc/Kconfig"
73source "drivers/iio/amplifiers/Kconfig" 73source "drivers/iio/amplifiers/Kconfig"
74source "drivers/iio/chemical/Kconfig" 74source "drivers/iio/chemical/Kconfig"
75source "drivers/iio/common/Kconfig" 75source "drivers/iio/common/Kconfig"
76source "drivers/iio/counter/Kconfig"
76source "drivers/iio/dac/Kconfig" 77source "drivers/iio/dac/Kconfig"
77source "drivers/iio/dummy/Kconfig" 78source "drivers/iio/dummy/Kconfig"
78source "drivers/iio/frequency/Kconfig" 79source "drivers/iio/frequency/Kconfig"
@@ -87,6 +88,7 @@ if IIO_TRIGGER
87 source "drivers/iio/trigger/Kconfig" 88 source "drivers/iio/trigger/Kconfig"
88endif #IIO_TRIGGER 89endif #IIO_TRIGGER
89source "drivers/iio/potentiometer/Kconfig" 90source "drivers/iio/potentiometer/Kconfig"
91source "drivers/iio/potentiostat/Kconfig"
90source "drivers/iio/pressure/Kconfig" 92source "drivers/iio/pressure/Kconfig"
91source "drivers/iio/proximity/Kconfig" 93source "drivers/iio/proximity/Kconfig"
92source "drivers/iio/temperature/Kconfig" 94source "drivers/iio/temperature/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 87e4c4369e2f..33fa4026f92c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,6 +18,7 @@ obj-y += amplifiers/
18obj-y += buffer/ 18obj-y += buffer/
19obj-y += chemical/ 19obj-y += chemical/
20obj-y += common/ 20obj-y += common/
21obj-y += counter/
21obj-y += dac/ 22obj-y += dac/
22obj-y += dummy/ 23obj-y += dummy/
23obj-y += gyro/ 24obj-y += gyro/
@@ -29,6 +30,7 @@ obj-y += light/
29obj-y += magnetometer/ 30obj-y += magnetometer/
30obj-y += orientation/ 31obj-y += orientation/
31obj-y += potentiometer/ 32obj-y += potentiometer/
33obj-y += potentiostat/
32obj-y += pressure/ 34obj-y += pressure/
33obj-y += proximity/ 35obj-y += proximity/
34obj-y += temperature/ 36obj-y += temperature/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 2b791fe1e2bc..c68bdb649005 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,26 @@ config BMC150_ACCEL_SPI
52 tristate 52 tristate
53 select REGMAP_SPI 53 select REGMAP_SPI
54 54
55config DA280
56 tristate "MiraMEMS DA280 3-axis 14-bit digital accelerometer driver"
57 depends on I2C
58 help
59 Say yes here to build support for the MiraMEMS DA280 3-axis 14-bit
60 digital accelerometer.
61
62 To compile this driver as a module, choose M here: the
63 module will be called da280.
64
65config DA311
66 tristate "MiraMEMS DA311 3-axis 12-bit digital accelerometer driver"
67 depends on I2C
68 help
69 Say yes here to build support for the MiraMEMS DA311 3-axis 12-bit
70 digital accelerometer.
71
72 To compile this driver as a module, choose M here: the
73 module will be called da311.
74
55config DMARD06 75config DMARD06
56 tristate "Domintech DMARD06 Digital Accelerometer Driver" 76 tristate "Domintech DMARD06 Digital Accelerometer Driver"
57 depends on OF || COMPILE_TEST 77 depends on OF || COMPILE_TEST
@@ -73,6 +93,16 @@ config DMARD09
73 Choosing M will build the driver as a module. If so, the module 93 Choosing M will build the driver as a module. If so, the module
74 will be called dmard09. 94 will be called dmard09.
75 95
96config DMARD10
97 tristate "Domintech DMARD10 3-axis Accelerometer Driver"
98 depends on I2C
99 help
100 Say yes here to get support for the Domintech DMARD10 3-axis
101 accelerometer.
102
103 Choosing M will build the driver as a module. If so, the module
104 will be called dmard10.
105
76config HID_SENSOR_ACCEL_3D 106config HID_SENSOR_ACCEL_3D
77 depends on HID_SENSOR_HUB 107 depends on HID_SENSOR_HUB
78 select IIO_BUFFER 108 select IIO_BUFFER
@@ -97,7 +127,8 @@ config IIO_ST_ACCEL_3AXIS
97 help 127 help
98 Say yes here to build support for STMicroelectronics accelerometers: 128 Say yes here to build support for STMicroelectronics accelerometers:
99 LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC, 129 LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
100 LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL. 130 LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
131 LNG2DM
101 132
102 This driver can also be built as a module. If so, these modules 133 This driver can also be built as a module. If so, these modules
103 will be created: 134 will be created:
@@ -273,6 +304,18 @@ config MXC6255
273 To compile this driver as a module, choose M here: the module will be 304 To compile this driver as a module, choose M here: the module will be
274 called mxc6255. 305 called mxc6255.
275 306
307config SCA3000
308 select IIO_BUFFER
309 select IIO_KFIFO_BUF
310 depends on SPI
311 tristate "VTI SCA3000 series accelerometers"
312 help
313 Say Y here to build support for the VTI SCA3000 series of SPI
314 accelerometers. These devices use a hardware ring buffer.
315
316 To compile this driver as a module, say M here: the module will be
317 called sca3000.
318
276config STK8312 319config STK8312
277 tristate "Sensortek STK8312 3-Axis Accelerometer Driver" 320 tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
278 depends on I2C 321 depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index f5d3ddee619e..69fe8edc57a2 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_BMA220) += bma220_spi.o
8obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o 8obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
9obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o 9obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
10obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o 10obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
11obj-$(CONFIG_DA280) += da280.o
12obj-$(CONFIG_DA311) += da311.o
11obj-$(CONFIG_DMARD06) += dmard06.o 13obj-$(CONFIG_DMARD06) += dmard06.o
12obj-$(CONFIG_DMARD09) += dmard09.o 14obj-$(CONFIG_DMARD09) += dmard09.o
15obj-$(CONFIG_DMARD10) += dmard10.o
13obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o 16obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
14obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o 17obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
15obj-$(CONFIG_KXSD9) += kxsd9.o 18obj-$(CONFIG_KXSD9) += kxsd9.o
@@ -32,6 +35,8 @@ obj-$(CONFIG_MMA9553) += mma9553.o
32obj-$(CONFIG_MXC4005) += mxc4005.o 35obj-$(CONFIG_MXC4005) += mxc4005.o
33obj-$(CONFIG_MXC6255) += mxc6255.o 36obj-$(CONFIG_MXC6255) += mxc6255.o
34 37
38obj-$(CONFIG_SCA3000) += sca3000.o
39
35obj-$(CONFIG_STK8312) += stk8312.o 40obj-$(CONFIG_STK8312) += stk8312.o
36obj-$(CONFIG_STK8BA50) += stk8ba50.o 41obj-$(CONFIG_STK8BA50) += stk8ba50.o
37 42
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
new file mode 100644
index 000000000000..ed8343aeac9c
--- /dev/null
+++ b/drivers/iio/accel/da280.c
@@ -0,0 +1,183 @@
1/**
2 * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
3 * IIO driver for the MiraMEMS DA226 2-axis accelerometer
4 *
5 * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/iio/iio.h>
15#include <linux/iio/sysfs.h>
16#include <linux/byteorder/generic.h>
17
18#define DA280_REG_CHIP_ID 0x01
19#define DA280_REG_ACC_X_LSB 0x02
20#define DA280_REG_ACC_Y_LSB 0x04
21#define DA280_REG_ACC_Z_LSB 0x06
22#define DA280_REG_MODE_BW 0x11
23
24#define DA280_CHIP_ID 0x13
25#define DA280_MODE_ENABLE 0x1e
26#define DA280_MODE_DISABLE 0x9e
27
28enum { da226, da280 };
29
30/*
31 * a value of + or -4096 corresponds to + or - 1G
32 * scale = 9.81 / 4096 = 0.002395019
33 */
34
35static const int da280_nscale = 2395019;
36
37#define DA280_CHANNEL(reg, axis) { \
38 .type = IIO_ACCEL, \
39 .address = reg, \
40 .modified = 1, \
41 .channel2 = IIO_MOD_##axis, \
42 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
43 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
44}
45
46static const struct iio_chan_spec da280_channels[] = {
47 DA280_CHANNEL(DA280_REG_ACC_X_LSB, X),
48 DA280_CHANNEL(DA280_REG_ACC_Y_LSB, Y),
49 DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
50};
51
52struct da280_data {
53 struct i2c_client *client;
54};
55
56static int da280_enable(struct i2c_client *client, bool enable)
57{
58 u8 data = enable ? DA280_MODE_ENABLE : DA280_MODE_DISABLE;
59
60 return i2c_smbus_write_byte_data(client, DA280_REG_MODE_BW, data);
61}
62
63static int da280_read_raw(struct iio_dev *indio_dev,
64 struct iio_chan_spec const *chan,
65 int *val, int *val2, long mask)
66{
67 struct da280_data *data = iio_priv(indio_dev);
68 int ret;
69
70 switch (mask) {
71 case IIO_CHAN_INFO_RAW:
72 ret = i2c_smbus_read_word_data(data->client, chan->address);
73 if (ret < 0)
74 return ret;
75 /*
76 * Values are 14 bits, stored as 16 bits with the 2
77 * least significant bits always 0.
78 */
79 *val = (short)ret >> 2;
80 return IIO_VAL_INT;
81 case IIO_CHAN_INFO_SCALE:
82 *val = 0;
83 *val2 = da280_nscale;
84 return IIO_VAL_INT_PLUS_NANO;
85 default:
86 return -EINVAL;
87 }
88}
89
90static const struct iio_info da280_info = {
91 .driver_module = THIS_MODULE,
92 .read_raw = da280_read_raw,
93};
94
95static int da280_probe(struct i2c_client *client,
96 const struct i2c_device_id *id)
97{
98 int ret;
99 struct iio_dev *indio_dev;
100 struct da280_data *data;
101
102 ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
103 if (ret != DA280_CHIP_ID)
104 return (ret < 0) ? ret : -ENODEV;
105
106 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
107 if (!indio_dev)
108 return -ENOMEM;
109
110 data = iio_priv(indio_dev);
111 data->client = client;
112 i2c_set_clientdata(client, indio_dev);
113
114 indio_dev->dev.parent = &client->dev;
115 indio_dev->info = &da280_info;
116 indio_dev->modes = INDIO_DIRECT_MODE;
117 indio_dev->channels = da280_channels;
118 if (id->driver_data == da226) {
119 indio_dev->name = "da226";
120 indio_dev->num_channels = 2;
121 } else {
122 indio_dev->name = "da280";
123 indio_dev->num_channels = 3;
124 }
125
126 ret = da280_enable(client, true);
127 if (ret < 0)
128 return ret;
129
130 ret = iio_device_register(indio_dev);
131 if (ret < 0) {
132 dev_err(&client->dev, "device_register failed\n");
133 da280_enable(client, false);
134 }
135
136 return ret;
137}
138
139static int da280_remove(struct i2c_client *client)
140{
141 struct iio_dev *indio_dev = i2c_get_clientdata(client);
142
143 iio_device_unregister(indio_dev);
144
145 return da280_enable(client, false);
146}
147
148#ifdef CONFIG_PM_SLEEP
149static int da280_suspend(struct device *dev)
150{
151 return da280_enable(to_i2c_client(dev), false);
152}
153
154static int da280_resume(struct device *dev)
155{
156 return da280_enable(to_i2c_client(dev), true);
157}
158#endif
159
160static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
161
162static const struct i2c_device_id da280_i2c_id[] = {
163 { "da226", da226 },
164 { "da280", da280 },
165 {}
166};
167MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
168
169static struct i2c_driver da280_driver = {
170 .driver = {
171 .name = "da280",
172 .pm = &da280_pm_ops,
173 },
174 .probe = da280_probe,
175 .remove = da280_remove,
176 .id_table = da280_i2c_id,
177};
178
179module_i2c_driver(da280_driver);
180
181MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
182MODULE_DESCRIPTION("MiraMEMS DA280 3-Axis Accelerometer driver");
183MODULE_LICENSE("GPL v2");
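As the scale comment in the driver spells out, ±4096 raw counts correspond to ±1 g, so userspace converts in_accel_*_raw to m/s² by multiplying with in_accel_scale (0.002395019 here). The same pattern applies to the da311 and dmard10 drivers further down, only with different counts per g. A sketch of the conversion, with hypothetical names:

/* Sketch: convert a raw accelerometer count to m/s^2 using an IIO scale
 * reported as IIO_VAL_INT_PLUS_NANO (integer part plus nano part). */
static double accel_raw_to_ms2(int raw, int scale_int, int scale_nano)
{
	return raw * ((double)scale_int + (double)scale_nano / 1e9);
}

/* e.g. da280: accel_raw_to_ms2(4096, 0, 2395019) ~= 9.81 m/s^2 */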
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
new file mode 100644
index 000000000000..537cfa8b6edf
--- /dev/null
+++ b/drivers/iio/accel/da311.c
@@ -0,0 +1,305 @@
1/**
2 * IIO driver for the MiraMEMS DA311 3-axis accelerometer
3 *
4 * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
5 * Copyright (c) 2011-2013 MiraMEMS Sensing Technology Co., Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/iio/iio.h>
15#include <linux/iio/sysfs.h>
16#include <linux/byteorder/generic.h>
17
18#define DA311_CHIP_ID 0x13
19
20/*
21 * Note: register addresses go from 0 - 0x3f and then wrap.
22 * For some reason there are 2 banks with 0 - 0x3f addresses,
23 * rather than a single 0 - 0x7f bank.
24 */
25
26/* Bank 0 regs */
27#define DA311_REG_BANK 0x0000
28#define DA311_REG_LDO_REG 0x0006
29#define DA311_REG_CHIP_ID 0x000f
30#define DA311_REG_TEMP_CFG_REG 0x001f
31#define DA311_REG_CTRL_REG1 0x0020
32#define DA311_REG_CTRL_REG3 0x0022
33#define DA311_REG_CTRL_REG4 0x0023
34#define DA311_REG_CTRL_REG5 0x0024
35#define DA311_REG_CTRL_REG6 0x0025
36#define DA311_REG_STATUS_REG 0x0027
37#define DA311_REG_OUT_X_L 0x0028
38#define DA311_REG_OUT_X_H 0x0029
39#define DA311_REG_OUT_Y_L 0x002a
40#define DA311_REG_OUT_Y_H 0x002b
41#define DA311_REG_OUT_Z_L 0x002c
42#define DA311_REG_OUT_Z_H 0x002d
43#define DA311_REG_INT1_CFG 0x0030
44#define DA311_REG_INT1_SRC 0x0031
45#define DA311_REG_INT1_THS 0x0032
46#define DA311_REG_INT1_DURATION 0x0033
47#define DA311_REG_INT2_CFG 0x0034
48#define DA311_REG_INT2_SRC 0x0035
49#define DA311_REG_INT2_THS 0x0036
50#define DA311_REG_INT2_DURATION 0x0037
51#define DA311_REG_CLICK_CFG 0x0038
52#define DA311_REG_CLICK_SRC 0x0039
53#define DA311_REG_CLICK_THS 0x003a
54#define DA311_REG_TIME_LIMIT 0x003b
55#define DA311_REG_TIME_LATENCY 0x003c
56#define DA311_REG_TIME_WINDOW 0x003d
57
58/* Bank 1 regs */
59#define DA311_REG_SOFT_RESET 0x0105
60#define DA311_REG_OTP_XOFF_L 0x0110
61#define DA311_REG_OTP_XOFF_H 0x0111
62#define DA311_REG_OTP_YOFF_L 0x0112
63#define DA311_REG_OTP_YOFF_H 0x0113
64#define DA311_REG_OTP_ZOFF_L 0x0114
65#define DA311_REG_OTP_ZOFF_H 0x0115
66#define DA311_REG_OTP_XSO 0x0116
67#define DA311_REG_OTP_YSO 0x0117
68#define DA311_REG_OTP_ZSO 0x0118
69#define DA311_REG_OTP_TRIM_OSC 0x011b
70#define DA311_REG_LPF_ABSOLUTE 0x011c
71#define DA311_REG_TEMP_OFF1 0x0127
72#define DA311_REG_TEMP_OFF2 0x0128
73#define DA311_REG_TEMP_OFF3 0x0129
74#define DA311_REG_OTP_TRIM_THERM_H 0x011a
75
76/*
77 * a value of + or -1024 corresponds to + or - 1G
78 * scale = 9.81 / 1024 = 0.009580078
79 */
80
81static const int da311_nscale = 9580078;
82
83#define DA311_CHANNEL(reg, axis) { \
84 .type = IIO_ACCEL, \
85 .address = reg, \
86 .modified = 1, \
87 .channel2 = IIO_MOD_##axis, \
88 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
89 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
90}
91
92static const struct iio_chan_spec da311_channels[] = {
93 /* | 0x80 comes from the android driver */
94 DA311_CHANNEL(DA311_REG_OUT_X_L | 0x80, X),
95 DA311_CHANNEL(DA311_REG_OUT_Y_L | 0x80, Y),
96 DA311_CHANNEL(DA311_REG_OUT_Z_L | 0x80, Z),
97};
98
99struct da311_data {
100 struct i2c_client *client;
101};
102
103static int da311_register_mask_write(struct i2c_client *client, u16 addr,
104 u8 mask, u8 data)
105{
106 int ret;
107 u8 tmp_data = 0;
108
109 if (addr & 0xff00) {
110 /* Select bank 1 */
111 ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x01);
112 if (ret < 0)
113 return ret;
114 }
115
116 if (mask != 0xff) {
117 ret = i2c_smbus_read_byte_data(client, addr);
118 if (ret < 0)
119 return ret;
120 tmp_data = ret;
121 }
122
123 tmp_data &= ~mask;
124 tmp_data |= data & mask;
125 ret = i2c_smbus_write_byte_data(client, addr & 0xff, tmp_data);
126 if (ret < 0)
127 return ret;
128
129 if (addr & 0xff00) {
130 /* Back to bank 0 */
131 ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x00);
132 if (ret < 0)
133 return ret;
134 }
135
136 return 0;
137}
138
139/* Init sequence taken from the android driver */
140static int da311_reset(struct i2c_client *client)
141{
142 const struct {
143 u16 addr;
144 u8 mask;
145 u8 data;
146 } init_data[] = {
147 { DA311_REG_TEMP_CFG_REG, 0xff, 0x08 },
148 { DA311_REG_CTRL_REG5, 0xff, 0x80 },
149 { DA311_REG_CTRL_REG4, 0x30, 0x00 },
150 { DA311_REG_CTRL_REG1, 0xff, 0x6f },
151 { DA311_REG_TEMP_CFG_REG, 0xff, 0x88 },
152 { DA311_REG_LDO_REG, 0xff, 0x02 },
153 { DA311_REG_OTP_TRIM_OSC, 0xff, 0x27 },
154 { DA311_REG_LPF_ABSOLUTE, 0xff, 0x30 },
155 { DA311_REG_TEMP_OFF1, 0xff, 0x3f },
156 { DA311_REG_TEMP_OFF2, 0xff, 0xff },
157 { DA311_REG_TEMP_OFF3, 0xff, 0x0f },
158 };
159 int i, ret;
160
161 /* Reset */
162 ret = da311_register_mask_write(client, DA311_REG_SOFT_RESET,
163 0xff, 0xaa);
164 if (ret < 0)
165 return ret;
166
167 for (i = 0; i < ARRAY_SIZE(init_data); i++) {
168 ret = da311_register_mask_write(client,
169 init_data[i].addr,
170 init_data[i].mask,
171 init_data[i].data);
172 if (ret < 0)
173 return ret;
174 }
175
176 return 0;
177}
178
179static int da311_enable(struct i2c_client *client, bool enable)
180{
181 u8 data = enable ? 0x00 : 0x20;
182
183 return da311_register_mask_write(client, DA311_REG_TEMP_CFG_REG,
184 0x20, data);
185}
186
187static int da311_read_raw(struct iio_dev *indio_dev,
188 struct iio_chan_spec const *chan,
189 int *val, int *val2, long mask)
190{
191 struct da311_data *data = iio_priv(indio_dev);
192 int ret;
193
194 switch (mask) {
195 case IIO_CHAN_INFO_RAW:
196 ret = i2c_smbus_read_word_data(data->client, chan->address);
197 if (ret < 0)
198 return ret;
199 /*
200 * Values are 12 bits, stored as 16 bits with the 4
201 * least significant bits always 0.
202 */
203 *val = (short)ret >> 4;
204 return IIO_VAL_INT;
205 case IIO_CHAN_INFO_SCALE:
206 *val = 0;
207 *val2 = da311_nscale;
208 return IIO_VAL_INT_PLUS_NANO;
209 default:
210 return -EINVAL;
211 }
212}
213
214static const struct iio_info da311_info = {
215 .driver_module = THIS_MODULE,
216 .read_raw = da311_read_raw,
217};
218
219static int da311_probe(struct i2c_client *client,
220 const struct i2c_device_id *id)
221{
222 int ret;
223 struct iio_dev *indio_dev;
224 struct da311_data *data;
225
226 ret = i2c_smbus_read_byte_data(client, DA311_REG_CHIP_ID);
227 if (ret != DA311_CHIP_ID)
228 return (ret < 0) ? ret : -ENODEV;
229
230 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
231 if (!indio_dev)
232 return -ENOMEM;
233
234 data = iio_priv(indio_dev);
235 data->client = client;
236 i2c_set_clientdata(client, indio_dev);
237
238 indio_dev->dev.parent = &client->dev;
239 indio_dev->info = &da311_info;
240 indio_dev->name = "da311";
241 indio_dev->modes = INDIO_DIRECT_MODE;
242 indio_dev->channels = da311_channels;
243 indio_dev->num_channels = ARRAY_SIZE(da311_channels);
244
245 ret = da311_reset(client);
246 if (ret < 0)
247 return ret;
248
249 ret = da311_enable(client, true);
250 if (ret < 0)
251 return ret;
252
253 ret = iio_device_register(indio_dev);
254 if (ret < 0) {
255 dev_err(&client->dev, "device_register failed\n");
256 da311_enable(client, false);
257 }
258
259 return ret;
260}
261
262static int da311_remove(struct i2c_client *client)
263{
264 struct iio_dev *indio_dev = i2c_get_clientdata(client);
265
266 iio_device_unregister(indio_dev);
267
268 return da311_enable(client, false);
269}
270
271#ifdef CONFIG_PM_SLEEP
272static int da311_suspend(struct device *dev)
273{
274 return da311_enable(to_i2c_client(dev), false);
275}
276
277static int da311_resume(struct device *dev)
278{
279 return da311_enable(to_i2c_client(dev), true);
280}
281#endif
282
283static SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
284
285static const struct i2c_device_id da311_i2c_id[] = {
286 {"da311", 0},
287 {}
288};
289MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
290
291static struct i2c_driver da311_driver = {
292 .driver = {
293 .name = "da311",
294 .pm = &da311_pm_ops,
295 },
296 .probe = da311_probe,
297 .remove = da311_remove,
298 .id_table = da311_i2c_id,
299};
300
301module_i2c_driver(da311_driver);
302
303MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
304MODULE_DESCRIPTION("MiraMEMS DA311 3-Axis Accelerometer driver");
305MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
new file mode 100644
index 000000000000..b8736cc75656
--- /dev/null
+++ b/drivers/iio/accel/dmard10.c
@@ -0,0 +1,266 @@
1/**
2 * IIO driver for the 3-axis accelerometer Domintech ARD10.
3 *
4 * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
5 * Copyright (c) 2012 Domintech Technology Co., Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/iio/iio.h>
15#include <linux/iio/sysfs.h>
16#include <linux/byteorder/generic.h>
17
18#define DMARD10_REG_ACTR 0x00
19#define DMARD10_REG_AFEM 0x0c
20#define DMARD10_REG_STADR 0x12
21#define DMARD10_REG_STAINT 0x1c
22#define DMARD10_REG_MISC2 0x1f
23#define DMARD10_REG_PD 0x21
24
25#define DMARD10_MODE_OFF 0x00
26#define DMARD10_MODE_STANDBY 0x02
27#define DMARD10_MODE_ACTIVE 0x06
28#define DMARD10_MODE_READ_OTP 0x12
29#define DMARD10_MODE_RESET_DATA_PATH 0x82
30
31/* AFEN set 1, ATM[2:0]=b'000 (normal), EN_Z/Y/X/T=1 */
32#define DMARD10_VALUE_AFEM_AFEN_NORMAL 0x8f
33/* ODR[3:0]=b'0111 (100Hz), CCK[3:0]=b'0100 (204.8kHZ) */
34#define DMARD10_VALUE_CKSEL_ODR_100_204 0x74
35/* INTC[6:5]=b'00 */
36#define DMARD10_VALUE_INTC 0x00
37/* TAP1/TAP2 Average 2 */
38#define DMARD10_VALUE_TAPNS_AVE_2 0x11
39
40#define DMARD10_VALUE_STADR 0x55
41#define DMARD10_VALUE_STAINT 0xaa
42#define DMARD10_VALUE_MISC2_OSCA_EN 0x08
43#define DMARD10_VALUE_PD_RST 0x52
44
45/* Offsets into the buffer read in dmard10_read_raw() */
46#define DMARD10_X_OFFSET 1
47#define DMARD10_Y_OFFSET 2
48#define DMARD10_Z_OFFSET 3
49
50/*
51 * a value of + or -128 corresponds to + or - 1G
52 * scale = 9.81 / 128 = 0.076640625
53 */
54
55static const int dmard10_nscale = 76640625;
56
57#define DMARD10_CHANNEL(reg, axis) { \
58 .type = IIO_ACCEL, \
59 .address = reg, \
60 .modified = 1, \
61 .channel2 = IIO_MOD_##axis, \
62 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
63 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
64}
65
66static const struct iio_chan_spec dmard10_channels[] = {
67 DMARD10_CHANNEL(DMARD10_X_OFFSET, X),
68 DMARD10_CHANNEL(DMARD10_Y_OFFSET, Y),
69 DMARD10_CHANNEL(DMARD10_Z_OFFSET, Z),
70};
71
72struct dmard10_data {
73 struct i2c_client *client;
74};
75
76/* Init sequence taken from the android driver */
77static int dmard10_reset(struct i2c_client *client)
78{
79 unsigned char buffer[7];
80 int ret;
81
82 /* 1. Powerdown reset */
83 ret = i2c_smbus_write_byte_data(client, DMARD10_REG_PD,
84 DMARD10_VALUE_PD_RST);
85 if (ret < 0)
86 return ret;
87
88 /*
89 * 2. ACTR => Standby mode => Download OTP to parameter reg =>
90 * Standby mode => Reset data path => Standby mode
91 */
92 buffer[0] = DMARD10_REG_ACTR;
93 buffer[1] = DMARD10_MODE_STANDBY;
94 buffer[2] = DMARD10_MODE_READ_OTP;
95 buffer[3] = DMARD10_MODE_STANDBY;
96 buffer[4] = DMARD10_MODE_RESET_DATA_PATH;
97 buffer[5] = DMARD10_MODE_STANDBY;
98 ret = i2c_master_send(client, buffer, 6);
99 if (ret < 0)
100 return ret;
101
102 /* 3. OSCA_EN = 1, TSTO = b'000 (INT1 = normal, TEST0 = normal) */
103 ret = i2c_smbus_write_byte_data(client, DMARD10_REG_MISC2,
104 DMARD10_VALUE_MISC2_OSCA_EN);
105 if (ret < 0)
106 return ret;
107
108 /* 4. AFEN = 1 (AFE will powerdown after ADC) */
109 buffer[0] = DMARD10_REG_AFEM;
110 buffer[1] = DMARD10_VALUE_AFEM_AFEN_NORMAL;
111 buffer[2] = DMARD10_VALUE_CKSEL_ODR_100_204;
112 buffer[3] = DMARD10_VALUE_INTC;
113 buffer[4] = DMARD10_VALUE_TAPNS_AVE_2;
114 buffer[5] = 0x00; /* DLYC, no delay timing */
115 buffer[6] = 0x07; /* INTD=1 push-pull, INTA=1 active high, AUTOT=1 */
116 ret = i2c_master_send(client, buffer, 7);
117 if (ret < 0)
118 return ret;
119
120 /* 5. Activation mode */
121 ret = i2c_smbus_write_byte_data(client, DMARD10_REG_ACTR,
122 DMARD10_MODE_ACTIVE);
123 if (ret < 0)
124 return ret;
125
126 return 0;
127}
128
129/* Shutdown sequence taken from the android driver */
130static int dmard10_shutdown(struct i2c_client *client)
131{
132 unsigned char buffer[3];
133
134 buffer[0] = DMARD10_REG_ACTR;
135 buffer[1] = DMARD10_MODE_STANDBY;
136 buffer[2] = DMARD10_MODE_OFF;
137
138 return i2c_master_send(client, buffer, 3);
139}
140
141static int dmard10_read_raw(struct iio_dev *indio_dev,
142 struct iio_chan_spec const *chan,
143 int *val, int *val2, long mask)
144{
145 struct dmard10_data *data = iio_priv(indio_dev);
146 __le16 buf[4];
147 int ret;
148
149 switch (mask) {
150 case IIO_CHAN_INFO_RAW:
151 /*
152 * Read 8 bytes starting at the REG_STADR register; trying to
153 * read the individual X, Y and Z registers will always return 0.
154 */
155 ret = i2c_smbus_read_i2c_block_data(data->client,
156 DMARD10_REG_STADR,
157 sizeof(buf), (u8 *)buf);
158 if (ret < 0)
159 return ret;
160 ret = le16_to_cpu(buf[chan->address]);
161 *val = sign_extend32(ret, 12);
162 return IIO_VAL_INT;
163 case IIO_CHAN_INFO_SCALE:
164 *val = 0;
165 *val2 = dmard10_nscale;
166 return IIO_VAL_INT_PLUS_NANO;
167 default:
168 return -EINVAL;
169 }
170}
171
172static const struct iio_info dmard10_info = {
173 .driver_module = THIS_MODULE,
174 .read_raw = dmard10_read_raw,
175};
176
177static int dmard10_probe(struct i2c_client *client,
178 const struct i2c_device_id *id)
179{
180 int ret;
181 struct iio_dev *indio_dev;
182 struct dmard10_data *data;
183
184 /* These 2 registers have special POR reset values used for id */
185 ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STADR);
186 if (ret != DMARD10_VALUE_STADR)
187 return (ret < 0) ? ret : -ENODEV;
188
189 ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STAINT);
190 if (ret != DMARD10_VALUE_STAINT)
191 return (ret < 0) ? ret : -ENODEV;
192
193 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
194 if (!indio_dev) {
195 dev_err(&client->dev, "iio allocation failed!\n");
196 return -ENOMEM;
197 }
198
199 data = iio_priv(indio_dev);
200 data->client = client;
201 i2c_set_clientdata(client, indio_dev);
202
203 indio_dev->dev.parent = &client->dev;
204 indio_dev->info = &dmard10_info;
205 indio_dev->name = "dmard10";
206 indio_dev->modes = INDIO_DIRECT_MODE;
207 indio_dev->channels = dmard10_channels;
208 indio_dev->num_channels = ARRAY_SIZE(dmard10_channels);
209
210 ret = dmard10_reset(client);
211 if (ret < 0)
212 return ret;
213
214 ret = iio_device_register(indio_dev);
215 if (ret < 0) {
216 dev_err(&client->dev, "device_register failed\n");
217 dmard10_shutdown(client);
218 }
219
220 return ret;
221}
222
223static int dmard10_remove(struct i2c_client *client)
224{
225 struct iio_dev *indio_dev = i2c_get_clientdata(client);
226
227 iio_device_unregister(indio_dev);
228
229 return dmard10_shutdown(client);
230}
231
232#ifdef CONFIG_PM_SLEEP
233static int dmard10_suspend(struct device *dev)
234{
235 return dmard10_shutdown(to_i2c_client(dev));
236}
237
238static int dmard10_resume(struct device *dev)
239{
240 return dmard10_reset(to_i2c_client(dev));
241}
242#endif
243
244static SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend, dmard10_resume);
245
246static const struct i2c_device_id dmard10_i2c_id[] = {
247 {"dmard10", 0},
248 {}
249};
250MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
251
252static struct i2c_driver dmard10_driver = {
253 .driver = {
254 .name = "dmard10",
255 .pm = &dmard10_pm_ops,
256 },
257 .probe = dmard10_probe,
258 .remove = dmard10_remove,
259 .id_table = dmard10_i2c_id,
260};
261
262module_i2c_driver(dmard10_driver);
263
264MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
265MODULE_DESCRIPTION("Domintech ARD10 3-Axis Accelerometer driver");
266MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 03beadf14ad3..3a40774cca74 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -39,7 +39,7 @@
39 39
40#define MMA7660_SCALE_AVAIL "0.467142857" 40#define MMA7660_SCALE_AVAIL "0.467142857"
41 41
42const int mma7660_nscale = 467142857; 42static const int mma7660_nscale = 467142857;
43 43
44#define MMA7660_CHANNEL(reg, axis) { \ 44#define MMA7660_CHANNEL(reg, axis) { \
45 .type = IIO_ACCEL, \ 45 .type = IIO_ACCEL, \
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index d41e1b588e68..f418c588af6a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -459,12 +459,14 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
459 459
460 switch (mask) { 460 switch (mask) {
461 case IIO_CHAN_INFO_RAW: 461 case IIO_CHAN_INFO_RAW:
462 if (iio_buffer_enabled(indio_dev)) 462 ret = iio_device_claim_direct_mode(indio_dev);
463 return -EBUSY; 463 if (ret)
464 return ret;
464 465
465 mutex_lock(&data->lock); 466 mutex_lock(&data->lock);
466 ret = mma8452_read(data, buffer); 467 ret = mma8452_read(data, buffer);
467 mutex_unlock(&data->lock); 468 mutex_unlock(&data->lock);
469 iio_device_release_direct_mode(indio_dev);
468 if (ret < 0) 470 if (ret < 0)
469 return ret; 471 return ret;
470 472
@@ -664,37 +666,46 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
664 struct mma8452_data *data = iio_priv(indio_dev); 666 struct mma8452_data *data = iio_priv(indio_dev);
665 int i, ret; 667 int i, ret;
666 668
667 if (iio_buffer_enabled(indio_dev)) 669 ret = iio_device_claim_direct_mode(indio_dev);
668 return -EBUSY; 670 if (ret)
671 return ret;
669 672
670 switch (mask) { 673 switch (mask) {
671 case IIO_CHAN_INFO_SAMP_FREQ: 674 case IIO_CHAN_INFO_SAMP_FREQ:
672 i = mma8452_get_samp_freq_index(data, val, val2); 675 i = mma8452_get_samp_freq_index(data, val, val2);
673 if (i < 0) 676 if (i < 0) {
674 return i; 677 ret = i;
675 678 break;
679 }
676 data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK; 680 data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
677 data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT; 681 data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
678 682
679 return mma8452_change_config(data, MMA8452_CTRL_REG1, 683 ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
680 data->ctrl_reg1); 684 data->ctrl_reg1);
685 break;
681 case IIO_CHAN_INFO_SCALE: 686 case IIO_CHAN_INFO_SCALE:
682 i = mma8452_get_scale_index(data, val, val2); 687 i = mma8452_get_scale_index(data, val, val2);
683 if (i < 0) 688 if (i < 0) {
684 return i; 689 ret = i;
690 break;
691 }
685 692
686 data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK; 693 data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
687 data->data_cfg |= i; 694 data->data_cfg |= i;
688 695
689 return mma8452_change_config(data, MMA8452_DATA_CFG, 696 ret = mma8452_change_config(data, MMA8452_DATA_CFG,
690 data->data_cfg); 697 data->data_cfg);
698 break;
691 case IIO_CHAN_INFO_CALIBBIAS: 699 case IIO_CHAN_INFO_CALIBBIAS:
692 if (val < -128 || val > 127) 700 if (val < -128 || val > 127) {
693 return -EINVAL; 701 ret = -EINVAL;
702 break;
703 }
694 704
695 return mma8452_change_config(data, 705 ret = mma8452_change_config(data,
696 MMA8452_OFF_X + chan->scan_index, 706 MMA8452_OFF_X + chan->scan_index,
697 val); 707 val);
708 break;
698 709
699 case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: 710 case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
700 if (val == 0 && val2 == 0) { 711 if (val == 0 && val2 == 0) {
@@ -703,23 +714,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
703 data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK; 714 data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
704 ret = mma8452_set_hp_filter_frequency(data, val, val2); 715 ret = mma8452_set_hp_filter_frequency(data, val, val2);
705 if (ret < 0) 716 if (ret < 0)
706 return ret; 717 break;
707 } 718 }
708 719
709 return mma8452_change_config(data, MMA8452_DATA_CFG, 720 ret = mma8452_change_config(data, MMA8452_DATA_CFG,
710 data->data_cfg); 721 data->data_cfg);
722 break;
711 723
712 case IIO_CHAN_INFO_OVERSAMPLING_RATIO: 724 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
713 ret = mma8452_get_odr_index(data); 725 ret = mma8452_get_odr_index(data);
714 726
715 for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { 727 for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
716 if (mma8452_os_ratio[i][ret] == val) 728 if (mma8452_os_ratio[i][ret] == val) {
717 return mma8452_set_power_mode(data, i); 729 ret = mma8452_set_power_mode(data, i);
730 break;
731 }
718 } 732 }
719 733 break;
720 default: 734 default:
721 return -EINVAL; 735 ret = -EINVAL;
736 break;
722 } 737 }
738
739 iio_device_release_direct_mode(indio_dev);
740 return ret;
723} 741}
724 742
725static int mma8452_read_thresh(struct iio_dev *indio_dev, 743static int mma8452_read_thresh(struct iio_dev *indio_dev,
@@ -1347,20 +1365,9 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
1347 return mma8452_change_config(data, MMA8452_CTRL_REG4, reg); 1365 return mma8452_change_config(data, MMA8452_CTRL_REG4, reg);
1348} 1366}
1349 1367
1350static int mma8452_validate_device(struct iio_trigger *trig,
1351 struct iio_dev *indio_dev)
1352{
1353 struct iio_dev *indio = iio_trigger_get_drvdata(trig);
1354
1355 if (indio != indio_dev)
1356 return -EINVAL;
1357
1358 return 0;
1359}
1360
1361static const struct iio_trigger_ops mma8452_trigger_ops = { 1368static const struct iio_trigger_ops mma8452_trigger_ops = {
1362 .set_trigger_state = mma8452_data_rdy_trigger_set_state, 1369 .set_trigger_state = mma8452_data_rdy_trigger_set_state,
1363 .validate_device = mma8452_validate_device, 1370 .validate_device = iio_trigger_validate_own_device,
1364 .owner = THIS_MODULE, 1371 .owner = THIS_MODULE,
1365}; 1372};
1366 1373
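The mma8452 hunks above replace open-coded iio_buffer_enabled() checks with the IIO core claim/release helpers, so direct reads and writes now fail cleanly with -EBUSY while buffered capture owns the device. A minimal sketch of the resulting pattern, assuming a hypothetical foo_hw_read() register helper that is not part of this patch:

#include <linux/iio/iio.h>

/* hypothetical register accessor, stands in for the driver's own read path */
static int foo_hw_read(struct iio_dev *indio_dev, int *val);

static int foo_read_locked(struct iio_dev *indio_dev, int *val)
{
	int ret;

	/* -EBUSY while a buffer is enabled and owns the device */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	ret = foo_hw_read(indio_dev, val);

	/* released on every path, including when the read fails */
	iio_device_release_direct_mode(indio_dev);

	return ret;
}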
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
new file mode 100644
index 000000000000..cb1d83fa19a0
--- /dev/null
+++ b/drivers/iio/accel/sca3000.c
@@ -0,0 +1,1576 @@
1/*
2 * sca3000.c -- support VTI sca3000 series accelerometers via SPI
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
9 *
10 * See industrialio/accels/sca3000.h for comments.
11 */
12
13#include <linux/interrupt.h>
14#include <linux/fs.h>
15#include <linux/device.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/spi/spi.h>
19#include <linux/sysfs.h>
20#include <linux/module.h>
21#include <linux/uaccess.h>
22#include <linux/iio/iio.h>
23#include <linux/iio/sysfs.h>
24#include <linux/iio/events.h>
25#include <linux/iio/buffer.h>
26#include <linux/iio/kfifo_buf.h>
27
28#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
29#define SCA3000_READ_REG(a) ((a) << 2)
30
31#define SCA3000_REG_REVID_ADDR 0x00
32#define SCA3000_REG_REVID_MAJOR_MASK GENMASK(8, 4)
33#define SCA3000_REG_REVID_MINOR_MASK GENMASK(3, 0)
34
35#define SCA3000_REG_STATUS_ADDR 0x02
36#define SCA3000_LOCKED BIT(5)
37#define SCA3000_EEPROM_CS_ERROR BIT(1)
38#define SCA3000_SPI_FRAME_ERROR BIT(0)
39
40/* All reads done using register decrement so no need to directly access LSBs */
41#define SCA3000_REG_X_MSB_ADDR 0x05
42#define SCA3000_REG_Y_MSB_ADDR 0x07
43#define SCA3000_REG_Z_MSB_ADDR 0x09
44
45#define SCA3000_REG_RING_OUT_ADDR 0x0f
46
47/* Temp read untested - the e05 doesn't have the sensor */
48#define SCA3000_REG_TEMP_MSB_ADDR 0x13
49
50#define SCA3000_REG_MODE_ADDR 0x14
51#define SCA3000_MODE_PROT_MASK 0x28
52#define SCA3000_REG_MODE_RING_BUF_ENABLE BIT(7)
53#define SCA3000_REG_MODE_RING_BUF_8BIT BIT(6)
54
55/*
56 * Free fall detection triggers an interrupt if the acceleration
57 * is below a threshold for equivalent of 25cm drop
58 */
59#define SCA3000_REG_MODE_FREE_FALL_DETECT BIT(4)
60#define SCA3000_REG_MODE_MEAS_MODE_NORMAL 0x00
61#define SCA3000_REG_MODE_MEAS_MODE_OP_1 0x01
62#define SCA3000_REG_MODE_MEAS_MODE_OP_2 0x02
63
64/*
65 * In motion detection mode the accelerations are band pass filtered
66 * (approx 1 - 25Hz) and then a programmable threshold is used to trigger
67 * an interrupt.
68 */
69#define SCA3000_REG_MODE_MEAS_MODE_MOT_DET 0x03
70#define SCA3000_REG_MODE_MODE_MASK 0x03
71
72#define SCA3000_REG_BUF_COUNT_ADDR 0x15
73
74#define SCA3000_REG_INT_STATUS_ADDR 0x16
75#define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7)
76#define SCA3000_REG_INT_STATUS_HALF BIT(6)
77
78#define SCA3000_INT_STATUS_FREE_FALL BIT(3)
79#define SCA3000_INT_STATUS_Y_TRIGGER BIT(2)
80#define SCA3000_INT_STATUS_X_TRIGGER BIT(1)
81#define SCA3000_INT_STATUS_Z_TRIGGER BIT(0)
82
83/* Used to allow access to multiplexed registers */
84#define SCA3000_REG_CTRL_SEL_ADDR 0x18
85/* Only available for SCA3000-D03 and SCA3000-D01 */
86#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
87#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
88#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
89#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
90#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
91/*
92 * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
93 * will not function
94 */
95#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
96
97#define SCA3000_REG_OUT_CTRL_PROT_MASK 0xE0
98#define SCA3000_REG_OUT_CTRL_BUF_X_EN 0x10
99#define SCA3000_REG_OUT_CTRL_BUF_Y_EN 0x08
100#define SCA3000_REG_OUT_CTRL_BUF_Z_EN 0x04
101#define SCA3000_REG_OUT_CTRL_BUF_DIV_MASK 0x03
102#define SCA3000_REG_OUT_CTRL_BUF_DIV_4 0x02
103#define SCA3000_REG_OUT_CTRL_BUF_DIV_2 0x01
104
105
106/*
107 * Control which motion detector interrupts are on.
108 * For now only OR combinations are supported.
109 */
110#define SCA3000_MD_CTRL_PROT_MASK 0xC0
111#define SCA3000_MD_CTRL_OR_Y BIT(0)
112#define SCA3000_MD_CTRL_OR_X BIT(1)
113#define SCA3000_MD_CTRL_OR_Z BIT(2)
114/* Currently unsupported */
115#define SCA3000_MD_CTRL_AND_Y BIT(3)
116#define SCA3000_MD_CTRL_AND_X BIT(4)
117#define SCA3000_MD_CTRL_AND_Z BIT(5)
118
119/*
120 * Some control registers have complex access methods and require this
121 * register to be used to remove a lock.
122 */
123#define SCA3000_REG_UNLOCK_ADDR 0x1e
124
125#define SCA3000_REG_INT_MASK_ADDR 0x21
126#define SCA3000_REG_INT_MASK_PROT_MASK 0x1C
127
128#define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7)
129#define SCA3000_REG_INT_MASK_RING_HALF BIT(6)
130
131#define SCA3000_REG_INT_MASK_ALL_INTS 0x02
132#define SCA3000_REG_INT_MASK_ACTIVE_HIGH 0x01
133#define SCA3000_REG_INT_MASK_ACTIVE_LOW 0x00
134/* Values of multiplexed registers (write to ctrl_data after select) */
135#define SCA3000_REG_CTRL_DATA_ADDR 0x22
136
137/*
138 * Measurement modes available on some sca3000 series chips. Code assumes others
139 * may become available in the future.
140 *
141 * Bypass - Bypass the low-pass filter in the signal channel so as to increase
142 * signal bandwidth.
143 *
144 * Narrow - Narrow low-pass filtering of the signal channel and half output
145 * data rate by decimation.
146 *
147 * Wide - Widen low-pass filtering of signal channel to increase bandwidth
148 */
149#define SCA3000_OP_MODE_BYPASS 0x01
150#define SCA3000_OP_MODE_NARROW 0x02
151#define SCA3000_OP_MODE_WIDE 0x04
152#define SCA3000_MAX_TX 6
153#define SCA3000_MAX_RX 2
154
155/**
156 * struct sca3000_state - device instance state information
157 * @us: the associated spi device
158 * @info: chip variant information
159 * @last_timestamp: the timestamp of the last event
160 * @mo_det_use_count: reference counter for the motion detection unit
161 * @lock: lock used to protect elements of sca3000_state
162 * and the underlying device state.
163 * @tx: dma-able transmit buffer
164 * @rx: dma-able receive buffer
165 **/
166struct sca3000_state {
167 struct spi_device *us;
168 const struct sca3000_chip_info *info;
169 s64 last_timestamp;
170 int mo_det_use_count;
171 struct mutex lock;
172 /* Can these share a cacheline ? */
173 u8 rx[384] ____cacheline_aligned;
174 u8 tx[6] ____cacheline_aligned;
175};
176
177/**
178 * struct sca3000_chip_info - model dependent parameters
179 * @scale: scale * 10^-6
180 * @temp_output: some devices have temperature sensors.
181 * @measurement_mode_freq: normal mode sampling frequency
182 * @measurement_mode_3db_freq: 3db cutoff frequency of the low pass filter for
183 * the normal measurement mode.
184 * @option_mode_1: first optional mode. Not all models have one
185 * @option_mode_1_freq: option mode 1 sampling frequency
186 * @option_mode_1_3db_freq: 3db cutoff frequency of the low pass filter for
187 * the first option mode.
188 * @option_mode_2: second optional mode. Not all chips have one
189 * @option_mode_2_freq: option mode 2 sampling frequency
190 * @option_mode_2_3db_freq: 3db cutoff frequency of the low pass filter for
191 * the second option mode.
192 * @mot_det_mult_xz: Bit wise multipliers to calculate the threshold
193 * for motion detection in the x and z axes.
194 * @mot_det_mult_y: Bit wise multipliers to calculate the threshold
195 * for motion detection in the y axis.
196 *
197 * This structure is used to hold information about the functionality of a given
198 * sca3000 variant.
199 **/
200struct sca3000_chip_info {
201 unsigned int scale;
202 bool temp_output;
203 int measurement_mode_freq;
204 int measurement_mode_3db_freq;
205 int option_mode_1;
206 int option_mode_1_freq;
207 int option_mode_1_3db_freq;
208 int option_mode_2;
209 int option_mode_2_freq;
210 int option_mode_2_3db_freq;
211 int mot_det_mult_xz[6];
212 int mot_det_mult_y[7];
213};
214
215enum sca3000_variant {
216 d01,
217 e02,
218 e04,
219 e05,
220};
221
222/*
223 * Note where option modes are not defined, the chip simply does not
224 * support any.
225 * Other chips in the sca3000 series use i2c and are not included here.
226 *
227 * Some of these devices are only listed in the family data sheet and
228 * do not actually appear to be available.
229 */
230static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
231 [d01] = {
232 .scale = 7357,
233 .temp_output = true,
234 .measurement_mode_freq = 250,
235 .measurement_mode_3db_freq = 45,
236 .option_mode_1 = SCA3000_OP_MODE_BYPASS,
237 .option_mode_1_freq = 250,
238 .option_mode_1_3db_freq = 70,
239 .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
240 .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
241 },
242 [e02] = {
243 .scale = 9810,
244 .measurement_mode_freq = 125,
245 .measurement_mode_3db_freq = 40,
246 .option_mode_1 = SCA3000_OP_MODE_NARROW,
247 .option_mode_1_freq = 63,
248 .option_mode_1_3db_freq = 11,
249 .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
250 .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
251 },
252 [e04] = {
253 .scale = 19620,
254 .measurement_mode_freq = 100,
255 .measurement_mode_3db_freq = 38,
256 .option_mode_1 = SCA3000_OP_MODE_NARROW,
257 .option_mode_1_freq = 50,
258 .option_mode_1_3db_freq = 9,
259 .option_mode_2 = SCA3000_OP_MODE_WIDE,
260 .option_mode_2_freq = 400,
261 .option_mode_2_3db_freq = 70,
262 .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
263 .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
264 },
265 [e05] = {
266 .scale = 61313,
267 .measurement_mode_freq = 200,
268 .measurement_mode_3db_freq = 60,
269 .option_mode_1 = SCA3000_OP_MODE_NARROW,
270 .option_mode_1_freq = 50,
271 .option_mode_1_3db_freq = 9,
272 .option_mode_2 = SCA3000_OP_MODE_WIDE,
273 .option_mode_2_freq = 400,
274 .option_mode_2_3db_freq = 75,
275 .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
276 .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
277 },
278};
279
280static int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
281{
282 st->tx[0] = SCA3000_WRITE_REG(address);
283 st->tx[1] = val;
284 return spi_write(st->us, st->tx, 2);
285}
286
287static int sca3000_read_data_short(struct sca3000_state *st,
288 u8 reg_address_high,
289 int len)
290{
291 struct spi_transfer xfer[2] = {
292 {
293 .len = 1,
294 .tx_buf = st->tx,
295 }, {
296 .len = len,
297 .rx_buf = st->rx,
298 }
299 };
300 st->tx[0] = SCA3000_READ_REG(reg_address_high);
301
302 return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
303}
304
305/**
306 * sca3000_reg_lock_on() - test if the ctrl register lock is on
307 * @st: Driver specific device instance data.
308 *
309 * Lock must be held.
310 **/
311static int sca3000_reg_lock_on(struct sca3000_state *st)
312{
313 int ret;
314
315 ret = sca3000_read_data_short(st, SCA3000_REG_STATUS_ADDR, 1);
316 if (ret < 0)
317 return ret;
318
319 return !(st->rx[0] & SCA3000_LOCKED);
320}
321
322/**
323 * __sca3000_unlock_reg_lock() - unlock the control registers
324 * @st: Driver specific device instance data.
325 *
326 * Note the device does not appear to support doing this in a single transfer.
327 * This should only ever be used as part of ctrl reg read.
328 * Lock must be held before calling this
329 */
330static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
331{
332 struct spi_transfer xfer[3] = {
333 {
334 .len = 2,
335 .cs_change = 1,
336 .tx_buf = st->tx,
337 }, {
338 .len = 2,
339 .cs_change = 1,
340 .tx_buf = st->tx + 2,
341 }, {
342 .len = 2,
343 .tx_buf = st->tx + 4,
344 },
345 };
346 st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
347 st->tx[1] = 0x00;
348 st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
349 st->tx[3] = 0x50;
350 st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
351 st->tx[5] = 0xA0;
352
353 return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
354}
355
356/**
357 * sca3000_write_ctrl_reg() - write to a lock protected ctrl register
358 * @st: Driver specific device instance data.
359 * @sel: selects which registers we wish to write to
360 * @val: the value to be written
361 *
362 * Certain control registers are protected against overwriting by the lock
363 * register and use a shared write address. This function allows writing of
364 * these registers.
365 * Lock must be held.
366 */
367static int sca3000_write_ctrl_reg(struct sca3000_state *st,
368 u8 sel,
369 uint8_t val)
370{
371 int ret;
372
373 ret = sca3000_reg_lock_on(st);
374 if (ret < 0)
375 goto error_ret;
376 if (ret) {
377 ret = __sca3000_unlock_reg_lock(st);
378 if (ret)
379 goto error_ret;
380 }
381
382 /* Set the control select register */
383 ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, sel);
384 if (ret)
385 goto error_ret;
386
387 /* Write the actual value into the register */
388 ret = sca3000_write_reg(st, SCA3000_REG_CTRL_DATA_ADDR, val);
389
390error_ret:
391 return ret;
392}
393
394/**
395 * sca3000_read_ctrl_reg() - read from a lock protected control register.
396 * @st: Driver specific device instance data.
397 * @ctrl_reg: Which ctrl register to read.
398 *
399 * Lock must be held.
400 */
401static int sca3000_read_ctrl_reg(struct sca3000_state *st,
402 u8 ctrl_reg)
403{
404 int ret;
405
406 ret = sca3000_reg_lock_on(st);
407 if (ret < 0)
408 goto error_ret;
409 if (ret) {
410 ret = __sca3000_unlock_reg_lock(st);
411 if (ret)
412 goto error_ret;
413 }
414 /* Set the control select register */
415 ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, ctrl_reg);
416 if (ret)
417 goto error_ret;
418 ret = sca3000_read_data_short(st, SCA3000_REG_CTRL_DATA_ADDR, 1);
419 if (ret)
420 goto error_ret;
421 return st->rx[0];
422error_ret:
423 return ret;
424}
425
426/**
427 * sca3000_print_rev() - read and report the chip revision number
428 * @indio_dev: Device instance specific generic IIO data.
429 * Driver specific device instance data can be obtained
430 * via iio_priv(indio_dev)
431 */
432static int sca3000_print_rev(struct iio_dev *indio_dev)
433{
434 int ret;
435 struct sca3000_state *st = iio_priv(indio_dev);
436
437 mutex_lock(&st->lock);
438 ret = sca3000_read_data_short(st, SCA3000_REG_REVID_ADDR, 1);
439 if (ret < 0)
440 goto error_ret;
441 dev_info(&indio_dev->dev,
442 "sca3000 revision major=%lu, minor=%lu\n",
443 st->rx[0] & SCA3000_REG_REVID_MAJOR_MASK,
444 st->rx[0] & SCA3000_REG_REVID_MINOR_MASK);
445error_ret:
446 mutex_unlock(&st->lock);
447
448 return ret;
449}
450
451static ssize_t
452sca3000_show_available_3db_freqs(struct device *dev,
453 struct device_attribute *attr,
454 char *buf)
455{
456 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
457 struct sca3000_state *st = iio_priv(indio_dev);
458 int len;
459
460 len = sprintf(buf, "%d", st->info->measurement_mode_3db_freq);
461 if (st->info->option_mode_1)
462 len += sprintf(buf + len, " %d",
463 st->info->option_mode_1_3db_freq);
464 if (st->info->option_mode_2)
465 len += sprintf(buf + len, " %d",
466 st->info->option_mode_2_3db_freq);
467 len += sprintf(buf + len, "\n");
468
469 return len;
470}
471
472static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
473 S_IRUGO, sca3000_show_available_3db_freqs,
474 NULL, 0);
475
476static const struct iio_event_spec sca3000_event = {
477 .type = IIO_EV_TYPE_MAG,
478 .dir = IIO_EV_DIR_RISING,
479 .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
480};
481
482/*
483 * Note the hack in the number of bits to pretend we have 2 more than
484 * we do in the fifo.
485 */
486#define SCA3000_CHAN(index, mod) \
487 { \
488 .type = IIO_ACCEL, \
489 .modified = 1, \
490 .channel2 = mod, \
491 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
492 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |\
493 BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),\
494 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
495 .address = index, \
496 .scan_index = index, \
497 .scan_type = { \
498 .sign = 's', \
499 .realbits = 13, \
500 .storagebits = 16, \
501 .shift = 3, \
502 .endianness = IIO_BE, \
503 }, \
504 .event_spec = &sca3000_event, \
505 .num_event_specs = 1, \
506 }
507
508static const struct iio_event_spec sca3000_freefall_event_spec = {
509 .type = IIO_EV_TYPE_MAG,
510 .dir = IIO_EV_DIR_FALLING,
511 .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
512 BIT(IIO_EV_INFO_PERIOD),
513};
514
515static const struct iio_chan_spec sca3000_channels[] = {
516 SCA3000_CHAN(0, IIO_MOD_X),
517 SCA3000_CHAN(1, IIO_MOD_Y),
518 SCA3000_CHAN(2, IIO_MOD_Z),
519 {
520 .type = IIO_ACCEL,
521 .modified = 1,
522 .channel2 = IIO_MOD_X_AND_Y_AND_Z,
523 .scan_index = -1, /* Fake channel */
524 .event_spec = &sca3000_freefall_event_spec,
525 .num_event_specs = 1,
526 },
527};
528
529static const struct iio_chan_spec sca3000_channels_with_temp[] = {
530 SCA3000_CHAN(0, IIO_MOD_X),
531 SCA3000_CHAN(1, IIO_MOD_Y),
532 SCA3000_CHAN(2, IIO_MOD_Z),
533 {
534 .type = IIO_TEMP,
535 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
536 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
537 BIT(IIO_CHAN_INFO_OFFSET),
538 /* No buffer support */
539 .scan_index = -1,
540 },
541 {
542 .type = IIO_ACCEL,
543 .modified = 1,
544 .channel2 = IIO_MOD_X_AND_Y_AND_Z,
545 .scan_index = -1, /* Fake channel */
546 .event_spec = &sca3000_freefall_event_spec,
547 .num_event_specs = 1,
548 },
549};
550
551static u8 sca3000_addresses[3][3] = {
552 [0] = {SCA3000_REG_X_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_X_TH,
553 SCA3000_MD_CTRL_OR_X},
554 [1] = {SCA3000_REG_Y_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Y_TH,
555 SCA3000_MD_CTRL_OR_Y},
556 [2] = {SCA3000_REG_Z_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Z_TH,
557 SCA3000_MD_CTRL_OR_Z},
558};
559
560/**
561 * __sca3000_get_base_freq() - obtain mode specific base frequency
562 * @st: Driver specific device instance data.
563 * @info: chip type specific information.
564 * @base_freq: Base frequency for the current measurement mode.
565 *
566 * lock must be held
567 */
568static inline int __sca3000_get_base_freq(struct sca3000_state *st,
569 const struct sca3000_chip_info *info,
570 int *base_freq)
571{
572 int ret;
573
574 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
575 if (ret)
576 goto error_ret;
577 switch (SCA3000_REG_MODE_MODE_MASK & st->rx[0]) {
578 case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
579 *base_freq = info->measurement_mode_freq;
580 break;
581 case SCA3000_REG_MODE_MEAS_MODE_OP_1:
582 *base_freq = info->option_mode_1_freq;
583 break;
584 case SCA3000_REG_MODE_MEAS_MODE_OP_2:
585 *base_freq = info->option_mode_2_freq;
586 break;
587 default:
588 ret = -EINVAL;
589 }
590error_ret:
591 return ret;
592}
593
594/**
595 * sca3000_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
596 * @st: Driver specific device instance data.
597 * @val: The frequency read back.
598 *
599 * lock must be held
600 **/
601static int sca3000_read_raw_samp_freq(struct sca3000_state *st, int *val)
602{
603 int ret;
604
605 ret = __sca3000_get_base_freq(st, st->info, val);
606 if (ret)
607 return ret;
608
609 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
610 if (ret < 0)
611 return ret;
612
613 if (*val > 0) {
614 ret &= SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
615 switch (ret) {
616 case SCA3000_REG_OUT_CTRL_BUF_DIV_2:
617 *val /= 2;
618 break;
619 case SCA3000_REG_OUT_CTRL_BUF_DIV_4:
620 *val /= 4;
621 break;
622 }
623 }
624
625 return 0;
626}
627
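To spell out the divider arithmetic used by sca3000_read_raw_samp_freq() above, here is a standalone sketch (the helper and the userspace form are illustrative, not from the driver): the mode's base rate is divided down according to the BUF_DIV field of the OUT_CTRL register, so an e05 in normal measurement mode (200 Hz base rate) with BUF_DIV_4 set reports 50 Hz.

#include <stdint.h>
#include <stdio.h>

/* SCA3000_REG_OUT_CTRL_BUF_DIV_* values from the register definitions above */
#define BUF_DIV_MASK	0x03
#define BUF_DIV_2	0x01
#define BUF_DIV_4	0x02

static int sca3000_effective_rate(int base_freq, uint8_t out_ctrl)
{
	switch (out_ctrl & BUF_DIV_MASK) {
	case BUF_DIV_2:
		return base_freq / 2;
	case BUF_DIV_4:
		return base_freq / 4;
	default:
		return base_freq;
	}
}

int main(void)
{
	/* e05 normal mode base rate is 200 Hz; divided by 4 -> 50 Hz */
	printf("%d Hz\n", sca3000_effective_rate(200, BUF_DIV_4));
	return 0;
}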
628/**
629 * sca3000_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
630 * @st: Driver specific device instance data.
631 * @val: The frequency desired.
632 *
633 * lock must be held
634 */
635static int sca3000_write_raw_samp_freq(struct sca3000_state *st, int val)
636{
637 int ret, base_freq, ctrlval;
638
639 ret = __sca3000_get_base_freq(st, st->info, &base_freq);
640 if (ret)
641 return ret;
642
643 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
644 if (ret < 0)
645 return ret;
646
647 ctrlval = ret & ~SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
648
649 if (val == base_freq / 2)
650 ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_2;
651 else if (val == base_freq / 4)
652 ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_4;
653 else if (val != base_freq)
654 return -EINVAL;
655
656 return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
657 ctrlval);
658}
659
660static int sca3000_read_3db_freq(struct sca3000_state *st, int *val)
661{
662 int ret;
663
664 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
665 if (ret)
666 return ret;
667
668 /* mask bottom 2 bits - only ones that are relevant */
669 st->rx[0] &= SCA3000_REG_MODE_MODE_MASK;
670 switch (st->rx[0]) {
671 case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
672 *val = st->info->measurement_mode_3db_freq;
673 return IIO_VAL_INT;
674 case SCA3000_REG_MODE_MEAS_MODE_MOT_DET:
675 return -EBUSY;
676 case SCA3000_REG_MODE_MEAS_MODE_OP_1:
677 *val = st->info->option_mode_1_3db_freq;
678 return IIO_VAL_INT;
679 case SCA3000_REG_MODE_MEAS_MODE_OP_2:
680 *val = st->info->option_mode_2_3db_freq;
681 return IIO_VAL_INT;
682 default:
683 return -EINVAL;
684 }
685}
686
687static int sca3000_write_3db_freq(struct sca3000_state *st, int val)
688{
689 int ret;
690 int mode;
691
692 if (val == st->info->measurement_mode_3db_freq)
693 mode = SCA3000_REG_MODE_MEAS_MODE_NORMAL;
694 else if (st->info->option_mode_1 &&
695 (val == st->info->option_mode_1_3db_freq))
696 mode = SCA3000_REG_MODE_MEAS_MODE_OP_1;
697 else if (st->info->option_mode_2 &&
698 (val == st->info->option_mode_2_3db_freq))
699 mode = SCA3000_REG_MODE_MEAS_MODE_OP_2;
700 else
701 return -EINVAL;
702 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
703 if (ret)
704 return ret;
705
706 st->rx[0] &= ~SCA3000_REG_MODE_MODE_MASK;
707 st->rx[0] |= (mode & SCA3000_REG_MODE_MODE_MASK);
708
709 return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR, st->rx[0]);
710}
711
712static int sca3000_read_raw(struct iio_dev *indio_dev,
713 struct iio_chan_spec const *chan,
714 int *val,
715 int *val2,
716 long mask)
717{
718 struct sca3000_state *st = iio_priv(indio_dev);
719 int ret;
720 u8 address;
721
722 switch (mask) {
723 case IIO_CHAN_INFO_RAW:
724 mutex_lock(&st->lock);
725 if (chan->type == IIO_ACCEL) {
726 if (st->mo_det_use_count) {
727 mutex_unlock(&st->lock);
728 return -EBUSY;
729 }
730 address = sca3000_addresses[chan->address][0];
731 ret = sca3000_read_data_short(st, address, 2);
732 if (ret < 0) {
733 mutex_unlock(&st->lock);
734 return ret;
735 }
736 *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
737 *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
738 (sizeof(*val) * 8 - 13);
739 } else {
740 /* get the temperature when available */
741 ret = sca3000_read_data_short(st,
742 SCA3000_REG_TEMP_MSB_ADDR,
743 2);
744 if (ret < 0) {
745 mutex_unlock(&st->lock);
746 return ret;
747 }
748 *val = ((st->rx[0] & 0x3F) << 3) |
749 ((st->rx[1] & 0xE0) >> 5);
750 }
751 mutex_unlock(&st->lock);
752 return IIO_VAL_INT;
753 case IIO_CHAN_INFO_SCALE:
754 *val = 0;
755 if (chan->type == IIO_ACCEL)
756 *val2 = st->info->scale;
757 else /* temperature */
758 *val2 = 555556;
759 return IIO_VAL_INT_PLUS_MICRO;
760 case IIO_CHAN_INFO_OFFSET:
761 *val = -214;
762 *val2 = 600000;
763 return IIO_VAL_INT_PLUS_MICRO;
764 case IIO_CHAN_INFO_SAMP_FREQ:
765 mutex_lock(&st->lock);
766 ret = sca3000_read_raw_samp_freq(st, val);
767 mutex_unlock(&st->lock);
768 return ret ? ret : IIO_VAL_INT;
769 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
770 mutex_lock(&st->lock);
771 ret = sca3000_read_3db_freq(st, val);
772 mutex_unlock(&st->lock);
773 return ret;
774 default:
775 return -EINVAL;
776 }
777}
778
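A standalone sketch of the acceleration decode performed in sca3000_read_raw() above: the 13 significant bits sit left-justified in a big-endian 16-bit register pair, are shifted down by three and then sign-extended. The 0.007357 m/s^2 per count figure is the d01 scale from sca3000_spi_chip_info_tbl; the sample value is illustrative.

#include <stdint.h>
#include <stdio.h>

static int sca3000_decode_sample(uint8_t msb, uint8_t lsb)
{
	/* keep the 13 data bits, as the (be16 >> 3) & 0x1FFF above does */
	int val = (((msb << 8) | lsb) >> 3) & 0x1FFF;

	/* sign-extend from bit 12 (two's complement) */
	if (val & 0x1000)
		val -= 0x2000;
	return val;
}

int main(void)
{
	int raw = sca3000_decode_sample(0xF8, 0x08);	/* 0xF808 -> -255 counts */

	printf("%d counts = %.3f m/s^2 at the d01 scale\n", raw, raw * 0.007357);
	return 0;
}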
779static int sca3000_write_raw(struct iio_dev *indio_dev,
780 struct iio_chan_spec const *chan,
781 int val, int val2, long mask)
782{
783 struct sca3000_state *st = iio_priv(indio_dev);
784 int ret;
785
786 switch (mask) {
787 case IIO_CHAN_INFO_SAMP_FREQ:
788 if (val2)
789 return -EINVAL;
790 mutex_lock(&st->lock);
791 ret = sca3000_write_raw_samp_freq(st, val);
792 mutex_unlock(&st->lock);
793 return ret;
794 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
795 if (val2)
796 return -EINVAL;
797 mutex_lock(&st->lock);
798 ret = sca3000_write_3db_freq(st, val);
799 mutex_unlock(&st->lock);
 return ret;
800 default:
801 return -EINVAL;
802 }
803
804 return ret;
805}
806
807/**
808 * sca3000_read_av_freq() - sysfs function to get available frequencies
809 * @dev: Device structure for this device.
810 * @attr: Description of the attribute.
811 * @buf: Output buffer for the available sampling frequencies.
812 *
813 * The divided-down rates are only relevant to the ring buffer and depend on
814 * the current mode. Note that the data sheet gives rather wide tolerances
815 * for these, so integer division gives a good enough answer, and not all
816 * chips have them specified at all.
817 **/
818static ssize_t sca3000_read_av_freq(struct device *dev,
819 struct device_attribute *attr,
820 char *buf)
821{
822 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
823 struct sca3000_state *st = iio_priv(indio_dev);
824 int len = 0, ret, val;
825
826 mutex_lock(&st->lock);
827 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
828 val = st->rx[0];
829 mutex_unlock(&st->lock);
830 if (ret)
831 goto error_ret;
832
833 switch (val & SCA3000_REG_MODE_MODE_MASK) {
834 case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
835 len += sprintf(buf + len, "%d %d %d\n",
836 st->info->measurement_mode_freq,
837 st->info->measurement_mode_freq / 2,
838 st->info->measurement_mode_freq / 4);
839 break;
840 case SCA3000_REG_MODE_MEAS_MODE_OP_1:
841 len += sprintf(buf + len, "%d %d %d\n",
842 st->info->option_mode_1_freq,
843 st->info->option_mode_1_freq / 2,
844 st->info->option_mode_1_freq / 4);
845 break;
846 case SCA3000_REG_MODE_MEAS_MODE_OP_2:
847 len += sprintf(buf + len, "%d %d %d\n",
848 st->info->option_mode_2_freq,
849 st->info->option_mode_2_freq / 2,
850 st->info->option_mode_2_freq / 4);
851 break;
852 }
853 return len;
854error_ret:
855 return ret;
856}
857
858/*
859 * Should only really be registered if ring buffer support is compiled in.
860 * It does no harm, however, and doing it right would add a fair bit of complexity.
861 */
862static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
863
864/**
865 * sca3000_read_event_value() - query of a threshold or period
866 **/
867static int sca3000_read_event_value(struct iio_dev *indio_dev,
868 const struct iio_chan_spec *chan,
869 enum iio_event_type type,
870 enum iio_event_direction dir,
871 enum iio_event_info info,
872 int *val, int *val2)
873{
874 int ret, i;
875 struct sca3000_state *st = iio_priv(indio_dev);
876
877 switch (info) {
878 case IIO_EV_INFO_VALUE:
879 mutex_lock(&st->lock);
880 ret = sca3000_read_ctrl_reg(st,
881 sca3000_addresses[chan->address][1]);
882 mutex_unlock(&st->lock);
883 if (ret < 0)
884 return ret;
885 *val = 0;
886 if (chan->channel2 == IIO_MOD_Y)
887 for_each_set_bit(i, (unsigned long *)&ret,
888 ARRAY_SIZE(st->info->mot_det_mult_y))
889 *val += st->info->mot_det_mult_y[i];
890 else
891 for_each_set_bit(i, (unsigned long *)&ret,
892 ARRAY_SIZE(st->info->mot_det_mult_xz))
893 *val += st->info->mot_det_mult_xz[i];
894
895 return IIO_VAL_INT;
896 case IIO_EV_INFO_PERIOD:
897 *val = 0;
898 *val2 = 226000;
899 return IIO_VAL_INT_PLUS_MICRO;
900 default:
901 return -EINVAL;
902 }
903}
904
905/**
906 * sca3000_write_event_value() - control of threshold and period
907 * @indio_dev: Device instance specific IIO information.
908 * @chan: Description of the channel for which the event is being
909 * configured.
910 * @type: The type of event being configured, here magnitude rising
911 * as everything else is read only.
912 * @dir: Direction of the event (here rising)
913 * @info: What information about the event are we configuring.
914 * Here the threshold only.
915 * @val: Integer part of the value being written.
916 * @val2: Non integer part of the value being written. Here always 0.
917 */
918static int sca3000_write_event_value(struct iio_dev *indio_dev,
919 const struct iio_chan_spec *chan,
920 enum iio_event_type type,
921 enum iio_event_direction dir,
922 enum iio_event_info info,
923 int val, int val2)
924{
925 struct sca3000_state *st = iio_priv(indio_dev);
926 int ret;
927 int i;
928 u8 nonlinear = 0;
929
930 if (chan->channel2 == IIO_MOD_Y) {
931 i = ARRAY_SIZE(st->info->mot_det_mult_y);
932 while (i > 0)
933 if (val >= st->info->mot_det_mult_y[--i]) {
934 nonlinear |= (1 << i);
935 val -= st->info->mot_det_mult_y[i];
936 }
937 } else {
938 i = ARRAY_SIZE(st->info->mot_det_mult_xz);
939 while (i > 0)
940 if (val >= st->info->mot_det_mult_xz[--i]) {
941 nonlinear |= (1 << i);
942 val -= st->info->mot_det_mult_xz[i];
943 }
944 }
945
946 mutex_lock(&st->lock);
947 ret = sca3000_write_ctrl_reg(st,
948 sca3000_addresses[chan->address][1],
949 nonlinear);
950 mutex_unlock(&st->lock);
951
952 return ret;
953}
954
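The threshold handled by sca3000_write_event_value() above is encoded greedily as a sum of fixed per-bit weights taken from the mot_det_mult_* tables, working down from the largest weight. A standalone sketch using the e05 y-axis weights from sca3000_spi_chip_info_tbl (the helper itself is illustrative):

#include <stdint.h>
#include <stdio.h>

static const int e05_mult_y[] = { 300, 600, 1200, 2000, 4100, 7800, 15600 };

static uint8_t sca3000_encode_threshold(int val, const int *mult, int n)
{
	uint8_t reg = 0;
	int i = n;

	/* same greedy walk as the driver: highest weight first */
	while (i > 0)
		if (val >= mult[--i]) {
			reg |= 1 << i;
			val -= mult[i];
		}
	return reg;
}

int main(void)
{
	/* 5000 decomposes as 4100 + 600 + 300, i.e. bits 4, 1 and 0 -> 0x13 */
	printf("0x%02x\n", sca3000_encode_threshold(5000, e05_mult_y, 7));
	return 0;
}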
955static struct attribute *sca3000_attributes[] = {
956 &iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
957 &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
958 NULL,
959};
960
961static const struct attribute_group sca3000_attribute_group = {
962 .attrs = sca3000_attributes,
963};
964
965static int sca3000_read_data(struct sca3000_state *st,
966 u8 reg_address_high,
967 u8 *rx,
968 int len)
969{
970 int ret;
971 struct spi_transfer xfer[2] = {
972 {
973 .len = 1,
974 .tx_buf = st->tx,
975 }, {
976 .len = len,
977 .rx_buf = rx,
978 }
979 };
980
981 st->tx[0] = SCA3000_READ_REG(reg_address_high);
982 ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
983 if (ret) {
984 dev_err(get_device(&st->us->dev), "problem reading register");
985 return ret;
986 }
987
988 return 0;
989}
990
991/**
992 * sca3000_ring_int_process() - ring specific interrupt handling.
993 * @val: Value of the interrupt status register.
994 * @indio_dev: Device instance specific IIO device structure.
995 */
996static void sca3000_ring_int_process(u8 val, struct iio_dev *indio_dev)
997{
998 struct sca3000_state *st = iio_priv(indio_dev);
999 int ret, i, num_available;
1000
1001 mutex_lock(&st->lock);
1002
1003 if (val & SCA3000_REG_INT_STATUS_HALF) {
1004 ret = sca3000_read_data_short(st, SCA3000_REG_BUF_COUNT_ADDR,
1005 1);
1006 if (ret)
1007 goto error_ret;
1008 num_available = st->rx[0];
1009 /*
1010 * num_available is the total number of samples available
1011 * i.e. number of time points * number of channels.
1012 */
1013 ret = sca3000_read_data(st, SCA3000_REG_RING_OUT_ADDR, st->rx,
1014 num_available * 2);
1015 if (ret)
1016 goto error_ret;
1017 for (i = 0; i < num_available / 3; i++) {
1018 /*
1019 * Dirty hack to cover for 11 bit in fifo, 13 bit
1020 * direct reading.
1021 *
1022 * In theory the bottom two bits are undefined.
1023 * In reality they appear to always be 0.
1024 */
1025 iio_push_to_buffers(indio_dev, st->rx + i * 3 * 2);
1026 }
1027 }
1028error_ret:
1029 mutex_unlock(&st->lock);
1030}
1031
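The buffer accounting in sca3000_ring_int_process() above, spelled out with an illustrative count (the register value of 96 is an assumption, not taken from the patch): BUF_COUNT reports the total number of samples across all channels, each sample is two bytes, and only complete x/y/z scans are pushed to the ring.

#include <stdio.h>

int main(void)
{
	int num_available = 96;			/* assumed BUF_COUNT reading */
	int bytes_to_read = num_available * 2;	/* bytes fetched from RING_OUT */
	int scans_pushed = num_available / 3;	/* x/y/z scans of 6 bytes each */

	printf("%d bytes read, %d scans pushed\n", bytes_to_read, scans_pushed);
	return 0;
}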
1032/**
1033 * sca3000_event_handler() - handling ring and non ring events
1034 * @irq: The irq being handled.
1035 * @private: struct iio_device pointer for the device.
1036 *
1037 * Ring related interrupt handler. Depending on the event, push to
1038 * the ring buffer or to the event chrdev.
1039 *
1040 * This function is complicated by the fact that the devices can signify ring
1041 * and non ring events via the same interrupt line and they can only
1042 * be distinguished via a read of the relevant status register.
1043 */
1044static irqreturn_t sca3000_event_handler(int irq, void *private)
1045{
1046 struct iio_dev *indio_dev = private;
1047 struct sca3000_state *st = iio_priv(indio_dev);
1048 int ret, val;
1049 s64 last_timestamp = iio_get_time_ns(indio_dev);
1050
1051 /*
1052 * Could lead if badly timed to an extra read of status reg,
1053 * but ensures no interrupt is missed.
1054 */
1055 mutex_lock(&st->lock);
1056 ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
1057 val = st->rx[0];
1058 mutex_unlock(&st->lock);
1059 if (ret)
1060 goto done;
1061
1062 sca3000_ring_int_process(val, indio_dev);
1063
1064 if (val & SCA3000_INT_STATUS_FREE_FALL)
1065 iio_push_event(indio_dev,
1066 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1067 0,
1068 IIO_MOD_X_AND_Y_AND_Z,
1069 IIO_EV_TYPE_MAG,
1070 IIO_EV_DIR_FALLING),
1071 last_timestamp);
1072
1073 if (val & SCA3000_INT_STATUS_Y_TRIGGER)
1074 iio_push_event(indio_dev,
1075 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1076 0,
1077 IIO_MOD_Y,
1078 IIO_EV_TYPE_MAG,
1079 IIO_EV_DIR_RISING),
1080 last_timestamp);
1081
1082 if (val & SCA3000_INT_STATUS_X_TRIGGER)
1083 iio_push_event(indio_dev,
1084 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1085 0,
1086 IIO_MOD_X,
1087 IIO_EV_TYPE_MAG,
1088 IIO_EV_DIR_RISING),
1089 last_timestamp);
1090
1091 if (val & SCA3000_INT_STATUS_Z_TRIGGER)
1092 iio_push_event(indio_dev,
1093 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1094 0,
1095 IIO_MOD_Z,
1096 IIO_EV_TYPE_MAG,
1097 IIO_EV_DIR_RISING),
1098 last_timestamp);
1099
1100done:
1101 return IRQ_HANDLED;
1102}
1103
1104/**
1105 * sca3000_read_event_config() - query which events are enabled
1106 **/
1107static int sca3000_read_event_config(struct iio_dev *indio_dev,
1108 const struct iio_chan_spec *chan,
1109 enum iio_event_type type,
1110 enum iio_event_direction dir)
1111{
1112 struct sca3000_state *st = iio_priv(indio_dev);
1113 int ret;
1114 /* read current value of mode register */
1115 mutex_lock(&st->lock);
1116
1117 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
1118 if (ret)
1119 goto error_ret;
1120
1121 switch (chan->channel2) {
1122 case IIO_MOD_X_AND_Y_AND_Z:
1123 ret = !!(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT);
1124 break;
1125 case IIO_MOD_X:
1126 case IIO_MOD_Y:
1127 case IIO_MOD_Z:
1128 /*
1129 * Motion detection mode cannot run at the same time as
1130 * acceleration data being read.
1131 */
1132 if ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
1133 != SCA3000_REG_MODE_MEAS_MODE_MOT_DET) {
1134 ret = 0;
1135 } else {
1136 ret = sca3000_read_ctrl_reg(st,
1137 SCA3000_REG_CTRL_SEL_MD_CTRL);
1138 if (ret < 0)
1139 goto error_ret;
1140 /* only supporting logical or's for now */
1141 ret = !!(ret & sca3000_addresses[chan->address][2]);
1142 }
1143 break;
1144 default:
1145 ret = -EINVAL;
1146 }
1147
1148error_ret:
1149 mutex_unlock(&st->lock);
1150
1151 return ret;
1152}
1153
1154static int sca3000_freefall_set_state(struct iio_dev *indio_dev, int state)
1155{
1156 struct sca3000_state *st = iio_priv(indio_dev);
1157 int ret;
1158
1159 /* read current value of mode register */
1160 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
1161 if (ret)
1162 return ret;
1163
1164 /* if off and should be on */
1165 if (state && !(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
1166 return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
1167 st->rx[0] | SCA3000_REG_MODE_FREE_FALL_DETECT);
1168 /* if on and should be off */
1169 else if (!state && (st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
1170 return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
1171 st->rx[0] & ~SCA3000_REG_MODE_FREE_FALL_DETECT);
1172 else
1173 return 0;
1174}
1175
1176static int sca3000_motion_detect_set_state(struct iio_dev *indio_dev, int axis,
1177 int state)
1178{
1179 struct sca3000_state *st = iio_priv(indio_dev);
1180 int ret, ctrlval;
1181
1182 /*
1183 * First read the motion detector config to find out if
1184 * this axis is on
1185 */
1186 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
1187 if (ret < 0)
1188 return ret;
1189 ctrlval = ret;
1190 /* if off and should be on */
1191 if (state && !(ctrlval & sca3000_addresses[axis][2])) {
1192 ret = sca3000_write_ctrl_reg(st,
1193 SCA3000_REG_CTRL_SEL_MD_CTRL,
1194 ctrlval |
1195 sca3000_addresses[axis][2]);
1196 if (ret)
1197 return ret;
1198 st->mo_det_use_count++;
1199 } else if (!state && (ctrlval & sca3000_addresses[axis][2])) {
1200 ret = sca3000_write_ctrl_reg(st,
1201 SCA3000_REG_CTRL_SEL_MD_CTRL,
1202 ctrlval &
1203 ~(sca3000_addresses[axis][2]));
1204 if (ret)
1205 return ret;
1206 st->mo_det_use_count--;
1207 }
1208
1209 /* read current value of mode register */
1210 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
1211 if (ret)
1212 return ret;
1213 /* if off and should be on */
1214 if ((st->mo_det_use_count) &&
1215 ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
1216 != SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
1217 return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
1218 (st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK)
1219 | SCA3000_REG_MODE_MEAS_MODE_MOT_DET);
1220 /* if on and should be off */
1221 else if (!(st->mo_det_use_count) &&
1222 ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
1223 == SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
1224 return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
1225 st->rx[0] & SCA3000_REG_MODE_MODE_MASK);
1226 else
1227 return 0;
1228}
1229
1230/**
1231 * sca3000_write_event_config() - simple on off control for motion detector
1232 * @indio_dev: IIO device instance specific structure. Data specific to this
1233 * particular driver may be accessed via iio_priv(indio_dev).
1234 * @chan: Description of the channel whose event we are configuring.
1235 * @type: The type of event.
1236 * @dir: The direction of the event.
1237 * @state: Desired state of event being configured.
1238 *
1239 * This is a per axis control, but enabling any will result in the
1240 * motion detector unit being enabled.
1241 * N.B. enabling motion detector stops normal data acquisition.
1242 * There is a complexity in knowing which mode to return to when
1243 * this mode is disabled. Currently normal mode is assumed.
1244 **/
1245static int sca3000_write_event_config(struct iio_dev *indio_dev,
1246 const struct iio_chan_spec *chan,
1247 enum iio_event_type type,
1248 enum iio_event_direction dir,
1249 int state)
1250{
1251 struct sca3000_state *st = iio_priv(indio_dev);
1252 int ret;
1253
1254 mutex_lock(&st->lock);
1255 switch (chan->channel2) {
1256 case IIO_MOD_X_AND_Y_AND_Z:
1257 ret = sca3000_freefall_set_state(indio_dev, state);
1258 break;
1259
1260 case IIO_MOD_X:
1261 case IIO_MOD_Y:
1262 case IIO_MOD_Z:
1263 ret = sca3000_motion_detect_set_state(indio_dev,
1264 chan->address,
1265 state);
1266 break;
1267 default:
1268 ret = -EINVAL;
1269 break;
1270 }
1271 mutex_unlock(&st->lock);
1272
1273 return ret;
1274}
1275
1276static int sca3000_configure_ring(struct iio_dev *indio_dev)
1277{
1278 struct iio_buffer *buffer;
1279
1280 buffer = iio_kfifo_allocate();
1281 if (!buffer)
1282 return -ENOMEM;
1283
1284 iio_device_attach_buffer(indio_dev, buffer);
1285 indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
1286
1287 return 0;
1288}
1289
1290static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
1291{
1292 iio_kfifo_free(indio_dev->buffer);
1293}
1294
1295static inline
1296int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
1297{
1298 struct sca3000_state *st = iio_priv(indio_dev);
1299 int ret;
1300
1301 mutex_lock(&st->lock);
1302 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
1303 if (ret)
1304 goto error_ret;
1305 if (state) {
1306 dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
1307 ret = sca3000_write_reg(st,
1308 SCA3000_REG_MODE_ADDR,
1309 (st->rx[0] | SCA3000_REG_MODE_RING_BUF_ENABLE));
1310 } else
1311 ret = sca3000_write_reg(st,
1312 SCA3000_REG_MODE_ADDR,
1313 (st->rx[0] & ~SCA3000_REG_MODE_RING_BUF_ENABLE));
1314error_ret:
1315 mutex_unlock(&st->lock);
1316
1317 return ret;
1318}
1319
1320/**
1321 * sca3000_hw_ring_preenable() - hw ring buffer preenable function
1322 * @indio_dev: structure representing the IIO device. Device instance
1323 * specific state can be accessed via iio_priv(indio_dev).
1324 *
1325 * Very simple enable function as the chip allows normal reads
1326 * during ring buffer operation so as long as it is indeed running
1327 * before we notify the core, the precise ordering does not matter.
1328 */
1329static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
1330{
1331 int ret;
1332 struct sca3000_state *st = iio_priv(indio_dev);
1333
1334 mutex_lock(&st->lock);
1335
1336 /* Enable the 50% full interrupt */
1337 ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
1338 if (ret)
1339 goto error_unlock;
1340 ret = sca3000_write_reg(st,
1341 SCA3000_REG_INT_MASK_ADDR,
1342 st->rx[0] | SCA3000_REG_INT_MASK_RING_HALF);
1343 if (ret)
1344 goto error_unlock;
1345
1346 mutex_unlock(&st->lock);
1347
1348 return __sca3000_hw_ring_state_set(indio_dev, 1);
1349
1350error_unlock:
1351 mutex_unlock(&st->lock);
1352
1353 return ret;
1354}
1355
1356static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
1357{
1358 int ret;
1359 struct sca3000_state *st = iio_priv(indio_dev);
1360
1361 ret = __sca3000_hw_ring_state_set(indio_dev, 0);
1362 if (ret)
1363 return ret;
1364
1365 /* Disable the 50% full interrupt */
1366 mutex_lock(&st->lock);
1367
1368 ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
1369 if (ret)
1370 goto unlock;
1371 ret = sca3000_write_reg(st,
1372 SCA3000_REG_INT_MASK_ADDR,
1373 st->rx[0] & ~SCA3000_REG_INT_MASK_RING_HALF);
1374unlock:
1375 mutex_unlock(&st->lock);
1376 return ret;
1377}
1378
1379static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
1380 .preenable = &sca3000_hw_ring_preenable,
1381 .postdisable = &sca3000_hw_ring_postdisable,
1382};
1383
1384/**
1385 * sca3000_clean_setup() - get the device into a predictable state
1386 * @st: Device instance specific private data structure
1387 *
1388 * Devices use flash memory to store many of the register values
1389 * and hence can come up in somewhat unpredictable states.
1390 * Hence reset everything on driver load.
1391 */
1392static int sca3000_clean_setup(struct sca3000_state *st)
1393{
1394 int ret;
1395
1396 mutex_lock(&st->lock);
1397 /* Ensure all interrupts have been acknowledged */
1398 ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
1399 if (ret)
1400 goto error_ret;
1401
1402 /* Turn off all motion detection channels */
1403 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
1404 if (ret < 0)
1405 goto error_ret;
1406 ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
1407 ret & SCA3000_MD_CTRL_PROT_MASK);
1408 if (ret)
1409 goto error_ret;
1410
1411 /* Disable ring buffer */
1412 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
1413 if (ret < 0)
1414 goto error_ret;
1415 ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
1416 (ret & SCA3000_REG_OUT_CTRL_PROT_MASK)
1417 | SCA3000_REG_OUT_CTRL_BUF_X_EN
1418 | SCA3000_REG_OUT_CTRL_BUF_Y_EN
1419 | SCA3000_REG_OUT_CTRL_BUF_Z_EN
1420 | SCA3000_REG_OUT_CTRL_BUF_DIV_4);
1421 if (ret)
1422 goto error_ret;
1423 /* Enable interrupts, relevant to mode and set up as active low */
1424 ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
1425 if (ret)
1426 goto error_ret;
1427 ret = sca3000_write_reg(st,
1428 SCA3000_REG_INT_MASK_ADDR,
1429 (ret & SCA3000_REG_INT_MASK_PROT_MASK)
1430 | SCA3000_REG_INT_MASK_ACTIVE_LOW);
1431 if (ret)
1432 goto error_ret;
1433 /*
1434 * Select normal measurement mode, free fall off, ring off
1435 * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
1436 * as that occurs in one of the examples in the datasheet
1437 */
1438 ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
1439 if (ret)
1440 goto error_ret;
1441 ret = sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
1442 (st->rx[0] & SCA3000_MODE_PROT_MASK));
1443
1444error_ret:
1445 mutex_unlock(&st->lock);
1446 return ret;
1447}
1448
1449static const struct iio_info sca3000_info = {
1450 .attrs = &sca3000_attribute_group,
1451 .read_raw = &sca3000_read_raw,
1452 .write_raw = &sca3000_write_raw,
1453 .read_event_value = &sca3000_read_event_value,
1454 .write_event_value = &sca3000_write_event_value,
1455 .read_event_config = &sca3000_read_event_config,
1456 .write_event_config = &sca3000_write_event_config,
1457 .driver_module = THIS_MODULE,
1458};
1459
1460static int sca3000_probe(struct spi_device *spi)
1461{
1462 int ret;
1463 struct sca3000_state *st;
1464 struct iio_dev *indio_dev;
1465
1466 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
1467 if (!indio_dev)
1468 return -ENOMEM;
1469
1470 st = iio_priv(indio_dev);
1471 spi_set_drvdata(spi, indio_dev);
1472 st->us = spi;
1473 mutex_init(&st->lock);
1474 st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
1475 ->driver_data];
1476
1477 indio_dev->dev.parent = &spi->dev;
1478 indio_dev->name = spi_get_device_id(spi)->name;
1479 indio_dev->info = &sca3000_info;
1480 if (st->info->temp_output) {
1481 indio_dev->channels = sca3000_channels_with_temp;
1482 indio_dev->num_channels =
1483 ARRAY_SIZE(sca3000_channels_with_temp);
1484 } else {
1485 indio_dev->channels = sca3000_channels;
1486 indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
1487 }
1488 indio_dev->modes = INDIO_DIRECT_MODE;
1489
1490 sca3000_configure_ring(indio_dev);
1491
1492 if (spi->irq) {
1493 ret = request_threaded_irq(spi->irq,
1494 NULL,
1495 &sca3000_event_handler,
1496 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1497 "sca3000",
1498 indio_dev);
1499 if (ret)
1500 return ret;
1501 }
1502 indio_dev->setup_ops = &sca3000_ring_setup_ops;
1503 ret = sca3000_clean_setup(st);
1504 if (ret)
1505 goto error_free_irq;
1506
1507 ret = sca3000_print_rev(indio_dev);
1508 if (ret)
1509 goto error_free_irq;
1510
1511 return iio_device_register(indio_dev);
1512
1513error_free_irq:
1514 if (spi->irq)
1515 free_irq(spi->irq, indio_dev);
1516
1517 return ret;
1518}
1519
1520static int sca3000_stop_all_interrupts(struct sca3000_state *st)
1521{
1522 int ret;
1523
1524 mutex_lock(&st->lock);
1525 ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
1526 if (ret)
1527 goto error_ret;
1528 ret = sca3000_write_reg(st, SCA3000_REG_INT_MASK_ADDR,
1529 (st->rx[0] &
1530 ~(SCA3000_REG_INT_MASK_RING_THREE_QUARTER |
1531 SCA3000_REG_INT_MASK_RING_HALF |
1532 SCA3000_REG_INT_MASK_ALL_INTS)));
1533error_ret:
1534 mutex_unlock(&st->lock);
1535 return ret;
1536}
1537
1538static int sca3000_remove(struct spi_device *spi)
1539{
1540 struct iio_dev *indio_dev = spi_get_drvdata(spi);
1541 struct sca3000_state *st = iio_priv(indio_dev);
1542
1543 iio_device_unregister(indio_dev);
1544
1545 /* Must ensure no interrupts can be generated after this! */
1546 sca3000_stop_all_interrupts(st);
1547 if (spi->irq)
1548 free_irq(spi->irq, indio_dev);
1549
1550 sca3000_unconfigure_ring(indio_dev);
1551
1552 return 0;
1553}
1554
1555static const struct spi_device_id sca3000_id[] = {
1556 {"sca3000_d01", d01},
1557 {"sca3000_e02", e02},
1558 {"sca3000_e04", e04},
1559 {"sca3000_e05", e05},
1560 {}
1561};
1562MODULE_DEVICE_TABLE(spi, sca3000_id);
1563
1564static struct spi_driver sca3000_driver = {
1565 .driver = {
1566 .name = "sca3000",
1567 },
1568 .probe = sca3000_probe,
1569 .remove = sca3000_remove,
1570 .id_table = sca3000_id,
1571};
1572module_spi_driver(sca3000_driver);
1573
1574MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
1575MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
1576MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index f8dfdb690563..7c231687109a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -30,6 +30,7 @@
30#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel" 30#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
31#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel" 31#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
32#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq" 32#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
33#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
33 34
34/** 35/**
35* struct st_sensors_platform_data - default accel platform data 36* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ce69048c88e9..f6b6d42385e1 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -43,194 +43,6 @@
43#define ST_ACCEL_FS_AVL_200G 200 43#define ST_ACCEL_FS_AVL_200G 200
44#define ST_ACCEL_FS_AVL_400G 400 44#define ST_ACCEL_FS_AVL_400G 400
45 45
46/* CUSTOM VALUES FOR SENSOR 1 */
47#define ST_ACCEL_1_WAI_EXP 0x33
48#define ST_ACCEL_1_ODR_ADDR 0x20
49#define ST_ACCEL_1_ODR_MASK 0xf0
50#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
51#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
52#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
53#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
54#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
55#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
56#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
57#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
58#define ST_ACCEL_1_FS_ADDR 0x23
59#define ST_ACCEL_1_FS_MASK 0x30
60#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
61#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
62#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
63#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
64#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
65#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
66#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
67#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
68#define ST_ACCEL_1_BDU_ADDR 0x23
69#define ST_ACCEL_1_BDU_MASK 0x80
70#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
71#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
72#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
73#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
74#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
75#define ST_ACCEL_1_MULTIREAD_BIT true
76
77/* CUSTOM VALUES FOR SENSOR 2 */
78#define ST_ACCEL_2_WAI_EXP 0x32
79#define ST_ACCEL_2_ODR_ADDR 0x20
80#define ST_ACCEL_2_ODR_MASK 0x18
81#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
82#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
83#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
84#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
85#define ST_ACCEL_2_PW_ADDR 0x20
86#define ST_ACCEL_2_PW_MASK 0xe0
87#define ST_ACCEL_2_FS_ADDR 0x23
88#define ST_ACCEL_2_FS_MASK 0x30
89#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
90#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
91#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
92#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
93#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
94#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
95#define ST_ACCEL_2_BDU_ADDR 0x23
96#define ST_ACCEL_2_BDU_MASK 0x80
97#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
98#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
99#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
100#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
101#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
102#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
103#define ST_ACCEL_2_OD_IRQ_MASK 0x40
104#define ST_ACCEL_2_MULTIREAD_BIT true
105
106/* CUSTOM VALUES FOR SENSOR 3 */
107#define ST_ACCEL_3_WAI_EXP 0x40
108#define ST_ACCEL_3_ODR_ADDR 0x20
109#define ST_ACCEL_3_ODR_MASK 0xf0
110#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
111#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
112#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
113#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
114#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
115#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
116#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
117#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
118#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
119#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
120#define ST_ACCEL_3_FS_ADDR 0x24
121#define ST_ACCEL_3_FS_MASK 0x38
122#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
123#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
124#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
125#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
126#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
127#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
128#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
129#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
130#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
131#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
132#define ST_ACCEL_3_BDU_ADDR 0x20
133#define ST_ACCEL_3_BDU_MASK 0x08
134#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
135#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
136#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
137#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
138#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
139#define ST_ACCEL_3_IG1_EN_ADDR 0x23
140#define ST_ACCEL_3_IG1_EN_MASK 0x08
141#define ST_ACCEL_3_MULTIREAD_BIT false
142
143/* CUSTOM VALUES FOR SENSOR 4 */
144#define ST_ACCEL_4_WAI_EXP 0x3a
145#define ST_ACCEL_4_ODR_ADDR 0x20
146#define ST_ACCEL_4_ODR_MASK 0x30 /* DF1 and DF0 */
147#define ST_ACCEL_4_ODR_AVL_40HZ_VAL 0x00
148#define ST_ACCEL_4_ODR_AVL_160HZ_VAL 0x01
149#define ST_ACCEL_4_ODR_AVL_640HZ_VAL 0x02
150#define ST_ACCEL_4_ODR_AVL_2560HZ_VAL 0x03
151#define ST_ACCEL_4_PW_ADDR 0x20
152#define ST_ACCEL_4_PW_MASK 0xc0
153#define ST_ACCEL_4_FS_ADDR 0x21
154#define ST_ACCEL_4_FS_MASK 0x80
155#define ST_ACCEL_4_FS_AVL_2_VAL 0X00
156#define ST_ACCEL_4_FS_AVL_6_VAL 0X01
157#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1024)
158#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(340)
159#define ST_ACCEL_4_BDU_ADDR 0x21
160#define ST_ACCEL_4_BDU_MASK 0x40
161#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
162#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
163#define ST_ACCEL_4_MULTIREAD_BIT true
164
165/* CUSTOM VALUES FOR SENSOR 5 */
166#define ST_ACCEL_5_WAI_EXP 0x3b
167#define ST_ACCEL_5_ODR_ADDR 0x20
168#define ST_ACCEL_5_ODR_MASK 0x80
169#define ST_ACCEL_5_ODR_AVL_100HZ_VAL 0x00
170#define ST_ACCEL_5_ODR_AVL_400HZ_VAL 0x01
171#define ST_ACCEL_5_PW_ADDR 0x20
172#define ST_ACCEL_5_PW_MASK 0x40
173#define ST_ACCEL_5_FS_ADDR 0x20
174#define ST_ACCEL_5_FS_MASK 0x20
175#define ST_ACCEL_5_FS_AVL_2_VAL 0X00
176#define ST_ACCEL_5_FS_AVL_8_VAL 0X01
177/* TODO: check these resulting gain settings, these are not in the datsheet */
178#define ST_ACCEL_5_FS_AVL_2_GAIN IIO_G_TO_M_S_2(18000)
179#define ST_ACCEL_5_FS_AVL_8_GAIN IIO_G_TO_M_S_2(72000)
180#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
181#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
182#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
183#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
184#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
185#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
186#define ST_ACCEL_5_OD_IRQ_MASK 0x40
187#define ST_ACCEL_5_IG1_EN_ADDR 0x21
188#define ST_ACCEL_5_IG1_EN_MASK 0x08
189#define ST_ACCEL_5_MULTIREAD_BIT false
190
191/* CUSTOM VALUES FOR SENSOR 6 */
192#define ST_ACCEL_6_WAI_EXP 0x32
193#define ST_ACCEL_6_ODR_ADDR 0x20
194#define ST_ACCEL_6_ODR_MASK 0x18
195#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
196#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
197#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
198#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
199#define ST_ACCEL_6_PW_ADDR 0x20
200#define ST_ACCEL_6_PW_MASK 0x20
201#define ST_ACCEL_6_FS_ADDR 0x23
202#define ST_ACCEL_6_FS_MASK 0x30
203#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
204#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
205#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
206#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
207#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
208#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
209#define ST_ACCEL_6_BDU_ADDR 0x23
210#define ST_ACCEL_6_BDU_MASK 0x80
211#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
212#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
213#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
214#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
215#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
216#define ST_ACCEL_6_MULTIREAD_BIT true
217
218/* CUSTOM VALUES FOR SENSOR 7 */
219#define ST_ACCEL_7_ODR_ADDR 0x20
220#define ST_ACCEL_7_ODR_MASK 0x30
221#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
222#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
223#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
224#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
225#define ST_ACCEL_7_PW_ADDR 0x20
226#define ST_ACCEL_7_PW_MASK 0xc0
227#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
228#define ST_ACCEL_7_BDU_ADDR 0x21
229#define ST_ACCEL_7_BDU_MASK 0x40
230#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
231#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
232#define ST_ACCEL_7_MULTIREAD_BIT false
233
234static const struct iio_chan_spec st_accel_8bit_channels[] = { 46static const struct iio_chan_spec st_accel_8bit_channels[] = {
235 ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, 47 ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
236 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 48 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -281,7 +93,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
281 93
282static const struct st_sensor_settings st_accel_sensors_settings[] = { 94static const struct st_sensor_settings st_accel_sensors_settings[] = {
283 { 95 {
284 .wai = ST_ACCEL_1_WAI_EXP, 96 .wai = 0x33,
285 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 97 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
286 .sensors_supported = { 98 .sensors_supported = {
287 [0] = LIS3DH_ACCEL_DEV_NAME, 99 [0] = LIS3DH_ACCEL_DEV_NAME,
@@ -294,22 +106,22 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
294 }, 106 },
295 .ch = (struct iio_chan_spec *)st_accel_12bit_channels, 107 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
296 .odr = { 108 .odr = {
297 .addr = ST_ACCEL_1_ODR_ADDR, 109 .addr = 0x20,
298 .mask = ST_ACCEL_1_ODR_MASK, 110 .mask = 0xf0,
299 .odr_avl = { 111 .odr_avl = {
300 { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, }, 112 { .hz = 1, .value = 0x01, },
301 { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, }, 113 { .hz = 10, .value = 0x02, },
302 { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, }, 114 { .hz = 25, .value = 0x03, },
303 { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, }, 115 { .hz = 50, .value = 0x04, },
304 { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, }, 116 { .hz = 100, .value = 0x05, },
305 { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, }, 117 { .hz = 200, .value = 0x06, },
306 { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, }, 118 { .hz = 400, .value = 0x07, },
307 { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, }, 119 { .hz = 1600, .value = 0x08, },
308 }, 120 },
309 }, 121 },
310 .pw = { 122 .pw = {
311 .addr = ST_ACCEL_1_ODR_ADDR, 123 .addr = 0x20,
312 .mask = ST_ACCEL_1_ODR_MASK, 124 .mask = 0xf0,
313 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 125 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
314 }, 126 },
315 .enable_axis = { 127 .enable_axis = {
@@ -317,48 +129,48 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
317 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 129 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
318 }, 130 },
319 .fs = { 131 .fs = {
320 .addr = ST_ACCEL_1_FS_ADDR, 132 .addr = 0x23,
321 .mask = ST_ACCEL_1_FS_MASK, 133 .mask = 0x30,
322 .fs_avl = { 134 .fs_avl = {
323 [0] = { 135 [0] = {
324 .num = ST_ACCEL_FS_AVL_2G, 136 .num = ST_ACCEL_FS_AVL_2G,
325 .value = ST_ACCEL_1_FS_AVL_2_VAL, 137 .value = 0x00,
326 .gain = ST_ACCEL_1_FS_AVL_2_GAIN, 138 .gain = IIO_G_TO_M_S_2(1000),
327 }, 139 },
328 [1] = { 140 [1] = {
329 .num = ST_ACCEL_FS_AVL_4G, 141 .num = ST_ACCEL_FS_AVL_4G,
330 .value = ST_ACCEL_1_FS_AVL_4_VAL, 142 .value = 0x01,
331 .gain = ST_ACCEL_1_FS_AVL_4_GAIN, 143 .gain = IIO_G_TO_M_S_2(2000),
332 }, 144 },
333 [2] = { 145 [2] = {
334 .num = ST_ACCEL_FS_AVL_8G, 146 .num = ST_ACCEL_FS_AVL_8G,
335 .value = ST_ACCEL_1_FS_AVL_8_VAL, 147 .value = 0x02,
336 .gain = ST_ACCEL_1_FS_AVL_8_GAIN, 148 .gain = IIO_G_TO_M_S_2(4000),
337 }, 149 },
338 [3] = { 150 [3] = {
339 .num = ST_ACCEL_FS_AVL_16G, 151 .num = ST_ACCEL_FS_AVL_16G,
340 .value = ST_ACCEL_1_FS_AVL_16_VAL, 152 .value = 0x03,
341 .gain = ST_ACCEL_1_FS_AVL_16_GAIN, 153 .gain = IIO_G_TO_M_S_2(12000),
342 }, 154 },
343 }, 155 },
344 }, 156 },
345 .bdu = { 157 .bdu = {
346 .addr = ST_ACCEL_1_BDU_ADDR, 158 .addr = 0x23,
347 .mask = ST_ACCEL_1_BDU_MASK, 159 .mask = 0x80,
348 }, 160 },
349 .drdy_irq = { 161 .drdy_irq = {
350 .addr = ST_ACCEL_1_DRDY_IRQ_ADDR, 162 .addr = 0x22,
351 .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK, 163 .mask_int1 = 0x10,
352 .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK, 164 .mask_int2 = 0x08,
353 .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR, 165 .addr_ihl = 0x25,
354 .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK, 166 .mask_ihl = 0x02,
355 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 167 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
356 }, 168 },
357 .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT, 169 .multi_read_bit = true,
358 .bootime = 2, 170 .bootime = 2,
359 }, 171 },
360 { 172 {
361 .wai = ST_ACCEL_2_WAI_EXP, 173 .wai = 0x32,
362 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 174 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
363 .sensors_supported = { 175 .sensors_supported = {
364 [0] = LIS331DLH_ACCEL_DEV_NAME, 176 [0] = LIS331DLH_ACCEL_DEV_NAME,
@@ -368,18 +180,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
368 }, 180 },
369 .ch = (struct iio_chan_spec *)st_accel_12bit_channels, 181 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
370 .odr = { 182 .odr = {
371 .addr = ST_ACCEL_2_ODR_ADDR, 183 .addr = 0x20,
372 .mask = ST_ACCEL_2_ODR_MASK, 184 .mask = 0x18,
373 .odr_avl = { 185 .odr_avl = {
374 { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, }, 186 { .hz = 50, .value = 0x00, },
375 { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, }, 187 { .hz = 100, .value = 0x01, },
376 { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, }, 188 { .hz = 400, .value = 0x02, },
377 { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, }, 189 { .hz = 1000, .value = 0x03, },
378 }, 190 },
379 }, 191 },
380 .pw = { 192 .pw = {
381 .addr = ST_ACCEL_2_PW_ADDR, 193 .addr = 0x20,
382 .mask = ST_ACCEL_2_PW_MASK, 194 .mask = 0xe0,
383 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 195 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
384 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 196 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
385 }, 197 },
@@ -388,69 +200,69 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
388 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 200 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
389 }, 201 },
390 .fs = { 202 .fs = {
391 .addr = ST_ACCEL_2_FS_ADDR, 203 .addr = 0x23,
392 .mask = ST_ACCEL_2_FS_MASK, 204 .mask = 0x30,
393 .fs_avl = { 205 .fs_avl = {
394 [0] = { 206 [0] = {
395 .num = ST_ACCEL_FS_AVL_2G, 207 .num = ST_ACCEL_FS_AVL_2G,
396 .value = ST_ACCEL_2_FS_AVL_2_VAL, 208 .value = 0x00,
397 .gain = ST_ACCEL_2_FS_AVL_2_GAIN, 209 .gain = IIO_G_TO_M_S_2(1000),
398 }, 210 },
399 [1] = { 211 [1] = {
400 .num = ST_ACCEL_FS_AVL_4G, 212 .num = ST_ACCEL_FS_AVL_4G,
401 .value = ST_ACCEL_2_FS_AVL_4_VAL, 213 .value = 0x01,
402 .gain = ST_ACCEL_2_FS_AVL_4_GAIN, 214 .gain = IIO_G_TO_M_S_2(2000),
403 }, 215 },
404 [2] = { 216 [2] = {
405 .num = ST_ACCEL_FS_AVL_8G, 217 .num = ST_ACCEL_FS_AVL_8G,
406 .value = ST_ACCEL_2_FS_AVL_8_VAL, 218 .value = 0x03,
407 .gain = ST_ACCEL_2_FS_AVL_8_GAIN, 219 .gain = IIO_G_TO_M_S_2(3900),
408 }, 220 },
409 }, 221 },
410 }, 222 },
411 .bdu = { 223 .bdu = {
412 .addr = ST_ACCEL_2_BDU_ADDR, 224 .addr = 0x23,
413 .mask = ST_ACCEL_2_BDU_MASK, 225 .mask = 0x80,
414 }, 226 },
415 .drdy_irq = { 227 .drdy_irq = {
416 .addr = ST_ACCEL_2_DRDY_IRQ_ADDR, 228 .addr = 0x22,
417 .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK, 229 .mask_int1 = 0x02,
418 .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK, 230 .mask_int2 = 0x10,
419 .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR, 231 .addr_ihl = 0x22,
420 .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK, 232 .mask_ihl = 0x80,
421 .addr_od = ST_ACCEL_2_OD_IRQ_ADDR, 233 .addr_od = 0x22,
422 .mask_od = ST_ACCEL_2_OD_IRQ_MASK, 234 .mask_od = 0x40,
423 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 235 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
424 }, 236 },
425 .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT, 237 .multi_read_bit = true,
426 .bootime = 2, 238 .bootime = 2,
427 }, 239 },
428 { 240 {
429 .wai = ST_ACCEL_3_WAI_EXP, 241 .wai = 0x40,
430 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 242 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
431 .sensors_supported = { 243 .sensors_supported = {
432 [0] = LSM330_ACCEL_DEV_NAME, 244 [0] = LSM330_ACCEL_DEV_NAME,
433 }, 245 },
434 .ch = (struct iio_chan_spec *)st_accel_16bit_channels, 246 .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
435 .odr = { 247 .odr = {
436 .addr = ST_ACCEL_3_ODR_ADDR, 248 .addr = 0x20,
437 .mask = ST_ACCEL_3_ODR_MASK, 249 .mask = 0xf0,
438 .odr_avl = { 250 .odr_avl = {
439 { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL }, 251 { .hz = 3, .value = 0x01, },
440 { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, }, 252 { .hz = 6, .value = 0x02, },
441 { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, }, 253 { .hz = 12, .value = 0x03, },
442 { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, }, 254 { .hz = 25, .value = 0x04, },
443 { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, }, 255 { .hz = 50, .value = 0x05, },
444 { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, }, 256 { .hz = 100, .value = 0x06, },
445 { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, }, 257 { .hz = 200, .value = 0x07, },
446 { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, }, 258 { .hz = 400, .value = 0x08, },
447 { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, }, 259 { .hz = 800, .value = 0x09, },
448 { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, }, 260 { .hz = 1600, .value = 0x0a, },
449 }, 261 },
450 }, 262 },
451 .pw = { 263 .pw = {
452 .addr = ST_ACCEL_3_ODR_ADDR, 264 .addr = 0x20,
453 .mask = ST_ACCEL_3_ODR_MASK, 265 .mask = 0xf0,
454 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 266 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
455 }, 267 },
456 .enable_axis = { 268 .enable_axis = {
@@ -458,75 +270,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
458 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 270 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
459 }, 271 },
460 .fs = { 272 .fs = {
461 .addr = ST_ACCEL_3_FS_ADDR, 273 .addr = 0x24,
462 .mask = ST_ACCEL_3_FS_MASK, 274 .mask = 0x38,
463 .fs_avl = { 275 .fs_avl = {
464 [0] = { 276 [0] = {
465 .num = ST_ACCEL_FS_AVL_2G, 277 .num = ST_ACCEL_FS_AVL_2G,
466 .value = ST_ACCEL_3_FS_AVL_2_VAL, 278 .value = 0x00,
467 .gain = ST_ACCEL_3_FS_AVL_2_GAIN, 279 .gain = IIO_G_TO_M_S_2(61),
468 }, 280 },
469 [1] = { 281 [1] = {
470 .num = ST_ACCEL_FS_AVL_4G, 282 .num = ST_ACCEL_FS_AVL_4G,
471 .value = ST_ACCEL_3_FS_AVL_4_VAL, 283 .value = 0x01,
472 .gain = ST_ACCEL_3_FS_AVL_4_GAIN, 284 .gain = IIO_G_TO_M_S_2(122),
473 }, 285 },
474 [2] = { 286 [2] = {
475 .num = ST_ACCEL_FS_AVL_6G, 287 .num = ST_ACCEL_FS_AVL_6G,
476 .value = ST_ACCEL_3_FS_AVL_6_VAL, 288 .value = 0x02,
477 .gain = ST_ACCEL_3_FS_AVL_6_GAIN, 289 .gain = IIO_G_TO_M_S_2(183),
478 }, 290 },
479 [3] = { 291 [3] = {
480 .num = ST_ACCEL_FS_AVL_8G, 292 .num = ST_ACCEL_FS_AVL_8G,
481 .value = ST_ACCEL_3_FS_AVL_8_VAL, 293 .value = 0x03,
482 .gain = ST_ACCEL_3_FS_AVL_8_GAIN, 294 .gain = IIO_G_TO_M_S_2(244),
483 }, 295 },
484 [4] = { 296 [4] = {
485 .num = ST_ACCEL_FS_AVL_16G, 297 .num = ST_ACCEL_FS_AVL_16G,
486 .value = ST_ACCEL_3_FS_AVL_16_VAL, 298 .value = 0x04,
487 .gain = ST_ACCEL_3_FS_AVL_16_GAIN, 299 .gain = IIO_G_TO_M_S_2(732),
488 }, 300 },
489 }, 301 },
490 }, 302 },
491 .bdu = { 303 .bdu = {
492 .addr = ST_ACCEL_3_BDU_ADDR, 304 .addr = 0x20,
493 .mask = ST_ACCEL_3_BDU_MASK, 305 .mask = 0x08,
494 }, 306 },
495 .drdy_irq = { 307 .drdy_irq = {
496 .addr = ST_ACCEL_3_DRDY_IRQ_ADDR, 308 .addr = 0x23,
497 .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK, 309 .mask_int1 = 0x80,
498 .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK, 310 .mask_int2 = 0x00,
499 .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR, 311 .addr_ihl = 0x23,
500 .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK, 312 .mask_ihl = 0x40,
501 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 313 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
502 .ig1 = { 314 .ig1 = {
503 .en_addr = ST_ACCEL_3_IG1_EN_ADDR, 315 .en_addr = 0x23,
504 .en_mask = ST_ACCEL_3_IG1_EN_MASK, 316 .en_mask = 0x08,
505 }, 317 },
506 }, 318 },
507 .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT, 319 .multi_read_bit = false,
508 .bootime = 2, 320 .bootime = 2,
509 }, 321 },
510 { 322 {
511 .wai = ST_ACCEL_4_WAI_EXP, 323 .wai = 0x3a,
512 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 324 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
513 .sensors_supported = { 325 .sensors_supported = {
514 [0] = LIS3LV02DL_ACCEL_DEV_NAME, 326 [0] = LIS3LV02DL_ACCEL_DEV_NAME,
515 }, 327 },
516 .ch = (struct iio_chan_spec *)st_accel_12bit_channels, 328 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
517 .odr = { 329 .odr = {
518 .addr = ST_ACCEL_4_ODR_ADDR, 330 .addr = 0x20,
519 .mask = ST_ACCEL_4_ODR_MASK, 331 .mask = 0x30, /* DF1 and DF0 */
520 .odr_avl = { 332 .odr_avl = {
521 { 40, ST_ACCEL_4_ODR_AVL_40HZ_VAL }, 333 { .hz = 40, .value = 0x00, },
522 { 160, ST_ACCEL_4_ODR_AVL_160HZ_VAL, }, 334 { .hz = 160, .value = 0x01, },
523 { 640, ST_ACCEL_4_ODR_AVL_640HZ_VAL, }, 335 { .hz = 640, .value = 0x02, },
524 { 2560, ST_ACCEL_4_ODR_AVL_2560HZ_VAL, }, 336 { .hz = 2560, .value = 0x03, },
525 }, 337 },
526 }, 338 },
527 .pw = { 339 .pw = {
528 .addr = ST_ACCEL_4_PW_ADDR, 340 .addr = 0x20,
529 .mask = ST_ACCEL_4_PW_MASK, 341 .mask = 0xc0,
530 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 342 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
531 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 343 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
532 }, 344 },
@@ -535,51 +347,51 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
535 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 347 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
536 }, 348 },
537 .fs = { 349 .fs = {
538 .addr = ST_ACCEL_4_FS_ADDR, 350 .addr = 0x21,
539 .mask = ST_ACCEL_4_FS_MASK, 351 .mask = 0x80,
540 .fs_avl = { 352 .fs_avl = {
541 [0] = { 353 [0] = {
542 .num = ST_ACCEL_FS_AVL_2G, 354 .num = ST_ACCEL_FS_AVL_2G,
543 .value = ST_ACCEL_4_FS_AVL_2_VAL, 355 .value = 0x00,
544 .gain = ST_ACCEL_4_FS_AVL_2_GAIN, 356 .gain = IIO_G_TO_M_S_2(1024),
545 }, 357 },
546 [1] = { 358 [1] = {
547 .num = ST_ACCEL_FS_AVL_6G, 359 .num = ST_ACCEL_FS_AVL_6G,
548 .value = ST_ACCEL_4_FS_AVL_6_VAL, 360 .value = 0x01,
549 .gain = ST_ACCEL_4_FS_AVL_6_GAIN, 361 .gain = IIO_G_TO_M_S_2(340),
550 }, 362 },
551 }, 363 },
552 }, 364 },
553 .bdu = { 365 .bdu = {
554 .addr = ST_ACCEL_4_BDU_ADDR, 366 .addr = 0x21,
555 .mask = ST_ACCEL_4_BDU_MASK, 367 .mask = 0x40,
556 }, 368 },
557 .drdy_irq = { 369 .drdy_irq = {
558 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, 370 .addr = 0x21,
559 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, 371 .mask_int1 = 0x04,
560 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 372 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
561 }, 373 },
562 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, 374 .multi_read_bit = true,
563 .bootime = 2, /* guess */ 375 .bootime = 2, /* guess */
564 }, 376 },
565 { 377 {
566 .wai = ST_ACCEL_5_WAI_EXP, 378 .wai = 0x3b,
567 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 379 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
568 .sensors_supported = { 380 .sensors_supported = {
569 [0] = LIS331DL_ACCEL_DEV_NAME, 381 [0] = LIS331DL_ACCEL_DEV_NAME,
570 }, 382 },
571 .ch = (struct iio_chan_spec *)st_accel_8bit_channels, 383 .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
572 .odr = { 384 .odr = {
573 .addr = ST_ACCEL_5_ODR_ADDR, 385 .addr = 0x20,
574 .mask = ST_ACCEL_5_ODR_MASK, 386 .mask = 0x80,
575 .odr_avl = { 387 .odr_avl = {
576 { 100, ST_ACCEL_5_ODR_AVL_100HZ_VAL }, 388 { .hz = 100, .value = 0x00, },
577 { 400, ST_ACCEL_5_ODR_AVL_400HZ_VAL, }, 389 { .hz = 400, .value = 0x01, },
578 }, 390 },
579 }, 391 },
580 .pw = { 392 .pw = {
581 .addr = ST_ACCEL_5_PW_ADDR, 393 .addr = 0x20,
582 .mask = ST_ACCEL_5_PW_MASK, 394 .mask = 0x40,
583 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 395 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
584 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 396 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
585 }, 397 },
@@ -588,54 +400,58 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
588 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 400 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
589 }, 401 },
590 .fs = { 402 .fs = {
591 .addr = ST_ACCEL_5_FS_ADDR, 403 .addr = 0x20,
592 .mask = ST_ACCEL_5_FS_MASK, 404 .mask = 0x20,
405 /*
406 * TODO: check these resulting gain settings, these are
 407 * not in the datasheet
408 */
593 .fs_avl = { 409 .fs_avl = {
594 [0] = { 410 [0] = {
595 .num = ST_ACCEL_FS_AVL_2G, 411 .num = ST_ACCEL_FS_AVL_2G,
596 .value = ST_ACCEL_5_FS_AVL_2_VAL, 412 .value = 0x00,
597 .gain = ST_ACCEL_5_FS_AVL_2_GAIN, 413 .gain = IIO_G_TO_M_S_2(18000),
598 }, 414 },
599 [1] = { 415 [1] = {
600 .num = ST_ACCEL_FS_AVL_8G, 416 .num = ST_ACCEL_FS_AVL_8G,
601 .value = ST_ACCEL_5_FS_AVL_8_VAL, 417 .value = 0x01,
602 .gain = ST_ACCEL_5_FS_AVL_8_GAIN, 418 .gain = IIO_G_TO_M_S_2(72000),
603 }, 419 },
604 }, 420 },
605 }, 421 },
606 .drdy_irq = { 422 .drdy_irq = {
607 .addr = ST_ACCEL_5_DRDY_IRQ_ADDR, 423 .addr = 0x22,
608 .mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK, 424 .mask_int1 = 0x04,
609 .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK, 425 .mask_int2 = 0x20,
610 .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR, 426 .addr_ihl = 0x22,
611 .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK, 427 .mask_ihl = 0x80,
612 .addr_od = ST_ACCEL_5_OD_IRQ_ADDR, 428 .addr_od = 0x22,
613 .mask_od = ST_ACCEL_5_OD_IRQ_MASK, 429 .mask_od = 0x40,
614 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 430 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
615 }, 431 },
616 .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT, 432 .multi_read_bit = false,
617 .bootime = 2, /* guess */ 433 .bootime = 2, /* guess */
618 }, 434 },
619 { 435 {
620 .wai = ST_ACCEL_6_WAI_EXP, 436 .wai = 0x32,
621 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 437 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
622 .sensors_supported = { 438 .sensors_supported = {
623 [0] = H3LIS331DL_DRIVER_NAME, 439 [0] = H3LIS331DL_DRIVER_NAME,
624 }, 440 },
625 .ch = (struct iio_chan_spec *)st_accel_12bit_channels, 441 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
626 .odr = { 442 .odr = {
627 .addr = ST_ACCEL_6_ODR_ADDR, 443 .addr = 0x20,
628 .mask = ST_ACCEL_6_ODR_MASK, 444 .mask = 0x18,
629 .odr_avl = { 445 .odr_avl = {
630 { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL }, 446 { .hz = 50, .value = 0x00, },
631 { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, }, 447 { .hz = 100, .value = 0x01, },
632 { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, }, 448 { .hz = 400, .value = 0x02, },
633 { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, }, 449 { .hz = 1000, .value = 0x03, },
634 }, 450 },
635 }, 451 },
636 .pw = { 452 .pw = {
637 .addr = ST_ACCEL_6_PW_ADDR, 453 .addr = 0x20,
638 .mask = ST_ACCEL_6_PW_MASK, 454 .mask = 0x20,
639 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 455 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
640 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 456 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
641 }, 457 },
@@ -644,38 +460,38 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
644 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 460 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
645 }, 461 },
646 .fs = { 462 .fs = {
647 .addr = ST_ACCEL_6_FS_ADDR, 463 .addr = 0x23,
648 .mask = ST_ACCEL_6_FS_MASK, 464 .mask = 0x30,
649 .fs_avl = { 465 .fs_avl = {
650 [0] = { 466 [0] = {
651 .num = ST_ACCEL_FS_AVL_100G, 467 .num = ST_ACCEL_FS_AVL_100G,
652 .value = ST_ACCEL_6_FS_AVL_100_VAL, 468 .value = 0x00,
653 .gain = ST_ACCEL_6_FS_AVL_100_GAIN, 469 .gain = IIO_G_TO_M_S_2(49000),
654 }, 470 },
655 [1] = { 471 [1] = {
656 .num = ST_ACCEL_FS_AVL_200G, 472 .num = ST_ACCEL_FS_AVL_200G,
657 .value = ST_ACCEL_6_FS_AVL_200_VAL, 473 .value = 0x01,
658 .gain = ST_ACCEL_6_FS_AVL_200_GAIN, 474 .gain = IIO_G_TO_M_S_2(98000),
659 }, 475 },
660 [2] = { 476 [2] = {
661 .num = ST_ACCEL_FS_AVL_400G, 477 .num = ST_ACCEL_FS_AVL_400G,
662 .value = ST_ACCEL_6_FS_AVL_400_VAL, 478 .value = 0x03,
663 .gain = ST_ACCEL_6_FS_AVL_400_GAIN, 479 .gain = IIO_G_TO_M_S_2(195000),
664 }, 480 },
665 }, 481 },
666 }, 482 },
667 .bdu = { 483 .bdu = {
668 .addr = ST_ACCEL_6_BDU_ADDR, 484 .addr = 0x23,
669 .mask = ST_ACCEL_6_BDU_MASK, 485 .mask = 0x80,
670 }, 486 },
671 .drdy_irq = { 487 .drdy_irq = {
672 .addr = ST_ACCEL_6_DRDY_IRQ_ADDR, 488 .addr = 0x22,
673 .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK, 489 .mask_int1 = 0x02,
674 .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK, 490 .mask_int2 = 0x10,
675 .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR, 491 .addr_ihl = 0x22,
676 .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK, 492 .mask_ihl = 0x80,
677 }, 493 },
678 .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT, 494 .multi_read_bit = true,
679 .bootime = 2, 495 .bootime = 2,
680 }, 496 },
681 { 497 {
@@ -685,18 +501,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
685 }, 501 },
686 .ch = (struct iio_chan_spec *)st_accel_12bit_channels, 502 .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
687 .odr = { 503 .odr = {
688 .addr = ST_ACCEL_7_ODR_ADDR, 504 .addr = 0x20,
689 .mask = ST_ACCEL_7_ODR_MASK, 505 .mask = 0x30,
690 .odr_avl = { 506 .odr_avl = {
691 { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, }, 507 { .hz = 280, .value = 0x00, },
692 { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, }, 508 { .hz = 560, .value = 0x01, },
693 { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, }, 509 { .hz = 1120, .value = 0x02, },
694 { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, }, 510 { .hz = 4480, .value = 0x03, },
695 }, 511 },
696 }, 512 },
697 .pw = { 513 .pw = {
698 .addr = ST_ACCEL_7_PW_ADDR, 514 .addr = 0x20,
699 .mask = ST_ACCEL_7_PW_MASK, 515 .mask = 0xc0,
700 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 516 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
701 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 517 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
702 }, 518 },
@@ -708,7 +524,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
708 .fs_avl = { 524 .fs_avl = {
709 [0] = { 525 [0] = {
710 .num = ST_ACCEL_FS_AVL_2G, 526 .num = ST_ACCEL_FS_AVL_2G,
711 .gain = ST_ACCEL_7_FS_AVL_2_GAIN, 527 .gain = IIO_G_TO_M_S_2(488),
712 }, 528 },
713 }, 529 },
714 }, 530 },
@@ -719,11 +535,78 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
719 .bdu = { 535 .bdu = {
720 }, 536 },
721 .drdy_irq = { 537 .drdy_irq = {
722 .addr = ST_ACCEL_7_DRDY_IRQ_ADDR, 538 .addr = 0x21,
723 .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK, 539 .mask_int1 = 0x04,
540 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
541 },
542 .multi_read_bit = false,
543 .bootime = 2,
544 },
545 {
546 .wai = 0x33,
547 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
548 .sensors_supported = {
549 [0] = LNG2DM_ACCEL_DEV_NAME,
550 },
551 .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
552 .odr = {
553 .addr = 0x20,
554 .mask = 0xf0,
555 .odr_avl = {
556 { .hz = 1, .value = 0x01, },
557 { .hz = 10, .value = 0x02, },
558 { .hz = 25, .value = 0x03, },
559 { .hz = 50, .value = 0x04, },
560 { .hz = 100, .value = 0x05, },
561 { .hz = 200, .value = 0x06, },
562 { .hz = 400, .value = 0x07, },
563 { .hz = 1600, .value = 0x08, },
564 },
565 },
566 .pw = {
567 .addr = 0x20,
568 .mask = 0xf0,
569 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
570 },
571 .enable_axis = {
572 .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
573 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
574 },
575 .fs = {
576 .addr = 0x23,
577 .mask = 0x30,
578 .fs_avl = {
579 [0] = {
580 .num = ST_ACCEL_FS_AVL_2G,
581 .value = 0x00,
582 .gain = IIO_G_TO_M_S_2(15600),
583 },
584 [1] = {
585 .num = ST_ACCEL_FS_AVL_4G,
586 .value = 0x01,
587 .gain = IIO_G_TO_M_S_2(31200),
588 },
589 [2] = {
590 .num = ST_ACCEL_FS_AVL_8G,
591 .value = 0x02,
592 .gain = IIO_G_TO_M_S_2(62500),
593 },
594 [3] = {
595 .num = ST_ACCEL_FS_AVL_16G,
596 .value = 0x03,
597 .gain = IIO_G_TO_M_S_2(187500),
598 },
599 },
600 },
601 .drdy_irq = {
602 .addr = 0x22,
603 .mask_int1 = 0x10,
604 .mask_int2 = 0x08,
605 .addr_ihl = 0x25,
606 .mask_ihl = 0x02,
724 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 607 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
725 }, 608 },
726 .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT, 609 .multi_read_bit = true,
727 .bootime = 2, 610 .bootime = 2,
728 }, 611 },
729}; 612};
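Most of the st_accel_core.c churn above is mechanical: the per-sensor ST_ACCEL_n_* macros go away and the register addresses, masks, ODR and full-scale tables are written inline with designated initializers. A stand-alone sketch of the old and new initializer styles follows; the struct is a simplified stand-in for the kernel's st_sensor_odr_avl, not its real definition.

/*
 * Illustration only, not part of the commit: a positional initializer hiding
 * the raw register value behind a macro vs. a designated initializer with
 * the value written inline, as the hunks above do.
 */
#include <stdio.h>

struct odr_avl {			/* simplified stand-in */
	unsigned int hz;
	unsigned char value;
};

#define ST_ACCEL_1_ODR_AVL_1HZ_VAL	0x01	/* old style */

static const struct odr_avl old_style = { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL };
static const struct odr_avl new_style = { .hz = 1, .value = 0x01 };

int main(void)
{
	printf("old: %u Hz -> 0x%02x\n", old_style.hz, old_style.value);
	printf("new: %u Hz -> 0x%02x\n", new_style.hz, new_style.value);
	return 0;
}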
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index e9d427a5df7c..c0f8867aa1ea 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,6 +84,10 @@ static const struct of_device_id st_accel_of_match[] = {
84 .compatible = "st,lis3l02dq", 84 .compatible = "st,lis3l02dq",
85 .data = LIS3L02DQ_ACCEL_DEV_NAME, 85 .data = LIS3L02DQ_ACCEL_DEV_NAME,
86 }, 86 },
87 {
88 .compatible = "st,lng2dm-accel",
89 .data = LNG2DM_ACCEL_DEV_NAME,
90 },
87 {}, 91 {},
88}; 92};
89MODULE_DEVICE_TABLE(of, st_accel_of_match); 93MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -135,6 +139,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
135 { LSM303AGR_ACCEL_DEV_NAME }, 139 { LSM303AGR_ACCEL_DEV_NAME },
136 { LIS2DH12_ACCEL_DEV_NAME }, 140 { LIS2DH12_ACCEL_DEV_NAME },
137 { LIS3L02DQ_ACCEL_DEV_NAME }, 141 { LIS3L02DQ_ACCEL_DEV_NAME },
142 { LNG2DM_ACCEL_DEV_NAME },
138 {}, 143 {},
139}; 144};
140MODULE_DEVICE_TABLE(i2c, st_accel_id_table); 145MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index efd43941d45d..c25ac50d4600 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -60,6 +60,7 @@ static const struct spi_device_id st_accel_id_table[] = {
60 { LSM303AGR_ACCEL_DEV_NAME }, 60 { LSM303AGR_ACCEL_DEV_NAME },
61 { LIS2DH12_ACCEL_DEV_NAME }, 61 { LIS2DH12_ACCEL_DEV_NAME },
62 { LIS3L02DQ_ACCEL_DEV_NAME }, 62 { LIS3L02DQ_ACCEL_DEV_NAME },
63 { LNG2DM_ACCEL_DEV_NAME },
63 {}, 64 {},
64}; 65};
65MODULE_DEVICE_TABLE(spi, st_accel_id_table); 66MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 99c051490eff..38bc319904c4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -58,6 +58,18 @@ config AD7476
58 To compile this driver as a module, choose M here: the 58 To compile this driver as a module, choose M here: the
59 module will be called ad7476. 59 module will be called ad7476.
60 60
61config AD7766
62 tristate "Analog Devices AD7766/AD7767 ADC driver"
63 depends on SPI_MASTER
64 select IIO_BUFFER
65 select IIO_TRIGGERED_BUFFER
66 help
67 Say yes here to build support for Analog Devices AD7766, AD7766-1,
68 AD7766-2, AD7767, AD7767-1, AD7767-2 SPI analog to digital converters.
69
70 To compile this driver as a module, choose M here: the module will be
71 called ad7766.
72
61config AD7791 73config AD7791
62 tristate "Analog Devices AD7791 ADC driver" 74 tristate "Analog Devices AD7791 ADC driver"
63 depends on SPI 75 depends on SPI
@@ -195,6 +207,16 @@ config DA9150_GPADC
195 To compile this driver as a module, choose M here: the module will be 207 To compile this driver as a module, choose M here: the module will be
196 called berlin2-adc. 208 called berlin2-adc.
197 209
210config ENVELOPE_DETECTOR
211 tristate "Envelope detector using a DAC and a comparator"
212 depends on OF
213 help
214 Say yes here to build support for an envelope detector using a DAC
215 and a comparator.
216
217 To compile this driver as a module, choose M here: the module will be
218 called envelope-detector.
219
198config EXYNOS_ADC 220config EXYNOS_ADC
199 tristate "Exynos ADC driver support" 221 tristate "Exynos ADC driver support"
200 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST) 222 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -419,6 +441,28 @@ config ROCKCHIP_SARADC
419 To compile this driver as a module, choose M here: the 441 To compile this driver as a module, choose M here: the
420 module will be called rockchip_saradc. 442 module will be called rockchip_saradc.
421 443
444config STM32_ADC_CORE
445 tristate "STMicroelectronics STM32 adc core"
446 depends on ARCH_STM32 || COMPILE_TEST
447 depends on OF
448 depends on REGULATOR
449 help
450 Select this option to enable the core driver for STMicroelectronics
451 STM32 analog-to-digital converter (ADC).
452
453 This driver can also be built as a module. If so, the module
454 will be called stm32-adc-core.
455
456config STM32_ADC
457 tristate "STMicroelectronics STM32 adc"
458 depends on STM32_ADC_CORE
459 help
460 Say yes here to build support for STMicroelectronics stm32 Analog
461 to Digital Converter (ADC).
462
463 This driver can also be built as a module. If so, the module
464 will be called stm32-adc.
465
422config STX104 466config STX104
423 tristate "Apex Embedded Systems STX104 driver" 467 tristate "Apex Embedded Systems STX104 driver"
424 depends on X86 && ISA_BUS_API 468 depends on X86 && ISA_BUS_API
@@ -449,6 +493,8 @@ config TI_ADC081C
449config TI_ADC0832 493config TI_ADC0832
450 tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838" 494 tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
451 depends on SPI 495 depends on SPI
496 select IIO_BUFFER
497 select IIO_TRIGGERED_BUFFER
452 help 498 help
453 If you say yes here you get support for Texas Instruments ADC0831, 499 If you say yes here you get support for Texas Instruments ADC0831,
454 ADC0832, ADC0834, ADC0838 ADC chips. 500 ADC0832, ADC0834, ADC0838 ADC chips.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 7a40c04c311f..d36c4be8d1fc 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD7291) += ad7291.o
9obj-$(CONFIG_AD7298) += ad7298.o 9obj-$(CONFIG_AD7298) += ad7298.o
10obj-$(CONFIG_AD7923) += ad7923.o 10obj-$(CONFIG_AD7923) += ad7923.o
11obj-$(CONFIG_AD7476) += ad7476.o 11obj-$(CONFIG_AD7476) += ad7476.o
12obj-$(CONFIG_AD7766) += ad7766.o
12obj-$(CONFIG_AD7791) += ad7791.o 13obj-$(CONFIG_AD7791) += ad7791.o
13obj-$(CONFIG_AD7793) += ad7793.o 14obj-$(CONFIG_AD7793) += ad7793.o
14obj-$(CONFIG_AD7887) += ad7887.o 15obj-$(CONFIG_AD7887) += ad7887.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
20obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o 21obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
21obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o 22obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
22obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o 23obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
24obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
23obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o 25obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
24obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o 26obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
25obj-$(CONFIG_HI8435) += hi8435.o 27obj-$(CONFIG_HI8435) += hi8435.o
@@ -41,6 +43,8 @@ obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
41obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o 43obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
42obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o 44obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
43obj-$(CONFIG_STX104) += stx104.o 45obj-$(CONFIG_STX104) += stx104.o
46obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
47obj-$(CONFIG_STM32_ADC) += stm32-adc.o
44obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o 48obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
45obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o 49obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
46obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o 50obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
new file mode 100644
index 000000000000..75cca42b6e70
--- /dev/null
+++ b/drivers/iio/adc/ad7766.c
@@ -0,0 +1,330 @@
1/*
2 * AD7766/AD7767 SPI ADC driver
3 *
4 * Copyright 2016 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/err.h>
13#include <linux/gpio/consumer.h>
14#include <linux/module.h>
15#include <linux/regulator/consumer.h>
16#include <linux/slab.h>
17#include <linux/spi/spi.h>
18
19#include <linux/iio/iio.h>
20#include <linux/iio/buffer.h>
21#include <linux/iio/trigger.h>
22#include <linux/iio/trigger_consumer.h>
23#include <linux/iio/triggered_buffer.h>
24
25struct ad7766_chip_info {
26 unsigned int decimation_factor;
27};
28
29enum {
30 AD7766_SUPPLY_AVDD = 0,
31 AD7766_SUPPLY_DVDD = 1,
32 AD7766_SUPPLY_VREF = 2,
33 AD7766_NUM_SUPPLIES = 3
34};
35
36struct ad7766 {
37 const struct ad7766_chip_info *chip_info;
38 struct spi_device *spi;
39 struct clk *mclk;
40 struct gpio_desc *pd_gpio;
41 struct regulator_bulk_data reg[AD7766_NUM_SUPPLIES];
42
43 struct iio_trigger *trig;
44
45 struct spi_transfer xfer;
46 struct spi_message msg;
47
48 /*
49 * DMA (thus cache coherency maintenance) requires the
50 * transfer buffers to live in their own cache lines.
51 * Make the buffer large enough for one 24 bit sample and one 64 bit
52 * aligned 64 bit timestamp.
53 */
54 unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
55 ____cacheline_aligned;
56};
57
58/*
 59 * AD7766 and AD7767 variations are interface compatible; the main difference is
60 * analog performance. Both parts will use the same ID.
61 */
62enum ad7766_device_ids {
63 ID_AD7766,
64 ID_AD7766_1,
65 ID_AD7766_2,
66};
67
68static irqreturn_t ad7766_trigger_handler(int irq, void *p)
69{
70 struct iio_poll_func *pf = p;
71 struct iio_dev *indio_dev = pf->indio_dev;
72 struct ad7766 *ad7766 = iio_priv(indio_dev);
73 int ret;
74
75 ret = spi_sync(ad7766->spi, &ad7766->msg);
76 if (ret < 0)
77 goto done;
78
79 iio_push_to_buffers_with_timestamp(indio_dev, ad7766->data,
80 pf->timestamp);
81done:
82 iio_trigger_notify_done(indio_dev->trig);
83
84 return IRQ_HANDLED;
85}
86
87static int ad7766_preenable(struct iio_dev *indio_dev)
88{
89 struct ad7766 *ad7766 = iio_priv(indio_dev);
90 int ret;
91
92 ret = regulator_bulk_enable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
93 if (ret < 0) {
94 dev_err(&ad7766->spi->dev, "Failed to enable supplies: %d\n",
95 ret);
96 return ret;
97 }
98
99 ret = clk_prepare_enable(ad7766->mclk);
100 if (ret < 0) {
101 dev_err(&ad7766->spi->dev, "Failed to enable MCLK: %d\n", ret);
102 regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
103 return ret;
104 }
105
106 if (ad7766->pd_gpio)
107 gpiod_set_value(ad7766->pd_gpio, 0);
108
109 return 0;
110}
111
112static int ad7766_postdisable(struct iio_dev *indio_dev)
113{
114 struct ad7766 *ad7766 = iio_priv(indio_dev);
115
116 if (ad7766->pd_gpio)
117 gpiod_set_value(ad7766->pd_gpio, 1);
118
119 /*
120 * The PD pin is synchronous to the clock, so give it some time to
121 * notice the change before we disable the clock.
122 */
123 msleep(20);
124
125 clk_disable_unprepare(ad7766->mclk);
126 regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
127
128 return 0;
129}
130
131static int ad7766_read_raw(struct iio_dev *indio_dev,
132 const struct iio_chan_spec *chan, int *val, int *val2, long info)
133{
134 struct ad7766 *ad7766 = iio_priv(indio_dev);
135 struct regulator *vref = ad7766->reg[AD7766_SUPPLY_VREF].consumer;
136 int scale_uv;
137
138 switch (info) {
139 case IIO_CHAN_INFO_SCALE:
140 scale_uv = regulator_get_voltage(vref);
141 if (scale_uv < 0)
142 return scale_uv;
143 *val = scale_uv / 1000;
144 *val2 = chan->scan_type.realbits;
145 return IIO_VAL_FRACTIONAL_LOG2;
146 case IIO_CHAN_INFO_SAMP_FREQ:
147 *val = clk_get_rate(ad7766->mclk) /
148 ad7766->chip_info->decimation_factor;
149 return IIO_VAL_INT;
150 }
151 return -EINVAL;
152}
153
154static const struct iio_chan_spec ad7766_channels[] = {
155 {
156 .type = IIO_VOLTAGE,
157 .indexed = 1,
158 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
159 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
160 .scan_type = {
161 .sign = 's',
162 .realbits = 24,
163 .storagebits = 32,
164 .endianness = IIO_BE,
165 },
166 },
167 IIO_CHAN_SOFT_TIMESTAMP(1),
168};
169
170static const struct ad7766_chip_info ad7766_chip_info[] = {
171 [ID_AD7766] = {
172 .decimation_factor = 8,
173 },
174 [ID_AD7766_1] = {
175 .decimation_factor = 16,
176 },
177 [ID_AD7766_2] = {
178 .decimation_factor = 32,
179 },
180};
181
182static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
183 .preenable = &ad7766_preenable,
184 .postenable = &iio_triggered_buffer_postenable,
185 .predisable = &iio_triggered_buffer_predisable,
186 .postdisable = &ad7766_postdisable,
187};
188
189static const struct iio_info ad7766_info = {
190 .driver_module = THIS_MODULE,
191 .read_raw = &ad7766_read_raw,
192};
193
194static irqreturn_t ad7766_irq(int irq, void *private)
195{
196 iio_trigger_poll(private);
197 return IRQ_HANDLED;
198}
199
200static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
201{
202 struct ad7766 *ad7766 = iio_trigger_get_drvdata(trig);
203
204 if (enable)
205 enable_irq(ad7766->spi->irq);
206 else
207 disable_irq(ad7766->spi->irq);
208
209 return 0;
210}
211
212static const struct iio_trigger_ops ad7766_trigger_ops = {
213 .owner = THIS_MODULE,
214 .set_trigger_state = ad7766_set_trigger_state,
215 .validate_device = iio_trigger_validate_own_device,
216};
217
218static int ad7766_probe(struct spi_device *spi)
219{
220 const struct spi_device_id *id = spi_get_device_id(spi);
221 struct iio_dev *indio_dev;
222 struct ad7766 *ad7766;
223 int ret;
224
225 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ad7766));
226 if (!indio_dev)
227 return -ENOMEM;
228
229 ad7766 = iio_priv(indio_dev);
230 ad7766->chip_info = &ad7766_chip_info[id->driver_data];
231
232 ad7766->mclk = devm_clk_get(&spi->dev, "mclk");
233 if (IS_ERR(ad7766->mclk))
234 return PTR_ERR(ad7766->mclk);
235
236 ad7766->reg[AD7766_SUPPLY_AVDD].supply = "avdd";
237 ad7766->reg[AD7766_SUPPLY_DVDD].supply = "dvdd";
238 ad7766->reg[AD7766_SUPPLY_VREF].supply = "vref";
239
240 ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(ad7766->reg),
241 ad7766->reg);
242 if (ret)
243 return ret;
244
245 ad7766->pd_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
246 GPIOD_OUT_HIGH);
247 if (IS_ERR(ad7766->pd_gpio))
248 return PTR_ERR(ad7766->pd_gpio);
249
250 indio_dev->dev.parent = &spi->dev;
251 indio_dev->name = spi_get_device_id(spi)->name;
252 indio_dev->modes = INDIO_DIRECT_MODE;
253 indio_dev->channels = ad7766_channels;
254 indio_dev->num_channels = ARRAY_SIZE(ad7766_channels);
255 indio_dev->info = &ad7766_info;
256
257 if (spi->irq > 0) {
258 ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
259 indio_dev->name, indio_dev->id);
260 if (!ad7766->trig)
261 return -ENOMEM;
262
263 ad7766->trig->ops = &ad7766_trigger_ops;
264 ad7766->trig->dev.parent = &spi->dev;
265 iio_trigger_set_drvdata(ad7766->trig, ad7766);
266
267 ret = devm_request_irq(&spi->dev, spi->irq, ad7766_irq,
268 IRQF_TRIGGER_FALLING, dev_name(&spi->dev),
269 ad7766->trig);
270 if (ret < 0)
271 return ret;
272
273 /*
274 * The device generates interrupts as long as it is powered up.
 275 * Some platforms might not allow the option to power it down, so
276 * disable the interrupt to avoid extra load on the system
277 */
278 disable_irq(spi->irq);
279
280 ret = devm_iio_trigger_register(&spi->dev, ad7766->trig);
281 if (ret)
282 return ret;
283 }
284
285 spi_set_drvdata(spi, indio_dev);
286
287 ad7766->spi = spi;
288
289 /* First byte always 0 */
290 ad7766->xfer.rx_buf = &ad7766->data[1];
291 ad7766->xfer.len = 3;
292
293 spi_message_init(&ad7766->msg);
294 spi_message_add_tail(&ad7766->xfer, &ad7766->msg);
295
296 ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
297 &iio_pollfunc_store_time, &ad7766_trigger_handler,
298 &ad7766_buffer_setup_ops);
299 if (ret)
300 return ret;
301
302 ret = devm_iio_device_register(&spi->dev, indio_dev);
303 if (ret)
304 return ret;
305 return 0;
306}
307
308static const struct spi_device_id ad7766_id[] = {
309 {"ad7766", ID_AD7766},
310 {"ad7766-1", ID_AD7766_1},
311 {"ad7766-2", ID_AD7766_2},
312 {"ad7767", ID_AD7766},
313 {"ad7767-1", ID_AD7766_1},
314 {"ad7767-2", ID_AD7766_2},
315 {}
316};
317MODULE_DEVICE_TABLE(spi, ad7766_id);
318
319static struct spi_driver ad7766_driver = {
320 .driver = {
321 .name = "ad7766",
322 },
323 .probe = ad7766_probe,
324 .id_table = ad7766_id,
325};
326module_spi_driver(ad7766_driver);
327
328MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
329MODULE_DESCRIPTION("Analog Devices AD7766 and AD7767 ADCs driver support");
330MODULE_LICENSE("GPL v2");
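One detail worth calling out in ad7766_read_raw() above: returning IIO_VAL_FRACTIONAL_LOG2 with *val set to the reference voltage in millivolts and *val2 set to the channel's realbits makes the IIO core report a scale of val / 2^val2. A small user-space sketch of that arithmetic, assuming a hypothetical 2.5 V reference:

/*
 * Illustration only, not part of the commit: the in_voltage_scale value the
 * IIO core derives from IIO_VAL_FRACTIONAL_LOG2. The 2.5 V reference is an
 * assumption for the example, not something the driver mandates.
 */
#include <stdio.h>

int main(void)
{
	int val = 2500;		/* regulator_get_voltage() / 1000, in mV */
	int val2 = 24;		/* chan->scan_type.realbits */

	printf("scale = %.9f mV per LSB\n", (double)val / (1u << val2));
	return 0;
}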
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index bbdac07f4aaa..34b928cefeed 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -30,6 +30,7 @@
30#include <linux/iio/trigger.h> 30#include <linux/iio/trigger.h>
31#include <linux/iio/trigger_consumer.h> 31#include <linux/iio/trigger_consumer.h>
32#include <linux/iio/triggered_buffer.h> 32#include <linux/iio/triggered_buffer.h>
33#include <linux/pinctrl/consumer.h>
33 34
34/* Registers */ 35/* Registers */
35#define AT91_ADC_CR 0x00 /* Control Register */ 36#define AT91_ADC_CR 0x00 /* Control Register */
@@ -1347,6 +1348,32 @@ static int at91_adc_remove(struct platform_device *pdev)
1347 return 0; 1348 return 0;
1348} 1349}
1349 1350
1351#ifdef CONFIG_PM_SLEEP
1352static int at91_adc_suspend(struct device *dev)
1353{
1354 struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
1355 struct at91_adc_state *st = iio_priv(idev);
1356
1357 pinctrl_pm_select_sleep_state(dev);
1358 clk_disable_unprepare(st->clk);
1359
1360 return 0;
1361}
1362
1363static int at91_adc_resume(struct device *dev)
1364{
1365 struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
1366 struct at91_adc_state *st = iio_priv(idev);
1367
1368 clk_prepare_enable(st->clk);
1369 pinctrl_pm_select_default_state(dev);
1370
1371 return 0;
1372}
1373#endif
1374
1375static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
1376
1350static struct at91_adc_caps at91sam9260_caps = { 1377static struct at91_adc_caps at91sam9260_caps = {
1351 .calc_startup_ticks = calc_startup_ticks_9260, 1378 .calc_startup_ticks = calc_startup_ticks_9260,
1352 .num_channels = 4, 1379 .num_channels = 4,
@@ -1441,6 +1468,7 @@ static struct platform_driver at91_adc_driver = {
1441 .driver = { 1468 .driver = {
1442 .name = DRIVER_NAME, 1469 .name = DRIVER_NAME,
1443 .of_match_table = of_match_ptr(at91_adc_dt_ids), 1470 .of_match_table = of_match_ptr(at91_adc_dt_ids),
1471 .pm = &at91_adc_pm_ops,
1444 }, 1472 },
1445}; 1473};
1446 1474
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
new file mode 100644
index 000000000000..fef15c0d7c9c
--- /dev/null
+++ b/drivers/iio/adc/envelope-detector.c
@@ -0,0 +1,422 @@
1/*
2 * Driver for an envelope detector using a DAC and a comparator
3 *
4 * Copyright (C) 2016 Axentia Technologies AB
5 *
6 * Author: Peter Rosin <peda@axentia.se>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 * The DAC is used to find the peak level of an alternating voltage input
15 * signal by a binary search using the output of a comparator wired to
16 * an interrupt pin. Like so:
17 * _
18 * | \
19 * input +------>-------|+ \
20 * | \
21 * .-------. | }---.
22 * | | | / |
23 * | dac|-->--|- / |
24 * | | |_/ |
25 * | | |
26 * | | |
27 * | irq|------<-------'
28 * | |
29 * '-------'
30 */
31
32#include <linux/completion.h>
33#include <linux/device.h>
34#include <linux/err.h>
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/mutex.h>
38#include <linux/iio/consumer.h>
39#include <linux/iio/iio.h>
40#include <linux/iio/sysfs.h>
41#include <linux/interrupt.h>
42#include <linux/irq.h>
43#include <linux/of.h>
44#include <linux/of_device.h>
45#include <linux/platform_device.h>
46#include <linux/spinlock.h>
47#include <linux/workqueue.h>
48
49struct envelope {
50 spinlock_t comp_lock; /* protects comp */
51 int comp;
52
53 struct mutex read_lock; /* protects everything else */
54
55 int comp_irq;
56 u32 comp_irq_trigger;
57 u32 comp_irq_trigger_inv;
58
59 struct iio_channel *dac;
60 struct delayed_work comp_timeout;
61
62 unsigned int comp_interval;
63 bool invert;
64 u32 dac_max;
65
66 int high;
67 int level;
68 int low;
69
70 struct completion done;
71};
72
73/*
74 * The envelope_detector_comp_latch function works together with the compare
75 * interrupt service routine below (envelope_detector_comp_isr) as a latch
76 * (one-bit memory) for if the interrupt has triggered since last calling
77 * this function.
78 * The ..._comp_isr function disables the interrupt so that the cpu does not
79 * need to service a possible interrupt flood from the comparator when no-one
80 * cares anyway, and this ..._comp_latch function reenables them again if
81 * needed.
82 */
83static int envelope_detector_comp_latch(struct envelope *env)
84{
85 int comp;
86
87 spin_lock_irq(&env->comp_lock);
88 comp = env->comp;
89 env->comp = 0;
90 spin_unlock_irq(&env->comp_lock);
91
92 if (!comp)
93 return 0;
94
95 /*
96 * The irq was disabled, and is reenabled just now.
97 * But there might have been a pending irq that
98 * happened while the irq was disabled that fires
99 * just as the irq is reenabled. That is not what
100 * is desired.
101 */
102 enable_irq(env->comp_irq);
103
104 /* So, synchronize this possibly pending irq... */
105 synchronize_irq(env->comp_irq);
106
107 /* ...and redo the whole dance. */
108 spin_lock_irq(&env->comp_lock);
109 comp = env->comp;
110 env->comp = 0;
111 spin_unlock_irq(&env->comp_lock);
112
113 if (comp)
114 enable_irq(env->comp_irq);
115
116 return 1;
117}
118
119static irqreturn_t envelope_detector_comp_isr(int irq, void *ctx)
120{
121 struct envelope *env = ctx;
122
123 spin_lock(&env->comp_lock);
124 env->comp = 1;
125 disable_irq_nosync(env->comp_irq);
126 spin_unlock(&env->comp_lock);
127
128 return IRQ_HANDLED;
129}
130
131static void envelope_detector_setup_compare(struct envelope *env)
132{
133 int ret;
134
135 /*
136 * Do a binary search for the peak input level, and stop
137 * when that level is "trapped" between two adjacent DAC
138 * values.
139 * When invert is active, use the midpoint floor so that
140 * env->level ends up as env->low when the termination
141 * criteria below is fulfilled, and use the midpoint
142 * ceiling when invert is not active so that env->level
143 * ends up as env->high in that case.
144 */
145 env->level = (env->high + env->low + !env->invert) / 2;
146
147 if (env->high == env->low + 1) {
148 complete(&env->done);
149 return;
150 }
151
152 /* Set a "safe" DAC level (if there is such a thing)... */
153 ret = iio_write_channel_raw(env->dac, env->invert ? 0 : env->dac_max);
154 if (ret < 0)
155 goto err;
156
157 /* ...clear the comparison result... */
158 envelope_detector_comp_latch(env);
159
160 /* ...set the real DAC level... */
161 ret = iio_write_channel_raw(env->dac, env->level);
162 if (ret < 0)
163 goto err;
164
165 /* ...and wait for a bit to see if the latch catches anything. */
166 schedule_delayed_work(&env->comp_timeout,
167 msecs_to_jiffies(env->comp_interval));
168 return;
169
170err:
171 env->level = ret;
172 complete(&env->done);
173}
174
175static void envelope_detector_timeout(struct work_struct *work)
176{
177 struct envelope *env = container_of(work, struct envelope,
178 comp_timeout.work);
179
180 /* Adjust low/high depending on the latch content... */
181 if (!envelope_detector_comp_latch(env) ^ !env->invert)
182 env->low = env->level;
183 else
184 env->high = env->level;
185
186 /* ...and continue the search. */
187 envelope_detector_setup_compare(env);
188}
189
190static int envelope_detector_read_raw(struct iio_dev *indio_dev,
191 struct iio_chan_spec const *chan,
192 int *val, int *val2, long mask)
193{
194 struct envelope *env = iio_priv(indio_dev);
195 int ret;
196
197 switch (mask) {
198 case IIO_CHAN_INFO_RAW:
199 /*
200 * When invert is active, start with high=max+1 and low=0
201 * since we will end up with the low value when the
202 * termination criteria is fulfilled (rounding down). And
203 * start with high=max and low=-1 when invert is not active
204 * since we will end up with the high value in that case.
 205 * This ensures that the returned value in both cases is
206 * in the same range as the DAC and is a value that has not
207 * triggered the comparator.
208 */
209 mutex_lock(&env->read_lock);
210 env->high = env->dac_max + env->invert;
211 env->low = -1 + env->invert;
212 envelope_detector_setup_compare(env);
213 wait_for_completion(&env->done);
214 if (env->level < 0) {
215 ret = env->level;
216 goto err_unlock;
217 }
218 *val = env->invert ? env->dac_max - env->level : env->level;
219 mutex_unlock(&env->read_lock);
220
221 return IIO_VAL_INT;
222
223 case IIO_CHAN_INFO_SCALE:
224 return iio_read_channel_scale(env->dac, val, val2);
225 }
226
227 return -EINVAL;
228
229err_unlock:
230 mutex_unlock(&env->read_lock);
231 return ret;
232}
233
234static ssize_t envelope_show_invert(struct iio_dev *indio_dev,
235 uintptr_t private,
236 struct iio_chan_spec const *ch, char *buf)
237{
238 struct envelope *env = iio_priv(indio_dev);
239
240 return sprintf(buf, "%u\n", env->invert);
241}
242
243static ssize_t envelope_store_invert(struct iio_dev *indio_dev,
244 uintptr_t private,
245 struct iio_chan_spec const *ch,
246 const char *buf, size_t len)
247{
248 struct envelope *env = iio_priv(indio_dev);
249 unsigned long invert;
250 int ret;
251 u32 trigger;
252
253 ret = kstrtoul(buf, 0, &invert);
254 if (ret < 0)
255 return ret;
256 if (invert > 1)
257 return -EINVAL;
258
259 trigger = invert ? env->comp_irq_trigger_inv : env->comp_irq_trigger;
260
261 mutex_lock(&env->read_lock);
262 if (invert != env->invert)
263 ret = irq_set_irq_type(env->comp_irq, trigger);
264 if (!ret) {
265 env->invert = invert;
266 ret = len;
267 }
268 mutex_unlock(&env->read_lock);
269
270 return ret;
271}
272
273static ssize_t envelope_show_comp_interval(struct iio_dev *indio_dev,
274 uintptr_t private,
275 struct iio_chan_spec const *ch,
276 char *buf)
277{
278 struct envelope *env = iio_priv(indio_dev);
279
280 return sprintf(buf, "%u\n", env->comp_interval);
281}
282
283static ssize_t envelope_store_comp_interval(struct iio_dev *indio_dev,
284 uintptr_t private,
285 struct iio_chan_spec const *ch,
286 const char *buf, size_t len)
287{
288 struct envelope *env = iio_priv(indio_dev);
289 unsigned long interval;
290 int ret;
291
292 ret = kstrtoul(buf, 0, &interval);
293 if (ret < 0)
294 return ret;
295 if (interval > 1000)
296 return -EINVAL;
297
298 mutex_lock(&env->read_lock);
299 env->comp_interval = interval;
300 mutex_unlock(&env->read_lock);
301
302 return len;
303}
304
305static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
306 { .name = "invert",
307 .read = envelope_show_invert,
308 .write = envelope_store_invert, },
309 { .name = "compare_interval",
310 .read = envelope_show_comp_interval,
311 .write = envelope_store_comp_interval, },
312 { /* sentinel */ }
313};
314
315static const struct iio_chan_spec envelope_detector_iio_channel = {
316 .type = IIO_ALTVOLTAGE,
317 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
318 | BIT(IIO_CHAN_INFO_SCALE),
319 .ext_info = envelope_detector_ext_info,
320 .indexed = 1,
321};
322
323static const struct iio_info envelope_detector_info = {
324 .read_raw = &envelope_detector_read_raw,
325 .driver_module = THIS_MODULE,
326};
327
328static int envelope_detector_probe(struct platform_device *pdev)
329{
330 struct device *dev = &pdev->dev;
331 struct iio_dev *indio_dev;
332 struct envelope *env;
333 enum iio_chan_type type;
334 int ret;
335
336 indio_dev = devm_iio_device_alloc(dev, sizeof(*env));
337 if (!indio_dev)
338 return -ENOMEM;
339
340 platform_set_drvdata(pdev, indio_dev);
341 env = iio_priv(indio_dev);
342 env->comp_interval = 50; /* some sensible default? */
343
344 spin_lock_init(&env->comp_lock);
345 mutex_init(&env->read_lock);
346 init_completion(&env->done);
347 INIT_DELAYED_WORK(&env->comp_timeout, envelope_detector_timeout);
348
349 indio_dev->name = dev_name(dev);
350 indio_dev->dev.parent = dev;
351 indio_dev->dev.of_node = dev->of_node;
352 indio_dev->info = &envelope_detector_info;
353 indio_dev->channels = &envelope_detector_iio_channel;
354 indio_dev->num_channels = 1;
355
356 env->dac = devm_iio_channel_get(dev, "dac");
357 if (IS_ERR(env->dac)) {
358 if (PTR_ERR(env->dac) != -EPROBE_DEFER)
359 dev_err(dev, "failed to get dac input channel\n");
360 return PTR_ERR(env->dac);
361 }
362
363 env->comp_irq = platform_get_irq_byname(pdev, "comp");
364 if (env->comp_irq < 0) {
365 if (env->comp_irq != -EPROBE_DEFER)
366 dev_err(dev, "failed to get compare interrupt\n");
367 return env->comp_irq;
368 }
369
370 ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
371 0, "envelope-detector", env);
372 if (ret) {
373 if (ret != -EPROBE_DEFER)
374 dev_err(dev, "failed to request interrupt\n");
375 return ret;
376 }
377 env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
378 if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
379 env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
380 if (env->comp_irq_trigger & IRQF_TRIGGER_FALLING)
381 env->comp_irq_trigger_inv |= IRQF_TRIGGER_RISING;
382 if (env->comp_irq_trigger & IRQF_TRIGGER_HIGH)
383 env->comp_irq_trigger_inv |= IRQF_TRIGGER_LOW;
384 if (env->comp_irq_trigger & IRQF_TRIGGER_LOW)
385 env->comp_irq_trigger_inv |= IRQF_TRIGGER_HIGH;
386
387 ret = iio_get_channel_type(env->dac, &type);
388 if (ret < 0)
389 return ret;
390
391 if (type != IIO_VOLTAGE) {
392 dev_err(dev, "dac is of the wrong type\n");
393 return -EINVAL;
394 }
395
396 ret = iio_read_max_channel_raw(env->dac, &env->dac_max);
397 if (ret < 0) {
398 dev_err(dev, "dac does not indicate its raw maximum value\n");
399 return ret;
400 }
401
402 return devm_iio_device_register(dev, indio_dev);
403}
404
405static const struct of_device_id envelope_detector_match[] = {
406 { .compatible = "axentia,tse850-envelope-detector", },
407 { /* sentinel */ }
408};
409MODULE_DEVICE_TABLE(of, envelope_detector_match);
410
411static struct platform_driver envelope_detector_driver = {
412 .probe = envelope_detector_probe,
413 .driver = {
414 .name = "iio-envelope-detector",
415 .of_match_table = envelope_detector_match,
416 },
417};
418module_platform_driver(envelope_detector_driver);
419
420MODULE_DESCRIPTION("Envelope detector using a DAC and a comparator");
421MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
422MODULE_LICENSE("GPL v2");
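The comments in envelope_detector_setup_compare() and envelope_detector_read_raw() above describe a bisection between low and high DAC codes that stops when the two become adjacent, so it needs at most about log2(dac_max) comparator round trips. A user-space sketch of the same search for the non-inverted case, with a fake comparator and a made-up peak value standing in for the DAC-plus-interrupt pair:

/*
 * Illustration only, not part of the commit: the bisection performed by
 * envelope_detector_setup_compare()/envelope_detector_timeout(), with a
 * stand-in comparator instead of the real DAC and interrupt latch.
 */
#include <stdio.h>

static int peak = 613;			/* pretend input peak, in DAC codes */

static int comparator_fired(int level)	/* stand-in for the comp IRQ latch */
{
	return peak > level;
}

int main(void)
{
	int dac_max = 1023;
	int low = -1, high = dac_max;	/* starting window, invert == 0 */
	int level;

	while (high != low + 1) {
		level = (high + low + 1) / 2;	/* midpoint ceiling */
		if (comparator_fired(level))
			low = level;		/* input is above this code */
		else
			high = level;		/* this code was not exceeded */
	}
	printf("detected envelope: %d (actual peak: %d)\n", high, peak);
	return 0;
}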
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2b1f16..3b7c4f78f37a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -238,7 +238,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
238 238
239 /* Configure conversion register with the requested chan */ 239 /* Configure conversion register with the requested chan */
240 st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) | 240 st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
241 MAX1027_NOSCAN | !!(chan->type == IIO_TEMP); 241 MAX1027_NOSCAN;
242 if (chan->type == IIO_TEMP)
243 st->reg |= MAX1027_TEMP;
242 ret = spi_write(st->spi, &st->reg, 1); 244 ret = spi_write(st->spi, &st->reg, 1);
243 if (ret < 0) { 245 if (ret < 0) {
244 dev_err(&indio_dev->dev, 246 dev_err(&indio_dev->dev,
@@ -360,17 +362,6 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
360 return 0; 362 return 0;
361} 363}
362 364
363static int max1027_validate_device(struct iio_trigger *trig,
364 struct iio_dev *indio_dev)
365{
366 struct iio_dev *indio = iio_trigger_get_drvdata(trig);
367
368 if (indio != indio_dev)
369 return -EINVAL;
370
371 return 0;
372}
373
374static irqreturn_t max1027_trigger_handler(int irq, void *private) 365static irqreturn_t max1027_trigger_handler(int irq, void *private)
375{ 366{
376 struct iio_poll_func *pf = (struct iio_poll_func *)private; 367 struct iio_poll_func *pf = (struct iio_poll_func *)private;
@@ -391,7 +382,7 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
391 382
392static const struct iio_trigger_ops max1027_trigger_ops = { 383static const struct iio_trigger_ops max1027_trigger_ops = {
393 .owner = THIS_MODULE, 384 .owner = THIS_MODULE,
394 .validate_device = &max1027_validate_device, 385 .validate_device = &iio_trigger_validate_own_device,
395 .set_trigger_state = &max1027_set_trigger_state, 386 .set_trigger_state = &max1027_set_trigger_state,
396}; 387};
397 388
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
new file mode 100644
index 000000000000..4214b0cd6b1b
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -0,0 +1,303 @@
1/*
2 * This file is part of STM32 ADC driver
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
6 *
7 * Inspired from: fsl-imx25-tsadc
8 *
9 * License type: GPLv2
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE.
18 * See the GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/clk.h>
25#include <linux/interrupt.h>
26#include <linux/irqchip/chained_irq.h>
27#include <linux/irqdesc.h>
28#include <linux/irqdomain.h>
29#include <linux/module.h>
30#include <linux/of_device.h>
31#include <linux/regulator/consumer.h>
32#include <linux/slab.h>
33
34#include "stm32-adc-core.h"
35
36/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
37#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
38#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
39
40/* STM32F4_ADC_CSR - bit fields */
41#define STM32F4_EOC3 BIT(17)
42#define STM32F4_EOC2 BIT(9)
43#define STM32F4_EOC1 BIT(1)
44
45/* STM32F4_ADC_CCR - bit fields */
46#define STM32F4_ADC_ADCPRE_SHIFT 16
47#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
48
49/* STM32 F4 maximum analog clock rate (from datasheet) */
50#define STM32F4_ADC_MAX_CLK_RATE 36000000
51
52/**
53 * struct stm32_adc_priv - stm32 ADC core private data
54 * @irq: irq for ADC block
55 * @domain: irq domain reference
56 * @aclk: clock reference for the analog circuitry
57 * @vref: regulator reference
58 * @common: common data for all ADC instances
59 */
60struct stm32_adc_priv {
61 int irq;
62 struct irq_domain *domain;
63 struct clk *aclk;
64 struct regulator *vref;
65 struct stm32_adc_common common;
66};
67
68static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
69{
70 return container_of(com, struct stm32_adc_priv, common);
71}
72
73/* STM32F4 ADC internal common clock prescaler division ratios */
74static int stm32f4_pclk_div[] = {2, 4, 6, 8};
75
76/**
77 * stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
78 * @priv: stm32 ADC core private data
 79 * Select the clock prescaler used for analog conversions before using the ADC.
80 */
81static int stm32f4_adc_clk_sel(struct platform_device *pdev,
82 struct stm32_adc_priv *priv)
83{
84 unsigned long rate;
85 u32 val;
86 int i;
87
88 rate = clk_get_rate(priv->aclk);
89 for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
90 if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
91 break;
92 }
93 if (i >= ARRAY_SIZE(stm32f4_pclk_div))
94 return -EINVAL;
95
96 val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
97 val &= ~STM32F4_ADC_ADCPRE_MASK;
98 val |= i << STM32F4_ADC_ADCPRE_SHIFT;
99 writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
100
101 dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
102 rate / (stm32f4_pclk_div[i] * 1000));
103
104 return 0;
105}
106
107/* ADC common interrupt for all instances */
108static void stm32_adc_irq_handler(struct irq_desc *desc)
109{
110 struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
111 struct irq_chip *chip = irq_desc_get_chip(desc);
112 u32 status;
113
114 chained_irq_enter(chip, desc);
115 status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
116
117 if (status & STM32F4_EOC1)
118 generic_handle_irq(irq_find_mapping(priv->domain, 0));
119
120 if (status & STM32F4_EOC2)
121 generic_handle_irq(irq_find_mapping(priv->domain, 1));
122
123 if (status & STM32F4_EOC3)
124 generic_handle_irq(irq_find_mapping(priv->domain, 2));
125
126 chained_irq_exit(chip, desc);
127};
128
129static int stm32_adc_domain_map(struct irq_domain *d, unsigned int irq,
130 irq_hw_number_t hwirq)
131{
132 irq_set_chip_data(irq, d->host_data);
133 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
134
135 return 0;
136}
137
138static void stm32_adc_domain_unmap(struct irq_domain *d, unsigned int irq)
139{
140 irq_set_chip_and_handler(irq, NULL, NULL);
141 irq_set_chip_data(irq, NULL);
142}
143
144static const struct irq_domain_ops stm32_adc_domain_ops = {
145 .map = stm32_adc_domain_map,
146 .unmap = stm32_adc_domain_unmap,
147 .xlate = irq_domain_xlate_onecell,
148};
149
150static int stm32_adc_irq_probe(struct platform_device *pdev,
151 struct stm32_adc_priv *priv)
152{
153 struct device_node *np = pdev->dev.of_node;
154
155 priv->irq = platform_get_irq(pdev, 0);
156 if (priv->irq < 0) {
157 dev_err(&pdev->dev, "failed to get irq\n");
158 return priv->irq;
159 }
160
161 priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
162 &stm32_adc_domain_ops,
163 priv);
164 if (!priv->domain) {
165 dev_err(&pdev->dev, "Failed to add irq domain\n");
166 return -ENOMEM;
167 }
168
169 irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
170 irq_set_handler_data(priv->irq, priv);
171
172 return 0;
173}
174
175static void stm32_adc_irq_remove(struct platform_device *pdev,
176 struct stm32_adc_priv *priv)
177{
178 int hwirq;
179
180 for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
181 irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
182 irq_domain_remove(priv->domain);
183 irq_set_chained_handler(priv->irq, NULL);
184}
185
186static int stm32_adc_probe(struct platform_device *pdev)
187{
188 struct stm32_adc_priv *priv;
189 struct device_node *np = pdev->dev.of_node;
190 struct resource *res;
191 int ret;
192
193 if (!pdev->dev.of_node)
194 return -ENODEV;
195
196 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
197 if (!priv)
198 return -ENOMEM;
199
200 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
201 priv->common.base = devm_ioremap_resource(&pdev->dev, res);
202 if (IS_ERR(priv->common.base))
203 return PTR_ERR(priv->common.base);
204
205 priv->vref = devm_regulator_get(&pdev->dev, "vref");
206 if (IS_ERR(priv->vref)) {
207 ret = PTR_ERR(priv->vref);
208 dev_err(&pdev->dev, "vref get failed, %d\n", ret);
209 return ret;
210 }
211
212 ret = regulator_enable(priv->vref);
213 if (ret < 0) {
214 dev_err(&pdev->dev, "vref enable failed\n");
215 return ret;
216 }
217
218 ret = regulator_get_voltage(priv->vref);
219 if (ret < 0) {
220 dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
221 goto err_regulator_disable;
222 }
223 priv->common.vref_mv = ret / 1000;
224 dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
225
226 priv->aclk = devm_clk_get(&pdev->dev, "adc");
227 if (IS_ERR(priv->aclk)) {
228 ret = PTR_ERR(priv->aclk);
229 dev_err(&pdev->dev, "Can't get 'adc' clock\n");
230 goto err_regulator_disable;
231 }
232
233 ret = clk_prepare_enable(priv->aclk);
234 if (ret < 0) {
235 dev_err(&pdev->dev, "adc clk enable failed\n");
236 goto err_regulator_disable;
237 }
238
239 ret = stm32f4_adc_clk_sel(pdev, priv);
240 if (ret < 0) {
241 dev_err(&pdev->dev, "adc clk selection failed\n");
242 goto err_clk_disable;
243 }
244
245 ret = stm32_adc_irq_probe(pdev, priv);
246 if (ret < 0)
247 goto err_clk_disable;
248
249 platform_set_drvdata(pdev, &priv->common);
250
251 ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
252 if (ret < 0) {
253 dev_err(&pdev->dev, "failed to populate DT children\n");
254 goto err_irq_remove;
255 }
256
257 return 0;
258
259err_irq_remove:
260 stm32_adc_irq_remove(pdev, priv);
261
262err_clk_disable:
263 clk_disable_unprepare(priv->aclk);
264
265err_regulator_disable:
266 regulator_disable(priv->vref);
267
268 return ret;
269}
270
271static int stm32_adc_remove(struct platform_device *pdev)
272{
273 struct stm32_adc_common *common = platform_get_drvdata(pdev);
274 struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
275
276 of_platform_depopulate(&pdev->dev);
277 stm32_adc_irq_remove(pdev, priv);
278 clk_disable_unprepare(priv->aclk);
279 regulator_disable(priv->vref);
280
281 return 0;
282}
283
284static const struct of_device_id stm32_adc_of_match[] = {
285 { .compatible = "st,stm32f4-adc-core" },
286 {},
287};
288MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
289
290static struct platform_driver stm32_adc_driver = {
291 .probe = stm32_adc_probe,
292 .remove = stm32_adc_remove,
293 .driver = {
294 .name = "stm32-adc-core",
295 .of_match_table = stm32_adc_of_match,
296 },
297};
298module_platform_driver(stm32_adc_driver);
299
300MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
301MODULE_DESCRIPTION("STMicroelectronics STM32 ADC core driver");
302MODULE_LICENSE("GPL v2");
303MODULE_ALIAS("platform:stm32-adc-core");
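
For readers following the clock setup above: stm32f4_adc_clk_sel() simply picks the smallest of the four ADCPRE dividers that keeps the analog clock at or below 36 MHz. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch):

/* Mirrors stm32f4_adc_clk_sel(): pick the smallest divider from {2, 4, 6, 8}
 * that keeps the ADC clock at or below 36 MHz and return its index, which is
 * the value programmed into the ADCPRE field.
 */
#include <stdio.h>

static const int stm32f4_pclk_div[] = {2, 4, 6, 8};
#define STM32F4_ADC_MAX_CLK_RATE 36000000UL

static int pick_adcpre(unsigned long rate)
{
	int i;

	for (i = 0; i < 4; i++)
		if (rate / stm32f4_pclk_div[i] <= STM32F4_ADC_MAX_CLK_RATE)
			return i;	/* index programmed into ADCPRE */
	return -1;			/* no divider is low enough */
}

int main(void)
{
	/* e.g. an 84 MHz bus clock -> divider 4 (index 1) -> 21 MHz ADC clock */
	printf("ADCPRE index for 84 MHz: %d\n", pick_adcpre(84000000UL));
	return 0;
}
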
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
new file mode 100644
index 000000000000..081fa5f55015
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -0,0 +1,52 @@
1/*
2 * This file is part of STM32 ADC driver
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
6 *
7 * License type: GPLv2
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE.
16 * See the GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#ifndef __STM32_ADC_H
23#define __STM32_ADC_H
24
25/*
26 * STM32 - ADC global register map
27 * ________________________________________________________
28 * | Offset | Register |
29 * --------------------------------------------------------
30 * | 0x000 | Master ADC1 |
31 * --------------------------------------------------------
32 * | 0x100 | Slave ADC2 |
33 * --------------------------------------------------------
34 * | 0x200 | Slave ADC3 |
35 * --------------------------------------------------------
36 * | 0x300 | Master & Slave common regs |
37 * --------------------------------------------------------
38 */
39#define STM32_ADC_MAX_ADCS 3
40#define STM32_ADCX_COMN_OFFSET 0x300
41
42/**
43 * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
44 * @base: control registers base cpu addr
45 * @vref_mv: vref voltage (mv)
46 */
47struct stm32_adc_common {
48 void __iomem *base;
49 int vref_mv;
50};
51
52#endif
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
new file mode 100644
index 000000000000..5715e79f4935
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc.c
@@ -0,0 +1,518 @@
1/*
2 * This file is part of STM32 ADC driver
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
6 *
7 * License type: GPLv2
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE.
16 * See the GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/clk.h>
23#include <linux/delay.h>
24#include <linux/iio/iio.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/of.h>
30
31#include "stm32-adc-core.h"
32
33/* STM32F4 - Registers for each ADC instance */
34#define STM32F4_ADC_SR 0x00
35#define STM32F4_ADC_CR1 0x04
36#define STM32F4_ADC_CR2 0x08
37#define STM32F4_ADC_SMPR1 0x0C
38#define STM32F4_ADC_SMPR2 0x10
39#define STM32F4_ADC_HTR 0x24
40#define STM32F4_ADC_LTR 0x28
41#define STM32F4_ADC_SQR1 0x2C
42#define STM32F4_ADC_SQR2 0x30
43#define STM32F4_ADC_SQR3 0x34
44#define STM32F4_ADC_JSQR 0x38
45#define STM32F4_ADC_JDR1 0x3C
46#define STM32F4_ADC_JDR2 0x40
47#define STM32F4_ADC_JDR3 0x44
48#define STM32F4_ADC_JDR4 0x48
49#define STM32F4_ADC_DR 0x4C
50
51/* STM32F4_ADC_SR - bit fields */
52#define STM32F4_STRT BIT(4)
53#define STM32F4_EOC BIT(1)
54
55/* STM32F4_ADC_CR1 - bit fields */
56#define STM32F4_SCAN BIT(8)
57#define STM32F4_EOCIE BIT(5)
58
59/* STM32F4_ADC_CR2 - bit fields */
60#define STM32F4_SWSTART BIT(30)
61#define STM32F4_EXTEN_MASK GENMASK(29, 28)
62#define STM32F4_EOCS BIT(10)
63#define STM32F4_ADON BIT(0)
64
65/* STM32F4_ADC_SQR1 - bit fields */
66#define STM32F4_L_SHIFT 20
67#define STM32F4_L_MASK GENMASK(23, 20)
68
69/* STM32F4_ADC_SQR3 - bit fields */
70#define STM32F4_SQ1_SHIFT 0
71#define STM32F4_SQ1_MASK GENMASK(4, 0)
72
73#define STM32_ADC_TIMEOUT_US 100000
74#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
75
76/**
77 * struct stm32_adc - private data of each ADC IIO instance
78 * @common: reference to ADC block common data
79 * @offset: ADC instance register offset in ADC block
80 * @completion: end of single conversion completion
81 * @buffer: data buffer
82 * @clk: clock for this adc instance
83 * @irq: interrupt for this adc instance
84 * @lock: spinlock
85 */
86struct stm32_adc {
87 struct stm32_adc_common *common;
88 u32 offset;
89 struct completion completion;
90 u16 *buffer;
91 struct clk *clk;
92 int irq;
93 spinlock_t lock; /* interrupt lock */
94};
95
96/**
97 * struct stm32_adc_chan_spec - specification of stm32 adc channel
98 * @type: IIO channel type
99 * @channel: channel number (single ended)
100 * @name: channel name (single ended)
101 */
102struct stm32_adc_chan_spec {
103 enum iio_chan_type type;
104 int channel;
105 const char *name;
106};
107
108/* Input definitions common for all STM32F4 instances */
109static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
110 { IIO_VOLTAGE, 0, "in0" },
111 { IIO_VOLTAGE, 1, "in1" },
112 { IIO_VOLTAGE, 2, "in2" },
113 { IIO_VOLTAGE, 3, "in3" },
114 { IIO_VOLTAGE, 4, "in4" },
115 { IIO_VOLTAGE, 5, "in5" },
116 { IIO_VOLTAGE, 6, "in6" },
117 { IIO_VOLTAGE, 7, "in7" },
118 { IIO_VOLTAGE, 8, "in8" },
119 { IIO_VOLTAGE, 9, "in9" },
120 { IIO_VOLTAGE, 10, "in10" },
121 { IIO_VOLTAGE, 11, "in11" },
122 { IIO_VOLTAGE, 12, "in12" },
123 { IIO_VOLTAGE, 13, "in13" },
124 { IIO_VOLTAGE, 14, "in14" },
125 { IIO_VOLTAGE, 15, "in15" },
126};
127
128/**
 129 * STM32 ADC register access routines
130 * @adc: stm32 adc instance
131 * @reg: reg offset in adc instance
132 *
 133 * Note: All instances share the same base, with 0x0, 0x100 or 0x200 offset resp.
134 * for adc1, adc2 and adc3.
135 */
136static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
137{
138 return readl_relaxed(adc->common->base + adc->offset + reg);
139}
140
141static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
142{
143 return readw_relaxed(adc->common->base + adc->offset + reg);
144}
145
146static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
147{
148 writel_relaxed(val, adc->common->base + adc->offset + reg);
149}
150
151static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
152{
153 unsigned long flags;
154
155 spin_lock_irqsave(&adc->lock, flags);
156 stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
157 spin_unlock_irqrestore(&adc->lock, flags);
158}
159
160static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
161{
162 unsigned long flags;
163
164 spin_lock_irqsave(&adc->lock, flags);
165 stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
166 spin_unlock_irqrestore(&adc->lock, flags);
167}
168
169/**
170 * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
171 * @adc: stm32 adc instance
172 */
173static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
174{
175 stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
176};
177
178/**
179 * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
180 * @adc: stm32 adc instance
181 */
182static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
183{
184 stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
185}
186
187/**
188 * stm32_adc_start_conv() - Start conversions for regular channels.
189 * @adc: stm32 adc instance
190 */
191static void stm32_adc_start_conv(struct stm32_adc *adc)
192{
193 stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
194 stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
195
196 /* Wait for Power-up time (tSTAB from datasheet) */
197 usleep_range(2, 3);
198
199 /* Software start ? (e.g. trigger detection disabled ?) */
200 if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
201 stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
202}
203
204static void stm32_adc_stop_conv(struct stm32_adc *adc)
205{
206 stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
207 stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
208
209 stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
210 stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_ADON);
211}
212
213/**
214 * stm32_adc_single_conv() - Performs a single conversion
215 * @indio_dev: IIO device
216 * @chan: IIO channel
217 * @res: conversion result
218 *
219 * The function performs a single conversion on a given channel:
220 * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
221 * - Use SW trigger
222 * - Start conversion, then wait for interrupt completion.
223 */
224static int stm32_adc_single_conv(struct iio_dev *indio_dev,
225 const struct iio_chan_spec *chan,
226 int *res)
227{
228 struct stm32_adc *adc = iio_priv(indio_dev);
229 long timeout;
230 u32 val;
231 u16 result;
232 int ret;
233
234 reinit_completion(&adc->completion);
235
236 adc->buffer = &result;
237
238 /* Program chan number in regular sequence */
239 val = stm32_adc_readl(adc, STM32F4_ADC_SQR3);
240 val &= ~STM32F4_SQ1_MASK;
241 val |= chan->channel << STM32F4_SQ1_SHIFT;
242 stm32_adc_writel(adc, STM32F4_ADC_SQR3, val);
243
244 /* Set regular sequence len (0 for 1 conversion) */
245 stm32_adc_clr_bits(adc, STM32F4_ADC_SQR1, STM32F4_L_MASK);
246
247 /* Trigger detection disabled (conversion can be launched in SW) */
248 stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
249
250 stm32_adc_conv_irq_enable(adc);
251
252 stm32_adc_start_conv(adc);
253
254 timeout = wait_for_completion_interruptible_timeout(
255 &adc->completion, STM32_ADC_TIMEOUT);
256 if (timeout == 0) {
257 ret = -ETIMEDOUT;
258 } else if (timeout < 0) {
259 ret = timeout;
260 } else {
261 *res = result;
262 ret = IIO_VAL_INT;
263 }
264
265 stm32_adc_stop_conv(adc);
266
267 stm32_adc_conv_irq_disable(adc);
268
269 return ret;
270}
271
272static int stm32_adc_read_raw(struct iio_dev *indio_dev,
273 struct iio_chan_spec const *chan,
274 int *val, int *val2, long mask)
275{
276 struct stm32_adc *adc = iio_priv(indio_dev);
277 int ret;
278
279 switch (mask) {
280 case IIO_CHAN_INFO_RAW:
281 ret = iio_device_claim_direct_mode(indio_dev);
282 if (ret)
283 return ret;
284 if (chan->type == IIO_VOLTAGE)
285 ret = stm32_adc_single_conv(indio_dev, chan, val);
286 else
287 ret = -EINVAL;
288 iio_device_release_direct_mode(indio_dev);
289 return ret;
290
291 case IIO_CHAN_INFO_SCALE:
292 *val = adc->common->vref_mv;
293 *val2 = chan->scan_type.realbits;
294 return IIO_VAL_FRACTIONAL_LOG2;
295
296 default:
297 return -EINVAL;
298 }
299}
300
301static irqreturn_t stm32_adc_isr(int irq, void *data)
302{
303 struct stm32_adc *adc = data;
304 u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
305
306 if (status & STM32F4_EOC) {
307 *adc->buffer = stm32_adc_readw(adc, STM32F4_ADC_DR);
308 complete(&adc->completion);
309 return IRQ_HANDLED;
310 }
311
312 return IRQ_NONE;
313}
314
315static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
316 const struct of_phandle_args *iiospec)
317{
318 int i;
319
320 for (i = 0; i < indio_dev->num_channels; i++)
321 if (indio_dev->channels[i].channel == iiospec->args[0])
322 return i;
323
324 return -EINVAL;
325}
326
327/**
328 * stm32_adc_debugfs_reg_access - read or write register value
329 *
330 * To read a value from an ADC register:
331 * echo [ADC reg offset] > direct_reg_access
332 * cat direct_reg_access
333 *
 334 * To write a value in an ADC register:
335 * echo [ADC_reg_offset] [value] > direct_reg_access
336 */
337static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
338 unsigned reg, unsigned writeval,
339 unsigned *readval)
340{
341 struct stm32_adc *adc = iio_priv(indio_dev);
342
343 if (!readval)
344 stm32_adc_writel(adc, reg, writeval);
345 else
346 *readval = stm32_adc_readl(adc, reg);
347
348 return 0;
349}
350
351static const struct iio_info stm32_adc_iio_info = {
352 .read_raw = stm32_adc_read_raw,
353 .debugfs_reg_access = stm32_adc_debugfs_reg_access,
354 .of_xlate = stm32_adc_of_xlate,
355 .driver_module = THIS_MODULE,
356};
357
358static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
359 struct iio_chan_spec *chan,
360 const struct stm32_adc_chan_spec *channel,
361 int scan_index)
362{
363 chan->type = channel->type;
364 chan->channel = channel->channel;
365 chan->datasheet_name = channel->name;
366 chan->scan_index = scan_index;
367 chan->indexed = 1;
368 chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
369 chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
370 chan->scan_type.sign = 'u';
371 chan->scan_type.realbits = 12;
372 chan->scan_type.storagebits = 16;
373}
374
375static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
376{
377 struct device_node *node = indio_dev->dev.of_node;
378 struct property *prop;
379 const __be32 *cur;
380 struct iio_chan_spec *channels;
381 int scan_index = 0, num_channels;
382 u32 val;
383
384 num_channels = of_property_count_u32_elems(node, "st,adc-channels");
385 if (num_channels < 0 ||
386 num_channels >= ARRAY_SIZE(stm32f4_adc123_channels)) {
387 dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
388 return num_channels < 0 ? num_channels : -EINVAL;
389 }
390
391 channels = devm_kcalloc(&indio_dev->dev, num_channels,
392 sizeof(struct iio_chan_spec), GFP_KERNEL);
393 if (!channels)
394 return -ENOMEM;
395
396 of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
397 if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
398 dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
399 return -EINVAL;
400 }
401 stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
402 &stm32f4_adc123_channels[val],
403 scan_index);
404 scan_index++;
405 }
406
407 indio_dev->num_channels = scan_index;
408 indio_dev->channels = channels;
409
410 return 0;
411}
412
413static int stm32_adc_probe(struct platform_device *pdev)
414{
415 struct iio_dev *indio_dev;
416 struct stm32_adc *adc;
417 int ret;
418
419 if (!pdev->dev.of_node)
420 return -ENODEV;
421
422 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
423 if (!indio_dev)
424 return -ENOMEM;
425
426 adc = iio_priv(indio_dev);
427 adc->common = dev_get_drvdata(pdev->dev.parent);
428 spin_lock_init(&adc->lock);
429 init_completion(&adc->completion);
430
431 indio_dev->name = dev_name(&pdev->dev);
432 indio_dev->dev.parent = &pdev->dev;
433 indio_dev->dev.of_node = pdev->dev.of_node;
434 indio_dev->info = &stm32_adc_iio_info;
435 indio_dev->modes = INDIO_DIRECT_MODE;
436
437 platform_set_drvdata(pdev, adc);
438
439 ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
440 if (ret != 0) {
441 dev_err(&pdev->dev, "missing reg property\n");
442 return -EINVAL;
443 }
444
445 adc->irq = platform_get_irq(pdev, 0);
446 if (adc->irq < 0) {
447 dev_err(&pdev->dev, "failed to get irq\n");
448 return adc->irq;
449 }
450
451 ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
452 0, pdev->name, adc);
453 if (ret) {
454 dev_err(&pdev->dev, "failed to request IRQ\n");
455 return ret;
456 }
457
458 adc->clk = devm_clk_get(&pdev->dev, NULL);
459 if (IS_ERR(adc->clk)) {
460 dev_err(&pdev->dev, "Can't get clock\n");
461 return PTR_ERR(adc->clk);
462 }
463
464 ret = clk_prepare_enable(adc->clk);
465 if (ret < 0) {
466 dev_err(&pdev->dev, "clk enable failed\n");
467 return ret;
468 }
469
470 ret = stm32_adc_chan_of_init(indio_dev);
471 if (ret < 0)
472 goto err_clk_disable;
473
474 ret = iio_device_register(indio_dev);
475 if (ret) {
476 dev_err(&pdev->dev, "iio dev register failed\n");
477 goto err_clk_disable;
478 }
479
480 return 0;
481
482err_clk_disable:
483 clk_disable_unprepare(adc->clk);
484
485 return ret;
486}
487
488static int stm32_adc_remove(struct platform_device *pdev)
489{
490 struct stm32_adc *adc = platform_get_drvdata(pdev);
491 struct iio_dev *indio_dev = iio_priv_to_dev(adc);
492
493 iio_device_unregister(indio_dev);
494 clk_disable_unprepare(adc->clk);
495
496 return 0;
497}
498
499static const struct of_device_id stm32_adc_of_match[] = {
500 { .compatible = "st,stm32f4-adc" },
501 {},
502};
503MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
504
505static struct platform_driver stm32_adc_driver = {
506 .probe = stm32_adc_probe,
507 .remove = stm32_adc_remove,
508 .driver = {
509 .name = "stm32-adc",
510 .of_match_table = stm32_adc_of_match,
511 },
512};
513module_platform_driver(stm32_adc_driver);
514
515MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
516MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
517MODULE_LICENSE("GPL v2");
518MODULE_ALIAS("platform:stm32-adc");
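
The IIO_CHAN_INFO_SCALE value returned by stm32_adc_read_raw() is a fraction, vref_mv over 2^realbits (IIO_VAL_FRACTIONAL_LOG2), so a consumer converts a raw sample to millivolts as sketched below (illustrative only, not part of the patch):

/* Shows what the IIO_VAL_FRACTIONAL_LOG2 scale means to a consumer:
 * value_mV = raw * vref_mv / 2^realbits.
 */
#include <stdio.h>

static unsigned int raw_to_mv(unsigned int raw, unsigned int vref_mv,
			      unsigned int realbits)
{
	return (raw * vref_mv) >> realbits;	/* divide by 2^realbits */
}

int main(void)
{
	/* 12-bit sample of 2048 with a 3300 mV reference -> 1650 mV */
	printf("%u mV\n", raw_to_mv(2048, 3300, 12));
	return 0;
}
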
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index f4ba23effe9a..e952e94a14af 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -14,6 +14,10 @@
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/iio/iio.h> 15#include <linux/iio/iio.h>
16#include <linux/regulator/consumer.h> 16#include <linux/regulator/consumer.h>
17#include <linux/iio/buffer.h>
18#include <linux/iio/trigger.h>
19#include <linux/iio/triggered_buffer.h>
20#include <linux/iio/trigger_consumer.h>
17 21
18enum { 22enum {
19 adc0831, 23 adc0831,
@@ -38,10 +42,16 @@ struct adc0832 {
38 .indexed = 1, \ 42 .indexed = 1, \
39 .channel = chan, \ 43 .channel = chan, \
40 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ 44 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
41 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ 45 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
46 .scan_index = chan, \
47 .scan_type = { \
48 .sign = 'u', \
49 .realbits = 8, \
50 .storagebits = 8, \
51 }, \
42 } 52 }
43 53
44#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \ 54#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
45 { \ 55 { \
46 .type = IIO_VOLTAGE, \ 56 .type = IIO_VOLTAGE, \
47 .indexed = 1, \ 57 .indexed = 1, \
@@ -49,18 +59,26 @@ struct adc0832 {
49 .channel2 = (chan2), \ 59 .channel2 = (chan2), \
50 .differential = 1, \ 60 .differential = 1, \
51 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ 61 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
52 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ 62 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
63 .scan_index = si, \
64 .scan_type = { \
65 .sign = 'u', \
66 .realbits = 8, \
67 .storagebits = 8, \
68 }, \
53 } 69 }
54 70
55static const struct iio_chan_spec adc0831_channels[] = { 71static const struct iio_chan_spec adc0831_channels[] = {
56 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), 72 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 0),
73 IIO_CHAN_SOFT_TIMESTAMP(1),
57}; 74};
58 75
59static const struct iio_chan_spec adc0832_channels[] = { 76static const struct iio_chan_spec adc0832_channels[] = {
60 ADC0832_VOLTAGE_CHANNEL(0), 77 ADC0832_VOLTAGE_CHANNEL(0),
61 ADC0832_VOLTAGE_CHANNEL(1), 78 ADC0832_VOLTAGE_CHANNEL(1),
62 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), 79 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
63 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), 80 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
81 IIO_CHAN_SOFT_TIMESTAMP(4),
64}; 82};
65 83
66static const struct iio_chan_spec adc0834_channels[] = { 84static const struct iio_chan_spec adc0834_channels[] = {
@@ -68,10 +86,11 @@ static const struct iio_chan_spec adc0834_channels[] = {
68 ADC0832_VOLTAGE_CHANNEL(1), 86 ADC0832_VOLTAGE_CHANNEL(1),
69 ADC0832_VOLTAGE_CHANNEL(2), 87 ADC0832_VOLTAGE_CHANNEL(2),
70 ADC0832_VOLTAGE_CHANNEL(3), 88 ADC0832_VOLTAGE_CHANNEL(3),
71 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), 89 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 4),
72 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), 90 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 5),
73 ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3), 91 ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 6),
74 ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2), 92 ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 7),
93 IIO_CHAN_SOFT_TIMESTAMP(8),
75}; 94};
76 95
77static const struct iio_chan_spec adc0838_channels[] = { 96static const struct iio_chan_spec adc0838_channels[] = {
@@ -83,14 +102,15 @@ static const struct iio_chan_spec adc0838_channels[] = {
83 ADC0832_VOLTAGE_CHANNEL(5), 102 ADC0832_VOLTAGE_CHANNEL(5),
84 ADC0832_VOLTAGE_CHANNEL(6), 103 ADC0832_VOLTAGE_CHANNEL(6),
85 ADC0832_VOLTAGE_CHANNEL(7), 104 ADC0832_VOLTAGE_CHANNEL(7),
86 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), 105 ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
87 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), 106 ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
88 ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3), 107 ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
89 ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2), 108 ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
90 ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5), 109 ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
91 ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4), 110 ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
92 ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7), 111 ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
93 ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6), 112 ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
113 IIO_CHAN_SOFT_TIMESTAMP(16),
94}; 114};
95 115
96static int adc0831_adc_conversion(struct adc0832 *adc) 116static int adc0831_adc_conversion(struct adc0832 *adc)
@@ -178,6 +198,42 @@ static const struct iio_info adc0832_info = {
178 .driver_module = THIS_MODULE, 198 .driver_module = THIS_MODULE,
179}; 199};
180 200
201static irqreturn_t adc0832_trigger_handler(int irq, void *p)
202{
203 struct iio_poll_func *pf = p;
204 struct iio_dev *indio_dev = pf->indio_dev;
205 struct adc0832 *adc = iio_priv(indio_dev);
206 u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
207 int scan_index;
208 int i = 0;
209
210 mutex_lock(&adc->lock);
211
212 for_each_set_bit(scan_index, indio_dev->active_scan_mask,
213 indio_dev->masklength) {
214 const struct iio_chan_spec *scan_chan =
215 &indio_dev->channels[scan_index];
216 int ret = adc0832_adc_conversion(adc, scan_chan->channel,
217 scan_chan->differential);
218 if (ret < 0) {
219 dev_warn(&adc->spi->dev,
220 "failed to get conversion data\n");
221 goto out;
222 }
223
224 data[i] = ret;
225 i++;
226 }
227 iio_push_to_buffers_with_timestamp(indio_dev, data,
228 iio_get_time_ns(indio_dev));
229out:
230 mutex_unlock(&adc->lock);
231
232 iio_trigger_notify_done(indio_dev->trig);
233
234 return IRQ_HANDLED;
235}
236
181static int adc0832_probe(struct spi_device *spi) 237static int adc0832_probe(struct spi_device *spi)
182{ 238{
183 struct iio_dev *indio_dev; 239 struct iio_dev *indio_dev;
@@ -233,9 +289,20 @@ static int adc0832_probe(struct spi_device *spi)
233 289
234 spi_set_drvdata(spi, indio_dev); 290 spi_set_drvdata(spi, indio_dev);
235 291
292 ret = iio_triggered_buffer_setup(indio_dev, NULL,
293 adc0832_trigger_handler, NULL);
294 if (ret)
295 goto err_reg_disable;
296
236 ret = iio_device_register(indio_dev); 297 ret = iio_device_register(indio_dev);
237 if (ret) 298 if (ret)
238 regulator_disable(adc->reg); 299 goto err_buffer_cleanup;
300
301 return 0;
302err_buffer_cleanup:
303 iio_triggered_buffer_cleanup(indio_dev);
304err_reg_disable:
305 regulator_disable(adc->reg);
239 306
240 return ret; 307 return ret;
241} 308}
@@ -246,6 +313,7 @@ static int adc0832_remove(struct spi_device *spi)
246 struct adc0832 *adc = iio_priv(indio_dev); 313 struct adc0832 *adc = iio_priv(indio_dev);
247 314
248 iio_device_unregister(indio_dev); 315 iio_device_unregister(indio_dev);
316 iio_triggered_buffer_cleanup(indio_dev);
249 regulator_disable(adc->reg); 317 regulator_disable(adc->reg);
250 318
251 return 0; 319 return 0;
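
In adc0832_trigger_handler() above, the 24-byte scratch buffer covers the largest supported scan: up to 16 one-byte samples from the ADC0838 plus the 8-byte timestamp that iio_push_to_buffers_with_timestamp() stores at the aligned end of the scan. A small sketch of that layout arithmetic (illustrative only, not part of the patch):

/* The s64 timestamp appended by iio_push_to_buffers_with_timestamp() lands
 * at the next 8-byte boundary after the sample data, so 16 one-byte samples
 * need 16 + 8 = 24 bytes in total.
 */
#include <stdio.h>

#define ALIGN8(x) (((x) + 7UL) & ~7UL)

int main(void)
{
	unsigned long samples = 16;			/* 8-bit channels enabled */
	unsigned long ts_offset = ALIGN8(samples);	/* where the s64 timestamp lands */

	printf("timestamp at byte %lu, total %lu bytes\n",
	       ts_offset, ts_offset + 8);		/* -> 16 and 24 */
	return 0;
}
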
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f94b69f9c288..4836a0d7aef5 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -27,6 +27,7 @@
27#include <linux/iio/buffer.h> 27#include <linux/iio/buffer.h>
28#include <linux/iio/trigger_consumer.h> 28#include <linux/iio/trigger_consumer.h>
29#include <linux/iio/triggered_buffer.h> 29#include <linux/iio/triggered_buffer.h>
30#include <linux/regulator/consumer.h>
30 31
31#define TI_ADC_DRV_NAME "ti-adc161s626" 32#define TI_ADC_DRV_NAME "ti-adc161s626"
32 33
@@ -39,7 +40,9 @@ static const struct iio_chan_spec ti_adc141s626_channels[] = {
39 { 40 {
40 .type = IIO_VOLTAGE, 41 .type = IIO_VOLTAGE,
41 .channel = 0, 42 .channel = 0,
42 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 43 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
44 BIT(IIO_CHAN_INFO_SCALE) |
45 BIT(IIO_CHAN_INFO_OFFSET),
43 .scan_index = 0, 46 .scan_index = 0,
44 .scan_type = { 47 .scan_type = {
45 .sign = 's', 48 .sign = 's',
@@ -54,7 +57,9 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
54 { 57 {
55 .type = IIO_VOLTAGE, 58 .type = IIO_VOLTAGE,
56 .channel = 0, 59 .channel = 0,
57 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 60 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
61 BIT(IIO_CHAN_INFO_SCALE) |
62 BIT(IIO_CHAN_INFO_OFFSET),
58 .scan_index = 0, 63 .scan_index = 0,
59 .scan_type = { 64 .scan_type = {
60 .sign = 's', 65 .sign = 's',
@@ -68,6 +73,8 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
68struct ti_adc_data { 73struct ti_adc_data {
69 struct iio_dev *indio_dev; 74 struct iio_dev *indio_dev;
70 struct spi_device *spi; 75 struct spi_device *spi;
76 struct regulator *ref;
77
71 u8 read_size; 78 u8 read_size;
72 u8 shift; 79 u8 shift;
73 80
@@ -135,18 +142,32 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
135 struct ti_adc_data *data = iio_priv(indio_dev); 142 struct ti_adc_data *data = iio_priv(indio_dev);
136 int ret; 143 int ret;
137 144
138 if (mask != IIO_CHAN_INFO_RAW) 145 switch (mask) {
139 return -EINVAL; 146 case IIO_CHAN_INFO_RAW:
147 ret = iio_device_claim_direct_mode(indio_dev);
148 if (ret)
149 return ret;
140 150
141 ret = iio_device_claim_direct_mode(indio_dev); 151 ret = ti_adc_read_measurement(data, chan, val);
142 if (ret) 152 iio_device_release_direct_mode(indio_dev);
143 return ret;
144 153
145 ret = ti_adc_read_measurement(data, chan, val); 154 if (ret)
146 iio_device_release_direct_mode(indio_dev); 155 return ret;
147 156
148 if (!ret)
149 return IIO_VAL_INT; 157 return IIO_VAL_INT;
158 case IIO_CHAN_INFO_SCALE:
159 ret = regulator_get_voltage(data->ref);
160 if (ret < 0)
161 return ret;
162
163 *val = ret / 1000;
164 *val2 = chan->scan_type.realbits;
165
166 return IIO_VAL_FRACTIONAL_LOG2;
167 case IIO_CHAN_INFO_OFFSET:
168 *val = 1 << (chan->scan_type.realbits - 1);
169 return IIO_VAL_INT;
170 }
150 171
151 return 0; 172 return 0;
152} 173}
@@ -191,10 +212,17 @@ static int ti_adc_probe(struct spi_device *spi)
191 break; 212 break;
192 } 213 }
193 214
215 data->ref = devm_regulator_get(&spi->dev, "vdda");
216 if (!IS_ERR(data->ref)) {
217 ret = regulator_enable(data->ref);
218 if (ret < 0)
219 return ret;
220 }
221
194 ret = iio_triggered_buffer_setup(indio_dev, NULL, 222 ret = iio_triggered_buffer_setup(indio_dev, NULL,
195 ti_adc_trigger_handler, NULL); 223 ti_adc_trigger_handler, NULL);
196 if (ret) 224 if (ret)
197 return ret; 225 goto error_regulator_disable;
198 226
199 ret = iio_device_register(indio_dev); 227 ret = iio_device_register(indio_dev);
200 if (ret) 228 if (ret)
@@ -205,15 +233,20 @@ static int ti_adc_probe(struct spi_device *spi)
205error_unreg_buffer: 233error_unreg_buffer:
206 iio_triggered_buffer_cleanup(indio_dev); 234 iio_triggered_buffer_cleanup(indio_dev);
207 235
236error_regulator_disable:
237 regulator_disable(data->ref);
238
208 return ret; 239 return ret;
209} 240}
210 241
211static int ti_adc_remove(struct spi_device *spi) 242static int ti_adc_remove(struct spi_device *spi)
212{ 243{
213 struct iio_dev *indio_dev = spi_get_drvdata(spi); 244 struct iio_dev *indio_dev = spi_get_drvdata(spi);
245 struct ti_adc_data *data = iio_priv(indio_dev);
214 246
215 iio_device_unregister(indio_dev); 247 iio_device_unregister(indio_dev);
216 iio_triggered_buffer_cleanup(indio_dev); 248 iio_triggered_buffer_cleanup(indio_dev);
249 regulator_disable(data->ref);
217 250
218 return 0; 251 return 0;
219} 252}
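
With the SCALE and OFFSET entries added above, userspace recovers a voltage from the signed raw value as (raw + offset) * scale. A small sketch with assumed example numbers, a 16-bit part and a 3.0 V vdda supply, both hypothetical here (illustrative only, not part of the patch):

/* How a consumer combines the OFFSET and SCALE now reported by
 * ti_adc_read_raw(): with a signed n-bit result,
 * voltage_mV = (raw + 2^(n-1)) * vref_mV / 2^n.
 */
#include <stdio.h>

int main(void)
{
	int raw = -4096;		/* signed sample, example value */
	int realbits = 16;		/* assumed resolution */
	int vref_mv = 3000;		/* assumed "vdda" supply, in mV */

	long offset = 1L << (realbits - 1);	/* 32768 */
	double mv = (raw + offset) * (double)vref_mv / (1L << realbits);

	printf("%.1f mV\n", mv);	/* (-4096 + 32768) * 3000 / 65536 = 1312.5 */
	return 0;
}
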
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacca2541..ad9dec30bb30 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -30,10 +30,28 @@
30#include <linux/iio/buffer.h> 30#include <linux/iio/buffer.h>
31#include <linux/iio/kfifo_buf.h> 31#include <linux/iio/kfifo_buf.h>
32 32
33#include <linux/dmaengine.h>
34#include <linux/dma-mapping.h>
35
36#define DMA_BUFFER_SIZE SZ_2K
37
38struct tiadc_dma {
39 struct dma_slave_config conf;
40 struct dma_chan *chan;
41 dma_addr_t addr;
42 dma_cookie_t cookie;
43 u8 *buf;
44 int current_period;
45 int period_size;
46 u8 fifo_thresh;
47};
48
33struct tiadc_device { 49struct tiadc_device {
34 struct ti_tscadc_dev *mfd_tscadc; 50 struct ti_tscadc_dev *mfd_tscadc;
51 struct tiadc_dma dma;
35 struct mutex fifo1_lock; /* to protect fifo access */ 52 struct mutex fifo1_lock; /* to protect fifo access */
36 int channels; 53 int channels;
54 int total_ch_enabled;
37 u8 channel_line[8]; 55 u8 channel_line[8];
38 u8 channel_step[8]; 56 u8 channel_step[8];
39 int buffer_en_ch_steps; 57 int buffer_en_ch_steps;
@@ -198,6 +216,67 @@ static irqreturn_t tiadc_worker_h(int irq, void *private)
198 return IRQ_HANDLED; 216 return IRQ_HANDLED;
199} 217}
200 218
219static void tiadc_dma_rx_complete(void *param)
220{
221 struct iio_dev *indio_dev = param;
222 struct tiadc_device *adc_dev = iio_priv(indio_dev);
223 struct tiadc_dma *dma = &adc_dev->dma;
224 u8 *data;
225 int i;
226
227 data = dma->buf + dma->current_period * dma->period_size;
228 dma->current_period = 1 - dma->current_period; /* swap the buffer ID */
229
230 for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
231 iio_push_to_buffers(indio_dev, data);
232 data += indio_dev->scan_bytes;
233 }
234}
235
236static int tiadc_start_dma(struct iio_dev *indio_dev)
237{
238 struct tiadc_device *adc_dev = iio_priv(indio_dev);
239 struct tiadc_dma *dma = &adc_dev->dma;
240 struct dma_async_tx_descriptor *desc;
241
242 dma->current_period = 0; /* We start to fill period 0 */
243 /*
244 * Make the fifo thresh as the multiple of total number of
245 * channels enabled, so make sure that cyclic DMA period
246 * length is also a multiple of total number of channels
247 * enabled. This ensures that no invalid data is reported
248 * to the stack via iio_push_to_buffers().
249 */
250 dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
251 adc_dev->total_ch_enabled) - 1;
252 /* Make sure that period length is multiple of fifo thresh level */
253 dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
254 (dma->fifo_thresh + 1) * sizeof(u16));
255
256 dma->conf.src_maxburst = dma->fifo_thresh + 1;
257 dmaengine_slave_config(dma->chan, &dma->conf);
258
259 desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
260 dma->period_size * 2,
261 dma->period_size, DMA_DEV_TO_MEM,
262 DMA_PREP_INTERRUPT);
263 if (!desc)
264 return -EBUSY;
265
266 desc->callback = tiadc_dma_rx_complete;
267 desc->callback_param = indio_dev;
268
269 dma->cookie = dmaengine_submit(desc);
270
271 dma_async_issue_pending(dma->chan);
272
273 tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
274 tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
275 tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);
276
277 return 0;
278}
279
201static int tiadc_buffer_preenable(struct iio_dev *indio_dev) 280static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
202{ 281{
203 struct tiadc_device *adc_dev = iio_priv(indio_dev); 282 struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -218,20 +297,30 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
218static int tiadc_buffer_postenable(struct iio_dev *indio_dev) 297static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
219{ 298{
220 struct tiadc_device *adc_dev = iio_priv(indio_dev); 299 struct tiadc_device *adc_dev = iio_priv(indio_dev);
300 struct tiadc_dma *dma = &adc_dev->dma;
301 unsigned int irq_enable;
221 unsigned int enb = 0; 302 unsigned int enb = 0;
222 u8 bit; 303 u8 bit;
223 304
224 tiadc_step_config(indio_dev); 305 tiadc_step_config(indio_dev);
225 for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) 306 for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
226 enb |= (get_adc_step_bit(adc_dev, bit) << 1); 307 enb |= (get_adc_step_bit(adc_dev, bit) << 1);
308 adc_dev->total_ch_enabled++;
309 }
227 adc_dev->buffer_en_ch_steps = enb; 310 adc_dev->buffer_en_ch_steps = enb;
228 311
312 if (dma->chan)
313 tiadc_start_dma(indio_dev);
314
229 am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb); 315 am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
230 316
231 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES 317 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
232 | IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW); 318 | IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
233 tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES 319
234 | IRQENB_FIFO1OVRRUN); 320 irq_enable = IRQENB_FIFO1OVRRUN;
321 if (!dma->chan)
322 irq_enable |= IRQENB_FIFO1THRES;
323 tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);
235 324
236 return 0; 325 return 0;
237} 326}
@@ -239,12 +328,18 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
239static int tiadc_buffer_predisable(struct iio_dev *indio_dev) 328static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
240{ 329{
241 struct tiadc_device *adc_dev = iio_priv(indio_dev); 330 struct tiadc_device *adc_dev = iio_priv(indio_dev);
331 struct tiadc_dma *dma = &adc_dev->dma;
242 int fifo1count, i, read; 332 int fifo1count, i, read;
243 333
244 tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES | 334 tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
245 IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW)); 335 IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
246 am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps); 336 am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
247 adc_dev->buffer_en_ch_steps = 0; 337 adc_dev->buffer_en_ch_steps = 0;
338 adc_dev->total_ch_enabled = 0;
339 if (dma->chan) {
340 tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
341 dmaengine_terminate_async(dma->chan);
342 }
248 343
249 /* Flush FIFO of leftover data in the time it takes to disable adc */ 344 /* Flush FIFO of leftover data in the time it takes to disable adc */
250 fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); 345 fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -430,6 +525,41 @@ static const struct iio_info tiadc_info = {
430 .driver_module = THIS_MODULE, 525 .driver_module = THIS_MODULE,
431}; 526};
432 527
528static int tiadc_request_dma(struct platform_device *pdev,
529 struct tiadc_device *adc_dev)
530{
531 struct tiadc_dma *dma = &adc_dev->dma;
532 dma_cap_mask_t mask;
533
534 /* Default slave configuration parameters */
535 dma->conf.direction = DMA_DEV_TO_MEM;
536 dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
537 dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;
538
539 dma_cap_zero(mask);
540 dma_cap_set(DMA_CYCLIC, mask);
541
542 /* Get a channel for RX */
543 dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
544 if (IS_ERR(dma->chan)) {
545 int ret = PTR_ERR(dma->chan);
546
547 dma->chan = NULL;
548 return ret;
549 }
550
551 /* RX buffer */
552 dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
553 &dma->addr, GFP_KERNEL);
554 if (!dma->buf)
555 goto err;
556
557 return 0;
558err:
559 dma_release_channel(dma->chan);
560 return -ENOMEM;
561}
562
433static int tiadc_parse_dt(struct platform_device *pdev, 563static int tiadc_parse_dt(struct platform_device *pdev,
434 struct tiadc_device *adc_dev) 564 struct tiadc_device *adc_dev)
435{ 565{
@@ -512,8 +642,14 @@ static int tiadc_probe(struct platform_device *pdev)
512 642
513 platform_set_drvdata(pdev, indio_dev); 643 platform_set_drvdata(pdev, indio_dev);
514 644
645 err = tiadc_request_dma(pdev, adc_dev);
646 if (err && err == -EPROBE_DEFER)
647 goto err_dma;
648
515 return 0; 649 return 0;
516 650
651err_dma:
652 iio_device_unregister(indio_dev);
517err_buffer_unregister: 653err_buffer_unregister:
518 tiadc_iio_buffered_hardware_remove(indio_dev); 654 tiadc_iio_buffered_hardware_remove(indio_dev);
519err_free_channels: 655err_free_channels:
@@ -525,8 +661,14 @@ static int tiadc_remove(struct platform_device *pdev)
525{ 661{
526 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 662 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
527 struct tiadc_device *adc_dev = iio_priv(indio_dev); 663 struct tiadc_device *adc_dev = iio_priv(indio_dev);
664 struct tiadc_dma *dma = &adc_dev->dma;
528 u32 step_en; 665 u32 step_en;
529 666
667 if (dma->chan) {
668 dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
669 dma->buf, dma->addr);
670 dma_release_channel(dma->chan);
671 }
530 iio_device_unregister(indio_dev); 672 iio_device_unregister(indio_dev);
531 tiadc_iio_buffered_hardware_remove(indio_dev); 673 tiadc_iio_buffered_hardware_remove(indio_dev);
532 tiadc_channels_remove(indio_dev); 674 tiadc_channels_remove(indio_dev);
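
The sizing in tiadc_start_dma() rounds the FIFO threshold down to a multiple of the enabled channel count, then rounds half of the 2 KiB DMA buffer down to a multiple of the resulting burst size in bytes. A worked sketch, assuming a FIFO1_THRESHOLD of 19 and three enabled channels, both assumptions taken for illustration (not part of the patch):

/* Worked example of the arithmetic in tiadc_start_dma(). */
#include <stdio.h>

#define ROUNDDOWN(x, y)	(((x) / (y)) * (y))

int main(void)
{
	unsigned int fifo1_threshold = 19;	/* assumed hardware threshold */
	unsigned int channels = 3;		/* total_ch_enabled */
	unsigned int buf_size = 2048;		/* DMA_BUFFER_SIZE */

	unsigned int fifo_thresh =
		ROUNDDOWN(fifo1_threshold + 1, channels) - 1;	/* 17 */
	unsigned int period_size =
		ROUNDDOWN(buf_size / 2, (fifo_thresh + 1) * 2);	/* 1008 */

	printf("fifo_thresh=%u period_size=%u\n", fifo_thresh, period_size);
	return 0;
}
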
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 26a6026de614..e108996a9627 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -2,6 +2,7 @@
2# IIO common modules 2# IIO common modules
3# 3#
4 4
5source "drivers/iio/common/cros_ec_sensors/Kconfig"
5source "drivers/iio/common/hid-sensors/Kconfig" 6source "drivers/iio/common/hid-sensors/Kconfig"
6source "drivers/iio/common/ms_sensors/Kconfig" 7source "drivers/iio/common/ms_sensors/Kconfig"
7source "drivers/iio/common/ssp_sensors/Kconfig" 8source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 585da6a1b188..6fa760e1bdd5 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,6 +7,7 @@
7# 7#
8 8
9# When adding new entries keep the list in alphabetical order 9# When adding new entries keep the list in alphabetical order
10obj-y += cros_ec_sensors/
10obj-y += hid-sensors/ 11obj-y += hid-sensors/
11obj-y += ms_sensors/ 12obj-y += ms_sensors/
12obj-y += ssp_sensors/ 13obj-y += ssp_sensors/
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
new file mode 100644
index 000000000000..135f6825903f
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -0,0 +1,22 @@
1#
2# Chrome OS Embedded Controller managed sensors library
3#
4config IIO_CROS_EC_SENSORS_CORE
5 tristate "ChromeOS EC Sensors Core"
6 depends on SYSFS && MFD_CROS_EC
7 select IIO_BUFFER
8 select IIO_TRIGGERED_BUFFER
9 help
10 Base module for the ChromeOS EC Sensors module.
11 Contains core functions used by other IIO CrosEC sensor
12 drivers.
 13 Defines common attributes and a sysfs interrupt handler.
14
15config IIO_CROS_EC_SENSORS
16 tristate "ChromeOS EC Contiguous Sensors"
17 depends on IIO_CROS_EC_SENSORS_CORE
18 help
19 Module to handle 3d contiguous sensors like
 20 accelerometers, gyroscopes and magnetometers that are
21 presented by the ChromeOS EC Sensor hub.
 22 Creates an IIO device for each function.
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
new file mode 100644
index 000000000000..ec716ff2a775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for sensors seen through the ChromeOS EC sensor hub.
3#
4
5obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
6obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
new file mode 100644
index 000000000000..d6c372bb433b
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -0,0 +1,322 @@
1/*
2 * cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
3 *
4 * Copyright (C) 2016 Google, Inc
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * This driver uses the cros-ec interface to communicate with the Chrome OS
16 * EC about sensors data. Data access is presented through iio sysfs.
17 */
18
19#include <linux/delay.h>
20#include <linux/device.h>
21#include <linux/iio/buffer.h>
22#include <linux/iio/iio.h>
23#include <linux/iio/kfifo_buf.h>
24#include <linux/iio/trigger_consumer.h>
25#include <linux/iio/triggered_buffer.h>
26#include <linux/kernel.h>
27#include <linux/mfd/cros_ec.h>
28#include <linux/mfd/cros_ec_commands.h>
29#include <linux/module.h>
30#include <linux/platform_device.h>
31#include <linux/slab.h>
32#include <linux/sysfs.h>
33
34#include "cros_ec_sensors_core.h"
35
36#define CROS_EC_SENSORS_MAX_CHANNELS 4
37
38/* State data for ec_sensors iio driver. */
39struct cros_ec_sensors_state {
40 /* Shared by all sensors */
41 struct cros_ec_sensors_core_state core;
42
43 struct iio_chan_spec channels[CROS_EC_SENSORS_MAX_CHANNELS];
44};
45
46static int cros_ec_sensors_read(struct iio_dev *indio_dev,
47 struct iio_chan_spec const *chan,
48 int *val, int *val2, long mask)
49{
50 struct cros_ec_sensors_state *st = iio_priv(indio_dev);
51 s16 data = 0;
52 s64 val64;
53 int i;
54 int ret;
55 int idx = chan->scan_index;
56
57 mutex_lock(&st->core.cmd_lock);
58
59 switch (mask) {
60 case IIO_CHAN_INFO_RAW:
61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
62 if (ret < 0)
63 break;
64
65 *val = data;
66 break;
67 case IIO_CHAN_INFO_CALIBBIAS:
68 st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
69 st->core.param.sensor_offset.flags = 0;
70
71 ret = cros_ec_motion_send_host_cmd(&st->core, 0);
72 if (ret < 0)
73 break;
74
75 /* Save values */
76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
77 st->core.calib[i] =
78 st->core.resp->sensor_offset.offset[i];
79
80 *val = st->core.calib[idx];
81 break;
82 case IIO_CHAN_INFO_SCALE:
83 st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
84 st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
85
86 ret = cros_ec_motion_send_host_cmd(&st->core, 0);
87 if (ret < 0)
88 break;
89
90 val64 = st->core.resp->sensor_range.ret;
91 switch (st->core.type) {
92 case MOTIONSENSE_TYPE_ACCEL:
93 /*
 94 * EC returns data in g, iio expects m/s^2.
95 * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
96 */
97 *val = div_s64(val64 * 980665, 10);
98 *val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
99 ret = IIO_VAL_FRACTIONAL;
100 break;
101 case MOTIONSENSE_TYPE_GYRO:
102 /*
103 * EC returns data in dps, iio expects rad/s.
104 * Do not use IIO_DEGREE_TO_RAD to avoid precision
105 * loss. Round to the nearest integer.
106 */
107 *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
108 *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
109 ret = IIO_VAL_FRACTIONAL;
110 break;
111 case MOTIONSENSE_TYPE_MAG:
112 /*
113 * EC returns data in 16LSB / uT,
114 * iio expects Gauss
115 */
116 *val = val64;
117 *val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
118 ret = IIO_VAL_FRACTIONAL;
119 break;
120 default:
121 ret = -EINVAL;
122 }
123 break;
124 default:
125 ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
126 mask);
127 break;
128 }
129 mutex_unlock(&st->core.cmd_lock);
130
131 return ret;
132}
133
134static int cros_ec_sensors_write(struct iio_dev *indio_dev,
135 struct iio_chan_spec const *chan,
136 int val, int val2, long mask)
137{
138 struct cros_ec_sensors_state *st = iio_priv(indio_dev);
139 int i;
140 int ret;
141 int idx = chan->scan_index;
142
143 mutex_lock(&st->core.cmd_lock);
144
145 switch (mask) {
146 case IIO_CHAN_INFO_CALIBBIAS:
147 st->core.calib[idx] = val;
148
149 /* Send to EC for each axis, even if not complete */
150 st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
151 st->core.param.sensor_offset.flags =
152 MOTION_SENSE_SET_OFFSET;
153 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
154 st->core.param.sensor_offset.offset[i] =
155 st->core.calib[i];
156 st->core.param.sensor_offset.temp =
157 EC_MOTION_SENSE_INVALID_CALIB_TEMP;
158
159 ret = cros_ec_motion_send_host_cmd(&st->core, 0);
160 break;
161 case IIO_CHAN_INFO_SCALE:
162 if (st->core.type == MOTIONSENSE_TYPE_MAG) {
163 ret = -EINVAL;
164 break;
165 }
166 st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
167 st->core.param.sensor_range.data = val;
168
169 /* Always roundup, so caller gets at least what it asks for. */
170 st->core.param.sensor_range.roundup = 1;
171
172 ret = cros_ec_motion_send_host_cmd(&st->core, 0);
173 break;
174 default:
175 ret = cros_ec_sensors_core_write(
176 &st->core, chan, val, val2, mask);
177 break;
178 }
179
180 mutex_unlock(&st->core.cmd_lock);
181
182 return ret;
183}
184
185static const struct iio_info ec_sensors_info = {
186 .read_raw = &cros_ec_sensors_read,
187 .write_raw = &cros_ec_sensors_write,
188 .driver_module = THIS_MODULE,
189};
190
191static int cros_ec_sensors_probe(struct platform_device *pdev)
192{
193 struct device *dev = &pdev->dev;
194 struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
195 struct cros_ec_device *ec_device;
196 struct iio_dev *indio_dev;
197 struct cros_ec_sensors_state *state;
198 struct iio_chan_spec *channel;
199 int ret, i;
200
201 if (!ec_dev || !ec_dev->ec_dev) {
202 dev_warn(&pdev->dev, "No CROS EC device found.\n");
203 return -EINVAL;
204 }
205 ec_device = ec_dev->ec_dev;
206
207 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
208 if (!indio_dev)
209 return -ENOMEM;
210
211 ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
212 if (ret)
213 return ret;
214
215 indio_dev->info = &ec_sensors_info;
216 state = iio_priv(indio_dev);
217 for (channel = state->channels, i = CROS_EC_SENSOR_X;
218 i < CROS_EC_SENSOR_MAX_AXIS; i++, channel++) {
219 /* Common part */
220 channel->info_mask_separate =
221 BIT(IIO_CHAN_INFO_RAW) |
222 BIT(IIO_CHAN_INFO_CALIBBIAS);
223 channel->info_mask_shared_by_all =
224 BIT(IIO_CHAN_INFO_SCALE) |
225 BIT(IIO_CHAN_INFO_FREQUENCY) |
226 BIT(IIO_CHAN_INFO_SAMP_FREQ);
227 channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
228 channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
229 channel->scan_index = i;
230 channel->ext_info = cros_ec_sensors_ext_info;
231 channel->modified = 1;
232 channel->channel2 = IIO_MOD_X + i;
233 channel->scan_type.sign = 's';
234
235 /* Sensor specific */
236 switch (state->core.type) {
237 case MOTIONSENSE_TYPE_ACCEL:
238 channel->type = IIO_ACCEL;
239 break;
240 case MOTIONSENSE_TYPE_GYRO:
241 channel->type = IIO_ANGL_VEL;
242 break;
243 case MOTIONSENSE_TYPE_MAG:
244 channel->type = IIO_MAGN;
245 break;
246 default:
247 dev_err(&pdev->dev, "Unknown motion sensor\n");
248 return -EINVAL;
249 }
250 }
251
252 /* Timestamp */
253 channel->type = IIO_TIMESTAMP;
254 channel->channel = -1;
255 channel->scan_index = CROS_EC_SENSOR_MAX_AXIS;
256 channel->scan_type.sign = 's';
257 channel->scan_type.realbits = 64;
258 channel->scan_type.storagebits = 64;
259
260 indio_dev->channels = state->channels;
261 indio_dev->num_channels = CROS_EC_SENSORS_MAX_CHANNELS;
262
263 /* There is only enough room for accel and gyro in the io space */
264 if ((state->core.ec->cmd_readmem != NULL) &&
265 (state->core.type != MOTIONSENSE_TYPE_MAG))
266 state->core.read_ec_sensors_data = cros_ec_sensors_read_lpc;
267 else
268 state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
269
270 ret = iio_triggered_buffer_setup(indio_dev, NULL,
271 cros_ec_sensors_capture, NULL);
272 if (ret)
273 return ret;
274
275 ret = iio_device_register(indio_dev);
276 if (ret)
277 goto error_uninit_buffer;
278
279 return 0;
280
281error_uninit_buffer:
282 iio_triggered_buffer_cleanup(indio_dev);
283
284 return ret;
285}
286
287static int cros_ec_sensors_remove(struct platform_device *pdev)
288{
289 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
290
291 iio_device_unregister(indio_dev);
292 iio_triggered_buffer_cleanup(indio_dev);
293
294 return 0;
295}
296
297static const struct platform_device_id cros_ec_sensors_ids[] = {
298 {
299 .name = "cros-ec-accel",
300 },
301 {
302 .name = "cros-ec-gyro",
303 },
304 {
305 .name = "cros-ec-mag",
306 },
307 { /* sentinel */ }
308};
309MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
310
311static struct platform_driver cros_ec_sensors_platform_driver = {
312 .driver = {
313 .name = "cros-ec-sensors",
314 },
315 .probe = cros_ec_sensors_probe,
316 .remove = cros_ec_sensors_remove,
317 .id_table = cros_ec_sensors_ids,
318};
319module_platform_driver(cros_ec_sensors_platform_driver);
320
321MODULE_DESCRIPTION("ChromeOS EC 3-axis sensors driver");
322MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
new file mode 100644
index 000000000000..416cae5ebbd0
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -0,0 +1,450 @@
1/*
2 * cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
3 *
4 * Copyright (C) 2016 Google, Inc
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/iio/buffer.h>
19#include <linux/iio/iio.h>
20#include <linux/iio/kfifo_buf.h>
21#include <linux/iio/trigger_consumer.h>
22#include <linux/kernel.h>
23#include <linux/mfd/cros_ec.h>
24#include <linux/mfd/cros_ec_commands.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/sysfs.h>
28#include <linux/platform_device.h>
29
30#include "cros_ec_sensors_core.h"
31
32static char *cros_ec_loc[] = {
33 [MOTIONSENSE_LOC_BASE] = "base",
34 [MOTIONSENSE_LOC_LID] = "lid",
35 [MOTIONSENSE_LOC_MAX] = "unknown",
36};
37
38int cros_ec_sensors_core_init(struct platform_device *pdev,
39 struct iio_dev *indio_dev,
40 bool physical_device)
41{
42 struct device *dev = &pdev->dev;
43 struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
44 struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
45 struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
46
47 platform_set_drvdata(pdev, indio_dev);
48
49 state->ec = ec->ec_dev;
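	/*
	 * The command buffer must hold either the motion-sense parameters or
	 * the largest possible EC response, whichever is bigger.
	 */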
50 state->msg = devm_kzalloc(&pdev->dev,
51 max((u16)sizeof(struct ec_params_motion_sense),
52 state->ec->max_response), GFP_KERNEL);
53 if (!state->msg)
54 return -ENOMEM;
55
56 state->resp = (struct ec_response_motion_sense *)state->msg->data;
57
58 mutex_init(&state->cmd_lock);
59
60 /* Set up the host command structure. */
61 state->msg->version = 2;
62 state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
63 state->msg->outsize = sizeof(struct ec_params_motion_sense);
64
65 indio_dev->dev.parent = &pdev->dev;
66 indio_dev->name = pdev->name;
67
68 if (physical_device) {
69 indio_dev->modes = INDIO_DIRECT_MODE;
70
71 state->param.cmd = MOTIONSENSE_CMD_INFO;
72 state->param.info.sensor_num = sensor_platform->sensor_num;
73 if (cros_ec_motion_send_host_cmd(state, 0)) {
74			dev_warn(dev, "Cannot access sensor info\n");
75 return -EIO;
76 }
77 state->type = state->resp->info.type;
78 state->loc = state->resp->info.location;
79 }
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
84
85int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
86 u16 opt_length)
87{
88 int ret;
89
90 if (opt_length)
91 state->msg->insize = min(opt_length, state->ec->max_response);
92 else
93 state->msg->insize = state->ec->max_response;
94
95 memcpy(state->msg->data, &state->param, sizeof(state->param));
96
97 ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
98 if (ret < 0)
99 return -EIO;
100
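	/* Copy the response out only when it is not already in the command buffer. */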
101 if (ret &&
102 state->resp != (struct ec_response_motion_sense *)state->msg->data)
103 memcpy(state->resp, state->msg->data, ret);
104
105 return 0;
106}
107EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
108
109static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
110 uintptr_t private, const struct iio_chan_spec *chan,
111 const char *buf, size_t len)
112{
113 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
114 int ret, i;
115 bool calibrate;
116
117 ret = strtobool(buf, &calibrate);
118 if (ret < 0)
119 return ret;
120 if (!calibrate)
121 return -EINVAL;
122
123 mutex_lock(&st->cmd_lock);
124 st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
125 ret = cros_ec_motion_send_host_cmd(st, 0);
126 if (ret != 0) {
127 dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
128 } else {
129 /* Save values */
130 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
131 st->calib[i] = st->resp->perform_calib.offset[i];
132 }
133 mutex_unlock(&st->cmd_lock);
134
135 return ret ? ret : len;
136}
137
138static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
139 uintptr_t private, const struct iio_chan_spec *chan,
140 char *buf)
141{
142 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
143
144 return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
145}
146
147const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
148 {
149 .name = "calibrate",
150 .shared = IIO_SHARED_BY_ALL,
151 .write = cros_ec_sensors_calibrate
152 },
153 {
154 .name = "location",
155 .shared = IIO_SHARED_BY_ALL,
156 .read = cros_ec_sensors_loc
157 },
158 { },
159};
160EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
161
162/**
163 * cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
164 * @st: pointer to state information for device
165 * @idx: sensor index (should be element of enum sensor_index)
166 *
167 * Return: address to read at
168 */
169static unsigned int cros_ec_sensors_idx_to_reg(
170 struct cros_ec_sensors_core_state *st,
171 unsigned int idx)
172{
173 /*
174	 * When using the LPC interface, there is only room for 2 accel and 1 gyro.
175	 * The first halfword of the accel area holds the lid angle.
176 */
177 if (st->type == MOTIONSENSE_TYPE_ACCEL)
178 return EC_MEMMAP_ACC_DATA + sizeof(u16) *
179 (1 + idx + st->param.info.sensor_num *
180 CROS_EC_SENSOR_MAX_AXIS);
181
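	/* Gyroscope data has its own memmap area; idx selects the axis. */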
182 return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
183}
184
185static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
186 unsigned int offset, u8 *dest)
187{
188 return ec->cmd_readmem(ec, offset, 1, dest);
189}
190
191static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
192 unsigned int offset, u16 *dest)
193{
194 __le16 tmp;
195 int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
196
197 if (ret >= 0)
198 *dest = le16_to_cpu(tmp);
199
200 return ret;
201}
202
203/**
204 * cros_ec_sensors_read_until_not_busy() - read until is not busy
205 *
206 * @st: pointer to state information for device
207 *
208 * Poll the EC status byte until the busy bit is clear.
209 * Return: 8-bit status if ok, -errno on failure.
210 */
211static int cros_ec_sensors_read_until_not_busy(
212 struct cros_ec_sensors_core_state *st)
213{
214 struct cros_ec_device *ec = st->ec;
215 u8 status;
216 int ret, attempts = 0;
217
218 ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
219 if (ret < 0)
220 return ret;
221
222 while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
223 /* Give up after enough attempts, return error. */
224 if (attempts++ >= 50)
225 return -EIO;
226
227 /* Small delay every so often. */
228 if (attempts % 5 == 0)
229 msleep(25);
230
231 ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
232 &status);
233 if (ret < 0)
234 return ret;
235 }
236
237 return status;
238}
239
240/**
241 * cros_ec_sensors_read_data_unsafe() - read sensor data from EC shared memory
242 * @indio_dev: pointer to IIO device
243 * @scan_mask: bitmap of the sensor indices to scan
244 * @data: location to store data
245 *
246 * This is the unsafe function for reading the EC data. It does not guarantee
247 * that the EC will not modify the data as it is being read in.
248 *
249 * Return: 0 on success, -errno on failure.
250 */
251static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
252 unsigned long scan_mask, s16 *data)
253{
254 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
255 struct cros_ec_device *ec = st->ec;
256 unsigned int i;
257 int ret;
258
259 /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
260 for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
261 ret = cros_ec_sensors_cmd_read_u16(ec,
262 cros_ec_sensors_idx_to_reg(st, i),
263 data);
264 if (ret < 0)
265 return ret;
266
267 data++;
268 }
269
270 return 0;
271}
272
273int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
274 unsigned long scan_mask, s16 *data)
275{
276 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
277 struct cros_ec_device *ec = st->ec;
278 u8 samp_id = 0xff, status = 0;
279 int ret, attempts = 0;
280
281 /*
282 * Continually read all data from EC until the status byte after
283 * all reads reflects that the EC is not busy and the sample id
284 * matches the sample id from before all reads. This guarantees
285 * that data read in was not modified by the EC while reading.
286 */
287 while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
288 EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
289 /* If we have tried to read too many times, return error. */
290 if (attempts++ >= 5)
291 return -EIO;
292
293 /* Read status byte until EC is not busy. */
294 ret = cros_ec_sensors_read_until_not_busy(st);
295 if (ret < 0)
296 return ret;
297
298 /*
299 * Store the current sample id so that we can compare to the
300 * sample id after reading the data.
301 */
302 samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
303
304 /* Read all EC data, format it, and store it into data. */
305 ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
306 data);
307 if (ret < 0)
308 return ret;
309
310 /* Read status byte. */
311 ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
312 &status);
313 if (ret < 0)
314 return ret;
315 }
316
317 return 0;
318}
319EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
320
321int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
322 unsigned long scan_mask, s16 *data)
323{
324 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
325 int ret;
326 unsigned int i;
327
328 /* Read all sensor data through a command. */
329 st->param.cmd = MOTIONSENSE_CMD_DATA;
330 ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
331 if (ret != 0) {
332 dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
333 return ret;
334 }
335
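	/* Copy the requested axes out of the EC response, in scan-mask order. */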
336 for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
337 *data = st->resp->data.data[i];
338 data++;
339 }
340
341 return 0;
342}
343EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
344
345irqreturn_t cros_ec_sensors_capture(int irq, void *p)
346{
347 struct iio_poll_func *pf = p;
348 struct iio_dev *indio_dev = pf->indio_dev;
349 struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
350 int ret;
351
352 mutex_lock(&st->cmd_lock);
353
354 /* Clear capture data. */
355 memset(st->samples, 0, indio_dev->scan_bytes);
356
357 /* Read data based on which channels are enabled in scan mask. */
358 ret = st->read_ec_sensors_data(indio_dev,
359 *(indio_dev->active_scan_mask),
360 (s16 *)st->samples);
361 if (ret < 0)
362 goto done;
363
364 iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
365 iio_get_time_ns(indio_dev));
366
367done:
368 /*
369 * Tell the core we are done with this trigger and ready for the
370 * next one.
371 */
372 iio_trigger_notify_done(indio_dev->trig);
373
374 mutex_unlock(&st->cmd_lock);
375
376 return IRQ_HANDLED;
377}
378EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
379
380int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
381 struct iio_chan_spec const *chan,
382 int *val, int *val2, long mask)
383{
384 int ret = IIO_VAL_INT;
385
386 switch (mask) {
387 case IIO_CHAN_INFO_SAMP_FREQ:
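		/* EC_MOTION_SENSE_NO_VALUE queries the current rate without changing it. */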
388 st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
389 st->param.ec_rate.data =
390 EC_MOTION_SENSE_NO_VALUE;
391
392 if (cros_ec_motion_send_host_cmd(st, 0))
393 ret = -EIO;
394 else
395 *val = st->resp->ec_rate.ret;
396 break;
397 case IIO_CHAN_INFO_FREQUENCY:
398 st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
399 st->param.sensor_odr.data =
400 EC_MOTION_SENSE_NO_VALUE;
401
402 if (cros_ec_motion_send_host_cmd(st, 0))
403 ret = -EIO;
404 else
405 *val = st->resp->sensor_odr.ret;
406 break;
407 default:
408 break;
409 }
410
411 return ret;
412}
413EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
414
415int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
416 struct iio_chan_spec const *chan,
417 int val, int val2, long mask)
418{
419 int ret = 0;
420
421 switch (mask) {
422 case IIO_CHAN_INFO_FREQUENCY:
423 st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
424 st->param.sensor_odr.data = val;
425
426 /* Always roundup, so caller gets at least what it asks for. */
427 st->param.sensor_odr.roundup = 1;
428
429 if (cros_ec_motion_send_host_cmd(st, 0))
430 ret = -EIO;
431 break;
432 case IIO_CHAN_INFO_SAMP_FREQ:
433 st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
434 st->param.ec_rate.data = val;
435
436 if (cros_ec_motion_send_host_cmd(st, 0))
437 ret = -EIO;
438 else
439 st->curr_sampl_freq = val;
440 break;
441 default:
442 ret = -EINVAL;
443 break;
444 }
445 return ret;
446}
447EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
448
449MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
450MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..8bc2ca3c2e2e
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
@@ -0,0 +1,175 @@
1/*
2 * ChromeOS EC sensor hub
3 *
4 * Copyright (C) 2016 Google, Inc
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __CROS_EC_SENSORS_CORE_H
17#define __CROS_EC_SENSORS_CORE_H
18
19#include <linux/irqreturn.h>
20
21enum {
22 CROS_EC_SENSOR_X,
23 CROS_EC_SENSOR_Y,
24 CROS_EC_SENSOR_Z,
25 CROS_EC_SENSOR_MAX_AXIS,
26};
27
28/* EC returns sensor values using signed 16 bit registers */
29#define CROS_EC_SENSOR_BITS 16
30
31/*
 32 * Up to four 16-bit data channels fit, in addition to the 64-bit timestamp.
 33 * That is enough for current sensors, which use at most three 16-bit axes.
34 */
35#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
36
37/* Minimum sampling period to use when device is suspending */
38#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
39
40/**
41 * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
42 * @ec: cros EC device structure
43 * @cmd_lock: lock used to prevent simultaneous access to the
44 * commands.
45 * @msg: cros EC command structure
46 * @param: motion sensor parameters structure
47 * @resp: motion sensor response structure
48 * @type: type of motion sensor
49 * @loc: location where the motion sensor is placed
 50 * @calib:			calibration parameters. Note that data captured
 51 *				through the triggered buffer is always
 52 *				calibrated
53 * @samples: static array to hold data from a single capture.
54 * For each channel we need 2 bytes, except for
55 * the timestamp. The timestamp is always last and
56 * is always 8-byte aligned.
57 * @read_ec_sensors_data: function used for accessing sensors values
 58 * @curr_sampl_freq:		current sampling period
59 */
60struct cros_ec_sensors_core_state {
61 struct cros_ec_device *ec;
62 struct mutex cmd_lock;
63
64 struct cros_ec_command *msg;
65 struct ec_params_motion_sense param;
66 struct ec_response_motion_sense *resp;
67
68 enum motionsensor_type type;
69 enum motionsensor_location loc;
70
71 s16 calib[CROS_EC_SENSOR_MAX_AXIS];
72
73 u8 samples[CROS_EC_SAMPLE_SIZE];
74
75 int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
76 unsigned long scan_mask, s16 *data);
77
78 int curr_sampl_freq;
79};
80
81/**
82 * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
83 * @indio_dev: pointer to IIO device
84 * @scan_mask: bitmap of the sensor indices to scan
85 * @data: location to store data
86 *
87 * This is the safe function for reading the EC data. It guarantees that the
88 * data sampled was not modified by the EC while being read.
89 *
90 * Return: 0 on success, -errno on failure.
91 */
92int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
93 s16 *data);
94
95/**
96 * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
97 * @indio_dev: pointer to IIO device
98 * @scan_mask: bitmap of the sensor indices to scan
99 * @data: location to store data
100 *
101 * Return: 0 on success, -errno on failure.
102 */
103int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
104 s16 *data);
105
106/**
107 * cros_ec_sensors_core_init() - basic initialization of the core structure
108 * @pdev: platform device created for the sensors
109 * @indio_dev: iio device structure of the device
110 * @physical_device: true if the device refers to a physical device
111 *
112 * Return: 0 on success, -errno on failure.
113 */
114int cros_ec_sensors_core_init(struct platform_device *pdev,
115 struct iio_dev *indio_dev, bool physical_device);
116
117/**
118 * cros_ec_sensors_capture() - the trigger handler function
119 * @irq: the interrupt number.
120 * @p: a pointer to the poll function.
121 *
122 * On a trigger event occurring, if the pollfunc is attached then this
123 * handler is called as a threaded interrupt (and hence may sleep). It
124 * is responsible for grabbing data from the device and pushing it into
125 * the associated buffer.
126 *
127 * Return: IRQ_HANDLED
128 */
129irqreturn_t cros_ec_sensors_capture(int irq, void *p);
130
131/**
132 * cros_ec_motion_send_host_cmd() - send motion sense host command
133 * @st: pointer to state information for device
134 * @opt_length: optional length to reduce the response size, useful on the data
135 *		path. Otherwise, the maximum allowed response size is used
136 *
137 * When called, the sub-command is assumed to be set in param->cmd.
138 *
139 * Return: 0 on success, -errno on failure.
140 */
141int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
142 u16 opt_length);
143
144/**
145 * cros_ec_sensors_core_read() - function to request a value from the sensor
146 * @st: pointer to state information for device
147 * @chan: channel specification structure table
148 * @val: will contain one element making up the returned value
149 * @val2: will contain another element making up the returned value
150 * @mask: specifies which values to be requested
151 *
152 * Return: the type of value returned by the device
153 */
154int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
155 struct iio_chan_spec const *chan,
156 int *val, int *val2, long mask);
157
158/**
159 * cros_ec_sensors_core_write() - function to write a value to the sensor
160 * @st: pointer to state information for device
161 * @chan: channel specification structure table
162 * @val: first part of value to write
163 * @val2: second part of value to write
164 * @mask: specifies which values to write
165 *
166 * Return: 0 on success, -errno on failure
167 */
168int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
169 struct iio_chan_spec const *chan,
170 int val, int val2, long mask);
171
172/* List of extended channel specification for all sensors */
173extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
174
175#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53d6f6..7ef94a90ecf7 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -201,7 +201,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
201 int ret; 201 int ret;
202 202
203 if (val1 < 0 || val2 < 0) 203 if (val1 < 0 || val2 < 0)
204 ret = -EINVAL; 204 return -EINVAL;
205 205
206 value = val1 * pow_10(6) + val2; 206 value = val1 * pow_10(6) + val2;
207 if (value) { 207 if (value) {
@@ -250,6 +250,9 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
250 s32 value; 250 s32 value;
251 int ret; 251 int ret;
252 252
253 if (val1 < 0 || val2 < 0)
254 return -EINVAL;
255
253 value = convert_to_vtf_format(st->sensitivity.size, 256 value = convert_to_vtf_format(st->sensitivity.size,
254 st->sensitivity.unit_expo, 257 st->sensitivity.unit_expo,
255 val1, val2); 258 val1, val2);
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
new file mode 100644
index 000000000000..2d2ee353dde7
--- /dev/null
+++ b/drivers/iio/counter/104-quad-8.c
@@ -0,0 +1,593 @@
1/*
2 * IIO driver for the ACCES 104-QUAD-8
3 * Copyright (C) 2016 William Breathitt Gray
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
15 */
16#include <linux/bitops.h>
17#include <linux/device.h>
18#include <linux/errno.h>
19#include <linux/iio/iio.h>
20#include <linux/iio/types.h>
21#include <linux/io.h>
22#include <linux/ioport.h>
23#include <linux/isa.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/types.h>
28
29#define QUAD8_EXTENT 32
30
31static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
32static unsigned int num_quad8;
33module_param_array(base, uint, &num_quad8, 0);
34MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
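/* Example (hypothetical addresses): insmod 104-quad-8.ko base=0x300,0x320 */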
35
36#define QUAD8_NUM_COUNTERS 8
37
38/**
39 * struct quad8_iio - IIO device private data structure
40 * @preset: array of preset values
41 * @count_mode: array of count mode configurations
42 * @quadrature_mode: array of quadrature mode configurations
43 * @quadrature_scale: array of quadrature mode scale configurations
44 * @ab_enable: array of A and B inputs enable configurations
45 * @preset_enable: array of set_to_preset_on_index attribute configurations
46 * @synchronous_mode: array of index function synchronous mode configurations
47 * @index_polarity: array of index function polarity configurations
48 * @base: base port address of the IIO device
49 */
50struct quad8_iio {
51 unsigned int preset[QUAD8_NUM_COUNTERS];
52 unsigned int count_mode[QUAD8_NUM_COUNTERS];
53 unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
54 unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
55 unsigned int ab_enable[QUAD8_NUM_COUNTERS];
56 unsigned int preset_enable[QUAD8_NUM_COUNTERS];
57 unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
58 unsigned int index_polarity[QUAD8_NUM_COUNTERS];
59 unsigned int base;
60};
61
62static int quad8_read_raw(struct iio_dev *indio_dev,
63 struct iio_chan_spec const *chan, int *val, int *val2, long mask)
64{
65 struct quad8_iio *const priv = iio_priv(indio_dev);
66 const int base_offset = priv->base + 2 * chan->channel;
67 unsigned int flags;
68 unsigned int borrow;
69 unsigned int carry;
70 int i;
71
72 switch (mask) {
73 case IIO_CHAN_INFO_RAW:
74 if (chan->type == IIO_INDEX) {
75 *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
76 return IIO_VAL_INT;
77 }
78
79 flags = inb(base_offset);
80 borrow = flags & BIT(0);
81 carry = !!(flags & BIT(1));
82
83 /* Borrow XOR Carry effectively doubles count range */
84 *val = (borrow ^ carry) << 24;
85
86 /* Reset Byte Pointer; transfer Counter to Output Latch */
87 outb(0x11, base_offset + 1);
88
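		/* Read the 24-bit count one byte at a time, least significant byte first. */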
89 for (i = 0; i < 3; i++)
90 *val |= (unsigned int)inb(base_offset) << (8 * i);
91
92 return IIO_VAL_INT;
93 case IIO_CHAN_INFO_ENABLE:
94 *val = priv->ab_enable[chan->channel];
95 return IIO_VAL_INT;
96 case IIO_CHAN_INFO_SCALE:
97 *val = 1;
98 *val2 = priv->quadrature_scale[chan->channel];
99 return IIO_VAL_FRACTIONAL_LOG2;
100 }
101
102 return -EINVAL;
103}
104
105static int quad8_write_raw(struct iio_dev *indio_dev,
106 struct iio_chan_spec const *chan, int val, int val2, long mask)
107{
108 struct quad8_iio *const priv = iio_priv(indio_dev);
109 const int base_offset = priv->base + 2 * chan->channel;
110 int i;
111 unsigned int ior_cfg;
112
113 switch (mask) {
114 case IIO_CHAN_INFO_RAW:
115 if (chan->type == IIO_INDEX)
116 return -EINVAL;
117
118 /* Only 24-bit values are supported */
119 if ((unsigned int)val > 0xFFFFFF)
120 return -EINVAL;
121
122 /* Reset Byte Pointer */
123 outb(0x01, base_offset + 1);
124
125 /* Counter can only be set via Preset Register */
126 for (i = 0; i < 3; i++)
127 outb(val >> (8 * i), base_offset);
128
129 /* Transfer Preset Register to Counter */
130 outb(0x08, base_offset + 1);
131
132 /* Reset Byte Pointer */
133 outb(0x01, base_offset + 1);
134
135 /* Set Preset Register back to original value */
136 val = priv->preset[chan->channel];
137 for (i = 0; i < 3; i++)
138 outb(val >> (8 * i), base_offset);
139
140 /* Reset Borrow, Carry, Compare, and Sign flags */
141 outb(0x02, base_offset + 1);
142 /* Reset Error flag */
143 outb(0x06, base_offset + 1);
144
145 return 0;
146 case IIO_CHAN_INFO_ENABLE:
147 /* only boolean values accepted */
148 if (val < 0 || val > 1)
149 return -EINVAL;
150
151 priv->ab_enable[chan->channel] = val;
152
153 ior_cfg = val | priv->preset_enable[chan->channel] << 1;
154
155 /* Load I/O control configuration */
156 outb(0x40 | ior_cfg, base_offset);
157
158 return 0;
159 case IIO_CHAN_INFO_SCALE:
160 /* Quadrature scaling only available in quadrature mode */
161 if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
162 return -EINVAL;
163
164 /* Only three gain states (1, 0.5, 0.25) */
165 if (val == 1 && !val2)
166 priv->quadrature_scale[chan->channel] = 0;
167 else if (!val)
168 switch (val2) {
169 case 500000:
170 priv->quadrature_scale[chan->channel] = 1;
171 break;
172 case 250000:
173 priv->quadrature_scale[chan->channel] = 2;
174 break;
175 default:
176 return -EINVAL;
177 }
178 else
179 return -EINVAL;
180
181 return 0;
182 }
183
184 return -EINVAL;
185}
186
187static const struct iio_info quad8_info = {
188 .driver_module = THIS_MODULE,
189 .read_raw = quad8_read_raw,
190 .write_raw = quad8_write_raw
191};
192
193static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
194 const struct iio_chan_spec *chan, char *buf)
195{
196 const struct quad8_iio *const priv = iio_priv(indio_dev);
197
198 return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
199}
200
201static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
202 const struct iio_chan_spec *chan, const char *buf, size_t len)
203{
204 struct quad8_iio *const priv = iio_priv(indio_dev);
205 const int base_offset = priv->base + 2 * chan->channel;
206 unsigned int preset;
207 int ret;
208 int i;
209
210 ret = kstrtouint(buf, 0, &preset);
211 if (ret)
212 return ret;
213
214 /* Only 24-bit values are supported */
215 if (preset > 0xFFFFFF)
216 return -EINVAL;
217
218 priv->preset[chan->channel] = preset;
219
220 /* Reset Byte Pointer */
221 outb(0x01, base_offset + 1);
222
223 /* Set Preset Register */
224 for (i = 0; i < 3; i++)
225 outb(preset >> (8 * i), base_offset);
226
227 return len;
228}
229
230static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
231 uintptr_t private, const struct iio_chan_spec *chan, char *buf)
232{
233 const struct quad8_iio *const priv = iio_priv(indio_dev);
234
235 return snprintf(buf, PAGE_SIZE, "%u\n",
236 priv->preset_enable[chan->channel]);
237}
238
239static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
240 uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
241 size_t len)
242{
243 struct quad8_iio *const priv = iio_priv(indio_dev);
244 const int base_offset = priv->base + 2 * chan->channel;
245 bool preset_enable;
246 int ret;
247 unsigned int ior_cfg;
248
249 ret = kstrtobool(buf, &preset_enable);
250 if (ret)
251 return ret;
252
253 priv->preset_enable[chan->channel] = preset_enable;
254
255 ior_cfg = priv->ab_enable[chan->channel] |
256 (unsigned int)preset_enable << 1;
257
258 /* Load I/O control configuration to Input / Output Control Register */
259 outb(0x40 | ior_cfg, base_offset);
260
261 return len;
262}
263
264static const char *const quad8_noise_error_states[] = {
265 "No excessive noise is present at the count inputs",
266 "Excessive noise is present at the count inputs"
267};
268
269static int quad8_get_noise_error(struct iio_dev *indio_dev,
270 const struct iio_chan_spec *chan)
271{
272 struct quad8_iio *const priv = iio_priv(indio_dev);
273 const int base_offset = priv->base + 2 * chan->channel + 1;
274
275 return !!(inb(base_offset) & BIT(4));
276}
277
278static const struct iio_enum quad8_noise_error_enum = {
279 .items = quad8_noise_error_states,
280 .num_items = ARRAY_SIZE(quad8_noise_error_states),
281 .get = quad8_get_noise_error
282};
283
284static const char *const quad8_count_direction_states[] = {
285 "down",
286 "up"
287};
288
289static int quad8_get_count_direction(struct iio_dev *indio_dev,
290 const struct iio_chan_spec *chan)
291{
292 struct quad8_iio *const priv = iio_priv(indio_dev);
293 const int base_offset = priv->base + 2 * chan->channel + 1;
294
295 return !!(inb(base_offset) & BIT(5));
296}
297
298static const struct iio_enum quad8_count_direction_enum = {
299 .items = quad8_count_direction_states,
300 .num_items = ARRAY_SIZE(quad8_count_direction_states),
301 .get = quad8_get_count_direction
302};
303
304static const char *const quad8_count_modes[] = {
305 "normal",
306 "range limit",
307 "non-recycle",
308 "modulo-n"
309};
310
311static int quad8_set_count_mode(struct iio_dev *indio_dev,
312 const struct iio_chan_spec *chan, unsigned int count_mode)
313{
314 struct quad8_iio *const priv = iio_priv(indio_dev);
315 unsigned int mode_cfg = count_mode << 1;
316 const int base_offset = priv->base + 2 * chan->channel + 1;
317
318 priv->count_mode[chan->channel] = count_mode;
319
320 /* Add quadrature mode configuration */
321 if (priv->quadrature_mode[chan->channel])
322 mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
323
324 /* Load mode configuration to Counter Mode Register */
325 outb(0x20 | mode_cfg, base_offset);
326
327 return 0;
328}
329
330static int quad8_get_count_mode(struct iio_dev *indio_dev,
331 const struct iio_chan_spec *chan)
332{
333 const struct quad8_iio *const priv = iio_priv(indio_dev);
334
335 return priv->count_mode[chan->channel];
336}
337
338static const struct iio_enum quad8_count_mode_enum = {
339 .items = quad8_count_modes,
340 .num_items = ARRAY_SIZE(quad8_count_modes),
341 .set = quad8_set_count_mode,
342 .get = quad8_get_count_mode
343};
344
345static const char *const quad8_synchronous_modes[] = {
346 "non-synchronous",
347 "synchronous"
348};
349
350static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
351 const struct iio_chan_spec *chan, unsigned int synchronous_mode)
352{
353 struct quad8_iio *const priv = iio_priv(indio_dev);
354 const unsigned int idr_cfg = synchronous_mode |
355 priv->index_polarity[chan->channel] << 1;
356 const int base_offset = priv->base + 2 * chan->channel + 1;
357
358 /* Index function must be non-synchronous in non-quadrature mode */
359 if (synchronous_mode && !priv->quadrature_mode[chan->channel])
360 return -EINVAL;
361
362 priv->synchronous_mode[chan->channel] = synchronous_mode;
363
364 /* Load Index Control configuration to Index Control Register */
365 outb(0x40 | idr_cfg, base_offset);
366
367 return 0;
368}
369
370static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
371 const struct iio_chan_spec *chan)
372{
373 const struct quad8_iio *const priv = iio_priv(indio_dev);
374
375 return priv->synchronous_mode[chan->channel];
376}
377
378static const struct iio_enum quad8_synchronous_mode_enum = {
379 .items = quad8_synchronous_modes,
380 .num_items = ARRAY_SIZE(quad8_synchronous_modes),
381 .set = quad8_set_synchronous_mode,
382 .get = quad8_get_synchronous_mode
383};
384
385static const char *const quad8_quadrature_modes[] = {
386 "non-quadrature",
387 "quadrature"
388};
389
390static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
391 const struct iio_chan_spec *chan, unsigned int quadrature_mode)
392{
393 struct quad8_iio *const priv = iio_priv(indio_dev);
394 unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
395 const int base_offset = priv->base + 2 * chan->channel + 1;
396
397 if (quadrature_mode)
398 mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
399 else {
400 /* Quadrature scaling only available in quadrature mode */
401 priv->quadrature_scale[chan->channel] = 0;
402
403 /* Synchronous function not supported in non-quadrature mode */
404 if (priv->synchronous_mode[chan->channel])
405 quad8_set_synchronous_mode(indio_dev, chan, 0);
406 }
407
408 priv->quadrature_mode[chan->channel] = quadrature_mode;
409
410 /* Load mode configuration to Counter Mode Register */
411 outb(0x20 | mode_cfg, base_offset);
412
413 return 0;
414}
415
416static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
417 const struct iio_chan_spec *chan)
418{
419 const struct quad8_iio *const priv = iio_priv(indio_dev);
420
421 return priv->quadrature_mode[chan->channel];
422}
423
424static const struct iio_enum quad8_quadrature_mode_enum = {
425 .items = quad8_quadrature_modes,
426 .num_items = ARRAY_SIZE(quad8_quadrature_modes),
427 .set = quad8_set_quadrature_mode,
428 .get = quad8_get_quadrature_mode
429};
430
431static const char *const quad8_index_polarity_modes[] = {
432 "negative",
433 "positive"
434};
435
436static int quad8_set_index_polarity(struct iio_dev *indio_dev,
437 const struct iio_chan_spec *chan, unsigned int index_polarity)
438{
439 struct quad8_iio *const priv = iio_priv(indio_dev);
440 const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
441 index_polarity << 1;
442 const int base_offset = priv->base + 2 * chan->channel + 1;
443
444 priv->index_polarity[chan->channel] = index_polarity;
445
446 /* Load Index Control configuration to Index Control Register */
447 outb(0x40 | idr_cfg, base_offset);
448
449 return 0;
450}
451
452static int quad8_get_index_polarity(struct iio_dev *indio_dev,
453 const struct iio_chan_spec *chan)
454{
455 const struct quad8_iio *const priv = iio_priv(indio_dev);
456
457 return priv->index_polarity[chan->channel];
458}
459
460static const struct iio_enum quad8_index_polarity_enum = {
461 .items = quad8_index_polarity_modes,
462 .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
463 .set = quad8_set_index_polarity,
464 .get = quad8_get_index_polarity
465};
466
467static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
468 {
469 .name = "preset",
470 .shared = IIO_SEPARATE,
471 .read = quad8_read_preset,
472 .write = quad8_write_preset
473 },
474 {
475 .name = "set_to_preset_on_index",
476 .shared = IIO_SEPARATE,
477 .read = quad8_read_set_to_preset_on_index,
478 .write = quad8_write_set_to_preset_on_index
479 },
480 IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
481 IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
482 IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
483 IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
484 IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
485 IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
486 IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
487 IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
488 {}
489};
490
491static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
492 IIO_ENUM("synchronous_mode", IIO_SEPARATE,
493 &quad8_synchronous_mode_enum),
494 IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
495 IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
496 IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
497 {}
498};
499
500#define QUAD8_COUNT_CHAN(_chan) { \
501 .type = IIO_COUNT, \
502 .channel = (_chan), \
503 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
504 BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
505 .ext_info = quad8_count_ext_info, \
506 .indexed = 1 \
507}
508
509#define QUAD8_INDEX_CHAN(_chan) { \
510 .type = IIO_INDEX, \
511 .channel = (_chan), \
512 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
513 .ext_info = quad8_index_ext_info, \
514 .indexed = 1 \
515}
516
517static const struct iio_chan_spec quad8_channels[] = {
518 QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
519 QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
520 QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
521 QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
522 QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
523 QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
524 QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
525 QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
526};
527
528static int quad8_probe(struct device *dev, unsigned int id)
529{
530 struct iio_dev *indio_dev;
531 struct quad8_iio *priv;
532 int i, j;
533 unsigned int base_offset;
534
535 indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
536 if (!indio_dev)
537 return -ENOMEM;
538
539 if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
540 dev_name(dev))) {
541 dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
542 base[id], base[id] + QUAD8_EXTENT);
543 return -EBUSY;
544 }
545
546 indio_dev->info = &quad8_info;
547 indio_dev->modes = INDIO_DIRECT_MODE;
548 indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
549 indio_dev->channels = quad8_channels;
550 indio_dev->name = dev_name(dev);
551
552 priv = iio_priv(indio_dev);
553 priv->base = base[id];
554
555 /* Reset all counters and disable interrupt function */
556 outb(0x01, base[id] + 0x11);
557 /* Set initial configuration for all counters */
558 for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
559 base_offset = base[id] + 2 * i;
560 /* Reset Byte Pointer */
561 outb(0x01, base_offset + 1);
562 /* Reset Preset Register */
563 for (j = 0; j < 3; j++)
564 outb(0x00, base_offset);
565 /* Reset Borrow, Carry, Compare, and Sign flags */
566 outb(0x04, base_offset + 1);
567 /* Reset Error flag */
568 outb(0x06, base_offset + 1);
569 /* Binary encoding; Normal count; non-quadrature mode */
570 outb(0x20, base_offset + 1);
571 /* Disable A and B inputs; preset on index; FLG1 as Carry */
572 outb(0x40, base_offset + 1);
573 /* Disable index function; negative index polarity */
574 outb(0x60, base_offset + 1);
575 }
576 /* Enable all counters */
577 outb(0x00, base[id] + 0x11);
578
579 return devm_iio_device_register(dev, indio_dev);
580}
581
582static struct isa_driver quad8_driver = {
583 .probe = quad8_probe,
584 .driver = {
585 .name = "104-quad-8"
586 }
587};
588
589module_isa_driver(quad8_driver, num_quad8);
590
591MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
592MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
593MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
new file mode 100644
index 000000000000..44627f6e4861
--- /dev/null
+++ b/drivers/iio/counter/Kconfig
@@ -0,0 +1,24 @@
1#
2# Counter devices
3#
4# When adding new entries keep the list in alphabetical order
5
6menu "Counters"
7
8config 104_QUAD_8
9 tristate "ACCES 104-QUAD-8 driver"
10 depends on X86 && ISA_BUS_API
11 help
12 Say yes here to build support for the ACCES 104-QUAD-8 quadrature
13 encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
14
15 Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
16 also clears the counter's respective error flag. Although the counters
17 have a 25-bit range, only the lower 24 bits may be set, either directly
18 or via a counter's preset attribute. Interrupts are not supported by
19 this driver.
20
21 The base port addresses for the devices may be configured via the base
22 array module parameter.
23
24endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
new file mode 100644
index 000000000000..007e88411648
--- /dev/null
+++ b/drivers/iio/counter/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for IIO counter devices
3#
4
5# When adding new entries keep the list in alphabetical order
6
7obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 120b24478469..d3084028905b 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -200,6 +200,16 @@ config AD8801
200 To compile this driver as a module choose M here: the module will be called 200 To compile this driver as a module choose M here: the module will be called
201 ad8801. 201 ad8801.
202 202
203config DPOT_DAC
204 tristate "DAC emulation using a DPOT"
205 depends on OF
206 help
207 Say yes here to build support for DAC emulation using a digital
208 potentiometer.
209
210 To compile this driver as a module, choose M here: the module will be
211 called dpot-dac.
212
203config LPC18XX_DAC 213config LPC18XX_DAC
204 tristate "NXP LPC18xx DAC driver" 214 tristate "NXP LPC18xx DAC driver"
205 depends on ARCH_LPC18XX || COMPILE_TEST 215 depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 27642bbf75f2..f01bf4a99867 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AD5686) += ad5686.o
22obj-$(CONFIG_AD7303) += ad7303.o 22obj-$(CONFIG_AD7303) += ad7303.o
23obj-$(CONFIG_AD8801) += ad8801.o 23obj-$(CONFIG_AD8801) += ad8801.o
24obj-$(CONFIG_CIO_DAC) += cio-dac.o 24obj-$(CONFIG_CIO_DAC) += cio-dac.o
25obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
25obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o 26obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
26obj-$(CONFIG_M62332) += m62332.o 27obj-$(CONFIG_M62332) += m62332.o
27obj-$(CONFIG_MAX517) += max517.o 28obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 0b235a2c7359..6eed5b7729be 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -17,7 +17,7 @@
17#define AD5592R_GPIO_READBACK_EN BIT(10) 17#define AD5592R_GPIO_READBACK_EN BIT(10)
18#define AD5592R_LDAC_READBACK_EN BIT(6) 18#define AD5592R_LDAC_READBACK_EN BIT(6)
19 19
20static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf) 20static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, __be16 *buf)
21{ 21{
22 struct spi_device *spi = container_of(st->dev, struct spi_device, dev); 22 struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
23 struct spi_transfer t = { 23 struct spi_transfer t = {
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
new file mode 100644
index 000000000000..960a2b430480
--- /dev/null
+++ b/drivers/iio/dac/dpot-dac.c
@@ -0,0 +1,266 @@
1/*
2 * IIO DAC emulation driver using a digital potentiometer
3 *
4 * Copyright (C) 2016 Axentia Technologies AB
5 *
6 * Author: Peter Rosin <peda@axentia.se>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 * It is assumed that the dpot is used as a voltage divider between the
15 * current dpot wiper setting and the maximum resistance of the dpot. The
16 * divided voltage is provided by a vref regulator.
17 *
18 * .------.
19 * .-----------. | |
20 * | vref |--' .---.
21 * | regulator |--. | |
22 * '-----------' | | d |
23 * | | p |
24 * | | o | wiper
25 * | | t |<---------+
26 * | | |
27 * | '---' dac output voltage
28 * | |
29 * '------+------------+
30 */
31
32#include <linux/err.h>
33#include <linux/iio/consumer.h>
34#include <linux/iio/iio.h>
35#include <linux/module.h>
36#include <linux/of.h>
37#include <linux/platform_device.h>
38#include <linux/regulator/consumer.h>
39
40struct dpot_dac {
41 struct regulator *vref;
42 struct iio_channel *dpot;
43 u32 max_ohms;
44};
45
46static const struct iio_chan_spec dpot_dac_iio_channel = {
47 .type = IIO_VOLTAGE,
48 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
49 | BIT(IIO_CHAN_INFO_SCALE),
50 .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
51 .output = 1,
52 .indexed = 1,
53};
54
55static int dpot_dac_read_raw(struct iio_dev *indio_dev,
56 struct iio_chan_spec const *chan,
57 int *val, int *val2, long mask)
58{
59 struct dpot_dac *dac = iio_priv(indio_dev);
60 int ret;
61 unsigned long long tmp;
62
63 switch (mask) {
64 case IIO_CHAN_INFO_RAW:
65 return iio_read_channel_raw(dac->dpot, val);
66
67 case IIO_CHAN_INFO_SCALE:
68 ret = iio_read_channel_scale(dac->dpot, val, val2);
69 switch (ret) {
70 case IIO_VAL_FRACTIONAL_LOG2:
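			/*
			 * New scale = dpot scale * vref[mV] / max_ohms; the
			 * 10^9 factor preserves precision in the integer divisions.
			 */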
71 tmp = *val * 1000000000LL;
72 do_div(tmp, dac->max_ohms);
73 tmp *= regulator_get_voltage(dac->vref) / 1000;
74 do_div(tmp, 1000000000LL);
75 *val = tmp;
76 return ret;
77 case IIO_VAL_INT:
78 /*
79 * Convert integer scale to fractional scale by
80 * setting the denominator (val2) to one...
81 */
82 *val2 = 1;
83 ret = IIO_VAL_FRACTIONAL;
84 /* ...and fall through. */
85 case IIO_VAL_FRACTIONAL:
86 *val *= regulator_get_voltage(dac->vref) / 1000;
87 *val2 *= dac->max_ohms;
88 break;
89 }
90
91 return ret;
92 }
93
94 return -EINVAL;
95}
96
97static int dpot_dac_read_avail(struct iio_dev *indio_dev,
98 struct iio_chan_spec const *chan,
99 const int **vals, int *type, int *length,
100 long mask)
101{
102 struct dpot_dac *dac = iio_priv(indio_dev);
103
104 switch (mask) {
105 case IIO_CHAN_INFO_RAW:
106 *type = IIO_VAL_INT;
107 return iio_read_avail_channel_raw(dac->dpot, vals, length);
108 }
109
110 return -EINVAL;
111}
112
113static int dpot_dac_write_raw(struct iio_dev *indio_dev,
114 struct iio_chan_spec const *chan,
115 int val, int val2, long mask)
116{
117 struct dpot_dac *dac = iio_priv(indio_dev);
118
119 switch (mask) {
120 case IIO_CHAN_INFO_RAW:
121 return iio_write_channel_raw(dac->dpot, val);
122 }
123
124 return -EINVAL;
125}
126
127static const struct iio_info dpot_dac_info = {
128 .read_raw = dpot_dac_read_raw,
129 .read_avail = dpot_dac_read_avail,
130 .write_raw = dpot_dac_write_raw,
131 .driver_module = THIS_MODULE,
132};
133
134static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
135{
136 struct device *dev = &indio_dev->dev;
137 struct dpot_dac *dac = iio_priv(indio_dev);
138 unsigned long long tmp;
139 int ret;
140 int val;
141 int val2;
142 int max;
143
144 ret = iio_read_max_channel_raw(dac->dpot, &max);
145 if (ret < 0) {
146 dev_err(dev, "dpot does not indicate its raw maximum value\n");
147 return ret;
148 }
149
150 switch (iio_read_channel_scale(dac->dpot, &val, &val2)) {
151 case IIO_VAL_INT:
152 return max * val;
153 case IIO_VAL_FRACTIONAL:
154 tmp = (unsigned long long)max * val;
155 do_div(tmp, val2);
156 return tmp;
157 case IIO_VAL_FRACTIONAL_LOG2:
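		/* Computes max * val / 2^val2, scaled by 10^9 to limit rounding error. */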
158 tmp = val * 1000000000LL * max >> val2;
159 do_div(tmp, 1000000000LL);
160 return tmp;
161 default:
162 dev_err(dev, "dpot has a scale that is too weird\n");
163 }
164
165 return -EINVAL;
166}
167
168static int dpot_dac_probe(struct platform_device *pdev)
169{
170 struct device *dev = &pdev->dev;
171 struct iio_dev *indio_dev;
172 struct dpot_dac *dac;
173 enum iio_chan_type type;
174 int ret;
175
176 indio_dev = devm_iio_device_alloc(dev, sizeof(*dac));
177 if (!indio_dev)
178 return -ENOMEM;
179
180 platform_set_drvdata(pdev, indio_dev);
181 dac = iio_priv(indio_dev);
182
183 indio_dev->name = dev_name(dev);
184 indio_dev->dev.parent = dev;
185 indio_dev->info = &dpot_dac_info;
186 indio_dev->modes = INDIO_DIRECT_MODE;
187 indio_dev->channels = &dpot_dac_iio_channel;
188 indio_dev->num_channels = 1;
189
190 dac->vref = devm_regulator_get(dev, "vref");
191 if (IS_ERR(dac->vref)) {
192 if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
193 dev_err(&pdev->dev, "failed to get vref regulator\n");
194 return PTR_ERR(dac->vref);
195 }
196
197 dac->dpot = devm_iio_channel_get(dev, "dpot");
198 if (IS_ERR(dac->dpot)) {
199 if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
200 dev_err(dev, "failed to get dpot input channel\n");
201 return PTR_ERR(dac->dpot);
202 }
203
204 ret = iio_get_channel_type(dac->dpot, &type);
205 if (ret < 0)
206 return ret;
207
208 if (type != IIO_RESISTANCE) {
209 dev_err(dev, "dpot is of the wrong type\n");
210 return -EINVAL;
211 }
212
213 ret = dpot_dac_channel_max_ohms(indio_dev);
214 if (ret < 0)
215 return ret;
216 dac->max_ohms = ret;
217
218 ret = regulator_enable(dac->vref);
219 if (ret) {
220 dev_err(dev, "failed to enable the vref regulator\n");
221 return ret;
222 }
223
224 ret = iio_device_register(indio_dev);
225 if (ret) {
226 dev_err(dev, "failed to register iio device\n");
227 goto disable_reg;
228 }
229
230 return 0;
231
232disable_reg:
233 regulator_disable(dac->vref);
234 return ret;
235}
236
237static int dpot_dac_remove(struct platform_device *pdev)
238{
239 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
240 struct dpot_dac *dac = iio_priv(indio_dev);
241
242 iio_device_unregister(indio_dev);
243 regulator_disable(dac->vref);
244
245 return 0;
246}
247
248static const struct of_device_id dpot_dac_match[] = {
249 { .compatible = "dpot-dac" },
250 { /* sentinel */ }
251};
252MODULE_DEVICE_TABLE(of, dpot_dac_match);
253
254static struct platform_driver dpot_dac_driver = {
255 .probe = dpot_dac_probe,
256 .remove = dpot_dac_remove,
257 .driver = {
258 .name = "iio-dpot-dac",
259 .of_match_table = dpot_dac_match,
260 },
261};
262module_platform_driver(dpot_dac_driver);
263
264MODULE_DESCRIPTION("DAC emulation driver using a digital potentiometer");
265MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
266MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index cca935c06f2b..db109f0cdd8c 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -18,6 +18,8 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/regulator/consumer.h>
22#include <linux/of.h>
21 23
22#include <linux/iio/iio.h> 24#include <linux/iio/iio.h>
23#include <linux/iio/sysfs.h> 25#include <linux/iio/sysfs.h>
@@ -26,12 +28,20 @@
26 28
27#define MCP4725_DRV_NAME "mcp4725" 29#define MCP4725_DRV_NAME "mcp4725"
28 30
31#define MCP472X_REF_VDD 0x00
32#define MCP472X_REF_VREF_UNBUFFERED 0x02
33#define MCP472X_REF_VREF_BUFFERED 0x03
34
29struct mcp4725_data { 35struct mcp4725_data {
30 struct i2c_client *client; 36 struct i2c_client *client;
31 u16 vref_mv; 37 int id;
38 unsigned ref_mode;
39 bool vref_buffered;
32 u16 dac_value; 40 u16 dac_value;
33 bool powerdown; 41 bool powerdown;
34 unsigned powerdown_mode; 42 unsigned powerdown_mode;
43 struct regulator *vdd_reg;
44 struct regulator *vref_reg;
35}; 45};
36 46
37static int mcp4725_suspend(struct device *dev) 47static int mcp4725_suspend(struct device *dev)
@@ -86,6 +96,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
86 return 0; 96 return 0;
87 97
88 inoutbuf[0] = 0x60; /* write EEPROM */ 98 inoutbuf[0] = 0x60; /* write EEPROM */
99 inoutbuf[0] |= data->ref_mode << 3;
89 inoutbuf[1] = data->dac_value >> 4; 100 inoutbuf[1] = data->dac_value >> 4;
90 inoutbuf[2] = (data->dac_value & 0xf) << 4; 101 inoutbuf[2] = (data->dac_value & 0xf) << 4;
91 102
@@ -278,18 +289,49 @@ static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
278 return 0; 289 return 0;
279} 290}
280 291
292static int mcp4726_set_cfg(struct iio_dev *indio_dev)
293{
294 struct mcp4725_data *data = iio_priv(indio_dev);
295 u8 outbuf[3];
296 int ret;
297
298 outbuf[0] = 0x40;
299 outbuf[0] |= data->ref_mode << 3;
300 if (data->powerdown)
301 outbuf[0] |= data->powerdown << 1;
302 outbuf[1] = data->dac_value >> 4;
303 outbuf[2] = (data->dac_value & 0xf) << 4;
304
305 ret = i2c_master_send(data->client, outbuf, 3);
306 if (ret < 0)
307 return ret;
308 else if (ret != 3)
309 return -EIO;
310 else
311 return 0;
312}
313
281static int mcp4725_read_raw(struct iio_dev *indio_dev, 314static int mcp4725_read_raw(struct iio_dev *indio_dev,
282 struct iio_chan_spec const *chan, 315 struct iio_chan_spec const *chan,
283 int *val, int *val2, long mask) 316 int *val, int *val2, long mask)
284{ 317{
285 struct mcp4725_data *data = iio_priv(indio_dev); 318 struct mcp4725_data *data = iio_priv(indio_dev);
319 int ret;
286 320
287 switch (mask) { 321 switch (mask) {
288 case IIO_CHAN_INFO_RAW: 322 case IIO_CHAN_INFO_RAW:
289 *val = data->dac_value; 323 *val = data->dac_value;
290 return IIO_VAL_INT; 324 return IIO_VAL_INT;
291 case IIO_CHAN_INFO_SCALE: 325 case IIO_CHAN_INFO_SCALE:
292 *val = data->vref_mv; 326 if (data->ref_mode == MCP472X_REF_VDD)
327 ret = regulator_get_voltage(data->vdd_reg);
328 else
329 ret = regulator_get_voltage(data->vref_reg);
330
331 if (ret < 0)
332 return ret;
333
334 *val = ret / 1000;
293 *val2 = 12; 335 *val2 = 12;
294 return IIO_VAL_FRACTIONAL_LOG2; 336 return IIO_VAL_FRACTIONAL_LOG2;
295 } 337 }
@@ -323,27 +365,98 @@ static const struct iio_info mcp4725_info = {
323 .driver_module = THIS_MODULE, 365 .driver_module = THIS_MODULE,
324}; 366};
325 367
368#ifdef CONFIG_OF
369static int mcp4725_probe_dt(struct device *dev,
370 struct mcp4725_platform_data *pdata)
371{
372 struct device_node *np = dev->of_node;
373
374 if (!np)
375 return -ENODEV;
376
377	/* check whether the vref-supply property is defined */
378 pdata->use_vref = of_property_read_bool(np, "vref-supply");
379 pdata->vref_buffered =
380 of_property_read_bool(np, "microchip,vref-buffered");
381
382 return 0;
383}
384#else
385static int mcp4725_probe_dt(struct device *dev,
386 struct mcp4725_platform_data *platform_data)
387{
388 return -ENODEV;
389}
390#endif
391
326static int mcp4725_probe(struct i2c_client *client, 392static int mcp4725_probe(struct i2c_client *client,
327 const struct i2c_device_id *id) 393 const struct i2c_device_id *id)
328{ 394{
329 struct mcp4725_data *data; 395 struct mcp4725_data *data;
330 struct iio_dev *indio_dev; 396 struct iio_dev *indio_dev;
331 struct mcp4725_platform_data *platform_data = client->dev.platform_data; 397 struct mcp4725_platform_data *pdata, pdata_dt;
332 u8 inbuf[3]; 398 u8 inbuf[4];
333 u8 pd; 399 u8 pd;
400 u8 ref;
334 int err; 401 int err;
335 402
336 if (!platform_data || !platform_data->vref_mv) {
337 dev_err(&client->dev, "invalid platform data");
338 return -EINVAL;
339 }
340
341 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); 403 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
342 if (indio_dev == NULL) 404 if (indio_dev == NULL)
343 return -ENOMEM; 405 return -ENOMEM;
344 data = iio_priv(indio_dev); 406 data = iio_priv(indio_dev);
345 i2c_set_clientdata(client, indio_dev); 407 i2c_set_clientdata(client, indio_dev);
346 data->client = client; 408 data->client = client;
409 data->id = id->driver_data;
410 pdata = dev_get_platdata(&client->dev);
411
412 if (!pdata) {
413 err = mcp4725_probe_dt(&client->dev, &pdata_dt);
414 if (err) {
415 dev_err(&client->dev,
416 "invalid platform or devicetree data");
417 return err;
418 }
419 pdata = &pdata_dt;
420 }
421
422 if (data->id == MCP4725 && pdata->use_vref) {
423 dev_err(&client->dev,
424 "external reference is unavailable on MCP4725");
425 return -EINVAL;
426 }
427
428 if (!pdata->use_vref && pdata->vref_buffered) {
429 dev_err(&client->dev,
430 "buffering is unavailable on the internal reference");
431 return -EINVAL;
432 }
433
434 if (!pdata->use_vref)
435 data->ref_mode = MCP472X_REF_VDD;
436 else
437 data->ref_mode = pdata->vref_buffered ?
438 MCP472X_REF_VREF_BUFFERED :
439 MCP472X_REF_VREF_UNBUFFERED;
440
441 data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
442 if (IS_ERR(data->vdd_reg))
443 return PTR_ERR(data->vdd_reg);
444
445 err = regulator_enable(data->vdd_reg);
446 if (err)
447 return err;
448
449 if (pdata->use_vref) {
450 data->vref_reg = devm_regulator_get(&client->dev, "vref");
451 if (IS_ERR(data->vref_reg)) {
452 err = PTR_ERR(data->vref_reg);
453 goto err_disable_vdd_reg;
454 }
455
456 err = regulator_enable(data->vref_reg);
457 if (err)
458 goto err_disable_vdd_reg;
459 }
347 460
348 indio_dev->dev.parent = &client->dev; 461 indio_dev->dev.parent = &client->dev;
349 indio_dev->name = id->name; 462 indio_dev->name = id->name;
@@ -352,25 +465,56 @@ static int mcp4725_probe(struct i2c_client *client,
352 indio_dev->num_channels = 1; 465 indio_dev->num_channels = 1;
353 indio_dev->modes = INDIO_DIRECT_MODE; 466 indio_dev->modes = INDIO_DIRECT_MODE;
354 467
355 data->vref_mv = platform_data->vref_mv; 468 /* read current DAC value and settings */
469 err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
356 470
357 /* read current DAC value */
358 err = i2c_master_recv(client, inbuf, 3);
359 if (err < 0) { 471 if (err < 0) {
360 dev_err(&client->dev, "failed to read DAC value"); 472 dev_err(&client->dev, "failed to read DAC value");
361 return err; 473 goto err_disable_vref_reg;
362 } 474 }
363 pd = (inbuf[0] >> 1) & 0x3; 475 pd = (inbuf[0] >> 1) & 0x3;
364 data->powerdown = pd > 0 ? true : false; 476 data->powerdown = pd > 0 ? true : false;
365 data->powerdown_mode = pd ? pd - 1 : 2; /* largest register to gnd */ 477 data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
366 data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4); 478 data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
479 if (data->id == MCP4726)
480 ref = (inbuf[3] >> 3) & 0x3;
481
482 if (data->id == MCP4726 && ref != data->ref_mode) {
483 dev_info(&client->dev,
484 "voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
485 data->ref_mode, ref, data->ref_mode);
486 err = mcp4726_set_cfg(indio_dev);
487 if (err < 0)
488 goto err_disable_vref_reg;
489 }
490
491 err = iio_device_register(indio_dev);
492 if (err)
493 goto err_disable_vref_reg;
494
495 return 0;
496
497err_disable_vref_reg:
498 if (data->vref_reg)
499 regulator_disable(data->vref_reg);
367 500
368 return iio_device_register(indio_dev); 501err_disable_vdd_reg:
502 regulator_disable(data->vdd_reg);
503
504 return err;
369} 505}
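For illustration, a minimal standalone sketch (not part of the patch; the sample bytes are made up) of how the 3- or 4-byte readback above decodes into the power-down, DAC-value and reference-mode fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical readback as filled in by i2c_master_recv() above */
	uint8_t inbuf[4] = { 0x06, 0x7f, 0xf0, 0x18 };

	uint8_t pd = (inbuf[0] >> 1) & 0x3;		  /* power-down bits */
	uint16_t dac = (inbuf[1] << 4) | (inbuf[2] >> 4); /* 12-bit DAC code */
	uint8_t ref = (inbuf[3] >> 3) & 0x3;		  /* MCP4726 only: VREF bits */

	/* prints pd=3 dac=2047 ref=3 */
	printf("pd=%u dac=%u ref=%u\n", (unsigned)pd, (unsigned)dac, (unsigned)ref);
	return 0;
}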
370 506
371static int mcp4725_remove(struct i2c_client *client) 507static int mcp4725_remove(struct i2c_client *client)
372{ 508{
373 iio_device_unregister(i2c_get_clientdata(client)); 509 struct iio_dev *indio_dev = i2c_get_clientdata(client);
510 struct mcp4725_data *data = iio_priv(indio_dev);
511
512 iio_device_unregister(indio_dev);
513
514 if (data->vref_reg)
515 regulator_disable(data->vref_reg);
516 regulator_disable(data->vdd_reg);
517
374 return 0; 518 return 0;
375} 519}
376 520
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 205a84420ae9..3126cf05e6b9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -84,6 +84,24 @@ config HID_SENSOR_GYRO_3D
84 Say yes here to build support for the HID SENSOR 84 Say yes here to build support for the HID SENSOR
85 Gyroscope 3D. 85 Gyroscope 3D.
86 86
87config MPU3050
88 tristate
89 select IIO_BUFFER
90 select IIO_TRIGGERED_BUFFER
91 select REGMAP
92
93config MPU3050_I2C
94 tristate "Invensense MPU3050 devices on I2C"
95 depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
96 depends on I2C
97 select MPU3050
98 select REGMAP_I2C
99 select I2C_MUX
100 help
101 This driver supports the Invensense MPU3050 gyroscope over I2C.
102 This driver can be built as a module. The module will be called
103 inv-mpu3050-i2c.
104
87config IIO_ST_GYRO_3AXIS 105config IIO_ST_GYRO_3AXIS
88 tristate "STMicroelectronics gyroscopes 3-Axis Driver" 106 tristate "STMicroelectronics gyroscopes 3-Axis Driver"
89 depends on (I2C || SPI_MASTER) && SYSFS 107 depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f866a4be0667..f0e149a606b0 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -14,6 +14,11 @@ obj-$(CONFIG_BMG160_SPI) += bmg160_spi.o
14 14
15obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o 15obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
16 16
17# Currently this is rolled into one module, split it if
18# we ever create a separate SPI interface for MPU-3050
19obj-$(CONFIG_MPU3050) += mpu3050.o
20mpu3050-objs := mpu3050-core.o mpu3050-i2c.o
21
17itg3200-y := itg3200_core.o 22itg3200-y := itg3200_core.o
18itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o 23itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
19obj-$(CONFIG_ITG3200) += itg3200.o 24obj-$(CONFIG_ITG3200) += itg3200.o
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
new file mode 100644
index 000000000000..2be2a5d287e6
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -0,0 +1,1306 @@
1/*
2 * MPU3050 gyroscope driver
3 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Author: Linus Walleij <linus.walleij@linaro.org>
6 *
7 * Based on the input subsystem driver, Copyright (C) 2011 Wistron Co.Ltd
8 * Joseph Lai <joseph_lai@wistron.com> and trimmed down by
9 * Alan Cox <alan@linux.intel.com> in turn based on bma023.c.
10 * Device behaviour based on a misc driver posted by Nathan Royer in 2011.
11 *
12 * TODO: add support for setting up the low pass 3dB frequency.
13 */
14
15#include <linux/bitops.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/iio/buffer.h>
19#include <linux/iio/iio.h>
20#include <linux/iio/sysfs.h>
21#include <linux/iio/trigger.h>
22#include <linux/iio/trigger_consumer.h>
23#include <linux/iio/triggered_buffer.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/pm_runtime.h>
27#include <linux/random.h>
28#include <linux/slab.h>
29
30#include "mpu3050.h"
31
32#define MPU3050_CHIP_ID 0x69
33
34/*
35 * Register map: anything suffixed *_H is a big-endian high byte and always
36 * followed by the corresponding low byte (*_L) even though these are not
37 * explicitly included in the register definitions.
38 */
39#define MPU3050_CHIP_ID_REG 0x00
40#define MPU3050_PRODUCT_ID_REG 0x01
41#define MPU3050_XG_OFFS_TC 0x05
42#define MPU3050_YG_OFFS_TC 0x08
43#define MPU3050_ZG_OFFS_TC 0x0B
44#define MPU3050_X_OFFS_USR_H 0x0C
45#define MPU3050_Y_OFFS_USR_H 0x0E
46#define MPU3050_Z_OFFS_USR_H 0x10
47#define MPU3050_FIFO_EN 0x12
48#define MPU3050_AUX_VDDIO 0x13
49#define MPU3050_SLV_ADDR 0x14
50#define MPU3050_SMPLRT_DIV 0x15
51#define MPU3050_DLPF_FS_SYNC 0x16
52#define MPU3050_INT_CFG 0x17
53#define MPU3050_AUX_ADDR 0x18
54#define MPU3050_INT_STATUS 0x1A
55#define MPU3050_TEMP_H 0x1B
56#define MPU3050_XOUT_H 0x1D
57#define MPU3050_YOUT_H 0x1F
58#define MPU3050_ZOUT_H 0x21
59#define MPU3050_DMP_CFG1 0x35
60#define MPU3050_DMP_CFG2 0x36
61#define MPU3050_BANK_SEL 0x37
62#define MPU3050_MEM_START_ADDR 0x38
63#define MPU3050_MEM_R_W 0x39
64#define MPU3050_FIFO_COUNT_H 0x3A
65#define MPU3050_FIFO_R 0x3C
66#define MPU3050_USR_CTRL 0x3D
67#define MPU3050_PWR_MGM 0x3E
68
69/* MPU memory bank read options */
70#define MPU3050_MEM_PRFTCH BIT(5)
71#define MPU3050_MEM_USER_BANK BIT(4)
72/* Bits 8-11 select memory bank */
73#define MPU3050_MEM_RAM_BANK_0 0
74#define MPU3050_MEM_RAM_BANK_1 1
75#define MPU3050_MEM_RAM_BANK_2 2
76#define MPU3050_MEM_RAM_BANK_3 3
77#define MPU3050_MEM_OTP_BANK_0 4
78
79#define MPU3050_AXIS_REGS(axis) (MPU3050_XOUT_H + (axis * 2))
80
81/* Register bits */
82
83/* FIFO Enable */
84#define MPU3050_FIFO_EN_FOOTER BIT(0)
85#define MPU3050_FIFO_EN_AUX_ZOUT BIT(1)
86#define MPU3050_FIFO_EN_AUX_YOUT BIT(2)
87#define MPU3050_FIFO_EN_AUX_XOUT BIT(3)
88#define MPU3050_FIFO_EN_GYRO_ZOUT BIT(4)
89#define MPU3050_FIFO_EN_GYRO_YOUT BIT(5)
90#define MPU3050_FIFO_EN_GYRO_XOUT BIT(6)
91#define MPU3050_FIFO_EN_TEMP_OUT BIT(7)
92
93/*
94 * Digital Low Pass filter (DLPF)
95 * Full Scale (FS)
96 * and Synchronization
97 */
98#define MPU3050_EXT_SYNC_NONE 0x00
99#define MPU3050_EXT_SYNC_TEMP 0x20
100#define MPU3050_EXT_SYNC_GYROX 0x40
101#define MPU3050_EXT_SYNC_GYROY 0x60
102#define MPU3050_EXT_SYNC_GYROZ 0x80
103#define MPU3050_EXT_SYNC_ACCELX 0xA0
104#define MPU3050_EXT_SYNC_ACCELY 0xC0
105#define MPU3050_EXT_SYNC_ACCELZ 0xE0
106#define MPU3050_EXT_SYNC_MASK 0xE0
107#define MPU3050_EXT_SYNC_SHIFT 5
108
109#define MPU3050_FS_250DPS 0x00
110#define MPU3050_FS_500DPS 0x08
111#define MPU3050_FS_1000DPS 0x10
112#define MPU3050_FS_2000DPS 0x18
113#define MPU3050_FS_MASK 0x18
114#define MPU3050_FS_SHIFT 3
115
116#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
117#define MPU3050_DLPF_CFG_188HZ 0x01
118#define MPU3050_DLPF_CFG_98HZ 0x02
119#define MPU3050_DLPF_CFG_42HZ 0x03
120#define MPU3050_DLPF_CFG_20HZ 0x04
121#define MPU3050_DLPF_CFG_10HZ 0x05
122#define MPU3050_DLPF_CFG_5HZ 0x06
123#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
124#define MPU3050_DLPF_CFG_MASK 0x07
125#define MPU3050_DLPF_CFG_SHIFT 0
126
127/* Interrupt config */
128#define MPU3050_INT_RAW_RDY_EN BIT(0)
129#define MPU3050_INT_DMP_DONE_EN BIT(1)
130#define MPU3050_INT_MPU_RDY_EN BIT(2)
131#define MPU3050_INT_ANYRD_2CLEAR BIT(4)
132#define MPU3050_INT_LATCH_EN BIT(5)
133#define MPU3050_INT_OPEN BIT(6)
134#define MPU3050_INT_ACTL BIT(7)
135/* Interrupt status */
136#define MPU3050_INT_STATUS_RAW_RDY BIT(0)
137#define MPU3050_INT_STATUS_DMP_DONE BIT(1)
138#define MPU3050_INT_STATUS_MPU_RDY BIT(2)
139#define MPU3050_INT_STATUS_FIFO_OVFLW BIT(7)
140/* USR_CTRL */
141#define MPU3050_USR_CTRL_FIFO_EN BIT(6)
142#define MPU3050_USR_CTRL_AUX_IF_EN BIT(5)
143#define MPU3050_USR_CTRL_AUX_IF_RST BIT(3)
144#define MPU3050_USR_CTRL_FIFO_RST BIT(1)
145#define MPU3050_USR_CTRL_GYRO_RST BIT(0)
146/* PWR_MGM */
147#define MPU3050_PWR_MGM_PLL_X 0x01
148#define MPU3050_PWR_MGM_PLL_Y 0x02
149#define MPU3050_PWR_MGM_PLL_Z 0x03
150#define MPU3050_PWR_MGM_CLKSEL_MASK 0x07
151#define MPU3050_PWR_MGM_STBY_ZG BIT(3)
152#define MPU3050_PWR_MGM_STBY_YG BIT(4)
153#define MPU3050_PWR_MGM_STBY_XG BIT(5)
154#define MPU3050_PWR_MGM_SLEEP BIT(6)
155#define MPU3050_PWR_MGM_RESET BIT(7)
156#define MPU3050_PWR_MGM_MASK 0xff
157
158/*
159 * Fullscale precision is (for finest precision) +/- 250 deg/s, so the full
160 * scale is actually 500 deg/s. All 16 bits are then used to cover this scale,
161 * in two's complement.
162 */
163static unsigned int mpu3050_fs_precision[] = {
164 IIO_DEGREE_TO_RAD(250),
165 IIO_DEGREE_TO_RAD(500),
166 IIO_DEGREE_TO_RAD(1000),
167 IIO_DEGREE_TO_RAD(2000)
168};
169
170/*
171 * Regulator names
172 */
173static const char mpu3050_reg_vdd[] = "vdd";
174static const char mpu3050_reg_vlogic[] = "vlogic";
175
176static unsigned int mpu3050_get_freq(struct mpu3050 *mpu3050)
177{
178 unsigned int freq;
179
180 if (mpu3050->lpf == MPU3050_DLPF_CFG_256HZ_NOLPF2)
181 freq = 8000;
182 else
183 freq = 1000;
184 freq /= (mpu3050->divisor + 1);
185
186 return freq;
187}
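A quick standalone sketch (not part of the patch) of the rate arithmetic above: the sample generator runs at 8 kHz with the low pass filter bypassed and at 1 kHz otherwise, and SMPLRT_DIV divides that base by (divisor + 1).

#include <stdio.h>

/* mirrors mpu3050_get_freq() for positive inputs */
static unsigned int sample_freq(int lpf_bypassed, unsigned int divisor)
{
	return (lpf_bypassed ? 8000 : 1000) / (divisor + 1);
}

int main(void)
{
	printf("%u Hz\n", sample_freq(0, 99)); /* driver defaults: 1000 / 100 = 10 Hz */
	printf("%u Hz\n", sample_freq(1, 0));  /* LPF bypassed: 8 kHz, used for raw reads */
	return 0;
}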
188
189static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
190{
191 __be16 raw_val[3];
192 int ret;
193 int i;
194
195 /* Reset */
196 ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
197 MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
198 if (ret)
199 return ret;
200
201 /* Turn on the Z-axis PLL */
202 ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
203 MPU3050_PWR_MGM_CLKSEL_MASK,
204 MPU3050_PWR_MGM_PLL_Z);
205 if (ret)
206 return ret;
207
208 /* Write calibration offset registers */
209 for (i = 0; i < 3; i++)
210 raw_val[i] = cpu_to_be16(mpu3050->calibration[i]);
211
212 ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
213 sizeof(raw_val));
214 if (ret)
215 return ret;
216
217 /* Set low pass filter (sample rate), sync and full scale */
218 ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
219 MPU3050_EXT_SYNC_NONE << MPU3050_EXT_SYNC_SHIFT |
220 mpu3050->fullscale << MPU3050_FS_SHIFT |
221 mpu3050->lpf << MPU3050_DLPF_CFG_SHIFT);
222 if (ret)
223 return ret;
224
225 /* Set up sampling frequency */
226 ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
227 if (ret)
228 return ret;
229
230 /*
231 * Max 50 ms start-up time after setting DLPF_FS_SYNC
232 * according to the data sheet, then wait for the next sample
233 * at this frequency T = 1000/f ms.
234 */
235 msleep(50 + 1000 / mpu3050_get_freq(mpu3050));
236
237 return 0;
238}
239
240static int mpu3050_set_8khz_samplerate(struct mpu3050 *mpu3050)
241{
242 int ret;
243 u8 divisor;
244 enum mpu3050_lpf lpf;
245
246 lpf = mpu3050->lpf;
247 divisor = mpu3050->divisor;
248
249 mpu3050->lpf = LPF_256_HZ_NOLPF; /* 8 kHz base frequency */
250 mpu3050->divisor = 0; /* Divide by 1 */
251 ret = mpu3050_start_sampling(mpu3050);
252
253 mpu3050->lpf = lpf;
254 mpu3050->divisor = divisor;
255
256 return ret;
257}
258
259static int mpu3050_read_raw(struct iio_dev *indio_dev,
260 struct iio_chan_spec const *chan,
261 int *val, int *val2,
262 long mask)
263{
264 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
265 int ret;
266 __be16 raw_val;
267
268 switch (mask) {
269 case IIO_CHAN_INFO_OFFSET:
270 switch (chan->type) {
271 case IIO_TEMP:
272 /* The temperature scaling is (x+23000)/280 Celsius */
273 *val = 23000;
274 return IIO_VAL_INT;
275 default:
276 return -EINVAL;
277 }
278 case IIO_CHAN_INFO_CALIBBIAS:
279 switch (chan->type) {
280 case IIO_ANGL_VEL:
281 *val = mpu3050->calibration[chan->scan_index-1];
282 return IIO_VAL_INT;
283 default:
284 return -EINVAL;
285 }
286 case IIO_CHAN_INFO_SAMP_FREQ:
287 *val = mpu3050_get_freq(mpu3050);
288 return IIO_VAL_INT;
289 case IIO_CHAN_INFO_SCALE:
290 switch (chan->type) {
291 case IIO_TEMP:
292			/* Millidegrees; see the temperature scaling above */
293 *val = 1000;
294 *val2 = 280;
295 return IIO_VAL_FRACTIONAL;
296 case IIO_ANGL_VEL:
297 /*
298 * Convert to the corresponding full scale in
299 * radians. All 16 bits are used with sign to
300			 * span the available scale: to account for the one
301			 * value that would be lost multiplying by 1/S16_MAX,
302			 * instead multiply by 2/U16_MAX.
303 */
304 *val = mpu3050_fs_precision[mpu3050->fullscale] * 2;
305 *val2 = U16_MAX;
306 return IIO_VAL_FRACTIONAL;
307 default:
308 return -EINVAL;
309 }
310 case IIO_CHAN_INFO_RAW:
311 /* Resume device */
312 pm_runtime_get_sync(mpu3050->dev);
313 mutex_lock(&mpu3050->lock);
314
315 ret = mpu3050_set_8khz_samplerate(mpu3050);
316 if (ret)
317 goto out_read_raw_unlock;
318
319 switch (chan->type) {
320 case IIO_TEMP:
321 ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
322 &raw_val, sizeof(raw_val));
323 if (ret) {
324 dev_err(mpu3050->dev,
325 "error reading temperature\n");
326 goto out_read_raw_unlock;
327 }
328
329 *val = be16_to_cpu(raw_val);
330 ret = IIO_VAL_INT;
331
332 goto out_read_raw_unlock;
333 case IIO_ANGL_VEL:
334 ret = regmap_bulk_read(mpu3050->map,
335 MPU3050_AXIS_REGS(chan->scan_index-1),
336 &raw_val,
337 sizeof(raw_val));
338 if (ret) {
339 dev_err(mpu3050->dev,
340 "error reading axis data\n");
341 goto out_read_raw_unlock;
342 }
343
344 *val = be16_to_cpu(raw_val);
345 ret = IIO_VAL_INT;
346
347 goto out_read_raw_unlock;
348 default:
349 ret = -EINVAL;
350 goto out_read_raw_unlock;
351 }
352 default:
353 break;
354 }
355
356 return -EINVAL;
357
358out_read_raw_unlock:
359 mutex_unlock(&mpu3050->lock);
360 pm_runtime_mark_last_busy(mpu3050->dev);
361 pm_runtime_put_autosuspend(mpu3050->dev);
362
363 return ret;
364}
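To show how the OFFSET and SCALE reported above combine, a small userspace-style sketch (not part of the patch; the raw reading is made up):

#include <stdio.h>

int main(void)
{
	int raw = -13000;		/* hypothetical in_temp_raw reading */
	int offset = 23000;		/* in_temp_offset reported above */
	double scale = 1000.0 / 280.0;	/* in_temp_scale, in milli-degrees Celsius */

	/* (raw + offset) * scale is milli-degrees: (-13000 + 23000) / 280 degrees */
	printf("%.1f degC\n", (raw + offset) * scale / 1000.0); /* ~35.7 degC */
	return 0;
}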
365
366static int mpu3050_write_raw(struct iio_dev *indio_dev,
367 const struct iio_chan_spec *chan,
368 int val, int val2, long mask)
369{
370 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
371 /*
372 * Couldn't figure out a way to precalculate these at compile time.
373 */
374 unsigned int fs250 =
375 DIV_ROUND_CLOSEST(mpu3050_fs_precision[0] * 1000000 * 2,
376 U16_MAX);
377 unsigned int fs500 =
378 DIV_ROUND_CLOSEST(mpu3050_fs_precision[1] * 1000000 * 2,
379 U16_MAX);
380 unsigned int fs1000 =
381 DIV_ROUND_CLOSEST(mpu3050_fs_precision[2] * 1000000 * 2,
382 U16_MAX);
383 unsigned int fs2000 =
384 DIV_ROUND_CLOSEST(mpu3050_fs_precision[3] * 1000000 * 2,
385 U16_MAX);
386
387 switch (mask) {
388 case IIO_CHAN_INFO_CALIBBIAS:
389 if (chan->type != IIO_ANGL_VEL)
390 return -EINVAL;
391 mpu3050->calibration[chan->scan_index-1] = val;
392 return 0;
393 case IIO_CHAN_INFO_SAMP_FREQ:
394 /*
395 * The max samplerate is 8000 Hz, the minimum
396 * 1000 / 256 ~= 4 Hz
397 */
398 if (val < 4 || val > 8000)
399 return -EINVAL;
400
401 /*
402 * Above 1000 Hz we must turn off the digital low pass filter
403 * so we get a base frequency of 8kHz to the divider
404 */
405 if (val > 1000) {
406 mpu3050->lpf = LPF_256_HZ_NOLPF;
407 mpu3050->divisor = DIV_ROUND_CLOSEST(8000, val) - 1;
408 return 0;
409 }
410
411 mpu3050->lpf = LPF_188_HZ;
412 mpu3050->divisor = DIV_ROUND_CLOSEST(1000, val) - 1;
413 return 0;
414 case IIO_CHAN_INFO_SCALE:
415 if (chan->type != IIO_ANGL_VEL)
416 return -EINVAL;
417 /*
418		 * We support +/-250, +/-500, +/-1000 and +/-2000 deg/s
419 * which means we need to round to the closest radians
420 * which will be roughly +/-4.3, +/-8.7, +/-17.5, +/-35
421		 * rad/s. With the 16 bits used to cover that range, the
422		 * scale is then 2/(2^16) of it.
423 */
424
425 /* Just too large, set the max range */
426 if (val != 0) {
427 mpu3050->fullscale = FS_2000_DPS;
428 return 0;
429 }
430
431 /*
432		 * Now we're dealing with a fraction below 1 rad/s, expressed
433		 * in microrad/s; do some integer interpolation and match the closest
434 * fullscale in the table.
435 */
436 if (val2 <= fs250 ||
437 val2 < ((fs500 + fs250) / 2))
438 mpu3050->fullscale = FS_250_DPS;
439 else if (val2 <= fs500 ||
440 val2 < ((fs1000 + fs500) / 2))
441 mpu3050->fullscale = FS_500_DPS;
442 else if (val2 <= fs1000 ||
443 val2 < ((fs2000 + fs1000) / 2))
444 mpu3050->fullscale = FS_1000_DPS;
445 else
446 /* Catch-all */
447 mpu3050->fullscale = FS_2000_DPS;
448 return 0;
449 default:
450 break;
451 }
452
453 return -EINVAL;
454}
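A standalone sketch (not part of the patch; the requested rates are arbitrary examples) of the sampling-frequency arithmetic above, mapping a requested rate onto the 1 kHz or 8 kHz base and the SMPLRT divisor:

#include <stdio.h>

/* simplified DIV_ROUND_CLOSEST() for positive values */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

static void pick_divisor(unsigned int hz)
{
	unsigned int base = (hz > 1000) ? 8000 : 1000;
	unsigned int divisor = DIV_ROUND_CLOSEST(base, hz) - 1;

	printf("requested %4u Hz -> base %u Hz, divisor %u, actual %u Hz\n",
	       hz, base, divisor, base / (divisor + 1));
}

int main(void)
{
	pick_divisor(200);	/* 1000 Hz base, divisor 4, actual 200 Hz */
	pick_divisor(3000);	/* 8000 Hz base, divisor 2, actual 2666 Hz */
	return 0;
}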
455
456static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
457{
458 const struct iio_poll_func *pf = p;
459 struct iio_dev *indio_dev = pf->indio_dev;
460 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
461 int ret;
462 /*
463 * Temperature 1*16 bits
464 * Three axes 3*16 bits
465 * Timestamp 64 bits (4*16 bits)
466 * Sum total 8*16 bits
467 */
468 __be16 hw_values[8];
469 s64 timestamp;
470 unsigned int datums_from_fifo = 0;
471
472 /*
473 * If we're using the hardware trigger, get the precise timestamp from
474 * the top half of the threaded IRQ handler. Otherwise get the
475 * timestamp here so it will be close in time to the actual values
476 * read from the registers.
477 */
478 if (iio_trigger_using_own(indio_dev))
479 timestamp = mpu3050->hw_timestamp;
480 else
481 timestamp = iio_get_time_ns(indio_dev);
482
483 mutex_lock(&mpu3050->lock);
484
485 /* Using the hardware IRQ trigger? Check the buffer then. */
486 if (mpu3050->hw_irq_trigger) {
487 __be16 raw_fifocnt;
488 u16 fifocnt;
489 /* X, Y, Z + temperature */
490 unsigned int bytes_per_datum = 8;
491 bool fifo_overflow = false;
492
493 ret = regmap_bulk_read(mpu3050->map,
494 MPU3050_FIFO_COUNT_H,
495 &raw_fifocnt,
496 sizeof(raw_fifocnt));
497 if (ret)
498 goto out_trigger_unlock;
499 fifocnt = be16_to_cpu(raw_fifocnt);
500
501 if (fifocnt == 512) {
502 dev_info(mpu3050->dev,
503 "FIFO overflow! Emptying and resetting FIFO\n");
504 fifo_overflow = true;
505 /* Reset and enable the FIFO */
506 ret = regmap_update_bits(mpu3050->map,
507 MPU3050_USR_CTRL,
508 MPU3050_USR_CTRL_FIFO_EN |
509 MPU3050_USR_CTRL_FIFO_RST,
510 MPU3050_USR_CTRL_FIFO_EN |
511 MPU3050_USR_CTRL_FIFO_RST);
512 if (ret) {
513 dev_info(mpu3050->dev, "error resetting FIFO\n");
514 goto out_trigger_unlock;
515 }
516 mpu3050->pending_fifo_footer = false;
517 }
518
519 if (fifocnt)
520 dev_dbg(mpu3050->dev,
521 "%d bytes in the FIFO\n",
522 fifocnt);
523
524 while (!fifo_overflow && fifocnt > bytes_per_datum) {
525 unsigned int toread;
526 unsigned int offset;
527 __be16 fifo_values[5];
528
529 /*
530 * If there is a FIFO footer in the pipe, first clear
531 * that out. This follows the complex algorithm in the
532 * datasheet that states that you may never leave the
533 * FIFO empty after the first reading: you have to
534 * always leave two footer bytes in it. The footer is
535 * in practice just two zero bytes.
536 */
537 if (mpu3050->pending_fifo_footer) {
538 toread = bytes_per_datum + 2;
539 offset = 0;
540 } else {
541 toread = bytes_per_datum;
542 offset = 1;
543 /* Put in some dummy value */
544 fifo_values[0] = 0xAAAA;
545 }
546
547 ret = regmap_bulk_read(mpu3050->map,
548 MPU3050_FIFO_R,
549 &fifo_values[offset],
550 toread);
551
552 dev_dbg(mpu3050->dev,
553 "%04x %04x %04x %04x %04x\n",
554 fifo_values[0],
555 fifo_values[1],
556 fifo_values[2],
557 fifo_values[3],
558 fifo_values[4]);
559
560 /* Index past the footer (fifo_values[0]) and push */
561 iio_push_to_buffers_with_timestamp(indio_dev,
562 &fifo_values[1],
563 timestamp);
564
565 fifocnt -= toread;
566 datums_from_fifo++;
567 mpu3050->pending_fifo_footer = true;
568
569 /*
570 * If we're emptying the FIFO, just make sure to
571 * check if something new appeared.
572 */
573 if (fifocnt < bytes_per_datum) {
574 ret = regmap_bulk_read(mpu3050->map,
575 MPU3050_FIFO_COUNT_H,
576 &raw_fifocnt,
577 sizeof(raw_fifocnt));
578 if (ret)
579 goto out_trigger_unlock;
580 fifocnt = be16_to_cpu(raw_fifocnt);
581 }
582
583 if (fifocnt < bytes_per_datum)
584 dev_dbg(mpu3050->dev,
585 "%d bytes left in the FIFO\n",
586 fifocnt);
587
588 /*
589 * At this point, the timestamp that triggered the
590 * hardware interrupt is no longer valid for what
591 * we are reading (the interrupt likely fired for
592 * the value on the top of the FIFO), so set the
593 * timestamp to zero and let userspace deal with it.
594 */
595 timestamp = 0;
596 }
597 }
598
599 /*
600 * If we picked some datums from the FIFO that's enough, else
601 * fall through and just read from the current value registers.
602 * This happens in two cases:
603 *
604 * - We are using some other trigger (external, like an HRTimer)
605 * than the sensor's own sample generator. In this case the
606 * sensor is just set to the max sampling frequency and we give
607 * the trigger a copy of the latest value every time we get here.
608 *
609 * - The hardware trigger is active but unused and we actually use
610	 * another trigger which calls in here at a higher frequency
611	 * than the device provides new data. We will then just read
612 * duplicate values directly from the hardware registers.
613 */
614 if (datums_from_fifo) {
615 dev_dbg(mpu3050->dev,
616 "read %d datums from the FIFO\n",
617 datums_from_fifo);
618 goto out_trigger_unlock;
619 }
620
621 ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
622 sizeof(hw_values));
623 if (ret) {
624 dev_err(mpu3050->dev,
625 "error reading axis data\n");
626 goto out_trigger_unlock;
627 }
628
629 iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
630
631out_trigger_unlock:
632 mutex_unlock(&mpu3050->lock);
633 iio_trigger_notify_done(indio_dev->trig);
634
635 return IRQ_HANDLED;
636}
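A compact sketch (not part of the patch) of the FIFO read geometry described above: the first read takes only the 8-byte datum and pads the footer slot with a dummy word, every later read takes the 2-byte footer plus the datum.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fifo_values[5];		/* footer + temp + X + Y + Z slots */
	unsigned int bytes_per_datum = 8;	/* temp + X + Y + Z */
	int pending_fifo_footer = 0;		/* set once the first datum is out */
	unsigned int toread, offset;
	int i;

	for (i = 0; i < 3; i++) {
		if (pending_fifo_footer) {
			toread = bytes_per_datum + 2;	/* footer + datum */
			offset = 0;
		} else {
			toread = bytes_per_datum;	/* datum only */
			offset = 1;
			fifo_values[0] = 0xAAAA;	/* dummy footer word */
		}
		printf("read %2u bytes into slot %u\n", toread, offset);
		pending_fifo_footer = 1;
	}
	return 0;
}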
637
638static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
639{
640 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
641
642 pm_runtime_get_sync(mpu3050->dev);
643
644 /* Unless we have OUR trigger active, run at full speed */
645 if (!mpu3050->hw_irq_trigger)
646 return mpu3050_set_8khz_samplerate(mpu3050);
647
648 return 0;
649}
650
651static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
652{
653 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
654
655 pm_runtime_mark_last_busy(mpu3050->dev);
656 pm_runtime_put_autosuspend(mpu3050->dev);
657
658 return 0;
659}
660
661static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = {
662 .preenable = mpu3050_buffer_preenable,
663 .postenable = iio_triggered_buffer_postenable,
664 .predisable = iio_triggered_buffer_predisable,
665 .postdisable = mpu3050_buffer_postdisable,
666};
667
668static const struct iio_mount_matrix *
669mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
670 const struct iio_chan_spec *chan)
671{
672 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
673
674 return &mpu3050->orientation;
675}
676
677static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
678 IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
679 { },
680};
681
682#define MPU3050_AXIS_CHANNEL(axis, index) \
683 { \
684 .type = IIO_ANGL_VEL, \
685 .modified = 1, \
686 .channel2 = IIO_MOD_##axis, \
687 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
688 BIT(IIO_CHAN_INFO_CALIBBIAS), \
689 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
690 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
691 .ext_info = mpu3050_ext_info, \
692 .scan_index = index, \
693 .scan_type = { \
694 .sign = 's', \
695 .realbits = 16, \
696 .storagebits = 16, \
697 .endianness = IIO_BE, \
698 }, \
699 }
700
701static const struct iio_chan_spec mpu3050_channels[] = {
702 {
703 .type = IIO_TEMP,
704 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
705 BIT(IIO_CHAN_INFO_SCALE) |
706 BIT(IIO_CHAN_INFO_OFFSET),
707 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
708 .scan_index = 0,
709 .scan_type = {
710 .sign = 's',
711 .realbits = 16,
712 .storagebits = 16,
713 .endianness = IIO_BE,
714 },
715 },
716 MPU3050_AXIS_CHANNEL(X, 1),
717 MPU3050_AXIS_CHANNEL(Y, 2),
718 MPU3050_AXIS_CHANNEL(Z, 3),
719 IIO_CHAN_SOFT_TIMESTAMP(4),
720};
721
722/* Four channels apart from timestamp, scan mask = 0x0f */
723static const unsigned long mpu3050_scan_masks[] = { 0xf, 0 };
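For reference, a standalone sketch (not part of the patch) of the per-sample layout implied by these channels and by the hw_values[8] buffer in the trigger handler: four 16-bit words followed by the 64-bit timestamp.

#include <stdint.h>
#include <stdio.h>

struct mpu3050_scan {
	uint16_t temp;		/* scan index 0 */
	uint16_t x;		/* scan index 1 */
	uint16_t y;		/* scan index 2 */
	uint16_t z;		/* scan index 3 */
	int64_t timestamp;	/* filled by iio_push_to_buffers_with_timestamp() */
};

int main(void)
{
	/* 8 bytes of data plus an 8-byte aligned timestamp: 16 bytes per scan */
	printf("%zu bytes per scan\n", sizeof(struct mpu3050_scan));
	return 0;
}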
724
725/*
726 * These are just the hardcoded factors resulting from the more elaborate
727 * calculations done with fractions in the scale raw get/set functions.
728 */
729static IIO_CONST_ATTR(anglevel_scale_available,
730 "0.000122070 "
731 "0.000274658 "
732 "0.000518798 "
733 "0.001068115");
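A sketch (not part of the patch) of where these four strings come from, assuming IIO_DEGREE_TO_RAD() rounds 250/500/1000/2000 deg/s to the whole-radian values 4/9/17/35 and the scale is 2/2^16 of the range as described in the read/write comments (the runtime path uses 2/U16_MAX, which agrees to within the last printed digit):

#include <stdio.h>

int main(void)
{
	/* assumed integer rad/s equivalents of 250, 500, 1000 and 2000 deg/s */
	unsigned int rad[] = { 4, 9, 17, 35 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%.9f\n", 2.0 * rad[i] / 65536.0);
	/* 0.000122070, 0.000274658, 0.000518799, 0.001068115 (cf. strings above) */
	return 0;
}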
734
735static struct attribute *mpu3050_attributes[] = {
736 &iio_const_attr_anglevel_scale_available.dev_attr.attr,
737 NULL,
738};
739
740static const struct attribute_group mpu3050_attribute_group = {
741 .attrs = mpu3050_attributes,
742};
743
744static const struct iio_info mpu3050_info = {
745 .driver_module = THIS_MODULE,
746 .read_raw = mpu3050_read_raw,
747 .write_raw = mpu3050_write_raw,
748 .attrs = &mpu3050_attribute_group,
749};
750
751/**
752 * mpu3050_read_mem() - read MPU-3050 internal memory
753 * @mpu3050: device to read from
754 * @bank: target bank
755 * @addr: target address
756 * @len: number of bytes
757 * @buf: the buffer to store the read bytes in
758 */
759static int mpu3050_read_mem(struct mpu3050 *mpu3050,
760 u8 bank,
761 u8 addr,
762 u8 len,
763 u8 *buf)
764{
765 int ret;
766
767 ret = regmap_write(mpu3050->map,
768 MPU3050_BANK_SEL,
769 bank);
770 if (ret)
771 return ret;
772
773 ret = regmap_write(mpu3050->map,
774 MPU3050_MEM_START_ADDR,
775 addr);
776 if (ret)
777 return ret;
778
779 return regmap_bulk_read(mpu3050->map,
780 MPU3050_MEM_R_W,
781 buf,
782 len);
783}
784
785static int mpu3050_hw_init(struct mpu3050 *mpu3050)
786{
787 int ret;
788 u8 otp[8];
789
790 /* Reset */
791 ret = regmap_update_bits(mpu3050->map,
792 MPU3050_PWR_MGM,
793 MPU3050_PWR_MGM_RESET,
794 MPU3050_PWR_MGM_RESET);
795 if (ret)
796 return ret;
797
798 /* Turn on the PLL */
799 ret = regmap_update_bits(mpu3050->map,
800 MPU3050_PWR_MGM,
801 MPU3050_PWR_MGM_CLKSEL_MASK,
802 MPU3050_PWR_MGM_PLL_Z);
803 if (ret)
804 return ret;
805
806 /* Disable IRQs */
807 ret = regmap_write(mpu3050->map,
808 MPU3050_INT_CFG,
809 0);
810 if (ret)
811 return ret;
812
813 /* Read out the 8 bytes of OTP (one-time-programmable) memory */
814 ret = mpu3050_read_mem(mpu3050,
815 (MPU3050_MEM_PRFTCH |
816 MPU3050_MEM_USER_BANK |
817 MPU3050_MEM_OTP_BANK_0),
818 0,
819 sizeof(otp),
820 otp);
821 if (ret)
822 return ret;
823
824 /* This is device-unique data so it goes into the entropy pool */
825 add_device_randomness(otp, sizeof(otp));
826
827 dev_info(mpu3050->dev,
828 "die ID: %04X, wafer ID: %02X, A lot ID: %04X, "
829 "W lot ID: %03X, WP ID: %01X, rev ID: %02X\n",
830 /* Die ID, bits 0-12 */
831 (otp[1] << 8 | otp[0]) & 0x1fff,
832 /* Wafer ID, bits 13-17 */
833 ((otp[2] << 8 | otp[1]) & 0x03e0) >> 5,
834 /* A lot ID, bits 18-33 */
835 ((otp[4] << 16 | otp[3] << 8 | otp[2]) & 0x3fffc) >> 2,
836 /* W lot ID, bits 34-45 */
837 ((otp[5] << 8 | otp[4]) & 0x3ffc) >> 2,
838 /* WP ID, bits 47-49 */
839 ((otp[6] << 8 | otp[5]) & 0x0380) >> 7,
840 /* rev ID, bits 50-55 */
841 otp[6] >> 2);
842
843 return 0;
844}
845
846static int mpu3050_power_up(struct mpu3050 *mpu3050)
847{
848 int ret;
849
850 ret = regulator_bulk_enable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
851 if (ret) {
852 dev_err(mpu3050->dev, "cannot enable regulators\n");
853 return ret;
854 }
855 /*
856 * 20-100 ms start-up time for register read/write according to
857 * the datasheet, be on the safe side and wait 200 ms.
858 */
859 msleep(200);
860
861 /* Take device out of sleep mode */
862 ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
863 MPU3050_PWR_MGM_SLEEP, 0);
864 if (ret) {
865 dev_err(mpu3050->dev, "error setting power mode\n");
866 return ret;
867 }
868 msleep(10);
869
870 return 0;
871}
872
873static int mpu3050_power_down(struct mpu3050 *mpu3050)
874{
875 int ret;
876
877 /*
878 * Put MPU-3050 into sleep mode before cutting regulators.
879 * This is important, because we may not be the sole user
880 * of the regulator so the power may stay on after this, and
881 * then we would be wasting power unless we go to sleep mode
882 * first.
883 */
884 ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
885 MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
886 if (ret)
887 dev_err(mpu3050->dev, "error putting to sleep\n");
888
889 ret = regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
890 if (ret)
891 dev_err(mpu3050->dev, "error disabling regulators\n");
892
893 return 0;
894}
895
896static irqreturn_t mpu3050_irq_handler(int irq, void *p)
897{
898 struct iio_trigger *trig = p;
899 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
900 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
901
902 if (!mpu3050->hw_irq_trigger)
903 return IRQ_NONE;
904
905 /* Get the time stamp as close in time as possible */
906 mpu3050->hw_timestamp = iio_get_time_ns(indio_dev);
907
908 return IRQ_WAKE_THREAD;
909}
910
911static irqreturn_t mpu3050_irq_thread(int irq, void *p)
912{
913 struct iio_trigger *trig = p;
914 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
915 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
916 unsigned int val;
917 int ret;
918
919 /* ACK IRQ and check if it was from us */
920 ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
921 if (ret) {
922 dev_err(mpu3050->dev, "error reading IRQ status\n");
923 return IRQ_HANDLED;
924 }
925 if (!(val & MPU3050_INT_STATUS_RAW_RDY))
926 return IRQ_NONE;
927
928 iio_trigger_poll_chained(p);
929
930 return IRQ_HANDLED;
931}
932
933/**
934 * mpu3050_drdy_trigger_set_state() - set data ready interrupt state
935 * @trig: trigger instance
936 * @enable: true if trigger should be enabled, false to disable
937 */
938static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
939 bool enable)
940{
941 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
942 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
943 unsigned int val;
944 int ret;
945
946 /* Disabling trigger: disable interrupt and return */
947 if (!enable) {
948 /* Disable all interrupts */
949 ret = regmap_write(mpu3050->map,
950 MPU3050_INT_CFG,
951 0);
952 if (ret)
953 dev_err(mpu3050->dev, "error disabling IRQ\n");
954
955 /* Clear IRQ flag */
956 ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
957 if (ret)
958 dev_err(mpu3050->dev, "error clearing IRQ status\n");
959
960 /* Disable all things in the FIFO and reset it */
961 ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
962 if (ret)
963 dev_err(mpu3050->dev, "error disabling FIFO\n");
964
965 ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
966 MPU3050_USR_CTRL_FIFO_RST);
967 if (ret)
968 dev_err(mpu3050->dev, "error resetting FIFO\n");
969
970 pm_runtime_mark_last_busy(mpu3050->dev);
971 pm_runtime_put_autosuspend(mpu3050->dev);
972 mpu3050->hw_irq_trigger = false;
973
974 return 0;
975 } else {
976 /* Else we're enabling the trigger from this point */
977 pm_runtime_get_sync(mpu3050->dev);
978 mpu3050->hw_irq_trigger = true;
979
980 /* Disable all things in the FIFO */
981 ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
982 if (ret)
983 return ret;
984
985 /* Reset and enable the FIFO */
986 ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
987 MPU3050_USR_CTRL_FIFO_EN |
988 MPU3050_USR_CTRL_FIFO_RST,
989 MPU3050_USR_CTRL_FIFO_EN |
990 MPU3050_USR_CTRL_FIFO_RST);
991 if (ret)
992 return ret;
993
994 mpu3050->pending_fifo_footer = false;
995
996 /* Turn on the FIFO for temp+X+Y+Z */
997 ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
998 MPU3050_FIFO_EN_TEMP_OUT |
999 MPU3050_FIFO_EN_GYRO_XOUT |
1000 MPU3050_FIFO_EN_GYRO_YOUT |
1001 MPU3050_FIFO_EN_GYRO_ZOUT |
1002 MPU3050_FIFO_EN_FOOTER);
1003 if (ret)
1004 return ret;
1005
1006 /* Configure the sample engine */
1007 ret = mpu3050_start_sampling(mpu3050);
1008 if (ret)
1009 return ret;
1010
1011 /* Clear IRQ flag */
1012 ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
1013 if (ret)
1014 dev_err(mpu3050->dev, "error clearing IRQ status\n");
1015
1016 /* Give us interrupts whenever there is new data ready */
1017 val = MPU3050_INT_RAW_RDY_EN;
1018
1019 if (mpu3050->irq_actl)
1020 val |= MPU3050_INT_ACTL;
1021 if (mpu3050->irq_latch)
1022 val |= MPU3050_INT_LATCH_EN;
1023 if (mpu3050->irq_opendrain)
1024 val |= MPU3050_INT_OPEN;
1025
1026 ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
1027 if (ret)
1028 return ret;
1029 }
1030
1031 return 0;
1032}
1033
1034static const struct iio_trigger_ops mpu3050_trigger_ops = {
1035 .owner = THIS_MODULE,
1036 .set_trigger_state = mpu3050_drdy_trigger_set_state,
1037};
1038
1039static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
1040{
1041 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
1042 unsigned long irq_trig;
1043 int ret;
1044
1045 mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
1046 "%s-dev%d",
1047 indio_dev->name,
1048 indio_dev->id);
1049 if (!mpu3050->trig)
1050 return -ENOMEM;
1051
1052 /* Check if IRQ is open drain */
1053 if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
1054 mpu3050->irq_opendrain = true;
1055
1056 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
1057 /*
1058 * Configure the interrupt generator hardware to supply whatever
1059	 * the interrupt line is configured for: rising/falling edge or
1060	 * low/high level, we can provide it all.
1061 */
1062 switch (irq_trig) {
1063 case IRQF_TRIGGER_RISING:
1064 dev_info(&indio_dev->dev,
1065 "pulse interrupts on the rising edge\n");
1066 if (mpu3050->irq_opendrain) {
1067 dev_info(&indio_dev->dev,
1068 "rising edge incompatible with open drain\n");
1069 mpu3050->irq_opendrain = false;
1070 }
1071 break;
1072 case IRQF_TRIGGER_FALLING:
1073 mpu3050->irq_actl = true;
1074 dev_info(&indio_dev->dev,
1075 "pulse interrupts on the falling edge\n");
1076 break;
1077 case IRQF_TRIGGER_HIGH:
1078 mpu3050->irq_latch = true;
1079 dev_info(&indio_dev->dev,
1080 "interrupts active high level\n");
1081 if (mpu3050->irq_opendrain) {
1082 dev_info(&indio_dev->dev,
1083 "active high incompatible with open drain\n");
1084 mpu3050->irq_opendrain = false;
1085 }
1086 /*
1087 * With level IRQs, we mask the IRQ until it is processed,
1088 * but with edge IRQs (pulses) we can queue several interrupts
1089 * in the top half.
1090 */
1091 irq_trig |= IRQF_ONESHOT;
1092 break;
1093 case IRQF_TRIGGER_LOW:
1094 mpu3050->irq_latch = true;
1095 mpu3050->irq_actl = true;
1096 irq_trig |= IRQF_ONESHOT;
1097 dev_info(&indio_dev->dev,
1098 "interrupts active low level\n");
1099 break;
1100 default:
1101 /* This is the most preferred mode, if possible */
1102 dev_err(&indio_dev->dev,
1103			"unsupported IRQ trigger specified (%lx), enforcing "
1104 "rising edge\n", irq_trig);
1105 irq_trig = IRQF_TRIGGER_RISING;
1106 break;
1107 }
1108
1109 /* An open drain line can be shared with several devices */
1110 if (mpu3050->irq_opendrain)
1111 irq_trig |= IRQF_SHARED;
1112
1113 ret = request_threaded_irq(irq,
1114 mpu3050_irq_handler,
1115 mpu3050_irq_thread,
1116 irq_trig,
1117 mpu3050->trig->name,
1118 mpu3050->trig);
1119 if (ret) {
1120 dev_err(mpu3050->dev,
1121 "can't get IRQ %d, error %d\n", irq, ret);
1122 return ret;
1123 }
1124
1125 mpu3050->irq = irq;
1126 mpu3050->trig->dev.parent = mpu3050->dev;
1127 mpu3050->trig->ops = &mpu3050_trigger_ops;
1128 iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
1129
1130 ret = iio_trigger_register(mpu3050->trig);
1131 if (ret)
1132 return ret;
1133
1134 indio_dev->trig = iio_trigger_get(mpu3050->trig);
1135
1136 return 0;
1137}
1138
1139int mpu3050_common_probe(struct device *dev,
1140 struct regmap *map,
1141 int irq,
1142 const char *name)
1143{
1144 struct iio_dev *indio_dev;
1145 struct mpu3050 *mpu3050;
1146 unsigned int val;
1147 int ret;
1148
1149 indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
1150 if (!indio_dev)
1151 return -ENOMEM;
1152 mpu3050 = iio_priv(indio_dev);
1153
1154 mpu3050->dev = dev;
1155 mpu3050->map = map;
1156 mutex_init(&mpu3050->lock);
1157 /* Default fullscale: 2000 degrees per second */
1158 mpu3050->fullscale = FS_2000_DPS;
1159 /* 1 kHz, divide by 100, default frequency = 10 Hz */
1160 mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
1161 mpu3050->divisor = 99;
1162
1163 /* Read the mounting matrix, if present */
1164 ret = of_iio_read_mount_matrix(dev, "mount-matrix",
1165 &mpu3050->orientation);
1166 if (ret)
1167 return ret;
1168
1169 /* Fetch and turn on regulators */
1170 mpu3050->regs[0].supply = mpu3050_reg_vdd;
1171 mpu3050->regs[1].supply = mpu3050_reg_vlogic;
1172 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
1173 mpu3050->regs);
1174 if (ret) {
1175 dev_err(dev, "Cannot get regulators\n");
1176 return ret;
1177 }
1178
1179 ret = mpu3050_power_up(mpu3050);
1180 if (ret)
1181 return ret;
1182
1183 ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
1184 if (ret) {
1185 dev_err(dev, "could not read device ID\n");
1186 ret = -ENODEV;
1187
1188 goto err_power_down;
1189 }
1190
1191 if (val != MPU3050_CHIP_ID) {
1192 dev_err(dev, "unsupported chip id %02x\n", (u8)val);
1193 ret = -ENODEV;
1194 goto err_power_down;
1195 }
1196
1197 ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
1198 if (ret) {
1199 dev_err(dev, "could not read device ID\n");
1200 ret = -ENODEV;
1201
1202 goto err_power_down;
1203 }
1204 dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
1205 ((val >> 4) & 0xf), (val & 0xf));
1206
1207 ret = mpu3050_hw_init(mpu3050);
1208 if (ret)
1209 goto err_power_down;
1210
1211 indio_dev->dev.parent = dev;
1212 indio_dev->channels = mpu3050_channels;
1213 indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
1214 indio_dev->info = &mpu3050_info;
1215 indio_dev->available_scan_masks = mpu3050_scan_masks;
1216 indio_dev->modes = INDIO_DIRECT_MODE;
1217 indio_dev->name = name;
1218
1219 ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
1220 mpu3050_trigger_handler,
1221 &mpu3050_buffer_setup_ops);
1222 if (ret) {
1223 dev_err(dev, "triggered buffer setup failed\n");
1224 goto err_power_down;
1225 }
1226
1227 ret = iio_device_register(indio_dev);
1228 if (ret) {
1229 dev_err(dev, "device register failed\n");
1230 goto err_cleanup_buffer;
1231 }
1232
1233 dev_set_drvdata(dev, indio_dev);
1234
1235 /* Check if we have an assigned IRQ to use as trigger */
1236 if (irq) {
1237 ret = mpu3050_trigger_probe(indio_dev, irq);
1238 if (ret)
1239 dev_err(dev, "failed to register trigger\n");
1240 }
1241
1242 /* Enable runtime PM */
1243 pm_runtime_get_noresume(dev);
1244 pm_runtime_set_active(dev);
1245 pm_runtime_enable(dev);
1246 /*
1247 * Set autosuspend to two orders of magnitude larger than the
1248 * start-up time. 100ms start-up time means 10000ms autosuspend,
1249 * i.e. 10 seconds.
1250 */
1251 pm_runtime_set_autosuspend_delay(dev, 10000);
1252 pm_runtime_use_autosuspend(dev);
1253 pm_runtime_put(dev);
1254
1255 return 0;
1256
1257err_cleanup_buffer:
1258 iio_triggered_buffer_cleanup(indio_dev);
1259err_power_down:
1260 mpu3050_power_down(mpu3050);
1261
1262 return ret;
1263}
1264EXPORT_SYMBOL(mpu3050_common_probe);
1265
1266int mpu3050_common_remove(struct device *dev)
1267{
1268 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1269 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
1270
1271 pm_runtime_get_sync(dev);
1272 pm_runtime_put_noidle(dev);
1273 pm_runtime_disable(dev);
1274 iio_triggered_buffer_cleanup(indio_dev);
1275 if (mpu3050->irq)
1276 free_irq(mpu3050->irq, mpu3050);
1277 iio_device_unregister(indio_dev);
1278 mpu3050_power_down(mpu3050);
1279
1280 return 0;
1281}
1282EXPORT_SYMBOL(mpu3050_common_remove);
1283
1284#ifdef CONFIG_PM
1285static int mpu3050_runtime_suspend(struct device *dev)
1286{
1287 return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
1288}
1289
1290static int mpu3050_runtime_resume(struct device *dev)
1291{
1292 return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
1293}
1294#endif /* CONFIG_PM */
1295
1296const struct dev_pm_ops mpu3050_dev_pm_ops = {
1297 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1298 pm_runtime_force_resume)
1299 SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
1300 mpu3050_runtime_resume, NULL)
1301};
1302EXPORT_SYMBOL(mpu3050_dev_pm_ops);
1303
1304MODULE_AUTHOR("Linus Walleij");
1305MODULE_DESCRIPTION("MPU3050 gyroscope driver");
1306MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
new file mode 100644
index 000000000000..06007200bf49
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -0,0 +1,124 @@
1#include <linux/err.h>
2#include <linux/i2c.h>
3#include <linux/i2c-mux.h>
4#include <linux/iio/iio.h>
5#include <linux/module.h>
6#include <linux/regmap.h>
7#include <linux/pm_runtime.h>
8
9#include "mpu3050.h"
10
11static const struct regmap_config mpu3050_i2c_regmap_config = {
12 .reg_bits = 8,
13 .val_bits = 8,
14};
15
16static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
17{
18 struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
19
20 /* Just power up the device, that is all that is needed */
21 pm_runtime_get_sync(mpu3050->dev);
22 return 0;
23}
24
25static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
26{
27 struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
28
29 pm_runtime_mark_last_busy(mpu3050->dev);
30 pm_runtime_put_autosuspend(mpu3050->dev);
31 return 0;
32}
33
34static int mpu3050_i2c_probe(struct i2c_client *client,
35 const struct i2c_device_id *id)
36{
37 struct regmap *regmap;
38 const char *name;
39 struct mpu3050 *mpu3050;
40 int ret;
41
42 if (!i2c_check_functionality(client->adapter,
43 I2C_FUNC_SMBUS_I2C_BLOCK))
44 return -EOPNOTSUPP;
45
46 if (id)
47 name = id->name;
48 else
49 return -ENODEV;
50
51 regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
52 if (IS_ERR(regmap)) {
53 dev_err(&client->dev, "Failed to register i2c regmap %d\n",
54 (int)PTR_ERR(regmap));
55 return PTR_ERR(regmap);
56 }
57
58 ret = mpu3050_common_probe(&client->dev, regmap, client->irq, name);
59 if (ret)
60 return ret;
61
62 /* The main driver is up, now register the I2C mux */
63 mpu3050 = iio_priv(dev_get_drvdata(&client->dev));
64 mpu3050->i2cmux = i2c_mux_alloc(client->adapter, &client->dev,
65 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
66 mpu3050_i2c_bypass_select,
67 mpu3050_i2c_bypass_deselect);
68 /* Just fail the mux, there is no point in killing the driver */
69 if (!mpu3050->i2cmux)
70 dev_err(&client->dev, "failed to allocate I2C mux\n");
71 else {
72 mpu3050->i2cmux->priv = mpu3050;
73 ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
74 if (ret)
75 dev_err(&client->dev, "failed to add I2C mux\n");
76 }
77
78 return 0;
79}
80
81static int mpu3050_i2c_remove(struct i2c_client *client)
82{
83 struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
84 struct mpu3050 *mpu3050 = iio_priv(indio_dev);
85
86 if (mpu3050->i2cmux)
87 i2c_mux_del_adapters(mpu3050->i2cmux);
88
89 return mpu3050_common_remove(&client->dev);
90}
91
92/*
93 * The device ID table is used to identify which devices are
94 * supported by this driver
95 */
96static const struct i2c_device_id mpu3050_i2c_id[] = {
97 { "mpu3050" },
98 {}
99};
100MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
101
102static const struct of_device_id mpu3050_i2c_of_match[] = {
103 { .compatible = "invensense,mpu3050", .data = "mpu3050" },
104 /* Deprecated vendor ID from the Input driver */
105 { .compatible = "invn,mpu3050", .data = "mpu3050" },
106 { },
107};
108MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
109
110static struct i2c_driver mpu3050_i2c_driver = {
111 .probe = mpu3050_i2c_probe,
112 .remove = mpu3050_i2c_remove,
113 .id_table = mpu3050_i2c_id,
114 .driver = {
115 .of_match_table = mpu3050_i2c_of_match,
116 .name = "mpu3050-i2c",
117 .pm = &mpu3050_dev_pm_ops,
118 },
119};
120module_i2c_driver(mpu3050_i2c_driver);
121
122MODULE_AUTHOR("Linus Walleij");
123MODULE_DESCRIPTION("Invensense MPU3050 gyroscope driver");
124MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
new file mode 100644
index 000000000000..bef87a714dc5
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050.h
@@ -0,0 +1,96 @@
1#include <linux/iio/iio.h>
2#include <linux/mutex.h>
3#include <linux/regmap.h>
4#include <linux/regulator/consumer.h>
5#include <linux/i2c.h>
6
7/**
8 * enum mpu3050_fullscale - indicates the full range of the sensor in deg/sec
9 */
10enum mpu3050_fullscale {
11 FS_250_DPS = 0,
12 FS_500_DPS,
13 FS_1000_DPS,
14 FS_2000_DPS,
15};
16
17/**
18 * enum mpu3050_lpf - indicates the low pass filter width
19 */
20enum mpu3050_lpf {
21	/* This implicitly sets the sample frequency to 8 kHz */
22 LPF_256_HZ_NOLPF = 0,
23	/* All others set the sample frequency to 1 kHz */
24 LPF_188_HZ,
25 LPF_98_HZ,
26 LPF_42_HZ,
27 LPF_20_HZ,
28 LPF_10_HZ,
29 LPF_5_HZ,
30 LPF_2100_HZ_NOLPF,
31};
32
33enum mpu3050_axis {
34 AXIS_X = 0,
35 AXIS_Y,
36 AXIS_Z,
37 AXIS_MAX,
38};
39
40/**
41 * struct mpu3050 - instance state container for the device
42 * @dev: parent device for this instance
43 * @orientation: mounting matrix, flipped axis etc
44 * @map: regmap to reach the registers
45 * @lock: serialization lock to marshal all requests
46 * @irq: the IRQ used for this device
47 * @regs: the regulators to power this device
48 * @fullscale: the current fullscale setting for the device
49 * @lpf: digital low pass filter setting for the device
50 * @divisor: base frequency divider: divides 8 or 1 kHz
51 * @calibration: the three signed 16-bit calibration settings that
52 * get written into the offset registers for each axis to compensate
53 * for DC offsets
54 * @trig: trigger for the MPU-3050 interrupt, if present
55 * @hw_irq_trigger: hardware interrupt trigger is in use
56 * @irq_actl: interrupt is active low
57 * @irq_latch: latched IRQ, this means that it is a level IRQ
58 * @irq_opendrain: the interrupt line shall be configured open drain
59 * @pending_fifo_footer: tells us if there is a pending footer in the FIFO
60 * that we have to read out first when handling the FIFO
61 * @hw_timestamp: latest hardware timestamp from the trigger IRQ, when in
62 * use
63 * @i2cmux: an I2C mux reflecting the fact that this sensor is a hub with
64 * a pass-through I2C interface coming out of it: this device needs to be
65 * powered up in order to reach devices on the other side of this mux
66 */
67struct mpu3050 {
68 struct device *dev;
69 struct iio_mount_matrix orientation;
70 struct regmap *map;
71 struct mutex lock;
72 int irq;
73 struct regulator_bulk_data regs[2];
74 enum mpu3050_fullscale fullscale;
75 enum mpu3050_lpf lpf;
76 u8 divisor;
77 s16 calibration[3];
78 struct iio_trigger *trig;
79 bool hw_irq_trigger;
80 bool irq_actl;
81 bool irq_latch;
82 bool irq_opendrain;
83 bool pending_fifo_footer;
84 s64 hw_timestamp;
85 struct i2c_mux_core *i2cmux;
86};
87
88/* Probe called from different transports */
89int mpu3050_common_probe(struct device *dev,
90 struct regmap *map,
91 int irq,
92 const char *name);
93int mpu3050_common_remove(struct device *dev);
94
95/* PM ops */
96extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d8fe0f..2a42b3d583e8 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -39,79 +39,6 @@
39#define ST_GYRO_FS_AVL_500DPS 500 39#define ST_GYRO_FS_AVL_500DPS 500
40#define ST_GYRO_FS_AVL_2000DPS 2000 40#define ST_GYRO_FS_AVL_2000DPS 2000
41 41
42/* CUSTOM VALUES FOR SENSOR 1 */
43#define ST_GYRO_1_WAI_EXP 0xd3
44#define ST_GYRO_1_ODR_ADDR 0x20
45#define ST_GYRO_1_ODR_MASK 0xc0
46#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
47#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
48#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
49#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
50#define ST_GYRO_1_PW_ADDR 0x20
51#define ST_GYRO_1_PW_MASK 0x08
52#define ST_GYRO_1_FS_ADDR 0x23
53#define ST_GYRO_1_FS_MASK 0x30
54#define ST_GYRO_1_FS_AVL_250_VAL 0x00
55#define ST_GYRO_1_FS_AVL_500_VAL 0x01
56#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
57#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
58#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
59#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
60#define ST_GYRO_1_BDU_ADDR 0x23
61#define ST_GYRO_1_BDU_MASK 0x80
62#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
63#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
64#define ST_GYRO_1_MULTIREAD_BIT true
65
66/* CUSTOM VALUES FOR SENSOR 2 */
67#define ST_GYRO_2_WAI_EXP 0xd4
68#define ST_GYRO_2_ODR_ADDR 0x20
69#define ST_GYRO_2_ODR_MASK 0xc0
70#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
71#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
72#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
73#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
74#define ST_GYRO_2_PW_ADDR 0x20
75#define ST_GYRO_2_PW_MASK 0x08
76#define ST_GYRO_2_FS_ADDR 0x23
77#define ST_GYRO_2_FS_MASK 0x30
78#define ST_GYRO_2_FS_AVL_250_VAL 0x00
79#define ST_GYRO_2_FS_AVL_500_VAL 0x01
80#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
81#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
82#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
83#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
84#define ST_GYRO_2_BDU_ADDR 0x23
85#define ST_GYRO_2_BDU_MASK 0x80
86#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
87#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
88#define ST_GYRO_2_MULTIREAD_BIT true
89
90/* CUSTOM VALUES FOR SENSOR 3 */
91#define ST_GYRO_3_WAI_EXP 0xd7
92#define ST_GYRO_3_ODR_ADDR 0x20
93#define ST_GYRO_3_ODR_MASK 0xc0
94#define ST_GYRO_3_ODR_AVL_95HZ_VAL 0x00
95#define ST_GYRO_3_ODR_AVL_190HZ_VAL 0x01
96#define ST_GYRO_3_ODR_AVL_380HZ_VAL 0x02
97#define ST_GYRO_3_ODR_AVL_760HZ_VAL 0x03
98#define ST_GYRO_3_PW_ADDR 0x20
99#define ST_GYRO_3_PW_MASK 0x08
100#define ST_GYRO_3_FS_ADDR 0x23
101#define ST_GYRO_3_FS_MASK 0x30
102#define ST_GYRO_3_FS_AVL_250_VAL 0x00
103#define ST_GYRO_3_FS_AVL_500_VAL 0x01
104#define ST_GYRO_3_FS_AVL_2000_VAL 0x02
105#define ST_GYRO_3_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
106#define ST_GYRO_3_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
107#define ST_GYRO_3_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
108#define ST_GYRO_3_BDU_ADDR 0x23
109#define ST_GYRO_3_BDU_MASK 0x80
110#define ST_GYRO_3_DRDY_IRQ_ADDR 0x22
111#define ST_GYRO_3_DRDY_IRQ_INT2_MASK 0x08
112#define ST_GYRO_3_MULTIREAD_BIT true
113
114
115static const struct iio_chan_spec st_gyro_16bit_channels[] = { 42static const struct iio_chan_spec st_gyro_16bit_channels[] = {
116 ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, 43 ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
117 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 44 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +57,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
130 57
131static const struct st_sensor_settings st_gyro_sensors_settings[] = { 58static const struct st_sensor_settings st_gyro_sensors_settings[] = {
132 { 59 {
133 .wai = ST_GYRO_1_WAI_EXP, 60 .wai = 0xd3,
134 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 61 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
135 .sensors_supported = { 62 .sensors_supported = {
136 [0] = L3G4200D_GYRO_DEV_NAME, 63 [0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +65,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
138 }, 65 },
139 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, 66 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
140 .odr = { 67 .odr = {
141 .addr = ST_GYRO_1_ODR_ADDR, 68 .addr = 0x20,
142 .mask = ST_GYRO_1_ODR_MASK, 69 .mask = 0xc0,
143 .odr_avl = { 70 .odr_avl = {
144 { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, }, 71 { .hz = 100, .value = 0x00, },
145 { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, }, 72 { .hz = 200, .value = 0x01, },
146 { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, }, 73 { .hz = 400, .value = 0x02, },
147 { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, }, 74 { .hz = 800, .value = 0x03, },
148 }, 75 },
149 }, 76 },
150 .pw = { 77 .pw = {
151 .addr = ST_GYRO_1_PW_ADDR, 78 .addr = 0x20,
152 .mask = ST_GYRO_1_PW_MASK, 79 .mask = 0x08,
153 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 80 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
154 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 81 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
155 }, 82 },
@@ -158,33 +85,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
158 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 85 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
159 }, 86 },
160 .fs = { 87 .fs = {
161 .addr = ST_GYRO_1_FS_ADDR, 88 .addr = 0x23,
162 .mask = ST_GYRO_1_FS_MASK, 89 .mask = 0x30,
163 .fs_avl = { 90 .fs_avl = {
164 [0] = { 91 [0] = {
165 .num = ST_GYRO_FS_AVL_250DPS, 92 .num = ST_GYRO_FS_AVL_250DPS,
166 .value = ST_GYRO_1_FS_AVL_250_VAL, 93 .value = 0x00,
167 .gain = ST_GYRO_1_FS_AVL_250_GAIN, 94 .gain = IIO_DEGREE_TO_RAD(8750),
168 }, 95 },
169 [1] = { 96 [1] = {
170 .num = ST_GYRO_FS_AVL_500DPS, 97 .num = ST_GYRO_FS_AVL_500DPS,
171 .value = ST_GYRO_1_FS_AVL_500_VAL, 98 .value = 0x01,
172 .gain = ST_GYRO_1_FS_AVL_500_GAIN, 99 .gain = IIO_DEGREE_TO_RAD(17500),
173 }, 100 },
174 [2] = { 101 [2] = {
175 .num = ST_GYRO_FS_AVL_2000DPS, 102 .num = ST_GYRO_FS_AVL_2000DPS,
176 .value = ST_GYRO_1_FS_AVL_2000_VAL, 103 .value = 0x02,
177 .gain = ST_GYRO_1_FS_AVL_2000_GAIN, 104 .gain = IIO_DEGREE_TO_RAD(70000),
178 }, 105 },
179 }, 106 },
180 }, 107 },
181 .bdu = { 108 .bdu = {
182 .addr = ST_GYRO_1_BDU_ADDR, 109 .addr = 0x23,
183 .mask = ST_GYRO_1_BDU_MASK, 110 .mask = 0x80,
184 }, 111 },
185 .drdy_irq = { 112 .drdy_irq = {
186 .addr = ST_GYRO_1_DRDY_IRQ_ADDR, 113 .addr = 0x22,
187 .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK, 114 .mask_int2 = 0x08,
188 /* 115 /*
189 * The sensor has IHL (active low) and open 116 * The sensor has IHL (active low) and open
190 * drain settings, but only for INT1 and not 117 * drain settings, but only for INT1 and not
@@ -192,11 +119,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
192 */ 119 */
193 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 120 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
194 }, 121 },
195 .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT, 122 .multi_read_bit = true,
196 .bootime = 2, 123 .bootime = 2,
197 }, 124 },
198 { 125 {
199 .wai = ST_GYRO_2_WAI_EXP, 126 .wai = 0xd4,
200 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 127 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
201 .sensors_supported = { 128 .sensors_supported = {
202 [0] = L3GD20_GYRO_DEV_NAME, 129 [0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +135,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
208 }, 135 },
209 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, 136 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
210 .odr = { 137 .odr = {
211 .addr = ST_GYRO_2_ODR_ADDR, 138 .addr = 0x20,
212 .mask = ST_GYRO_2_ODR_MASK, 139 .mask = 0xc0,
213 .odr_avl = { 140 .odr_avl = {
214 { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, }, 141 { .hz = 95, .value = 0x00, },
215 { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, }, 142 { .hz = 190, .value = 0x01, },
216 { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, }, 143 { .hz = 380, .value = 0x02, },
217 { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, }, 144 { .hz = 760, .value = 0x03, },
218 }, 145 },
219 }, 146 },
220 .pw = { 147 .pw = {
221 .addr = ST_GYRO_2_PW_ADDR, 148 .addr = 0x20,
222 .mask = ST_GYRO_2_PW_MASK, 149 .mask = 0x08,
223 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 150 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
224 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 151 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
225 }, 152 },
@@ -228,33 +155,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
228 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 155 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
229 }, 156 },
230 .fs = { 157 .fs = {
231 .addr = ST_GYRO_2_FS_ADDR, 158 .addr = 0x23,
232 .mask = ST_GYRO_2_FS_MASK, 159 .mask = 0x30,
233 .fs_avl = { 160 .fs_avl = {
234 [0] = { 161 [0] = {
235 .num = ST_GYRO_FS_AVL_250DPS, 162 .num = ST_GYRO_FS_AVL_250DPS,
236 .value = ST_GYRO_2_FS_AVL_250_VAL, 163 .value = 0x00,
237 .gain = ST_GYRO_2_FS_AVL_250_GAIN, 164 .gain = IIO_DEGREE_TO_RAD(8750),
238 }, 165 },
239 [1] = { 166 [1] = {
240 .num = ST_GYRO_FS_AVL_500DPS, 167 .num = ST_GYRO_FS_AVL_500DPS,
241 .value = ST_GYRO_2_FS_AVL_500_VAL, 168 .value = 0x01,
242 .gain = ST_GYRO_2_FS_AVL_500_GAIN, 169 .gain = IIO_DEGREE_TO_RAD(17500),
243 }, 170 },
244 [2] = { 171 [2] = {
245 .num = ST_GYRO_FS_AVL_2000DPS, 172 .num = ST_GYRO_FS_AVL_2000DPS,
246 .value = ST_GYRO_2_FS_AVL_2000_VAL, 173 .value = 0x02,
247 .gain = ST_GYRO_2_FS_AVL_2000_GAIN, 174 .gain = IIO_DEGREE_TO_RAD(70000),
248 }, 175 },
249 }, 176 },
250 }, 177 },
251 .bdu = { 178 .bdu = {
252 .addr = ST_GYRO_2_BDU_ADDR, 179 .addr = 0x23,
253 .mask = ST_GYRO_2_BDU_MASK, 180 .mask = 0x80,
254 }, 181 },
255 .drdy_irq = { 182 .drdy_irq = {
256 .addr = ST_GYRO_2_DRDY_IRQ_ADDR, 183 .addr = 0x22,
257 .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK, 184 .mask_int2 = 0x08,
258 /* 185 /*
259 * The sensor has IHL (active low) and open 186 * The sensor has IHL (active low) and open
260 * drain settings, but only for INT1 and not 187 * drain settings, but only for INT1 and not
@@ -262,29 +189,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
262 */ 189 */
263 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 190 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
264 }, 191 },
265 .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT, 192 .multi_read_bit = true,
266 .bootime = 2, 193 .bootime = 2,
267 }, 194 },
268 { 195 {
269 .wai = ST_GYRO_3_WAI_EXP, 196 .wai = 0xd7,
270 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 197 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
271 .sensors_supported = { 198 .sensors_supported = {
272 [0] = L3GD20_GYRO_DEV_NAME, 199 [0] = L3GD20_GYRO_DEV_NAME,
273 }, 200 },
274 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, 201 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
275 .odr = { 202 .odr = {
276 .addr = ST_GYRO_3_ODR_ADDR, 203 .addr = 0x20,
277 .mask = ST_GYRO_3_ODR_MASK, 204 .mask = 0xc0,
278 .odr_avl = { 205 .odr_avl = {
279 { 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, }, 206 { .hz = 95, .value = 0x00, },
280 { 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, }, 207 { .hz = 190, .value = 0x01, },
281 { 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, }, 208 { .hz = 380, .value = 0x02, },
282 { 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, }, 209 { .hz = 760, .value = 0x03, },
283 }, 210 },
284 }, 211 },
285 .pw = { 212 .pw = {
286 .addr = ST_GYRO_3_PW_ADDR, 213 .addr = 0x20,
287 .mask = ST_GYRO_3_PW_MASK, 214 .mask = 0x08,
288 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 215 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
289 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 216 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
290 }, 217 },
@@ -293,33 +220,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
293 .mask = ST_SENSORS_DEFAULT_AXIS_MASK, 220 .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
294 }, 221 },
295 .fs = { 222 .fs = {
296 .addr = ST_GYRO_3_FS_ADDR, 223 .addr = 0x23,
297 .mask = ST_GYRO_3_FS_MASK, 224 .mask = 0x30,
298 .fs_avl = { 225 .fs_avl = {
299 [0] = { 226 [0] = {
300 .num = ST_GYRO_FS_AVL_250DPS, 227 .num = ST_GYRO_FS_AVL_250DPS,
301 .value = ST_GYRO_3_FS_AVL_250_VAL, 228 .value = 0x00,
302 .gain = ST_GYRO_3_FS_AVL_250_GAIN, 229 .gain = IIO_DEGREE_TO_RAD(8750),
303 }, 230 },
304 [1] = { 231 [1] = {
305 .num = ST_GYRO_FS_AVL_500DPS, 232 .num = ST_GYRO_FS_AVL_500DPS,
306 .value = ST_GYRO_3_FS_AVL_500_VAL, 233 .value = 0x01,
307 .gain = ST_GYRO_3_FS_AVL_500_GAIN, 234 .gain = IIO_DEGREE_TO_RAD(17500),
308 }, 235 },
309 [2] = { 236 [2] = {
310 .num = ST_GYRO_FS_AVL_2000DPS, 237 .num = ST_GYRO_FS_AVL_2000DPS,
311 .value = ST_GYRO_3_FS_AVL_2000_VAL, 238 .value = 0x02,
312 .gain = ST_GYRO_3_FS_AVL_2000_GAIN, 239 .gain = IIO_DEGREE_TO_RAD(70000),
313 }, 240 },
314 }, 241 },
315 }, 242 },
316 .bdu = { 243 .bdu = {
317 .addr = ST_GYRO_3_BDU_ADDR, 244 .addr = 0x23,
318 .mask = ST_GYRO_3_BDU_MASK, 245 .mask = 0x80,
319 }, 246 },
320 .drdy_irq = { 247 .drdy_irq = {
321 .addr = ST_GYRO_3_DRDY_IRQ_ADDR, 248 .addr = 0x22,
322 .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK, 249 .mask_int2 = 0x08,
323 /* 250 /*
324 * The sensor has IHL (active low) and open 251 * The sensor has IHL (active low) and open
325 * drain settings, but only for INT1 and not 252 * drain settings, but only for INT1 and not
@@ -327,7 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
327 */ 254 */
328 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 255 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
329 }, 256 },
330 .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT, 257 .multi_read_bit = true,
331 .bootime = 2, 258 .bootime = 2,
332 }, 259 },
333}; 260};
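The st_gyro hunk above drops the per-device macro indirection and writes register addresses, masks, and gains inline; the gains are now expressed through IIO_DEGREE_TO_RAD(), i.e. the table carries the datasheet sensitivities in micro-degrees-per-second per digit while the exported scale stays in rad/s. A hedged arithmetic sketch, assuming IIO_DEGREE_TO_RAD() is defined in include/linux/iio/iio.h roughly as below:

/* Assumed helper (sketch): converts micro-degrees to micro-radians. */
#define IIO_DEGREE_TO_RAD(deg)	(((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/* Full-scale settings used in the table above map approximately as: */
/*  250 dps ->  8.75 mdps/digit -> IIO_DEGREE_TO_RAD(8750)  ~  153 urad/s per digit */
/*  500 dps -> 17.5  mdps/digit -> IIO_DEGREE_TO_RAD(17500) ~  305 urad/s per digit */
/* 2000 dps -> 70    mdps/digit -> IIO_DEGREE_TO_RAD(70000) ~ 1222 urad/s per digit */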
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b17e2e2bd4f5..912477d54be2 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -27,6 +27,8 @@ config DHT11
27config HDC100X 27config HDC100X
28 tristate "TI HDC100x relative humidity and temperature sensor" 28 tristate "TI HDC100x relative humidity and temperature sensor"
29 depends on I2C 29 depends on I2C
30 select IIO_BUFFER
31 select IIO_TRIGGERED_BUFFER
30 help 32 help
31 Say yes here to build support for the Texas Instruments 33 Say yes here to build support for the Texas Instruments
32 HDC1000 and HDC1008 relative humidity and temperature sensors. 34 HDC1000 and HDC1008 relative humidity and temperature sensors.
@@ -34,6 +36,28 @@ config HDC100X
34 To compile this driver as a module, choose M here: the module 36 To compile this driver as a module, choose M here: the module
35 will be called hdc100x. 37 will be called hdc100x.
36 38
39config HTS221
40 tristate "STMicroelectronics HTS221 sensor Driver"
41 depends on (I2C || SPI)
42 select IIO_BUFFER
43 select IIO_TRIGGERED_BUFFER
44 select HTS221_I2C if (I2C)
45 select HTS221_SPI if (SPI_MASTER)
46 help
47 Say yes here to build support for STMicroelectronics HTS221
48 temperature-humidity sensor
49
50 To compile this driver as a module, choose M here: the module
51 will be called hts221.
52
53config HTS221_I2C
54 tristate
55 depends on HTS221
56
57config HTS221_SPI
58 tristate
59 depends on HTS221
60
37config HTU21 61config HTU21
38 tristate "Measurement Specialties HTU21 humidity & temperature sensor" 62 tristate "Measurement Specialties HTU21 humidity & temperature sensor"
39 depends on I2C 63 depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 4a73442fcd9c..a6850e47c100 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,13 @@
5obj-$(CONFIG_AM2315) += am2315.o 5obj-$(CONFIG_AM2315) += am2315.o
6obj-$(CONFIG_DHT11) += dht11.o 6obj-$(CONFIG_DHT11) += dht11.o
7obj-$(CONFIG_HDC100X) += hdc100x.o 7obj-$(CONFIG_HDC100X) += hdc100x.o
8
9hts221-y := hts221_core.o \
10 hts221_buffer.o
11obj-$(CONFIG_HTS221) += hts221.o
12obj-$(CONFIG_HTS221_I2C) += hts221_i2c.o
13obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
14
8obj-$(CONFIG_HTU21) += htu21.o 15obj-$(CONFIG_HTU21) += htu21.o
9obj-$(CONFIG_SI7005) += si7005.o 16obj-$(CONFIG_SI7005) += si7005.o
10obj-$(CONFIG_SI7020) += si7020.o 17obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70c2a4a..265c34da52d1 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -22,11 +22,15 @@
22 22
23#include <linux/iio/iio.h> 23#include <linux/iio/iio.h>
24#include <linux/iio/sysfs.h> 24#include <linux/iio/sysfs.h>
25#include <linux/iio/buffer.h>
26#include <linux/iio/trigger_consumer.h>
27#include <linux/iio/triggered_buffer.h>
25 28
26#define HDC100X_REG_TEMP 0x00 29#define HDC100X_REG_TEMP 0x00
27#define HDC100X_REG_HUMIDITY 0x01 30#define HDC100X_REG_HUMIDITY 0x01
28 31
29#define HDC100X_REG_CONFIG 0x02 32#define HDC100X_REG_CONFIG 0x02
33#define HDC100X_REG_CONFIG_ACQ_MODE BIT(12)
30#define HDC100X_REG_CONFIG_HEATER_EN BIT(13) 34#define HDC100X_REG_CONFIG_HEATER_EN BIT(13)
31 35
32struct hdc100x_data { 36struct hdc100x_data {
@@ -87,22 +91,40 @@ static const struct iio_chan_spec hdc100x_channels[] = {
87 BIT(IIO_CHAN_INFO_SCALE) | 91 BIT(IIO_CHAN_INFO_SCALE) |
88 BIT(IIO_CHAN_INFO_INT_TIME) | 92 BIT(IIO_CHAN_INFO_INT_TIME) |
89 BIT(IIO_CHAN_INFO_OFFSET), 93 BIT(IIO_CHAN_INFO_OFFSET),
94 .scan_index = 0,
95 .scan_type = {
96 .sign = 's',
97 .realbits = 16,
98 .storagebits = 16,
99 .endianness = IIO_BE,
100 },
90 }, 101 },
91 { 102 {
92 .type = IIO_HUMIDITYRELATIVE, 103 .type = IIO_HUMIDITYRELATIVE,
93 .address = HDC100X_REG_HUMIDITY, 104 .address = HDC100X_REG_HUMIDITY,
94 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 105 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
95 BIT(IIO_CHAN_INFO_SCALE) | 106 BIT(IIO_CHAN_INFO_SCALE) |
96 BIT(IIO_CHAN_INFO_INT_TIME) 107 BIT(IIO_CHAN_INFO_INT_TIME),
108 .scan_index = 1,
109 .scan_type = {
110 .sign = 'u',
111 .realbits = 16,
112 .storagebits = 16,
113 .endianness = IIO_BE,
114 },
97 }, 115 },
98 { 116 {
99 .type = IIO_CURRENT, 117 .type = IIO_CURRENT,
100 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 118 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
101 .extend_name = "heater", 119 .extend_name = "heater",
102 .output = 1, 120 .output = 1,
121 .scan_index = -1,
103 }, 122 },
123 IIO_CHAN_SOFT_TIMESTAMP(2),
104}; 124};
105 125
126static const unsigned long hdc100x_scan_masks[] = {0x3, 0};
127
106static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val) 128static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val)
107{ 129{
108 int tmp = (~mask & data->config) | val; 130 int tmp = (~mask & data->config) | val;
@@ -183,7 +205,14 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
183 *val = hdc100x_get_heater_status(data); 205 *val = hdc100x_get_heater_status(data);
184 ret = IIO_VAL_INT; 206 ret = IIO_VAL_INT;
185 } else { 207 } else {
208 ret = iio_device_claim_direct_mode(indio_dev);
209 if (ret) {
210 mutex_unlock(&data->lock);
211 return ret;
212 }
213
186 ret = hdc100x_get_measurement(data, chan); 214 ret = hdc100x_get_measurement(data, chan);
215 iio_device_release_direct_mode(indio_dev);
187 if (ret >= 0) { 216 if (ret >= 0) {
188 *val = ret; 217 *val = ret;
189 ret = IIO_VAL_INT; 218 ret = IIO_VAL_INT;
@@ -246,6 +275,78 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
246 } 275 }
247} 276}
248 277
278static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
279{
280 struct hdc100x_data *data = iio_priv(indio_dev);
281 int ret;
282
283 /* Buffer is enabled. First set ACQ Mode, then attach poll func */
284 mutex_lock(&data->lock);
285 ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
286 HDC100X_REG_CONFIG_ACQ_MODE);
287 mutex_unlock(&data->lock);
288 if (ret)
289 return ret;
290
291 return iio_triggered_buffer_postenable(indio_dev);
292}
293
294static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
295{
296 struct hdc100x_data *data = iio_priv(indio_dev);
297 int ret;
298
299 /* First detach poll func, then reset ACQ mode. OK to disable buffer */
300 ret = iio_triggered_buffer_predisable(indio_dev);
301 if (ret)
302 return ret;
303
304 mutex_lock(&data->lock);
305 ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
306 mutex_unlock(&data->lock);
307
308 return ret;
309}
310
311static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
312 .postenable = hdc100x_buffer_postenable,
313 .predisable = hdc100x_buffer_predisable,
314};
315
316static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
317{
318 struct iio_poll_func *pf = p;
319 struct iio_dev *indio_dev = pf->indio_dev;
320 struct hdc100x_data *data = iio_priv(indio_dev);
321 struct i2c_client *client = data->client;
322 int delay = data->adc_int_us[0] + data->adc_int_us[1];
323 int ret;
324 s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
325
326 /* dual read starts at temp register */
327 mutex_lock(&data->lock);
328 ret = i2c_smbus_write_byte(client, HDC100X_REG_TEMP);
329 if (ret < 0) {
330 dev_err(&client->dev, "cannot start measurement\n");
331 goto err;
332 }
333 usleep_range(delay, delay + 1000);
334
335 ret = i2c_master_recv(client, (u8 *)buf, 4);
336 if (ret < 0) {
337 dev_err(&client->dev, "cannot read sensor data\n");
338 goto err;
339 }
340
341 iio_push_to_buffers_with_timestamp(indio_dev, buf,
342 iio_get_time_ns(indio_dev));
343err:
344 mutex_unlock(&data->lock);
345 iio_trigger_notify_done(indio_dev->trig);
346
347 return IRQ_HANDLED;
348}
349
249static const struct iio_info hdc100x_info = { 350static const struct iio_info hdc100x_info = {
250 .read_raw = hdc100x_read_raw, 351 .read_raw = hdc100x_read_raw,
251 .write_raw = hdc100x_write_raw, 352 .write_raw = hdc100x_write_raw,
@@ -258,6 +359,7 @@ static int hdc100x_probe(struct i2c_client *client,
258{ 359{
259 struct iio_dev *indio_dev; 360 struct iio_dev *indio_dev;
260 struct hdc100x_data *data; 361 struct hdc100x_data *data;
362 int ret;
261 363
262 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA | 364 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
263 I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) 365 I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
@@ -279,12 +381,35 @@ static int hdc100x_probe(struct i2c_client *client,
279 381
280 indio_dev->channels = hdc100x_channels; 382 indio_dev->channels = hdc100x_channels;
281 indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels); 383 indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels);
384 indio_dev->available_scan_masks = hdc100x_scan_masks;
282 385
283 /* be sure we are in a known state */ 386 /* be sure we are in a known state */
284 hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]); 387 hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]);
285 hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]); 388 hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
389 hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
390
391 ret = iio_triggered_buffer_setup(indio_dev, NULL,
392 hdc100x_trigger_handler,
393 &hdc_buffer_setup_ops);
394 if (ret < 0) {
395 dev_err(&client->dev, "iio triggered buffer setup failed\n");
396 return ret;
397 }
398 ret = iio_device_register(indio_dev);
399 if (ret < 0)
400 iio_triggered_buffer_cleanup(indio_dev);
401
402 return ret;
403}
404
405static int hdc100x_remove(struct i2c_client *client)
406{
407 struct iio_dev *indio_dev = i2c_get_clientdata(client);
408
409 iio_device_unregister(indio_dev);
410 iio_triggered_buffer_cleanup(indio_dev);
286 411
287 return devm_iio_device_register(&client->dev, indio_dev); 412 return 0;
288} 413}
289 414
290static const struct i2c_device_id hdc100x_id[] = { 415static const struct i2c_device_id hdc100x_id[] = {
@@ -298,6 +423,7 @@ static struct i2c_driver hdc100x_driver = {
298 .name = "hdc100x", 423 .name = "hdc100x",
299 }, 424 },
300 .probe = hdc100x_probe, 425 .probe = hdc100x_probe,
426 .remove = hdc100x_remove,
301 .id_table = hdc100x_id, 427 .id_table = hdc100x_id,
302}; 428};
303module_i2c_driver(hdc100x_driver); 429module_i2c_driver(hdc100x_driver);
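The hdc100x hunks above build the triggered buffer on the chip's acquisition mode: hdc100x_buffer_postenable() sets HDC100X_REG_CONFIG_ACQ_MODE, after which pointing the register pointer at HDC100X_REG_TEMP starts a combined conversion and both results come back in a single 4-byte read in hdc100x_trigger_handler(). A short sketch of the resulting scan, assuming both channels are enabled (the only mask the driver advertises, 0x3):

/*
 * Layout of the 16-byte sample pushed by hdc100x_trigger_handler() above:
 *
 *   buf[0]      temperature, 16-bit big-endian, signed
 *   buf[1]      relative humidity, 16-bit big-endian, unsigned
 *   buf[2..3]   padding to an 8-byte boundary
 *   buf[4..7]   s64 timestamp filled in by
 *               iio_push_to_buffers_with_timestamp()
 */
s16 buf[8];	/* 2 x s16 + padding + 8 byte timestamp */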
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
new file mode 100644
index 000000000000..c7154665512e
--- /dev/null
+++ b/drivers/iio/humidity/hts221.h
@@ -0,0 +1,73 @@
1/*
2 * STMicroelectronics hts221 sensor driver
3 *
4 * Copyright 2016 STMicroelectronics Inc.
5 *
6 * Lorenzo Bianconi <lorenzo.bianconi@st.com>
7 *
8 * Licensed under the GPL-2.
9 */
10
11#ifndef HTS221_H
12#define HTS221_H
13
14#define HTS221_DEV_NAME "hts221"
15
16#include <linux/iio/iio.h>
17
18#define HTS221_RX_MAX_LENGTH 8
19#define HTS221_TX_MAX_LENGTH 8
20
21#define HTS221_DATA_SIZE 2
22
23struct hts221_transfer_buffer {
24 u8 rx_buf[HTS221_RX_MAX_LENGTH];
25 u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
26};
27
28struct hts221_transfer_function {
29 int (*read)(struct device *dev, u8 addr, int len, u8 *data);
30 int (*write)(struct device *dev, u8 addr, int len, u8 *data);
31};
32
33#define HTS221_AVG_DEPTH 8
34struct hts221_avg_avl {
35 u16 avg;
36 u8 val;
37};
38
39enum hts221_sensor_type {
40 HTS221_SENSOR_H,
41 HTS221_SENSOR_T,
42 HTS221_SENSOR_MAX,
43};
44
45struct hts221_sensor {
46 u8 cur_avg_idx;
47 int slope, b_gen;
48};
49
50struct hts221_hw {
51 const char *name;
52 struct device *dev;
53
54 struct mutex lock;
55 struct iio_trigger *trig;
56 int irq;
57
58 struct hts221_sensor sensors[HTS221_SENSOR_MAX];
59
60 u8 odr;
61
62 const struct hts221_transfer_function *tf;
63 struct hts221_transfer_buffer tb;
64};
65
66int hts221_config_drdy(struct hts221_hw *hw, bool enable);
67int hts221_probe(struct iio_dev *iio_dev);
68int hts221_power_on(struct hts221_hw *hw);
69int hts221_power_off(struct hts221_hw *hw);
70int hts221_allocate_buffers(struct hts221_hw *hw);
71int hts221_allocate_trigger(struct hts221_hw *hw);
72
73#endif /* HTS221_H */
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
new file mode 100644
index 000000000000..72ddcdac21a2
--- /dev/null
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -0,0 +1,168 @@
1/*
2 * STMicroelectronics hts221 sensor driver
3 *
4 * Copyright 2016 STMicroelectronics Inc.
5 *
6 * Lorenzo Bianconi <lorenzo.bianconi@st.com>
7 *
8 * Licensed under the GPL-2.
9 */
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/device.h>
13#include <linux/interrupt.h>
14#include <linux/irqreturn.h>
15
16#include <linux/iio/iio.h>
17#include <linux/iio/trigger.h>
18#include <linux/iio/events.h>
19#include <linux/iio/trigger_consumer.h>
20#include <linux/iio/triggered_buffer.h>
21#include <linux/iio/buffer.h>
22
23#include "hts221.h"
24
25#define HTS221_REG_STATUS_ADDR 0x27
26#define HTS221_RH_DRDY_MASK BIT(1)
27#define HTS221_TEMP_DRDY_MASK BIT(0)
28
29static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
30{
31 struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
32 struct hts221_hw *hw = iio_priv(iio_dev);
33
34 return hts221_config_drdy(hw, state);
35}
36
37static const struct iio_trigger_ops hts221_trigger_ops = {
38 .owner = THIS_MODULE,
39 .set_trigger_state = hts221_trig_set_state,
40};
41
42static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
43{
44 struct hts221_hw *hw = (struct hts221_hw *)private;
45 u8 status;
46 int err;
47
48 err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
49 &status);
50 if (err < 0)
51 return IRQ_HANDLED;
52
53 /*
54 * H_DA bit (humidity data available) is routed to DRDY line.
55 * Humidity sample is computed after temperature one.
56 * Here we can assume data channels are both available if H_DA bit
57 * is set in status register
58 */
59 if (!(status & HTS221_RH_DRDY_MASK))
60 return IRQ_NONE;
61
62 iio_trigger_poll_chained(hw->trig);
63
64 return IRQ_HANDLED;
65}
66
67int hts221_allocate_trigger(struct hts221_hw *hw)
68{
69 struct iio_dev *iio_dev = iio_priv_to_dev(hw);
70 unsigned long irq_type;
71 int err;
72
73 irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
74
75 switch (irq_type) {
76 case IRQF_TRIGGER_HIGH:
77 case IRQF_TRIGGER_RISING:
78 break;
79 default:
80 dev_info(hw->dev,
81 "mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
82 irq_type);
83 irq_type = IRQF_TRIGGER_RISING;
84 break;
85 }
86
87 err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
88 hts221_trigger_handler_thread,
89 irq_type | IRQF_ONESHOT,
90 hw->name, hw);
91 if (err) {
92 dev_err(hw->dev, "failed to request trigger irq %d\n",
93 hw->irq);
94 return err;
95 }
96
97 hw->trig = devm_iio_trigger_alloc(hw->dev, "%s-trigger",
98 iio_dev->name);
99 if (!hw->trig)
100 return -ENOMEM;
101
102 iio_trigger_set_drvdata(hw->trig, iio_dev);
103 hw->trig->ops = &hts221_trigger_ops;
104 hw->trig->dev.parent = hw->dev;
105 iio_dev->trig = iio_trigger_get(hw->trig);
106
107 return devm_iio_trigger_register(hw->dev, hw->trig);
108}
109
110static int hts221_buffer_preenable(struct iio_dev *iio_dev)
111{
112 return hts221_power_on(iio_priv(iio_dev));
113}
114
115static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
116{
117 return hts221_power_off(iio_priv(iio_dev));
118}
119
120static const struct iio_buffer_setup_ops hts221_buffer_ops = {
121 .preenable = hts221_buffer_preenable,
122 .postenable = iio_triggered_buffer_postenable,
123 .predisable = iio_triggered_buffer_predisable,
124 .postdisable = hts221_buffer_postdisable,
125};
126
127static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
128{
129 u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
130 struct iio_poll_func *pf = p;
131 struct iio_dev *iio_dev = pf->indio_dev;
132 struct hts221_hw *hw = iio_priv(iio_dev);
133 struct iio_chan_spec const *ch;
134 int err;
135
136 /* humidity data */
137 ch = &iio_dev->channels[HTS221_SENSOR_H];
138 err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
139 buffer);
140 if (err < 0)
141 goto out;
142
143 /* temperature data */
144 ch = &iio_dev->channels[HTS221_SENSOR_T];
145 err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
146 buffer + HTS221_DATA_SIZE);
147 if (err < 0)
148 goto out;
149
150 iio_push_to_buffers_with_timestamp(iio_dev, buffer,
151 iio_get_time_ns(iio_dev));
152
153out:
154 iio_trigger_notify_done(hw->trig);
155
156 return IRQ_HANDLED;
157}
158
159int hts221_allocate_buffers(struct hts221_hw *hw)
160{
161 return devm_iio_triggered_buffer_setup(hw->dev, iio_priv_to_dev(hw),
162 NULL, hts221_buffer_handler_thread,
163 &hts221_buffer_ops);
164}
165
166MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
167MODULE_DESCRIPTION("STMicroelectronics hts221 buffer driver");
168MODULE_LICENSE("GPL v2");
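The hts221 handler above sizes its bounce buffer as ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64), which with both channels enabled (scan mask 0x3) works out to 16 bytes. A sketch of that layout, kept as a comment since it only spells out the arithmetic:

/*
 * hts221 triggered-buffer scan (sketch):
 *
 *   offset 0..1    humidity sample, le16, from HTS221_REG_H_OUT_L
 *   offset 2..3    temperature sample, le16, from HTS221_REG_T_OUT_L
 *   offset 4..7    padding from ALIGN(4, sizeof(s64))
 *   offset 8..15   s64 timestamp added by iio_push_to_buffers_with_timestamp()
 */
u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];	/* 16 bytes */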
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
new file mode 100644
index 000000000000..3f3ef4a1a474
--- /dev/null
+++ b/drivers/iio/humidity/hts221_core.c
@@ -0,0 +1,687 @@
1/*
2 * STMicroelectronics hts221 sensor driver
3 *
4 * Copyright 2016 STMicroelectronics Inc.
5 *
6 * Lorenzo Bianconi <lorenzo.bianconi@st.com>
7 *
8 * Licensed under the GPL-2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/iio/sysfs.h>
15#include <linux/delay.h>
16#include <asm/unaligned.h>
17
18#include "hts221.h"
19
20#define HTS221_REG_WHOAMI_ADDR 0x0f
21#define HTS221_REG_WHOAMI_VAL 0xbc
22
23#define HTS221_REG_CNTRL1_ADDR 0x20
24#define HTS221_REG_CNTRL2_ADDR 0x21
25#define HTS221_REG_CNTRL3_ADDR 0x22
26
27#define HTS221_REG_AVG_ADDR 0x10
28#define HTS221_REG_H_OUT_L 0x28
29#define HTS221_REG_T_OUT_L 0x2a
30
31#define HTS221_HUMIDITY_AVG_MASK 0x07
32#define HTS221_TEMP_AVG_MASK 0x38
33
34#define HTS221_ODR_MASK 0x87
35#define HTS221_BDU_MASK BIT(2)
36
37#define HTS221_DRDY_MASK BIT(2)
38
39#define HTS221_ENABLE_SENSOR BIT(7)
40
41#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */
42#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */
43#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */
44#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */
45#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */
46#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */
47#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */
48#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */
49
50#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */
51#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */
52#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */
53#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */
54#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */
55#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */
56#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */
57#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */
58
59/* calibration registers */
60#define HTS221_REG_0RH_CAL_X_H 0x36
61#define HTS221_REG_1RH_CAL_X_H 0x3a
62#define HTS221_REG_0RH_CAL_Y_H 0x30
63#define HTS221_REG_1RH_CAL_Y_H 0x31
64#define HTS221_REG_0T_CAL_X_L 0x3c
65#define HTS221_REG_1T_CAL_X_L 0x3e
66#define HTS221_REG_0T_CAL_Y_H 0x32
67#define HTS221_REG_1T_CAL_Y_H 0x33
68#define HTS221_REG_T1_T0_CAL_Y_H 0x35
69
70struct hts221_odr {
71 u8 hz;
72 u8 val;
73};
74
75struct hts221_avg {
76 u8 addr;
77 u8 mask;
78 struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
79};
80
81static const struct hts221_odr hts221_odr_table[] = {
82 { 1, 0x01 }, /* 1Hz */
83 { 7, 0x02 }, /* 7Hz */
84 { 13, 0x03 }, /* 12.5Hz */
85};
86
87static const struct hts221_avg hts221_avg_list[] = {
88 {
89 .addr = HTS221_REG_AVG_ADDR,
90 .mask = HTS221_HUMIDITY_AVG_MASK,
91 .avg_avl = {
92 { 4, HTS221_HUMIDITY_AVG_4 },
93 { 8, HTS221_HUMIDITY_AVG_8 },
94 { 16, HTS221_HUMIDITY_AVG_16 },
95 { 32, HTS221_HUMIDITY_AVG_32 },
96 { 64, HTS221_HUMIDITY_AVG_64 },
97 { 128, HTS221_HUMIDITY_AVG_128 },
98 { 256, HTS221_HUMIDITY_AVG_256 },
99 { 512, HTS221_HUMIDITY_AVG_512 },
100 },
101 },
102 {
103 .addr = HTS221_REG_AVG_ADDR,
104 .mask = HTS221_TEMP_AVG_MASK,
105 .avg_avl = {
106 { 2, HTS221_TEMP_AVG_2 },
107 { 4, HTS221_TEMP_AVG_4 },
108 { 8, HTS221_TEMP_AVG_8 },
109 { 16, HTS221_TEMP_AVG_16 },
110 { 32, HTS221_TEMP_AVG_32 },
111 { 64, HTS221_TEMP_AVG_64 },
112 { 128, HTS221_TEMP_AVG_128 },
113 { 256, HTS221_TEMP_AVG_256 },
114 },
115 },
116};
117
118static const struct iio_chan_spec hts221_channels[] = {
119 {
120 .type = IIO_HUMIDITYRELATIVE,
121 .address = HTS221_REG_H_OUT_L,
122 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
123 BIT(IIO_CHAN_INFO_OFFSET) |
124 BIT(IIO_CHAN_INFO_SCALE) |
125 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
126 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
127 .scan_index = 0,
128 .scan_type = {
129 .sign = 's',
130 .realbits = 16,
131 .storagebits = 16,
132 .endianness = IIO_LE,
133 },
134 },
135 {
136 .type = IIO_TEMP,
137 .address = HTS221_REG_T_OUT_L,
138 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
139 BIT(IIO_CHAN_INFO_OFFSET) |
140 BIT(IIO_CHAN_INFO_SCALE) |
141 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
142 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
143 .scan_index = 1,
144 .scan_type = {
145 .sign = 's',
146 .realbits = 16,
147 .storagebits = 16,
148 .endianness = IIO_LE,
149 },
150 },
151 IIO_CHAN_SOFT_TIMESTAMP(2),
152};
153
154static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
155 u8 val)
156{
157 u8 data;
158 int err;
159
160 mutex_lock(&hw->lock);
161
162 err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
163 if (err < 0) {
164 dev_err(hw->dev, "failed to read %02x register\n", addr);
165 goto unlock;
166 }
167
168 data = (data & ~mask) | (val & mask);
169
170 err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
171 if (err < 0)
172 dev_err(hw->dev, "failed to write %02x register\n", addr);
173
174unlock:
175 mutex_unlock(&hw->lock);
176
177 return err;
178}
179
180static int hts221_check_whoami(struct hts221_hw *hw)
181{
182 u8 data;
183 int err;
184
185 err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
186 &data);
187 if (err < 0) {
188 dev_err(hw->dev, "failed to read whoami register\n");
189 return err;
190 }
191
192 if (data != HTS221_REG_WHOAMI_VAL) {
193 dev_err(hw->dev, "wrong whoami {%02x vs %02x}\n",
194 data, HTS221_REG_WHOAMI_VAL);
195 return -ENODEV;
196 }
197
198 return 0;
199}
200
201int hts221_config_drdy(struct hts221_hw *hw, bool enable)
202{
203 u8 val = enable ? BIT(2) : 0;
204 int err;
205
206 err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
207 HTS221_DRDY_MASK, val);
208
209 return err < 0 ? err : 0;
210}
211
212static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
213{
214 int i, err;
215 u8 val;
216
217 for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
218 if (hts221_odr_table[i].hz == odr)
219 break;
220
221 if (i == ARRAY_SIZE(hts221_odr_table))
222 return -EINVAL;
223
224 val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
225 err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
226 HTS221_ODR_MASK, val);
227 if (err < 0)
228 return err;
229
230 hw->odr = odr;
231
232 return 0;
233}
234
235static int hts221_update_avg(struct hts221_hw *hw,
236 enum hts221_sensor_type type,
237 u16 val)
238{
239 int i, err;
240 const struct hts221_avg *avg = &hts221_avg_list[type];
241
242 for (i = 0; i < HTS221_AVG_DEPTH; i++)
243 if (avg->avg_avl[i].avg == val)
244 break;
245
246 if (i == HTS221_AVG_DEPTH)
247 return -EINVAL;
248
249 err = hts221_write_with_mask(hw, avg->addr, avg->mask,
250 avg->avg_avl[i].val);
251 if (err < 0)
252 return err;
253
254 hw->sensors[type].cur_avg_idx = i;
255
256 return 0;
257}
258
259static ssize_t hts221_sysfs_sampling_freq(struct device *dev,
260 struct device_attribute *attr,
261 char *buf)
262{
263 int i;
264 ssize_t len = 0;
265
266 for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
267 len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
268 hts221_odr_table[i].hz);
269 buf[len - 1] = '\n';
270
271 return len;
272}
273
274static ssize_t
275hts221_sysfs_rh_oversampling_avail(struct device *dev,
276 struct device_attribute *attr,
277 char *buf)
278{
279 const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];
280 ssize_t len = 0;
281 int i;
282
283 for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
284 len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
285 avg->avg_avl[i].avg);
286 buf[len - 1] = '\n';
287
288 return len;
289}
290
291static ssize_t
292hts221_sysfs_temp_oversampling_avail(struct device *dev,
293 struct device_attribute *attr,
294 char *buf)
295{
296 const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];
297 ssize_t len = 0;
298 int i;
299
300 for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
301 len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
302 avg->avg_avl[i].avg);
303 buf[len - 1] = '\n';
304
305 return len;
306}
307
308int hts221_power_on(struct hts221_hw *hw)
309{
310 return hts221_update_odr(hw, hw->odr);
311}
312
313int hts221_power_off(struct hts221_hw *hw)
314{
315 u8 data[] = {0x00, 0x00};
316
317 return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
318 data);
319}
320
321static int hts221_parse_temp_caldata(struct hts221_hw *hw)
322{
323 int err, *slope, *b_gen;
324 s16 cal_x0, cal_x1, cal_y0, cal_y1;
325 u8 cal0, cal1;
326
327 err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
328 sizeof(cal0), &cal0);
329 if (err < 0)
330 return err;
331
332 err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
333 sizeof(cal1), &cal1);
334 if (err < 0)
335 return err;
336 cal_y0 = (le16_to_cpu(cal1 & 0x3) << 8) | cal0;
337
338 err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
339 sizeof(cal0), &cal0);
340 if (err < 0)
341 return err;
342 cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
343
344 err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
345 (u8 *)&cal_x0);
346 if (err < 0)
347 return err;
348 cal_x0 = le16_to_cpu(cal_x0);
349
350 err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
351 (u8 *)&cal_x1);
352 if (err < 0)
353 return err;
354 cal_x1 = le16_to_cpu(cal_x1);
355
356 slope = &hw->sensors[HTS221_SENSOR_T].slope;
357 b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
358
359 *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
360 *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
361 (cal_x1 - cal_x0);
362 *b_gen *= 8;
363
364 return 0;
365}
366
367static int hts221_parse_rh_caldata(struct hts221_hw *hw)
368{
369 int err, *slope, *b_gen;
370 s16 cal_x0, cal_x1, cal_y0, cal_y1;
371 u8 data;
372
373 err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
374 &data);
375 if (err < 0)
376 return err;
377 cal_y0 = data;
378
379 err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
380 &data);
381 if (err < 0)
382 return err;
383 cal_y1 = data;
384
385 err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
386 (u8 *)&cal_x0);
387 if (err < 0)
388 return err;
389 cal_x0 = le16_to_cpu(cal_x0);
390
391 err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
392 (u8 *)&cal_x1);
393 if (err < 0)
394 return err;
395 cal_x1 = le16_to_cpu(cal_x1);
396
397 slope = &hw->sensors[HTS221_SENSOR_H].slope;
398 b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
399
400 *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
401 *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
402 (cal_x1 - cal_x0);
403 *b_gen *= 8;
404
405 return 0;
406}
407
408static int hts221_get_sensor_scale(struct hts221_hw *hw,
409 enum iio_chan_type ch_type,
410 int *val, int *val2)
411{
412 s64 tmp;
413 s32 rem, div, data;
414
415 switch (ch_type) {
416 case IIO_HUMIDITYRELATIVE:
417 data = hw->sensors[HTS221_SENSOR_H].slope;
418 div = (1 << 4) * 1000;
419 break;
420 case IIO_TEMP:
421 data = hw->sensors[HTS221_SENSOR_T].slope;
422 div = (1 << 6) * 1000;
423 break;
424 default:
425 return -EINVAL;
426 }
427
428 tmp = div_s64(data * 1000000000LL, div);
429 tmp = div_s64_rem(tmp, 1000000000LL, &rem);
430
431 *val = tmp;
432 *val2 = rem;
433
434 return IIO_VAL_INT_PLUS_NANO;
435}
436
437static int hts221_get_sensor_offset(struct hts221_hw *hw,
438 enum iio_chan_type ch_type,
439 int *val, int *val2)
440{
441 s64 tmp;
442 s32 rem, div, data;
443
444 switch (ch_type) {
445 case IIO_HUMIDITYRELATIVE:
446 data = hw->sensors[HTS221_SENSOR_H].b_gen;
447 div = hw->sensors[HTS221_SENSOR_H].slope;
448 break;
449 case IIO_TEMP:
450 data = hw->sensors[HTS221_SENSOR_T].b_gen;
451 div = hw->sensors[HTS221_SENSOR_T].slope;
452 break;
453 default:
454 return -EINVAL;
455 }
456
457 tmp = div_s64(data * 1000000000LL, div);
458 tmp = div_s64_rem(tmp, 1000000000LL, &rem);
459
460 *val = tmp;
461 *val2 = rem;
462
463 return IIO_VAL_INT_PLUS_NANO;
464}
465
466static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
467{
468 u8 data[HTS221_DATA_SIZE];
469 int err;
470
471 err = hts221_power_on(hw);
472 if (err < 0)
473 return err;
474
475 msleep(50);
476
477 err = hw->tf->read(hw->dev, addr, sizeof(data), data);
478 if (err < 0)
479 return err;
480
481 hts221_power_off(hw);
482
483 *val = (s16)get_unaligned_le16(data);
484
485 return IIO_VAL_INT;
486}
487
488static int hts221_read_raw(struct iio_dev *iio_dev,
489 struct iio_chan_spec const *ch,
490 int *val, int *val2, long mask)
491{
492 struct hts221_hw *hw = iio_priv(iio_dev);
493 int ret;
494
495 ret = iio_device_claim_direct_mode(iio_dev);
496 if (ret)
497 return ret;
498
499 switch (mask) {
500 case IIO_CHAN_INFO_RAW:
501 ret = hts221_read_oneshot(hw, ch->address, val);
502 break;
503 case IIO_CHAN_INFO_SCALE:
504 ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
505 break;
506 case IIO_CHAN_INFO_OFFSET:
507 ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
508 break;
509 case IIO_CHAN_INFO_SAMP_FREQ:
510 *val = hw->odr;
511 ret = IIO_VAL_INT;
512 break;
513 case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
514 u8 idx;
515 const struct hts221_avg *avg;
516
517 switch (ch->type) {
518 case IIO_HUMIDITYRELATIVE:
519 avg = &hts221_avg_list[HTS221_SENSOR_H];
520 idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
521 *val = avg->avg_avl[idx].avg;
522 ret = IIO_VAL_INT;
523 break;
524 case IIO_TEMP:
525 avg = &hts221_avg_list[HTS221_SENSOR_T];
526 idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
527 *val = avg->avg_avl[idx].avg;
528 ret = IIO_VAL_INT;
529 break;
530 default:
531 ret = -EINVAL;
532 break;
533 }
534 break;
535 }
536 default:
537 ret = -EINVAL;
538 break;
539 }
540
541 iio_device_release_direct_mode(iio_dev);
542
543 return ret;
544}
545
546static int hts221_write_raw(struct iio_dev *iio_dev,
547 struct iio_chan_spec const *chan,
548 int val, int val2, long mask)
549{
550 struct hts221_hw *hw = iio_priv(iio_dev);
551 int ret;
552
553 ret = iio_device_claim_direct_mode(iio_dev);
554 if (ret)
555 return ret;
556
557 switch (mask) {
558 case IIO_CHAN_INFO_SAMP_FREQ:
559 ret = hts221_update_odr(hw, val);
560 break;
561 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
562 switch (chan->type) {
563 case IIO_HUMIDITYRELATIVE:
564 ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
565 break;
566 case IIO_TEMP:
567 ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
568 break;
569 default:
570 ret = -EINVAL;
571 break;
572 }
573 break;
574 default:
575 ret = -EINVAL;
576 break;
577 }
578
579 iio_device_release_direct_mode(iio_dev);
580
581 return ret;
582}
583
584static int hts221_validate_trigger(struct iio_dev *iio_dev,
585 struct iio_trigger *trig)
586{
587 struct hts221_hw *hw = iio_priv(iio_dev);
588
589 return hw->trig == trig ? 0 : -EINVAL;
590}
591
592static IIO_DEVICE_ATTR(in_humidity_oversampling_ratio_available, S_IRUGO,
593 hts221_sysfs_rh_oversampling_avail, NULL, 0);
594static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available, S_IRUGO,
595 hts221_sysfs_temp_oversampling_avail, NULL, 0);
596static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hts221_sysfs_sampling_freq);
597
598static struct attribute *hts221_attributes[] = {
599 &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
600 &iio_dev_attr_in_humidity_oversampling_ratio_available.dev_attr.attr,
601 &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
602 NULL,
603};
604
605static const struct attribute_group hts221_attribute_group = {
606 .attrs = hts221_attributes,
607};
608
609static const struct iio_info hts221_info = {
610 .driver_module = THIS_MODULE,
611 .attrs = &hts221_attribute_group,
612 .read_raw = hts221_read_raw,
613 .write_raw = hts221_write_raw,
614 .validate_trigger = hts221_validate_trigger,
615};
616
617static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
618
619int hts221_probe(struct iio_dev *iio_dev)
620{
621 struct hts221_hw *hw = iio_priv(iio_dev);
622 int err;
623 u8 data;
624
625 mutex_init(&hw->lock);
626
627 err = hts221_check_whoami(hw);
628 if (err < 0)
629 return err;
630
631 hw->odr = hts221_odr_table[0].hz;
632
633 iio_dev->modes = INDIO_DIRECT_MODE;
634 iio_dev->dev.parent = hw->dev;
635 iio_dev->available_scan_masks = hts221_scan_masks;
636 iio_dev->channels = hts221_channels;
637 iio_dev->num_channels = ARRAY_SIZE(hts221_channels);
638 iio_dev->name = HTS221_DEV_NAME;
639 iio_dev->info = &hts221_info;
640
641 /* configure humidity sensor */
642 err = hts221_parse_rh_caldata(hw);
643 if (err < 0) {
644 dev_err(hw->dev, "failed to get rh calibration data\n");
645 return err;
646 }
647
648 data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
649 err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
650 if (err < 0) {
651 dev_err(hw->dev, "failed to set rh oversampling ratio\n");
652 return err;
653 }
654
655 /* configure temperature sensor */
656 err = hts221_parse_temp_caldata(hw);
657 if (err < 0) {
658 dev_err(hw->dev,
659 "failed to get temperature calibration data\n");
660 return err;
661 }
662
663 data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
664 err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
665 if (err < 0) {
666 dev_err(hw->dev,
667 "failed to set temperature oversampling ratio\n");
668 return err;
669 }
670
671 if (hw->irq > 0) {
672 err = hts221_allocate_buffers(hw);
673 if (err < 0)
674 return err;
675
676 err = hts221_allocate_trigger(hw);
677 if (err)
678 return err;
679 }
680
681 return devm_iio_device_register(hw->dev, iio_dev);
682}
683EXPORT_SYMBOL(hts221_probe);
684
685MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
686MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
687MODULE_LICENSE("GPL v2");
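hts221_parse_rh_caldata() and hts221_parse_temp_caldata() above reduce the factory calibration points (x0, y0) and (x1, y1) to a slope and an intercept, which hts221_get_sensor_scale()/hts221_get_sensor_offset() later expose so that userspace can compute value = (raw + offset) * scale. A compressed sketch of that arithmetic, using the same integer scaling as the patch:

/* Two-point linear fit, as in the calibration helpers above (sketch): */
slope = ((y1 - y0) * 8000) / (x1 - x0);
b_gen = (((s32)x1 * y0 - (s32)x0 * y1) * 1000) / (x1 - x0);
b_gen *= 8;

/*
 * For the humidity channel the exported attributes then become
 *   scale  = slope / ((1 << 4) * 1000)   (reported as IIO_VAL_INT_PLUS_NANO)
 *   offset = b_gen / slope
 * so (raw + offset) * scale reduces to (raw * slope + b_gen) / ((1 << 4) * 1000);
 * the temperature channel uses (1 << 6) * 1000 as the divider instead.
 */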
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
new file mode 100644
index 000000000000..367ecd509f31
--- /dev/null
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -0,0 +1,110 @@
1/*
2 * STMicroelectronics hts221 i2c driver
3 *
4 * Copyright 2016 STMicroelectronics Inc.
5 *
6 * Lorenzo Bianconi <lorenzo.bianconi@st.com>
7 *
8 * Licensed under the GPL-2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/slab.h>
15#include "hts221.h"
16
17#define I2C_AUTO_INCREMENT 0x80
18
19static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
20{
21 struct i2c_msg msg[2];
22 struct i2c_client *client = to_i2c_client(dev);
23
24 if (len > 1)
25 addr |= I2C_AUTO_INCREMENT;
26
27 msg[0].addr = client->addr;
28 msg[0].flags = client->flags;
29 msg[0].len = 1;
30 msg[0].buf = &addr;
31
32 msg[1].addr = client->addr;
33 msg[1].flags = client->flags | I2C_M_RD;
34 msg[1].len = len;
35 msg[1].buf = data;
36
37 return i2c_transfer(client->adapter, msg, 2);
38}
39
40static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
41{
42 u8 send[len + 1];
43 struct i2c_msg msg;
44 struct i2c_client *client = to_i2c_client(dev);
45
46 if (len > 1)
47 addr |= I2C_AUTO_INCREMENT;
48
49 send[0] = addr;
50 memcpy(&send[1], data, len * sizeof(u8));
51
52 msg.addr = client->addr;
53 msg.flags = client->flags;
54 msg.len = len + 1;
55 msg.buf = send;
56
57 return i2c_transfer(client->adapter, &msg, 1);
58}
59
60static const struct hts221_transfer_function hts221_transfer_fn = {
61 .read = hts221_i2c_read,
62 .write = hts221_i2c_write,
63};
64
65static int hts221_i2c_probe(struct i2c_client *client,
66 const struct i2c_device_id *id)
67{
68 struct hts221_hw *hw;
69 struct iio_dev *iio_dev;
70
71 iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
72 if (!iio_dev)
73 return -ENOMEM;
74
75 i2c_set_clientdata(client, iio_dev);
76
77 hw = iio_priv(iio_dev);
78 hw->name = client->name;
79 hw->dev = &client->dev;
80 hw->irq = client->irq;
81 hw->tf = &hts221_transfer_fn;
82
83 return hts221_probe(iio_dev);
84}
85
86static const struct of_device_id hts221_i2c_of_match[] = {
87 { .compatible = "st,hts221", },
88 {},
89};
90MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
91
92static const struct i2c_device_id hts221_i2c_id_table[] = {
93 { HTS221_DEV_NAME },
94 {},
95};
96MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
97
98static struct i2c_driver hts221_driver = {
99 .driver = {
100 .name = "hts221_i2c",
101 .of_match_table = of_match_ptr(hts221_i2c_of_match),
102 },
103 .probe = hts221_i2c_probe,
104 .id_table = hts221_i2c_id_table,
105};
106module_i2c_driver(hts221_driver);
107
108MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
109MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
110MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
new file mode 100644
index 000000000000..70df5e7150c1
--- /dev/null
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -0,0 +1,125 @@
1/*
2 * STMicroelectronics hts221 spi driver
3 *
4 * Copyright 2016 STMicroelectronics Inc.
5 *
6 * Lorenzo Bianconi <lorenzo.bianconi@st.com>
7 *
8 * Licensed under the GPL-2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/spi/spi.h>
14#include <linux/slab.h>
15#include "hts221.h"
16
17#define SENSORS_SPI_READ 0x80
18#define SPI_AUTO_INCREMENT 0x40
19
20static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
21{
22 int err;
23 struct spi_device *spi = to_spi_device(dev);
24 struct iio_dev *iio_dev = spi_get_drvdata(spi);
25 struct hts221_hw *hw = iio_priv(iio_dev);
26
27 struct spi_transfer xfers[] = {
28 {
29 .tx_buf = hw->tb.tx_buf,
30 .bits_per_word = 8,
31 .len = 1,
32 },
33 {
34 .rx_buf = hw->tb.rx_buf,
35 .bits_per_word = 8,
36 .len = len,
37 }
38 };
39
40 if (len > 1)
41 addr |= SPI_AUTO_INCREMENT;
42 hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
43
44 err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
45 if (err < 0)
46 return err;
47
48 memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
49
50 return len;
51}
52
53static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
54{
55 struct spi_device *spi = to_spi_device(dev);
56 struct iio_dev *iio_dev = spi_get_drvdata(spi);
57 struct hts221_hw *hw = iio_priv(iio_dev);
58
59 struct spi_transfer xfers = {
60 .tx_buf = hw->tb.tx_buf,
61 .bits_per_word = 8,
62 .len = len + 1,
63 };
64
65 if (len >= HTS221_TX_MAX_LENGTH)
66 return -ENOMEM;
67
68 if (len > 1)
69 addr |= SPI_AUTO_INCREMENT;
70 hw->tb.tx_buf[0] = addr;
71 memcpy(&hw->tb.tx_buf[1], data, len);
72
73 return spi_sync_transfer(spi, &xfers, 1);
74}
75
76static const struct hts221_transfer_function hts221_transfer_fn = {
77 .read = hts221_spi_read,
78 .write = hts221_spi_write,
79};
80
81static int hts221_spi_probe(struct spi_device *spi)
82{
83 struct hts221_hw *hw;
84 struct iio_dev *iio_dev;
85
86 iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
87 if (!iio_dev)
88 return -ENOMEM;
89
90 spi_set_drvdata(spi, iio_dev);
91
92 hw = iio_priv(iio_dev);
93 hw->name = spi->modalias;
94 hw->dev = &spi->dev;
95 hw->irq = spi->irq;
96 hw->tf = &hts221_transfer_fn;
97
98 return hts221_probe(iio_dev);
99}
100
101static const struct of_device_id hts221_spi_of_match[] = {
102 { .compatible = "st,hts221", },
103 {},
104};
105MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
106
107static const struct spi_device_id hts221_spi_id_table[] = {
108 { HTS221_DEV_NAME },
109 {},
110};
111MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
112
113static struct spi_driver hts221_driver = {
114 .driver = {
115 .name = "hts221_spi",
116 .of_match_table = of_match_ptr(hts221_spi_of_match),
117 },
118 .probe = hts221_spi_probe,
119 .id_table = hts221_spi_id_table,
120};
121module_spi_driver(hts221_driver);
122
123MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
124MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
125MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index ffc2ccf6374e..345a7656c5ef 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -154,8 +154,17 @@ static const struct i2c_device_id si7020_id[] = {
154}; 154};
155MODULE_DEVICE_TABLE(i2c, si7020_id); 155MODULE_DEVICE_TABLE(i2c, si7020_id);
156 156
157static const struct of_device_id si7020_dt_ids[] = {
158 { .compatible = "silabs,si7020" },
159 { }
160};
161MODULE_DEVICE_TABLE(of, si7020_dt_ids);
162
157static struct i2c_driver si7020_driver = { 163static struct i2c_driver si7020_driver = {
158 .driver.name = "si7020", 164 .driver = {
165 .name = "si7020",
166 .of_match_table = of_match_ptr(si7020_dt_ids),
167 },
159 .probe = si7020_probe, 168 .probe = si7020_probe,
160 .id_table = si7020_id, 169 .id_table = si7020_id,
161}; 170};
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e0251b8c1a52..5355507f8fa1 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -398,7 +398,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
398 struct iio_poll_func *pf = p; 398 struct iio_poll_func *pf = p;
399 struct iio_dev *indio_dev = pf->indio_dev; 399 struct iio_dev *indio_dev = pf->indio_dev;
400 struct bmi160_data *data = iio_priv(indio_dev); 400 struct bmi160_data *data = iio_priv(indio_dev);
401 s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */ 401 __le16 buf[16];
402 /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
402 int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L; 403 int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
403 __le16 sample; 404 __le16 sample;
404 405
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 19580d1db597..2c3f8964a3ea 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -126,7 +126,7 @@ static int inv_mpu_probe(struct i2c_client *client,
126 126
127 st = iio_priv(dev_get_drvdata(&client->dev)); 127 st = iio_priv(dev_get_drvdata(&client->dev));
128 st->muxc = i2c_mux_alloc(client->adapter, &client->dev, 128 st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
129 1, 0, I2C_MUX_LOCKED, 129 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
130 inv_mpu6050_select_bypass, 130 inv_mpu6050_select_bypass,
131 inv_mpu6050_deselect_bypass); 131 inv_mpu6050_deselect_bypass);
132 if (!st->muxc) { 132 if (!st->muxc) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf44dd95..b12830b09c7d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -307,10 +307,9 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
307 const unsigned long *mask; 307 const unsigned long *mask;
308 unsigned long *trialmask; 308 unsigned long *trialmask;
309 309
310 trialmask = kmalloc(sizeof(*trialmask)* 310 trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
311 BITS_TO_LONGS(indio_dev->masklength), 311 sizeof(*trialmask),
312 GFP_KERNEL); 312 GFP_KERNEL);
313
314 if (trialmask == NULL) 313 if (trialmask == NULL)
315 return -ENOMEM; 314 return -ENOMEM;
316 if (!indio_dev->masklength) { 315 if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed3dca1..aaca42862389 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,8 @@ static const char * const iio_chan_type_name_spec[] = {
81 [IIO_PH] = "ph", 81 [IIO_PH] = "ph",
82 [IIO_UVINDEX] = "uvindex", 82 [IIO_UVINDEX] = "uvindex",
83 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", 83 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
84 [IIO_COUNT] = "count",
85 [IIO_INDEX] = "index",
84}; 86};
85 87
86static const char * const iio_modifier_names[] = { 88static const char * const iio_modifier_names[] = {
@@ -575,66 +577,82 @@ int of_iio_read_mount_matrix(const struct device *dev,
575#endif 577#endif
576EXPORT_SYMBOL(of_iio_read_mount_matrix); 578EXPORT_SYMBOL(of_iio_read_mount_matrix);
577 579
578/** 580static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
579 * iio_format_value() - Formats a IIO value into its string representation 581 int size, const int *vals)
580 * @buf: The buffer to which the formatted value gets written
581 * @type: One of the IIO_VAL_... constants. This decides how the val
582 * and val2 parameters are formatted.
583 * @size: Number of IIO value entries contained in vals
584 * @vals: Pointer to the values, exact meaning depends on the
585 * type parameter.
586 *
587 * Return: 0 by default, a negative number on failure or the
588 * total number of characters written for a type that belongs
589 * to the IIO_VAL_... constant.
590 */
591ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
592{ 582{
593 unsigned long long tmp; 583 unsigned long long tmp;
584 int tmp0, tmp1;
594 bool scale_db = false; 585 bool scale_db = false;
595 586
596 switch (type) { 587 switch (type) {
597 case IIO_VAL_INT: 588 case IIO_VAL_INT:
598 return sprintf(buf, "%d\n", vals[0]); 589 return snprintf(buf, len, "%d", vals[0]);
599 case IIO_VAL_INT_PLUS_MICRO_DB: 590 case IIO_VAL_INT_PLUS_MICRO_DB:
600 scale_db = true; 591 scale_db = true;
601 case IIO_VAL_INT_PLUS_MICRO: 592 case IIO_VAL_INT_PLUS_MICRO:
602 if (vals[1] < 0) 593 if (vals[1] < 0)
603 return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]), 594 return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
604 -vals[1], scale_db ? " dB" : ""); 595 -vals[1], scale_db ? " dB" : "");
605 else 596 else
606 return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1], 597 return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
607 scale_db ? " dB" : ""); 598 scale_db ? " dB" : "");
608 case IIO_VAL_INT_PLUS_NANO: 599 case IIO_VAL_INT_PLUS_NANO:
609 if (vals[1] < 0) 600 if (vals[1] < 0)
610 return sprintf(buf, "-%d.%09u\n", abs(vals[0]), 601 return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
611 -vals[1]); 602 -vals[1]);
612 else 603 else
613 return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); 604 return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
614 case IIO_VAL_FRACTIONAL: 605 case IIO_VAL_FRACTIONAL:
615 tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 606 tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
616 vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]); 607 tmp1 = vals[1];
617 return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1])); 608 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
609 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
618 case IIO_VAL_FRACTIONAL_LOG2: 610 case IIO_VAL_FRACTIONAL_LOG2:
619 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 611 tmp = (s64)vals[0] * 1000000000LL >> vals[1];
620 vals[1] = do_div(tmp, 1000000000LL); 612 tmp1 = do_div(tmp, 1000000000LL);
621 vals[0] = tmp; 613 tmp0 = tmp;
622 return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); 614 return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
623 case IIO_VAL_INT_MULTIPLE: 615 case IIO_VAL_INT_MULTIPLE:
624 { 616 {
625 int i; 617 int i;
626 int len = 0; 618 int l = 0;
627 619
628 for (i = 0; i < size; ++i) 620 for (i = 0; i < size; ++i) {
629 len += snprintf(&buf[len], PAGE_SIZE - len, "%d ", 621 l += snprintf(&buf[l], len - l, "%d ", vals[i]);
630 vals[i]); 622 if (l >= len)
631 len += snprintf(&buf[len], PAGE_SIZE - len, "\n"); 623 break;
632 return len; 624 }
625 return l;
633 } 626 }
634 default: 627 default:
635 return 0; 628 return 0;
636 } 629 }
637} 630}
631
632/**
633 * iio_format_value() - Formats a IIO value into its string representation
634 * @buf: The buffer to which the formatted value gets written
635 * which is assumed to be big enough (i.e. PAGE_SIZE).
636 * @type: One of the IIO_VAL_... constants. This decides how the val
637 * and val2 parameters are formatted.
638 * @size: Number of IIO value entries contained in vals
639 * @vals: Pointer to the values, exact meaning depends on the
640 * type parameter.
641 *
642 * Return: 0 by default, a negative number on failure or the
643 * total number of characters written for a type that belongs
644 * to the IIO_VAL_... constant.
645 */
646ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
647{
648 ssize_t len;
649
650 len = __iio_format_value(buf, PAGE_SIZE, type, size, vals);
651 if (len >= PAGE_SIZE - 1)
652 return -EFBIG;
653
654 return len + sprintf(buf + len, "\n");
655}
638EXPORT_SYMBOL_GPL(iio_format_value); 656EXPORT_SYMBOL_GPL(iio_format_value);
639 657
640static ssize_t iio_read_channel_info(struct device *dev, 658static ssize_t iio_read_channel_info(struct device *dev,
@@ -662,6 +680,119 @@ static ssize_t iio_read_channel_info(struct device *dev,
662 return iio_format_value(buf, ret, val_len, vals); 680 return iio_format_value(buf, ret, val_len, vals);
663} 681}
664 682
683static ssize_t iio_format_avail_list(char *buf, const int *vals,
684 int type, int length)
685{
686 int i;
687 ssize_t len = 0;
688
689 switch (type) {
690 case IIO_VAL_INT:
691 for (i = 0; i < length; i++) {
692 len += __iio_format_value(buf + len, PAGE_SIZE - len,
693 type, 1, &vals[i]);
694 if (len >= PAGE_SIZE)
695 return -EFBIG;
696 if (i < length - 1)
697 len += snprintf(buf + len, PAGE_SIZE - len,
698 " ");
699 else
700 len += snprintf(buf + len, PAGE_SIZE - len,
701 "\n");
702 if (len >= PAGE_SIZE)
703 return -EFBIG;
704 }
705 break;
706 default:
707 for (i = 0; i < length / 2; i++) {
708 len += __iio_format_value(buf + len, PAGE_SIZE - len,
709 type, 2, &vals[i * 2]);
710 if (len >= PAGE_SIZE)
711 return -EFBIG;
712 if (i < length / 2 - 1)
713 len += snprintf(buf + len, PAGE_SIZE - len,
714 " ");
715 else
716 len += snprintf(buf + len, PAGE_SIZE - len,
717 "\n");
718 if (len >= PAGE_SIZE)
719 return -EFBIG;
720 }
721 }
722
723 return len;
724}
725
726static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
727{
728 int i;
729 ssize_t len;
730
731 len = snprintf(buf, PAGE_SIZE, "[");
732 switch (type) {
733 case IIO_VAL_INT:
734 for (i = 0; i < 3; i++) {
735 len += __iio_format_value(buf + len, PAGE_SIZE - len,
736 type, 1, &vals[i]);
737 if (len >= PAGE_SIZE)
738 return -EFBIG;
739 if (i < 2)
740 len += snprintf(buf + len, PAGE_SIZE - len,
741 " ");
742 else
743 len += snprintf(buf + len, PAGE_SIZE - len,
744 "]\n");
745 if (len >= PAGE_SIZE)
746 return -EFBIG;
747 }
748 break;
749 default:
750 for (i = 0; i < 3; i++) {
751 len += __iio_format_value(buf + len, PAGE_SIZE - len,
752 type, 2, &vals[i * 2]);
753 if (len >= PAGE_SIZE)
754 return -EFBIG;
755 if (i < 2)
756 len += snprintf(buf + len, PAGE_SIZE - len,
757 " ");
758 else
759 len += snprintf(buf + len, PAGE_SIZE - len,
760 "]\n");
761 if (len >= PAGE_SIZE)
762 return -EFBIG;
763 }
764 }
765
766 return len;
767}
768
769static ssize_t iio_read_channel_info_avail(struct device *dev,
770 struct device_attribute *attr,
771 char *buf)
772{
773 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
774 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
775 const int *vals;
776 int ret;
777 int length;
778 int type;
779
780 ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
781 &vals, &type, &length,
782 this_attr->address);
783
784 if (ret < 0)
785 return ret;
786 switch (ret) {
787 case IIO_AVAIL_LIST:
788 return iio_format_avail_list(buf, vals, type, length);
789 case IIO_AVAIL_RANGE:
790 return iio_format_avail_range(buf, vals, type);
791 default:
792 return -EINVAL;
793 }
794}
795
665/** 796/**
666 * iio_str_to_fixpoint() - Parse a fixed-point number from a string 797 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
667 * @str: The string to parse 798 * @str: The string to parse
@@ -978,6 +1109,40 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
978 return attrcount; 1109 return attrcount;
979} 1110}
980 1111
1112static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
1113 struct iio_chan_spec const *chan,
1114 enum iio_shared_by shared_by,
1115 const long *infomask)
1116{
1117 int i, ret, attrcount = 0;
1118 char *avail_postfix;
1119
1120 for_each_set_bit(i, infomask, sizeof(infomask) * 8) {
1121 avail_postfix = kasprintf(GFP_KERNEL,
1122 "%s_available",
1123 iio_chan_info_postfix[i]);
1124 if (!avail_postfix)
1125 return -ENOMEM;
1126
1127 ret = __iio_add_chan_devattr(avail_postfix,
1128 chan,
1129 &iio_read_channel_info_avail,
1130 NULL,
1131 i,
1132 shared_by,
1133 &indio_dev->dev,
1134 &indio_dev->channel_attr_list);
1135 kfree(avail_postfix);
1136 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1137 continue;
1138 else if (ret < 0)
1139 return ret;
1140 attrcount++;
1141 }
1142
1143 return attrcount;
1144}
1145
981static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, 1146static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
982 struct iio_chan_spec const *chan) 1147 struct iio_chan_spec const *chan)
983{ 1148{
@@ -993,6 +1158,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
993 return ret; 1158 return ret;
994 attrcount += ret; 1159 attrcount += ret;
995 1160
1161 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1162 IIO_SEPARATE,
1163 &chan->
1164 info_mask_separate_available);
1165 if (ret < 0)
1166 return ret;
1167 attrcount += ret;
1168
996 ret = iio_device_add_info_mask_type(indio_dev, chan, 1169 ret = iio_device_add_info_mask_type(indio_dev, chan,
997 IIO_SHARED_BY_TYPE, 1170 IIO_SHARED_BY_TYPE,
998 &chan->info_mask_shared_by_type); 1171 &chan->info_mask_shared_by_type);
@@ -1000,6 +1173,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1000 return ret; 1173 return ret;
1001 attrcount += ret; 1174 attrcount += ret;
1002 1175
1176 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1177 IIO_SHARED_BY_TYPE,
1178 &chan->
1179 info_mask_shared_by_type_available);
1180 if (ret < 0)
1181 return ret;
1182 attrcount += ret;
1183
1003 ret = iio_device_add_info_mask_type(indio_dev, chan, 1184 ret = iio_device_add_info_mask_type(indio_dev, chan,
1004 IIO_SHARED_BY_DIR, 1185 IIO_SHARED_BY_DIR,
1005 &chan->info_mask_shared_by_dir); 1186 &chan->info_mask_shared_by_dir);
@@ -1007,6 +1188,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1007 return ret; 1188 return ret;
1008 attrcount += ret; 1189 attrcount += ret;
1009 1190
1191 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1192 IIO_SHARED_BY_DIR,
1193 &chan->info_mask_shared_by_dir_available);
1194 if (ret < 0)
1195 return ret;
1196 attrcount += ret;
1197
1010 ret = iio_device_add_info_mask_type(indio_dev, chan, 1198 ret = iio_device_add_info_mask_type(indio_dev, chan,
1011 IIO_SHARED_BY_ALL, 1199 IIO_SHARED_BY_ALL,
1012 &chan->info_mask_shared_by_all); 1200 &chan->info_mask_shared_by_all);
@@ -1014,6 +1202,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1014 return ret; 1202 return ret;
1015 attrcount += ret; 1203 attrcount += ret;
1016 1204
1205 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1206 IIO_SHARED_BY_ALL,
1207 &chan->info_mask_shared_by_all_available);
1208 if (ret < 0)
1209 return ret;
1210 attrcount += ret;
1211
1017 if (chan->ext_info) { 1212 if (chan->ext_info) {
1018 unsigned int i = 0; 1213 unsigned int i = 0;
1019 for (ext_info = chan->ext_info; ext_info->name; ext_info++) { 1214 for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
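The read_avail plumbing added above means a driver only has to hand back the permitted values and flag them in the channel's info_mask_*_available bitmaps; the core then creates the matching "*_available" sysfs attributes itself. A minimal sketch, assuming a hypothetical foo driver with a fixed list of scales (IIO_VAL_INT_PLUS_MICRO pairs):

static const int foo_scale_avail[] = {
	0, 100000,	/* 0.100000 */
	0, 200000,	/* 0.200000 */
	0, 400000,	/* 0.400000 */
};

static int foo_read_avail(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals, int *type, int *length,
			  long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*vals = foo_scale_avail;
		*type = IIO_VAL_INT_PLUS_MICRO;
		*length = ARRAY_SIZE(foo_scale_avail);	/* counted in ints, two per value */
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}

With .read_avail = foo_read_avail in the driver's iio_info and .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE) in the channel spec, the core emits an in_*_scale_available attribute, rendered as a space-separated list for IIO_AVAIL_LIST or as "[min step max]" for IIO_AVAIL_RANGE.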
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index e1e104845e38..978729f6d7c4 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -717,6 +717,27 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
717} 717}
718EXPORT_SYMBOL(iio_trigger_using_own); 718EXPORT_SYMBOL(iio_trigger_using_own);
719 719
720/**
721 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
722 * the same device
723 * @trig: The IIO trigger to check
724 * @indio_dev: the IIO device to check
725 *
726 * This function can be used as the validate_device callback for triggers that
727 * can only be attached to their own device.
728 *
729 * Return: 0 if both the trigger and the IIO device belong to the same
730 * device, -EINVAL otherwise.
731 */
732int iio_trigger_validate_own_device(struct iio_trigger *trig,
733 struct iio_dev *indio_dev)
734{
735 if (indio_dev->dev.parent != trig->dev.parent)
736 return -EINVAL;
737 return 0;
738}
739EXPORT_SYMBOL(iio_trigger_validate_own_device);
740
720void iio_device_register_trigger_consumer(struct iio_dev *indio_dev) 741void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
721{ 742{
722 indio_dev->groups[indio_dev->groupcounter++] = 743 indio_dev->groups[indio_dev->groupcounter++] =
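iio_trigger_validate_own_device() is meant to be dropped straight into the trigger ops of devices whose trigger must only drive their own buffer. A minimal sketch for a hypothetical driver:

static const struct iio_trigger_ops foo_trigger_ops = {
	.owner = THIS_MODULE,
	.validate_device = iio_trigger_validate_own_device,
};

	/* at probe time, before iio_trigger_register() */
	trig->ops = &foo_trigger_ops;

Attaching the trigger to an IIO device with a different parent then fails with -EINVAL, as per the helper above.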
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c4757e6367e7..b0f4630a163f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -658,6 +658,31 @@ err_unlock:
658} 658}
659EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed); 659EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
660 660
661static int iio_read_channel_attribute(struct iio_channel *chan,
662 int *val, int *val2,
663 enum iio_chan_info_enum attribute)
664{
665 int ret;
666
667 mutex_lock(&chan->indio_dev->info_exist_lock);
668 if (chan->indio_dev->info == NULL) {
669 ret = -ENODEV;
670 goto err_unlock;
671 }
672
673 ret = iio_channel_read(chan, val, val2, attribute);
674err_unlock:
675 mutex_unlock(&chan->indio_dev->info_exist_lock);
676
677 return ret;
678}
679
680int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
681{
682 return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
683}
684EXPORT_SYMBOL_GPL(iio_read_channel_offset);
685
661int iio_read_channel_processed(struct iio_channel *chan, int *val) 686int iio_read_channel_processed(struct iio_channel *chan, int *val)
662{ 687{
663 int ret; 688 int ret;
@@ -687,21 +712,113 @@ EXPORT_SYMBOL_GPL(iio_read_channel_processed);
687 712
688int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2) 713int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
689{ 714{
715 return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
716}
717EXPORT_SYMBOL_GPL(iio_read_channel_scale);
718
719static int iio_channel_read_avail(struct iio_channel *chan,
720 const int **vals, int *type, int *length,
721 enum iio_chan_info_enum info)
722{
723 if (!iio_channel_has_available(chan->channel, info))
724 return -EINVAL;
725
726 return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
727 vals, type, length, info);
728}
729
730int iio_read_avail_channel_raw(struct iio_channel *chan,
731 const int **vals, int *length)
732{
690 int ret; 733 int ret;
734 int type;
691 735
692 mutex_lock(&chan->indio_dev->info_exist_lock); 736 mutex_lock(&chan->indio_dev->info_exist_lock);
693 if (chan->indio_dev->info == NULL) { 737 if (!chan->indio_dev->info) {
694 ret = -ENODEV; 738 ret = -ENODEV;
695 goto err_unlock; 739 goto err_unlock;
696 } 740 }
697 741
698 ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE); 742 ret = iio_channel_read_avail(chan,
743 vals, &type, length, IIO_CHAN_INFO_RAW);
699err_unlock: 744err_unlock:
700 mutex_unlock(&chan->indio_dev->info_exist_lock); 745 mutex_unlock(&chan->indio_dev->info_exist_lock);
701 746
747 if (ret >= 0 && type != IIO_VAL_INT) {
748 /* raw values are assumed to be IIO_VAL_INT */
749 ret = -EINVAL;
750 goto err_unlock;
751 }
752
702 return ret; 753 return ret;
703} 754}
704EXPORT_SYMBOL_GPL(iio_read_channel_scale); 755EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
756
757static int iio_channel_read_max(struct iio_channel *chan,
758 int *val, int *val2, int *type,
759 enum iio_chan_info_enum info)
760{
761 int unused;
762 const int *vals;
763 int length;
764 int ret;
765
766 if (!val2)
767 val2 = &unused;
768
769 ret = iio_channel_read_avail(chan, &vals, type, &length, info);
770 switch (ret) {
771 case IIO_AVAIL_RANGE:
772 switch (*type) {
773 case IIO_VAL_INT:
774 *val = vals[2];
775 break;
776 default:
777 *val = vals[4];
778 *val2 = vals[5];
779 }
780 return 0;
781
782 case IIO_AVAIL_LIST:
783 if (length <= 0)
784 return -EINVAL;
785 switch (*type) {
786 case IIO_VAL_INT:
787 *val = vals[--length];
788 while (length) {
789 if (vals[--length] > *val)
790 *val = vals[length];
791 }
792 break;
793 default:
794 /* FIXME: learn about max for other iio values */
795 return -EINVAL;
796 }
797 return 0;
798
799 default:
800 return ret;
801 }
802}
803
804int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
805{
806 int ret;
807 int type;
808
809 mutex_lock(&chan->indio_dev->info_exist_lock);
810 if (!chan->indio_dev->info) {
811 ret = -ENODEV;
812 goto err_unlock;
813 }
814
815 ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
816err_unlock:
817 mutex_unlock(&chan->indio_dev->info_exist_lock);
818
819 return ret;
820}
821EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
705 822
706int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type) 823int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
707{ 824{
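On the in-kernel consumer side the new helpers mirror what sysfs exposes. A sketch, assuming chan was obtained earlier with iio_channel_get():

	const int *raw_avail;
	int val, val2, raw_max, length, ret;

	/* list of permitted raw values (IIO_VAL_INT only) */
	ret = iio_read_avail_channel_raw(chan, &raw_avail, &length);

	/* or just the largest permitted raw value */
	ret = iio_read_max_channel_raw(chan, &raw_max);

	/* offset to apply before scaling, like in_*_offset in sysfs */
	ret = iio_read_channel_offset(chan, &val, &val2);

Each call returns a negative errno when the provider has gone away or the channel does not advertise the requested information.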
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ba2e64d7ee58..298ea5081a96 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -140,6 +140,18 @@ config GP2AP020A00F
140 To compile this driver as a module, choose M here: the 140 To compile this driver as a module, choose M here: the
141 module will be called gp2ap020a00f. 141 module will be called gp2ap020a00f.
142 142
143config SENSORS_ISL29018
144 tristate "Intersil 29018 light and proximity sensor"
145 depends on I2C
146 select REGMAP_I2C
147 default n
148 help
149 If you say yes here you get support for ambient light sensing and
150 proximity infrared sensing from Intersil ISL29018.
151 This driver will provide the measurements of ambient light intensity
152 in lux, proximity infrared sensing and normal infrared sensing.
153 Data from sensor is accessible via sysfs.
154
143config ISL29125 155config ISL29125
144 tristate "Intersil ISL29125 digital color light sensor" 156 tristate "Intersil ISL29125 digital color light sensor"
145 depends on I2C 157 depends on I2C
@@ -326,6 +338,13 @@ config SENSORS_TSL2563
326 This driver can also be built as a module. If so, the module 338 This driver can also be built as a module. If so, the module
327 will be called tsl2563. 339 will be called tsl2563.
328 340
341config TSL2583
342 tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
343 depends on I2C
344 help
345 Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
346 Access ALS data via iio, sysfs.
347
329config TSL4531 348config TSL4531
330 tristate "TAOS TSL4531 ambient light sensors" 349 tristate "TAOS TSL4531 ambient light sensors"
331 depends on I2C 350 depends on I2C
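Both drivers promoted out of staging here are plain tristate options, so an illustrative config fragment to build them as modules would be:

CONFIG_SENSORS_ISL29018=m
CONFIG_TSL2583=m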
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c5768df87a17..4de520036e6e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CM36651) += cm36651.o
17obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o 17obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
18obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o 18obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
19obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o 19obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
20obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
20obj-$(CONFIG_ISL29125) += isl29125.o 21obj-$(CONFIG_ISL29125) += isl29125.o
21obj-$(CONFIG_JSA1212) += jsa1212.o 22obj-$(CONFIG_JSA1212) += jsa1212.o
22obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o 23obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SI1145) += si1145.o
30obj-$(CONFIG_STK3310) += stk3310.o 31obj-$(CONFIG_STK3310) += stk3310.o
31obj-$(CONFIG_TCS3414) += tcs3414.o 32obj-$(CONFIG_TCS3414) += tcs3414.o
32obj-$(CONFIG_TCS3472) += tcs3472.o 33obj-$(CONFIG_TCS3472) += tcs3472.o
34obj-$(CONFIG_TSL2583) += tsl2583.o
33obj-$(CONFIG_TSL4531) += tsl4531.o 35obj-$(CONFIG_TSL4531) += tsl4531.o
34obj-$(CONFIG_US5182D) += us5182d.o 36obj-$(CONFIG_US5182D) += us5182d.o
35obj-$(CONFIG_VCNL4000) += vcnl4000.o 37obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index a767a43c995c..917dd8b43e72 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -62,16 +62,6 @@
62#define ISL29035_BOUT_SHIFT 0x07 62#define ISL29035_BOUT_SHIFT 0x07
63#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT) 63#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT)
64 64
65#define ISL29018_INT_TIME_AVAIL "0.090000 0.005630 0.000351 0.000021"
66#define ISL29023_INT_TIME_AVAIL "0.090000 0.005600 0.000352 0.000022"
67#define ISL29035_INT_TIME_AVAIL "0.105000 0.006500 0.000410 0.000025"
68
69static const char * const int_time_avail[] = {
70 ISL29018_INT_TIME_AVAIL,
71 ISL29023_INT_TIME_AVAIL,
72 ISL29035_INT_TIME_AVAIL,
73};
74
75enum isl29018_int_time { 65enum isl29018_int_time {
76 ISL29018_INT_TIME_16, 66 ISL29018_INT_TIME_16,
77 ISL29018_INT_TIME_12, 67 ISL29018_INT_TIME_12,
@@ -110,7 +100,8 @@ struct isl29018_chip {
110static int isl29018_set_integration_time(struct isl29018_chip *chip, 100static int isl29018_set_integration_time(struct isl29018_chip *chip,
111 unsigned int utime) 101 unsigned int utime)
112{ 102{
113 int i, ret; 103 unsigned int i;
104 int ret;
114 unsigned int int_time, new_int_time; 105 unsigned int int_time, new_int_time;
115 106
116 for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) { 107 for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
@@ -145,7 +136,8 @@ static int isl29018_set_integration_time(struct isl29018_chip *chip,
145 136
146static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale) 137static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
147{ 138{
148 int i, ret; 139 unsigned int i;
140 int ret;
149 struct isl29018_scale new_scale; 141 struct isl29018_scale new_scale;
150 142
151 for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) { 143 for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
@@ -276,29 +268,35 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
276 return 0; 268 return 0;
277} 269}
278 270
279static ssize_t isl29018_show_scale_available(struct device *dev, 271static ssize_t in_illuminance_scale_available_show
280 struct device_attribute *attr, char *buf) 272 (struct device *dev, struct device_attribute *attr,
273 char *buf)
281{ 274{
282 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 275 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
283 struct isl29018_chip *chip = iio_priv(indio_dev); 276 struct isl29018_chip *chip = iio_priv(indio_dev);
284 int i, len = 0; 277 unsigned int i;
278 int len = 0;
285 279
280 mutex_lock(&chip->lock);
286 for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) 281 for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
287 len += sprintf(buf + len, "%d.%06d ", 282 len += sprintf(buf + len, "%d.%06d ",
288 isl29018_scales[chip->int_time][i].scale, 283 isl29018_scales[chip->int_time][i].scale,
289 isl29018_scales[chip->int_time][i].uscale); 284 isl29018_scales[chip->int_time][i].uscale);
285 mutex_unlock(&chip->lock);
290 286
291 buf[len - 1] = '\n'; 287 buf[len - 1] = '\n';
292 288
293 return len; 289 return len;
294} 290}
295 291
296static ssize_t isl29018_show_int_time_available(struct device *dev, 292static ssize_t in_illuminance_integration_time_available_show
297 struct device_attribute *attr, char *buf) 293 (struct device *dev, struct device_attribute *attr,
294 char *buf)
298{ 295{
299 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 296 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
300 struct isl29018_chip *chip = iio_priv(indio_dev); 297 struct isl29018_chip *chip = iio_priv(indio_dev);
301 int i, len = 0; 298 unsigned int i;
299 int len = 0;
302 300
303 for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) 301 for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
304 len += sprintf(buf + len, "0.%06d ", 302 len += sprintf(buf + len, "0.%06d ",
@@ -309,9 +307,27 @@ static ssize_t isl29018_show_int_time_available(struct device *dev,
309 return len; 307 return len;
310} 308}
311 309
312static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev, 310/*
313 struct device_attribute *attr, 311 * From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
314 char *buf) 312 * infrared suppression:
313 *
314 * Proximity Sensing Scheme: Bit 7. This bit programs the function
315 * of the proximity detection. Logic 0 of this bit, Scheme 0, makes
316 * full n (4, 8, 12, 16) bits (unsigned) proximity detection. The range
317 * of Scheme 0 proximity count is from 0 to 2^n. Logic 1 of this bit,
318 * Scheme 1, makes n-1 (3, 7, 11, 15) bits (2's complementary)
319 * proximity_less_ambient detection. The range of Scheme 1
320 * proximity count is from -2^(n-1) to 2^(n-1) . The sign bit is extended
321 * for resolutions less than 16. While Scheme 0 has wider dynamic
322 * range, Scheme 1 proximity detection is less affected by the
323 * ambient IR noise variation.
324 *
325 * 0 Sensing IR from LED and ambient
326 * 1 Sensing IR from LED with ambient IR rejection
327 */
328static ssize_t proximity_on_chip_ambient_infrared_suppression_show
329 (struct device *dev, struct device_attribute *attr,
330 char *buf)
315{ 331{
316 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 332 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
317 struct isl29018_chip *chip = iio_priv(indio_dev); 333 struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -323,9 +339,9 @@ static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
323 return sprintf(buf, "%d\n", chip->prox_scheme); 339 return sprintf(buf, "%d\n", chip->prox_scheme);
324} 340}
325 341
326static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev, 342static ssize_t proximity_on_chip_ambient_infrared_suppression_store
327 struct device_attribute *attr, 343 (struct device *dev, struct device_attribute *attr,
328 const char *buf, size_t count) 344 const char *buf, size_t count)
329{ 345{
330 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 346 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
331 struct isl29018_chip *chip = iio_priv(indio_dev); 347 struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -357,6 +373,10 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
357 int ret = -EINVAL; 373 int ret = -EINVAL;
358 374
359 mutex_lock(&chip->lock); 375 mutex_lock(&chip->lock);
376 if (chip->suspended) {
377 ret = -EBUSY;
378 goto write_done;
379 }
360 switch (mask) { 380 switch (mask) {
361 case IIO_CHAN_INFO_CALIBSCALE: 381 case IIO_CHAN_INFO_CALIBSCALE:
362 if (chan->type == IIO_LIGHT) { 382 if (chan->type == IIO_LIGHT) {
@@ -366,13 +386,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
366 } 386 }
367 break; 387 break;
368 case IIO_CHAN_INFO_INT_TIME: 388 case IIO_CHAN_INFO_INT_TIME:
369 if (chan->type == IIO_LIGHT) { 389 if (chan->type == IIO_LIGHT && !val)
370 if (val) {
371 mutex_unlock(&chip->lock);
372 return -EINVAL;
373 }
374 ret = isl29018_set_integration_time(chip, val2); 390 ret = isl29018_set_integration_time(chip, val2);
375 }
376 break; 391 break;
377 case IIO_CHAN_INFO_SCALE: 392 case IIO_CHAN_INFO_SCALE:
378 if (chan->type == IIO_LIGHT) 393 if (chan->type == IIO_LIGHT)
@@ -381,6 +396,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
381 default: 396 default:
382 break; 397 break;
383 } 398 }
399
400write_done:
384 mutex_unlock(&chip->lock); 401 mutex_unlock(&chip->lock);
385 402
386 return ret; 403 return ret;
@@ -397,8 +414,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
397 414
398 mutex_lock(&chip->lock); 415 mutex_lock(&chip->lock);
399 if (chip->suspended) { 416 if (chip->suspended) {
400 mutex_unlock(&chip->lock); 417 ret = -EBUSY;
401 return -EBUSY; 418 goto read_done;
402 } 419 }
403 switch (mask) { 420 switch (mask) {
404 case IIO_CHAN_INFO_RAW: 421 case IIO_CHAN_INFO_RAW:
@@ -445,7 +462,10 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
445 default: 462 default:
446 break; 463 break;
447 } 464 }
465
466read_done:
448 mutex_unlock(&chip->lock); 467 mutex_unlock(&chip->lock);
468
449 return ret; 469 return ret;
450} 470}
451 471
@@ -482,14 +502,9 @@ static const struct iio_chan_spec isl29023_channels[] = {
482 ISL29018_IR_CHANNEL, 502 ISL29018_IR_CHANNEL,
483}; 503};
484 504
485static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO, 505static IIO_DEVICE_ATTR_RO(in_illuminance_integration_time_available, 0);
486 isl29018_show_int_time_available, NULL, 0); 506static IIO_DEVICE_ATTR_RO(in_illuminance_scale_available, 0);
487static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO, 507static IIO_DEVICE_ATTR_RW(proximity_on_chip_ambient_infrared_suppression, 0);
488 isl29018_show_scale_available, NULL, 0);
489static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
490 S_IRUGO | S_IWUSR,
491 isl29018_show_prox_infrared_suppression,
492 isl29018_store_prox_infrared_suppression, 0);
493 508
494#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr) 509#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
495 510
@@ -514,30 +529,6 @@ static const struct attribute_group isl29023_group = {
514 .attrs = isl29023_attributes, 529 .attrs = isl29023_attributes,
515}; 530};
516 531
517static int isl29035_detect(struct isl29018_chip *chip)
518{
519 int status;
520 unsigned int id;
521 struct device *dev = regmap_get_device(chip->regmap);
522
523 status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
524 if (status < 0) {
525 dev_err(dev,
526 "Error reading ID register with error %d\n",
527 status);
528 return status;
529 }
530
531 id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
532
533 if (id != ISL29035_DEVICE_ID)
534 return -ENODEV;
535
536 /* Clear brownout bit */
537 return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
538 ISL29035_BOUT_MASK, 0);
539}
540
541enum { 532enum {
542 isl29018, 533 isl29018,
543 isl29023, 534 isl29023,
@@ -550,12 +541,31 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
550 struct device *dev = regmap_get_device(chip->regmap); 541 struct device *dev = regmap_get_device(chip->regmap);
551 542
552 if (chip->type == isl29035) { 543 if (chip->type == isl29035) {
553 status = isl29035_detect(chip); 544 unsigned int id;
545
546 status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
547 if (status < 0) {
548 dev_err(dev,
549 "Error reading ID register with error %d\n",
550 status);
551 return status;
552 }
553
554 id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
555
556 if (id != ISL29035_DEVICE_ID)
557 return -ENODEV;
558
559 /* Clear brownout bit */
560 status = regmap_update_bits(chip->regmap,
561 ISL29035_REG_DEVICE_ID,
562 ISL29035_BOUT_MASK, 0);
554 if (status < 0) 563 if (status < 0)
555 return status; 564 return status;
556 } 565 }
557 566
558 /* Code added per Intersil Application Note 1534: 567 /*
568 * Code added per Intersil Application Note 1534:
559 * When VDD sinks to approximately 1.8V or below, some of 569 * When VDD sinks to approximately 1.8V or below, some of
560 * the part's registers may change their state. When VDD 570 * the part's registers may change their state. When VDD
561 * recovers to 2.25V (or greater), the part may thus be in an 571 * recovers to 2.25V (or greater), the part may thus be in an
@@ -582,7 +592,8 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
582 return status; 592 return status;
583 } 593 }
584 594
585 /* See Intersil AN1534 comments above. 595 /*
596 * See Intersil AN1534 comments above.
586 * "Operating Mode" (COMMAND1) register is reprogrammed when 597 * "Operating Mode" (COMMAND1) register is reprogrammed when
587 * data is read from the device. 598 * data is read from the device.
588 */ 599 */
@@ -605,12 +616,10 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
605 616
606 status = isl29018_set_integration_time(chip, 617 status = isl29018_set_integration_time(chip,
607 isl29018_int_utimes[chip->type][chip->int_time]); 618 isl29018_int_utimes[chip->type][chip->int_time]);
608 if (status < 0) { 619 if (status < 0)
609 dev_err(dev, "Init of isl29018 fails\n"); 620 dev_err(dev, "Init of isl29018 fails\n");
610 return status;
611 }
612 621
613 return 0; 622 return status;
614} 623}
615 624
616static const struct iio_info isl29018_info = { 625static const struct iio_info isl29018_info = {
@@ -713,6 +722,7 @@ static int isl29018_probe(struct i2c_client *client,
713 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); 722 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
714 if (!indio_dev) 723 if (!indio_dev)
715 return -ENOMEM; 724 return -ENOMEM;
725
716 chip = iio_priv(indio_dev); 726 chip = iio_priv(indio_dev);
717 727
718 i2c_set_clientdata(client, indio_dev); 728 i2c_set_clientdata(client, indio_dev);
@@ -752,6 +762,7 @@ static int isl29018_probe(struct i2c_client *client,
752 indio_dev->name = name; 762 indio_dev->name = name;
753 indio_dev->dev.parent = &client->dev; 763 indio_dev->dev.parent = &client->dev;
754 indio_dev->modes = INDIO_DIRECT_MODE; 764 indio_dev->modes = INDIO_DIRECT_MODE;
765
755 return devm_iio_device_register(&client->dev, indio_dev); 766 return devm_iio_device_register(&client->dev, indio_dev);
756} 767}
757 768
@@ -762,13 +773,15 @@ static int isl29018_suspend(struct device *dev)
762 773
763 mutex_lock(&chip->lock); 774 mutex_lock(&chip->lock);
764 775
765 /* Since this driver uses only polling commands, we are by default in 776 /*
777 * Since this driver uses only polling commands, we are by default in
766 * auto shutdown (ie, power-down) mode. 778 * auto shutdown (ie, power-down) mode.
767 * So we do not have much to do here. 779 * So we do not have much to do here.
768 */ 780 */
769 chip->suspended = true; 781 chip->suspended = true;
770 782
771 mutex_unlock(&chip->lock); 783 mutex_unlock(&chip->lock);
784
772 return 0; 785 return 0;
773} 786}
774 787
@@ -784,6 +797,7 @@ static int isl29018_resume(struct device *dev)
784 chip->suspended = false; 797 chip->suspended = false;
785 798
786 mutex_unlock(&chip->lock); 799 mutex_unlock(&chip->lock);
800
787 return err; 801 return err;
788} 802}
789 803
@@ -807,7 +821,6 @@ static const struct i2c_device_id isl29018_id[] = {
807 {"isl29035", isl29035}, 821 {"isl29035", isl29035},
808 {} 822 {}
809}; 823};
810
811MODULE_DEVICE_TABLE(i2c, isl29018_id); 824MODULE_DEVICE_TABLE(i2c, isl29018_id);
812 825
813static const struct of_device_id isl29018_of_match[] = { 826static const struct of_device_id isl29018_of_match[] = {
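The switch to IIO_DEVICE_ATTR_RO()/IIO_DEVICE_ATTR_RW() is what forces the sysfs callbacks to be renamed: the macros build the attribute via __ATTR_RO()/__ATTR_RW(), which derive the callback names from the attribute name. A minimal sketch with a hypothetical attribute:

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* illustrative value */
}

/* expands to an iio_dev_attr whose .show is foo_show() */
static IIO_DEVICE_ATTR_RO(foo, 0);

An _RW attribute additionally expects a matching foo_store().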
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a3d0b6..b30e0c1c6cc4 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -631,14 +631,16 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
631 631
632 switch (mask) { 632 switch (mask) {
633 case IIO_CHAN_INFO_PROCESSED: 633 case IIO_CHAN_INFO_PROCESSED:
634 if (iio_buffer_enabled(indio_dev))
635 return -EBUSY;
636
637 switch (chan->type) { 634 switch (chan->type) {
638 case IIO_LIGHT: 635 case IIO_LIGHT:
636 ret = iio_device_claim_direct_mode(indio_dev);
637 if (ret)
638 return ret;
639
639 mutex_lock(&data->lock_als); 640 mutex_lock(&data->lock_als);
640 ret = ltr501_read_als(data, buf); 641 ret = ltr501_read_als(data, buf);
641 mutex_unlock(&data->lock_als); 642 mutex_unlock(&data->lock_als);
643 iio_device_release_direct_mode(indio_dev);
642 if (ret < 0) 644 if (ret < 0)
643 return ret; 645 return ret;
644 *val = ltr501_calculate_lux(le16_to_cpu(buf[1]), 646 *val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -648,8 +650,9 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
648 return -EINVAL; 650 return -EINVAL;
649 } 651 }
650 case IIO_CHAN_INFO_RAW: 652 case IIO_CHAN_INFO_RAW:
651 if (iio_buffer_enabled(indio_dev)) 653 ret = iio_device_claim_direct_mode(indio_dev);
652 return -EBUSY; 654 if (ret)
655 return ret;
653 656
654 switch (chan->type) { 657 switch (chan->type) {
655 case IIO_INTENSITY: 658 case IIO_INTENSITY:
@@ -657,21 +660,28 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
657 ret = ltr501_read_als(data, buf); 660 ret = ltr501_read_als(data, buf);
658 mutex_unlock(&data->lock_als); 661 mutex_unlock(&data->lock_als);
659 if (ret < 0) 662 if (ret < 0)
660 return ret; 663 break;
661 *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ? 664 *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
662 buf[0] : buf[1]); 665 buf[0] : buf[1]);
663 return IIO_VAL_INT; 666 ret = IIO_VAL_INT;
667 break;
664 case IIO_PROXIMITY: 668 case IIO_PROXIMITY:
665 mutex_lock(&data->lock_ps); 669 mutex_lock(&data->lock_ps);
666 ret = ltr501_read_ps(data); 670 ret = ltr501_read_ps(data);
667 mutex_unlock(&data->lock_ps); 671 mutex_unlock(&data->lock_ps);
668 if (ret < 0) 672 if (ret < 0)
669 return ret; 673 break;
670 *val = ret & LTR501_PS_DATA_MASK; 674 *val = ret & LTR501_PS_DATA_MASK;
671 return IIO_VAL_INT; 675 ret = IIO_VAL_INT;
676 break;
672 default: 677 default:
673 return -EINVAL; 678 ret = -EINVAL;
679 break;
674 } 680 }
681
682 iio_device_release_direct_mode(indio_dev);
683 return ret;
684
675 case IIO_CHAN_INFO_SCALE: 685 case IIO_CHAN_INFO_SCALE:
676 switch (chan->type) { 686 switch (chan->type) {
677 case IIO_INTENSITY: 687 case IIO_INTENSITY:
@@ -729,8 +739,9 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
729 int i, ret, freq_val, freq_val2; 739 int i, ret, freq_val, freq_val2;
730 struct ltr501_chip_info *info = data->chip_info; 740 struct ltr501_chip_info *info = data->chip_info;
731 741
732 if (iio_buffer_enabled(indio_dev)) 742 ret = iio_device_claim_direct_mode(indio_dev);
733 return -EBUSY; 743 if (ret)
744 return ret;
734 745
735 switch (mask) { 746 switch (mask) {
736 case IIO_CHAN_INFO_SCALE: 747 case IIO_CHAN_INFO_SCALE:
@@ -739,85 +750,105 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
739 i = ltr501_get_gain_index(info->als_gain, 750 i = ltr501_get_gain_index(info->als_gain,
740 info->als_gain_tbl_size, 751 info->als_gain_tbl_size,
741 val, val2); 752 val, val2);
742 if (i < 0) 753 if (i < 0) {
743 return -EINVAL; 754 ret = -EINVAL;
755 break;
756 }
744 757
745 data->als_contr &= ~info->als_gain_mask; 758 data->als_contr &= ~info->als_gain_mask;
746 data->als_contr |= i << info->als_gain_shift; 759 data->als_contr |= i << info->als_gain_shift;
747 760
748 return regmap_write(data->regmap, LTR501_ALS_CONTR, 761 ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
749 data->als_contr); 762 data->als_contr);
763 break;
750 case IIO_PROXIMITY: 764 case IIO_PROXIMITY:
751 i = ltr501_get_gain_index(info->ps_gain, 765 i = ltr501_get_gain_index(info->ps_gain,
752 info->ps_gain_tbl_size, 766 info->ps_gain_tbl_size,
753 val, val2); 767 val, val2);
754 if (i < 0) 768 if (i < 0) {
755 return -EINVAL; 769 ret = -EINVAL;
770 break;
771 }
756 data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK; 772 data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
757 data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT; 773 data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
758 774
759 return regmap_write(data->regmap, LTR501_PS_CONTR, 775 ret = regmap_write(data->regmap, LTR501_PS_CONTR,
760 data->ps_contr); 776 data->ps_contr);
777 break;
761 default: 778 default:
762 return -EINVAL; 779 ret = -EINVAL;
780 break;
763 } 781 }
782 break;
783
764 case IIO_CHAN_INFO_INT_TIME: 784 case IIO_CHAN_INFO_INT_TIME:
765 switch (chan->type) { 785 switch (chan->type) {
766 case IIO_INTENSITY: 786 case IIO_INTENSITY:
767 if (val != 0) 787 if (val != 0) {
768 return -EINVAL; 788 ret = -EINVAL;
789 break;
790 }
769 mutex_lock(&data->lock_als); 791 mutex_lock(&data->lock_als);
770 i = ltr501_set_it_time(data, val2); 792 ret = ltr501_set_it_time(data, val2);
771 mutex_unlock(&data->lock_als); 793 mutex_unlock(&data->lock_als);
772 return i; 794 break;
773 default: 795 default:
774 return -EINVAL; 796 ret = -EINVAL;
797 break;
775 } 798 }
799 break;
800
776 case IIO_CHAN_INFO_SAMP_FREQ: 801 case IIO_CHAN_INFO_SAMP_FREQ:
777 switch (chan->type) { 802 switch (chan->type) {
778 case IIO_INTENSITY: 803 case IIO_INTENSITY:
779 ret = ltr501_als_read_samp_freq(data, &freq_val, 804 ret = ltr501_als_read_samp_freq(data, &freq_val,
780 &freq_val2); 805 &freq_val2);
781 if (ret < 0) 806 if (ret < 0)
782 return ret; 807 break;
783 808
784 ret = ltr501_als_write_samp_freq(data, val, val2); 809 ret = ltr501_als_write_samp_freq(data, val, val2);
785 if (ret < 0) 810 if (ret < 0)
786 return ret; 811 break;
787 812
788 /* update persistence count when changing frequency */ 813 /* update persistence count when changing frequency */
789 ret = ltr501_write_intr_prst(data, chan->type, 814 ret = ltr501_write_intr_prst(data, chan->type,
790 0, data->als_period); 815 0, data->als_period);
791 816
792 if (ret < 0) 817 if (ret < 0)
793 return ltr501_als_write_samp_freq(data, 818 ret = ltr501_als_write_samp_freq(data, freq_val,
794 freq_val, 819 freq_val2);
795 freq_val2); 820 break;
796 return ret;
797 case IIO_PROXIMITY: 821 case IIO_PROXIMITY:
798 ret = ltr501_ps_read_samp_freq(data, &freq_val, 822 ret = ltr501_ps_read_samp_freq(data, &freq_val,
799 &freq_val2); 823 &freq_val2);
800 if (ret < 0) 824 if (ret < 0)
801 return ret; 825 break;
802 826
803 ret = ltr501_ps_write_samp_freq(data, val, val2); 827 ret = ltr501_ps_write_samp_freq(data, val, val2);
804 if (ret < 0) 828 if (ret < 0)
805 return ret; 829 break;
806 830
807 /* update persistence count when changing frequency */ 831 /* update persistence count when changing frequency */
808 ret = ltr501_write_intr_prst(data, chan->type, 832 ret = ltr501_write_intr_prst(data, chan->type,
809 0, data->ps_period); 833 0, data->ps_period);
810 834
811 if (ret < 0) 835 if (ret < 0)
812 return ltr501_ps_write_samp_freq(data, 836 ret = ltr501_ps_write_samp_freq(data, freq_val,
813 freq_val, 837 freq_val2);
814 freq_val2); 838 break;
815 return ret;
816 default: 839 default:
817 return -EINVAL; 840 ret = -EINVAL;
841 break;
818 } 842 }
843 break;
844
845 default:
846 ret = -EINVAL;
847 break;
819 } 848 }
820 return -EINVAL; 849
850 iio_device_release_direct_mode(indio_dev);
851 return ret;
821} 852}
822 853
823static int ltr501_read_thresh(struct iio_dev *indio_dev, 854static int ltr501_read_thresh(struct iio_dev *indio_dev,
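The ltr501 conversion replaces the open-coded iio_buffer_enabled() checks with the claim/release helpers, which also hold the device's mlock so a direct read cannot race with the buffer being enabled. The resulting pattern, sketched for a hypothetical driver:

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;	/* -EBUSY while the buffer is in use */

	ret = foo_read_sensor(indio_dev, chan, val);	/* hypothetical helper */

	iio_device_release_direct_mode(indio_dev);

	return ret < 0 ? ret : IIO_VAL_INT;
}

Every early return between claim and release has to become a break or goto so the mode is always released, which is what most of the churn in the hunk above is about.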
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 6511b20a2a29..a144ca3461fc 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -204,17 +204,18 @@ static int max44000_write_alspga(struct max44000_data *data, int val)
204static int max44000_read_alsval(struct max44000_data *data) 204static int max44000_read_alsval(struct max44000_data *data)
205{ 205{
206 u16 regval; 206 u16 regval;
207 __be16 val;
207 int alstim, ret; 208 int alstim, ret;
208 209
209 ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI, 210 ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
210 &regval, sizeof(regval)); 211 &val, sizeof(val));
211 if (ret < 0) 212 if (ret < 0)
212 return ret; 213 return ret;
213 alstim = ret = max44000_read_alstim(data); 214 alstim = ret = max44000_read_alstim(data);
214 if (ret < 0) 215 if (ret < 0)
215 return ret; 216 return ret;
216 217
217 regval = be16_to_cpu(regval); 218 regval = be16_to_cpu(val);
218 219
219 /* 220 /*
220 * Overflow is explained on datasheet page 17. 221 * Overflow is explained on datasheet page 17.
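The max44000 change is purely an endianness annotation: the two 8-bit data registers hold a big-endian 16-bit sample, so reading into a __be16 bounce buffer and converting with be16_to_cpu() lets sparse check the conversion instead of byte-swapping a plain u16 in place. Roughly:

	__be16 raw;		/* MSB register first on the wire */
	u16 regval;
	int ret;

	ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
			       &raw, sizeof(raw));
	if (ret < 0)
		return ret;
	regval = be16_to_cpu(raw);

(sizeof(raw) doubles as the register count here since the regmap values are 8 bits wide.)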
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
new file mode 100644
index 000000000000..a78b6025c465
--- /dev/null
+++ b/drivers/iio/light/tsl2583.c
@@ -0,0 +1,913 @@
1/*
2 * Device driver for monitoring ambient light intensity (lux)
3 * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
4 *
5 * Copyright (c) 2011, TAOS Corporation.
6 * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/i2c.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/string.h>
24#include <linux/mutex.h>
25#include <linux/unistd.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/iio/iio.h>
29#include <linux/iio/sysfs.h>
30
31/* Device Registers and Masks */
32#define TSL2583_CNTRL 0x00
33#define TSL2583_ALS_TIME 0X01
34#define TSL2583_INTERRUPT 0x02
35#define TSL2583_GAIN 0x07
36#define TSL2583_REVID 0x11
37#define TSL2583_CHIPID 0x12
38#define TSL2583_ALS_CHAN0LO 0x14
39#define TSL2583_ALS_CHAN0HI 0x15
40#define TSL2583_ALS_CHAN1LO 0x16
41#define TSL2583_ALS_CHAN1HI 0x17
42#define TSL2583_TMR_LO 0x18
43#define TSL2583_TMR_HI 0x19
44
45/* tsl2583 cmd reg masks */
46#define TSL2583_CMD_REG 0x80
47#define TSL2583_CMD_SPL_FN 0x60
48#define TSL2583_CMD_ALS_INT_CLR 0x01
49
50/* tsl2583 cntrl reg masks */
51#define TSL2583_CNTL_ADC_ENBL 0x02
52#define TSL2583_CNTL_PWR_OFF 0x00
53#define TSL2583_CNTL_PWR_ON 0x01
54
55/* tsl2583 status reg masks */
56#define TSL2583_STA_ADC_VALID 0x01
57#define TSL2583_STA_ADC_INTR 0x10
58
59/* Lux calculation constants */
60#define TSL2583_LUX_CALC_OVER_FLOW 65535
61
62#define TSL2583_INTERRUPT_DISABLED 0x00
63
64#define TSL2583_CHIP_ID 0x90
65#define TSL2583_CHIP_ID_MASK 0xf0
66
67/* Per-device data */
68struct tsl2583_als_info {
69 u16 als_ch0;
70 u16 als_ch1;
71 u16 lux;
72};
73
74struct tsl2583_lux {
75 unsigned int ratio;
76 unsigned int ch0;
77 unsigned int ch1;
78};
79
80static const struct tsl2583_lux tsl2583_default_lux[] = {
81 { 9830, 8520, 15729 },
82 { 12452, 10807, 23344 },
83 { 14746, 6383, 11705 },
84 { 17695, 4063, 6554 },
85 { 0, 0, 0 } /* Termination segment */
86};
87
88#define TSL2583_MAX_LUX_TABLE_ENTRIES 11
89
90struct tsl2583_settings {
91 int als_time;
92 int als_gain;
93 int als_gain_trim;
94 int als_cal_target;
95
96 /*
97 * This structure is intentionally large to accommodate updates via
98 * sysfs. Sized to 11 = max 10 segments + 1 termination segment.
99 * Assumption is that one and only one type of glass used.
100 */
101 struct tsl2583_lux als_device_lux[TSL2583_MAX_LUX_TABLE_ENTRIES];
102};
103
104struct tsl2583_chip {
105 struct mutex als_mutex;
106 struct i2c_client *client;
107 struct tsl2583_als_info als_cur_info;
108 struct tsl2583_settings als_settings;
109 int als_time_scale;
110 int als_saturation;
111 bool suspended;
112};
113
114struct gainadj {
115 s16 ch0;
116 s16 ch1;
117 s16 mean;
118};
119
120/* Index = (0 - 3) Used to validate the gain selection index */
121static const struct gainadj gainadj[] = {
122 { 1, 1, 1 },
123 { 8, 8, 8 },
124 { 16, 16, 16 },
125 { 107, 115, 111 }
126};
127
128/*
129 * Provides initial operational parameter defaults.
130 * These defaults may be changed through the device's sysfs files.
131 */
132static void tsl2583_defaults(struct tsl2583_chip *chip)
133{
134 /*
135 * The integration time must be a multiple of 50ms and within the
136 * range [50, 600] ms.
137 */
138 chip->als_settings.als_time = 100;
139
140 /*
141 * This is an index into the gainadj table. Assume clear glass as the
142 * default.
143 */
144 chip->als_settings.als_gain = 0;
145
146 /* Default gain trim to account for aperture effects */
147 chip->als_settings.als_gain_trim = 1000;
148
149 /* Known external ALS reading used for calibration */
150 chip->als_settings.als_cal_target = 130;
151
152 /* Default lux table. */
153 memcpy(chip->als_settings.als_device_lux, tsl2583_default_lux,
154 sizeof(tsl2583_default_lux));
155}
156
157/*
158 * Reads and calculates current lux value.
159 * The raw ch0 and ch1 values of the ambient light sensed in the last
160 * integration cycle are read from the device.
161 * Time scale factor array values are adjusted based on the integration time.
162 * The raw values are multiplied by a scale factor, and device gain is obtained
163 * using gain index. Limit checks are done next, then the ratio of a multiple
164 * of ch1 value, to the ch0 value, is calculated. The array als_device_lux[]
165 * declared above is then scanned to find the first ratio value that is just
166 * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
167 * the array are then used along with the time scale factor array values, to
168 * calculate the lux.
169 */
170static int tsl2583_get_lux(struct iio_dev *indio_dev)
171{
172 u16 ch0, ch1; /* separated ch0/ch1 data from device */
173 u32 lux; /* raw lux calculated from device data */
174 u64 lux64;
175 u32 ratio;
176 u8 buf[5];
177 struct tsl2583_lux *p;
178 struct tsl2583_chip *chip = iio_priv(indio_dev);
179 int i, ret;
180
181 ret = i2c_smbus_read_byte_data(chip->client, TSL2583_CMD_REG);
182 if (ret < 0) {
183 dev_err(&chip->client->dev, "%s: failed to read CMD_REG register\n",
184 __func__);
185 goto done;
186 }
187
188 /* is data new & valid */
189 if (!(ret & TSL2583_STA_ADC_INTR)) {
190 dev_err(&chip->client->dev, "%s: data not valid; returning last value\n",
191 __func__);
192 ret = chip->als_cur_info.lux; /* return LAST VALUE */
193 goto done;
194 }
195
196 for (i = 0; i < 4; i++) {
197 int reg = TSL2583_CMD_REG | (TSL2583_ALS_CHAN0LO + i);
198
199 ret = i2c_smbus_read_byte_data(chip->client, reg);
200 if (ret < 0) {
201 dev_err(&chip->client->dev, "%s: failed to read register %x\n",
202 __func__, reg);
203 goto done;
204 }
205 buf[i] = ret;
206 }
207
208 /*
209 * Clear the pending interrupt status bit on the chip to allow the next
210 * integration cycle to start. This has to be done even though this
211 * driver currently does not support interrupts.
212 */
213 ret = i2c_smbus_write_byte(chip->client,
214 (TSL2583_CMD_REG | TSL2583_CMD_SPL_FN |
215 TSL2583_CMD_ALS_INT_CLR));
216 if (ret < 0) {
217 dev_err(&chip->client->dev, "%s: failed to clear the interrupt bit\n",
218 __func__);
219 goto done; /* have no data, so return failure */
220 }
221
222 /* extract ALS/lux data */
223 ch0 = le16_to_cpup((const __le16 *)&buf[0]);
224 ch1 = le16_to_cpup((const __le16 *)&buf[2]);
225
226 chip->als_cur_info.als_ch0 = ch0;
227 chip->als_cur_info.als_ch1 = ch1;
228
229 if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
230 goto return_max;
231
232 if (!ch0) {
233 /*
234 * The sensor appears to be in total darkness so set the
235 * calculated lux to 0 and return early to avoid a division by
236 * zero below when calculating the ratio.
237 */
238 ret = 0;
239 chip->als_cur_info.lux = 0;
240 goto done;
241 }
242
243 /* calculate ratio */
244 ratio = (ch1 << 15) / ch0;
245
246 /* convert to unscaled lux using the pointer to the table */
247 for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
248 p->ratio != 0 && p->ratio < ratio; p++)
249 ;
250
251 if (p->ratio == 0) {
252 lux = 0;
253 } else {
254 u32 ch0lux, ch1lux;
255
256 ch0lux = ((ch0 * p->ch0) +
257 (gainadj[chip->als_settings.als_gain].ch0 >> 1))
258 / gainadj[chip->als_settings.als_gain].ch0;
259 ch1lux = ((ch1 * p->ch1) +
260 (gainadj[chip->als_settings.als_gain].ch1 >> 1))
261 / gainadj[chip->als_settings.als_gain].ch1;
262
263 /* note: lux is 31 bit max at this point */
264 if (ch1lux > ch0lux) {
265 dev_dbg(&chip->client->dev, "%s: No Data - Returning 0\n",
266 __func__);
267 ret = 0;
268 chip->als_cur_info.lux = 0;
269 goto done;
270 }
271
272 lux = ch0lux - ch1lux;
273 }
274
275 /* adjust for active time scale */
276 if (chip->als_time_scale == 0)
277 lux = 0;
278 else
279 lux = (lux + (chip->als_time_scale >> 1)) /
280 chip->als_time_scale;
281
282 /*
283 * Adjust for active gain scale.
284 * The tsl2583_default_lux tables above have a factor of 8192 built in,
285 * so we need to shift right.
286 * User-specified gain provides a multiplier.
287 * Apply user-specified gain before shifting right to retain precision.
288 * Use 64 bits to avoid overflow on multiplication.
289 * Then go back to 32 bits before division to avoid using div_u64().
290 */
291 lux64 = lux;
292 lux64 = lux64 * chip->als_settings.als_gain_trim;
293 lux64 >>= 13;
294 lux = lux64;
295 lux = (lux + 500) / 1000;
296
297 if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
298return_max:
299 lux = TSL2583_LUX_CALC_OVER_FLOW;
300 }
301
302 /* Update the structure with the latest VALID lux. */
303 chip->als_cur_info.lux = lux;
304 ret = lux;
305
306done:
307 return ret;
308}
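/*
 * Worked example with illustrative numbers: ch0 = 1000, ch1 = 200,
 * gain index 0 (1x), als_time_scale = 2, als_gain_trim = 1000.
 * ratio = (200 << 15) / 1000 = 6553, selecting the first default
 * table entry (9830, 8520, 15729); ch0lux = 8520000 and
 * ch1lux = 3145800, so lux = 5374200, then 2687100 after the
 * time-scale division, 2687100 * 1000 >> 13 = 328015 after the
 * gain trim, and (328015 + 500) / 1000 = 328 lux reported.
 */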
309
310/*
311 * Obtain single reading and calculate the als_gain_trim (later used
312 * to derive actual lux).
313 * Return updated gain_trim value.
314 */
315static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
316{
317 struct tsl2583_chip *chip = iio_priv(indio_dev);
318 unsigned int gain_trim_val;
319 int ret;
320 int lux_val;
321
322 ret = i2c_smbus_read_byte_data(chip->client,
323 TSL2583_CMD_REG | TSL2583_CNTRL);
324 if (ret < 0) {
325 dev_err(&chip->client->dev,
326 "%s: failed to read from the CNTRL register\n",
327 __func__);
328 return ret;
329 }
330
331 if ((ret & (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON))
332 != (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON)) {
333 dev_err(&chip->client->dev,
334 "%s: Device is not powered on and/or ADC is not enabled\n",
335 __func__);
336 return -EINVAL;
337 } else if ((ret & TSL2583_STA_ADC_VALID) != TSL2583_STA_ADC_VALID) {
338 dev_err(&chip->client->dev,
339 "%s: The two ADC channels have not completed an integration cycle\n",
340 __func__);
341 return -ENODATA;
342 }
343
344 lux_val = tsl2583_get_lux(indio_dev);
345 if (lux_val < 0) {
346 dev_err(&chip->client->dev, "%s: failed to get lux\n",
347 __func__);
348 return lux_val;
349 }
350
351 gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
352 * chip->als_settings.als_gain_trim) / lux_val);
353 if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
354 dev_err(&chip->client->dev,
355 "%s: trim_val of %d is not within the range [250, 4000]\n",
356 __func__, gain_trim_val);
357 return -ENODATA;
358 }
359
360 chip->als_settings.als_gain_trim = (int)gain_trim_val;
361
362 return 0;
363}
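/*
 * Illustrative numbers: with the default als_cal_target of 130, the
 * default als_gain_trim of 1000 and a measured lux of 100, the new
 * trim becomes (130 * 1000) / 100 = 1300, comfortably inside the
 * accepted [250, 4000] window.
 */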
364
365static int tsl2583_set_als_time(struct tsl2583_chip *chip)
366{
367 int als_count, als_time, ret;
368 u8 val;
369
370 /* determine als integration register */
371 als_count = (chip->als_settings.als_time * 100 + 135) / 270;
372 if (!als_count)
373 als_count = 1; /* ensure at least one cycle */
374
375 /* convert back to time (encompasses overrides) */
376 als_time = (als_count * 27 + 5) / 10;
377
378 val = 256 - als_count;
379 ret = i2c_smbus_write_byte_data(chip->client,
380 TSL2583_CMD_REG | TSL2583_ALS_TIME,
381 val);
382 if (ret < 0) {
383 dev_err(&chip->client->dev, "%s: failed to set the als time to %d\n",
384 __func__, val);
385 return ret;
386 }
387
388 /* set chip struct re scaling and saturation */
389 chip->als_saturation = als_count * 922; /* 90% of full scale */
390 chip->als_time_scale = (als_time + 25) / 50;
391
392 return ret;
393}
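/*
 * Worked example (illustrative): with the default als_time of 100 ms,
 * als_count = (100 * 100 + 135) / 270 = 37, so the ALS_TIME register
 * is written with 256 - 37 = 219, the effective integration time is
 * (37 * 27 + 5) / 10 = 100 ms, als_saturation = 37 * 922 = 34114 and
 * als_time_scale = (100 + 25) / 50 = 2.
 */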
394
395static int tsl2583_set_als_gain(struct tsl2583_chip *chip)
396{
397 int ret;
398
399 /* Set the gain based on als_settings struct */
400 ret = i2c_smbus_write_byte_data(chip->client,
401 TSL2583_CMD_REG | TSL2583_GAIN,
402 chip->als_settings.als_gain);
403 if (ret < 0)
404 dev_err(&chip->client->dev,
405 "%s: failed to set the gain to %d\n", __func__,
406 chip->als_settings.als_gain);
407
408 return ret;
409}
410
411static int tsl2583_set_power_state(struct tsl2583_chip *chip, u8 state)
412{
413 int ret;
414
415 ret = i2c_smbus_write_byte_data(chip->client,
416 TSL2583_CMD_REG | TSL2583_CNTRL, state);
417 if (ret < 0)
418 dev_err(&chip->client->dev,
419 "%s: failed to set the power state to %d\n", __func__,
420 state);
421
422 return ret;
423}
424
425/*
426 * Turn the device on.
427 * Configuration must be set before calling this function.
428 */
429static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
430{
431 struct tsl2583_chip *chip = iio_priv(indio_dev);
432 int ret;
433
434 /* Power on the device; ADC off. */
435 ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON);
436 if (ret < 0)
437 return ret;
438
439 ret = i2c_smbus_write_byte_data(chip->client,
440 TSL2583_CMD_REG | TSL2583_INTERRUPT,
441 TSL2583_INTERRUPT_DISABLED);
442 if (ret < 0) {
443 dev_err(&chip->client->dev,
444 "%s: failed to disable interrupts\n", __func__);
445 return ret;
446 }
447
448 ret = tsl2583_set_als_time(chip);
449 if (ret < 0)
450 return ret;
451
452 ret = tsl2583_set_als_gain(chip);
453 if (ret < 0)
454 return ret;
455
456 usleep_range(3000, 3500);
457
458 ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON |
459 TSL2583_CNTL_ADC_ENBL);
460 if (ret < 0)
461 return ret;
462
463 chip->suspended = false;
464
465 return ret;
466}
467
468/* Sysfs Interface Functions */
469
470static ssize_t in_illuminance_input_target_show(struct device *dev,
471 struct device_attribute *attr,
472 char *buf)
473{
474 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
475 struct tsl2583_chip *chip = iio_priv(indio_dev);
476 int ret;
477
478 mutex_lock(&chip->als_mutex);
479 ret = sprintf(buf, "%d\n", chip->als_settings.als_cal_target);
480 mutex_unlock(&chip->als_mutex);
481
482 return ret;
483}
484
485static ssize_t in_illuminance_input_target_store(struct device *dev,
486 struct device_attribute *attr,
487 const char *buf, size_t len)
488{
489 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
490 struct tsl2583_chip *chip = iio_priv(indio_dev);
491 int value;
492
493 if (kstrtoint(buf, 0, &value) || !value)
494 return -EINVAL;
495
496 mutex_lock(&chip->als_mutex);
497 chip->als_settings.als_cal_target = value;
498 mutex_unlock(&chip->als_mutex);
499
500 return len;
501}
502
503static ssize_t in_illuminance_calibrate_store(struct device *dev,
504 struct device_attribute *attr,
505 const char *buf, size_t len)
506{
507 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
508 struct tsl2583_chip *chip = iio_priv(indio_dev);
509 int value, ret;
510
511 if (kstrtoint(buf, 0, &value) || value != 1)
512 return -EINVAL;
513
514 mutex_lock(&chip->als_mutex);
515
516 if (chip->suspended) {
517 ret = -EBUSY;
518 goto done;
519 }
520
521 ret = tsl2583_als_calibrate(indio_dev);
522 if (ret < 0)
523 goto done;
524
525 ret = len;
526done:
527 mutex_unlock(&chip->als_mutex);
528
529 return ret;
530}
531
532static ssize_t in_illuminance_lux_table_show(struct device *dev,
533 struct device_attribute *attr,
534 char *buf)
535{
536 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
537 struct tsl2583_chip *chip = iio_priv(indio_dev);
538 unsigned int i;
539 int offset = 0;
540
541 for (i = 0; i < ARRAY_SIZE(chip->als_settings.als_device_lux); i++) {
542 offset += sprintf(buf + offset, "%u,%u,%u,",
543 chip->als_settings.als_device_lux[i].ratio,
544 chip->als_settings.als_device_lux[i].ch0,
545 chip->als_settings.als_device_lux[i].ch1);
546 if (chip->als_settings.als_device_lux[i].ratio == 0) {
547 /*
548 * We just printed the first "0" entry.
549 * Now get rid of the extra "," and break.
550 */
551 offset--;
552 break;
553 }
554 }
555
556 offset += sprintf(buf + offset, "\n");
557
558 return offset;
559}
560
561static ssize_t in_illuminance_lux_table_store(struct device *dev,
562 struct device_attribute *attr,
563 const char *buf, size_t len)
564{
565 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
566 struct tsl2583_chip *chip = iio_priv(indio_dev);
567 const unsigned int max_ints = TSL2583_MAX_LUX_TABLE_ENTRIES * 3;
568 int value[TSL2583_MAX_LUX_TABLE_ENTRIES * 3 + 1];
569 int ret = -EINVAL;
570 unsigned int n;
571
572 mutex_lock(&chip->als_mutex);
573
574 get_options(buf, ARRAY_SIZE(value), value);
575
576 /*
577 * We now have an array of ints starting at value[1], and
578 * enumerated by value[0].
579 * We expect each group of three ints is one table entry,
580 * and the last table entry is all 0.
581 */
582 n = value[0];
583 if ((n % 3) || n < 6 || n > max_ints) {
584 dev_err(dev,
585 "%s: The number of entries in the lux table must be a multiple of 3 and within the range [6, %d]\n",
586 __func__, max_ints);
587 goto done;
588 }
589 if ((value[n - 2] | value[n - 1] | value[n]) != 0) {
590 dev_err(dev, "%s: The last 3 entries in the lux table must be zeros.\n",
591 __func__);
592 goto done;
593 }
594
595 memcpy(chip->als_settings.als_device_lux, &value[1],
596 value[0] * sizeof(value[1]));
597
598 ret = len;
599
600done:
601 mutex_unlock(&chip->als_mutex);
602
603 return ret;
604}
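/*
 * Example input (assuming the usual /sys/bus/iio/devices/iio:deviceN
 * location): the default table can be written back with
 *   echo "9830,8520,15729,12452,10807,23344,14746,6383,11705,17695,4063,6554,0,0,0" \
 *       > in_illuminance_lux_table
 * i.e. groups of three integers (ratio, ch0, ch1) terminated by a
 * 0,0,0 entry, 15 values in total here.
 */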
605
606static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
607static IIO_CONST_ATTR(in_illuminance_integration_time_available,
608 "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
609static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
610static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
611static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
612
613static struct attribute *sysfs_attrs_ctrl[] = {
614 &iio_const_attr_in_illuminance_calibscale_available.dev_attr.attr,
615 &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
616 &iio_dev_attr_in_illuminance_input_target.dev_attr.attr,
617 &iio_dev_attr_in_illuminance_calibrate.dev_attr.attr,
618 &iio_dev_attr_in_illuminance_lux_table.dev_attr.attr,
619 NULL
620};
621
622static const struct attribute_group tsl2583_attribute_group = {
623 .attrs = sysfs_attrs_ctrl,
624};
625
626static const struct iio_chan_spec tsl2583_channels[] = {
627 {
628 .type = IIO_LIGHT,
629 .modified = 1,
630 .channel2 = IIO_MOD_LIGHT_IR,
631 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
632 },
633 {
634 .type = IIO_LIGHT,
635 .modified = 1,
636 .channel2 = IIO_MOD_LIGHT_BOTH,
637 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
638 },
639 {
640 .type = IIO_LIGHT,
641 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
642 BIT(IIO_CHAN_INFO_CALIBBIAS) |
643 BIT(IIO_CHAN_INFO_CALIBSCALE) |
644 BIT(IIO_CHAN_INFO_INT_TIME),
645 },
646};
647
648static int tsl2583_read_raw(struct iio_dev *indio_dev,
649 struct iio_chan_spec const *chan,
650 int *val, int *val2, long mask)
651{
652 struct tsl2583_chip *chip = iio_priv(indio_dev);
653 int ret = -EINVAL;
654
655 mutex_lock(&chip->als_mutex);
656
657 if (chip->suspended) {
658 ret = -EBUSY;
659 goto read_done;
660 }
661
662 switch (mask) {
663 case IIO_CHAN_INFO_RAW:
664 if (chan->type == IIO_LIGHT) {
665 ret = tsl2583_get_lux(indio_dev);
666 if (ret < 0)
667 goto read_done;
668
669 /*
670 * From page 20 of the TSL2581, TSL2583 data
671 * sheet (TAOS134 − MARCH 2011):
672 *
673 * One of the photodiodes (channel 0) is
674 * sensitive to both visible and infrared light,
675 * while the second photodiode (channel 1) is
676 * sensitive primarily to infrared light.
677 */
678 if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
679 *val = chip->als_cur_info.als_ch0;
680 else
681 *val = chip->als_cur_info.als_ch1;
682
683 ret = IIO_VAL_INT;
684 }
685 break;
686 case IIO_CHAN_INFO_PROCESSED:
687 if (chan->type == IIO_LIGHT) {
688 ret = tsl2583_get_lux(indio_dev);
689 if (ret < 0)
690 goto read_done;
691
692 *val = ret;
693 ret = IIO_VAL_INT;
694 }
695 break;
696 case IIO_CHAN_INFO_CALIBBIAS:
697 if (chan->type == IIO_LIGHT) {
698 *val = chip->als_settings.als_gain_trim;
699 ret = IIO_VAL_INT;
700 }
701 break;
702 case IIO_CHAN_INFO_CALIBSCALE:
703 if (chan->type == IIO_LIGHT) {
704 *val = gainadj[chip->als_settings.als_gain].mean;
705 ret = IIO_VAL_INT;
706 }
707 break;
708 case IIO_CHAN_INFO_INT_TIME:
709 if (chan->type == IIO_LIGHT) {
710 *val = 0;
711 *val2 = chip->als_settings.als_time;
712 ret = IIO_VAL_INT_PLUS_MICRO;
713 }
714 break;
715 default:
716 break;
717 }
718
719read_done:
720 mutex_unlock(&chip->als_mutex);
721
722 return ret;
723}
724
725static int tsl2583_write_raw(struct iio_dev *indio_dev,
726 struct iio_chan_spec const *chan,
727 int val, int val2, long mask)
728{
729 struct tsl2583_chip *chip = iio_priv(indio_dev);
730 int ret = -EINVAL;
731
732 mutex_lock(&chip->als_mutex);
733
734 if (chip->suspended) {
735 ret = -EBUSY;
736 goto write_done;
737 }
738
739 switch (mask) {
740 case IIO_CHAN_INFO_CALIBBIAS:
741 if (chan->type == IIO_LIGHT) {
742 chip->als_settings.als_gain_trim = val;
743 ret = 0;
744 }
745 break;
746 case IIO_CHAN_INFO_CALIBSCALE:
747 if (chan->type == IIO_LIGHT) {
748 unsigned int i;
749
750 for (i = 0; i < ARRAY_SIZE(gainadj); i++) {
751 if (gainadj[i].mean == val) {
752 chip->als_settings.als_gain = i;
753 ret = tsl2583_set_als_gain(chip);
754 break;
755 }
756 }
757 }
758 break;
759 case IIO_CHAN_INFO_INT_TIME:
760 if (chan->type == IIO_LIGHT && !val && val2 >= 50 &&
761 val2 <= 650 && !(val2 % 50)) {
762 chip->als_settings.als_time = val2;
763 ret = tsl2583_set_als_time(chip);
764 }
765 break;
766 default:
767 break;
768 }
769
770write_done:
771 mutex_unlock(&chip->als_mutex);
772
773 return ret;
774}
775
776static const struct iio_info tsl2583_info = {
777 .attrs = &tsl2583_attribute_group,
778 .driver_module = THIS_MODULE,
779 .read_raw = tsl2583_read_raw,
780 .write_raw = tsl2583_write_raw,
781};
782
783static int tsl2583_probe(struct i2c_client *clientp,
784 const struct i2c_device_id *idp)
785{
786 int ret;
787 struct tsl2583_chip *chip;
788 struct iio_dev *indio_dev;
789
790 if (!i2c_check_functionality(clientp->adapter,
791 I2C_FUNC_SMBUS_BYTE_DATA)) {
792 dev_err(&clientp->dev, "%s: i2c smbus byte data functionality is unsupported\n",
793 __func__);
794 return -EOPNOTSUPP;
795 }
796
797 indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
798 if (!indio_dev)
799 return -ENOMEM;
800
801 chip = iio_priv(indio_dev);
802 chip->client = clientp;
803 i2c_set_clientdata(clientp, indio_dev);
804
805 mutex_init(&chip->als_mutex);
806 chip->suspended = true;
807
808 ret = i2c_smbus_read_byte_data(clientp,
809 TSL2583_CMD_REG | TSL2583_CHIPID);
810 if (ret < 0) {
811 dev_err(&clientp->dev,
812 "%s: failed to read the chip ID register\n", __func__);
813 return ret;
814 }
815
816 if ((ret & TSL2583_CHIP_ID_MASK) != TSL2583_CHIP_ID) {
817 dev_err(&clientp->dev, "%s: received an unknown chip ID %x\n",
818 __func__, ret);
819 return -EINVAL;
820 }
821
822 indio_dev->info = &tsl2583_info;
823 indio_dev->channels = tsl2583_channels;
824 indio_dev->num_channels = ARRAY_SIZE(tsl2583_channels);
825 indio_dev->dev.parent = &clientp->dev;
826 indio_dev->modes = INDIO_DIRECT_MODE;
827 indio_dev->name = chip->client->name;
828
829 ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
830 if (ret) {
831 dev_err(&clientp->dev, "%s: iio registration failed\n",
832 __func__);
833 return ret;
834 }
835
836 /* Load up the V2 defaults (these are hard coded defaults for now) */
837 tsl2583_defaults(chip);
838
839 /* Make sure the chip is on */
840 ret = tsl2583_chip_init_and_power_on(indio_dev);
841 if (ret < 0)
842 return ret;
843
844 dev_info(&clientp->dev, "Light sensor found.\n");
845
846 return 0;
847}
848
849static int __maybe_unused tsl2583_suspend(struct device *dev)
850{
851 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
852 struct tsl2583_chip *chip = iio_priv(indio_dev);
853 int ret;
854
855 mutex_lock(&chip->als_mutex);
856
857 ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
858 chip->suspended = true;
859
860 mutex_unlock(&chip->als_mutex);
861
862 return ret;
863}
864
865static int __maybe_unused tsl2583_resume(struct device *dev)
866{
867 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
868 struct tsl2583_chip *chip = iio_priv(indio_dev);
869 int ret;
870
871 mutex_lock(&chip->als_mutex);
872
873 ret = tsl2583_chip_init_and_power_on(indio_dev);
874
875 mutex_unlock(&chip->als_mutex);
876
877 return ret;
878}
879
880static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
881
882static struct i2c_device_id tsl2583_idtable[] = {
883 { "tsl2580", 0 },
884 { "tsl2581", 1 },
885 { "tsl2583", 2 },
886 {}
887};
888MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
889
890static const struct of_device_id tsl2583_of_match[] = {
891 { .compatible = "amstaos,tsl2580", },
892 { .compatible = "amstaos,tsl2581", },
893 { .compatible = "amstaos,tsl2583", },
894 { },
895};
896MODULE_DEVICE_TABLE(of, tsl2583_of_match);
897
898/* Driver definition */
899static struct i2c_driver tsl2583_driver = {
900 .driver = {
901 .name = "tsl2583",
902 .pm = &tsl2583_pm_ops,
903 .of_match_table = tsl2583_of_match,
904 },
905 .id_table = tsl2583_idtable,
906 .probe = tsl2583_probe,
907};
908module_i2c_driver(tsl2583_driver);
909
910MODULE_AUTHOR("J. August Brenner <jbrenner@taosinc.com>");
911MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
912MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
913MODULE_LICENSE("GPL");
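
The tsl2583 read_raw()/write_raw() handlers above expose the two raw photodiode channels and the processed lux channel through the usual IIO sysfs ABI. Purely as an illustration (the iio:device0 path and the in_illuminance_input attribute name are assumptions about a particular system, not something stated in the patch), a minimal user-space reader could look like this:

/* Sketch: read the processed lux value exported by the driver above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed path; the device index depends on probe order. */
	const char *attr = "/sys/bus/iio/devices/iio:device0/in_illuminance_input";
	FILE *f = fopen(attr, "r");
	long lux;

	if (!f) {
		perror(attr);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%ld", &lux) == 1)
		printf("lux: %ld\n", lux);
	fclose(f);
	return 0;
}
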
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 217353145676..ce09d771c1fb 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -287,7 +287,7 @@ static int ak8974_await_drdy(struct ak8974 *ak8974)
287 return 0; 287 return 0;
288} 288}
289 289
290static int ak8974_getresult(struct ak8974 *ak8974, s16 *result) 290static int ak8974_getresult(struct ak8974 *ak8974, __le16 *result)
291{ 291{
292 unsigned int src; 292 unsigned int src;
293 int ret; 293 int ret;
@@ -395,7 +395,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
395static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val) 395static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
396{ 396{
397 int ret; 397 int ret;
398 u16 bulk; 398 __le16 bulk;
399 399
400 ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2); 400 ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
401 if (ret) 401 if (ret)
@@ -453,7 +453,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
453 long mask) 453 long mask)
454{ 454{
455 struct ak8974 *ak8974 = iio_priv(indio_dev); 455 struct ak8974 *ak8974 = iio_priv(indio_dev);
456 s16 hw_values[3]; 456 __le16 hw_values[3];
457 int ret = -EINVAL; 457 int ret = -EINVAL;
458 458
459 pm_runtime_get_sync(&ak8974->i2c->dev); 459 pm_runtime_get_sync(&ak8974->i2c->dev);
@@ -494,7 +494,7 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
494{ 494{
495 struct ak8974 *ak8974 = iio_priv(indio_dev); 495 struct ak8974 *ak8974 = iio_priv(indio_dev);
496 int ret; 496 int ret;
497 s16 hw_values[8]; /* Three axes + 64bit padding */ 497 __le16 hw_values[8]; /* Three axes + 64bit padding */
498 498
499 pm_runtime_get_sync(&ak8974->i2c->dev); 499 pm_runtime_get_sync(&ak8974->i2c->dev);
500 mutex_lock(&ak8974->lock); 500 mutex_lock(&ak8974->lock);
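
The ak8974 hunks above change the raw sample buffers from s16 to __le16 so that the little-endian register data is converted explicitly before use, keeping the driver correct regardless of host endianness. As a sketch only (not part of the patch), assembling the low byte first is equivalent to storing the two bytes in a __le16 and converting it with le16_to_cpu():

/* Sketch: convert a 16-bit little-endian register value to host order. */
#include <stdint.h>
#include <stdio.h>

static int16_t le16_to_host(const uint8_t raw[2])
{
	/* Low byte first, then high byte: correct on any host endianness. */
	return (int16_t)((uint16_t)raw[0] | ((uint16_t)raw[1] << 8));
}

int main(void)
{
	const uint8_t raw[2] = { 0x34, 0x12 };	/* bus order: LSB, MSB */

	printf("0x%04x\n", (uint16_t)le16_to_host(raw));	/* prints 0x1234 */
	return 0;
}
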
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index af8606cc7812..825369fb1c57 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -690,6 +690,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
690 struct ak8975_data *data = iio_priv(indio_dev); 690 struct ak8975_data *data = iio_priv(indio_dev);
691 const struct i2c_client *client = data->client; 691 const struct i2c_client *client = data->client;
692 const struct ak_def *def = data->def; 692 const struct ak_def *def = data->def;
693 __le16 rval;
693 u16 buff; 694 u16 buff;
694 int ret; 695 int ret;
695 696
@@ -703,7 +704,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
703 704
704 ret = i2c_smbus_read_i2c_block_data_or_emulated( 705 ret = i2c_smbus_read_i2c_block_data_or_emulated(
705 client, def->data_regs[index], 706 client, def->data_regs[index],
706 sizeof(buff), (u8*)&buff); 707 sizeof(rval), (u8*)&rval);
707 if (ret < 0) 708 if (ret < 0)
708 goto exit; 709 goto exit;
709 710
@@ -713,7 +714,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
713 pm_runtime_put_autosuspend(&data->client->dev); 714 pm_runtime_put_autosuspend(&data->client->dev);
714 715
715 /* Swap bytes and convert to valid range. */ 716 /* Swap bytes and convert to valid range. */
716 buff = le16_to_cpu(buff); 717 buff = le16_to_cpu(rval);
717 *val = clamp_t(s16, buff, -def->range, def->range); 718 *val = clamp_t(s16, buff, -def->range, def->range);
718 return IIO_VAL_INT; 719 return IIO_VAL_INT;
719 720
@@ -813,6 +814,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
813 const struct ak_def *def = data->def; 814 const struct ak_def *def = data->def;
814 int ret; 815 int ret;
815 s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ 816 s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
817 __le16 fval[3];
816 818
817 mutex_lock(&data->lock); 819 mutex_lock(&data->lock);
818 820
@@ -826,17 +828,17 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
826 */ 828 */
827 ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 829 ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
828 def->data_regs[0], 830 def->data_regs[0],
829 3 * sizeof(buff[0]), 831 3 * sizeof(fval[0]),
830 (u8 *)buff); 832 (u8 *)fval);
831 if (ret < 0) 833 if (ret < 0)
832 goto unlock; 834 goto unlock;
833 835
834 mutex_unlock(&data->lock); 836 mutex_unlock(&data->lock);
835 837
836 /* Clamp to valid range. */ 838 /* Clamp to valid range. */
837 buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range); 839 buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
838 buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range); 840 buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
839 buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range); 841 buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
840 842
841 iio_push_to_buffers_with_timestamp(indio_dev, buff, 843 iio_push_to_buffers_with_timestamp(indio_dev, buff,
842 iio_get_time_ns(indio_dev)); 844 iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d8a0c8da8db0..0e791b02ed4a 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -42,9 +42,17 @@ enum magn_3d_channel {
42 MAGN_3D_CHANNEL_MAX, 42 MAGN_3D_CHANNEL_MAX,
43}; 43};
44 44
45struct common_attributes {
46 int scale_pre_decml;
47 int scale_post_decml;
48 int scale_precision;
49 int value_offset;
50};
51
45struct magn_3d_state { 52struct magn_3d_state {
46 struct hid_sensor_hub_callbacks callbacks; 53 struct hid_sensor_hub_callbacks callbacks;
47 struct hid_sensor_common common_attributes; 54 struct hid_sensor_common magn_flux_attributes;
55 struct hid_sensor_common rot_attributes;
48 struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX]; 56 struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
49 57
50 /* dynamically sized array to hold sensor values */ 58 /* dynamically sized array to hold sensor values */
@@ -52,10 +60,8 @@ struct magn_3d_state {
52 /* array of pointers to sensor value */ 60 /* array of pointers to sensor value */
53 u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX]; 61 u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
54 62
55 int scale_pre_decml; 63 struct common_attributes magn_flux_attr;
56 int scale_post_decml; 64 struct common_attributes rot_attr;
57 int scale_precision;
58 int value_offset;
59}; 65};
60 66
61static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = { 67static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -162,41 +168,74 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
162 *val2 = 0; 168 *val2 = 0;
163 switch (mask) { 169 switch (mask) {
164 case 0: 170 case 0:
165 hid_sensor_power_state(&magn_state->common_attributes, true); 171 hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
166 report_id = 172 report_id =
167 magn_state->magn[chan->address].report_id; 173 magn_state->magn[chan->address].report_id;
168 address = magn_3d_addresses[chan->address]; 174 address = magn_3d_addresses[chan->address];
169 if (report_id >= 0) 175 if (report_id >= 0)
170 *val = sensor_hub_input_attr_get_raw_value( 176 *val = sensor_hub_input_attr_get_raw_value(
171 magn_state->common_attributes.hsdev, 177 magn_state->magn_flux_attributes.hsdev,
172 HID_USAGE_SENSOR_COMPASS_3D, address, 178 HID_USAGE_SENSOR_COMPASS_3D, address,
173 report_id, 179 report_id,
174 SENSOR_HUB_SYNC); 180 SENSOR_HUB_SYNC);
175 else { 181 else {
176 *val = 0; 182 *val = 0;
177 hid_sensor_power_state(&magn_state->common_attributes, 183 hid_sensor_power_state(
178 false); 184 &magn_state->magn_flux_attributes,
185 false);
179 return -EINVAL; 186 return -EINVAL;
180 } 187 }
181 hid_sensor_power_state(&magn_state->common_attributes, false); 188 hid_sensor_power_state(&magn_state->magn_flux_attributes,
189 false);
182 ret_type = IIO_VAL_INT; 190 ret_type = IIO_VAL_INT;
183 break; 191 break;
184 case IIO_CHAN_INFO_SCALE: 192 case IIO_CHAN_INFO_SCALE:
185 *val = magn_state->scale_pre_decml; 193 switch (chan->type) {
186 *val2 = magn_state->scale_post_decml; 194 case IIO_MAGN:
187 ret_type = magn_state->scale_precision; 195 *val = magn_state->magn_flux_attr.scale_pre_decml;
196 *val2 = magn_state->magn_flux_attr.scale_post_decml;
197 ret_type = magn_state->magn_flux_attr.scale_precision;
198 break;
199 case IIO_ROT:
200 *val = magn_state->rot_attr.scale_pre_decml;
201 *val2 = magn_state->rot_attr.scale_post_decml;
202 ret_type = magn_state->rot_attr.scale_precision;
203 break;
204 default:
205 ret_type = -EINVAL;
206 }
188 break; 207 break;
189 case IIO_CHAN_INFO_OFFSET: 208 case IIO_CHAN_INFO_OFFSET:
190 *val = magn_state->value_offset; 209 switch (chan->type) {
191 ret_type = IIO_VAL_INT; 210 case IIO_MAGN:
211 *val = magn_state->magn_flux_attr.value_offset;
212 ret_type = IIO_VAL_INT;
213 break;
214 case IIO_ROT:
215 *val = magn_state->rot_attr.value_offset;
216 ret_type = IIO_VAL_INT;
217 break;
218 default:
219 ret_type = -EINVAL;
220 }
192 break; 221 break;
193 case IIO_CHAN_INFO_SAMP_FREQ: 222 case IIO_CHAN_INFO_SAMP_FREQ:
194 ret_type = hid_sensor_read_samp_freq_value( 223 ret_type = hid_sensor_read_samp_freq_value(
195 &magn_state->common_attributes, val, val2); 224 &magn_state->magn_flux_attributes, val, val2);
196 break; 225 break;
197 case IIO_CHAN_INFO_HYSTERESIS: 226 case IIO_CHAN_INFO_HYSTERESIS:
198 ret_type = hid_sensor_read_raw_hyst_value( 227 switch (chan->type) {
199 &magn_state->common_attributes, val, val2); 228 case IIO_MAGN:
229 ret_type = hid_sensor_read_raw_hyst_value(
230 &magn_state->magn_flux_attributes, val, val2);
231 break;
232 case IIO_ROT:
233 ret_type = hid_sensor_read_raw_hyst_value(
234 &magn_state->rot_attributes, val, val2);
235 break;
236 default:
237 ret_type = -EINVAL;
238 }
200 break; 239 break;
201 default: 240 default:
202 ret_type = -EINVAL; 241 ret_type = -EINVAL;
@@ -219,11 +258,21 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
219 switch (mask) { 258 switch (mask) {
220 case IIO_CHAN_INFO_SAMP_FREQ: 259 case IIO_CHAN_INFO_SAMP_FREQ:
221 ret = hid_sensor_write_samp_freq_value( 260 ret = hid_sensor_write_samp_freq_value(
222 &magn_state->common_attributes, val, val2); 261 &magn_state->magn_flux_attributes, val, val2);
223 break; 262 break;
224 case IIO_CHAN_INFO_HYSTERESIS: 263 case IIO_CHAN_INFO_HYSTERESIS:
225 ret = hid_sensor_write_raw_hyst_value( 264 switch (chan->type) {
226 &magn_state->common_attributes, val, val2); 265 case IIO_MAGN:
266 ret = hid_sensor_write_raw_hyst_value(
267 &magn_state->magn_flux_attributes, val, val2);
268 break;
269 case IIO_ROT:
270 ret = hid_sensor_write_raw_hyst_value(
271 &magn_state->rot_attributes, val, val2);
272 break;
273 default:
274 ret = -EINVAL;
275 }
227 break; 276 break;
228 default: 277 default:
229 ret = -EINVAL; 278 ret = -EINVAL;
@@ -254,7 +303,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
254 struct magn_3d_state *magn_state = iio_priv(indio_dev); 303 struct magn_3d_state *magn_state = iio_priv(indio_dev);
255 304
256 dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n"); 305 dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
257 if (atomic_read(&magn_state->common_attributes.data_ready)) 306 if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
258 hid_sensor_push_data(indio_dev, magn_state->iio_vals); 307 hid_sensor_push_data(indio_dev, magn_state->iio_vals);
259 308
260 return 0; 309 return 0;
@@ -389,21 +438,48 @@ static int magn_3d_parse_report(struct platform_device *pdev,
389 dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n", 438 dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
390 *chan_count); 439 *chan_count);
391 440
392 st->scale_precision = hid_sensor_format_scale( 441 st->magn_flux_attr.scale_precision = hid_sensor_format_scale(
393 HID_USAGE_SENSOR_COMPASS_3D, 442 HID_USAGE_SENSOR_COMPASS_3D,
394 &st->magn[CHANNEL_SCAN_INDEX_X], 443 &st->magn[CHANNEL_SCAN_INDEX_X],
395 &st->scale_pre_decml, &st->scale_post_decml); 444 &st->magn_flux_attr.scale_pre_decml,
445 &st->magn_flux_attr.scale_post_decml);
446 st->rot_attr.scale_precision
447 = hid_sensor_format_scale(
448 HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
449 &st->magn[CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP],
450 &st->rot_attr.scale_pre_decml,
451 &st->rot_attr.scale_post_decml);
396 452
397 /* Set Sensitivity field ids, when there is no individual modifier */ 453 /* Set Sensitivity field ids, when there is no individual modifier */
398 if (st->common_attributes.sensitivity.index < 0) { 454 if (st->magn_flux_attributes.sensitivity.index < 0) {
399 sensor_hub_input_get_attribute_info(hsdev, 455 sensor_hub_input_get_attribute_info(hsdev,
400 HID_FEATURE_REPORT, usage_id, 456 HID_FEATURE_REPORT, usage_id,
401 HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | 457 HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
402 HID_USAGE_SENSOR_DATA_ORIENTATION, 458 HID_USAGE_SENSOR_DATA_ORIENTATION,
403 &st->common_attributes.sensitivity); 459 &st->magn_flux_attributes.sensitivity);
460 dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
461 st->magn_flux_attributes.sensitivity.index,
462 st->magn_flux_attributes.sensitivity.report_id);
463 }
464 if (st->magn_flux_attributes.sensitivity.index < 0) {
465 sensor_hub_input_get_attribute_info(hsdev,
466 HID_FEATURE_REPORT, usage_id,
467 HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
468 HID_USAGE_SENSOR_ORIENT_MAGN_FLUX,
469 &st->magn_flux_attributes.sensitivity);
470 dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
471 st->magn_flux_attributes.sensitivity.index,
472 st->magn_flux_attributes.sensitivity.report_id);
473 }
474 if (st->rot_attributes.sensitivity.index < 0) {
475 sensor_hub_input_get_attribute_info(hsdev,
476 HID_FEATURE_REPORT, usage_id,
477 HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
478 HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
479 &st->rot_attributes.sensitivity);
404 dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", 480 dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
405 st->common_attributes.sensitivity.index, 481 st->rot_attributes.sensitivity.index,
406 st->common_attributes.sensitivity.report_id); 482 st->rot_attributes.sensitivity.report_id);
407 } 483 }
408 484
409 return 0; 485 return 0;
@@ -428,16 +504,17 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
428 platform_set_drvdata(pdev, indio_dev); 504 platform_set_drvdata(pdev, indio_dev);
429 505
430 magn_state = iio_priv(indio_dev); 506 magn_state = iio_priv(indio_dev);
431 magn_state->common_attributes.hsdev = hsdev; 507 magn_state->magn_flux_attributes.hsdev = hsdev;
432 magn_state->common_attributes.pdev = pdev; 508 magn_state->magn_flux_attributes.pdev = pdev;
433 509
434 ret = hid_sensor_parse_common_attributes(hsdev, 510 ret = hid_sensor_parse_common_attributes(hsdev,
435 HID_USAGE_SENSOR_COMPASS_3D, 511 HID_USAGE_SENSOR_COMPASS_3D,
436 &magn_state->common_attributes); 512 &magn_state->magn_flux_attributes);
437 if (ret) { 513 if (ret) {
438 dev_err(&pdev->dev, "failed to setup common attributes\n"); 514 dev_err(&pdev->dev, "failed to setup common attributes\n");
439 return ret; 515 return ret;
440 } 516 }
517 magn_state->rot_attributes = magn_state->magn_flux_attributes;
441 518
442 ret = magn_3d_parse_report(pdev, hsdev, 519 ret = magn_3d_parse_report(pdev, hsdev,
443 &channels, &chan_count, 520 &channels, &chan_count,
@@ -460,9 +537,9 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
460 dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); 537 dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
461 return ret; 538 return ret;
462 } 539 }
463 atomic_set(&magn_state->common_attributes.data_ready, 0); 540 atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
464 ret = hid_sensor_setup_trigger(indio_dev, name, 541 ret = hid_sensor_setup_trigger(indio_dev, name,
465 &magn_state->common_attributes); 542 &magn_state->magn_flux_attributes);
466 if (ret < 0) { 543 if (ret < 0) {
467 dev_err(&pdev->dev, "trigger setup failed\n"); 544 dev_err(&pdev->dev, "trigger setup failed\n");
468 goto error_unreg_buffer_funcs; 545 goto error_unreg_buffer_funcs;
@@ -489,7 +566,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
489error_iio_unreg: 566error_iio_unreg:
490 iio_device_unregister(indio_dev); 567 iio_device_unregister(indio_dev);
491error_remove_trigger: 568error_remove_trigger:
492 hid_sensor_remove_trigger(&magn_state->common_attributes); 569 hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
493error_unreg_buffer_funcs: 570error_unreg_buffer_funcs:
494 iio_triggered_buffer_cleanup(indio_dev); 571 iio_triggered_buffer_cleanup(indio_dev);
495 return ret; 572 return ret;
@@ -504,7 +581,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
504 581
505 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D); 582 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
506 iio_device_unregister(indio_dev); 583 iio_device_unregister(indio_dev);
507 hid_sensor_remove_trigger(&magn_state->common_attributes); 584 hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
508 iio_triggered_buffer_cleanup(indio_dev); 585 iio_triggered_buffer_cleanup(indio_dev);
509 586
510 return 0; 587 return 0;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 3e1f06b2224c..8e1b0861fbe4 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -46,139 +46,12 @@
46#define ST_MAGN_FS_AVL_15000MG 15000 46#define ST_MAGN_FS_AVL_15000MG 15000
47#define ST_MAGN_FS_AVL_16000MG 16000 47#define ST_MAGN_FS_AVL_16000MG 16000
48 48
49/* CUSTOM VALUES FOR SENSOR 0 */ 49/* Special L addresses for Sensor 2 */
50#define ST_MAGN_0_ODR_ADDR 0x00
51#define ST_MAGN_0_ODR_MASK 0x1c
52#define ST_MAGN_0_ODR_AVL_1HZ_VAL 0x00
53#define ST_MAGN_0_ODR_AVL_2HZ_VAL 0x01
54#define ST_MAGN_0_ODR_AVL_3HZ_VAL 0x02
55#define ST_MAGN_0_ODR_AVL_8HZ_VAL 0x03
56#define ST_MAGN_0_ODR_AVL_15HZ_VAL 0x04
57#define ST_MAGN_0_ODR_AVL_30HZ_VAL 0x05
58#define ST_MAGN_0_ODR_AVL_75HZ_VAL 0x06
59#define ST_MAGN_0_ODR_AVL_220HZ_VAL 0x07
60#define ST_MAGN_0_PW_ADDR 0x02
61#define ST_MAGN_0_PW_MASK 0x03
62#define ST_MAGN_0_PW_ON 0x00
63#define ST_MAGN_0_PW_OFF 0x03
64#define ST_MAGN_0_FS_ADDR 0x01
65#define ST_MAGN_0_FS_MASK 0xe0
66#define ST_MAGN_0_FS_AVL_1300_VAL 0x01
67#define ST_MAGN_0_FS_AVL_1900_VAL 0x02
68#define ST_MAGN_0_FS_AVL_2500_VAL 0x03
69#define ST_MAGN_0_FS_AVL_4000_VAL 0x04
70#define ST_MAGN_0_FS_AVL_4700_VAL 0x05
71#define ST_MAGN_0_FS_AVL_5600_VAL 0x06
72#define ST_MAGN_0_FS_AVL_8100_VAL 0x07
73#define ST_MAGN_0_FS_AVL_1300_GAIN_XY 1100
74#define ST_MAGN_0_FS_AVL_1900_GAIN_XY 855
75#define ST_MAGN_0_FS_AVL_2500_GAIN_XY 670
76#define ST_MAGN_0_FS_AVL_4000_GAIN_XY 450
77#define ST_MAGN_0_FS_AVL_4700_GAIN_XY 400
78#define ST_MAGN_0_FS_AVL_5600_GAIN_XY 330
79#define ST_MAGN_0_FS_AVL_8100_GAIN_XY 230
80#define ST_MAGN_0_FS_AVL_1300_GAIN_Z 980
81#define ST_MAGN_0_FS_AVL_1900_GAIN_Z 760
82#define ST_MAGN_0_FS_AVL_2500_GAIN_Z 600
83#define ST_MAGN_0_FS_AVL_4000_GAIN_Z 400
84#define ST_MAGN_0_FS_AVL_4700_GAIN_Z 355
85#define ST_MAGN_0_FS_AVL_5600_GAIN_Z 295
86#define ST_MAGN_0_FS_AVL_8100_GAIN_Z 205
87#define ST_MAGN_0_MULTIREAD_BIT false
88
89/* CUSTOM VALUES FOR SENSOR 1 */
90#define ST_MAGN_1_WAI_EXP 0x3c
91#define ST_MAGN_1_ODR_ADDR 0x00
92#define ST_MAGN_1_ODR_MASK 0x1c
93#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
94#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
95#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
96#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
97#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
98#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
99#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
100#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
101#define ST_MAGN_1_PW_ADDR 0x02
102#define ST_MAGN_1_PW_MASK 0x03
103#define ST_MAGN_1_PW_ON 0x00
104#define ST_MAGN_1_PW_OFF 0x03
105#define ST_MAGN_1_FS_ADDR 0x01
106#define ST_MAGN_1_FS_MASK 0xe0
107#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
108#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
109#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
110#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
111#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
112#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
113#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
114#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
115#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
116#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
117#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
118#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
119#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
120#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
121#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
122#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
123#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
124#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
125#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
126#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
127#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
128#define ST_MAGN_1_MULTIREAD_BIT false
129
130/* CUSTOM VALUES FOR SENSOR 2 */
131#define ST_MAGN_2_WAI_EXP 0x3d
132#define ST_MAGN_2_ODR_ADDR 0x20
133#define ST_MAGN_2_ODR_MASK 0x1c
134#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
135#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
136#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
137#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
138#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
139#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
140#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
141#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
142#define ST_MAGN_2_PW_ADDR 0x22
143#define ST_MAGN_2_PW_MASK 0x03
144#define ST_MAGN_2_PW_ON 0x00
145#define ST_MAGN_2_PW_OFF 0x03
146#define ST_MAGN_2_FS_ADDR 0x21
147#define ST_MAGN_2_FS_MASK 0x60
148#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
149#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
150#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
151#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
152#define ST_MAGN_2_FS_AVL_4000_GAIN 146
153#define ST_MAGN_2_FS_AVL_8000_GAIN 292
154#define ST_MAGN_2_FS_AVL_12000_GAIN 438
155#define ST_MAGN_2_FS_AVL_16000_GAIN 584
156#define ST_MAGN_2_MULTIREAD_BIT false
157#define ST_MAGN_2_OUT_X_L_ADDR 0x28 50#define ST_MAGN_2_OUT_X_L_ADDR 0x28
158#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a 51#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
159#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c 52#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
160 53
161/* CUSTOM VALUES FOR SENSOR 3 */ 54/* Special L addresses for sensor 3 */
162#define ST_MAGN_3_WAI_ADDR 0x4f
163#define ST_MAGN_3_WAI_EXP 0x40
164#define ST_MAGN_3_ODR_ADDR 0x60
165#define ST_MAGN_3_ODR_MASK 0x0c
166#define ST_MAGN_3_ODR_AVL_10HZ_VAL 0x00
167#define ST_MAGN_3_ODR_AVL_20HZ_VAL 0x01
168#define ST_MAGN_3_ODR_AVL_50HZ_VAL 0x02
169#define ST_MAGN_3_ODR_AVL_100HZ_VAL 0x03
170#define ST_MAGN_3_PW_ADDR 0x60
171#define ST_MAGN_3_PW_MASK 0x03
172#define ST_MAGN_3_PW_ON 0x00
173#define ST_MAGN_3_PW_OFF 0x03
174#define ST_MAGN_3_BDU_ADDR 0x62
175#define ST_MAGN_3_BDU_MASK 0x10
176#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
177#define ST_MAGN_3_DRDY_INT_MASK 0x01
178#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
179#define ST_MAGN_3_IHL_IRQ_MASK 0x04
180#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
181#define ST_MAGN_3_MULTIREAD_BIT false
182#define ST_MAGN_3_OUT_X_L_ADDR 0x68 55#define ST_MAGN_3_OUT_X_L_ADDR 0x68
183#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a 56#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
184#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c 57#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
@@ -240,77 +113,78 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
240 }, 113 },
241 .ch = (struct iio_chan_spec *)st_magn_16bit_channels, 114 .ch = (struct iio_chan_spec *)st_magn_16bit_channels,
242 .odr = { 115 .odr = {
243 .addr = ST_MAGN_0_ODR_ADDR, 116 .addr = 0x00,
244 .mask = ST_MAGN_0_ODR_MASK, 117 .mask = 0x1c,
245 .odr_avl = { 118 .odr_avl = {
246 { 1, ST_MAGN_0_ODR_AVL_1HZ_VAL, }, 119 { .hz = 1, .value = 0x00 },
247 { 2, ST_MAGN_0_ODR_AVL_2HZ_VAL, }, 120 { .hz = 2, .value = 0x01 },
248 { 3, ST_MAGN_0_ODR_AVL_3HZ_VAL, }, 121 { .hz = 3, .value = 0x02 },
249 { 8, ST_MAGN_0_ODR_AVL_8HZ_VAL, }, 122 { .hz = 8, .value = 0x03 },
250 { 15, ST_MAGN_0_ODR_AVL_15HZ_VAL, }, 123 { .hz = 15, .value = 0x04 },
251 { 30, ST_MAGN_0_ODR_AVL_30HZ_VAL, }, 124 { .hz = 30, .value = 0x05 },
252 { 75, ST_MAGN_0_ODR_AVL_75HZ_VAL, }, 125 { .hz = 75, .value = 0x06 },
126 /* 220 Hz, 0x07 reportedly exist */
253 }, 127 },
254 }, 128 },
255 .pw = { 129 .pw = {
256 .addr = ST_MAGN_0_PW_ADDR, 130 .addr = 0x02,
257 .mask = ST_MAGN_0_PW_MASK, 131 .mask = 0x03,
258 .value_on = ST_MAGN_0_PW_ON, 132 .value_on = 0x00,
259 .value_off = ST_MAGN_0_PW_OFF, 133 .value_off = 0x03,
260 }, 134 },
261 .fs = { 135 .fs = {
262 .addr = ST_MAGN_0_FS_ADDR, 136 .addr = 0x01,
263 .mask = ST_MAGN_0_FS_MASK, 137 .mask = 0xe0,
264 .fs_avl = { 138 .fs_avl = {
265 [0] = { 139 [0] = {
266 .num = ST_MAGN_FS_AVL_1300MG, 140 .num = ST_MAGN_FS_AVL_1300MG,
267 .value = ST_MAGN_0_FS_AVL_1300_VAL, 141 .value = 0x01,
268 .gain = ST_MAGN_0_FS_AVL_1300_GAIN_XY, 142 .gain = 1100,
269 .gain2 = ST_MAGN_0_FS_AVL_1300_GAIN_Z, 143 .gain2 = 980,
270 }, 144 },
271 [1] = { 145 [1] = {
272 .num = ST_MAGN_FS_AVL_1900MG, 146 .num = ST_MAGN_FS_AVL_1900MG,
273 .value = ST_MAGN_0_FS_AVL_1900_VAL, 147 .value = 0x02,
274 .gain = ST_MAGN_0_FS_AVL_1900_GAIN_XY, 148 .gain = 855,
275 .gain2 = ST_MAGN_0_FS_AVL_1900_GAIN_Z, 149 .gain2 = 760,
276 }, 150 },
277 [2] = { 151 [2] = {
278 .num = ST_MAGN_FS_AVL_2500MG, 152 .num = ST_MAGN_FS_AVL_2500MG,
279 .value = ST_MAGN_0_FS_AVL_2500_VAL, 153 .value = 0x03,
280 .gain = ST_MAGN_0_FS_AVL_2500_GAIN_XY, 154 .gain = 670,
281 .gain2 = ST_MAGN_0_FS_AVL_2500_GAIN_Z, 155 .gain2 = 600,
282 }, 156 },
283 [3] = { 157 [3] = {
284 .num = ST_MAGN_FS_AVL_4000MG, 158 .num = ST_MAGN_FS_AVL_4000MG,
285 .value = ST_MAGN_0_FS_AVL_4000_VAL, 159 .value = 0x04,
286 .gain = ST_MAGN_0_FS_AVL_4000_GAIN_XY, 160 .gain = 450,
287 .gain2 = ST_MAGN_0_FS_AVL_4000_GAIN_Z, 161 .gain2 = 400,
288 }, 162 },
289 [4] = { 163 [4] = {
290 .num = ST_MAGN_FS_AVL_4700MG, 164 .num = ST_MAGN_FS_AVL_4700MG,
291 .value = ST_MAGN_0_FS_AVL_4700_VAL, 165 .value = 0x05,
292 .gain = ST_MAGN_0_FS_AVL_4700_GAIN_XY, 166 .gain = 400,
293 .gain2 = ST_MAGN_0_FS_AVL_4700_GAIN_Z, 167 .gain2 = 355,
294 }, 168 },
295 [5] = { 169 [5] = {
296 .num = ST_MAGN_FS_AVL_5600MG, 170 .num = ST_MAGN_FS_AVL_5600MG,
297 .value = ST_MAGN_0_FS_AVL_5600_VAL, 171 .value = 0x06,
298 .gain = ST_MAGN_0_FS_AVL_5600_GAIN_XY, 172 .gain = 330,
299 .gain2 = ST_MAGN_0_FS_AVL_5600_GAIN_Z, 173 .gain2 = 295,
300 }, 174 },
301 [6] = { 175 [6] = {
302 .num = ST_MAGN_FS_AVL_8100MG, 176 .num = ST_MAGN_FS_AVL_8100MG,
303 .value = ST_MAGN_0_FS_AVL_8100_VAL, 177 .value = 0x07,
304 .gain = ST_MAGN_0_FS_AVL_8100_GAIN_XY, 178 .gain = 230,
305 .gain2 = ST_MAGN_0_FS_AVL_8100_GAIN_Z, 179 .gain2 = 205,
306 }, 180 },
307 }, 181 },
308 }, 182 },
309 .multi_read_bit = ST_MAGN_0_MULTIREAD_BIT, 183 .multi_read_bit = false,
310 .bootime = 2, 184 .bootime = 2,
311 }, 185 },
312 { 186 {
313 .wai = ST_MAGN_1_WAI_EXP, 187 .wai = 0x3c,
314 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 188 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
315 .sensors_supported = { 189 .sensors_supported = {
316 [0] = LSM303DLHC_MAGN_DEV_NAME, 190 [0] = LSM303DLHC_MAGN_DEV_NAME,
@@ -318,175 +192,175 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
318 }, 192 },
319 .ch = (struct iio_chan_spec *)st_magn_16bit_channels, 193 .ch = (struct iio_chan_spec *)st_magn_16bit_channels,
320 .odr = { 194 .odr = {
321 .addr = ST_MAGN_1_ODR_ADDR, 195 .addr = 0x00,
322 .mask = ST_MAGN_1_ODR_MASK, 196 .mask = 0x1c,
323 .odr_avl = { 197 .odr_avl = {
324 { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, }, 198 { .hz = 1, .value = 0x00 },
325 { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, }, 199 { .hz = 2, .value = 0x01 },
326 { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, }, 200 { .hz = 3, .value = 0x02 },
327 { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, }, 201 { .hz = 8, .value = 0x03 },
328 { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, }, 202 { .hz = 15, .value = 0x04 },
329 { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, }, 203 { .hz = 30, .value = 0x05 },
330 { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, }, 204 { .hz = 75, .value = 0x06 },
331 { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, }, 205 { .hz = 220, .value = 0x07 },
332 }, 206 },
333 }, 207 },
334 .pw = { 208 .pw = {
335 .addr = ST_MAGN_1_PW_ADDR, 209 .addr = 0x02,
336 .mask = ST_MAGN_1_PW_MASK, 210 .mask = 0x03,
337 .value_on = ST_MAGN_1_PW_ON, 211 .value_on = 0x00,
338 .value_off = ST_MAGN_1_PW_OFF, 212 .value_off = 0x03,
339 }, 213 },
340 .fs = { 214 .fs = {
341 .addr = ST_MAGN_1_FS_ADDR, 215 .addr = 0x01,
342 .mask = ST_MAGN_1_FS_MASK, 216 .mask = 0xe0,
343 .fs_avl = { 217 .fs_avl = {
344 [0] = { 218 [0] = {
345 .num = ST_MAGN_FS_AVL_1300MG, 219 .num = ST_MAGN_FS_AVL_1300MG,
346 .value = ST_MAGN_1_FS_AVL_1300_VAL, 220 .value = 0x01,
347 .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY, 221 .gain = 909,
348 .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z, 222 .gain2 = 1020,
349 }, 223 },
350 [1] = { 224 [1] = {
351 .num = ST_MAGN_FS_AVL_1900MG, 225 .num = ST_MAGN_FS_AVL_1900MG,
352 .value = ST_MAGN_1_FS_AVL_1900_VAL, 226 .value = 0x02,
353 .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY, 227 .gain = 1169,
354 .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z, 228 .gain2 = 1315,
355 }, 229 },
356 [2] = { 230 [2] = {
357 .num = ST_MAGN_FS_AVL_2500MG, 231 .num = ST_MAGN_FS_AVL_2500MG,
358 .value = ST_MAGN_1_FS_AVL_2500_VAL, 232 .value = 0x03,
359 .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY, 233 .gain = 1492,
360 .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z, 234 .gain2 = 1666,
361 }, 235 },
362 [3] = { 236 [3] = {
363 .num = ST_MAGN_FS_AVL_4000MG, 237 .num = ST_MAGN_FS_AVL_4000MG,
364 .value = ST_MAGN_1_FS_AVL_4000_VAL, 238 .value = 0x04,
365 .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY, 239 .gain = 2222,
366 .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z, 240 .gain2 = 2500,
367 }, 241 },
368 [4] = { 242 [4] = {
369 .num = ST_MAGN_FS_AVL_4700MG, 243 .num = ST_MAGN_FS_AVL_4700MG,
370 .value = ST_MAGN_1_FS_AVL_4700_VAL, 244 .value = 0x05,
371 .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY, 245 .gain = 2500,
372 .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z, 246 .gain2 = 2816,
373 }, 247 },
374 [5] = { 248 [5] = {
375 .num = ST_MAGN_FS_AVL_5600MG, 249 .num = ST_MAGN_FS_AVL_5600MG,
376 .value = ST_MAGN_1_FS_AVL_5600_VAL, 250 .value = 0x06,
377 .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY, 251 .gain = 3030,
378 .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z, 252 .gain2 = 3389,
379 }, 253 },
380 [6] = { 254 [6] = {
381 .num = ST_MAGN_FS_AVL_8100MG, 255 .num = ST_MAGN_FS_AVL_8100MG,
382 .value = ST_MAGN_1_FS_AVL_8100_VAL, 256 .value = 0x07,
383 .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY, 257 .gain = 4347,
384 .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z, 258 .gain2 = 4878,
385 }, 259 },
386 }, 260 },
387 }, 261 },
388 .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT, 262 .multi_read_bit = false,
389 .bootime = 2, 263 .bootime = 2,
390 }, 264 },
391 { 265 {
392 .wai = ST_MAGN_2_WAI_EXP, 266 .wai = 0x3d,
393 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 267 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
394 .sensors_supported = { 268 .sensors_supported = {
395 [0] = LIS3MDL_MAGN_DEV_NAME, 269 [0] = LIS3MDL_MAGN_DEV_NAME,
396 }, 270 },
397 .ch = (struct iio_chan_spec *)st_magn_2_16bit_channels, 271 .ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
398 .odr = { 272 .odr = {
399 .addr = ST_MAGN_2_ODR_ADDR, 273 .addr = 0x20,
400 .mask = ST_MAGN_2_ODR_MASK, 274 .mask = 0x1c,
401 .odr_avl = { 275 .odr_avl = {
402 { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, }, 276 { .hz = 1, .value = 0x00 },
403 { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, }, 277 { .hz = 2, .value = 0x01 },
404 { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, }, 278 { .hz = 3, .value = 0x02 },
405 { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, }, 279 { .hz = 5, .value = 0x03 },
406 { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, }, 280 { .hz = 10, .value = 0x04 },
407 { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, }, 281 { .hz = 20, .value = 0x05 },
408 { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, }, 282 { .hz = 40, .value = 0x06 },
409 { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, }, 283 { .hz = 80, .value = 0x07 },
410 }, 284 },
411 }, 285 },
412 .pw = { 286 .pw = {
413 .addr = ST_MAGN_2_PW_ADDR, 287 .addr = 0x22,
414 .mask = ST_MAGN_2_PW_MASK, 288 .mask = 0x03,
415 .value_on = ST_MAGN_2_PW_ON, 289 .value_on = 0x00,
416 .value_off = ST_MAGN_2_PW_OFF, 290 .value_off = 0x03,
417 }, 291 },
418 .fs = { 292 .fs = {
419 .addr = ST_MAGN_2_FS_ADDR, 293 .addr = 0x21,
420 .mask = ST_MAGN_2_FS_MASK, 294 .mask = 0x60,
421 .fs_avl = { 295 .fs_avl = {
422 [0] = { 296 [0] = {
423 .num = ST_MAGN_FS_AVL_4000MG, 297 .num = ST_MAGN_FS_AVL_4000MG,
424 .value = ST_MAGN_2_FS_AVL_4000_VAL, 298 .value = 0x00,
425 .gain = ST_MAGN_2_FS_AVL_4000_GAIN, 299 .gain = 146,
426 }, 300 },
427 [1] = { 301 [1] = {
428 .num = ST_MAGN_FS_AVL_8000MG, 302 .num = ST_MAGN_FS_AVL_8000MG,
429 .value = ST_MAGN_2_FS_AVL_8000_VAL, 303 .value = 0x01,
430 .gain = ST_MAGN_2_FS_AVL_8000_GAIN, 304 .gain = 292,
431 }, 305 },
432 [2] = { 306 [2] = {
433 .num = ST_MAGN_FS_AVL_12000MG, 307 .num = ST_MAGN_FS_AVL_12000MG,
434 .value = ST_MAGN_2_FS_AVL_12000_VAL, 308 .value = 0x02,
435 .gain = ST_MAGN_2_FS_AVL_12000_GAIN, 309 .gain = 438,
436 }, 310 },
437 [3] = { 311 [3] = {
438 .num = ST_MAGN_FS_AVL_16000MG, 312 .num = ST_MAGN_FS_AVL_16000MG,
439 .value = ST_MAGN_2_FS_AVL_16000_VAL, 313 .value = 0x03,
440 .gain = ST_MAGN_2_FS_AVL_16000_GAIN, 314 .gain = 584,
441 }, 315 },
442 }, 316 },
443 }, 317 },
444 .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT, 318 .multi_read_bit = false,
445 .bootime = 2, 319 .bootime = 2,
446 }, 320 },
447 { 321 {
448 .wai = ST_MAGN_3_WAI_EXP, 322 .wai = 0x40,
449 .wai_addr = ST_MAGN_3_WAI_ADDR, 323 .wai_addr = 0x4f,
450 .sensors_supported = { 324 .sensors_supported = {
451 [0] = LSM303AGR_MAGN_DEV_NAME, 325 [0] = LSM303AGR_MAGN_DEV_NAME,
452 }, 326 },
453 .ch = (struct iio_chan_spec *)st_magn_3_16bit_channels, 327 .ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
454 .odr = { 328 .odr = {
455 .addr = ST_MAGN_3_ODR_ADDR, 329 .addr = 0x60,
456 .mask = ST_MAGN_3_ODR_MASK, 330 .mask = 0x0c,
457 .odr_avl = { 331 .odr_avl = {
458 { 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, }, 332 { .hz = 10, .value = 0x00 },
459 { 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, }, 333 { .hz = 20, .value = 0x01 },
460 { 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, }, 334 { .hz = 50, .value = 0x02 },
461 { 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, }, 335 { .hz = 100, .value = 0x03 },
462 }, 336 },
463 }, 337 },
464 .pw = { 338 .pw = {
465 .addr = ST_MAGN_3_PW_ADDR, 339 .addr = 0x60,
466 .mask = ST_MAGN_3_PW_MASK, 340 .mask = 0x03,
467 .value_on = ST_MAGN_3_PW_ON, 341 .value_on = 0x00,
468 .value_off = ST_MAGN_3_PW_OFF, 342 .value_off = 0x03,
469 }, 343 },
470 .fs = { 344 .fs = {
471 .fs_avl = { 345 .fs_avl = {
472 [0] = { 346 [0] = {
473 .num = ST_MAGN_FS_AVL_15000MG, 347 .num = ST_MAGN_FS_AVL_15000MG,
474 .gain = ST_MAGN_3_FS_AVL_15000_GAIN, 348 .gain = 1500,
475 }, 349 },
476 }, 350 },
477 }, 351 },
478 .bdu = { 352 .bdu = {
479 .addr = ST_MAGN_3_BDU_ADDR, 353 .addr = 0x62,
480 .mask = ST_MAGN_3_BDU_MASK, 354 .mask = 0x10,
481 }, 355 },
482 .drdy_irq = { 356 .drdy_irq = {
483 .addr = ST_MAGN_3_DRDY_IRQ_ADDR, 357 .addr = 0x62,
484 .mask_int1 = ST_MAGN_3_DRDY_INT_MASK, 358 .mask_int1 = 0x01,
485 .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR, 359 .addr_ihl = 0x63,
486 .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK, 360 .mask_ihl = 0x04,
487 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 361 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
488 }, 362 },
489 .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT, 363 .multi_read_bit = false,
490 .bootime = 2, 364 .bootime = 2,
491 }, 365 },
492}; 366};
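
The st_magn hunks above drop the per-sensor register macros and put the values straight into the settings tables, using designated initializers such as { .hz = 10, .value = 0x00 } for the ODR entries. Only as a sketch of how such a table is typically consumed (the st_sensors core has its own lookup helper; the function below is made up for illustration, using a subset of the sensor 0 values shown in the diff):

/* Sketch: map a requested output data rate to its register value. */
#include <stdio.h>

struct odr_entry {
	unsigned int hz;
	unsigned char value;
};

static const struct odr_entry odr_avl[] = {
	{ .hz = 1,  .value = 0x00 },
	{ .hz = 8,  .value = 0x03 },
	{ .hz = 15, .value = 0x04 },
	{ .hz = 75, .value = 0x06 },
};

static int odr_to_regval(unsigned int hz)
{
	unsigned int i;

	for (i = 0; i < sizeof(odr_avl) / sizeof(odr_avl[0]); i++)
		if (odr_avl[i].hz == hz)
			return odr_avl[i].value;

	return -1;	/* requested rate not supported */
}

int main(void)
{
	printf("75 Hz -> register value %d\n", odr_to_regval(75));
	return 0;
}
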
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 13b6ae2fcf7b..0d1bcf89ae17 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -38,7 +38,7 @@
38 38
39struct mcp4531_cfg { 39struct mcp4531_cfg {
40 int wipers; 40 int wipers;
41 int max_pos; 41 int avail[3];
42 int kohms; 42 int kohms;
43}; 43};
44 44
@@ -78,38 +78,38 @@ enum mcp4531_type {
78}; 78};
79 79
80static const struct mcp4531_cfg mcp4531_cfg[] = { 80static const struct mcp4531_cfg mcp4531_cfg[] = {
81 [MCP453x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, }, 81 [MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
82 [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, }, 82 [MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
83 [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, }, 83 [MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
84 [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, }, 84 [MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
85 [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, }, 85 [MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
86 [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, }, 86 [MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
87 [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, }, 87 [MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
88 [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, }, 88 [MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
89 [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, }, 89 [MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
90 [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, }, 90 [MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
91 [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, }, 91 [MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
92 [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, }, 92 [MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
93 [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, }, 93 [MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
94 [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, }, 94 [MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
95 [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, }, 95 [MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
96 [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, }, 96 [MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
97 [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, }, 97 [MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
98 [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, }, 98 [MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
99 [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, }, 99 [MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
100 [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, }, 100 [MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
101 [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, }, 101 [MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
102 [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, }, 102 [MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
103 [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, }, 103 [MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
104 [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, }, 104 [MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
105 [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, }, 105 [MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
106 [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, }, 106 [MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
107 [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, }, 107 [MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
108 [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, }, 108 [MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
109 [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, }, 109 [MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
110 [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, }, 110 [MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
111 [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, }, 111 [MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
112 [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, }, 112 [MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
113}; 113};
114 114
115#define MCP4531_WRITE (0 << 2) 115#define MCP4531_WRITE (0 << 2)
@@ -124,13 +124,14 @@ struct mcp4531_data {
124 const struct mcp4531_cfg *cfg; 124 const struct mcp4531_cfg *cfg;
125}; 125};
126 126
127#define MCP4531_CHANNEL(ch) { \ 127#define MCP4531_CHANNEL(ch) { \
128 .type = IIO_RESISTANCE, \ 128 .type = IIO_RESISTANCE, \
129 .indexed = 1, \ 129 .indexed = 1, \
130 .output = 1, \ 130 .output = 1, \
131 .channel = (ch), \ 131 .channel = (ch), \
132 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ 132 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
133 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ 133 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
134 .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
134} 135}
135 136
136static const struct iio_chan_spec mcp4531_channels[] = { 137static const struct iio_chan_spec mcp4531_channels[] = {
@@ -156,13 +157,31 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
156 return IIO_VAL_INT; 157 return IIO_VAL_INT;
157 case IIO_CHAN_INFO_SCALE: 158 case IIO_CHAN_INFO_SCALE:
158 *val = 1000 * data->cfg->kohms; 159 *val = 1000 * data->cfg->kohms;
159 *val2 = data->cfg->max_pos; 160 *val2 = data->cfg->avail[2];
160 return IIO_VAL_FRACTIONAL; 161 return IIO_VAL_FRACTIONAL;
161 } 162 }
162 163
163 return -EINVAL; 164 return -EINVAL;
164} 165}
165 166
167static int mcp4531_read_avail(struct iio_dev *indio_dev,
168 struct iio_chan_spec const *chan,
169 const int **vals, int *type, int *length,
170 long mask)
171{
172 struct mcp4531_data *data = iio_priv(indio_dev);
173
174 switch (mask) {
175 case IIO_CHAN_INFO_RAW:
176 *length = ARRAY_SIZE(data->cfg->avail);
177 *vals = data->cfg->avail;
178 *type = IIO_VAL_INT;
179 return IIO_AVAIL_RANGE;
180 }
181
182 return -EINVAL;
183}
184
166static int mcp4531_write_raw(struct iio_dev *indio_dev, 185static int mcp4531_write_raw(struct iio_dev *indio_dev,
167 struct iio_chan_spec const *chan, 186 struct iio_chan_spec const *chan,
168 int val, int val2, long mask) 187 int val, int val2, long mask)
@@ -172,7 +191,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
172 191
173 switch (mask) { 192 switch (mask) {
174 case IIO_CHAN_INFO_RAW: 193 case IIO_CHAN_INFO_RAW:
175 if (val > data->cfg->max_pos || val < 0) 194 if (val > data->cfg->avail[2] || val < 0)
176 return -EINVAL; 195 return -EINVAL;
177 break; 196 break;
178 default: 197 default:
@@ -186,6 +205,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
186 205
187static const struct iio_info mcp4531_info = { 206static const struct iio_info mcp4531_info = {
188 .read_raw = mcp4531_read_raw, 207 .read_raw = mcp4531_read_raw,
208 .read_avail = mcp4531_read_avail,
189 .write_raw = mcp4531_write_raw, 209 .write_raw = mcp4531_write_raw,
190 .driver_module = THIS_MODULE, 210 .driver_module = THIS_MODULE,
191}; 211};
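
The mcp4531 change above replaces max_pos with an avail[] triplet of { minimum, step, maximum } and reports it through the new read_avail() callback as an IIO_AVAIL_RANGE, which the IIO core renders in sysfs roughly as "[min step max]". A short user-space sketch that parses such a range (the iio:device0 path and the out_resistance_raw_available attribute name are assumptions for the example, not taken from the patch):

/* Sketch: read the advertised wiper position range of a digital pot. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/out_resistance_raw_available", "r");
	int min, step, max;

	if (!f) {
		perror("out_resistance_raw_available");
		return 1;
	}
	if (fscanf(f, "[%d %d %d]", &min, &step, &max) == 3)
		printf("wiper positions: %d..%d, step %d\n", min, max, step);
	fclose(f);
	return 0;
}
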
diff --git a/drivers/iio/potentiostat/Kconfig b/drivers/iio/potentiostat/Kconfig
new file mode 100644
index 000000000000..1e3baf2cc97d
--- /dev/null
+++ b/drivers/iio/potentiostat/Kconfig
@@ -0,0 +1,22 @@
1#
2# Potentiostat drivers
3#
4# When adding new entries keep the list in alphabetical order
5
6menu "Digital potentiostats"
7
8config LMP91000
9 tristate "Texas Instruments LMP91000 potentiostat driver"
10 depends on I2C
11 select REGMAP_I2C
12 select IIO_BUFFER
13 select IIO_BUFFER_CB
14 select IIO_TRIGGERED_BUFFER
15 help
16 Say yes here to build support for the Texas Instruments
17 LMP91000 digital potentiostat chip.
18
19 To compile this driver as a module, choose M here: the
20 module will be called lmp91000
21
22endmenu
diff --git a/drivers/iio/potentiostat/Makefile b/drivers/iio/potentiostat/Makefile
new file mode 100644
index 000000000000..64d315ef4449
--- /dev/null
+++ b/drivers/iio/potentiostat/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for industrial I/O potentiostat drivers
3#
4
5# When adding new entries keep the list in alphabetical order
6obj-$(CONFIG_LMP91000) += lmp91000.o
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
new file mode 100644
index 000000000000..e22714365022
--- /dev/null
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -0,0 +1,446 @@
1/*
2 * lmp91000.c - Support for Texas Instruments digital potentiostats
3 *
4 * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * TODO: bias voltage + polarity control, and multiple chip support
17 */
18
19#include <linux/module.h>
20#include <linux/i2c.h>
21#include <linux/delay.h>
22#include <linux/of.h>
23#include <linux/regmap.h>
24#include <linux/iio/iio.h>
25#include <linux/iio/buffer.h>
26#include <linux/iio/consumer.h>
27#include <linux/iio/trigger.h>
28#include <linux/iio/trigger_consumer.h>
29#include <linux/iio/triggered_buffer.h>
30
31#define LMP91000_REG_LOCK 0x01
32#define LMP91000_REG_TIACN 0x10
33#define LMP91000_REG_TIACN_GAIN_SHIFT 2
34
35#define LMP91000_REG_REFCN 0x11
36#define LMP91000_REG_REFCN_EXT_REF 0x20
37#define LMP91000_REG_REFCN_50_ZERO 0x80
38
39#define LMP91000_REG_MODECN 0x12
40#define LMP91000_REG_MODECN_3LEAD 0x03
41#define LMP91000_REG_MODECN_TEMP 0x07
42
43#define LMP91000_DRV_NAME "lmp91000"
44
45static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
46 120000, 350000 };
47
48static const int lmp91000_rload[] = { 10, 33, 50, 100 };
49
50#define LMP91000_TEMP_BASE -40
51
52static const u16 lmp91000_temp_lut[] = {
53 1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
54 1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
55 1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
56 1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
57 1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
58 1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
59 1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
60 1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
61 1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
62 1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
63 1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004, 996,
64 987, 979, 971, 962, 954, 945, 937, 929, 920, 912,
65 903, 895, 886, 878, 870, 861 };
66
67static const struct regmap_config lmp91000_regmap_config = {
68 .reg_bits = 8,
69 .val_bits = 8,
70};
71
72struct lmp91000_data {
73 struct regmap *regmap;
74 struct device *dev;
75
76 struct iio_trigger *trig;
77 struct iio_cb_buffer *cb_buffer;
78 struct iio_channel *adc_chan;
79
80 struct completion completion;
81 u8 chan_select;
82
83 u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
84};
85
86static const struct iio_chan_spec lmp91000_channels[] = {
87 { /* chemical channel mV */
88 .type = IIO_VOLTAGE,
89 .channel = 0,
90 .address = LMP91000_REG_MODECN_3LEAD,
91 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
92 BIT(IIO_CHAN_INFO_OFFSET) |
93 BIT(IIO_CHAN_INFO_SCALE),
94 .scan_index = 0,
95 .scan_type = {
96 .sign = 's',
97 .realbits = 32,
98 .storagebits = 32,
99 },
100 },
101 IIO_CHAN_SOFT_TIMESTAMP(1),
102 { /* temperature channel mV */
103 .type = IIO_TEMP,
104 .channel = 1,
105 .address = LMP91000_REG_MODECN_TEMP,
106 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
107 .scan_index = -1,
108 },
109};
110
111static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
112{
113 int state, ret;
114
115 ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
116 if (ret)
117 return -EINVAL;
118
119 ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
120 if (ret)
121 return -EINVAL;
122
123 /* delay till first temperature reading is complete */
124 if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
125 usleep_range(3000, 4000);
126
127 data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
128
129 iio_trigger_poll_chained(data->trig);
130
131 ret = wait_for_completion_timeout(&data->completion, HZ);
132 reinit_completion(&data->completion);
133
134 if (!ret)
135 return -ETIMEDOUT;
136
137 *val = data->buffer[data->chan_select];
138
139 return 0;
140}
141
142static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
143{
144 struct iio_poll_func *pf = private;
145 struct iio_dev *indio_dev = pf->indio_dev;
146 struct lmp91000_data *data = iio_priv(indio_dev);
147 int ret, val;
148
149 memset(data->buffer, 0, sizeof(data->buffer));
150
151 ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
152 if (!ret) {
153 data->buffer[0] = val;
154 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
155 iio_get_time_ns(indio_dev));
156 }
157
158 iio_trigger_notify_done(indio_dev->trig);
159
160 return IRQ_HANDLED;
161}
162
163static int lmp91000_read_raw(struct iio_dev *indio_dev,
164 struct iio_chan_spec const *chan,
165 int *val, int *val2, long mask)
166{
167 struct lmp91000_data *data = iio_priv(indio_dev);
168
169 switch (mask) {
170 case IIO_CHAN_INFO_RAW:
171 case IIO_CHAN_INFO_PROCESSED: {
172 int ret = iio_channel_start_all_cb(data->cb_buffer);
173
174 if (ret)
175 return ret;
176
177 ret = lmp91000_read(data, chan->address, val);
178
179 iio_channel_stop_all_cb(data->cb_buffer);
180
181 if (ret)
182 return ret;
183
184 if (mask == IIO_CHAN_INFO_PROCESSED) {
185 int tmp, i;
186
187 ret = iio_convert_raw_to_processed(data->adc_chan,
188 *val, &tmp, 1);
189 if (ret)
190 return ret;
191
192 for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
193 if (lmp91000_temp_lut[i] < tmp)
194 break;
195
196 *val = (LMP91000_TEMP_BASE + i) * 1000;
197 }
198 return IIO_VAL_INT;
199 }
200 case IIO_CHAN_INFO_OFFSET:
201 return iio_read_channel_offset(data->adc_chan, val, val2);
202 case IIO_CHAN_INFO_SCALE:
203 return iio_read_channel_scale(data->adc_chan, val, val2);
204 }
205
206 return -EINVAL;
207}
208
209static const struct iio_info lmp91000_info = {
210 .driver_module = THIS_MODULE,
211 .read_raw = lmp91000_read_raw,
212};
213
214static int lmp91000_read_config(struct lmp91000_data *data)
215{
216 struct device *dev = data->dev;
217 struct device_node *np = dev->of_node;
218 unsigned int reg, val;
219 int i, ret;
220
221 ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
222 if (ret) {
223 if (of_property_read_bool(np, "ti,external-tia-resistor"))
224 val = 0;
225 else {
226 dev_err(dev, "no ti,tia-gain-ohm defined");
227 return ret;
228 }
229 }
230
231 ret = -EINVAL;
232 for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
233 if (lmp91000_tia_gain[i] == val) {
234 reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
235 ret = 0;
236 break;
237 }
238 }
239
240 if (ret) {
241 dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
242 return ret;
243 }
244
245 ret = of_property_read_u32(np, "ti,rload-ohm", &val);
246 if (ret) {
247 val = 100;
248 dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
249 }
250
251 ret = -EINVAL;
252 for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
253 if (lmp91000_rload[i] == val) {
254 reg |= i;
255 ret = 0;
256 break;
257 }
258 }
259
260 if (ret) {
261 dev_err(dev, "invalid ti,rload-ohm %d\n", val);
262 return ret;
263 }
264
265 regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
266 regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
267 regmap_write(data->regmap, LMP91000_REG_REFCN, LMP91000_REG_REFCN_EXT_REF
268 | LMP91000_REG_REFCN_50_ZERO);
269 regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
270
271 return 0;
272}
273
274static int lmp91000_buffer_cb(const void *val, void *private)
275{
276 struct iio_dev *indio_dev = private;
277 struct lmp91000_data *data = iio_priv(indio_dev);
278
279 data->buffer[data->chan_select] = *((int *)val);
280 complete_all(&data->completion);
281
282 return 0;
283}
284
285static const struct iio_trigger_ops lmp91000_trigger_ops = {
286 .owner = THIS_MODULE,
287};
288
289
290static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
291{
292 struct lmp91000_data *data = iio_priv(indio_dev);
293
294 return iio_channel_start_all_cb(data->cb_buffer);
295}
296
297static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
298{
299 struct lmp91000_data *data = iio_priv(indio_dev);
300
301 iio_channel_stop_all_cb(data->cb_buffer);
302
303 return 0;
304}
305
306static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
307 .preenable = lmp91000_buffer_preenable,
308 .postenable = iio_triggered_buffer_postenable,
309 .predisable = lmp91000_buffer_predisable,
310};
311
312static int lmp91000_probe(struct i2c_client *client,
313 const struct i2c_device_id *id)
314{
315 struct device *dev = &client->dev;
316 struct lmp91000_data *data;
317 struct iio_dev *indio_dev;
318 int ret;
319
320 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
321 if (!indio_dev)
322 return -ENOMEM;
323
324 indio_dev->info = &lmp91000_info;
325 indio_dev->channels = lmp91000_channels;
326 indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
327 indio_dev->name = LMP91000_DRV_NAME;
328 indio_dev->modes = INDIO_DIRECT_MODE;
329 i2c_set_clientdata(client, indio_dev);
330
331 data = iio_priv(indio_dev);
332 data->dev = dev;
333 data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
334 if (IS_ERR(data->regmap)) {
335 dev_err(dev, "regmap initialization failed.\n");
336 return PTR_ERR(data->regmap);
337 }
338
339 data->trig = devm_iio_trigger_alloc(data->dev, "%s-mux%d",
340 indio_dev->name, indio_dev->id);
341 if (!data->trig) {
342 dev_err(dev, "cannot allocate iio trigger.\n");
343 return -ENOMEM;
344 }
345
346 data->trig->ops = &lmp91000_trigger_ops;
347 data->trig->dev.parent = dev;
348 init_completion(&data->completion);
349
350 ret = lmp91000_read_config(data);
351 if (ret)
352 return ret;
353
354 ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
355 data->trig);
356 if (ret) {
357 dev_err(dev, "cannot set immutable trigger.\n");
358 return ret;
359 }
360
361 ret = iio_trigger_register(data->trig);
362 if (ret) {
363 dev_err(dev, "cannot register iio trigger.\n");
364 return ret;
365 }
366
367 ret = iio_triggered_buffer_setup(indio_dev, NULL,
368 &lmp91000_buffer_handler,
369 &lmp91000_buffer_setup_ops);
370 if (ret)
371 goto error_unreg_trigger;
372
373 data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
374 indio_dev);
375
376 if (IS_ERR(data->cb_buffer)) {
377 if (PTR_ERR(data->cb_buffer) == -ENODEV)
378 ret = -EPROBE_DEFER;
379 else
380 ret = PTR_ERR(data->cb_buffer);
381
382 goto error_unreg_buffer;
383 }
384
385 data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
386
387 ret = iio_device_register(indio_dev);
388 if (ret)
389 goto error_unreg_cb_buffer;
390
391 return 0;
392
393error_unreg_cb_buffer:
394 iio_channel_release_all_cb(data->cb_buffer);
395
396error_unreg_buffer:
397 iio_triggered_buffer_cleanup(indio_dev);
398
399error_unreg_trigger:
400 iio_trigger_unregister(data->trig);
401
402 return ret;
403}
404
405static int lmp91000_remove(struct i2c_client *client)
406{
407 struct iio_dev *indio_dev = i2c_get_clientdata(client);
408 struct lmp91000_data *data = iio_priv(indio_dev);
409
410 iio_device_unregister(indio_dev);
411
412 iio_channel_stop_all_cb(data->cb_buffer);
413 iio_channel_release_all_cb(data->cb_buffer);
414
415 iio_triggered_buffer_cleanup(indio_dev);
416 iio_trigger_unregister(data->trig);
417
418 return 0;
419}
420
421static const struct of_device_id lmp91000_of_match[] = {
422 { .compatible = "ti,lmp91000", },
423 { },
424};
425MODULE_DEVICE_TABLE(of, lmp91000_of_match);
426
427static const struct i2c_device_id lmp91000_id[] = {
428 { "lmp91000", 0 },
429 {}
430};
431MODULE_DEVICE_TABLE(i2c, lmp91000_id);
432
433static struct i2c_driver lmp91000_driver = {
434 .driver = {
435 .name = LMP91000_DRV_NAME,
436 .of_match_table = of_match_ptr(lmp91000_of_match),
437 },
438 .probe = lmp91000_probe,
439 .remove = lmp91000_remove,
440 .id_table = lmp91000_id,
441};
442module_i2c_driver(lmp91000_driver);
443
444MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
445MODULE_DESCRIPTION("LMP91000 digital potentiostat");
446MODULE_LICENSE("GPL");
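
A note on the temperature conversion in lmp91000_read_raw() above: the raw ADC reading is first turned into a processed value with iio_convert_raw_to_processed(), that value is then walked against lmp91000_temp_lut (defined earlier in the file and not visible in this listing), and the index at which the search stops is added to LMP91000_TEMP_BASE and scaled to millidegrees. The standalone sketch below reproduces only the search; the three-entry descending table and the -40 base are made-up stand-ins, since the real values are not part of this hunk.

#include <stdio.h>

#define TEMP_BASE	(-40)	/* hypothetical base, degrees C */

/* hypothetical descending thresholds, mimicking lmp91000_temp_lut */
static const int temp_lut[] = { 1875, 1867, 1860 };

static int lut_to_millicelsius(int processed)
{
	unsigned int i;

	/* stop at the first entry smaller than the measured value */
	for (i = 0; i < sizeof(temp_lut) / sizeof(temp_lut[0]); i++)
		if (temp_lut[i] < processed)
			break;

	return (TEMP_BASE + (int)i) * 1000;	/* millidegrees, as IIO reports */
}

int main(void)
{
	printf("%d\n", lut_to_millicelsius(1865));	/* prints -38000 */
	return 0;
}
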
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15cd416365c1..bd8d96b96771 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,16 @@
5 5
6menu "Pressure sensors" 6menu "Pressure sensors"
7 7
8config ABP060MG
9 tristate "Honeywell ABP pressure sensor driver"
10 depends on I2C
11 help
12 Say yes here to build support for the Honeywell ABP pressure
13 sensors.
14
15 To compile this driver as a module, choose M here: the module
16 will be called abp060mg.
17
8config BMP280 18config BMP280
9 tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver" 19 tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
10 depends on (I2C || SPI_MASTER) 20 depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index fff77185a5cc..de3dbc81dc5a 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5# When adding new entries keep the list in alphabetical order 5# When adding new entries keep the list in alphabetical order
6obj-$(CONFIG_ABP060MG) += abp060mg.o
6obj-$(CONFIG_BMP280) += bmp280.o 7obj-$(CONFIG_BMP280) += bmp280.o
7bmp280-objs := bmp280-core.o bmp280-regmap.o 8bmp280-objs := bmp280-core.o bmp280-regmap.o
8obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o 9obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
new file mode 100644
index 000000000000..43bdd0b9155f
--- /dev/null
+++ b/drivers/iio/pressure/abp060mg.c
@@ -0,0 +1,276 @@
1/*
2 * Copyright (C) 2016 - Marcin Malagowski <mrc@bourne.st>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/err.h>
17#include <linux/i2c.h>
18#include <linux/io.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/iio/iio.h>
22
23#define ABP060MG_ERROR_MASK 0xC000
24#define ABP060MG_RESP_TIME_MS 40
25#define ABP060MG_MIN_COUNTS 1638 /* = 0x0666 (10% of u14) */
26#define ABP060MG_MAX_COUNTS 14745 /* = 0x3999 (90% of u14) */
27#define ABP060MG_NUM_COUNTS (ABP060MG_MAX_COUNTS - ABP060MG_MIN_COUNTS)
28
29enum abp_variant {
30 /* gage [kPa] */
31 ABP006KG, ABP010KG, ABP016KG, ABP025KG, ABP040KG, ABP060KG, ABP100KG,
32 ABP160KG, ABP250KG, ABP400KG, ABP600KG, ABP001GG,
33 /* differential [kPa] */
34 ABP006KD, ABP010KD, ABP016KD, ABP025KD, ABP040KD, ABP060KD, ABP100KD,
35 ABP160KD, ABP250KD, ABP400KD,
36 /* gage [psi] */
37 ABP001PG, ABP005PG, ABP015PG, ABP030PG, ABP060PG, ABP100PG, ABP150PG,
38 /* differential [psi] */
39 ABP001PD, ABP005PD, ABP015PD, ABP030PD, ABP060PD,
40};
41
42struct abp_config {
43 int min;
44 int max;
45};
46
47static struct abp_config abp_config[] = {
48 /* mbar & kPa variants */
49 [ABP006KG] = { .min = 0, .max = 6000 },
50 [ABP010KG] = { .min = 0, .max = 10000 },
51 [ABP016KG] = { .min = 0, .max = 16000 },
52 [ABP025KG] = { .min = 0, .max = 25000 },
53 [ABP040KG] = { .min = 0, .max = 40000 },
54 [ABP060KG] = { .min = 0, .max = 60000 },
55 [ABP100KG] = { .min = 0, .max = 100000 },
56 [ABP160KG] = { .min = 0, .max = 160000 },
57 [ABP250KG] = { .min = 0, .max = 250000 },
58 [ABP400KG] = { .min = 0, .max = 400000 },
59 [ABP600KG] = { .min = 0, .max = 600000 },
60 [ABP001GG] = { .min = 0, .max = 1000000 },
61 [ABP006KD] = { .min = -6000, .max = 6000 },
62 [ABP010KD] = { .min = -10000, .max = 10000 },
63 [ABP016KD] = { .min = -16000, .max = 16000 },
64 [ABP025KD] = { .min = -25000, .max = 25000 },
65 [ABP040KD] = { .min = -40000, .max = 40000 },
66 [ABP060KD] = { .min = -60000, .max = 60000 },
67 [ABP100KD] = { .min = -100000, .max = 100000 },
68 [ABP160KD] = { .min = -160000, .max = 160000 },
69 [ABP250KD] = { .min = -250000, .max = 250000 },
70 [ABP400KD] = { .min = -400000, .max = 400000 },
71 /* psi variants (1 psi ~ 6895 Pa) */
72 [ABP001PG] = { .min = 0, .max = 6895 },
73 [ABP005PG] = { .min = 0, .max = 34474 },
74 [ABP015PG] = { .min = 0, .max = 103421 },
75 [ABP030PG] = { .min = 0, .max = 206843 },
76 [ABP060PG] = { .min = 0, .max = 413686 },
77 [ABP100PG] = { .min = 0, .max = 689476 },
78 [ABP150PG] = { .min = 0, .max = 1034214 },
79 [ABP001PD] = { .min = -6895, .max = 6895 },
80 [ABP005PD] = { .min = -34474, .max = 34474 },
81 [ABP015PD] = { .min = -103421, .max = 103421 },
82 [ABP030PD] = { .min = -206843, .max = 206843 },
83 [ABP060PD] = { .min = -413686, .max = 413686 },
84};
85
86struct abp_state {
87 struct i2c_client *client;
88 struct mutex lock;
89
90 /*
91 * bus-dependent MEASURE_REQUEST length.
92 * If no SMBUS_QUICK support, need to send dummy byte
93 */
94 int mreq_len;
95
96 /* model-dependent values (calculated on probe) */
97 int scale;
98 int offset;
99};
100
101static const struct iio_chan_spec abp060mg_channels[] = {
102 {
103 .type = IIO_PRESSURE,
104 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
105 BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
106 },
107};
108
109static int abp060mg_get_measurement(struct abp_state *state, int *val)
110{
111 struct i2c_client *client = state->client;
112 __be16 buf[2];
113 u16 pressure;
114 int ret;
115
116 buf[0] = 0;
117 ret = i2c_master_send(client, (u8 *)&buf, state->mreq_len);
118 if (ret < 0)
119 return ret;
120
121 msleep_interruptible(ABP060MG_RESP_TIME_MS);
122
123 ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
124 if (ret < 0)
125 return ret;
126
127 pressure = be16_to_cpu(buf[0]);
128 if (pressure & ABP060MG_ERROR_MASK)
129 return -EIO;
130
131 if (pressure < ABP060MG_MIN_COUNTS || pressure > ABP060MG_MAX_COUNTS)
132 return -EIO;
133
134 *val = pressure;
135
136 return IIO_VAL_INT;
137}
138
139static int abp060mg_read_raw(struct iio_dev *indio_dev,
140 struct iio_chan_spec const *chan, int *val,
141 int *val2, long mask)
142{
143 struct abp_state *state = iio_priv(indio_dev);
144 int ret;
145
146 mutex_lock(&state->lock);
147
148 switch (mask) {
149 case IIO_CHAN_INFO_RAW:
150 ret = abp060mg_get_measurement(state, val);
151 break;
152 case IIO_CHAN_INFO_OFFSET:
153 *val = state->offset;
154 ret = IIO_VAL_INT;
155 break;
156 case IIO_CHAN_INFO_SCALE:
157 *val = state->scale;
158 *val2 = ABP060MG_NUM_COUNTS * 1000; /* to kPa */
159 ret = IIO_VAL_FRACTIONAL;
160 break;
161 default:
162 ret = -EINVAL;
163 break;
164 }
165
166 mutex_unlock(&state->lock);
167 return ret;
168}
169
170static const struct iio_info abp060mg_info = {
171 .driver_module = THIS_MODULE,
172 .read_raw = abp060mg_read_raw,
173};
174
175static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id)
176{
177 struct abp_state *state = iio_priv(indio_dev);
178 struct abp_config *cfg = &abp_config[id];
179
180 state->scale = cfg->max - cfg->min;
181 state->offset = -ABP060MG_MIN_COUNTS;
182
183 if (cfg->min < 0) /* differential */
184 state->offset -= ABP060MG_NUM_COUNTS >> 1;
185}
186
187static int abp060mg_probe(struct i2c_client *client,
188 const struct i2c_device_id *id)
189{
190 struct iio_dev *indio_dev;
191 struct abp_state *state;
192 unsigned long cfg_id = id->driver_data;
193
194 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
195 if (!indio_dev)
196 return -ENOMEM;
197
198 state = iio_priv(indio_dev);
199 i2c_set_clientdata(client, state);
200 state->client = client;
201
202 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
203 state->mreq_len = 1;
204
205 abp060mg_init_device(indio_dev, cfg_id);
206
207 indio_dev->dev.parent = &client->dev;
208 indio_dev->name = dev_name(&client->dev);
209 indio_dev->modes = INDIO_DIRECT_MODE;
210 indio_dev->info = &abp060mg_info;
211
212 indio_dev->channels = abp060mg_channels;
213 indio_dev->num_channels = ARRAY_SIZE(abp060mg_channels);
214
215 mutex_init(&state->lock);
216
217 return devm_iio_device_register(&client->dev, indio_dev);
218}
219
220static const struct i2c_device_id abp060mg_id_table[] = {
221 /* mbar & kPa variants (abp060m [60 mbar] == abp006k [6 kPa]) */
222 /* gage: */
223 { "abp060mg", ABP006KG }, { "abp006kg", ABP006KG },
224 { "abp100mg", ABP010KG }, { "abp010kg", ABP010KG },
225 { "abp160mg", ABP016KG }, { "abp016kg", ABP016KG },
226 { "abp250mg", ABP025KG }, { "abp025kg", ABP025KG },
227 { "abp400mg", ABP040KG }, { "abp040kg", ABP040KG },
228 { "abp600mg", ABP060KG }, { "abp060kg", ABP060KG },
229 { "abp001bg", ABP100KG }, { "abp100kg", ABP100KG },
230 { "abp1_6bg", ABP160KG }, { "abp160kg", ABP160KG },
231 { "abp2_5bg", ABP250KG }, { "abp250kg", ABP250KG },
232 { "abp004bg", ABP400KG }, { "abp400kg", ABP400KG },
233 { "abp006bg", ABP600KG }, { "abp600kg", ABP600KG },
234 { "abp010bg", ABP001GG }, { "abp001gg", ABP001GG },
235 /* differential: */
236 { "abp060md", ABP006KD }, { "abp006kd", ABP006KD },
237 { "abp100md", ABP010KD }, { "abp010kd", ABP010KD },
238 { "abp160md", ABP016KD }, { "abp016kd", ABP016KD },
239 { "abp250md", ABP025KD }, { "abp025kd", ABP025KD },
240 { "abp400md", ABP040KD }, { "abp040kd", ABP040KD },
241 { "abp600md", ABP060KD }, { "abp060kd", ABP060KD },
242 { "abp001bd", ABP100KD }, { "abp100kd", ABP100KD },
243 { "abp1_6bd", ABP160KD }, { "abp160kd", ABP160KD },
244 { "abp2_5bd", ABP250KD }, { "abp250kd", ABP250KD },
245 { "abp004bd", ABP400KD }, { "abp400kd", ABP400KD },
246 /* psi variants */
247 /* gage: */
248 { "abp001pg", ABP001PG },
249 { "abp005pg", ABP005PG },
250 { "abp015pg", ABP015PG },
251 { "abp030pg", ABP030PG },
252 { "abp060pg", ABP060PG },
253 { "abp100pg", ABP100PG },
254 { "abp150pg", ABP150PG },
255 /* differential: */
256 { "abp001pd", ABP001PD },
257 { "abp005pd", ABP005PD },
258 { "abp015pd", ABP015PD },
259 { "abp030pd", ABP030PD },
260 { "abp060pd", ABP060PD },
261 { /* empty */ },
262};
263MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
264
265static struct i2c_driver abp060mg_driver = {
266 .driver = {
267 .name = "abp060mg",
268 },
269 .probe = abp060mg_probe,
270 .id_table = abp060mg_id_table,
271};
272module_i2c_driver(abp060mg_driver);
273
274MODULE_AUTHOR("Marcin Malagowski <mrc@bourne.st>");
275MODULE_DESCRIPTION("Honeywell ABP pressure sensor driver");
276MODULE_LICENSE("GPL");
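
As a cross-check on the scale and offset reported by abp060mg_read_raw() above: consumers compute pressure as (raw + offset) * scale, so a gage part reads (raw - ABP060MG_MIN_COUNTS) * (max - min) / (ABP060MG_NUM_COUNTS * 1000) kPa, with the config table values in Pa. A standalone sketch of that arithmetic, using the 0-6 kPa abp006kg entry from the table above:

#include <stdio.h>

#define ABP_MIN_COUNTS	1638
#define ABP_MAX_COUNTS	14745
#define ABP_NUM_COUNTS	(ABP_MAX_COUNTS - ABP_MIN_COUNTS)

/* IIO convention: processed = (raw + offset) * scale */
static double counts_to_kpa(int raw, int range_pa)
{
	int offset = -ABP_MIN_COUNTS;	/* gage variant; differential shifts further */

	return (raw + offset) * (double)range_pa / (ABP_NUM_COUNTS * 1000.0);
}

int main(void)
{
	printf("%.3f kPa\n", counts_to_kpa(ABP_MAX_COUNTS, 6000));	/* 6.000 */
	printf("%.3f kPa\n", counts_to_kpa(ABP_MIN_COUNTS, 6000));	/* 0.000 */
	return 0;
}
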
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 6392d7b62841..cc3f84139157 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -82,8 +82,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
82 82
83 switch (mask) { 83 switch (mask) {
84 case IIO_CHAN_INFO_RAW: 84 case IIO_CHAN_INFO_RAW:
85 if (iio_buffer_enabled(indio_dev)) 85 ret = iio_device_claim_direct_mode(indio_dev);
86 return -EBUSY; 86 if (ret)
87 return ret;
87 88
88 switch (chan->type) { 89 switch (chan->type) {
89 case IIO_PRESSURE: /* in 0.25 pascal / LSB */ 90 case IIO_PRESSURE: /* in 0.25 pascal / LSB */
@@ -91,32 +92,39 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
91 ret = mpl3115_request(data); 92 ret = mpl3115_request(data);
92 if (ret < 0) { 93 if (ret < 0) {
93 mutex_unlock(&data->lock); 94 mutex_unlock(&data->lock);
94 return ret; 95 break;
95 } 96 }
96 ret = i2c_smbus_read_i2c_block_data(data->client, 97 ret = i2c_smbus_read_i2c_block_data(data->client,
97 MPL3115_OUT_PRESS, 3, (u8 *) &tmp); 98 MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
98 mutex_unlock(&data->lock); 99 mutex_unlock(&data->lock);
99 if (ret < 0) 100 if (ret < 0)
100 return ret; 101 break;
101 *val = be32_to_cpu(tmp) >> 12; 102 *val = be32_to_cpu(tmp) >> 12;
102 return IIO_VAL_INT; 103 ret = IIO_VAL_INT;
104 break;
103 case IIO_TEMP: /* in 0.0625 celsius / LSB */ 105 case IIO_TEMP: /* in 0.0625 celsius / LSB */
104 mutex_lock(&data->lock); 106 mutex_lock(&data->lock);
105 ret = mpl3115_request(data); 107 ret = mpl3115_request(data);
106 if (ret < 0) { 108 if (ret < 0) {
107 mutex_unlock(&data->lock); 109 mutex_unlock(&data->lock);
108 return ret; 110 break;
109 } 111 }
110 ret = i2c_smbus_read_i2c_block_data(data->client, 112 ret = i2c_smbus_read_i2c_block_data(data->client,
111 MPL3115_OUT_TEMP, 2, (u8 *) &tmp); 113 MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
112 mutex_unlock(&data->lock); 114 mutex_unlock(&data->lock);
113 if (ret < 0) 115 if (ret < 0)
114 return ret; 116 break;
115 *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11); 117 *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
116 return IIO_VAL_INT; 118 ret = IIO_VAL_INT;
119 break;
117 default: 120 default:
118 return -EINVAL; 121 ret = -EINVAL;
122 break;
119 } 123 }
124
125 iio_device_release_direct_mode(indio_dev);
126 return ret;
127
120 case IIO_CHAN_INFO_SCALE: 128 case IIO_CHAN_INFO_SCALE:
121 switch (chan->type) { 129 switch (chan->type) {
122 case IIO_PRESSURE: 130 case IIO_PRESSURE:
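
The mpl3115 hunk above is a mechanical conversion to the iio_device_claim_direct_mode()/iio_device_release_direct_mode() helpers: rather than open-coding an iio_buffer_enabled() check, the read path claims direct mode once, turns the early returns inside the claimed region into breaks, and releases the claim at a single exit point. A minimal sketch of the resulting shape (foo_read_sample() and the driver name are placeholders, not part of the patch):

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;	/* -EBUSY while the buffer is running */

		ret = foo_read_sample(indio_dev, chan, val);
		if (!ret)
			ret = IIO_VAL_INT;

		/* single release point instead of scattered returns */
		iio_device_release_direct_mode(indio_dev);
		return ret;
	default:
		return -EINVAL;
	}
}
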
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f0c880..6bd53e702667 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -392,17 +392,14 @@ static int ms5611_init(struct iio_dev *indio_dev)
392 392
393 /* Enable attached regulator if any. */ 393 /* Enable attached regulator if any. */
394 st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd"); 394 st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
395 if (!IS_ERR(st->vdd)) { 395 if (IS_ERR(st->vdd))
396 ret = regulator_enable(st->vdd); 396 return PTR_ERR(st->vdd);
397 if (ret) { 397
398 dev_err(indio_dev->dev.parent, 398 ret = regulator_enable(st->vdd);
399 "failed to enable Vdd supply: %d\n", ret); 399 if (ret) {
400 return ret; 400 dev_err(indio_dev->dev.parent,
401 } 401 "failed to enable Vdd supply: %d\n", ret);
402 } else { 402 return ret;
403 ret = PTR_ERR(st->vdd);
404 if (ret != -ENODEV)
405 return ret;
406 } 403 }
407 404
408 ret = ms5611_reset(indio_dev); 405 ret = ms5611_reset(indio_dev);
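
The ms5611 simplification above leans on the regulator core: when no "vdd" supply is described for the device, devm_regulator_get() typically returns a dummy regulator rather than an error, so only real failures (including probe deferral) come back as error pointers and the old -ENODEV special case is unnecessary. One common companion pattern, shown here only as a hedged sketch and not something this patch adds, is to tie the matching disable to a devm action so the error and remove paths stay simple:

static void ms5611_regulator_disable(void *data)
{
	regulator_disable(data);
}

	/* after regulator_enable(st->vdd) has succeeded: */
	ret = devm_add_action_or_reset(indio_dev->dev.parent,
				       ms5611_regulator_disable, st->vdd);
	if (ret)
		return ret;
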
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a75eb3a..e19e0787864c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -112,115 +112,24 @@
112#define ST_PRESS_1_OUT_XL_ADDR 0x28 112#define ST_PRESS_1_OUT_XL_ADDR 0x28
113#define ST_TEMP_1_OUT_L_ADDR 0x2b 113#define ST_TEMP_1_OUT_L_ADDR 0x2b
114 114
115/*
116 * CUSTOM VALUES FOR LPS331AP SENSOR
117 * See LPS331AP datasheet:
118 * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
119 */
120#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
121#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
122#define ST_PRESS_LPS331AP_ODR_MASK 0x70
123#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
124#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
125#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
126#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
127#define ST_PRESS_LPS331AP_PW_ADDR 0x20
128#define ST_PRESS_LPS331AP_PW_MASK 0x80
129#define ST_PRESS_LPS331AP_FS_ADDR 0x23
130#define ST_PRESS_LPS331AP_FS_MASK 0x30
131#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
132#define ST_PRESS_LPS331AP_BDU_MASK 0x04
133#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
134#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
135#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
136#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
137#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
138#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
139#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
140#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
141
142/*
143 * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
144 */
145
146/* LPS001WP pressure resolution */ 115/* LPS001WP pressure resolution */
147#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL 116#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
148/* LPS001WP temperature resolution */ 117/* LPS001WP temperature resolution */
149#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL 118#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
150 119/* LPS001WP pressure gain */
151#define ST_PRESS_LPS001WP_WAI_EXP 0xba
152#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
153#define ST_PRESS_LPS001WP_ODR_MASK 0x30
154#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
155#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
156#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
157#define ST_PRESS_LPS001WP_PW_ADDR 0x20
158#define ST_PRESS_LPS001WP_PW_MASK 0x40
159#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \ 120#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
160 (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR) 121 (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
161#define ST_PRESS_LPS001WP_BDU_ADDR 0x20 122/* LPS001WP pressure and temp L addresses */
162#define ST_PRESS_LPS001WP_BDU_MASK 0x04
163#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
164#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28 123#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
165#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a 124#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
166 125
167/* 126/* LPS25H pressure and temp L addresses */
168 * CUSTOM VALUES FOR LPS25H SENSOR
169 * See LPS25H datasheet:
170 * http://www2.st.com/resource/en/datasheet/lps25h.pdf
171 */
172#define ST_PRESS_LPS25H_WAI_EXP 0xbd
173#define ST_PRESS_LPS25H_ODR_ADDR 0x20
174#define ST_PRESS_LPS25H_ODR_MASK 0x70
175#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
176#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
177#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
178#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
179#define ST_PRESS_LPS25H_PW_ADDR 0x20
180#define ST_PRESS_LPS25H_PW_MASK 0x80
181#define ST_PRESS_LPS25H_BDU_ADDR 0x20
182#define ST_PRESS_LPS25H_BDU_MASK 0x04
183#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
184#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
185#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
186#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
187#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
188#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
189#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
190#define ST_PRESS_LPS25H_MULTIREAD_BIT true
191#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 127#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
192#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b 128#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
193 129
194/*
195 * CUSTOM VALUES FOR LPS22HB SENSOR
196 * See LPS22HB datasheet:
197 * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
198 */
199
200/* LPS22HB temperature sensitivity */ 130/* LPS22HB temperature sensitivity */
201#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL 131#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
202 132
203#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
204#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
205#define ST_PRESS_LPS22HB_ODR_MASK 0x70
206#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
207#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
208#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
209#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
210#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
211#define ST_PRESS_LPS22HB_PW_ADDR 0x10
212#define ST_PRESS_LPS22HB_PW_MASK 0x70
213#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
214#define ST_PRESS_LPS22HB_BDU_MASK 0x02
215#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
216#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
217#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
218#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
219#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
220#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
221#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
222#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
223
224static const struct iio_chan_spec st_press_1_channels[] = { 133static const struct iio_chan_spec st_press_1_channels[] = {
225 { 134 {
226 .type = IIO_PRESSURE, 135 .type = IIO_PRESSURE,
@@ -321,7 +230,12 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
321 230
322static const struct st_sensor_settings st_press_sensors_settings[] = { 231static const struct st_sensor_settings st_press_sensors_settings[] = {
323 { 232 {
324 .wai = ST_PRESS_LPS331AP_WAI_EXP, 233 /*
234 * CUSTOM VALUES FOR LPS331AP SENSOR
235 * See LPS331AP datasheet:
236 * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
237 */
238 .wai = 0xbb,
325 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 239 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
326 .sensors_supported = { 240 .sensors_supported = {
327 [0] = LPS331AP_PRESS_DEV_NAME, 241 [0] = LPS331AP_PRESS_DEV_NAME,
@@ -329,24 +243,24 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
329 .ch = (struct iio_chan_spec *)st_press_1_channels, 243 .ch = (struct iio_chan_spec *)st_press_1_channels,
330 .num_ch = ARRAY_SIZE(st_press_1_channels), 244 .num_ch = ARRAY_SIZE(st_press_1_channels),
331 .odr = { 245 .odr = {
332 .addr = ST_PRESS_LPS331AP_ODR_ADDR, 246 .addr = 0x20,
333 .mask = ST_PRESS_LPS331AP_ODR_MASK, 247 .mask = 0x70,
334 .odr_avl = { 248 .odr_avl = {
335 { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, }, 249 { .hz = 1, .value = 0x01 },
336 { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, }, 250 { .hz = 7, .value = 0x05 },
337 { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, }, 251 { .hz = 13, .value = 0x06 },
338 { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, }, 252 { .hz = 25, .value = 0x07 },
339 }, 253 },
340 }, 254 },
341 .pw = { 255 .pw = {
342 .addr = ST_PRESS_LPS331AP_PW_ADDR, 256 .addr = 0x20,
343 .mask = ST_PRESS_LPS331AP_PW_MASK, 257 .mask = 0x80,
344 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 258 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
345 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 259 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
346 }, 260 },
347 .fs = { 261 .fs = {
348 .addr = ST_PRESS_LPS331AP_FS_ADDR, 262 .addr = 0x23,
349 .mask = ST_PRESS_LPS331AP_FS_MASK, 263 .mask = 0x30,
350 .fs_avl = { 264 .fs_avl = {
351 /* 265 /*
352 * Pressure and temperature sensitivity values 266 * Pressure and temperature sensitivity values
@@ -360,24 +274,27 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
360 }, 274 },
361 }, 275 },
362 .bdu = { 276 .bdu = {
363 .addr = ST_PRESS_LPS331AP_BDU_ADDR, 277 .addr = 0x20,
364 .mask = ST_PRESS_LPS331AP_BDU_MASK, 278 .mask = 0x04,
365 }, 279 },
366 .drdy_irq = { 280 .drdy_irq = {
367 .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR, 281 .addr = 0x22,
368 .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK, 282 .mask_int1 = 0x04,
369 .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK, 283 .mask_int2 = 0x20,
370 .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR, 284 .addr_ihl = 0x22,
371 .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK, 285 .mask_ihl = 0x80,
372 .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR, 286 .addr_od = 0x22,
373 .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK, 287 .mask_od = 0x40,
374 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 288 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
375 }, 289 },
376 .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT, 290 .multi_read_bit = true,
377 .bootime = 2, 291 .bootime = 2,
378 }, 292 },
379 { 293 {
380 .wai = ST_PRESS_LPS001WP_WAI_EXP, 294 /*
295 * CUSTOM VALUES FOR LPS001WP SENSOR
296 */
297 .wai = 0xba,
381 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 298 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
382 .sensors_supported = { 299 .sensors_supported = {
383 [0] = LPS001WP_PRESS_DEV_NAME, 300 [0] = LPS001WP_PRESS_DEV_NAME,
@@ -385,17 +302,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
385 .ch = (struct iio_chan_spec *)st_press_lps001wp_channels, 302 .ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
386 .num_ch = ARRAY_SIZE(st_press_lps001wp_channels), 303 .num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
387 .odr = { 304 .odr = {
388 .addr = ST_PRESS_LPS001WP_ODR_ADDR, 305 .addr = 0x20,
389 .mask = ST_PRESS_LPS001WP_ODR_MASK, 306 .mask = 0x30,
390 .odr_avl = { 307 .odr_avl = {
391 { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, }, 308 { .hz = 1, .value = 0x01 },
392 { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, }, 309 { .hz = 7, .value = 0x02 },
393 { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, }, 310 { .hz = 13, .value = 0x03 },
394 }, 311 },
395 }, 312 },
396 .pw = { 313 .pw = {
397 .addr = ST_PRESS_LPS001WP_PW_ADDR, 314 .addr = 0x20,
398 .mask = ST_PRESS_LPS001WP_PW_MASK, 315 .mask = 0x40,
399 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 316 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
400 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 317 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
401 }, 318 },
@@ -413,17 +330,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
413 }, 330 },
414 }, 331 },
415 .bdu = { 332 .bdu = {
416 .addr = ST_PRESS_LPS001WP_BDU_ADDR, 333 .addr = 0x20,
417 .mask = ST_PRESS_LPS001WP_BDU_MASK, 334 .mask = 0x04,
418 }, 335 },
419 .drdy_irq = { 336 .drdy_irq = {
420 .addr = 0, 337 .addr = 0,
421 }, 338 },
422 .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT, 339 .multi_read_bit = true,
423 .bootime = 2, 340 .bootime = 2,
424 }, 341 },
425 { 342 {
426 .wai = ST_PRESS_LPS25H_WAI_EXP, 343 /*
344 * CUSTOM VALUES FOR LPS25H SENSOR
345 * See LPS25H datasheet:
346 * http://www2.st.com/resource/en/datasheet/lps25h.pdf
347 */
348 .wai = 0xbd,
427 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 349 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
428 .sensors_supported = { 350 .sensors_supported = {
429 [0] = LPS25H_PRESS_DEV_NAME, 351 [0] = LPS25H_PRESS_DEV_NAME,
@@ -431,18 +353,18 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
431 .ch = (struct iio_chan_spec *)st_press_1_channels, 353 .ch = (struct iio_chan_spec *)st_press_1_channels,
432 .num_ch = ARRAY_SIZE(st_press_1_channels), 354 .num_ch = ARRAY_SIZE(st_press_1_channels),
433 .odr = { 355 .odr = {
434 .addr = ST_PRESS_LPS25H_ODR_ADDR, 356 .addr = 0x20,
435 .mask = ST_PRESS_LPS25H_ODR_MASK, 357 .mask = 0x70,
436 .odr_avl = { 358 .odr_avl = {
437 { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, }, 359 { .hz = 1, .value = 0x01 },
438 { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, }, 360 { .hz = 7, .value = 0x02 },
439 { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, }, 361 { .hz = 13, .value = 0x03 },
440 { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, }, 362 { .hz = 25, .value = 0x04 },
441 }, 363 },
442 }, 364 },
443 .pw = { 365 .pw = {
444 .addr = ST_PRESS_LPS25H_PW_ADDR, 366 .addr = 0x20,
445 .mask = ST_PRESS_LPS25H_PW_MASK, 367 .mask = 0x80,
446 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, 368 .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
447 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 369 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
448 }, 370 },
@@ -460,24 +382,29 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
460 }, 382 },
461 }, 383 },
462 .bdu = { 384 .bdu = {
463 .addr = ST_PRESS_LPS25H_BDU_ADDR, 385 .addr = 0x20,
464 .mask = ST_PRESS_LPS25H_BDU_MASK, 386 .mask = 0x04,
465 }, 387 },
466 .drdy_irq = { 388 .drdy_irq = {
467 .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR, 389 .addr = 0x23,
468 .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK, 390 .mask_int1 = 0x01,
469 .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK, 391 .mask_int2 = 0x10,
470 .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR, 392 .addr_ihl = 0x22,
471 .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK, 393 .mask_ihl = 0x80,
472 .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR, 394 .addr_od = 0x22,
473 .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK, 395 .mask_od = 0x40,
474 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 396 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
475 }, 397 },
476 .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT, 398 .multi_read_bit = true,
477 .bootime = 2, 399 .bootime = 2,
478 }, 400 },
479 { 401 {
480 .wai = ST_PRESS_LPS22HB_WAI_EXP, 402 /*
403 * CUSTOM VALUES FOR LPS22HB SENSOR
404 * See LPS22HB datasheet:
405 * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
406 */
407 .wai = 0xb1,
481 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, 408 .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
482 .sensors_supported = { 409 .sensors_supported = {
483 [0] = LPS22HB_PRESS_DEV_NAME, 410 [0] = LPS22HB_PRESS_DEV_NAME,
@@ -485,19 +412,19 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
485 .ch = (struct iio_chan_spec *)st_press_lps22hb_channels, 412 .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
486 .num_ch = ARRAY_SIZE(st_press_lps22hb_channels), 413 .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
487 .odr = { 414 .odr = {
488 .addr = ST_PRESS_LPS22HB_ODR_ADDR, 415 .addr = 0x10,
489 .mask = ST_PRESS_LPS22HB_ODR_MASK, 416 .mask = 0x70,
490 .odr_avl = { 417 .odr_avl = {
491 { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, }, 418 { .hz = 1, .value = 0x01 },
492 { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, }, 419 { .hz = 10, .value = 0x02 },
493 { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, }, 420 { .hz = 25, .value = 0x03 },
494 { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, }, 421 { .hz = 50, .value = 0x04 },
495 { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, }, 422 { .hz = 75, .value = 0x05 },
496 }, 423 },
497 }, 424 },
498 .pw = { 425 .pw = {
499 .addr = ST_PRESS_LPS22HB_PW_ADDR, 426 .addr = 0x10,
500 .mask = ST_PRESS_LPS22HB_PW_MASK, 427 .mask = 0x70,
501 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 428 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
502 }, 429 },
503 .fs = { 430 .fs = {
@@ -514,20 +441,20 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
514 }, 441 },
515 }, 442 },
516 .bdu = { 443 .bdu = {
517 .addr = ST_PRESS_LPS22HB_BDU_ADDR, 444 .addr = 0x10,
518 .mask = ST_PRESS_LPS22HB_BDU_MASK, 445 .mask = 0x02,
519 }, 446 },
520 .drdy_irq = { 447 .drdy_irq = {
521 .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR, 448 .addr = 0x12,
522 .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK, 449 .mask_int1 = 0x04,
523 .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK, 450 .mask_int2 = 0x08,
524 .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR, 451 .addr_ihl = 0x12,
525 .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK, 452 .mask_ihl = 0x80,
526 .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR, 453 .addr_od = 0x12,
527 .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK, 454 .mask_od = 0x40,
528 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 455 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
529 }, 456 },
530 .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT, 457 .multi_read_bit = true,
531 }, 458 },
532}; 459};
533 460
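
The st_pressure rework above folds the per-device register constants straight into st_press_sensors_settings and uses designated initializers, so each odr_avl entry now reads as an explicit Hz-to-register-value pair. The helper that consumes the table (in the st_sensors core, not in this file) is essentially a linear search; a simplified standalone sketch of that lookup using the LPS331AP values shown above:

#include <stdio.h>

struct odr_avl {
	unsigned int hz;
	unsigned char value;
};

/* values copied from the LPS331AP entry above */
static const struct odr_avl lps331ap_odr[] = {
	{ .hz = 1,  .value = 0x01 },
	{ .hz = 7,  .value = 0x05 },
	{ .hz = 13, .value = 0x06 },
	{ .hz = 25, .value = 0x07 },
};

static int odr_to_regval(unsigned int hz, unsigned char *regval)
{
	unsigned int i;

	for (i = 0; i < sizeof(lps331ap_odr) / sizeof(lps331ap_odr[0]); i++) {
		if (lps331ap_odr[i].hz == hz) {
			*regval = lps331ap_odr[i].value;
			return 0;
		}
	}

	return -1;	/* unsupported rate */
}

int main(void)
{
	unsigned char v;

	if (!odr_to_regval(13, &v))
		printf("13 Hz -> 0x%02x\n", v);	/* 0x06 */
	return 0;
}
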
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb46fda6..c720c3ac0b9b 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -147,12 +147,8 @@ struct zpa2326_private {
147#define zpa2326_warn(_idev, _format, _arg...) \ 147#define zpa2326_warn(_idev, _format, _arg...) \
148 dev_warn(_idev->dev.parent, _format, ##_arg) 148 dev_warn(_idev->dev.parent, _format, ##_arg)
149 149
150#ifdef DEBUG
151#define zpa2326_dbg(_idev, _format, _arg...) \ 150#define zpa2326_dbg(_idev, _format, _arg...) \
152 dev_dbg(_idev->dev.parent, _format, ##_arg) 151 dev_dbg(_idev->dev.parent, _format, ##_arg)
153#else
154#define zpa2326_dbg(_idev, _format, _arg...)
155#endif
156 152
157bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg) 153bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
158{ 154{
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 3141c3c161bb..1fa9eefa0982 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -301,8 +301,6 @@ static int lidar_probe(struct i2c_client *client,
301 if (ret) 301 if (ret)
302 goto error_unreg_buffer; 302 goto error_unreg_buffer;
303 pm_runtime_enable(&client->dev); 303 pm_runtime_enable(&client->dev);
304
305 pm_runtime_mark_last_busy(&client->dev);
306 pm_runtime_idle(&client->dev); 304 pm_runtime_idle(&client->dev);
307 305
308 return 0; 306 return 0;
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c8f027b4ea4c..0f3fab47fe48 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -183,6 +183,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
183 tscadc->irq = err; 183 tscadc->irq = err;
184 184
185 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 185 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
186 tscadc->tscadc_phys_base = res->start;
186 tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res); 187 tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
187 if (IS_ERR(tscadc->tscadc_base)) 188 if (IS_ERR(tscadc->tscadc_base))
188 return PTR_ERR(tscadc->tscadc_base); 189 return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index b9fbd0107008..b21d8aa8d653 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1567,7 +1567,6 @@ static const struct net_device_ops slic_netdev_ops = {
1567 .ndo_set_mac_address = eth_mac_addr, 1567 .ndo_set_mac_address = eth_mac_addr,
1568 .ndo_get_stats64 = slic_get_stats, 1568 .ndo_get_stats64 = slic_get_stats,
1569 .ndo_set_rx_mode = slic_set_rx_mode, 1569 .ndo_set_rx_mode = slic_set_rx_mode,
1570 .ndo_change_mtu = eth_change_mtu,
1571 .ndo_validate_addr = eth_validate_addr, 1570 .ndo_validate_addr = eth_validate_addr,
1572}; 1571};
1573 1572
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 8abd80dbcbed..47268ecedc4d 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/mfd/core.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
@@ -87,6 +88,41 @@ exit:
87 return ret; 88 return ret;
88} 89}
89 90
91static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
92{
93 struct cros_ec_command *msg;
94 int ret;
95
96 if (ec->features[0] == -1U && ec->features[1] == -1U) {
97 /* features bitmap not read yet */
98
99 msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
100 if (!msg)
101 return -ENOMEM;
102
103 msg->version = 0;
104 msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
105 msg->insize = sizeof(ec->features);
106 msg->outsize = 0;
107
108 ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
109 if (ret < 0 || msg->result != EC_RES_SUCCESS) {
110 dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
111 ret, msg->result);
112 memset(ec->features, 0, sizeof(ec->features));
113 } else {
114 memcpy(ec->features, msg->data, sizeof(ec->features));
115 }
116
117 dev_dbg(ec->dev, "EC features %08x %08x\n",
118 ec->features[0], ec->features[1]);
119
120 kfree(msg);
121 }
122
123 return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
124}
125
90/* Device file ops */ 126/* Device file ops */
91static int ec_device_open(struct inode *inode, struct file *filp) 127static int ec_device_open(struct inode *inode, struct file *filp)
92{ 128{
@@ -230,6 +266,123 @@ static void __remove(struct device *dev)
230 kfree(ec); 266 kfree(ec);
231} 267}
232 268
269static void cros_ec_sensors_register(struct cros_ec_dev *ec)
270{
271 /*
272 * Issue a command to get the number of sensors reported.
273 * Build an array of sensor drivers and register them all.
274 */
275 int ret, i, id, sensor_num;
276 struct mfd_cell *sensor_cells;
277 struct cros_ec_sensor_platform *sensor_platforms;
278 int sensor_type[MOTIONSENSE_TYPE_MAX];
279 struct ec_params_motion_sense *params;
280 struct ec_response_motion_sense *resp;
281 struct cros_ec_command *msg;
282
283 msg = kzalloc(sizeof(struct cros_ec_command) +
284 max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
285 if (msg == NULL)
286 return;
287
288 msg->version = 2;
289 msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
290 msg->outsize = sizeof(*params);
291 msg->insize = sizeof(*resp);
292
293 params = (struct ec_params_motion_sense *)msg->data;
294 params->cmd = MOTIONSENSE_CMD_DUMP;
295
296 ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
297 if (ret < 0 || msg->result != EC_RES_SUCCESS) {
298 dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
299 ret, msg->result);
300 goto error;
301 }
302
303 resp = (struct ec_response_motion_sense *)msg->data;
304 sensor_num = resp->dump.sensor_count;
305 /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
306 sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
307 GFP_KERNEL);
308 if (sensor_cells == NULL)
309 goto error;
310
311 sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
312 (sensor_num + 1), GFP_KERNEL);
313 if (sensor_platforms == NULL)
314 goto error_platforms;
315
316 memset(sensor_type, 0, sizeof(sensor_type));
317 id = 0;
318 for (i = 0; i < sensor_num; i++) {
319 params->cmd = MOTIONSENSE_CMD_INFO;
320 params->info.sensor_num = i;
321 ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
322 if (ret < 0 || msg->result != EC_RES_SUCCESS) {
323 dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
324 i, ret, msg->result);
325 continue;
326 }
327 switch (resp->info.type) {
328 case MOTIONSENSE_TYPE_ACCEL:
329 sensor_cells[id].name = "cros-ec-accel";
330 break;
331 case MOTIONSENSE_TYPE_GYRO:
332 sensor_cells[id].name = "cros-ec-gyro";
333 break;
334 case MOTIONSENSE_TYPE_MAG:
335 sensor_cells[id].name = "cros-ec-mag";
336 break;
337 case MOTIONSENSE_TYPE_PROX:
338 sensor_cells[id].name = "cros-ec-prox";
339 break;
340 case MOTIONSENSE_TYPE_LIGHT:
341 sensor_cells[id].name = "cros-ec-light";
342 break;
343 case MOTIONSENSE_TYPE_ACTIVITY:
344 sensor_cells[id].name = "cros-ec-activity";
345 break;
346 default:
347 dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
348 continue;
349 }
350 sensor_platforms[id].sensor_num = i;
351 sensor_cells[id].id = sensor_type[resp->info.type];
352 sensor_cells[id].platform_data = &sensor_platforms[id];
353 sensor_cells[id].pdata_size =
354 sizeof(struct cros_ec_sensor_platform);
355
356 sensor_type[resp->info.type]++;
357 id++;
358 }
359 if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
360 sensor_platforms[id].sensor_num = sensor_num;
361
362 sensor_cells[id].name = "cros-ec-angle";
363 sensor_cells[id].id = 0;
364 sensor_cells[id].platform_data = &sensor_platforms[id];
365 sensor_cells[id].pdata_size =
366 sizeof(struct cros_ec_sensor_platform);
367 id++;
368 }
369 if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
370 sensor_cells[id].name = "cros-ec-ring";
371 id++;
372 }
373
374 ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
375 NULL, 0, NULL);
376 if (ret)
377 dev_err(ec->dev, "failed to add EC sensors\n");
378
379 kfree(sensor_platforms);
380error_platforms:
381 kfree(sensor_cells);
382error:
383 kfree(msg);
384}
385
233static int ec_device_probe(struct platform_device *pdev) 386static int ec_device_probe(struct platform_device *pdev)
234{ 387{
235 int retval = -ENOMEM; 388 int retval = -ENOMEM;
@@ -245,6 +398,8 @@ static int ec_device_probe(struct platform_device *pdev)
245 ec->ec_dev = dev_get_drvdata(dev->parent); 398 ec->ec_dev = dev_get_drvdata(dev->parent);
246 ec->dev = dev; 399 ec->dev = dev;
247 ec->cmd_offset = ec_platform->cmd_offset; 400 ec->cmd_offset = ec_platform->cmd_offset;
401 ec->features[0] = -1U; /* Not cached yet */
402 ec->features[1] = -1U; /* Not cached yet */
248 device_initialize(&ec->class_dev); 403 device_initialize(&ec->class_dev);
249 cdev_init(&ec->cdev, &fops); 404 cdev_init(&ec->cdev, &fops);
250 405
@@ -282,6 +437,10 @@ static int ec_device_probe(struct platform_device *pdev)
282 goto dev_reg_failed; 437 goto dev_reg_failed;
283 } 438 }
284 439
440 /* check whether this EC is a sensor hub. */
441 if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
442 cros_ec_sensors_register(ec);
443
285 return 0; 444 return 0;
286 445
287dev_reg_failed: 446dev_reg_failed:
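
For clarity on the caching scheme in cros_ec_check_features() above: the EC reports a 64-bit feature bitmap as two 32-bit words, ec->features[] starts out filled with -1U as a "not fetched yet" sentinel, and a feature test indexes the word with feature / 32 and masks the bit within it (EC_FEATURE_MASK_0() is assumed here to expand to a mask of bit feature % 32). A standalone sketch of the bit test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* same shape as EC_FEATURE_MASK_0(feature) in the EC command headers */
#define FEATURE_MASK(feature)	(1U << ((feature) % 32))

static bool feature_set(const uint32_t features[2], unsigned int feature)
{
	return features[feature / 32] & FEATURE_MASK(feature);
}

int main(void)
{
	uint32_t features[2] = { 0x00000000, 0x00000004 };	/* only bit 34 set */

	printf("feature 34: %d\n", feature_set(features, 34));	/* 1 */
	printf("feature 2:  %d\n", feature_set(features, 2));	/* 0 */
	return 0;
}
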
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 58a7b3504b82..cd005cd41413 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,8 +24,6 @@ menuconfig STAGING
24 24
25if STAGING 25if STAGING
26 26
27source "drivers/staging/slicoss/Kconfig"
28
29source "drivers/staging/wlan-ng/Kconfig" 27source "drivers/staging/wlan-ng/Kconfig"
30 28
31source "drivers/staging/comedi/Kconfig" 29source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2fa9745db614..831e2e891989 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,7 +1,6 @@
1# Makefile for staging directory 1# Makefile for staging directory
2 2
3obj-y += media/ 3obj-y += media/
4obj-$(CONFIG_SLICOSS) += slicoss/
5obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 4obj-$(CONFIG_PRISM2_USB) += wlan-ng/
6obj-$(CONFIG_COMEDI) += comedi/ 5obj-$(CONFIG_COMEDI) += comedi/
7obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ 6obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
@@ -41,4 +40,4 @@ obj-$(CONFIG_MOST) += most/
41obj-$(CONFIG_ISDN_I4L) += i4l/ 40obj-$(CONFIG_ISDN_I4L) += i4l/
42obj-$(CONFIG_KS7010) += ks7010/ 41obj-$(CONFIG_KS7010) += ks7010/
43obj-$(CONFIG_GREYBUS) += greybus/ 42obj-$(CONFIG_GREYBUS) += greybus/
44obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/ 43obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 64d8c8720960..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,13 +25,5 @@ ion/
25 exposes existing cma regions and doesn't reserve unecessarily memory when 25 exposes existing cma regions and doesn't reserve unecessarily memory when
26 booting a system which doesn't use ion. 26 booting a system which doesn't use ion.
27 27
28sync framework:
29 - remove CONFIG_SW_SYNC_USER, it is used only for testing/debugging and
30 should not be upstreamed.
31 - port CONFIG_SW_SYNC_USER tests interfaces to use debugfs somehow
32 - port libsync tests to kselftest
33 - clean up and ABI check for security issues
34 - move it to drivers/base/dma-buf
35
36Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: 28Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
37Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> 29Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ca9a53c03f0f..7cbad0d45b9c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -100,39 +100,43 @@ static DEFINE_MUTEX(ashmem_mutex);
100static struct kmem_cache *ashmem_area_cachep __read_mostly; 100static struct kmem_cache *ashmem_area_cachep __read_mostly;
101static struct kmem_cache *ashmem_range_cachep __read_mostly; 101static struct kmem_cache *ashmem_range_cachep __read_mostly;
102 102
103#define range_size(range) \ 103static inline unsigned long range_size(struct ashmem_range *range)
104 ((range)->pgend - (range)->pgstart + 1) 104{
105 return range->pgend - range->pgstart + 1;
106}
105 107
106#define range_on_lru(range) \ 108static inline bool range_on_lru(struct ashmem_range *range)
107 ((range)->purged == ASHMEM_NOT_PURGED) 109{
110 return range->purged == ASHMEM_NOT_PURGED;
111}
108 112
109static inline int page_range_subsumes_range(struct ashmem_range *range, 113static inline bool page_range_subsumes_range(struct ashmem_range *range,
110 size_t start, size_t end) 114 size_t start, size_t end)
111{ 115{
112 return (((range)->pgstart >= (start)) && ((range)->pgend <= (end))); 116 return (range->pgstart >= start) && (range->pgend <= end);
113} 117}
114 118
115static inline int page_range_subsumed_by_range(struct ashmem_range *range, 119static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
116 size_t start, size_t end) 120 size_t start, size_t end)
117{ 121{
118 return (((range)->pgstart <= (start)) && ((range)->pgend >= (end))); 122 return (range->pgstart <= start) && (range->pgend >= end);
119} 123}
120 124
121static inline int page_in_range(struct ashmem_range *range, size_t page) 125static inline bool page_in_range(struct ashmem_range *range, size_t page)
122{ 126{
123 return (((range)->pgstart <= (page)) && ((range)->pgend >= (page))); 127 return (range->pgstart <= page) && (range->pgend >= page);
124} 128}
125 129
126static inline int page_range_in_range(struct ashmem_range *range, 130static inline bool page_range_in_range(struct ashmem_range *range,
127 size_t start, size_t end) 131 size_t start, size_t end)
128{ 132{
129 return (page_in_range(range, start) || page_in_range(range, end) || 133 return page_in_range(range, start) || page_in_range(range, end) ||
130 page_range_subsumes_range(range, start, end)); 134 page_range_subsumes_range(range, start, end);
131} 135}
132 136
133static inline int range_before_page(struct ashmem_range *range, size_t page) 137static inline bool range_before_page(struct ashmem_range *range, size_t page)
134{ 138{
135 return ((range)->pgend < (page)); 139 return range->pgend < page;
136} 140}
137 141
138#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) 142#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
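
The ashmem conversion above replaces function-like macros with static inline helpers, which gives the arguments real types and avoids the multiple-evaluation hazard that macros carry. A contrived standalone example of that hazard (not taken from ashmem): the macro expands its argument twice, so an argument with a side effect ends up mixing fields from two different ranges, while the inline function evaluates it exactly once.

#include <stdio.h>

struct range { int pgstart, pgend; };

#define RANGE_SIZE_MACRO(r)	((r)->pgend - (r)->pgstart + 1)

static inline int range_size_inline(struct range *r)
{
	return r->pgend - r->pgstart + 1;
}

static struct range ranges[2] = { { 0, 3 }, { 10, 19 } };
static int calls;

static struct range *next_range(void)
{
	return &ranges[calls++ % 2];	/* side effect: advances on every call */
}

int main(void)
{
	calls = 0;
	printf("macro:  %d\n", RANGE_SIZE_MACRO(next_range()));	/* not 4 */
	calls = 0;
	printf("inline: %d\n", range_size_inline(next_range()));	/* 4 */
	return 0;
}
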
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7ef02b..d5cc3070e83f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1013,7 +1013,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1013 return 0; 1013 return 0;
1014} 1014}
1015 1015
1016static struct dma_buf_ops dma_buf_ops = { 1016static const struct dma_buf_ops dma_buf_ops = {
1017 .map_dma_buf = ion_map_dma_buf, 1017 .map_dma_buf = ion_map_dma_buf,
1018 .unmap_dma_buf = ion_unmap_dma_buf, 1018 .unmap_dma_buf = ion_unmap_dma_buf,
1019 .mmap = ion_mmap, 1019 .mmap = ion_mmap,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c76c753..cf5c010d32bc 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -58,7 +58,7 @@ static struct ion_platform_heap dummy_heaps[] = {
58 }, 58 },
59}; 59};
60 60
61static struct ion_platform_data dummy_ion_pdata = { 61static const struct ion_platform_data dummy_ion_pdata = {
62 .nr = ARRAY_SIZE(dummy_heaps), 62 .nr = ARRAY_SIZE(dummy_heaps),
63 .heaps = dummy_heaps, 63 .heaps = dummy_heaps,
64}; 64};
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d505af8..3ebbb75746e8 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -30,7 +30,7 @@
30 30
31static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | 31static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
32 __GFP_NORETRY) & ~__GFP_RECLAIM; 32 __GFP_NORETRY) & ~__GFP_RECLAIM;
33static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO); 33static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
34static const unsigned int orders[] = {8, 4, 0}; 34static const unsigned int orders[] = {8, 4, 0};
35 35
36static int order_to_index(unsigned int order) 36static int order_to_index(unsigned int order)
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
index ffef06f63133..480242e02f8d 100644
--- a/drivers/staging/android/uapi/ion_test.h
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -66,5 +66,4 @@ struct ion_test_rw_data {
66#define ION_IOC_TEST_KERNEL_MAPPING \ 66#define ION_IOC_TEST_KERNEL_MAPPING \
67 _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) 67 _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
68 68
69
70#endif /* _UAPI_LINUX_ION_H */ 69#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 7b8be5293883..bf3fe7c61be5 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -68,7 +68,7 @@ struct clk_wzrd {
68 struct clk *axi_clk; 68 struct clk *axi_clk;
69 struct clk *clks_internal[wzrd_clk_int_max]; 69 struct clk *clks_internal[wzrd_clk_int_max];
70 struct clk *clkout[WZRD_NUM_OUTPUTS]; 70 struct clk *clkout[WZRD_NUM_OUTPUTS];
71 int speed_grade; 71 unsigned int speed_grade;
72 bool suspended; 72 bool suspended;
73}; 73};
74 74
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 08fb26b51a5f..a1c1081906c5 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -245,6 +245,22 @@ enum comedi_subdevice_type {
245/* configuration instructions */ 245/* configuration instructions */
246 246
247/** 247/**
248 * enum comedi_io_direction - COMEDI I/O directions
249 * @COMEDI_INPUT: Input.
250 * @COMEDI_OUTPUT: Output.
251 * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
252 *
253 * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
254 * report a direction. They may also be used in other places where a direction
255 * needs to be specified.
256 */
257enum comedi_io_direction {
258 COMEDI_INPUT = 0,
259 COMEDI_OUTPUT = 1,
260 COMEDI_OPENDRAIN = 2
261};
262
263/**
248 * enum configuration_ids - COMEDI configuration instruction codes 264 * enum configuration_ids - COMEDI configuration instruction codes
249 * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input. 265 * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
250 * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output. 266 * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
@@ -296,9 +312,9 @@ enum comedi_subdevice_type {
296 * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity. 312 * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
297 */ 313 */
298enum configuration_ids { 314enum configuration_ids {
299 INSN_CONFIG_DIO_INPUT = 0, 315 INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
300 INSN_CONFIG_DIO_OUTPUT = 1, 316 INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
301 INSN_CONFIG_DIO_OPENDRAIN = 2, 317 INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
302 INSN_CONFIG_ANALOG_TRIG = 16, 318 INSN_CONFIG_ANALOG_TRIG = 16,
303/* INSN_CONFIG_WAVEFORM = 17, */ 319/* INSN_CONFIG_WAVEFORM = 17, */
304/* INSN_CONFIG_TRIG = 18, */ 320/* INSN_CONFIG_TRIG = 18, */
@@ -397,22 +413,6 @@ enum comedi_digital_trig_op {
397}; 413};
398 414
399/** 415/**
400 * enum comedi_io_direction - COMEDI I/O directions
401 * @COMEDI_INPUT: Input.
402 * @COMEDI_OUTPUT: Output.
403 * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
404 *
405 * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
406 * report a direction. They may also be used in other places where a direction
407 * needs to be specified.
408 */
409enum comedi_io_direction {
410 COMEDI_INPUT = 0,
411 COMEDI_OUTPUT = 1,
412 COMEDI_OPENDRAIN = 2
413};
414
415/**
416 * enum comedi_support_level - support level for a COMEDI feature 416 * enum comedi_support_level - support level for a COMEDI feature
417 * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature. 417 * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
418 * @COMEDI_SUPPORTED: Feature is supported. 418 * @COMEDI_SUPPORTED: Feature is supported.
@@ -1104,18 +1104,19 @@ enum ni_gpct_other_select {
1104enum ni_gpct_arm_source { 1104enum ni_gpct_arm_source {
1105 NI_GPCT_ARM_IMMEDIATE = 0x0, 1105 NI_GPCT_ARM_IMMEDIATE = 0x0,
1106 /* 1106 /*
1107 * Start both the counter and the adjacent pared 1107 * Start both the counter and the adjacent paired counter simultaneously
1108 * counter simultaneously
1109 */ 1108 */
1110 NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, 1109 NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
1111 /* 1110 /*
1112 * NI doesn't document bits for selecting hardware arm triggers. 1111 * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
1113 * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least 1112 * bits (3 bits for 660x or 5 bits for m-series) through to the
1114 * significant bits (3 bits for 660x or 5 bits for m-series) 1113 * hardware. To select a hardware trigger, pass the appropriate select
1115 * through to the hardware. This will at least allow someone to 1114 * bit, e.g.,
1116 * figure out what the bits do later. 1115 * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
1116 * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
1117 */ 1117 */
1118 NI_GPCT_ARM_UNKNOWN = 0x1000, 1118 NI_GPCT_HW_ARM = 0x1000,
1119 NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */
1119}; 1120};
1120 1121
1121/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */ 1122/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb637665eb7..0c7c37a8ff33 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -426,6 +426,18 @@ enum comedi_cb {
426 * handler will be called with the COMEDI device structure's board_ptr member 426 * handler will be called with the COMEDI device structure's board_ptr member
427 * pointing to the matched pointer to a board name within the driver's private 427 * pointing to the matched pointer to a board name within the driver's private
428 * array of static, read-only board type information. 428 * array of static, read-only board type information.
429 *
430 * The @detach handler has two roles. If a COMEDI device was successfully
431 * configured by the @attach or @auto_attach handler, it is called when the
432 * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
433 * unloading of the driver, or due to device removal). It is also called when
434 * the @attach or @auto_attach handler returns an error. Therefore, the
435 * @attach or @auto_attach handlers can defer clean-up on error until the
436 * @detach handler is called. If the @attach or @auto_attach handlers free
437 * any resources themselves, they must prevent the @detach handler from
438 * freeing the same resources. The @detach handler must not assume that all
439 * resources requested by the @attach or @auto_attach handler were
440 * successfully allocated.
429 */ 441 */
430struct comedi_driver { 442struct comedi_driver {
431 /* private: */ 443 /* private: */
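
The kernel-doc paragraph added above requires the detach handler to cope with a partially completed attach. A minimal sketch of a detach handler written to that rule, for a hypothetical PCI driver (the mydrv_* names and the scan_buf member are illustrative, not from this patch):

	static void mydrv_detach(struct comedi_device *dev)
	{
		struct mydrv_private *devpriv = dev->private;

		/* attach may have failed before the IRQ was requested */
		if (dev->irq) {
			free_irq(dev->irq, dev);
			dev->irq = 0;
		}
		/*
		 * devpriv is NULL if attach failed before allocating it;
		 * the core frees devpriv itself, so only release its members.
		 */
		if (devpriv)
			kfree(devpriv->scan_buf);
		comedi_pci_detach(dev);
	}
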
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index ccb37d1f0f8e..987414741605 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -248,8 +248,8 @@ static void cb_pcidda_write_caldac(struct comedi_device *dev,
248 cb_pcidda_serial_out(dev, value, num_caldac_bits); 248 cb_pcidda_serial_out(dev, value, num_caldac_bits);
249 249
250/* 250/*
251* latch stream into appropriate caldac deselect reference dac 251 * latch stream into appropriate caldac deselect reference dac
252*/ 252 */
253 cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT; 253 cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
254 /* deactivate caldacs (one caldac for every two channels) */ 254 /* deactivate caldacs (one caldac for every two channels) */
255 for (i = 0; i < max_num_caldacs; i++) 255 for (i = 0; i < max_num_caldacs; i++)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index b1c0860135d0..05126ba4ba51 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -837,7 +837,7 @@ static int mite_setup(struct comedi_device *dev, struct mite *mite,
837 * of 0x61f and bursts worked. 6281 powered up with register value of 837 * of 0x61f and bursts worked. 6281 powered up with register value of
838 * 0x1f and bursts didn't work. The NI windows driver reads the 838 * 0x1f and bursts didn't work. The NI windows driver reads the
839 * register, then does a bitwise-or of 0x600 with it and writes it back. 839 * register, then does a bitwise-or of 0x600 with it and writes it back.
840 * 840 *
841 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be 841 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
842 * written and read back. The bits 0x1f always read as 1. 842 * written and read back. The bits 0x1f always read as 1.
843 * The rest always read as zero. 843 * The rest always read as zero.
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0f97d7b611d7..b2e382888981 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1832,11 +1832,10 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1832 unsigned int *data) 1832 unsigned int *data)
1833{ 1833{
1834 struct ni_private *devpriv = dev->private; 1834 struct ni_private *devpriv = dev->private;
1835 unsigned int mask = (s->maxdata + 1) >> 1; 1835 unsigned int mask = s->maxdata;
1836 int i, n; 1836 int i, n;
1837 unsigned int signbits; 1837 unsigned int signbits;
1838 unsigned int d; 1838 unsigned int d;
1839 unsigned long dl;
1840 1839
1841 ni_load_channelgain_list(dev, s, 1, &insn->chanspec); 1840 ni_load_channelgain_list(dev, s, 1, &insn->chanspec);
1842 1841
@@ -1875,7 +1874,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1875 return -ETIME; 1874 return -ETIME;
1876 } 1875 }
1877 d += signbits; 1876 d += signbits;
1878 data[n] = d; 1877 data[n] = d & 0xffff;
1879 } 1878 }
1880 } else if (devpriv->is_6143) { 1879 } else if (devpriv->is_6143) {
1881 for (n = 0; n < insn->n; n++) { 1880 for (n = 0; n < insn->n; n++) {
@@ -1887,15 +1886,15 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1887 * bit to move a single 16bit stranded sample into 1886 * bit to move a single 16bit stranded sample into
1888 * the FIFO. 1887 * the FIFO.
1889 */ 1888 */
1890 dl = 0; 1889 d = 0;
1891 for (i = 0; i < NI_TIMEOUT; i++) { 1890 for (i = 0; i < NI_TIMEOUT; i++) {
1892 if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 1891 if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
1893 0x01) { 1892 0x01) {
1894 /* Get stranded sample into FIFO */ 1893 /* Get stranded sample into FIFO */
1895 ni_writel(dev, 0x01, 1894 ni_writel(dev, 0x01,
1896 NI6143_AI_FIFO_CTRL_REG); 1895 NI6143_AI_FIFO_CTRL_REG);
1897 dl = ni_readl(dev, 1896 d = ni_readl(dev,
1898 NI6143_AI_FIFO_DATA_REG); 1897 NI6143_AI_FIFO_DATA_REG);
1899 break; 1898 break;
1900 } 1899 }
1901 } 1900 }
@@ -1903,7 +1902,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1903 dev_err(dev->class_dev, "timeout\n"); 1902 dev_err(dev->class_dev, "timeout\n");
1904 return -ETIME; 1903 return -ETIME;
1905 } 1904 }
1906 data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF; 1905 data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF;
1907 } 1906 }
1908 } else { 1907 } else {
1909 for (n = 0; n < insn->n; n++) { 1908 for (n = 0; n < insn->n; n++) {
@@ -1919,14 +1918,13 @@ static int ni_ai_insn_read(struct comedi_device *dev,
1919 return -ETIME; 1918 return -ETIME;
1920 } 1919 }
1921 if (devpriv->is_m_series) { 1920 if (devpriv->is_m_series) {
1922 dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG); 1921 d = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
1923 dl &= mask; 1922 d &= mask;
1924 data[n] = dl; 1923 data[n] = d;
1925 } else { 1924 } else {
1926 d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG); 1925 d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
1927 /* subtle: needs to be short addition */
1928 d += signbits; 1926 d += signbits;
1929 data[n] = d; 1927 data[n] = d & 0xffff;
1930 } 1928 }
1931 } 1929 }
1932 } 1930 }
@@ -2729,66 +2727,36 @@ static int ni_ao_insn_write(struct comedi_device *dev,
2729 return insn->n; 2727 return insn->n;
2730} 2728}
2731 2729
2732static int ni_ao_insn_config(struct comedi_device *dev, 2730/*
2733 struct comedi_subdevice *s, 2731 * Arms the AO device in preparation for a trigger event.
2734 struct comedi_insn *insn, unsigned int *data) 2732 * This function also allocates and prepares a DMA channel (or FIFO if DMA is
2735{ 2733 * not used). As a part of this preparation, this function preloads the DAC
2736 const struct ni_board_struct *board = dev->board_ptr; 2734 * registers with the first values of the output stream. This ensures that the
2737 struct ni_private *devpriv = dev->private; 2735 * first clock cycle after the trigger can be used for output.
2738 unsigned int nbytes; 2736 *
2739 2737 * Note that this function _must_ happen after a user has written data to the
2740 switch (data[0]) { 2738 * output buffers via either mmap or write(fileno,...).
2741 case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: 2739 */
2742 switch (data[1]) { 2740static int ni_ao_arm(struct comedi_device *dev,
2743 case COMEDI_OUTPUT: 2741 struct comedi_subdevice *s)
2744 nbytes = comedi_samples_to_bytes(s,
2745 board->ao_fifo_depth);
2746 data[2] = 1 + nbytes;
2747 if (devpriv->mite)
2748 data[2] += devpriv->mite->fifo_size;
2749 break;
2750 case COMEDI_INPUT:
2751 data[2] = 0;
2752 break;
2753 default:
2754 return -EINVAL;
2755 }
2756 return 0;
2757 default:
2758 break;
2759 }
2760
2761 return -EINVAL;
2762}
2763
2764static int ni_ao_inttrig(struct comedi_device *dev,
2765 struct comedi_subdevice *s,
2766 unsigned int trig_num)
2767{ 2742{
2768 struct ni_private *devpriv = dev->private; 2743 struct ni_private *devpriv = dev->private;
2769 struct comedi_cmd *cmd = &s->async->cmd;
2770 int ret; 2744 int ret;
2771 int interrupt_b_bits; 2745 int interrupt_b_bits;
2772 int i; 2746 int i;
2773 static const int timeout = 1000; 2747 static const int timeout = 1000;
2774 2748
2775 /* 2749 /*
2776 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. 2750 * Prevent ao from doing things like trying to allocate the ao dma
2777 * For backwards compatibility, also allow trig_num == 0 when 2751 * channel multiple times.
2778 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
2779 * in that case, the internal trigger is being used as a pre-trigger
2780 * before the external trigger.
2781 */ 2752 */
2782 if (!(trig_num == cmd->start_arg || 2753 if (!devpriv->ao_needs_arming) {
2783 (trig_num == 0 && cmd->start_src != TRIG_INT))) 2754 dev_dbg(dev->class_dev, "%s: device does not need arming!\n",
2755 __func__);
2784 return -EINVAL; 2756 return -EINVAL;
2757 }
2785 2758
2786 /* 2759 devpriv->ao_needs_arming = 0;
2787 * Null trig at beginning prevent ao start trigger from executing more
2788 * than once per command (and doing things like trying to allocate the
2789 * ao dma channel multiple times).
2790 */
2791 s->async->inttrig = NULL;
2792 2760
2793 ni_set_bits(dev, NISTC_INTB_ENA_REG, 2761 ni_set_bits(dev, NISTC_INTB_ENA_REG,
2794 NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0); 2762 NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0);
@@ -2840,6 +2808,75 @@ static int ni_ao_inttrig(struct comedi_device *dev,
2840 devpriv->ao_cmd1, 2808 devpriv->ao_cmd1,
2841 NISTC_AO_CMD1_REG); 2809 NISTC_AO_CMD1_REG);
2842 2810
2811 return 0;
2812}
2813
2814static int ni_ao_insn_config(struct comedi_device *dev,
2815 struct comedi_subdevice *s,
2816 struct comedi_insn *insn, unsigned int *data)
2817{
2818 const struct ni_board_struct *board = dev->board_ptr;
2819 struct ni_private *devpriv = dev->private;
2820 unsigned int nbytes;
2821
2822 switch (data[0]) {
2823 case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
2824 switch (data[1]) {
2825 case COMEDI_OUTPUT:
2826 nbytes = comedi_samples_to_bytes(s,
2827 board->ao_fifo_depth);
2828 data[2] = 1 + nbytes;
2829 if (devpriv->mite)
2830 data[2] += devpriv->mite->fifo_size;
2831 break;
2832 case COMEDI_INPUT:
2833 data[2] = 0;
2834 break;
2835 default:
2836 return -EINVAL;
2837 }
2838 return 0;
2839 case INSN_CONFIG_ARM:
2840 return ni_ao_arm(dev, s);
2841 default:
2842 break;
2843 }
2844
2845 return -EINVAL;
2846}
2847
2848static int ni_ao_inttrig(struct comedi_device *dev,
2849 struct comedi_subdevice *s,
2850 unsigned int trig_num)
2851{
2852 struct ni_private *devpriv = dev->private;
2853 struct comedi_cmd *cmd = &s->async->cmd;
2854 int ret;
2855
2856 /*
2857 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
2858 * For backwards compatibility, also allow trig_num == 0 when
2859 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
2860 * in that case, the internal trigger is being used as a pre-trigger
2861 * before the external trigger.
2862 */
2863 if (!(trig_num == cmd->start_arg ||
2864 (trig_num == 0 && cmd->start_src != TRIG_INT)))
2865 return -EINVAL;
2866
2867 /*
2868 * Null trig at beginning prevent ao start trigger from executing more
2869 * than once per command.
2870 */
2871 s->async->inttrig = NULL;
2872
2873 if (devpriv->ao_needs_arming) {
2874 /* only arm this device if it still needs arming */
2875 ret = ni_ao_arm(dev, s);
2876 if (ret)
2877 return ret;
2878 }
2879
2843 ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2, 2880 ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2,
2844 NISTC_AO_CMD2_REG); 2881 NISTC_AO_CMD2_REG);
2845 2882
@@ -3227,10 +3264,17 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
3227 ni_ao_cmd_set_interrupts(dev, s); 3264 ni_ao_cmd_set_interrupts(dev, s);
3228 3265
3229 /* 3266 /*
3230 * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be 3267 * arm(ing) must happen later so that DMA can be setup and DACs
3231 * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA 3268 * preloaded with the actual output buffer before starting.
3232 * must be setup and initially written to before arm/start happen. 3269 *
3270 * start(ing) must happen _after_ arming is completed. Starting can be
3271 * done either via ni_ao_inttrig, or via an external trigger.
3272 *
3273 * **Currently, ni_ao_inttrig will automatically attempt a call to
3274 * ni_ao_arm if the device still needs arming at that point. This
3275 * allows backwards compatibility.
3233 */ 3276 */
3277 devpriv->ao_needs_arming = 1;
3234 return 0; 3278 return 0;
3235} 3279}
3236 3280
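
Taken together, the ni_ao_arm()/INSN_CONFIG_ARM hunks above let an application preload the output buffer and arm the board before an external start trigger fires, instead of arming implicitly from the internal trigger. A rough user-space sketch of that flow, assuming comedilib and an already-tested command with start_src = TRIG_EXT; dev, cmd, buf, nbytes and ao_subdev are placeholders:

	comedi_command(dev, &cmd);
	write(comedi_fileno(dev), buf, nbytes);	/* fill the output buffer first */

	lsampl_t arm = INSN_CONFIG_ARM;
	comedi_insn insn = {
		.insn   = INSN_CONFIG,
		.n      = 1,
		.data   = &arm,
		.subdev = ao_subdev,
	};
	comedi_do_insn(dev, &insn);	/* sets up DMA and preloads the DACs */
	/* output then starts on the external START1 trigger */
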
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1966519cb6e5..f27b545f83eb 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1053,6 +1053,20 @@ struct ni_private {
1053 unsigned int is_67xx:1; 1053 unsigned int is_67xx:1;
1054 unsigned int is_6711:1; 1054 unsigned int is_6711:1;
1055 unsigned int is_6713:1; 1055 unsigned int is_6713:1;
1056
1057 /*
1058 * Boolean value of whether device needs to be armed.
1059 *
1060 * Currently, only NI AO devices are known to be needing arming, since
1061 * the DAC registers must be preloaded before triggering.
1062 * This variable should only be set true during a command operation
1063 * (e.g ni_ao_cmd) and should then be set false by the arming
1064 * function (e.g. ni_ao_arm).
1065 *
1066 * This variable helps to ensure that multiple DMA allocations are not
1067 * possible.
1068 */
1069 unsigned int ao_needs_arming:1;
1056}; 1070};
1057 1071
1058static const struct comedi_lrange range_ni_E_ao_ext; 1072static const struct comedi_lrange range_ni_E_ao_ext;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 5ab49a798164..15cb4088467b 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -452,8 +452,9 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
452 unsigned int bits = 0; 452 unsigned int bits = 0;
453 unsigned int reg; 453 unsigned int reg;
454 unsigned int mode; 454 unsigned int mode;
455 unsigned int clk_src; 455 unsigned int clk_src = 0;
456 u64 ps; 456 u64 ps = 0;
457 int ret;
457 bool force_alt_sync; 458 bool force_alt_sync;
458 459
459 /* only m series and 660x variants have counting mode registers */ 460 /* only m series and 660x variants have counting mode registers */
@@ -483,9 +484,12 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
483 break; 484 break;
484 } 485 }
485 486
486 ni_tio_generic_clock_src_select(counter, &clk_src); 487 ret = ni_tio_generic_clock_src_select(counter, &clk_src);
487 ni_tio_clock_period_ps(counter, clk_src, &ps); 488 if (ret)
488 489 return;
490 ret = ni_tio_clock_period_ps(counter, clk_src, &ps);
491 if (ret)
492 return;
489 /* 493 /*
490 * It's not clear what we should do if clock_period is unknown, so we 494 * It's not clear what we should do if clock_period is unknown, so we
491 * are not using the alt sync bit in that case. 495 * are not using the alt sync bit in that case.
@@ -809,7 +813,7 @@ static int ni_tio_get_clock_src(struct ni_gpct *counter,
809 unsigned int *clock_source, 813 unsigned int *clock_source,
810 unsigned int *period_ns) 814 unsigned int *period_ns)
811{ 815{
812 u64 temp64; 816 u64 temp64 = 0;
813 int ret; 817 int ret;
814 818
815 ret = ni_tio_generic_clock_src_select(counter, clock_source); 819 ret = ni_tio_generic_clock_src_select(counter, clock_source);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 5aeed44dff70..5b5df0596ad9 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -771,9 +771,9 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
771 s->async->scans_done < cmd->stop_arg)) { 771 s->async->scans_done < cmd->stop_arg)) {
772 if (!devpriv->ai_cmd_canceled) { 772 if (!devpriv->ai_cmd_canceled) {
773 /* 773 /*
774 * Wait for running dma transfer to end, 774 * Wait for running dma transfer to end,
775 * do cleanup in interrupt. 775 * do cleanup in interrupt.
776 */ 776 */
777 devpriv->ai_cmd_canceled = 1; 777 devpriv->ai_cmd_canceled = 1;
778 return 0; 778 return 0;
779 } 779 }
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c14a02564432..0dd5fe286855 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -75,24 +75,24 @@ struct s626_buffer_dma {
75}; 75};
76 76
77struct s626_private { 77struct s626_private {
78 uint8_t ai_cmd_running; /* ai_cmd is running */ 78 u8 ai_cmd_running; /* ai_cmd is running */
79 unsigned int ai_sample_timer; /* time between samples in 79 unsigned int ai_sample_timer; /* time between samples in
80 * units of the timer */ 80 * units of the timer */
81 int ai_convert_count; /* conversion counter */ 81 int ai_convert_count; /* conversion counter */
82 unsigned int ai_convert_timer; /* time between conversion in 82 unsigned int ai_convert_timer; /* time between conversion in
83 * units of the timer */ 83 * units of the timer */
84 uint16_t counter_int_enabs; /* counter interrupt enable mask 84 u16 counter_int_enabs; /* counter interrupt enable mask
85 * for MISC2 register */ 85 * for MISC2 register */
86 uint8_t adc_items; /* number of items in ADC poll list */ 86 u8 adc_items; /* number of items in ADC poll list */
87 struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1) 87 struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
88 * program */ 88 * program */
89 struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data 89 struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
90 * and hold DAC data */ 90 * and hold DAC data */
91 uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer 91 u32 *dac_wbuf; /* pointer to logical adrs of DMA buffer
92 * used to hold DAC data */ 92 * used to hold DAC data */
93 uint16_t dacpol; /* image of DAC polarity register */ 93 u16 dacpol; /* image of DAC polarity register */
94 uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */ 94 u8 trim_setpoint[12]; /* images of TrimDAC setpoints */
95 uint32_t i2c_adrs; /* I2C device address for onboard EEPROM 95 u32 i2c_adrs; /* I2C device address for onboard EEPROM
96 * (board rev dependent) */ 96 * (board rev dependent) */
97}; 97};
98 98
@@ -179,7 +179,7 @@ static void s626_debi_transfer(struct comedi_device *dev)
179/* 179/*
180 * Read a value from a gate array register. 180 * Read a value from a gate array register.
181 */ 181 */
182static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr) 182static u16 s626_debi_read(struct comedi_device *dev, u16 addr)
183{ 183{
184 /* Set up DEBI control register value in shadow RAM */ 184 /* Set up DEBI control register value in shadow RAM */
185 writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD); 185 writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -193,8 +193,8 @@ static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
193/* 193/*
194 * Write a value to a gate array register. 194 * Write a value to a gate array register.
195 */ 195 */
196static void s626_debi_write(struct comedi_device *dev, uint16_t addr, 196static void s626_debi_write(struct comedi_device *dev, u16 addr,
197 uint16_t wdata) 197 u16 wdata)
198{ 198{
199 /* Set up DEBI control register value in shadow RAM */ 199 /* Set up DEBI control register value in shadow RAM */
200 writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD); 200 writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -241,7 +241,7 @@ static int s626_i2c_handshake_eoc(struct comedi_device *dev,
241 return -EBUSY; 241 return -EBUSY;
242} 242}
243 243
244static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val) 244static int s626_i2c_handshake(struct comedi_device *dev, u32 val)
245{ 245{
246 unsigned int ctrl; 246 unsigned int ctrl;
247 int ret; 247 int ret;
@@ -267,8 +267,8 @@ static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
267 return ctrl & S626_I2C_ERR; 267 return ctrl & S626_I2C_ERR;
268} 268}
269 269
270/* Read uint8_t from EEPROM. */ 270/* Read u8 from EEPROM. */
271static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr) 271static u8 s626_i2c_read(struct comedi_device *dev, u8 addr)
272{ 272{
273 struct s626_private *devpriv = dev->private; 273 struct s626_private *devpriv = dev->private;
274 274
@@ -304,10 +304,10 @@ static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
304/* *********** DAC FUNCTIONS *********** */ 304/* *********** DAC FUNCTIONS *********** */
305 305
306/* TrimDac LogicalChan-to-PhysicalChan mapping table. */ 306/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
307static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 }; 307static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
308 308
309/* TrimDac LogicalChan-to-EepromAdrs mapping table. */ 309/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
310static const uint8_t s626_trimadrs[] = { 310static const u8 s626_trimadrs[] = {
311 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 311 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
312}; 312};
313 313
@@ -357,7 +357,7 @@ static int s626_send_dac_eoc(struct comedi_device *dev,
357 * channel 2. Assumes: (1) TSL2 slot records initialized, and (2) 357 * channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
358 * dacpol contains valid target image. 358 * dacpol contains valid target image.
359 */ 359 */
360static int s626_send_dac(struct comedi_device *dev, uint32_t val) 360static int s626_send_dac(struct comedi_device *dev, u32 val)
361{ 361{
362 struct s626_private *devpriv = dev->private; 362 struct s626_private *devpriv = dev->private;
363 int ret; 363 int ret;
@@ -516,12 +516,12 @@ static int s626_send_dac(struct comedi_device *dev, uint32_t val)
516 * Private helper function: Write setpoint to an application DAC channel. 516 * Private helper function: Write setpoint to an application DAC channel.
517 */ 517 */
518static int s626_set_dac(struct comedi_device *dev, 518static int s626_set_dac(struct comedi_device *dev,
519 uint16_t chan, int16_t dacdata) 519 u16 chan, int16_t dacdata)
520{ 520{
521 struct s626_private *devpriv = dev->private; 521 struct s626_private *devpriv = dev->private;
522 uint16_t signmask; 522 u16 signmask;
523 uint32_t ws_image; 523 u32 ws_image;
524 uint32_t val; 524 u32 val;
525 525
526 /* 526 /*
527 * Adjust DAC data polarity and set up Polarity Control Register image. 527 * Adjust DAC data polarity and set up Polarity Control Register image.
@@ -535,7 +535,7 @@ static int s626_set_dac(struct comedi_device *dev,
535 } 535 }
536 536
537 /* Limit DAC setpoint value to valid range. */ 537 /* Limit DAC setpoint value to valid range. */
538 if ((uint16_t)dacdata > 0x1FFF) 538 if ((u16)dacdata > 0x1FFF)
539 dacdata = 0x1FFF; 539 dacdata = 0x1FFF;
540 540
541 /* 541 /*
@@ -575,23 +575,23 @@ static int s626_set_dac(struct comedi_device *dev,
575 * (write to non-existent trimdac). */ 575 * (write to non-existent trimdac). */
576 val |= 0x00004000; /* Address the two main dual-DAC devices 576 val |= 0x00004000; /* Address the two main dual-DAC devices
577 * (TSL's chip select enables target device). */ 577 * (TSL's chip select enables target device). */
578 val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel 578 val |= ((u32)(chan & 1) << 15); /* Address the DAC channel
579 * within the device. */ 579 * within the device. */
580 val |= (uint32_t)dacdata; /* Include DAC setpoint data. */ 580 val |= (u32)dacdata; /* Include DAC setpoint data. */
581 return s626_send_dac(dev, val); 581 return s626_send_dac(dev, val);
582} 582}
583 583
584static int s626_write_trim_dac(struct comedi_device *dev, 584static int s626_write_trim_dac(struct comedi_device *dev,
585 uint8_t logical_chan, uint8_t dac_data) 585 u8 logical_chan, u8 dac_data)
586{ 586{
587 struct s626_private *devpriv = dev->private; 587 struct s626_private *devpriv = dev->private;
588 uint32_t chan; 588 u32 chan;
589 589
590 /* 590 /*
591 * Save the new setpoint in case the application needs to read it back 591 * Save the new setpoint in case the application needs to read it back
592 * later. 592 * later.
593 */ 593 */
594 devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data; 594 devpriv->trim_setpoint[logical_chan] = (u8)dac_data;
595 595
596 /* Map logical channel number to physical channel number. */ 596 /* Map logical channel number to physical channel number. */
597 chan = s626_trimchan[logical_chan]; 597 chan = s626_trimchan[logical_chan];
@@ -633,7 +633,7 @@ static int s626_write_trim_dac(struct comedi_device *dev,
633 633
634static int s626_load_trim_dacs(struct comedi_device *dev) 634static int s626_load_trim_dacs(struct comedi_device *dev)
635{ 635{
636 uint8_t i; 636 u8 i;
637 int ret; 637 int ret;
638 638
639 /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */ 639 /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
@@ -661,7 +661,7 @@ static int s626_load_trim_dacs(struct comedi_device *dev)
661 * latches B. 661 * latches B.
662 */ 662 */
663static void s626_set_latch_source(struct comedi_device *dev, 663static void s626_set_latch_source(struct comedi_device *dev,
664 unsigned int chan, uint16_t value) 664 unsigned int chan, u16 value)
665{ 665{
666 s626_debi_replace(dev, S626_LP_CRB(chan), 666 s626_debi_replace(dev, S626_LP_CRB(chan),
667 ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC), 667 ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
@@ -672,7 +672,7 @@ static void s626_set_latch_source(struct comedi_device *dev,
672 * Write value into counter preload register. 672 * Write value into counter preload register.
673 */ 673 */
674static void s626_preload(struct comedi_device *dev, 674static void s626_preload(struct comedi_device *dev,
675 unsigned int chan, uint32_t value) 675 unsigned int chan, u32 value)
676{ 676{
677 s626_debi_write(dev, S626_LP_CNTR(chan), value); 677 s626_debi_write(dev, S626_LP_CNTR(chan), value);
678 s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16); 678 s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
@@ -686,7 +686,7 @@ static void s626_preload(struct comedi_device *dev,
686static void s626_reset_cap_flags(struct comedi_device *dev, 686static void s626_reset_cap_flags(struct comedi_device *dev,
687 unsigned int chan) 687 unsigned int chan)
688{ 688{
689 uint16_t set; 689 u16 set;
690 690
691 set = S626_SET_CRB_INTRESETCMD(1); 691 set = S626_SET_CRB_INTRESETCMD(1);
692 if (chan < 3) 692 if (chan < 3)
@@ -704,12 +704,12 @@ static void s626_reset_cap_flags(struct comedi_device *dev,
704 * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc. 704 * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
705 */ 705 */
706static void s626_set_mode_a(struct comedi_device *dev, 706static void s626_set_mode_a(struct comedi_device *dev,
707 unsigned int chan, uint16_t setup, 707 unsigned int chan, u16 setup,
708 uint16_t disable_int_src) 708 u16 disable_int_src)
709{ 709{
710 struct s626_private *devpriv = dev->private; 710 struct s626_private *devpriv = dev->private;
711 uint16_t cra; 711 u16 cra;
712 uint16_t crb; 712 u16 crb;
713 unsigned int cntsrc, clkmult, clkpol; 713 unsigned int cntsrc, clkmult, clkpol;
714 714
715 /* Initialize CRA and CRB images. */ 715 /* Initialize CRA and CRB images. */
@@ -782,12 +782,12 @@ static void s626_set_mode_a(struct comedi_device *dev,
782} 782}
783 783
784static void s626_set_mode_b(struct comedi_device *dev, 784static void s626_set_mode_b(struct comedi_device *dev,
785 unsigned int chan, uint16_t setup, 785 unsigned int chan, u16 setup,
786 uint16_t disable_int_src) 786 u16 disable_int_src)
787{ 787{
788 struct s626_private *devpriv = dev->private; 788 struct s626_private *devpriv = dev->private;
789 uint16_t cra; 789 u16 cra;
790 uint16_t crb; 790 u16 crb;
791 unsigned int cntsrc, clkmult, clkpol; 791 unsigned int cntsrc, clkmult, clkpol;
792 792
793 /* Initialize CRA and CRB images. */ 793 /* Initialize CRA and CRB images. */
@@ -868,7 +868,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
868 868
869static void s626_set_mode(struct comedi_device *dev, 869static void s626_set_mode(struct comedi_device *dev,
870 unsigned int chan, 870 unsigned int chan,
871 uint16_t setup, uint16_t disable_int_src) 871 u16 setup, u16 disable_int_src)
872{ 872{
873 if (chan < 3) 873 if (chan < 3)
874 s626_set_mode_a(dev, chan, setup, disable_int_src); 874 s626_set_mode_a(dev, chan, setup, disable_int_src);
@@ -880,7 +880,7 @@ static void s626_set_mode(struct comedi_device *dev,
880 * Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index. 880 * Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
881 */ 881 */
882static void s626_set_enable(struct comedi_device *dev, 882static void s626_set_enable(struct comedi_device *dev,
883 unsigned int chan, uint16_t enab) 883 unsigned int chan, u16 enab)
884{ 884{
885 unsigned int mask = S626_CRBMSK_INTCTRL; 885 unsigned int mask = S626_CRBMSK_INTCTRL;
886 unsigned int set; 886 unsigned int set;
@@ -901,11 +901,11 @@ static void s626_set_enable(struct comedi_device *dev,
901 * 2=OverflowA (B counters only), 3=disabled. 901 * 2=OverflowA (B counters only), 3=disabled.
902 */ 902 */
903static void s626_set_load_trig(struct comedi_device *dev, 903static void s626_set_load_trig(struct comedi_device *dev,
904 unsigned int chan, uint16_t trig) 904 unsigned int chan, u16 trig)
905{ 905{
906 uint16_t reg; 906 u16 reg;
907 uint16_t mask; 907 u16 mask;
908 uint16_t set; 908 u16 set;
909 909
910 if (chan < 3) { 910 if (chan < 3) {
911 reg = S626_LP_CRA(chan); 911 reg = S626_LP_CRA(chan);
@@ -925,11 +925,11 @@ static void s626_set_load_trig(struct comedi_device *dev,
925 * 2=IndexOnly, 3=IndexAndOverflow. 925 * 2=IndexOnly, 3=IndexAndOverflow.
926 */ 926 */
927static void s626_set_int_src(struct comedi_device *dev, 927static void s626_set_int_src(struct comedi_device *dev,
928 unsigned int chan, uint16_t int_source) 928 unsigned int chan, u16 int_source)
929{ 929{
930 struct s626_private *devpriv = dev->private; 930 struct s626_private *devpriv = dev->private;
931 uint16_t cra_reg = S626_LP_CRA(chan); 931 u16 cra_reg = S626_LP_CRA(chan);
932 uint16_t crb_reg = S626_LP_CRB(chan); 932 u16 crb_reg = S626_LP_CRB(chan);
933 933
934 if (chan < 3) { 934 if (chan < 3) {
935 /* Reset any pending counter overflow or index captures */ 935 /* Reset any pending counter overflow or index captures */
@@ -941,7 +941,7 @@ static void s626_set_int_src(struct comedi_device *dev,
941 s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A, 941 s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
942 S626_SET_CRA_INTSRC_A(int_source)); 942 S626_SET_CRA_INTSRC_A(int_source));
943 } else { 943 } else {
944 uint16_t crb; 944 u16 crb;
945 945
946 /* Cache writeable CRB register image */ 946 /* Cache writeable CRB register image */
947 crb = s626_debi_read(dev, crb_reg); 947 crb = s626_debi_read(dev, crb_reg);
@@ -985,7 +985,7 @@ static void s626_pulse_index(struct comedi_device *dev,
985 unsigned int chan) 985 unsigned int chan)
986{ 986{
987 if (chan < 3) { 987 if (chan < 3) {
988 uint16_t cra; 988 u16 cra;
989 989
990 cra = s626_debi_read(dev, S626_LP_CRA(chan)); 990 cra = s626_debi_read(dev, S626_LP_CRA(chan));
991 991
@@ -994,7 +994,7 @@ static void s626_pulse_index(struct comedi_device *dev,
994 (cra ^ S626_CRAMSK_INDXPOL_A)); 994 (cra ^ S626_CRAMSK_INDXPOL_A));
995 s626_debi_write(dev, S626_LP_CRA(chan), cra); 995 s626_debi_write(dev, S626_LP_CRA(chan), cra);
996 } else { 996 } else {
997 uint16_t crb; 997 u16 crb;
998 998
999 crb = s626_debi_read(dev, S626_LP_CRB(chan)); 999 crb = s626_debi_read(dev, S626_LP_CRB(chan));
1000 crb &= ~S626_CRBMSK_INTCTRL; 1000 crb &= ~S626_CRBMSK_INTCTRL;
@@ -1062,7 +1062,7 @@ static int s626_dio_clear_irq(struct comedi_device *dev)
1062} 1062}
1063 1063
1064static void s626_handle_dio_interrupt(struct comedi_device *dev, 1064static void s626_handle_dio_interrupt(struct comedi_device *dev,
1065 uint16_t irqbit, uint8_t group) 1065 u16 irqbit, u8 group)
1066{ 1066{
1067 struct s626_private *devpriv = dev->private; 1067 struct s626_private *devpriv = dev->private;
1068 struct comedi_subdevice *s = dev->read_subdev; 1068 struct comedi_subdevice *s = dev->read_subdev;
@@ -1110,8 +1110,8 @@ static void s626_handle_dio_interrupt(struct comedi_device *dev,
1110 1110
1111static void s626_check_dio_interrupts(struct comedi_device *dev) 1111static void s626_check_dio_interrupts(struct comedi_device *dev)
1112{ 1112{
1113 uint16_t irqbit; 1113 u16 irqbit;
1114 uint8_t group; 1114 u8 group;
1115 1115
1116 for (group = 0; group < S626_DIO_BANKS; group++) { 1116 for (group = 0; group < S626_DIO_BANKS; group++) {
1117 /* read interrupt type */ 1117 /* read interrupt type */
@@ -1131,7 +1131,7 @@ static void s626_check_counter_interrupts(struct comedi_device *dev)
1131 struct comedi_subdevice *s = dev->read_subdev; 1131 struct comedi_subdevice *s = dev->read_subdev;
1132 struct comedi_async *async = s->async; 1132 struct comedi_async *async = s->async;
1133 struct comedi_cmd *cmd = &async->cmd; 1133 struct comedi_cmd *cmd = &async->cmd;
1134 uint16_t irqbit; 1134 u16 irqbit;
1135 1135
1136 /* read interrupt type */ 1136 /* read interrupt type */
1137 irqbit = s626_debi_read(dev, S626_LP_RDMISC2); 1137 irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
@@ -1196,7 +1196,7 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
1196 * first uint16_t in the buffer because it contains junk data 1196 * first uint16_t in the buffer because it contains junk data
1197 * from the final ADC of the previous poll list scan. 1197 * from the final ADC of the previous poll list scan.
1198 */ 1198 */
1199 uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1; 1199 u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1;
1200 int i; 1200 int i;
1201 1201
1202 /* get the data and hand it over to comedi */ 1202 /* get the data and hand it over to comedi */
@@ -1231,7 +1231,7 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
1231{ 1231{
1232 struct comedi_device *dev = d; 1232 struct comedi_device *dev = d;
1233 unsigned long flags; 1233 unsigned long flags;
1234 uint32_t irqtype, irqstatus; 1234 u32 irqtype, irqstatus;
1235 1235
1236 if (!dev->attached) 1236 if (!dev->attached)
1237 return IRQ_NONE; 1237 return IRQ_NONE;
@@ -1272,25 +1272,25 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
1272/* 1272/*
1273 * This function builds the RPS program for hardware driven acquisition. 1273 * This function builds the RPS program for hardware driven acquisition.
1274 */ 1274 */
1275static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) 1275static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
1276{ 1276{
1277 struct s626_private *devpriv = dev->private; 1277 struct s626_private *devpriv = dev->private;
1278 struct comedi_subdevice *s = dev->read_subdev; 1278 struct comedi_subdevice *s = dev->read_subdev;
1279 struct comedi_cmd *cmd = &s->async->cmd; 1279 struct comedi_cmd *cmd = &s->async->cmd;
1280 uint32_t *rps; 1280 u32 *rps;
1281 uint32_t jmp_adrs; 1281 u32 jmp_adrs;
1282 uint16_t i; 1282 u16 i;
1283 uint16_t n; 1283 u16 n;
1284 uint32_t local_ppl; 1284 u32 local_ppl;
1285 1285
1286 /* Stop RPS program in case it is currently running */ 1286 /* Stop RPS program in case it is currently running */
1287 s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1); 1287 s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
1288 1288
1289 /* Set starting logical address to write RPS commands. */ 1289 /* Set starting logical address to write RPS commands. */
1290 rps = (uint32_t *)devpriv->rps_buf.logical_base; 1290 rps = (u32 *)devpriv->rps_buf.logical_base;
1291 1291
1292 /* Initialize RPS instruction pointer */ 1292 /* Initialize RPS instruction pointer */
1293 writel((uint32_t)devpriv->rps_buf.physical_base, 1293 writel((u32)devpriv->rps_buf.physical_base,
1294 dev->mmio + S626_P_RPSADDR1); 1294 dev->mmio + S626_P_RPSADDR1);
1295 1295
1296 /* Construct RPS program in rps_buf DMA buffer */ 1296 /* Construct RPS program in rps_buf DMA buffer */
@@ -1372,8 +1372,8 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
1372 * flushes the RPS' instruction prefetch pipeline. 1372 * flushes the RPS' instruction prefetch pipeline.
1373 */ 1373 */
1374 jmp_adrs = 1374 jmp_adrs =
1375 (uint32_t)devpriv->rps_buf.physical_base + 1375 (u32)devpriv->rps_buf.physical_base +
1376 (uint32_t)((unsigned long)rps - 1376 (u32)((unsigned long)rps -
1377 (unsigned long)devpriv-> 1377 (unsigned long)devpriv->
1378 rps_buf.logical_base); 1378 rps_buf.logical_base);
1379 for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) { 1379 for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
@@ -1408,7 +1408,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
1408 /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */ 1408 /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
1409 *rps++ = S626_RPS_STREG | 1409 *rps++ = S626_RPS_STREG |
1410 (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2); 1410 (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
1411 *rps++ = (uint32_t)devpriv->ana_buf.physical_base + 1411 *rps++ = (u32)devpriv->ana_buf.physical_base +
1412 (devpriv->adc_items << 2); 1412 (devpriv->adc_items << 2);
1413 1413
1414 /* 1414 /*
@@ -1452,7 +1452,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
1452 1452
1453 /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */ 1453 /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
1454 *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2); 1454 *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
1455 *rps++ = (uint32_t)devpriv->ana_buf.physical_base + 1455 *rps++ = (u32)devpriv->ana_buf.physical_base +
1456 (devpriv->adc_items << 2); 1456 (devpriv->adc_items << 2);
1457 1457
1458 /* Indicate ADC scan loop is finished. */ 1458 /* Indicate ADC scan loop is finished. */
@@ -1465,7 +1465,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
1465 1465
1466 /* Restart RPS program at its beginning. */ 1466 /* Restart RPS program at its beginning. */
1467 *rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */ 1467 *rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
1468 *rps++ = (uint32_t)devpriv->rps_buf.physical_base; 1468 *rps++ = (u32)devpriv->rps_buf.physical_base;
1469 1469
1470 /* End of RPS program build */ 1470 /* End of RPS program build */
1471} 1471}
@@ -1488,11 +1488,11 @@ static int s626_ai_insn_read(struct comedi_device *dev,
1488 struct comedi_insn *insn, 1488 struct comedi_insn *insn,
1489 unsigned int *data) 1489 unsigned int *data)
1490{ 1490{
1491 uint16_t chan = CR_CHAN(insn->chanspec); 1491 u16 chan = CR_CHAN(insn->chanspec);
1492 uint16_t range = CR_RANGE(insn->chanspec); 1492 u16 range = CR_RANGE(insn->chanspec);
1493 uint16_t adc_spec = 0; 1493 u16 adc_spec = 0;
1494 uint32_t gpio_image; 1494 u32 gpio_image;
1495 uint32_t tmp; 1495 u32 tmp;
1496 int ret; 1496 int ret;
1497 int n; 1497 int n;
1498 1498
@@ -1585,7 +1585,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
1585 return n; 1585 return n;
1586} 1586}
1587 1587
1588static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd) 1588static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd)
1589{ 1589{
1590 int n; 1590 int n;
1591 1591
@@ -1651,7 +1651,7 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
1651static void s626_timer_load(struct comedi_device *dev, 1651static void s626_timer_load(struct comedi_device *dev,
1652 unsigned int chan, int tick) 1652 unsigned int chan, int tick)
1653{ 1653{
1654 uint16_t setup = 1654 u16 setup =
1655 /* Preload upon index. */ 1655 /* Preload upon index. */
1656 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | 1656 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
1657 /* Disable hardware index. */ 1657 /* Disable hardware index. */
@@ -1664,7 +1664,7 @@ static void s626_timer_load(struct comedi_device *dev,
1664 S626_SET_STD_CLKMULT(S626_CLKMULT_1X) | 1664 S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
1665 /* Enabled by index */ 1665 /* Enabled by index */
1666 S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX); 1666 S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
1667 uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA; 1667 u16 value_latchsrc = S626_LATCHSRC_A_INDXA;
1668 /* uint16_t enab = S626_CLKENAB_ALWAYS; */ 1668 /* uint16_t enab = S626_CLKENAB_ALWAYS; */
1669 1669
1670 s626_set_mode(dev, chan, setup, false); 1670 s626_set_mode(dev, chan, setup, false);
@@ -1693,7 +1693,7 @@ static void s626_timer_load(struct comedi_device *dev,
1693static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) 1693static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1694{ 1694{
1695 struct s626_private *devpriv = dev->private; 1695 struct s626_private *devpriv = dev->private;
1696 uint8_t ppl[16]; 1696 u8 ppl[16];
1697 struct comedi_cmd *cmd = &s->async->cmd; 1697 struct comedi_cmd *cmd = &s->async->cmd;
1698 int tick; 1698 int tick;
1699 1699
@@ -1953,7 +1953,7 @@ static int s626_ao_insn_write(struct comedi_device *dev,
1953 1953
1954static void s626_dio_init(struct comedi_device *dev) 1954static void s626_dio_init(struct comedi_device *dev)
1955{ 1955{
1956 uint16_t group; 1956 u16 group;
1957 1957
1958 /* Prepare to treat writes to WRCapSel as capture disables. */ 1958 /* Prepare to treat writes to WRCapSel as capture disables. */
1959 s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP); 1959 s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
@@ -2017,7 +2017,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
2017 struct comedi_insn *insn, unsigned int *data) 2017 struct comedi_insn *insn, unsigned int *data)
2018{ 2018{
2019 unsigned int chan = CR_CHAN(insn->chanspec); 2019 unsigned int chan = CR_CHAN(insn->chanspec);
2020 uint16_t setup = 2020 u16 setup =
2021 /* Preload upon index. */ 2021 /* Preload upon index. */
2022 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | 2022 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
2023 /* Disable hardware index. */ 2023 /* Disable hardware index. */
@@ -2032,8 +2032,8 @@ static int s626_enc_insn_config(struct comedi_device *dev,
2032 S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX); 2032 S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
2033 /* uint16_t disable_int_src = true; */ 2033 /* uint16_t disable_int_src = true; */
2034 /* uint32_t Preloadvalue; //Counter initial value */ 2034 /* uint32_t Preloadvalue; //Counter initial value */
2035 uint16_t value_latchsrc = S626_LATCHSRC_AB_READ; 2035 u16 value_latchsrc = S626_LATCHSRC_AB_READ;
2036 uint16_t enab = S626_CLKENAB_ALWAYS; 2036 u16 enab = S626_CLKENAB_ALWAYS;
2037 2037
2038 /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */ 2038 /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
2039 2039
@@ -2052,7 +2052,7 @@ static int s626_enc_insn_read(struct comedi_device *dev,
2052 unsigned int *data) 2052 unsigned int *data)
2053{ 2053{
2054 unsigned int chan = CR_CHAN(insn->chanspec); 2054 unsigned int chan = CR_CHAN(insn->chanspec);
2055 uint16_t cntr_latch_reg = S626_LP_CNTR(chan); 2055 u16 cntr_latch_reg = S626_LP_CNTR(chan);
2056 int i; 2056 int i;
2057 2057
2058 for (i = 0; i < insn->n; i++) { 2058 for (i = 0; i < insn->n; i++) {
@@ -2090,7 +2090,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
2090 return 1; 2090 return 1;
2091} 2091}
2092 2092
2093static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image) 2093static void s626_write_misc2(struct comedi_device *dev, u16 new_image)
2094{ 2094{
2095 s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE); 2095 s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
2096 s626_debi_write(dev, S626_LP_WRMISC2, new_image); 2096 s626_debi_write(dev, S626_LP_WRMISC2, new_image);
@@ -2100,7 +2100,7 @@ static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
2100static void s626_counters_init(struct comedi_device *dev) 2100static void s626_counters_init(struct comedi_device *dev)
2101{ 2101{
2102 int chan; 2102 int chan;
2103 uint16_t setup = 2103 u16 setup =
2104 /* Preload upon index. */ 2104 /* Preload upon index. */
2105 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | 2105 S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
2106 /* Disable hardware index. */ 2106 /* Disable hardware index. */
@@ -2169,7 +2169,7 @@ static int s626_initialize(struct comedi_device *dev)
2169{ 2169{
2170 struct s626_private *devpriv = dev->private; 2170 struct s626_private *devpriv = dev->private;
2171 dma_addr_t phys_buf; 2171 dma_addr_t phys_buf;
2172 uint16_t chan; 2172 u16 chan;
2173 int i; 2173 int i;
2174 int ret; 2174 int ret;
2175 2175
@@ -2248,7 +2248,7 @@ static int s626_initialize(struct comedi_device *dev)
2248 */ 2248 */
2249 2249
2250 /* Physical start of RPS program */ 2250 /* Physical start of RPS program */
2251 writel((uint32_t)devpriv->rps_buf.physical_base, 2251 writel((u32)devpriv->rps_buf.physical_base,
2252 dev->mmio + S626_P_RPSADDR1); 2252 dev->mmio + S626_P_RPSADDR1);
2253 /* RPS program performs no explicit mem writes */ 2253 /* RPS program performs no explicit mem writes */
2254 writel(0, dev->mmio + S626_P_RPSPAGE1); 2254 writel(0, dev->mmio + S626_P_RPSPAGE1);
@@ -2318,16 +2318,16 @@ static int s626_initialize(struct comedi_device *dev)
2318 * enabled. 2318 * enabled.
2319 */ 2319 */
2320 phys_buf = devpriv->ana_buf.physical_base + 2320 phys_buf = devpriv->ana_buf.physical_base +
2321 (S626_DAC_WDMABUF_OS * sizeof(uint32_t)); 2321 (S626_DAC_WDMABUF_OS * sizeof(u32));
2322 writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT); 2322 writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
2323 writel((uint32_t)(phys_buf + sizeof(uint32_t)), 2323 writel((u32)(phys_buf + sizeof(u32)),
2324 dev->mmio + S626_P_PROTA2_OUT); 2324 dev->mmio + S626_P_PROTA2_OUT);
2325 2325
2326 /* 2326 /*
2327 * Cache Audio2's output DMA buffer logical address. This is 2327 * Cache Audio2's output DMA buffer logical address. This is
2328 * where DAC data is buffered for A2 output DMA transfers. 2328 * where DAC data is buffered for A2 output DMA transfers.
2329 */ 2329 */
2330 devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base + 2330 devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base +
2331 S626_DAC_WDMABUF_OS; 2331 S626_DAC_WDMABUF_OS;
2332 2332
2333 /* 2333 /*
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index d0a8a28edd36..55d43c076b1c 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -250,3 +250,15 @@ int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
250 return n; 250 return n;
251} 251}
252EXPORT_SYMBOL_GPL(comedi_get_n_channels); 252EXPORT_SYMBOL_GPL(comedi_get_n_channels);
253
254static int __init kcomedilib_module_init(void)
255{
256 return 0;
257}
258
259static void __exit kcomedilib_module_exit(void)
260{
261}
262
263module_init(kcomedilib_module_init);
264module_exit(kcomedilib_module_exit);
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
index 995c874f40eb..40ff0d007695 100644
--- a/drivers/staging/dgnc/Makefile
+++ b/drivers/staging/dgnc/Makefile
@@ -2,5 +2,4 @@ obj-$(CONFIG_DGNC) += dgnc.o
2 2
3dgnc-objs := dgnc_cls.o dgnc_driver.o\ 3dgnc-objs := dgnc_cls.o dgnc_driver.o\
4 dgnc_mgmt.o dgnc_neo.o\ 4 dgnc_mgmt.o dgnc_neo.o\
5 dgnc_tty.o dgnc_sysfs.o\ 5 dgnc_tty.o dgnc_utils.o
6 dgnc_utils.o
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index aedca66cbe41..c20ffdd254d8 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -385,9 +385,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
385 ch->ch_rxcount++; 385 ch->ch_rxcount++;
386 } 386 }
387 387
388 /* 388 /* Write new final heads to channel structure. */
389 * Write new final heads to channel structure. 389
390 */
391 ch->ch_r_head = head & RQUEUEMASK; 390 ch->ch_r_head = head & RQUEUEMASK;
392 ch->ch_e_head = head & EQUEUEMASK; 391 ch->ch_e_head = head & EQUEUEMASK;
393 392
@@ -666,9 +665,8 @@ static void cls_param(struct tty_struct *tty)
666 if (!bd || bd->magic != DGNC_BOARD_MAGIC) 665 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
667 return; 666 return;
668 667
669 /* 668 /* If baud rate is zero, flush queues, and set mval to drop DTR. */
670 * If baud rate is zero, flush queues, and set mval to drop DTR. 669
671 */
672 if ((ch->ch_c_cflag & (CBAUD)) == 0) { 670 if ((ch->ch_c_cflag & (CBAUD)) == 0) {
673 ch->ch_r_head = 0; 671 ch->ch_r_head = 0;
674 ch->ch_r_tail = 0; 672 ch->ch_r_tail = 0;
@@ -887,9 +885,8 @@ static void cls_param(struct tty_struct *tty)
887 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); 885 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
888} 886}
889 887
890/* 888/* Our board poller function. */
891 * Our board poller function. 889
892 */
893static void cls_tasklet(unsigned long data) 890static void cls_tasklet(unsigned long data)
894{ 891{
895 struct dgnc_board *bd = (struct dgnc_board *)data; 892 struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -914,9 +911,8 @@ static void cls_tasklet(unsigned long data)
914 */ 911 */
915 spin_lock_irqsave(&bd->bd_intr_lock, flags); 912 spin_lock_irqsave(&bd->bd_intr_lock, flags);
916 913
917 /* 914 /* If board is ready, parse deeper to see if there is anything to do. */
918 * If board is ready, parse deeper to see if there is anything to do. 915
919 */
920 if ((state == BOARD_READY) && (ports > 0)) { 916 if ((state == BOARD_READY) && (ports > 0)) {
921 /* Loop on each port */ 917 /* Loop on each port */
922 for (i = 0; i < ports; i++) { 918 for (i = 0; i < ports; i++) {
@@ -938,9 +934,8 @@ static void cls_tasklet(unsigned long data)
938 cls_copy_data_from_queue_to_uart(ch); 934 cls_copy_data_from_queue_to_uart(ch);
939 dgnc_wakeup_writes(ch); 935 dgnc_wakeup_writes(ch);
940 936
941 /* 937 /* Check carrier function. */
942 * Check carrier function. 938
943 */
944 dgnc_carrier(ch); 939 dgnc_carrier(ch);
945 940
946 /* 941 /*
@@ -992,9 +987,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
992 for (i = 0; i < brd->nasync; i++) 987 for (i = 0; i < brd->nasync; i++)
993 cls_parse_isr(brd, i); 988 cls_parse_isr(brd, i);
994 989
995 /* 990 /* Schedule tasklet to more in-depth servicing at a better time. */
996 * Schedule tasklet to more in-depth servicing at a better time. 991
997 */
998 tasklet_schedule(&brd->helper_tasklet); 992 tasklet_schedule(&brd->helper_tasklet);
999 993
1000 spin_unlock_irqrestore(&brd->bd_intr_lock, flags); 994 spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1043,9 +1037,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
1043 un->un_flags |= UN_EMPTY; 1037 un->un_flags |= UN_EMPTY;
1044 spin_unlock_irqrestore(&ch->ch_lock, flags); 1038 spin_unlock_irqrestore(&ch->ch_lock, flags);
1045 1039
1046 /* 1040 /* NOTE: Do something with time passed in. */
1047 * NOTE: Do something with time passed in.
1048 */
1049 1041
1050 /* If ret is non-zero, user ctrl-c'ed us */ 1042 /* If ret is non-zero, user ctrl-c'ed us */
1051 1043
@@ -1112,9 +1104,8 @@ static void cls_uart_init(struct channel_t *ch)
1112 readb(&ch->ch_cls_uart->msr); 1104 readb(&ch->ch_cls_uart->msr);
1113} 1105}
1114 1106
1115/* 1107/* Turns off UART. */
1116 * Turns off UART. 1108
1117 */
1118static void cls_uart_off(struct channel_t *ch) 1109static void cls_uart_off(struct channel_t *ch)
1119{ 1110{
1120 writeb(0, &ch->ch_cls_uart->ier); 1111 writeb(0, &ch->ch_cls_uart->ier);
@@ -1160,9 +1151,8 @@ static void cls_send_break(struct channel_t *ch, int msecs)
1160 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) 1151 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1161 return; 1152 return;
1162 1153
1163 /* 1154 /* If we receive a time of 0, this means turn off the break. */
1164 * If we receive a time of 0, this means turn off the break. 1155
1165 */
1166 if (msecs == 0) { 1156 if (msecs == 0) {
1167 /* Turn break off, and unset some variables */ 1157 /* Turn break off, and unset some variables */
1168 if (ch->ch_flags & CH_BREAK_SENDING) { 1158 if (ch->ch_flags & CH_BREAK_SENDING) {
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 2597e36d38c4..463ad30efb3b 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -69,7 +69,7 @@ struct cls_uart_struct {
69#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ 69#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
70#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ 70#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
71#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ 71#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
72#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ 72#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
73#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */ 73#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
74#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */ 74#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
75#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */ 75#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index fd372d3afa46..5381dbddd8bb 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -24,31 +24,14 @@
24#include "dgnc_tty.h" 24#include "dgnc_tty.h"
25#include "dgnc_cls.h" 25#include "dgnc_cls.h"
26#include "dgnc_neo.h" 26#include "dgnc_neo.h"
27#include "dgnc_sysfs.h"
28 27
29MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Digi International, http://www.digi.com"); 29MODULE_AUTHOR("Digi International, http://www.digi.com");
31MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); 30MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
32MODULE_SUPPORTED_DEVICE("dgnc"); 31MODULE_SUPPORTED_DEVICE("dgnc");
33 32
34/************************************************************************** 33/* File operations permitted on Control/Management major. */
35 *
36 * protos for this file
37 *
38 */
39static int dgnc_start(void);
40static int dgnc_request_irq(struct dgnc_board *brd);
41static void dgnc_free_irq(struct dgnc_board *brd);
42static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id);
43static void dgnc_cleanup_board(struct dgnc_board *brd);
44static void dgnc_poll_handler(ulong dummy);
45static int dgnc_init_one(struct pci_dev *pdev,
46 const struct pci_device_id *ent);
47static int dgnc_do_remap(struct dgnc_board *brd);
48 34
49/*
50 * File operations permitted on Control/Management major.
51 */
52static const struct file_operations dgnc_board_fops = { 35static const struct file_operations dgnc_board_fops = {
53 .owner = THIS_MODULE, 36 .owner = THIS_MODULE,
54 .unlocked_ioctl = dgnc_mgmt_ioctl, 37 .unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -56,9 +39,8 @@ static const struct file_operations dgnc_board_fops = {
56 .release = dgnc_mgmt_close 39 .release = dgnc_mgmt_close
57}; 40};
58 41
59/* 42/* Globals */
60 * Globals 43
61 */
62uint dgnc_num_boards; 44uint dgnc_num_boards;
63struct dgnc_board *dgnc_board[MAXBOARDS]; 45struct dgnc_board *dgnc_board[MAXBOARDS];
64DEFINE_SPINLOCK(dgnc_global_lock); 46DEFINE_SPINLOCK(dgnc_global_lock);
@@ -66,14 +48,12 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
66uint dgnc_major; 48uint dgnc_major;
67int dgnc_poll_tick = 20; /* Poll interval - 20 ms */ 49int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
68 50
69/* 51/* Static vars. */
70 * Static vars. 52
71 */
72static struct class *dgnc_class; 53static struct class *dgnc_class;
73 54
74/* 55/* Poller stuff */
75 * Poller stuff 56
76 */
77static ulong dgnc_poll_time; /* Time of next poll */ 57static ulong dgnc_poll_time; /* Time of next poll */
78static uint dgnc_poll_stop; /* Used to tell poller to stop */ 58static uint dgnc_poll_stop; /* Used to tell poller to stop */
79static struct timer_list dgnc_poll_timer; 59static struct timer_list dgnc_poll_timer;
@@ -93,7 +73,7 @@ struct board_id {
93 unsigned int is_pci_express; 73 unsigned int is_pci_express;
94}; 74};
95 75
96static struct board_id dgnc_ids[] = { 76static const struct board_id dgnc_ids[] = {
97 { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 }, 77 { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
98 { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 }, 78 { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
99 { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 }, 79 { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -114,274 +94,20 @@ static struct board_id dgnc_ids[] = {
114 { NULL, 0, 0 } 94 { NULL, 0, 0 }
115}; 95};
116 96
117static struct pci_driver dgnc_driver = { 97/* Remap PCI memory. */
118 .name = "dgnc",
119 .probe = dgnc_init_one,
120 .id_table = dgnc_pci_tbl,
121};
122
123/************************************************************************
124 *
125 * Driver load/unload functions
126 *
127 ************************************************************************/
128
129static void cleanup(bool sysfiles)
130{
131 int i;
132 unsigned long flags;
133
134 spin_lock_irqsave(&dgnc_poll_lock, flags);
135 dgnc_poll_stop = 1;
136 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
137
138 /* Turn off poller right away. */
139 del_timer_sync(&dgnc_poll_timer);
140
141 if (sysfiles)
142 dgnc_remove_driver_sysfiles(&dgnc_driver);
143
144 device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
145 class_destroy(dgnc_class);
146 unregister_chrdev(dgnc_major, "dgnc");
147
148 for (i = 0; i < dgnc_num_boards; ++i) {
149 dgnc_remove_ports_sysfiles(dgnc_board[i]);
150 dgnc_cleanup_tty(dgnc_board[i]);
151 dgnc_cleanup_board(dgnc_board[i]);
152 }
153
154 dgnc_tty_post_uninit();
155}
156
157/*
158 * dgnc_cleanup_module()
159 *
160 * Module unload. This is where it all ends.
161 */
162static void __exit dgnc_cleanup_module(void)
163{
164 cleanup(true);
165 pci_unregister_driver(&dgnc_driver);
166}
167
168/*
169 * init_module()
170 *
171 * Module load. This is where it all starts.
172 */
173static int __init dgnc_init_module(void)
174{
175 int rc;
176
177 /*
178 * Initialize global stuff
179 */
180 rc = dgnc_start();
181
182 if (rc < 0)
183 return rc;
184
185 /*
186 * Find and configure all the cards
187 */
188 rc = pci_register_driver(&dgnc_driver);
189 if (rc) {
190 pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
191 cleanup(false);
192 return rc;
193 }
194 dgnc_create_driver_sysfiles(&dgnc_driver);
195
196 return 0;
197}
198
199module_init(dgnc_init_module);
200module_exit(dgnc_cleanup_module);
201 98
202/* 99static int dgnc_do_remap(struct dgnc_board *brd)
203 * Start of driver.
204 */
205static int dgnc_start(void)
206{ 100{
207 int rc = 0; 101 int rc = 0;
208 unsigned long flags;
209 struct device *dev;
210
211 /* make sure timer is initialized before we do anything else */
212 init_timer(&dgnc_poll_timer);
213
214 /*
215 * Register our base character device into the kernel.
216 * This allows the download daemon to connect to the downld device
217 * before any of the boards are init'ed.
218 *
219 * Register management/dpa devices
220 */
221 rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
222 if (rc < 0) {
223 pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
224 return rc;
225 }
226 dgnc_major = rc;
227
228 dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
229 if (IS_ERR(dgnc_class)) {
230 rc = PTR_ERR(dgnc_class);
231 pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
232 goto failed_class;
233 }
234
235 dev = device_create(dgnc_class, NULL,
236 MKDEV(dgnc_major, 0),
237 NULL, "dgnc_mgmt");
238 if (IS_ERR(dev)) {
239 rc = PTR_ERR(dev);
240 pr_err(DRVSTR ": Can't create device (%d)\n", rc);
241 goto failed_device;
242 }
243
244 /*
245 * Init any global tty stuff.
246 */
247 rc = dgnc_tty_preinit();
248
249 if (rc < 0) {
250 pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
251 goto failed_tty;
252 }
253
254 /* Start the poller */
255 spin_lock_irqsave(&dgnc_poll_lock, flags);
256 setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
257 dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
258 dgnc_poll_timer.expires = dgnc_poll_time;
259 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
260
261 add_timer(&dgnc_poll_timer);
262
263 return 0;
264
265failed_tty:
266 device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
267failed_device:
268 class_destroy(dgnc_class);
269failed_class:
270 unregister_chrdev(dgnc_major, "dgnc");
271 return rc;
272}
273
274/* returns count (>= 0), or negative on error */
275static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
276{
277 int rc;
278 struct dgnc_board *brd;
279
280 /* wake up and enable device */
281 rc = pci_enable_device(pdev);
282
283 if (rc)
284 return -EIO;
285
286 brd = dgnc_found_board(pdev, ent->driver_data);
287 if (IS_ERR(brd))
288 return PTR_ERR(brd);
289
290 /*
291 * Do tty device initialization.
292 */
293
294 rc = dgnc_tty_register(brd);
295 if (rc < 0) {
296 pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
297 goto failed;
298 }
299
300 rc = dgnc_request_irq(brd);
301 if (rc < 0) {
302 pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
303 goto unregister_tty;
304 }
305
306 rc = dgnc_tty_init(brd);
307 if (rc < 0) {
308 pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
309 goto free_irq;
310 }
311
312 brd->state = BOARD_READY;
313 brd->dpastatus = BD_RUNNING;
314 102
315 dgnc_create_ports_sysfiles(brd); 103 brd->re_map_membase = ioremap(brd->membase, 0x1000);
316 104 if (!brd->re_map_membase)
317 dgnc_board[dgnc_num_boards++] = brd; 105 rc = -ENOMEM;
318
319 return 0;
320
321free_irq:
322 dgnc_free_irq(brd);
323unregister_tty:
324 dgnc_tty_unregister(brd);
325
326failed:
327 kfree(brd);
328 106
329 return rc; 107 return rc;
330} 108}
331 109
332/* 110/*
333 * dgnc_cleanup_board()
334 *
335 * Free all the memory associated with a board
336 */
337static void dgnc_cleanup_board(struct dgnc_board *brd)
338{
339 int i = 0;
340
341 if (!brd || brd->magic != DGNC_BOARD_MAGIC)
342 return;
343
344 switch (brd->device) {
345 case PCI_DEVICE_CLASSIC_4_DID:
346 case PCI_DEVICE_CLASSIC_8_DID:
347 case PCI_DEVICE_CLASSIC_4_422_DID:
348 case PCI_DEVICE_CLASSIC_8_422_DID:
349
350 /* Tell card not to interrupt anymore. */
351 outb(0, brd->iobase + 0x4c);
352 break;
353
354 default:
355 break;
356 }
357
358 if (brd->irq)
359 free_irq(brd->irq, brd);
360
361 tasklet_kill(&brd->helper_tasklet);
362
363 if (brd->re_map_membase) {
364 iounmap(brd->re_map_membase);
365 brd->re_map_membase = NULL;
366 }
367
368 /* Free all allocated channels structs */
369 for (i = 0; i < MAXPORTS ; i++) {
370 if (brd->channels[i]) {
371 kfree(brd->channels[i]->ch_rqueue);
372 kfree(brd->channels[i]->ch_equeue);
373 kfree(brd->channels[i]->ch_wqueue);
374 kfree(brd->channels[i]);
375 brd->channels[i] = NULL;
376 }
377 }
378
379 dgnc_board[brd->boardnum] = NULL;
380
381 kfree(brd);
382}
383
384/*
385 * dgnc_found_board() 111 * dgnc_found_board()
386 * 112 *
387 * A board has been found, init it. 113 * A board has been found, init it.
@@ -587,21 +313,6 @@ static void dgnc_free_irq(struct dgnc_board *brd)
587} 313}
588 314
589/* 315/*
590 * Remap PCI memory.
591 */
592static int dgnc_do_remap(struct dgnc_board *brd)
593{
594 int rc = 0;
595
596 brd->re_map_membase = ioremap(brd->membase, 0x1000);
597 if (!brd->re_map_membase)
598 rc = -ENOMEM;
599
600 return rc;
601}
602
603/*
604 *
605 * Function: 316 * Function:
606 * 317 *
607 * dgnc_poll_handler 318 * dgnc_poll_handler
@@ -623,7 +334,6 @@ static int dgnc_do_remap(struct dgnc_board *brd)
623 * As each timer expires, it determines (a) whether the "transmit" 334 * As each timer expires, it determines (a) whether the "transmit"
624 * waiter needs to be woken up, and (b) whether the poller needs to 335 * waiter needs to be woken up, and (b) whether the poller needs to
625 * be rescheduled. 336 * be rescheduled.
626 *
627 */ 337 */
628 338
629static void dgnc_poll_handler(ulong dummy) 339static void dgnc_poll_handler(ulong dummy)
@@ -651,9 +361,8 @@ static void dgnc_poll_handler(ulong dummy)
651 spin_unlock_irqrestore(&brd->bd_lock, flags); 361 spin_unlock_irqrestore(&brd->bd_lock, flags);
652 } 362 }
653 363
654 /* 364 /* Schedule ourself back at the nominal wakeup interval. */
655 * Schedule ourself back at the nominal wakeup interval. 365
656 */
657 spin_lock_irqsave(&dgnc_poll_lock, flags); 366 spin_lock_irqsave(&dgnc_poll_lock, flags);
658 dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick); 367 dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
659 368
@@ -669,3 +378,240 @@ static void dgnc_poll_handler(ulong dummy)
669 if (!dgnc_poll_stop) 378 if (!dgnc_poll_stop)
670 add_timer(&dgnc_poll_timer); 379 add_timer(&dgnc_poll_timer);
671} 380}
381
382/* returns count (>= 0), or negative on error */
383static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
384{
385 int rc;
386 struct dgnc_board *brd;
387
388 /* wake up and enable device */
389 rc = pci_enable_device(pdev);
390
391 if (rc)
392 return -EIO;
393
394 brd = dgnc_found_board(pdev, ent->driver_data);
395 if (IS_ERR(brd))
396 return PTR_ERR(brd);
397
398 /* Do tty device initialization. */
399
400 rc = dgnc_tty_register(brd);
401 if (rc < 0) {
402 pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
403 goto failed;
404 }
405
406 rc = dgnc_request_irq(brd);
407 if (rc < 0) {
408 pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
409 goto unregister_tty;
410 }
411
412 rc = dgnc_tty_init(brd);
413 if (rc < 0) {
414 pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
415 goto free_irq;
416 }
417
418 brd->state = BOARD_READY;
419 brd->dpastatus = BD_RUNNING;
420
421 dgnc_board[dgnc_num_boards++] = brd;
422
423 return 0;
424
425free_irq:
426 dgnc_free_irq(brd);
427unregister_tty:
428 dgnc_tty_unregister(brd);
429
430failed:
431 kfree(brd);
432
433 return rc;
434}
435
436static struct pci_driver dgnc_driver = {
437 .name = "dgnc",
438 .probe = dgnc_init_one,
439 .id_table = dgnc_pci_tbl,
440};
441
442/* Start of driver. */
443
444static int dgnc_start(void)
445{
446 int rc = 0;
447 unsigned long flags;
448 struct device *dev;
449
450 /* make sure timer is initialized before we do anything else */
451 init_timer(&dgnc_poll_timer);
452
453 /*
454 * Register our base character device into the kernel.
455 * This allows the download daemon to connect to the downld device
456 * before any of the boards are init'ed.
457 *
458 * Register management/dpa devices
459 */
460 rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
461 if (rc < 0) {
462 pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
463 return rc;
464 }
465 dgnc_major = rc;
466
467 dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
468 if (IS_ERR(dgnc_class)) {
469 rc = PTR_ERR(dgnc_class);
470 pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
471 goto failed_class;
472 }
473
474 dev = device_create(dgnc_class, NULL,
475 MKDEV(dgnc_major, 0),
476 NULL, "dgnc_mgmt");
477 if (IS_ERR(dev)) {
478 rc = PTR_ERR(dev);
479 pr_err(DRVSTR ": Can't create device (%d)\n", rc);
480 goto failed_device;
481 }
482
483 /* Start the poller */
484 spin_lock_irqsave(&dgnc_poll_lock, flags);
485 setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
486 dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
487 dgnc_poll_timer.expires = dgnc_poll_time;
488 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
489
490 add_timer(&dgnc_poll_timer);
491
492 return 0;
493
494failed_device:
495 class_destroy(dgnc_class);
496failed_class:
497 unregister_chrdev(dgnc_major, "dgnc");
498 return rc;
499}
500
501/*
502 * dgnc_cleanup_board()
503 *
504 * Free all the memory associated with a board
505 */
506static void dgnc_cleanup_board(struct dgnc_board *brd)
507{
508 int i = 0;
509
510 if (!brd || brd->magic != DGNC_BOARD_MAGIC)
511 return;
512
513 switch (brd->device) {
514 case PCI_DEVICE_CLASSIC_4_DID:
515 case PCI_DEVICE_CLASSIC_8_DID:
516 case PCI_DEVICE_CLASSIC_4_422_DID:
517 case PCI_DEVICE_CLASSIC_8_422_DID:
518
519 /* Tell card not to interrupt anymore. */
520 outb(0, brd->iobase + 0x4c);
521 break;
522
523 default:
524 break;
525 }
526
527 if (brd->irq)
528 free_irq(brd->irq, brd);
529
530 tasklet_kill(&brd->helper_tasklet);
531
532 if (brd->re_map_membase) {
533 iounmap(brd->re_map_membase);
534 brd->re_map_membase = NULL;
535 }
536
537 /* Free all allocated channels structs */
538 for (i = 0; i < MAXPORTS ; i++) {
539 if (brd->channels[i]) {
540 kfree(brd->channels[i]->ch_rqueue);
541 kfree(brd->channels[i]->ch_equeue);
542 kfree(brd->channels[i]->ch_wqueue);
543 kfree(brd->channels[i]);
544 brd->channels[i] = NULL;
545 }
546 }
547
548 dgnc_board[brd->boardnum] = NULL;
549
550 kfree(brd);
551}
552
553/* Driver load/unload functions */
554
555static void cleanup(void)
556{
557 int i;
558 unsigned long flags;
559
560 spin_lock_irqsave(&dgnc_poll_lock, flags);
561 dgnc_poll_stop = 1;
562 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
563
564 /* Turn off poller right away. */
565 del_timer_sync(&dgnc_poll_timer);
566
567 device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
568 class_destroy(dgnc_class);
569 unregister_chrdev(dgnc_major, "dgnc");
570
571 for (i = 0; i < dgnc_num_boards; ++i) {
572 dgnc_cleanup_tty(dgnc_board[i]);
573 dgnc_cleanup_board(dgnc_board[i]);
574 }
575}
576
577/*
578 * dgnc_cleanup_module()
579 *
580 * Module unload. This is where it all ends.
581 */
582static void __exit dgnc_cleanup_module(void)
583{
584 cleanup();
585 pci_unregister_driver(&dgnc_driver);
586}
587
588/*
589 * init_module()
590 *
591 * Module load. This is where it all starts.
592 */
593static int __init dgnc_init_module(void)
594{
595 int rc;
596
597 /* Initialize global stuff */
598
599 rc = dgnc_start();
600
601 if (rc < 0)
602 return rc;
603
604 /* Find and configure all the cards */
605
606 rc = pci_register_driver(&dgnc_driver);
607 if (rc) {
608 pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
609 cleanup();
610 return rc;
611 }
612
613 return 0;
614}
615
616module_init(dgnc_init_module);
617module_exit(dgnc_cleanup_module);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 879202663a98..c8119f2fe881 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -12,11 +12,8 @@
12 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 12 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details. 13 * PURPOSE. See the GNU General Public License for more details.
14 * 14 *
15 *************************************************************************
16 *
17 * Driver includes 15 * Driver includes
18 * 16 */
19 *************************************************************************/
20 17
21#ifndef __DGNC_DRIVER_H 18#ifndef __DGNC_DRIVER_H
22#define __DGNC_DRIVER_H 19#define __DGNC_DRIVER_H
@@ -26,19 +23,14 @@
26#include <linux/interrupt.h> 23#include <linux/interrupt.h>
27 24
28#include "digi.h" /* Digi specific ioctl header */ 25#include "digi.h" /* Digi specific ioctl header */
29#include "dgnc_sysfs.h" /* Support for SYSFS */
30 26
31/************************************************************************* 27/* Driver defines */
32 *
33 * Driver defines
34 *
35 *************************************************************************/
36 28
37/* Driver identification and error statments */ 29/* Driver identification and error statements */
38#define PROCSTR "dgnc" /* /proc entries */ 30#define PROCSTR "dgnc" /* /proc entries */
39#define DEVSTR "/dev/dg/dgnc" /* /dev entries */ 31#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
40#define DRVSTR "dgnc" /* Driver name string */ 32#define DRVSTR "dgnc" /* Driver name string */
41#define DG_PART "40002369_F" /* RPM part number */ 33#define DG_PART "40002369_F" /* RPM part number */
42 34
43#define TRC_TO_CONSOLE 1 35#define TRC_TO_CONSOLE 1
44 36
@@ -61,7 +53,8 @@
61#define PORT_NUM(dev) ((dev) & 0x7f) 53#define PORT_NUM(dev) ((dev) & 0x7f)
62#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80) 54#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
63 55
64/* MAX number of stop characters we will send 56/*
57 * MAX number of stop characters we will send
65 * when our read queue is getting full 58 * when our read queue is getting full
66 */ 59 */
67#define MAX_STOPS_SENT 5 60#define MAX_STOPS_SENT 5
@@ -88,35 +81,28 @@
88#define _POSIX_VDISABLE '\0' 81#define _POSIX_VDISABLE '\0'
89#endif 82#endif
90 83
91/* 84/* All the possible states the driver can be while being loaded. */
92 * All the possible states the driver can be while being loaded. 85
93 */
94enum { 86enum {
95 DRIVER_INITIALIZED = 0, 87 DRIVER_INITIALIZED = 0,
96 DRIVER_READY 88 DRIVER_READY
97}; 89};
98 90
99/* 91/* All the possible states the board can be while booting up. */
100 * All the possible states the board can be while booting up. 92
101 */
102enum { 93enum {
103 BOARD_FAILED = 0, 94 BOARD_FAILED = 0,
104 BOARD_FOUND, 95 BOARD_FOUND,
105 BOARD_READY 96 BOARD_READY
106}; 97};
107 98
108/************************************************************************* 99/* Structures and closely related defines. */
109 *
110 * Structures and closely related defines.
111 *
112 *************************************************************************/
113 100
114struct dgnc_board; 101struct dgnc_board;
115struct channel_t; 102struct channel_t;
116 103
117/************************************************************************ 104/* Per board operations structure */
118 * Per board operations structure * 105
119 ************************************************************************/
120struct board_ops { 106struct board_ops {
121 void (*tasklet)(unsigned long data); 107 void (*tasklet)(unsigned long data);
122 irqreturn_t (*intr)(int irq, void *voidbrd); 108 irqreturn_t (*intr)(int irq, void *voidbrd);
@@ -138,16 +124,14 @@ struct board_ops {
138 void (*send_immediate_char)(struct channel_t *ch, unsigned char); 124 void (*send_immediate_char)(struct channel_t *ch, unsigned char);
139}; 125};
140 126
141/************************************************************************ 127/* Device flag definitions for bd_flags. */
142 * Device flag definitions for bd_flags. 128
143 ************************************************************************/
144#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */ 129#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
145 130
146/* 131/* Per-board information */
147 * Per-board information 132
148 */
149struct dgnc_board { 133struct dgnc_board {
150 int magic; /* Board Magic number. */ 134 int magic; /* Board Magic number. */
151 int boardnum; /* Board number: 0-32 */ 135 int boardnum; /* Board number: 0-32 */
152 136
153 int type; /* Type of board */ 137 int type; /* Type of board */
@@ -220,62 +204,56 @@ struct dgnc_board {
220 204
221}; 205};
222 206
223/************************************************************************ 207/* Unit flag definitions for un_flags. */
224 * Unit flag definitions for un_flags. 208#define UN_ISOPEN 0x0001 /* Device is open */
225 ************************************************************************/ 209#define UN_CLOSING 0x0002 /* Line is being closed */
226#define UN_ISOPEN 0x0001 /* Device is open */ 210#define UN_IMM 0x0004 /* Service immediately */
227#define UN_CLOSING 0x0002 /* Line is being closed */ 211#define UN_BUSY 0x0008 /* Some work this channel */
228#define UN_IMM 0x0004 /* Service immediately */ 212#define UN_BREAKI 0x0010 /* Input break received */
229#define UN_BUSY 0x0008 /* Some work this channel */
230#define UN_BREAKI 0x0010 /* Input break received */
231#define UN_PWAIT 0x0020 /* Printer waiting for terminal */ 213#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
232#define UN_TIME 0x0040 /* Waiting on time */ 214#define UN_TIME 0x0040 /* Waiting on time */
233#define UN_EMPTY 0x0080 /* Waiting output queue empty */ 215#define UN_EMPTY 0x0080 /* Waiting output queue empty */
234#define UN_LOW 0x0100 /* Waiting output low water mark*/ 216#define UN_LOW 0x0100 /* Waiting output low water mark*/
235#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */ 217#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
236#define UN_WOPEN 0x0400 /* Device waiting for open */ 218#define UN_WOPEN 0x0400 /* Device waiting for open */
237#define UN_WIOCTL 0x0800 /* Device waiting for open */ 219#define UN_WIOCTL 0x0800 /* Device waiting for open */
238#define UN_HANGUP 0x8000 /* Carrier lost */ 220#define UN_HANGUP 0x8000 /* Carrier lost */
239 221
240struct device; 222struct device;
241 223
242/************************************************************************ 224/* Structure for terminal or printer unit. */
243 * Structure for terminal or printer unit.
244 ************************************************************************/
245struct un_t { 225struct un_t {
246 int magic; /* Unit Magic Number. */ 226 int magic; /* Unit Magic Number. */
247 struct channel_t *un_ch; 227 struct channel_t *un_ch;
248 ulong un_time; 228 ulong un_time;
249 uint un_type; 229 uint un_type;
250 uint un_open_count; /* Counter of opens to port */ 230 uint un_open_count; /* Counter of opens to port */
251 struct tty_struct *un_tty;/* Pointer to unit tty structure */ 231 struct tty_struct *un_tty; /* Pointer to unit tty structure */
252 uint un_flags; /* Unit flags */ 232 uint un_flags; /* Unit flags */
253 wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */ 233 wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
254 uint un_dev; /* Minor device number */ 234 uint un_dev; /* Minor device number */
255 struct device *un_sysfs; 235 struct device *un_sysfs;
256}; 236};
257 237
258/************************************************************************ 238/* Device flag definitions for ch_flags. */
259 * Device flag definitions for ch_flags. 239#define CH_PRON 0x0001 /* Printer on string */
260 ************************************************************************/ 240#define CH_STOP 0x0002 /* Output is stopped */
261#define CH_PRON 0x0001 /* Printer on string */ 241#define CH_STOPI 0x0004 /* Input is stopped */
262#define CH_STOP 0x0002 /* Output is stopped */ 242#define CH_CD 0x0008 /* Carrier is present */
263#define CH_STOPI 0x0004 /* Input is stopped */ 243#define CH_FCAR 0x0010 /* Carrier forced on */
264#define CH_CD 0x0008 /* Carrier is present */ 244#define CH_HANGUP 0x0020 /* Hangup received */
265#define CH_FCAR 0x0010 /* Carrier forced on */ 245
266#define CH_HANGUP 0x0020 /* Hangup received */ 246#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
267 247#define CH_OPENING 0x0080 /* Port in fragile open state */
268#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */ 248#define CH_CLOSING 0x0100 /* Port in fragile close state */
269#define CH_OPENING 0x0080 /* Port in fragile open state */ 249#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
270#define CH_CLOSING 0x0100 /* Port in fragile close state */ 250#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
271#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */ 251#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
272#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */ 252#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
273#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */ 253#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
274#define CH_BREAK_SENDING 0x1000 /* Break is being sent */ 253#define CH_LOOPBACK 0x2000 /* Channel is in loopback mode */
275#define CH_LOOPBACK 0x2000 /* Channel is in loopback mode */
276#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */ 254#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
277#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */ 255#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
278#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */ 256#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
279 257
280/* Our Read/Error/Write queue sizes */ 258/* Our Read/Error/Write queue sizes */
281#define RQUEUEMASK 0x1FFF /* 8 K - 1 */ 259#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -285,43 +263,41 @@ struct un_t {
285#define EQUEUESIZE RQUEUESIZE 263#define EQUEUESIZE RQUEUESIZE
286#define WQUEUESIZE (WQUEUEMASK + 1) 264#define WQUEUESIZE (WQUEUEMASK + 1)
287 265
288/************************************************************************ 266/* Channel information structure. */
289 * Channel information structure.
290 ************************************************************************/
291struct channel_t { 267struct channel_t {
292 int magic; /* Channel Magic Number */ 268 int magic; /* Channel Magic Number */
293 struct dgnc_board *ch_bd; /* Board structure pointer */ 269 struct dgnc_board *ch_bd; /* Board structure pointer */
294 struct digi_t ch_digi; /* Transparent Print structure */ 270 struct digi_t ch_digi; /* Transparent Print structure */
295 struct un_t ch_tun; /* Terminal unit info */ 271 struct un_t ch_tun; /* Terminal unit info */
296 struct un_t ch_pun; /* Printer unit info */ 272 struct un_t ch_pun; /* Printer unit info */
297 273
298 spinlock_t ch_lock; /* provide for serialization */ 274 spinlock_t ch_lock; /* provide for serialization */
299 wait_queue_head_t ch_flags_wait; 275 wait_queue_head_t ch_flags_wait;
300 276
301 uint ch_portnum; /* Port number, 0 offset. */ 277 uint ch_portnum; /* Port number, 0 offset. */
302 uint ch_open_count; /* open count */ 278 uint ch_open_count; /* open count */
303 uint ch_flags; /* Channel flags */ 279 uint ch_flags; /* Channel flags */
304 280
305 ulong ch_close_delay; /* How long we should 281 ulong ch_close_delay; /* How long we should
306 * drop RTS/DTR for 282 * drop RTS/DTR for
307 */ 283 */
308 284
309 ulong ch_cpstime; /* Time for CPS calculations */ 285 ulong ch_cpstime; /* Time for CPS calculations */
310 286
311 tcflag_t ch_c_iflag; /* channel iflags */ 287 tcflag_t ch_c_iflag; /* channel iflags */
312 tcflag_t ch_c_cflag; /* channel cflags */ 288 tcflag_t ch_c_cflag; /* channel cflags */
313 tcflag_t ch_c_oflag; /* channel oflags */ 289 tcflag_t ch_c_oflag; /* channel oflags */
314 tcflag_t ch_c_lflag; /* channel lflags */ 290 tcflag_t ch_c_lflag; /* channel lflags */
315 unsigned char ch_stopc; /* Stop character */ 291 unsigned char ch_stopc; /* Stop character */
316 unsigned char ch_startc; /* Start character */ 292 unsigned char ch_startc; /* Start character */
317 293
318 uint ch_old_baud; /* Cache of the current baud */ 294 uint ch_old_baud; /* Cache of the current baud */
319 uint ch_custom_speed;/* Custom baud, if set */ 295 uint ch_custom_speed;/* Custom baud, if set */
320 296
321 uint ch_wopen; /* Waiting for open process cnt */ 297 uint ch_wopen; /* Waiting for open process cnt */
322 298
323 unsigned char ch_mostat; /* FEP output modem status */ 299 unsigned char ch_mostat; /* FEP output modem status */
324 unsigned char ch_mistat; /* FEP input modem status */ 300 unsigned char ch_mistat; /* FEP input modem status */
325 301
326 struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the 302 struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the
327 * "mapped" UART struct 303 * "mapped" UART struct
@@ -347,10 +323,10 @@ struct channel_t {
347 ulong ch_rxcount; /* total of data received so far */ 323 ulong ch_rxcount; /* total of data received so far */
348 ulong ch_txcount; /* total of data transmitted so far */ 324 ulong ch_txcount; /* total of data transmitted so far */
349 325
350 unsigned char ch_r_tlevel; /* Receive Trigger level */ 326 unsigned char ch_r_tlevel; /* Receive Trigger level */
351 unsigned char ch_t_tlevel; /* Transmit Trigger level */ 327 unsigned char ch_t_tlevel; /* Transmit Trigger level */
352 328
353 unsigned char ch_r_watermark; /* Receive Watermark */ 329 unsigned char ch_r_watermark; /* Receive Watermark */
354 330
355 ulong ch_stop_sending_break; /* Time we should STOP 331 ulong ch_stop_sending_break; /* Time we should STOP
356 * sending a break 332 * sending a break
@@ -374,16 +350,15 @@ struct channel_t {
374 350
375}; 351};
376 352
377/* 353/* Our Global Variables. */
378 * Our Global Variables. 354
379 */
380extern uint dgnc_major; /* Our driver/mgmt major */ 355extern uint dgnc_major; /* Our driver/mgmt major */
381extern int dgnc_poll_tick; /* Poll interval - 20 ms */ 356extern int dgnc_poll_tick; /* Poll interval - 20 ms */
382extern spinlock_t dgnc_global_lock; /* Driver global spinlock */ 357extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
383extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */ 358extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
384extern uint dgnc_num_boards; /* Total number of boards */ 359extern uint dgnc_num_boards; /* Total number of boards */
385extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board 360extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board
386 * structs 361 * structs
387 */ 362 */
388 363
389#endif 364#endif
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 683c098391d9..9d9b15d6358a 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -13,13 +13,11 @@
13 * PURPOSE. See the GNU General Public License for more details. 13 * PURPOSE. See the GNU General Public License for more details.
14 */ 14 */
15 15
16/************************************************************************ 16/*
17 *
18 * This file implements the mgmt functionality for the 17 * This file implements the mgmt functionality for the
19 * Neo and ClassicBoard based product lines. 18 * Neo and ClassicBoard based product lines.
20 *
21 ************************************************************************
22 */ 19 */
20
23#include <linux/kernel.h> 21#include <linux/kernel.h>
24#include <linux/ctype.h> 22#include <linux/ctype.h>
25#include <linux/sched.h> /* For jiffies, task states */ 23#include <linux/sched.h> /* For jiffies, task states */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 5becb3741b67..3eefefe53174 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -107,7 +107,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
107 /* Turn off auto Xon flow control */ 107 /* Turn off auto Xon flow control */
108 efr &= ~UART_17158_EFR_IXON; 108 efr &= ~UART_17158_EFR_IXON;
109 109
110 /* Why? Because Exar's spec says we have to zero it 110 /*
111 * Why? Because Exar's spec says we have to zero it
111 * out before setting it 112 * out before setting it
112 */ 113 */
113 writeb(0, &ch->ch_neo_uart->efr); 114 writeb(0, &ch->ch_neo_uart->efr);
@@ -145,7 +146,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
145 ier &= ~UART_17158_IER_XOFF; 146 ier &= ~UART_17158_IER_XOFF;
146 efr &= ~UART_17158_EFR_IXOFF; 147 efr &= ~UART_17158_EFR_IXOFF;
147 148
148 /* Why? Because Exar's spec says we have to zero it 149 /*
150 * Why? Because Exar's spec says we have to zero it
149 * out before setting it 151 * out before setting it
150 */ 152 */
151 writeb(0, &ch->ch_neo_uart->efr); 153 writeb(0, &ch->ch_neo_uart->efr);
@@ -185,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
185 /* Turn on auto Xon flow control */ 187 /* Turn on auto Xon flow control */
186 efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON); 188 efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
187 189
188 /* Why? Because Exar's spec says we have to zero it 190 /*
191 * Why? Because Exar's spec says we have to zero it
189 * out before setting it 192 * out before setting it
190 */ 193 */
191 writeb(0, &ch->ch_neo_uart->efr); 194 writeb(0, &ch->ch_neo_uart->efr);
@@ -225,7 +228,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
225 ier |= UART_17158_IER_XOFF; 228 ier |= UART_17158_IER_XOFF;
226 efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); 229 efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
227 230
228 /* Why? Because Exar's spec says we have to zero it 231 /*
232 * Why? Because Exar's spec says we have to zero it
229 * out before setting it 233 * out before setting it
230 */ 234 */
231 writeb(0, &ch->ch_neo_uart->efr); 235 writeb(0, &ch->ch_neo_uart->efr);
@@ -268,7 +272,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
268 else 272 else
269 efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); 273 efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
270 274
271 /* Why? Because Exar's spec says we have to zero 275 /*
276 * Why? Because Exar's spec says we have to zero
272 * it out before setting it 277 * it out before setting it
273 */ 278 */
274 writeb(0, &ch->ch_neo_uart->efr); 279 writeb(0, &ch->ch_neo_uart->efr);
@@ -308,7 +313,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
308 else 313 else
309 efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON); 314 efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
310 315
311 /* Why? Because Exar's spec says we have to zero it 316 /*
317 * Why? Because Exar's spec says we have to zero it
312 * out before setting it 318 * out before setting it
313 */ 319 */
314 writeb(0, &ch->ch_neo_uart->efr); 320 writeb(0, &ch->ch_neo_uart->efr);
@@ -351,9 +357,8 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
351 neo_pci_posting_flush(ch->ch_bd); 357 neo_pci_posting_flush(ch->ch_bd);
352} 358}
353 359
354/* 360/* No locks are assumed to be held when calling this function. */
355 * No locks are assumed to be held when calling this function. 361
356 */
357static inline void neo_clear_break(struct channel_t *ch, int force) 362static inline void neo_clear_break(struct channel_t *ch, int force)
358{ 363{
359 unsigned long flags; 364 unsigned long flags;
@@ -381,9 +386,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
381 spin_unlock_irqrestore(&ch->ch_lock, flags); 386 spin_unlock_irqrestore(&ch->ch_lock, flags);
382} 387}
383 388
384/* 389/* Parse the ISR register. */
385 * Parse the ISR register. 390
386 */
387static inline void neo_parse_isr(struct dgnc_board *brd, uint port) 391static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
388{ 392{
389 struct channel_t *ch; 393 struct channel_t *ch;
@@ -412,8 +416,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
412 if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) { 416 if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
413 /* Read data from uart -> queue */ 417 /* Read data from uart -> queue */
414 neo_copy_data_from_uart_to_queue(ch); 418 neo_copy_data_from_uart_to_queue(ch);
415 419 /*
416 /* Call our tty layer to enforce queue 420 * Call our tty layer to enforce queue
417 * flow control if needed. 421 * flow control if needed.
418 */ 422 */
419 spin_lock_irqsave(&ch->ch_lock, flags); 423 spin_lock_irqsave(&ch->ch_lock, flags);
@@ -438,7 +442,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
438 * one it was, so we can suspend or resume data flow. 442 * one it was, so we can suspend or resume data flow.
439 */ 443 */
440 if (cause == UART_17158_XON_DETECT) { 444 if (cause == UART_17158_XON_DETECT) {
441 /* Is output stopped right now, if so, 445 /*
446 * Is output stopped right now, if so,
442 * resume it 447 * resume it
443 */ 448 */
444 if (brd->channels[port]->ch_flags & CH_STOP) { 449 if (brd->channels[port]->ch_flags & CH_STOP) {
@@ -609,9 +614,8 @@ static void neo_param(struct tty_struct *tty)
609 if (!bd || bd->magic != DGNC_BOARD_MAGIC) 614 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
610 return; 615 return;
611 616
612 /* 617 /* If baud rate is zero, flush queues, and set mval to drop DTR. */
613 * If baud rate is zero, flush queues, and set mval to drop DTR. 618
614 */
615 if ((ch->ch_c_cflag & (CBAUD)) == 0) { 619 if ((ch->ch_c_cflag & (CBAUD)) == 0) {
616 ch->ch_r_head = 0; 620 ch->ch_r_head = 0;
617 ch->ch_r_tail = 0; 621 ch->ch_r_tail = 0;
@@ -672,7 +676,8 @@ static void neo_param(struct tty_struct *tty)
672 4800, 9600, 19200, 38400 } 676 4800, 9600, 19200, 38400 }
673 }; 677 };
674 678
675 /* Only use the TXPrint baud rate if the terminal unit 679 /*
680 * Only use the TXPrint baud rate if the terminal unit
676 * is NOT open 681 * is NOT open
677 */ 682 */
678 if (!(ch->ch_tun.un_flags & UN_ISOPEN) && 683 if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
@@ -797,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
797 if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { 802 if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
798 neo_set_cts_flow_control(ch); 803 neo_set_cts_flow_control(ch);
799 } else if (ch->ch_c_iflag & IXON) { 804 } else if (ch->ch_c_iflag & IXON) {
800 /* If start/stop is set to disable, then we should 805 /*
806 * If start/stop is set to disable, then we should
801 * disable flow control 807 * disable flow control
802 */ 808 */
803 if ((ch->ch_startc == _POSIX_VDISABLE) || 809 if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -812,7 +818,8 @@ static void neo_param(struct tty_struct *tty)
812 if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { 818 if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
813 neo_set_rts_flow_control(ch); 819 neo_set_rts_flow_control(ch);
814 } else if (ch->ch_c_iflag & IXOFF) { 820 } else if (ch->ch_c_iflag & IXOFF) {
815 /* If start/stop is set to disable, then we should 821 /*
822 * If start/stop is set to disable, then we should
816 * disable flow control 823 * disable flow control
817 */ 824 */
818 if ((ch->ch_startc == _POSIX_VDISABLE) || 825 if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -840,9 +847,8 @@ static void neo_param(struct tty_struct *tty)
840 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); 847 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
841} 848}
842 849
843/* 850/* Our board poller function. */
844 * Our board poller function. 851
845 */
846static void neo_tasklet(unsigned long data) 852static void neo_tasklet(unsigned long data)
847{ 853{
848 struct dgnc_board *bd = (struct dgnc_board *)data; 854 struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -867,9 +873,8 @@ static void neo_tasklet(unsigned long data)
867 */ 873 */
868 spin_lock_irqsave(&bd->bd_intr_lock, flags); 874 spin_lock_irqsave(&bd->bd_intr_lock, flags);
869 875
870 /* 876 /* If board is ready, parse deeper to see if there is anything to do. */
871 * If board is ready, parse deeper to see if there is anything to do. 877
872 */
873 if ((state == BOARD_READY) && (ports > 0)) { 878 if ((state == BOARD_READY) && (ports > 0)) {
874 /* Loop on each port */ 879 /* Loop on each port */
875 for (i = 0; i < ports; i++) { 880 for (i = 0; i < ports; i++) {
@@ -997,9 +1002,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
997 break; 1002 break;
998 1003
999 case UART_17158_RX_LINE_STATUS: 1004 case UART_17158_RX_LINE_STATUS:
1000 /* 1005
1001 * RXRDY and RX LINE Status (logic OR of LSR[4:1]) 1006 /* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */
1002 */ 1007
1003 neo_parse_lsr(brd, port); 1008 neo_parse_lsr(brd, port);
1004 break; 1009 break;
1005 1010
@@ -1022,9 +1027,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
1022 break; 1027 break;
1023 1028
1024 case UART_17158_MSR: 1029 case UART_17158_MSR:
1025 /* 1030
1026 * MSR or flow control was seen. 1031 /* MSR or flow control was seen. */
1027 */ 1032
1028 neo_parse_isr(brd, port); 1033 neo_parse_isr(brd, port);
1029 break; 1034 break;
1030 1035
@@ -1041,9 +1046,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
1041 port++; 1046 port++;
1042 } 1047 }
1043 1048
1044 /* 1049 /* Schedule tasklet to more in-depth servicing at a better time. */
1045 * Schedule tasklet to more in-depth servicing at a better time. 1050
1046 */
1047 tasklet_schedule(&brd->helper_tasklet); 1051 tasklet_schedule(&brd->helper_tasklet);
1048 1052
1049 spin_unlock_irqrestore(&brd->bd_intr_lock, flags); 1053 spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1238,9 +1242,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
1238 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); 1242 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
1239 } 1243 }
1240 1244
1241 /* 1245 /* Discard character if we are ignoring the error mask. */
1242 * Discard character if we are ignoring the error mask. 1246
1243 */
1244 if (linestatus & error_mask) { 1247 if (linestatus & error_mask) {
1245 unsigned char discard; 1248 unsigned char discard;
1246 1249
@@ -1279,9 +1282,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
1279 ch->ch_rxcount++; 1282 ch->ch_rxcount++;
1280 } 1283 }
1281 1284
1282 /* 1285 /* Write new final heads to channel structure. */
1283 * Write new final heads to channel structure. 1286
1284 */
1285 ch->ch_r_head = head & RQUEUEMASK; 1287 ch->ch_r_head = head & RQUEUEMASK;
1286 ch->ch_e_head = head & EQUEUEMASK; 1288 ch->ch_e_head = head & EQUEUEMASK;
1287 1289
@@ -1412,9 +1414,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
1412 (ch->ch_flags & CH_BREAK_SENDING)) 1414 (ch->ch_flags & CH_BREAK_SENDING))
1413 goto exit_unlock; 1415 goto exit_unlock;
1414 1416
1415 /* 1417 /* If FIFOs are disabled. Send data directly to txrx register */
1416 * If FIFOs are disabled. Send data directly to txrx register 1418
1417 */
1418 if (!(ch->ch_flags & CH_FIFO_ENABLED)) { 1419 if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
1419 unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr); 1420 unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
1420 1421
@@ -1458,9 +1459,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
1458 goto exit_unlock; 1459 goto exit_unlock;
1459 } 1460 }
1460 1461
1461 /* 1462 /* We have to do it this way, because of the EXAR TXFIFO count bug. */
1462 * We have to do it this way, because of the EXAR TXFIFO count bug. 1463
1463 */
1464 if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) { 1464 if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
1465 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) 1465 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
1466 goto exit_unlock; 1466 goto exit_unlock;
@@ -1645,9 +1645,8 @@ static void neo_send_stop_character(struct channel_t *ch)
1645 } 1645 }
1646} 1646}
1647 1647
1648/* 1648/* neo_uart_init */
1649 * neo_uart_init 1649
1650 */
1651static void neo_uart_init(struct channel_t *ch) 1650static void neo_uart_init(struct channel_t *ch)
1652{ 1651{
1653 writeb(0, &ch->ch_neo_uart->ier); 1652 writeb(0, &ch->ch_neo_uart->ier);
@@ -1668,9 +1667,8 @@ static void neo_uart_init(struct channel_t *ch)
1668 neo_pci_posting_flush(ch->ch_bd); 1667 neo_pci_posting_flush(ch->ch_bd);
1669} 1668}
1670 1669
1671/* 1670/* Make the UART completely turn off. */
1672 * Make the UART completely turn off. 1671
1673 */
1674static void neo_uart_off(struct channel_t *ch) 1672static void neo_uart_off(struct channel_t *ch)
1675{ 1673{
1676 /* Turn off UART enhanced bits */ 1674 /* Turn off UART enhanced bits */
@@ -1705,9 +1703,8 @@ static uint neo_get_uart_bytes_left(struct channel_t *ch)
1705/* Channel lock MUST be held by the calling function! */ 1703/* Channel lock MUST be held by the calling function! */
1706static void neo_send_break(struct channel_t *ch, int msecs) 1704static void neo_send_break(struct channel_t *ch, int msecs)
1707{ 1705{
1708 /* 1706 /* If we receive a time of 0, this means turn off the break. */
1709 * If we receive a time of 0, this means turn off the break. 1707
1710 */
1711 if (msecs == 0) { 1708 if (msecs == 0) {
1712 if (ch->ch_flags & CH_BREAK_SENDING) { 1709 if (ch->ch_flags & CH_BREAK_SENDING) {
1713 unsigned char temp = readb(&ch->ch_neo_uart->lcr); 1710 unsigned char temp = readb(&ch->ch_neo_uart->lcr);
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index abddd48353d0..77ecd9baae45 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -18,37 +18,38 @@
18 18
19#include "dgnc_driver.h" 19#include "dgnc_driver.h"
20 20
21/************************************************************************ 21/*
22 * Per channel/port NEO UART structure * 22 * Per channel/port NEO UART structure
23 ************************************************************************ 23 * Base Structure Entries Usage Meanings to Host
24 * Base Structure Entries Usage Meanings to Host * 24 *
25 * * 25 * W = read write R = read only
26 * W = read write R = read only * 26 * U = Unused.
27 * U = Unused. * 27 */
28 ************************************************************************/
29 28
30struct neo_uart_struct { 29struct neo_uart_struct {
31 u8 txrx; /* WR RHR/THR - Holding Reg */ 30 u8 txrx; /* WR RHR/THR - Holding Reg */
32 u8 ier; /* WR IER - Interrupt Enable Reg */ 31 u8 ier; /* WR IER - Interrupt Enable Reg */
33 u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */ 32 u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo
33 * Control Reg
34 */
34 u8 lcr; /* WR LCR - Line Control Reg */ 35 u8 lcr; /* WR LCR - Line Control Reg */
35 u8 mcr; /* WR MCR - Modem Control Reg */ 36 u8 mcr; /* WR MCR - Modem Control Reg */
36 u8 lsr; /* WR LSR - Line Status Reg */ 37 u8 lsr; /* WR LSR - Line Status Reg */
37 u8 msr; /* WR MSR - Modem Status Reg */ 38 u8 msr; /* WR MSR - Modem Status Reg */
38 u8 spr; /* WR SPR - Scratch Pad Reg */ 39 u8 spr; /* WR SPR - Scratch Pad Reg */
39 u8 fctr; /* WR FCTR - Feature Control Reg */ 40 u8 fctr; /* WR FCTR - Feature Control Reg */
40 u8 efr; /* WR EFR - Enhanced Function Reg */ 41 u8 efr; /* WR EFR - Enhanced Function Reg */
41 u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */ 42 u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
42 u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */ 43 u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
43 u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */ 44 u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
44 u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */ 45 u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
45 u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */ 46 u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
46 u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */ 47 u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
47 48
48 u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */ 49 u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
49 u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */ 50 u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
50 u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */ 51 u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
51 u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */ 52 u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
52}; 53};
53 54
54/* Where to read the extended interrupt register (32bits instead of 8bits) */ 55/* Where to read the extended interrupt register (32bits instead of 8bits) */
@@ -108,7 +109,9 @@ struct neo_uart_struct {
108/* 17158 Extended IIR's */ 109/* 17158 Extended IIR's */
109#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */ 110#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
110#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */ 111#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
111#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */ 112#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR
113 * state change
114 */
112#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */ 115#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
113 116
114/* 117/*
@@ -119,8 +122,12 @@ struct neo_uart_struct {
119#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */ 122#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
120#define UART_17158_TXRDY 0x3 /* TX Ready */ 123#define UART_17158_TXRDY 0x3 /* TX Ready */
121#define UART_17158_MSR 0x4 /* Modem State Change */ 124#define UART_17158_MSR 0x4 /* Modem State Change */
122#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */ 125#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding
123#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */ 126 * Reg Empty
127 */
128#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO
129 * Data error
130 */
124 131
125/* 132/*
126 * These are the EXTENDED definitions for the 17C158's Interrupt 133 * These are the EXTENDED definitions for the 17C158's Interrupt
@@ -130,19 +137,22 @@ struct neo_uart_struct {
130#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ 137#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
131#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ 138#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
132#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ 139#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
133#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ 140#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
134 141
135#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */ 142#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an
136#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */ 143 * incoming XOFF char
144 */
145#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an
146 * incoming XON char
147 */
137 148
138#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */ 149#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
139#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */ 150#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
140#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */ 151#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
141#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */ 152#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
142 153
143/* 154/* Our Global Variables */
144 * Our Global Variables 155
145 */
146extern struct board_ops dgnc_neo_ops; 156extern struct board_ops dgnc_neo_ops;
147 157
148#endif 158#endif
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
deleted file mode 100644
index 290bf6e226ac..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ /dev/null
@@ -1,703 +0,0 @@
1/*
2 * Copyright 2004 Digi International (www.digi.com)
3 * Scott H Kilau <Scott_Kilau at digi dot com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
12 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/ctype.h>
19#include <linux/string.h>
20#include <linux/serial_reg.h>
21#include <linux/device.h>
22#include <linux/pci.h>
23#include <linux/kdev_t.h>
24
25#include "dgnc_driver.h"
26#include "dgnc_mgmt.h"
27
28static ssize_t version_show(struct device_driver *ddp, char *buf)
29{
30 return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
31}
32static DRIVER_ATTR_RO(version);
33
34static ssize_t boards_show(struct device_driver *ddp, char *buf)
35{
36 return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
37}
38static DRIVER_ATTR_RO(boards);
39
40static ssize_t maxboards_show(struct device_driver *ddp, char *buf)
41{
42 return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
43}
44static DRIVER_ATTR_RO(maxboards);
45
46static ssize_t pollrate_show(struct device_driver *ddp, char *buf)
47{
48 return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
49}
50
51static ssize_t pollrate_store(struct device_driver *ddp,
52 const char *buf, size_t count)
53{
54 unsigned long flags;
55 int tick;
56 int ret;
57
58 ret = sscanf(buf, "%d\n", &tick);
59 if (ret != 1)
60 return -EINVAL;
61
62 spin_lock_irqsave(&dgnc_poll_lock, flags);
63 dgnc_poll_tick = tick;
64 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
65
66 return count;
67}
68static DRIVER_ATTR_RW(pollrate);
69
70void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
71{
72 int rc = 0;
73 struct device_driver *driverfs = &dgnc_driver->driver;
74
75 rc |= driver_create_file(driverfs, &driver_attr_version);
76 rc |= driver_create_file(driverfs, &driver_attr_boards);
77 rc |= driver_create_file(driverfs, &driver_attr_maxboards);
78 rc |= driver_create_file(driverfs, &driver_attr_pollrate);
79 if (rc)
80 pr_err("DGNC: sysfs driver_create_file failed!\n");
81}
82
83void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
84{
85 struct device_driver *driverfs = &dgnc_driver->driver;
86
87 driver_remove_file(driverfs, &driver_attr_version);
88 driver_remove_file(driverfs, &driver_attr_boards);
89 driver_remove_file(driverfs, &driver_attr_maxboards);
90 driver_remove_file(driverfs, &driver_attr_pollrate);
91}
92
93#define DGNC_VERIFY_BOARD(p, bd) \
94 do { \
95 if (!p) \
96 return 0; \
97 \
98 bd = dev_get_drvdata(p); \
99 if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
100 return 0; \
101 if (bd->state != BOARD_READY) \
102 return 0; \
103 } while (0)
104
105static ssize_t vpd_show(struct device *p, struct device_attribute *attr,
106 char *buf)
107{
108 struct dgnc_board *bd;
109 int count = 0;
110 int i = 0;
111
112 DGNC_VERIFY_BOARD(p, bd);
113
114 count += sprintf(buf + count,
115 "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
116 for (i = 0; i < 0x40 * 2; i++) {
117 if (!(i % 16))
118 count += sprintf(buf + count, "\n%04X ", i * 2);
119 count += sprintf(buf + count, "%02X ", bd->vpd[i]);
120 }
121 count += sprintf(buf + count, "\n");
122
123 return count;
124}
125static DEVICE_ATTR_RO(vpd);
126
127static ssize_t serial_number_show(struct device *p,
128 struct device_attribute *attr, char *buf)
129{
130 struct dgnc_board *bd;
131 int count = 0;
132
133 DGNC_VERIFY_BOARD(p, bd);
134
135 if (bd->serial_num[0] == '\0')
136 count += sprintf(buf + count, "<UNKNOWN>\n");
137 else
138 count += sprintf(buf + count, "%s\n", bd->serial_num);
139
140 return count;
141}
142static DEVICE_ATTR_RO(serial_number);
143
144static ssize_t ports_state_show(struct device *p,
145 struct device_attribute *attr, char *buf)
146{
147 struct dgnc_board *bd;
148 int count = 0;
149 int i = 0;
150
151 DGNC_VERIFY_BOARD(p, bd);
152
153 for (i = 0; i < bd->nasync; i++) {
154 count += snprintf(buf + count, PAGE_SIZE - count,
155 "%d %s\n", bd->channels[i]->ch_portnum,
156 bd->channels[i]->ch_open_count ? "Open" : "Closed");
157 }
158 return count;
159}
160static DEVICE_ATTR_RO(ports_state);
161
162static ssize_t ports_baud_show(struct device *p,
163 struct device_attribute *attr, char *buf)
164{
165 struct dgnc_board *bd;
166 int count = 0;
167 int i = 0;
168
169 DGNC_VERIFY_BOARD(p, bd);
170
171 for (i = 0; i < bd->nasync; i++) {
172 count += snprintf(buf + count, PAGE_SIZE - count,
173 "%d %d\n", bd->channels[i]->ch_portnum,
174 bd->channels[i]->ch_old_baud);
175 }
176 return count;
177}
178static DEVICE_ATTR_RO(ports_baud);
179
180static ssize_t ports_msignals_show(struct device *p,
181 struct device_attribute *attr, char *buf)
182{
183 struct dgnc_board *bd;
184 int count = 0;
185 int i = 0;
186
187 DGNC_VERIFY_BOARD(p, bd);
188
189 for (i = 0; i < bd->nasync; i++) {
190 struct channel_t *ch = bd->channels[i];
191
192 if (ch->ch_open_count) {
193 count += snprintf(buf + count, PAGE_SIZE - count,
194 "%d %s %s %s %s %s %s\n",
195 ch->ch_portnum,
196 (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
197 (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
198 (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
199 (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
200 (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
201 (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
202 } else {
203 count += snprintf(buf + count, PAGE_SIZE - count,
204 "%d\n", ch->ch_portnum);
205 }
206 }
207 return count;
208}
209static DEVICE_ATTR_RO(ports_msignals);
210
211static ssize_t ports_iflag_show(struct device *p,
212 struct device_attribute *attr, char *buf)
213{
214 struct dgnc_board *bd;
215 int count = 0;
216 int i = 0;
217
218 DGNC_VERIFY_BOARD(p, bd);
219
220 for (i = 0; i < bd->nasync; i++) {
221 count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
222 bd->channels[i]->ch_portnum,
223 bd->channels[i]->ch_c_iflag);
224 }
225 return count;
226}
227static DEVICE_ATTR_RO(ports_iflag);
228
229static ssize_t ports_cflag_show(struct device *p,
230 struct device_attribute *attr, char *buf)
231{
232 struct dgnc_board *bd;
233 int count = 0;
234 int i = 0;
235
236 DGNC_VERIFY_BOARD(p, bd);
237
238 for (i = 0; i < bd->nasync; i++) {
239 count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
240 bd->channels[i]->ch_portnum,
241 bd->channels[i]->ch_c_cflag);
242 }
243 return count;
244}
245static DEVICE_ATTR_RO(ports_cflag);
246
247static ssize_t ports_oflag_show(struct device *p,
248 struct device_attribute *attr, char *buf)
249{
250 struct dgnc_board *bd;
251 int count = 0;
252 int i = 0;
253
254 DGNC_VERIFY_BOARD(p, bd);
255
256 for (i = 0; i < bd->nasync; i++) {
257 count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
258 bd->channels[i]->ch_portnum,
259 bd->channels[i]->ch_c_oflag);
260 }
261 return count;
262}
263static DEVICE_ATTR_RO(ports_oflag);
264
265static ssize_t ports_lflag_show(struct device *p,
266 struct device_attribute *attr, char *buf)
267{
268 struct dgnc_board *bd;
269 int count = 0;
270 int i = 0;
271
272 DGNC_VERIFY_BOARD(p, bd);
273
274 for (i = 0; i < bd->nasync; i++) {
275 count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
276 bd->channels[i]->ch_portnum,
277 bd->channels[i]->ch_c_lflag);
278 }
279 return count;
280}
281static DEVICE_ATTR_RO(ports_lflag);
282
283static ssize_t ports_digi_flag_show(struct device *p,
284 struct device_attribute *attr, char *buf)
285{
286 struct dgnc_board *bd;
287 int count = 0;
288 int i = 0;
289
290 DGNC_VERIFY_BOARD(p, bd);
291
292 for (i = 0; i < bd->nasync; i++) {
293 count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
294 bd->channels[i]->ch_portnum,
295 bd->channels[i]->ch_digi.digi_flags);
296 }
297 return count;
298}
299static DEVICE_ATTR_RO(ports_digi_flag);
300
301static ssize_t ports_rxcount_show(struct device *p,
302 struct device_attribute *attr, char *buf)
303{
304 struct dgnc_board *bd;
305 int count = 0;
306 int i = 0;
307
308 DGNC_VERIFY_BOARD(p, bd);
309
310 for (i = 0; i < bd->nasync; i++) {
311 count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
312 bd->channels[i]->ch_portnum,
313 bd->channels[i]->ch_rxcount);
314 }
315 return count;
316}
317static DEVICE_ATTR_RO(ports_rxcount);
318
319static ssize_t ports_txcount_show(struct device *p,
320 struct device_attribute *attr, char *buf)
321{
322 struct dgnc_board *bd;
323 int count = 0;
324 int i = 0;
325
326 DGNC_VERIFY_BOARD(p, bd);
327
328 for (i = 0; i < bd->nasync; i++) {
329 count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
330 bd->channels[i]->ch_portnum,
331 bd->channels[i]->ch_txcount);
332 }
333 return count;
334}
335static DEVICE_ATTR_RO(ports_txcount);
336
337/* This function creates the sysfs files that export each signal status;
338 * each value is placed in a separate file.
339 */
340void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
341{
342 int rc = 0;
343
344 dev_set_drvdata(&bd->pdev->dev, bd);
345 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
346 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
347 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
348 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
349 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
350 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
351 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
352 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
353 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
354 rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
355 rc |= device_create_file(&bd->pdev->dev, &dev_attr_vpd);
356 rc |= device_create_file(&bd->pdev->dev, &dev_attr_serial_number);
357 if (rc)
358 dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n");
359}
360
361/* Removes all the sysfs files created for the board's ports. */
362void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
363{
364 device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
365 device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
366 device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
367 device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
368 device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
369 device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
370 device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
371 device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
372 device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
373 device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
374 device_remove_file(&bd->pdev->dev, &dev_attr_vpd);
375 device_remove_file(&bd->pdev->dev, &dev_attr_serial_number);
376}
377
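
The twelve per-board attributes above are created and removed one device_create_file()/device_remove_file() call at a time; the same set could also be registered in a single call through an attribute group, the pattern this file already uses for the per-tty attributes further down. A minimal sketch under that assumption (the dgnc_ports_* names are hypothetical, not part of this patch):

	static struct attribute *dgnc_ports_attrs[] = {
		&dev_attr_ports_state.attr,
		&dev_attr_ports_baud.attr,
		/* ... remaining ports_* attributes, vpd and serial_number ... */
		NULL
	};

	static const struct attribute_group dgnc_ports_group = {
		.attrs = dgnc_ports_attrs,
	};

	void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
	{
		dev_set_drvdata(&bd->pdev->dev, bd);
		if (sysfs_create_group(&bd->pdev->dev.kobj, &dgnc_ports_group))
			dev_err(&bd->pdev->dev,
				"dgnc: failed to create ports sysfs group\n");
	}

	void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
	{
		sysfs_remove_group(&bd->pdev->dev.kobj, &dgnc_ports_group);
	}
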
378static ssize_t tty_state_show(struct device *d,
379 struct device_attribute *attr, char *buf)
380{
381 struct dgnc_board *bd;
382 struct channel_t *ch;
383 struct un_t *un;
384
385 if (!d)
386 return 0;
387 un = dev_get_drvdata(d);
388 if (!un || un->magic != DGNC_UNIT_MAGIC)
389 return 0;
390 ch = un->un_ch;
391 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
392 return 0;
393 bd = ch->ch_bd;
394 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
395 return 0;
396 if (bd->state != BOARD_READY)
397 return 0;
398
399 return snprintf(buf, PAGE_SIZE, "%s",
400 un->un_open_count ? "Open" : "Closed");
401}
402static DEVICE_ATTR_RO(tty_state);
403
404static ssize_t tty_baud_show(struct device *d,
405 struct device_attribute *attr, char *buf)
406{
407 struct dgnc_board *bd;
408 struct channel_t *ch;
409 struct un_t *un;
410
411 if (!d)
412 return 0;
413 un = dev_get_drvdata(d);
414 if (!un || un->magic != DGNC_UNIT_MAGIC)
415 return 0;
416 ch = un->un_ch;
417 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
418 return 0;
419 bd = ch->ch_bd;
420 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
421 return 0;
422 if (bd->state != BOARD_READY)
423 return 0;
424
425 return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
426}
427static DEVICE_ATTR_RO(tty_baud);
428
429static ssize_t tty_msignals_show(struct device *d,
430 struct device_attribute *attr, char *buf)
431{
432 struct dgnc_board *bd;
433 struct channel_t *ch;
434 struct un_t *un;
435
436 if (!d)
437 return 0;
438 un = dev_get_drvdata(d);
439 if (!un || un->magic != DGNC_UNIT_MAGIC)
440 return 0;
441 ch = un->un_ch;
442 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
443 return 0;
444 bd = ch->ch_bd;
445 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
446 return 0;
447 if (bd->state != BOARD_READY)
448 return 0;
449
450 if (ch->ch_open_count) {
451 return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
452 (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
453 (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
454 (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
455 (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
456 (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
457 (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
458 }
459 return 0;
460}
461static DEVICE_ATTR_RO(tty_msignals);
462
463static ssize_t tty_iflag_show(struct device *d,
464 struct device_attribute *attr, char *buf)
465{
466 struct dgnc_board *bd;
467 struct channel_t *ch;
468 struct un_t *un;
469
470 if (!d)
471 return 0;
472 un = dev_get_drvdata(d);
473 if (!un || un->magic != DGNC_UNIT_MAGIC)
474 return 0;
475 ch = un->un_ch;
476 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
477 return 0;
478 bd = ch->ch_bd;
479 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
480 return 0;
481 if (bd->state != BOARD_READY)
482 return 0;
483
484 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
485}
486static DEVICE_ATTR_RO(tty_iflag);
487
488static ssize_t tty_cflag_show(struct device *d,
489 struct device_attribute *attr, char *buf)
490{
491 struct dgnc_board *bd;
492 struct channel_t *ch;
493 struct un_t *un;
494
495 if (!d)
496 return 0;
497 un = dev_get_drvdata(d);
498 if (!un || un->magic != DGNC_UNIT_MAGIC)
499 return 0;
500 ch = un->un_ch;
501 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
502 return 0;
503 bd = ch->ch_bd;
504 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
505 return 0;
506 if (bd->state != BOARD_READY)
507 return 0;
508
509 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
510}
511static DEVICE_ATTR_RO(tty_cflag);
512
513static ssize_t tty_oflag_show(struct device *d,
514 struct device_attribute *attr, char *buf)
515{
516 struct dgnc_board *bd;
517 struct channel_t *ch;
518 struct un_t *un;
519
520 if (!d)
521 return 0;
522 un = dev_get_drvdata(d);
523 if (!un || un->magic != DGNC_UNIT_MAGIC)
524 return 0;
525 ch = un->un_ch;
526 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
527 return 0;
528 bd = ch->ch_bd;
529 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
530 return 0;
531 if (bd->state != BOARD_READY)
532 return 0;
533
534 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
535}
536static DEVICE_ATTR_RO(tty_oflag);
537
538static ssize_t tty_lflag_show(struct device *d,
539 struct device_attribute *attr, char *buf)
540{
541 struct dgnc_board *bd;
542 struct channel_t *ch;
543 struct un_t *un;
544
545 if (!d)
546 return 0;
547 un = dev_get_drvdata(d);
548 if (!un || un->magic != DGNC_UNIT_MAGIC)
549 return 0;
550 ch = un->un_ch;
551 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
552 return 0;
553 bd = ch->ch_bd;
554 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
555 return 0;
556 if (bd->state != BOARD_READY)
557 return 0;
558
559 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
560}
561static DEVICE_ATTR_RO(tty_lflag);
562
563static ssize_t tty_digi_flag_show(struct device *d,
564 struct device_attribute *attr, char *buf)
565{
566 struct dgnc_board *bd;
567 struct channel_t *ch;
568 struct un_t *un;
569
570 if (!d)
571 return 0;
572 un = dev_get_drvdata(d);
573 if (!un || un->magic != DGNC_UNIT_MAGIC)
574 return 0;
575 ch = un->un_ch;
576 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
577 return 0;
578 bd = ch->ch_bd;
579 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
580 return 0;
581 if (bd->state != BOARD_READY)
582 return 0;
583
584 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
585}
586static DEVICE_ATTR_RO(tty_digi_flag);
587
588static ssize_t tty_rxcount_show(struct device *d,
589 struct device_attribute *attr, char *buf)
590{
591 struct dgnc_board *bd;
592 struct channel_t *ch;
593 struct un_t *un;
594
595 if (!d)
596 return 0;
597 un = dev_get_drvdata(d);
598 if (!un || un->magic != DGNC_UNIT_MAGIC)
599 return 0;
600 ch = un->un_ch;
601 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
602 return 0;
603 bd = ch->ch_bd;
604 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
605 return 0;
606 if (bd->state != BOARD_READY)
607 return 0;
608
609 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
610}
611static DEVICE_ATTR_RO(tty_rxcount);
612
613static ssize_t tty_txcount_show(struct device *d,
614 struct device_attribute *attr, char *buf)
615{
616 struct dgnc_board *bd;
617 struct channel_t *ch;
618 struct un_t *un;
619
620 if (!d)
621 return 0;
622 un = dev_get_drvdata(d);
623 if (!un || un->magic != DGNC_UNIT_MAGIC)
624 return 0;
625 ch = un->un_ch;
626 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
627 return 0;
628 bd = ch->ch_bd;
629 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
630 return 0;
631 if (bd->state != BOARD_READY)
632 return 0;
633
634 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
635}
636static DEVICE_ATTR_RO(tty_txcount);
637
638static ssize_t tty_custom_name_show(struct device *d,
639 struct device_attribute *attr, char *buf)
640{
641 struct dgnc_board *bd;
642 struct channel_t *ch;
643 struct un_t *un;
644
645 if (!d)
646 return 0;
647 un = dev_get_drvdata(d);
648 if (!un || un->magic != DGNC_UNIT_MAGIC)
649 return 0;
650 ch = un->un_ch;
651 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
652 return 0;
653 bd = ch->ch_bd;
654 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
655 return 0;
656 if (bd->state != BOARD_READY)
657 return 0;
658
659 return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
660 (un->un_type == DGNC_PRINT) ? "pr" : "tty",
661 bd->boardnum + 1, 'a' + ch->ch_portnum);
662}
663static DEVICE_ATTR_RO(tty_custom_name);
664
665static struct attribute *dgnc_sysfs_tty_entries[] = {
666 &dev_attr_tty_state.attr,
667 &dev_attr_tty_baud.attr,
668 &dev_attr_tty_msignals.attr,
669 &dev_attr_tty_iflag.attr,
670 &dev_attr_tty_cflag.attr,
671 &dev_attr_tty_oflag.attr,
672 &dev_attr_tty_lflag.attr,
673 &dev_attr_tty_digi_flag.attr,
674 &dev_attr_tty_rxcount.attr,
675 &dev_attr_tty_txcount.attr,
676 &dev_attr_tty_custom_name.attr,
677 NULL
678};
679
680static const struct attribute_group dgnc_tty_attribute_group = {
681 .name = NULL,
682 .attrs = dgnc_sysfs_tty_entries,
683};
684
685void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
686{
687 int ret;
688
689 ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
690 if (ret) {
691 dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n");
692 sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
693 return;
694 }
695
696 dev_set_drvdata(c, un);
697}
698
699void dgnc_remove_tty_sysfs(struct device *c)
700{
701 sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
702}
703
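
Once dgnc_create_tty_sysfs() has run, each entry in the group appears as a read-only file under the registered tty device's sysfs directory. A rough userspace sketch of reading one of them; the path and node name are assumptions and depend on the board and on how udev lays the device out:

	#include <stdio.h>

	int main(void)
	{
		char state[32] = "";
		/* Path is illustrative; adjust to the actual tty device. */
		FILE *f = fopen("/sys/class/tty/ttyn1a/tty_state", "r");

		if (!f)
			return 1;
		if (fgets(state, sizeof(state), f))
			printf("port is %s\n", state);
		fclose(f);
		return 0;
	}
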
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
deleted file mode 100644
index 7be7d55bc49e..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright 2003 Digi International (www.digi.com)
3 * Scott H Kilau <Scott_Kilau at digi dot com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
12 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 */
15
16#ifndef __DGNC_SYSFS_H
17#define __DGNC_SYSFS_H
18
19#include <linux/device.h>
20#include "dgnc_driver.h"
21
22struct dgnc_board;
23struct channel_t;
24struct un_t;
25struct pci_driver;
26struct class_device;
27
28void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
29void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
30
31void dgnc_create_driver_sysfiles(struct pci_driver *);
32void dgnc_remove_driver_sysfiles(struct pci_driver *);
33
34int dgnc_tty_class_init(void);
35int dgnc_tty_class_destroy(void);
36
37void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
38void dgnc_remove_tty_sysfs(struct device *c);
39
40#endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 953d9310fa74..1e10c0fe4745 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -13,13 +13,9 @@
13 * PURPOSE. See the GNU General Public License for more details. 13 * PURPOSE. See the GNU General Public License for more details.
14 */ 14 */
15 15
16/************************************************************************ 16/*
17 *
18 * This file implements the tty driver functionality for the 17 * This file implements the tty driver functionality for the
19 * Neo and ClassicBoard PCI based product lines. 18 * Neo and ClassicBoard PCI based product lines.
20 *
21 ************************************************************************
22 *
23 */ 19 */
24 20
25#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -39,27 +35,20 @@
39#include "dgnc_tty.h" 35#include "dgnc_tty.h"
40#include "dgnc_neo.h" 36#include "dgnc_neo.h"
41#include "dgnc_cls.h" 37#include "dgnc_cls.h"
42#include "dgnc_sysfs.h"
43#include "dgnc_utils.h" 38#include "dgnc_utils.h"
44 39
45/* 40/* Default transparent print information. */
46 * internal variables 41
47 */ 42static const struct digi_t dgnc_digi_init = {
48static unsigned char *dgnc_TmpWriteBuf; 43 .digi_flags = DIGI_COOK, /* Flags */
49 44 .digi_maxcps = 100, /* Max CPS */
50/* 45 .digi_maxchar = 50, /* Max chars in print queue */
51 * Default transparent print information. 46 .digi_bufsize = 100, /* Printer buffer size */
52 */ 47 .digi_onlen = 4, /* size of printer on string */
53static struct digi_t dgnc_digi_init = { 48 .digi_offlen = 4, /* size of printer off string */
54 .digi_flags = DIGI_COOK, /* Flags */ 49 .digi_onstr = "\033[5i", /* ANSI printer on string ] */
55 .digi_maxcps = 100, /* Max CPS */ 50 .digi_offstr = "\033[4i", /* ANSI printer off string ] */
56 .digi_maxchar = 50, /* Max chars in print queue */ 51 .digi_term = "ansi" /* default terminal type */
57 .digi_bufsize = 100, /* Printer buffer size */
58 .digi_onlen = 4, /* size of printer on string */
59 .digi_offlen = 4, /* size of printer off string */
60 .digi_onstr = "\033[5i", /* ANSI printer on string ] */
61 .digi_offstr = "\033[4i", /* ANSI printer off string ] */
62 .digi_term = "ansi" /* default terminal type */
63}; 52};
64 53
65/* 54/*
@@ -69,7 +58,7 @@ static struct digi_t dgnc_digi_init = {
69 * This defines a raw port at 9600 baud, 8 data bits, no parity, 58 * This defines a raw port at 9600 baud, 8 data bits, no parity,
70 * 1 stop bit. 59 * 1 stop bit.
71 */ 60 */
72static struct ktermios DgncDefaultTermios = { 61static struct ktermios default_termios = {
73 .c_iflag = (DEFAULT_IFLAGS), /* iflags */ 62 .c_iflag = (DEFAULT_IFLAGS), /* iflags */
74 .c_oflag = (DEFAULT_OFLAGS), /* oflags */ 63 .c_oflag = (DEFAULT_OFLAGS), /* oflags */
75 .c_cflag = (DEFAULT_CFLAGS), /* cflags */ 64 .c_cflag = (DEFAULT_CFLAGS), /* cflags */
@@ -113,6 +102,8 @@ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf,
113static void dgnc_tty_set_termios(struct tty_struct *tty, 102static void dgnc_tty_set_termios(struct tty_struct *tty,
114 struct ktermios *old_termios); 103 struct ktermios *old_termios);
115static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch); 104static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
105static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char line);
106static void dgnc_wake_up_unit(struct un_t *unit);
116 107
117static const struct tty_operations dgnc_tty_ops = { 108static const struct tty_operations dgnc_tty_ops = {
118 .open = dgnc_tty_open, 109 .open = dgnc_tty_open,
@@ -137,36 +128,7 @@ static const struct tty_operations dgnc_tty_ops = {
137 .send_xchar = dgnc_tty_send_xchar 128 .send_xchar = dgnc_tty_send_xchar
138}; 129};
139 130
140/************************************************************************ 131/* TTY Initialization/Cleanup Functions */
141 *
142 * TTY Initialization/Cleanup Functions
143 *
144 ************************************************************************/
145
146/*
147 * dgnc_tty_preinit()
148 *
149 * Initialize any global tty related data before we download any boards.
150 */
151int dgnc_tty_preinit(void)
152{
153 /*
154 * Allocate a buffer for doing the copy from user space to
155 * kernel space in dgnc_write(). We only use one buffer and
156 * control access to it with a semaphore. If we are paging, we
157 * are already in trouble so one buffer won't hurt much anyway.
158 *
159 * We are okay to sleep in the malloc, as this routine
160 * is only called during module load, (not in interrupt context),
161 * and with no locks held.
162 */
163 dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
164
165 if (!dgnc_TmpWriteBuf)
166 return -ENOMEM;
167
168 return 0;
169}
170 132
171/* 133/*
172 * dgnc_tty_register() 134 * dgnc_tty_register()
@@ -194,7 +156,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
194 brd->serial_driver->minor_start = 0; 156 brd->serial_driver->minor_start = 0;
195 brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 157 brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
196 brd->serial_driver->subtype = SERIAL_TYPE_NORMAL; 158 brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
197 brd->serial_driver->init_termios = DgncDefaultTermios; 159 brd->serial_driver->init_termios = default_termios;
198 brd->serial_driver->driver_name = DRVSTR; 160 brd->serial_driver->driver_name = DRVSTR;
199 161
200 /* 162 /*
@@ -233,7 +195,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
233 brd->print_driver->minor_start = 0x80; 195 brd->print_driver->minor_start = 0x80;
234 brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL; 196 brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
235 brd->print_driver->subtype = SERIAL_TYPE_NORMAL; 197 brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
236 brd->print_driver->init_termios = DgncDefaultTermios; 198 brd->print_driver->init_termios = default_termios;
237 brd->print_driver->driver_name = DRVSTR; 199 brd->print_driver->driver_name = DRVSTR;
238 200
239 /* 201 /*
@@ -285,9 +247,7 @@ int dgnc_tty_init(struct dgnc_board *brd)
285 if (!brd) 247 if (!brd)
286 return -ENXIO; 248 return -ENXIO;
287 249
288 /* 250 /* Initialize board structure elements. */
289 * Initialize board structure elements.
290 */
291 251
292 vaddr = brd->re_map_membase; 252 vaddr = brd->re_map_membase;
293 253
@@ -345,12 +305,10 @@ int dgnc_tty_init(struct dgnc_board *brd)
345 classp = tty_register_device(brd->serial_driver, i, 305 classp = tty_register_device(brd->serial_driver, i,
346 &ch->ch_bd->pdev->dev); 306 &ch->ch_bd->pdev->dev);
347 ch->ch_tun.un_sysfs = classp; 307 ch->ch_tun.un_sysfs = classp;
348 dgnc_create_tty_sysfs(&ch->ch_tun, classp);
349 308
350 classp = tty_register_device(brd->print_driver, i, 309 classp = tty_register_device(brd->print_driver, i,
351 &ch->ch_bd->pdev->dev); 310 &ch->ch_bd->pdev->dev);
352 ch->ch_pun.un_sysfs = classp; 311 ch->ch_pun.un_sysfs = classp;
353 dgnc_create_tty_sysfs(&ch->ch_pun, classp);
354 } 312 }
355 } 313 }
356 314
@@ -365,17 +323,6 @@ err_free_channels:
365} 323}
366 324
367/* 325/*
368 * dgnc_tty_post_uninit()
369 *
370 * UnInitialize any global tty related data.
371 */
372void dgnc_tty_post_uninit(void)
373{
374 kfree(dgnc_TmpWriteBuf);
375 dgnc_TmpWriteBuf = NULL;
376}
377
378/*
379 * dgnc_cleanup_tty() 326 * dgnc_cleanup_tty()
380 * 327 *
381 * Uninitialize the TTY portion of this driver. Free all memory and 328 * Uninitialize the TTY portion of this driver. Free all memory and
@@ -385,20 +332,14 @@ void dgnc_cleanup_tty(struct dgnc_board *brd)
385{ 332{
386 int i = 0; 333 int i = 0;
387 334
388 for (i = 0; i < brd->nasync; i++) { 335 for (i = 0; i < brd->nasync; i++)
389 if (brd->channels[i])
390 dgnc_remove_tty_sysfs(brd->channels[i]->
391 ch_tun.un_sysfs);
392 tty_unregister_device(brd->serial_driver, i); 336 tty_unregister_device(brd->serial_driver, i);
393 } 337
394 tty_unregister_driver(brd->serial_driver); 338 tty_unregister_driver(brd->serial_driver);
395 339
396 for (i = 0; i < brd->nasync; i++) { 340 for (i = 0; i < brd->nasync; i++)
397 if (brd->channels[i])
398 dgnc_remove_tty_sysfs(brd->channels[i]->
399 ch_pun.un_sysfs);
400 tty_unregister_device(brd->print_driver, i); 341 tty_unregister_device(brd->print_driver, i);
401 } 342
402 tty_unregister_driver(brd->print_driver); 343 tty_unregister_driver(brd->print_driver);
403 344
404 put_tty_driver(brd->serial_driver); 345 put_tty_driver(brd->serial_driver);
@@ -437,9 +378,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
437 } 378 }
438 379
439 if (n > 0) { 380 if (n > 0) {
440 /* 381 /* Move rest of data. */
441 * Move rest of data.
442 */
443 remain = n; 382 remain = n;
444 memcpy(ch->ch_wqueue + head, buf, remain); 383 memcpy(ch->ch_wqueue + head, buf, remain);
445 head += remain; 384 head += remain;
@@ -509,9 +448,8 @@ void dgnc_input(struct channel_t *ch)
509 goto exit_unlock; 448 goto exit_unlock;
510 } 449 }
511 450
512 /* 451 /* If we are throttled, simply don't read any data. */
513 * If we are throttled, simply don't read any data. 452
514 */
515 if (ch->ch_flags & CH_FORCED_STOPI) 453 if (ch->ch_flags & CH_FORCED_STOPI)
516 goto exit_unlock; 454 goto exit_unlock;
517 455
@@ -624,10 +562,10 @@ exit_unlock:
624 tty_ldisc_deref(ld); 562 tty_ldisc_deref(ld);
625} 563}
626 564
627/************************************************************************ 565/*
628 * Determines when CARRIER changes state and takes appropriate 566 * Determines when CARRIER changes state and takes appropriate
629 * action. 567 * action.
630 ************************************************************************/ 568 */
631void dgnc_carrier(struct channel_t *ch) 569void dgnc_carrier(struct channel_t *ch)
632{ 570{
633 int virt_carrier = 0; 571 int virt_carrier = 0;
@@ -645,28 +583,24 @@ void dgnc_carrier(struct channel_t *ch)
645 if (ch->ch_c_cflag & CLOCAL) 583 if (ch->ch_c_cflag & CLOCAL)
646 virt_carrier = 1; 584 virt_carrier = 1;
647 585
648 /* 586 /* Test for a VIRTUAL carrier transition to HIGH. */
649 * Test for a VIRTUAL carrier transition to HIGH. 587
650 */
651 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { 588 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
652 /* 589 /*
653 * When carrier rises, wake any threads waiting 590 * When carrier rises, wake any threads waiting
654 * for carrier in the open routine. 591 * for carrier in the open routine.
655 */ 592 */
656
657 if (waitqueue_active(&ch->ch_flags_wait)) 593 if (waitqueue_active(&ch->ch_flags_wait))
658 wake_up_interruptible(&ch->ch_flags_wait); 594 wake_up_interruptible(&ch->ch_flags_wait);
659 } 595 }
660 596
661 /* 597 /* Test for a PHYSICAL carrier transition to HIGH. */
662 * Test for a PHYSICAL carrier transition to HIGH. 598
663 */
664 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { 599 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
665 /* 600 /*
666 * When carrier rises, wake any threads waiting 601 * When carrier rises, wake any threads waiting
667 * for carrier in the open routine. 602 * for carrier in the open routine.
668 */ 603 */
669
670 if (waitqueue_active(&ch->ch_flags_wait)) 604 if (waitqueue_active(&ch->ch_flags_wait))
671 wake_up_interruptible(&ch->ch_flags_wait); 605 wake_up_interruptible(&ch->ch_flags_wait);
672 } 606 }
@@ -704,9 +638,8 @@ void dgnc_carrier(struct channel_t *ch)
704 tty_hangup(ch->ch_pun.un_tty); 638 tty_hangup(ch->ch_pun.un_tty);
705 } 639 }
706 640
707 /* 641 /* Make sure that our cached values reflect the current reality. */
708 * Make sure that our cached values reflect the current reality. 642
709 */
710 if (virt_carrier == 1) 643 if (virt_carrier == 1)
711 ch->ch_flags |= CH_FCAR; 644 ch->ch_flags |= CH_FCAR;
712 else 645 else
@@ -718,9 +651,8 @@ void dgnc_carrier(struct channel_t *ch)
718 ch->ch_flags &= ~CH_CD; 651 ch->ch_flags &= ~CH_CD;
719} 652}
720 653
721/* 654/* Assign the custom baud rate to the channel structure */
722 * Assign the custom baud rate to the channel structure 655
723 */
724static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate) 656static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
725{ 657{
726 int testdiv; 658 int testdiv;
@@ -854,6 +786,12 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
854 } 786 }
855} 787}
856 788
789static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig)
790{
791 ch->ch_mostat &= ~(sig);
792 ch->ch_bd->bd_ops->assert_modem_signals(ch);
793}
794
857void dgnc_wakeup_writes(struct channel_t *ch) 795void dgnc_wakeup_writes(struct channel_t *ch)
858{ 796{
859 int qlen = 0; 797 int qlen = 0;
@@ -864,9 +802,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
864 802
865 spin_lock_irqsave(&ch->ch_lock, flags); 803 spin_lock_irqsave(&ch->ch_lock, flags);
866 804
867 /* 805 /* If channel now has space, wake up anyone waiting on the condition. */
868 * If channel now has space, wake up anyone waiting on the condition. 806
869 */
870 qlen = ch->ch_w_head - ch->ch_w_tail; 807 qlen = ch->ch_w_head - ch->ch_w_tail;
871 if (qlen < 0) 808 if (qlen < 0)
872 qlen += WQUEUESIZE; 809 qlen += WQUEUESIZE;
@@ -892,19 +829,15 @@ void dgnc_wakeup_writes(struct channel_t *ch)
892 * If RTS Toggle mode is on, whenever 829 * If RTS Toggle mode is on, whenever
893 * the queue and UART is empty, keep RTS low. 830 * the queue and UART is empty, keep RTS low.
894 */ 831 */
895 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { 832 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
896 ch->ch_mostat &= ~(UART_MCR_RTS); 833 dgnc_set_signal_low(ch, UART_MCR_RTS);
897 ch->ch_bd->bd_ops->assert_modem_signals(ch);
898 }
899 834
900 /* 835 /*
901 * If DTR Toggle mode is on, whenever 836 * If DTR Toggle mode is on, whenever
902 * the queue and UART is empty, keep DTR low. 837 * the queue and UART is empty, keep DTR low.
903 */ 838 */
904 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { 839 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
905 ch->ch_mostat &= ~(UART_MCR_DTR); 840 dgnc_set_signal_low(ch, UART_MCR_DTR);
906 ch->ch_bd->bd_ops->assert_modem_signals(ch);
907 }
908 } 841 }
909 } 842 }
910 843
@@ -930,7 +863,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
930 spin_unlock_irqrestore(&ch->ch_lock, flags); 863 spin_unlock_irqrestore(&ch->ch_lock, flags);
931} 864}
932 865
933struct dgnc_board *find_board_by_major(unsigned int major) 866static struct dgnc_board *find_board_by_major(unsigned int major)
934{ 867{
935 int i; 868 int i;
936 869
@@ -948,16 +881,10 @@ struct dgnc_board *find_board_by_major(unsigned int major)
948 return NULL; 881 return NULL;
949} 882}
950 883
951/************************************************************************ 884/* TTY Entry points and helper functions */
952 * 885
953 * TTY Entry points and helper functions 886/* dgnc_tty_open() */
954 *
955 ************************************************************************/
956 887
957/*
958 * dgnc_tty_open()
959 *
960 */
961static int dgnc_tty_open(struct tty_struct *tty, struct file *file) 888static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
962{ 889{
963 struct dgnc_board *brd; 890 struct dgnc_board *brd;
@@ -1045,8 +972,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1045 * ch_flags_wait to wake us back up. 972 * ch_flags_wait to wake us back up.
1046 */ 973 */
1047 rc = wait_event_interruptible(ch->ch_flags_wait, 974 rc = wait_event_interruptible(ch->ch_flags_wait,
1048 (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & 975 (((ch->ch_tun.un_flags |
1049 UN_CLOSING) == 0)); 976 ch->ch_pun.un_flags) & UN_CLOSING) == 0));
1050 977
1051 /* If ret is non-zero, user ctrl-c'ed us */ 978 /* If ret is non-zero, user ctrl-c'ed us */
1052 if (rc) 979 if (rc)
@@ -1057,9 +984,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1057 /* Store our unit into driver_data, so we always have it available. */ 984 /* Store our unit into driver_data, so we always have it available. */
1058 tty->driver_data = un; 985 tty->driver_data = un;
1059 986
1060 /* 987 /* Initialize tty's */
1061 * Initialize tty's 988
1062 */
1063 if (!(un->un_flags & UN_ISOPEN)) { 989 if (!(un->un_flags & UN_ISOPEN)) {
1064 /* Store important variables. */ 990 /* Store important variables. */
1065 un->un_tty = tty; 991 un->un_tty = tty;
@@ -1096,13 +1022,10 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1096 ch->ch_flags &= ~(CH_OPENING); 1022 ch->ch_flags &= ~(CH_OPENING);
1097 wake_up_interruptible(&ch->ch_flags_wait); 1023 wake_up_interruptible(&ch->ch_flags_wait);
1098 1024
1099 /* 1025 /* Initialize if neither terminal or printer is open. */
1100 * Initialize if neither terminal or printer is open. 1026
1101 */
1102 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) { 1027 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
1103 /* 1028 /* Flush input queues. */
1104 * Flush input queues.
1105 */
1106 ch->ch_r_head = 0; 1029 ch->ch_r_head = 0;
1107 ch->ch_r_tail = 0; 1030 ch->ch_r_tail = 0;
1108 ch->ch_e_head = 0; 1031 ch->ch_e_head = 0;
@@ -1138,16 +1061,13 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1138 brd->bd_ops->uart_init(ch); 1061 brd->bd_ops->uart_init(ch);
1139 } 1062 }
1140 1063
1141 /* 1064 /* Run param in case we changed anything */
1142 * Run param in case we changed anything 1065
1143 */
1144 brd->bd_ops->param(tty); 1066 brd->bd_ops->param(tty);
1145 1067
1146 dgnc_carrier(ch); 1068 dgnc_carrier(ch);
1147 1069
1148 /* 1070 /* follow protocol for opening port */
1149 * follow protocol for opening port
1150 */
1151 1071
1152 spin_unlock_irqrestore(&ch->ch_lock, flags); 1072 spin_unlock_irqrestore(&ch->ch_lock, flags);
1153 1073
@@ -1248,9 +1168,8 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
1248 break; 1168 break;
1249 } 1169 }
1250 1170
1251 /* 1171 /* Store the flags before we let go of channel lock */
1252 * Store the flags before we let go of channel lock 1172
1253 */
1254 if (sleep_on_un_flags) 1173 if (sleep_on_un_flags)
1255 old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags; 1174 old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
1256 else 1175 else
@@ -1269,12 +1188,13 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
1269 * from the current value. 1188 * from the current value.
1270 */ 1189 */
1271 if (sleep_on_un_flags) 1190 if (sleep_on_un_flags)
1272 retval = wait_event_interruptible(un->un_flags_wait, 1191 retval = wait_event_interruptible
1273 (old_flags != (ch->ch_tun.un_flags | 1192 (un->un_flags_wait,
1274 ch->ch_pun.un_flags))); 1193 (old_flags != (ch->ch_tun.un_flags |
1194 ch->ch_pun.un_flags)));
1275 else 1195 else
1276 retval = wait_event_interruptible(ch->ch_flags_wait, 1196 retval = wait_event_interruptible(ch->ch_flags_wait,
1277 (old_flags != ch->ch_flags)); 1197 (old_flags != ch->ch_flags));
1278 1198
1279 /* 1199 /*
1280 * We got woken up for some reason. 1200 * We got woken up for some reason.
@@ -1304,10 +1224,8 @@ static void dgnc_tty_hangup(struct tty_struct *tty)
1304 dgnc_tty_flush_buffer(tty); 1224 dgnc_tty_flush_buffer(tty);
1305} 1225}
1306 1226
1307/* 1227/* dgnc_tty_close() */
1308 * dgnc_tty_close() 1228
1309 *
1310 */
1311static void dgnc_tty_close(struct tty_struct *tty, struct file *file) 1229static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
1312{ 1230{
1313 struct dgnc_board *bd; 1231 struct dgnc_board *bd;
@@ -1377,9 +1295,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
1377 !(ch->ch_digi.digi_flags & DIGI_PRINTER)) { 1295 !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
1378 ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI); 1296 ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
1379 1297
1380 /* 1298 /* turn off print device when closing print device. */
1381 * turn off print device when closing print device. 1299
1382 */
1383 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { 1300 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
1384 dgnc_wmove(ch, ch->ch_digi.digi_offstr, 1301 dgnc_wmove(ch, ch->ch_digi.digi_offstr,
1385 (int)ch->ch_digi.digi_offlen); 1302 (int)ch->ch_digi.digi_offlen);
@@ -1399,9 +1316,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
1399 1316
1400 tty->closing = 0; 1317 tty->closing = 0;
1401 1318
1402 /* 1319 /* If we have HUPCL set, lower DTR and RTS */
1403 * If we have HUPCL set, lower DTR and RTS 1320
1404 */
1405 if (ch->ch_c_cflag & HUPCL) { 1321 if (ch->ch_c_cflag & HUPCL) {
1406 /* Drop RTS/DTR */ 1322 /* Drop RTS/DTR */
1407 ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS); 1323 ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
@@ -1424,9 +1340,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
1424 /* Turn off UART interrupts for this port */ 1340 /* Turn off UART interrupts for this port */
1425 ch->ch_bd->bd_ops->uart_off(ch); 1341 ch->ch_bd->bd_ops->uart_off(ch);
1426 } else { 1342 } else {
1427 /* 1343 /* turn off print device when closing print device. */
1428 * turn off print device when closing print device. 1344
1429 */
1430 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { 1345 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
1431 dgnc_wmove(ch, ch->ch_digi.digi_offstr, 1346 dgnc_wmove(ch, ch->ch_digi.digi_offstr,
1432 (int)ch->ch_digi.digi_offlen); 1347 (int)ch->ch_digi.digi_offlen);
@@ -1543,7 +1458,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
1543 int ret = 0; 1458 int ret = 0;
1544 unsigned long flags; 1459 unsigned long flags;
1545 1460
1546 if (!tty || !dgnc_TmpWriteBuf) 1461 if (!tty)
1547 return 0; 1462 return 0;
1548 1463
1549 un = tty->driver_data; 1464 un = tty->driver_data;
@@ -1598,9 +1513,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
1598 */ 1513 */
1599static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c) 1514static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
1600{ 1515{
1601 /* 1516 /* Simply call tty_write. */
1602 * Simply call tty_write. 1517
1603 */
1604 dgnc_tty_write(tty, &c, 1); 1518 dgnc_tty_write(tty, &c, 1);
1605 return 1; 1519 return 1;
1606} 1520}
@@ -1623,7 +1537,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
1623 ushort tmask; 1537 ushort tmask;
1624 uint remain; 1538 uint remain;
1625 1539
1626 if (!tty || !dgnc_TmpWriteBuf) 1540 if (!tty)
1627 return 0; 1541 return 0;
1628 1542
1629 un = tty->driver_data; 1543 un = tty->driver_data;
@@ -1667,9 +1581,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
1667 */ 1581 */
1668 count = min(count, bufcount); 1582 count = min(count, bufcount);
1669 1583
1670 /* 1584 /* Bail if no space left. */
1671 * Bail if no space left. 1585
1672 */
1673 if (count <= 0) 1586 if (count <= 0)
1674 goto exit_retry; 1587 goto exit_retry;
1675 1588
@@ -1712,9 +1625,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
1712 } 1625 }
1713 1626
1714 if (n > 0) { 1627 if (n > 0) {
1715 /* 1628 /* Move rest of data. */
1716 * Move rest of data.
1717 */
1718 remain = n; 1629 remain = n;
1719 memcpy(ch->ch_wqueue + head, buf, remain); 1630 memcpy(ch->ch_wqueue + head, buf, remain);
1720 head += remain; 1631 head += remain;
@@ -1749,9 +1660,7 @@ exit_retry:
1749 return 0; 1660 return 0;
1750} 1661}
1751 1662
1752/* 1663/* Return modem signals to ld. */
1753 * Return modem signals to ld.
1754 */
1755 1664
1756static int dgnc_tty_tiocmget(struct tty_struct *tty) 1665static int dgnc_tty_tiocmget(struct tty_struct *tty)
1757{ 1666{
@@ -1960,9 +1869,8 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
1960 dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n"); 1869 dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
1961} 1870}
1962 1871
1963/* 1872/* Return modem signals to ld. */
1964 * Return modem signals to ld. 1873
1965 */
1966static inline int dgnc_get_mstat(struct channel_t *ch) 1874static inline int dgnc_get_mstat(struct channel_t *ch)
1967{ 1875{
1968 unsigned char mstat; 1876 unsigned char mstat;
@@ -1994,9 +1902,8 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
1994 return result; 1902 return result;
1995} 1903}
1996 1904
1997/* 1905/* Return modem signals to ld. */
1998 * Return modem signals to ld. 1906
1999 */
2000static int dgnc_get_modem_info(struct channel_t *ch, 1907static int dgnc_get_modem_info(struct channel_t *ch,
2001 unsigned int __user *value) 1908 unsigned int __user *value)
2002{ 1909{
@@ -2070,9 +1977,6 @@ static int dgnc_set_modem_info(struct channel_t *ch,
2070 * dgnc_tty_digigeta() 1977 * dgnc_tty_digigeta()
2071 * 1978 *
2072 * Ioctl to get the information for ditty. 1979 * Ioctl to get the information for ditty.
2073 *
2074 *
2075 *
2076 */ 1980 */
2077static int dgnc_tty_digigeta(struct tty_struct *tty, 1981static int dgnc_tty_digigeta(struct tty_struct *tty,
2078 struct digi_t __user *retinfo) 1982 struct digi_t __user *retinfo)
@@ -2112,9 +2016,6 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
2112 * dgnc_tty_digiseta() 2016 * dgnc_tty_digiseta()
2113 * 2017 *
2114 * Ioctl to set the information for ditty. 2018 * Ioctl to set the information for ditty.
2115 *
2116 *
2117 *
2118 */ 2019 */
2119static int dgnc_tty_digiseta(struct tty_struct *tty, 2020static int dgnc_tty_digiseta(struct tty_struct *tty,
2120 struct digi_t __user *new_info) 2021 struct digi_t __user *new_info)
@@ -2145,9 +2046,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
2145 2046
2146 spin_lock_irqsave(&ch->ch_lock, flags); 2047 spin_lock_irqsave(&ch->ch_lock, flags);
2147 2048
2148 /* 2049 /* Handle transitions to and from RTS Toggle. */
2149 * Handle transistions to and from RTS Toggle. 2050
2150 */
2151 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && 2051 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) &&
2152 (new_digi.digi_flags & DIGI_RTS_TOGGLE)) 2052 (new_digi.digi_flags & DIGI_RTS_TOGGLE))
2153 ch->ch_mostat &= ~(UART_MCR_RTS); 2053 ch->ch_mostat &= ~(UART_MCR_RTS);
@@ -2155,9 +2055,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
2155 !(new_digi.digi_flags & DIGI_RTS_TOGGLE)) 2055 !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
2156 ch->ch_mostat |= (UART_MCR_RTS); 2056 ch->ch_mostat |= (UART_MCR_RTS);
2157 2057
2158 /* 2058 /* Handle transitions to and from DTR Toggle. */
2159 * Handle transistions to and from DTR Toggle. 2059
2160 */
2161 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && 2060 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) &&
2162 (new_digi.digi_flags & DIGI_DTR_TOGGLE)) 2061 (new_digi.digi_flags & DIGI_DTR_TOGGLE))
2163 ch->ch_mostat &= ~(UART_MCR_DTR); 2062 ch->ch_mostat &= ~(UART_MCR_DTR);
@@ -2195,9 +2094,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
2195 return 0; 2094 return 0;
2196} 2095}
2197 2096
2198/* 2097/* dgnc_set_termios() */
2199 * dgnc_set_termios() 2098
2200 */
2201static void dgnc_tty_set_termios(struct tty_struct *tty, 2099static void dgnc_tty_set_termios(struct tty_struct *tty,
2202 struct ktermios *old_termios) 2100 struct ktermios *old_termios)
2203{ 2101{
@@ -2428,11 +2326,18 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
2428 spin_unlock_irqrestore(&ch->ch_lock, flags); 2326 spin_unlock_irqrestore(&ch->ch_lock, flags);
2429} 2327}
2430 2328
2431/***************************************************************************** 2329/*
2432 * 2330 * dgnc_wake_up_unit()
2433 * The IOCTL function and all of its helpers
2434 * 2331 *
2435 *****************************************************************************/ 2332 * Wakes up processes waiting in the unit's (terminal/printer) wait queue
2333 */
2334static void dgnc_wake_up_unit(struct un_t *unit)
2335{
2336 unit->un_flags &= ~(UN_LOW | UN_EMPTY);
2337 wake_up_interruptible(&unit->un_flags_wait);
2338}
2339
2340/* The IOCTL function and all of its helpers */
2436 2341
2437/* 2342/*
2438 * dgnc_tty_ioctl() 2343 * dgnc_tty_ioctl()
@@ -2506,7 +2411,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2506 return 0; 2411 return 0;
2507 2412
2508 case TCSBRKP: 2413 case TCSBRKP:
2509 /* support for POSIX tcsendbreak() 2414 /*
2415 * support for POSIX tcsendbreak()
2510 * According to POSIX.1 spec (7.2.2.1.2) breaks should be 2416 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
2511 * between 0.25 and 0.5 seconds so we'll ask for something 2417 * between 0.25 and 0.5 seconds so we'll ask for something
2512 * in the middle: 0.375 seconds. 2418 * in the middle: 0.375 seconds.
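
The 0.375 s figure is simply the midpoint of the 0.25 to 0.5 s break length that POSIX suggests. For reference, userspace reaches this case through the TCSBRKP ioctl; a minimal sketch, with the zero-argument behaviour stated as an assumption rather than taken from this hunk:

	#include <sys/ioctl.h>

	/* Sketch: request a break via TCSBRKP; an argument of 0 is assumed
	 * to ask for the driver's default length (0.375 s in this handler),
	 * while a non-zero value is conventionally a count of deciseconds. */
	static int send_posix_break(int fd)
	{
		return ioctl(fd, TCSBRKP, 0);
	}
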
@@ -2583,9 +2489,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2583 spin_unlock_irqrestore(&ch->ch_lock, flags); 2489 spin_unlock_irqrestore(&ch->ch_lock, flags);
2584 return dgnc_set_modem_info(ch, cmd, uarg); 2490 return dgnc_set_modem_info(ch, cmd, uarg);
2585 2491
2586 /* 2492 /* Here are any additional ioctl's that we want to implement */
2587 * Here are any additional ioctl's that we want to implement
2588 */
2589 2493
2590 case TCFLSH: 2494 case TCFLSH:
2591 /* 2495 /*
@@ -2615,17 +2519,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2615 ch->ch_w_head = ch->ch_w_tail; 2519 ch->ch_w_head = ch->ch_w_tail;
2616 ch_bd_ops->flush_uart_write(ch); 2520 ch_bd_ops->flush_uart_write(ch);
2617 2521
2618 if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) { 2522 if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY))
2619 ch->ch_tun.un_flags &= 2523 dgnc_wake_up_unit(&ch->ch_tun);
2620 ~(UN_LOW | UN_EMPTY); 2524
2621 wake_up_interruptible(&ch->ch_tun.un_flags_wait); 2525 if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY))
2622 } 2526 dgnc_wake_up_unit(&ch->ch_pun);
2623
2624 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
2625 ch->ch_pun.un_flags &=
2626 ~(UN_LOW | UN_EMPTY);
2627 wake_up_interruptible(&ch->ch_pun.un_flags_wait);
2628 }
2629 } 2527 }
2630 } 2528 }
2631 2529
@@ -2705,9 +2603,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2705 case DIGI_LOOPBACK: 2603 case DIGI_LOOPBACK:
2706 { 2604 {
2707 uint loopback = 0; 2605 uint loopback = 0;
2708 /* Let go of locks when accessing user space, 2606 /*
2607 * Let go of locks when accessing user space,
2709 * could sleep 2608 * could sleep
2710 */ 2609 */
2711 spin_unlock_irqrestore(&ch->ch_lock, flags); 2610 spin_unlock_irqrestore(&ch->ch_lock, flags);
2712 rc = get_user(loopback, (unsigned int __user *)arg); 2611 rc = get_user(loopback, (unsigned int __user *)arg);
2713 if (rc) 2612 if (rc)
@@ -2749,7 +2648,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2749 * This ioctl allows insertion of a character into the front 2648 * This ioctl allows insertion of a character into the front
2750 * of any pending data to be transmitted. 2649 * of any pending data to be transmitted.
2751 * 2650 *
2752 * This ioctl is to satify the "Send Character Immediate" 2651 * This ioctl is to satisfy the "Send Character Immediate"
2753 * call that the RealPort protocol spec requires. 2652 * call that the RealPort protocol spec requires.
2754 */ 2653 */
2755 case DIGI_REALPORT_SENDIMMEDIATE: 2654 case DIGI_REALPORT_SENDIMMEDIATE:
@@ -2769,7 +2668,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2769 /* 2668 /*
2770 * This ioctl returns all the current counts for the port. 2669 * This ioctl returns all the current counts for the port.
2771 * 2670 *
2772 * This ioctl is to satify the "Line Error Counters" 2671 * This ioctl is to satisfy the "Line Error Counters"
2773 * call that the RealPort protocol spec requires. 2672 * call that the RealPort protocol spec requires.
2774 */ 2673 */
2775 case DIGI_REALPORT_GETCOUNTERS: 2674 case DIGI_REALPORT_GETCOUNTERS:
@@ -2795,7 +2694,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2795 /* 2694 /*
2796 * This ioctl returns all current events. 2695 * This ioctl returns all current events.
2797 * 2696 *
2798 * This ioctl is to satify the "Event Reporting" 2697 * This ioctl is to satisfy the "Event Reporting"
2799 * call that the RealPort protocol spec requires. 2698 * call that the RealPort protocol spec requires.
2800 */ 2699 */
2801 case DIGI_REALPORT_GETEVENTS: 2700 case DIGI_REALPORT_GETEVENTS:
@@ -2831,23 +2730,23 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2831 2730
2832 spin_unlock_irqrestore(&ch->ch_lock, flags); 2731 spin_unlock_irqrestore(&ch->ch_lock, flags);
2833 2732
2834 /* 2733 /* Get data from user first. */
2835 * Get data from user first. 2734
2836 */
2837 if (copy_from_user(&buf, uarg, sizeof(buf))) 2735 if (copy_from_user(&buf, uarg, sizeof(buf)))
2838 return -EFAULT; 2736 return -EFAULT;
2839 2737
2840 spin_lock_irqsave(&ch->ch_lock, flags); 2738 spin_lock_irqsave(&ch->ch_lock, flags);
2841 2739
2842 /* 2740 /* Figure out how much data is in our RX and TX queues. */
2843 * Figure out how much data is in our RX and TX queues. 2741
2844 */
2845 buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK; 2742 buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
2846 buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK; 2743 buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
2847 2744
2848 /* 2745 /*
2849 * Is the UART empty? Add that value to whats in our TX queue. 2746 * Is the UART empty?
2747 * Add that value to whats in our TX queue.
2850 */ 2748 */
2749
2851 count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch); 2750 count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
2852 2751
2853 /* 2752 /*
@@ -2867,9 +2766,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2867 if (buf.txbuf > tdist) 2766 if (buf.txbuf > tdist)
2868 buf.txbuf = tdist; 2767 buf.txbuf = tdist;
2869 2768
2870 /* 2769 /* Report whether our queue and UART TX are completely empty. */
2871 * Report whether our queue and UART TX are completely empty. 2770
2872 */
2873 if (count) 2771 if (count)
2874 buf.txdone = 0; 2772 buf.txdone = 0;
2875 else 2773 else
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 24c9a412211e..1ee0eeeb4730 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -21,11 +21,9 @@
21int dgnc_tty_register(struct dgnc_board *brd); 21int dgnc_tty_register(struct dgnc_board *brd);
22void dgnc_tty_unregister(struct dgnc_board *brd); 22void dgnc_tty_unregister(struct dgnc_board *brd);
23 23
24int dgnc_tty_preinit(void); 24int dgnc_tty_init(struct dgnc_board *brd);
25int dgnc_tty_init(struct dgnc_board *);
26 25
27void dgnc_tty_post_uninit(void); 26void dgnc_cleanup_tty(struct dgnc_board *brd);
28void dgnc_cleanup_tty(struct dgnc_board *);
29 27
30void dgnc_input(struct channel_t *ch); 28void dgnc_input(struct channel_t *ch);
31void dgnc_carrier(struct channel_t *ch); 29void dgnc_carrier(struct channel_t *ch);
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 5b983e6f5ee2..ec2e3dda6119 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -17,16 +17,16 @@
17#define __DIGI_H 17#define __DIGI_H
18 18
19#ifndef TIOCM_LE 19#ifndef TIOCM_LE
20#define TIOCM_LE 0x01 /* line enable */ 20#define TIOCM_LE 0x01 /* line enable */
21#define TIOCM_DTR 0x02 /* data terminal ready */ 21#define TIOCM_DTR 0x02 /* data terminal ready */
22#define TIOCM_RTS 0x04 /* request to send */ 22#define TIOCM_RTS 0x04 /* request to send */
23#define TIOCM_ST 0x08 /* secondary transmit */ 23#define TIOCM_ST 0x08 /* secondary transmit */
24#define TIOCM_SR 0x10 /* secondary receive */ 24#define TIOCM_SR 0x10 /* secondary receive */
25#define TIOCM_CTS 0x20 /* clear to send */ 25#define TIOCM_CTS 0x20 /* clear to send */
26#define TIOCM_CAR 0x40 /* carrier detect */ 26#define TIOCM_CAR 0x40 /* carrier detect */
27#define TIOCM_RNG 0x80 /* ring indicator */ 27#define TIOCM_RNG 0x80 /* ring indicator */
28#define TIOCM_DSR 0x100 /* data set ready */ 28#define TIOCM_DSR 0x100 /* data set ready */
29#define TIOCM_RI TIOCM_RNG /* ring (alternate) */ 29#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
30#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */ 30#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
31#endif 31#endif
32 32
@@ -40,72 +40,71 @@
40#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */ 40#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */
41#endif 41#endif
42 42
43#define DIGI_GETA (('e' << 8) | 94) /* Read params */ 43#define DIGI_GETA (('e' << 8) | 94) /* Read params */
44#define DIGI_SETA (('e' << 8) | 95) /* Set params */ 44#define DIGI_SETA (('e' << 8) | 95) /* Set params */
45#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */ 45#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
46#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */ 46#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
47#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */ 47#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
48#define DIGI_LOOPBACK (('d' << 8) | 252) /* 48#define DIGI_LOOPBACK (('d' << 8) | 252) /*
49 * Enable/disable UART 49 * Enable/disable UART
50 * internal loopback 50 * internal loopback
51 */ 51 */
52#define DIGI_FAST 0x0002 /* Fast baud rates */ 52#define DIGI_FAST 0x0002 /* Fast baud rates */
53#define RTSPACE 0x0004 /* RTS input flow control */ 53#define RTSPACE 0x0004 /* RTS input flow control */
54#define CTSPACE 0x0008 /* CTS output flow control */ 54#define CTSPACE 0x0008 /* CTS output flow control */
55#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */ 55#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
56#define DIGI_FORCEDCD 0x0100 /* Force carrier */ 56#define DIGI_FORCEDCD 0x0100 /* Force carrier */
57#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */ 57#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
58#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/ 58#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
59#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */ 59#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
60#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */ 60#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
61#define DIGI_PLEN 28 /* String length */ 61#define DIGI_PLEN 28 /* String length */
62#define DIGI_TSIZ 10 /* Terminal string len */ 62#define DIGI_TSIZ 10 /* Terminal string len */
63 63
64/************************************************************************ 64/*
65 * Structure used with ioctl commands for DIGI parameters. 65 * Structure used with ioctl commands for DIGI parameters.
66 ************************************************************************/ 66 */
67struct digi_t { 67struct digi_t {
68 unsigned short digi_flags; /* Flags (see above) */ 68 unsigned short digi_flags; /* Flags (see above) */
69 unsigned short digi_maxcps; /* Max printer CPS */ 69 unsigned short digi_maxcps; /* Max printer CPS */
70 unsigned short digi_maxchar; /* Max chars in print queue */ 70 unsigned short digi_maxchar; /* Max chars in print queue */
71 unsigned short digi_bufsize; /* Buffer size */ 71 unsigned short digi_bufsize; /* Buffer size */
72 unsigned char digi_onlen; /* Length of ON string */ 72 unsigned char digi_onlen; /* Length of ON string */
73 unsigned char digi_offlen; /* Length of OFF string */ 73 unsigned char digi_offlen; /* Length of OFF string */
74 char digi_onstr[DIGI_PLEN]; /* Printer on string */ 74 char digi_onstr[DIGI_PLEN]; /* Printer on string */
75 char digi_offstr[DIGI_PLEN]; /* Printer off string */ 75 char digi_offstr[DIGI_PLEN]; /* Printer off string */
76 char digi_term[DIGI_TSIZ]; /* terminal string */ 76 char digi_term[DIGI_TSIZ]; /* terminal string */
77}; 77};
78 78
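
struct digi_t is the payload carried by the DIGI_GETA/DIGI_SETA family of ioctls defined above. A hedged userspace sketch of fetching it; the device node name is illustrative and digi.h is assumed to be on the include path:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include "digi.h"

	int main(void)
	{
		struct digi_t dp;
		int fd = open("/dev/ttyn1a", O_RDONLY | O_NOCTTY);

		if (fd < 0 || ioctl(fd, DIGI_GETA, &dp) < 0) {
			perror("DIGI_GETA");
			return 1;
		}
		printf("flags=0x%x maxcps=%u term=%.10s\n",
		       (unsigned int)dp.digi_flags,
		       (unsigned int)dp.digi_maxcps, dp.digi_term);
		return 0;
	}
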
79/************************************************************************ 79/* Structure to get driver status information */
80 * Structure to get driver status information 80
81 ************************************************************************/
82struct digi_dinfo { 81struct digi_dinfo {
83 unsigned int dinfo_nboards; /* # boards configured */ 82 unsigned int dinfo_nboards; /* # boards configured */
84 char dinfo_reserved[12]; /* for future expansion */ 83 char dinfo_reserved[12]; /* for future expansion */
85 char dinfo_version[16]; /* driver version */ 84 char dinfo_version[16]; /* driver version */
86}; 85};
87 86
88#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */ 87#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
89 88
90/************************************************************************ 89/*
91 * Structure used with ioctl commands for per-board information 90 * Structure used with ioctl commands for per-board information
92 * 91 *
93 * physsize and memsize differ when board has "windowed" memory 92 * physsize and memsize differ when board has "windowed" memory
94 ************************************************************************/ 93 */
95struct digi_info { 94struct digi_info {
96 unsigned int info_bdnum; /* Board number (0 based) */ 95 unsigned int info_bdnum; /* Board number (0 based) */
97 unsigned int info_ioport; /* io port address */ 96 unsigned int info_ioport; /* io port address */
98 unsigned int info_physaddr; /* memory address */ 97 unsigned int info_physaddr; /* memory address */
99 unsigned int info_physsize; /* Size of host mem window */ 98 unsigned int info_physsize; /* Size of host mem window */
100 unsigned int info_memsize; /* Amount of dual-port mem */ 99 unsigned int info_memsize; /* Amount of dual-port mem */
101 /* on board */ 100 /* on board */
102 unsigned short info_bdtype; /* Board type */ 101 unsigned short info_bdtype; /* Board type */
103 unsigned short info_nports; /* number of ports */ 102 unsigned short info_nports; /* number of ports */
104 char info_bdstate; /* board state */ 103 char info_bdstate; /* board state */
105 char info_reserved[7]; /* for future expansion */ 104 char info_reserved[7]; /* for future expansion */
106}; 105};
107 106
108#define DIGI_GETBD (('d' << 8) | 249) /* get board info */ 107#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
109 108
110struct digi_getbuffer /* Struct for holding buffer use counts */ 109struct digi_getbuffer /* Struct for holding buffer use counts */
111{ 110{
@@ -139,7 +138,7 @@ struct digi_getcounter {
139#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111) 138#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
140 139
141#define EV_OPU 0x0001 /* !<Output paused by client */ 140#define EV_OPU 0x0001 /* !<Output paused by client */
142#define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */ 141#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
143#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */ 142#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
144#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */ 143#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
145#define EV_TXB 0x0040 /* !<Transmit break pending */ 144#define EV_TXB 0x0040 /* !<Transmit break pending */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index c3e298843b43..3f42fa8b0bf3 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -153,7 +153,6 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
153 udc = (struct nbu2ss_udc *)_req->context; 153 udc = (struct nbu2ss_udc *)_req->context;
154 p_ctrl = &udc->ctrl; 154 p_ctrl = &udc->ctrl;
155 if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 155 if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
156
157 if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) { 156 if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
158 /*-------------------------------------------------*/ 157 /*-------------------------------------------------*/
159 /* SET_FEATURE */ 158 /* SET_FEATURE */
@@ -263,7 +262,7 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
263 } 262 }
264 263
265 _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); 264 _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
266 _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct)); 265 _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
267 266
268 if (ep->direct == USB_DIR_OUT) { 267 if (ep->direct == USB_DIR_OUT) {
269 /*---------------------------------------------------------*/ 268 /*---------------------------------------------------------*/
@@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end(
460 if (length) 459 if (length)
461 _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32); 460 _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
462 461
463 data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND; 462 data = (((length) << 5) & EPn_DW) | EPn_DEND;
464 _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data); 463 _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
465 464
466 _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO); 465 _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -753,7 +752,6 @@ static int _nbu2ss_ep0_out_transfer(
753 /* Receive data confirmation */ 752 /* Receive data confirmation */
754 iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA; 753 iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
755 if (iRecvLength != 0) { 754 if (iRecvLength != 0) {
756
757 fRcvZero = 0; 755 fRcvZero = 0;
758 756
759 iRemainSize = req->req.length - req->req.actual; 757 iRemainSize = req->req.length - req->req.actual;
@@ -928,9 +926,8 @@ static int _nbu2ss_epn_out_pio(
928 926
929 req->req.actual += result; 927 req->req.actual += result;
930 928
931 if ((req->req.actual == req->req.length) 929 if ((req->req.actual == req->req.length) ||
932 || ((req->req.actual % ep->ep.maxpacket) != 0)) { 930 ((req->req.actual % ep->ep.maxpacket) != 0)) {
933
934 result = 0; 931 result = 0;
935 } 932 }
936 933
@@ -956,9 +953,8 @@ static int _nbu2ss_epn_out_data(
956 953
957 iBufSize = min((req->req.length - req->req.actual), data_size); 954 iBufSize = min((req->req.length - req->req.actual), data_size);
958 955
959 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) 956 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
960 && (req->req.dma != 0) 957 (iBufSize >= sizeof(u32))) {
961 && (iBufSize >= sizeof(u32))) {
962 nret = _nbu2ss_out_dma(udc, req, num, iBufSize); 958 nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
963 } else { 959 } else {
964 iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket); 960 iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
@@ -999,9 +995,8 @@ static int _nbu2ss_epn_out_transfer(
999 } 995 }
1000 } 996 }
1001 } else { 997 } else {
1002 if ((req->req.actual == req->req.length) 998 if ((req->req.actual == req->req.length) ||
1003 || ((req->req.actual % ep->ep.maxpacket) != 0)) { 999 ((req->req.actual % ep->ep.maxpacket) != 0)) {
1004
1005 result = 0; 1000 result = 0;
1006 } 1001 }
1007 } 1002 }
@@ -1170,9 +1165,8 @@ static int _nbu2ss_epn_in_data(
1170 1165
1171 num = ep->epnum - 1; 1166 num = ep->epnum - 1;
1172 1167
1173 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) 1168 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
1174 && (req->req.dma != 0) 1169 (data_size >= sizeof(u32))) {
1175 && (data_size >= sizeof(u32))) {
1176 nret = _nbu2ss_in_dma(udc, ep, req, num, data_size); 1170 nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
1177 } else { 1171 } else {
1178 data_size = min_t(u32, data_size, ep->ep.maxpacket); 1172 data_size = min_t(u32, data_size, ep->ep.maxpacket);
@@ -1557,7 +1551,6 @@ static void _nbu2ss_epn_set_stall(
1557 for (limit_cnt = 0 1551 for (limit_cnt = 0
1558 ; limit_cnt < IN_DATA_EMPTY_COUNT 1552 ; limit_cnt < IN_DATA_EMPTY_COUNT
1559 ; limit_cnt++) { 1553 ; limit_cnt++) {
1560
1561 regdata = _nbu2ss_readl( 1554 regdata = _nbu2ss_readl(
1562 &preg->EP_REGS[ep->epnum - 1].EP_STATUS); 1555 &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
1563 1556
@@ -1582,11 +1575,8 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
1582 u8 ep_adrs; 1575 u8 ep_adrs;
1583 int result = -EINVAL; 1576 int result = -EINVAL;
1584 1577
1585 if ((udc->ctrl.wValue != 0x0000) 1578 if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
1586 || (direction != USB_DIR_IN)) {
1587
1588 return result; 1579 return result;
1589 }
1590 1580
1591 length = min_t(u16, udc->ctrl.wLength, sizeof(status_data)); 1581 length = min_t(u16, udc->ctrl.wLength, sizeof(status_data));
1592 1582
@@ -1852,7 +1842,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
1852 1842
1853 status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS); 1843 status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
1854 intr = status & EP0_STATUS_RW_BIT; 1844 intr = status & EP0_STATUS_RW_BIT;
1855 _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr); 1845 _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
1856 1846
1857 status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT 1847 status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
1858 | STG_END_INT | EP0_OUT_NULL_INT); 1848 | STG_END_INT | EP0_OUT_NULL_INT);
@@ -1897,9 +1887,8 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
1897 break; 1887 break;
1898 1888
1899 case EP0_OUT_STATUS_PAHSE: 1889 case EP0_OUT_STATUS_PAHSE:
1900 if ((status & STG_END_INT) 1890 if ((status & STG_END_INT) || (status & SETUP_INT) ||
1901 || (status & SETUP_INT) 1891 (status & EP0_OUT_NULL_INT)) {
1902 || (status & EP0_OUT_NULL_INT)) {
1903 status &= ~(STG_END_INT 1892 status &= ~(STG_END_INT
1904 | EP0_OUT_INT 1893 | EP0_OUT_INT
1905 | EP0_OUT_NULL_INT); 1894 | EP0_OUT_NULL_INT);
@@ -1982,7 +1971,6 @@ static inline void _nbu2ss_epn_in_int(
1982 1971
1983 } else { 1972 } else {
1984 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) { 1973 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
1985
1986 status = 1974 status =
1987 _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS); 1975 _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
1988 1976
@@ -2127,7 +2115,7 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
2127 status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS); 2115 status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
2128 2116
2129 /* Interrupt Clear */ 2117 /* Interrupt Clear */
2130 _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status); 2118 _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
2131 2119
2132 req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue); 2120 req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
2133 if (!req) { 2121 if (!req) {
@@ -2330,7 +2318,6 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
2330 /* VBUS ON Check*/ 2318 /* VBUS ON Check*/
2331 reg_dt = gpio_get_value(VBUS_VALUE); 2319 reg_dt = gpio_get_value(VBUS_VALUE);
2332 if (reg_dt == 0) { 2320 if (reg_dt == 0) {
2333
2334 udc->linux_suspended = 0; 2321 udc->linux_suspended = 0;
2335 2322
2336 _nbu2ss_reset_controller(udc); 2323 _nbu2ss_reset_controller(udc);
@@ -2502,7 +2489,6 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
2502 int_bit = status >> 8; 2489 int_bit = status >> 8;
2503 2490
2504 for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) { 2491 for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
2505
2506 if (0x01 & int_bit) 2492 if (0x01 & int_bit)
2507 _nbu2ss_ep_int(udc, epnum); 2493 _nbu2ss_ep_int(udc, epnum);
2508 2494
@@ -2546,9 +2532,8 @@ static int nbu2ss_ep_enable(
2546 } 2532 }
2547 2533
2548 ep_type = usb_endpoint_type(desc); 2534 ep_type = usb_endpoint_type(desc);
2549 if ((ep_type == USB_ENDPOINT_XFER_CONTROL) 2535 if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
2550 || (ep_type == USB_ENDPOINT_XFER_ISOC)) { 2536 (ep_type == USB_ENDPOINT_XFER_ISOC)) {
2551
2552 pr_err(" *** %s, bat bmAttributes\n", __func__); 2537 pr_err(" *** %s, bat bmAttributes\n", __func__);
2553 return -EINVAL; 2538 return -EINVAL;
2554 } 2539 }
@@ -2557,9 +2542,7 @@ static int nbu2ss_ep_enable(
2557 if (udc->vbus_active == 0) 2542 if (udc->vbus_active == 0)
2558 return -ESHUTDOWN; 2543 return -ESHUTDOWN;
2559 2544
2560 if ((!udc->driver) 2545 if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
2561 || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
2562
2563 dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__); 2546 dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
2564 return -ESHUTDOWN; 2547 return -ESHUTDOWN;
2565 } 2548 }
@@ -2674,10 +2657,7 @@ static int nbu2ss_ep_queue(
2674 } 2657 }
2675 2658
2676 req = container_of(_req, struct nbu2ss_req, req); 2659 req = container_of(_req, struct nbu2ss_req, req);
2677 if (unlikely 2660 if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) {
2678 (!_req->complete || !_req->buf
2679 || !list_empty(&req->queue))) {
2680
2681 if (!_req->complete) 2661 if (!_req->complete)
2682 pr_err("udc: %s --- !_req->complete\n", __func__); 2662 pr_err("udc: %s --- !_req->complete\n", __func__);
2683 2663
@@ -2736,7 +2716,6 @@ static int nbu2ss_ep_queue(
2736 list_add_tail(&req->queue, &ep->queue); 2716 list_add_tail(&req->queue, &ep->queue);
2737 2717
2738 if (bflag && !ep->stalled) { 2718 if (bflag && !ep->stalled) {
2739
2740 result = _nbu2ss_start_transfer(udc, ep, req, FALSE); 2719 result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
2741 if (result < 0) { 2720 if (result < 0) {
2742 dev_err(udc->dev, " *** %s, result = %d\n", __func__, 2721 dev_err(udc->dev, " *** %s, result = %d\n", __func__,
@@ -2938,7 +2917,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
2938} 2917}
2939 2918
2940/*-------------------------------------------------------------------------*/ 2919/*-------------------------------------------------------------------------*/
2941static struct usb_ep_ops nbu2ss_ep_ops = { 2920static const struct usb_ep_ops nbu2ss_ep_ops = {
2942 .enable = nbu2ss_ep_enable, 2921 .enable = nbu2ss_ep_enable,
2943 .disable = nbu2ss_ep_disable, 2922 .disable = nbu2ss_ep_disable,
2944 2923
@@ -2979,9 +2958,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
2979 if (data == 0) 2958 if (data == 0)
2980 return -EINVAL; 2959 return -EINVAL;
2981 2960
2982 data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME; 2961 return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
2983
2984 return data;
2985} 2962}
2986 2963
2987/*-------------------------------------------------------------------------*/ 2964/*-------------------------------------------------------------------------*/
@@ -3307,8 +3284,8 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
3307 for (i = 0; i < NUM_ENDPOINTS; i++) { 3284 for (i = 0; i < NUM_ENDPOINTS; i++) {
3308 ep = &udc->ep[i]; 3285 ep = &udc->ep[i];
3309 if (ep->virt_buf) 3286 if (ep->virt_buf)
3310 dma_free_coherent(NULL, PAGE_SIZE, 3287 dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
3311 (void *)ep->virt_buf, ep->phys_buf); 3288 ep->phys_buf);
3312 } 3289 }
3313 3290
3314 /* Interrupt Handler - Release */ 3291 /* Interrupt Handler - Release */
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 7561385761e9..a6e3af74a904 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -264,6 +264,39 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
264 } 264 }
265} 265}
266 266
267static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
268 int y, signed short *convert_buf,
269 signed short pixel, signed short error)
270{
271 u16 i, j;
272
273 /* diffusion matrix row */
274 for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
275 /* diffusion matrix column */
276 for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
277 signed short *write_pos;
278 signed char coeff;
279
280 /* skip pixels out of zone */
281 if (x + i < 0 || x + i >= xres || y + j >= yres)
282 continue;
283 write_pos = &convert_buf[(y + j) * xres + x + i];
284 coeff = diffusing_matrix[i][j];
285 if (-1 == coeff)
286 /* pixel itself */
287 *write_pos = pixel;
288 else {
289 signed short p = *write_pos + error * coeff;
290
291 if (p > WHITE)
292 p = WHITE;
293 if (p < BLACK)
294 p = BLACK;
295 *write_pos = p;
296 }
297 }
298}
299
267static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) 300static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
268{ 301{
269 u16 *vmem16 = (u16 *)par->info->screen_buffer; 302 u16 *vmem16 = (u16 *)par->info->screen_buffer;
@@ -303,7 +336,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
303 signed short error_b = pixel - BLACK; 336 signed short error_b = pixel - BLACK;
304 signed short error_w = pixel - WHITE; 337 signed short error_w = pixel - WHITE;
305 signed short error; 338 signed short error;
306 u16 i, j;
307 339
308 /* what color close? */ 340 /* what color close? */
309 if (abs(error_b) >= abs(error_w)) { 341 if (abs(error_b) >= abs(error_w)) {
@@ -318,36 +350,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
318 350
319 error /= 8; 351 error /= 8;
320 352
321 /* diffusion matrix row */ 353 iterate_diffusion_matrix(par->info->var.xres,
322 for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i) 354 par->info->var.yres,
323 /* diffusion matrix column */ 355 x, y, convert_buf,
324 for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) { 356 pixel, error);
325 signed short *write_pos;
326 signed char coeff;
327
328 /* skip pixels out of zone */
329 if (x + i < 0 ||
330 x + i >= par->info->var.xres
331 || y + j >= par->info->var.yres)
332 continue;
333 write_pos = &convert_buf[
334 (y + j) * par->info->var.xres +
335 x + i];
336 coeff = diffusing_matrix[i][j];
337 if (coeff == -1)
338 /* pixel itself */
339 *write_pos = pixel;
340 else {
341 signed short p = *write_pos +
342 error * coeff;
343
344 if (p > WHITE)
345 p = WHITE;
346 if (p < BLACK)
347 p = BLACK;
348 *write_pos = p;
349 }
350 }
351 } 357 }
352 358
353 /* 1 string = 2 pages */ 359 /* 1 string = 2 pages */
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index c31e2e051d4a..19e33bab9cac 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -33,26 +33,23 @@
33 "04 16 2 7 6 3 2 1 7 7" 33 "04 16 2 7 6 3 2 1 7 7"
34 34
35static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */ 35static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
36module_param(bt, uint, 0); 36module_param(bt, uint, 0000);
37MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits"); 37MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
38 38
39static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */ 39static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
40module_param(vc, uint, 0); 40module_param(vc, uint, 0000);
41MODULE_PARM_DESC(vc, 41MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1");
42"Sets the ratio factor of Vci to generate the reference voltages Vci1");
43 42
44static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */ 43static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
45module_param(vrh, uint, 0); 44module_param(vrh, uint, 0000);
46MODULE_PARM_DESC(vrh, 45MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
47"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
48 46
49static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */ 47static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
50module_param(vdv, uint, 0); 48module_param(vdv, uint, 0000);
51MODULE_PARM_DESC(vdv, 49MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom");
52"Select the factor of VREG1OUT to set the amplitude of Vcom");
53 50
54static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */ 51static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
55module_param(vcm, uint, 0); 52module_param(vcm, uint, 0000);
56MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage"); 53MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
57 54
58/* 55/*
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 242adb3859bd..4e75f5abe2f9 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -27,7 +27,7 @@
27#define WIDTH 320 27#define WIDTH 320
28#define HEIGHT 480 28#define HEIGHT 480
29 29
30static int default_init_sequence[] = { 30static s16 default_init_sequence[] = {
31 /* SLP_OUT - Sleep out */ 31 /* SLP_OUT - Sleep out */
32 -1, MIPI_DCS_EXIT_SLEEP_MODE, 32 -1, MIPI_DCS_EXIT_SLEEP_MODE,
33 -2, 50, 33 -2, 50,
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index fa38d8885f0b..f4b314265f9e 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -26,7 +26,7 @@
26#define HEIGHT 480 26#define HEIGHT 480
27 27
28/* this init sequence matches PiScreen */ 28/* this init sequence matches PiScreen */
29static int default_init_sequence[] = { 29static s16 default_init_sequence[] = {
30 /* Interface Mode Control */ 30 /* Interface Mode Control */
31 -1, 0xb0, 0x0, 31 -1, 0xb0, 0x0,
32 -1, MIPI_DCS_EXIT_SLEEP_MODE, 32 -1, MIPI_DCS_EXIT_SLEEP_MODE,
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 774b0ff69e6d..eb712aa0d692 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -24,7 +24,7 @@
24 24
25#define DRVNAME "fb_s6d02a1" 25#define DRVNAME "fb_s6d02a1"
26 26
27static int default_init_sequence[] = { 27static s16 default_init_sequence[] = {
28 28
29 -1, 0xf0, 0x5a, 0x5a, 29 -1, 0xf0, 0x5a, 0x5a,
30 30
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 6670f2bb62ec..710b74bbba97 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -25,7 +25,7 @@
25#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \ 25#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
26 "0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10" 26 "0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
27 27
28static int default_init_sequence[] = { 28static s16 default_init_sequence[] = {
29 -1, MIPI_DCS_SOFT_RESET, 29 -1, MIPI_DCS_SOFT_RESET,
30 -2, 150, /* delay */ 30 -2, 150, /* delay */
31 31
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 587f68aa466c..bbe89c9c4fb9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -253,7 +253,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
253 "%s: polarity=%d, power=%d, fb_blank=%d\n", 253 "%s: polarity=%d, power=%d, fb_blank=%d\n",
254 __func__, polarity, bd->props.power, bd->props.fb_blank); 254 __func__, polarity, bd->props.power, bd->props.fb_blank);
255 255
256 if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK)) 256 if ((bd->props.power == FB_BLANK_UNBLANK) &&
257 (bd->props.fb_blank == FB_BLANK_UNBLANK))
257 gpio_set_value(par->gpio.led[0], polarity); 258 gpio_set_value(par->gpio.led[0], polarity);
258 else 259 else
259 gpio_set_value(par->gpio.led[0], !polarity); 260 gpio_set_value(par->gpio.led[0], !polarity);
@@ -299,7 +300,8 @@ void fbtft_register_backlight(struct fbtft_par *par)
299 bl_props.state |= BL_CORE_DRIVER1; 300 bl_props.state |= BL_CORE_DRIVER1;
300 301
301 bd = backlight_device_register(dev_driver_string(par->info->device), 302 bd = backlight_device_register(dev_driver_string(par->info->device),
302 par->info->device, par, &fbtft_bl_ops, &bl_props); 303 par->info->device, par,
304 &fbtft_bl_ops, &bl_props);
303 if (IS_ERR(bd)) { 305 if (IS_ERR(bd)) {
304 dev_err(par->info->device, 306 dev_err(par->info->device,
305 "cannot register backlight device (%ld)\n", 307 "cannot register backlight device (%ld)\n",
@@ -350,9 +352,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
350 bool timeit = false; 352 bool timeit = false;
351 int ret = 0; 353 int ret = 0;
352 354
353 if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) { 355 if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
356 DEBUG_TIME_EACH_UPDATE))) {
354 if ((par->debug & DEBUG_TIME_EACH_UPDATE) || 357 if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
355 ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) { 358 ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
359 !par->first_update_done)) {
356 ts_start = ktime_get(); 360 ts_start = ktime_get();
357 timeit = true; 361 timeit = true;
358 } 362 }
@@ -361,15 +365,17 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
361 /* Sanity checks */ 365 /* Sanity checks */
362 if (start_line > end_line) { 366 if (start_line > end_line) {
363 dev_warn(par->info->device, 367 dev_warn(par->info->device,
364 "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n", 368 "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
365 __func__, start_line, end_line); 369 __func__, start_line, end_line);
366 start_line = 0; 370 start_line = 0;
367 end_line = par->info->var.yres - 1; 371 end_line = par->info->var.yres - 1;
368 } 372 }
369 if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) { 373 if (start_line > par->info->var.yres - 1 ||
374 end_line > par->info->var.yres - 1) {
370 dev_warn(par->info->device, 375 dev_warn(par->info->device,
371 "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n", 376 "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
372 __func__, start_line, end_line, par->info->var.yres - 1); 377 __func__, start_line,
378 end_line, par->info->var.yres - 1);
373 start_line = 0; 379 start_line = 0;
374 end_line = par->info->var.yres - 1; 380 end_line = par->info->var.yres - 1;
375 } 381 }
@@ -660,12 +666,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
660 unsigned int bpp = display->bpp; 666 unsigned int bpp = display->bpp;
661 unsigned int fps = display->fps; 667 unsigned int fps = display->fps;
662 int vmem_size, i; 668 int vmem_size, i;
663 int *init_sequence = display->init_sequence; 669 s16 *init_sequence = display->init_sequence;
664 char *gamma = display->gamma; 670 char *gamma = display->gamma;
665 unsigned long *gamma_curves = NULL; 671 unsigned long *gamma_curves = NULL;
666 672
667 /* sanity check */ 673 /* sanity check */
668 if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) { 674 if (display->gamma_num * display->gamma_len >
675 FBTFT_GAMMA_MAX_VALUES_TOTAL) {
669 dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n", 676 dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
670 FBTFT_GAMMA_MAX_VALUES_TOTAL); 677 FBTFT_GAMMA_MAX_VALUES_TOTAL);
671 return NULL; 678 return NULL;
@@ -832,11 +839,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
832#ifdef CONFIG_HAS_DMA 839#ifdef CONFIG_HAS_DMA
833 if (dma) { 840 if (dma) {
834 dev->coherent_dma_mask = ~0; 841 dev->coherent_dma_mask = ~0;
835 txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA); 842 txbuf = dmam_alloc_coherent(dev, txbuflen,
843 &par->txbuf.dma, GFP_DMA);
836 } else 844 } else
837#endif 845#endif
838 { 846 {
839 txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL); 847 txbuf = devm_kzalloc(par->info->device,
848 txbuflen, GFP_KERNEL);
840 } 849 }
841 if (!txbuf) 850 if (!txbuf)
842 goto alloc_fail; 851 goto alloc_fail;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 89c4b5b76ce6..aacdde92cc2e 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -124,7 +124,7 @@ struct fbtft_display {
124 unsigned int bpp; 124 unsigned int bpp;
125 unsigned int fps; 125 unsigned int fps;
126 int txbuflen; 126 int txbuflen;
127 int *init_sequence; 127 s16 *init_sequence;
128 char *gamma; 128 char *gamma;
129 int gamma_num; 129 int gamma_num;
130 int gamma_len; 130 int gamma_len;
@@ -229,7 +229,7 @@ struct fbtft_par {
229 int led[16]; 229 int led[16];
230 int aux[16]; 230 int aux[16];
231 } gpio; 231 } gpio;
232 int *init_sequence; 232 s16 *init_sequence;
233 struct { 233 struct {
234 struct mutex lock; 234 struct mutex lock;
235 unsigned long *curves; 235 unsigned long *curves;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index e9211831b6a1..de46f8d988d2 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -96,9 +96,9 @@ static unsigned int buswidth = 8;
96module_param(buswidth, uint, 0); 96module_param(buswidth, uint, 0);
97MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument"); 97MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
98 98
99static int init[FBTFT_MAX_INIT_SEQUENCE]; 99static s16 init[FBTFT_MAX_INIT_SEQUENCE];
100static int init_num; 100static int init_num;
101module_param_array(init, int, &init_num, 0); 101module_param_array(init, short, &init_num, 0);
102MODULE_PARM_DESC(init, "Init sequence, used with the custom argument"); 102MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
103 103
104static unsigned long debug; 104static unsigned long debug;
@@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
131 "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \ 131 "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
132 "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20" 132 "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
133 133
134static int cberry28_init_sequence[] = { 134static s16 cberry28_init_sequence[] = {
135 /* turn off sleep mode */ 135 /* turn off sleep mode */
136 -1, MIPI_DCS_EXIT_SLEEP_MODE, 136 -1, MIPI_DCS_EXIT_SLEEP_MODE,
137 -2, 120, 137 -2, 120,
@@ -180,7 +180,7 @@ static int cberry28_init_sequence[] = {
180 -3, 180 -3,
181}; 181};
182 182
183static int hy28b_init_sequence[] = { 183static s16 hy28b_init_sequence[] = {
184 -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001, 184 -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
185 -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, 185 -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
186 -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, 186 -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
@@ -211,7 +211,7 @@ static int hy28b_init_sequence[] = {
211 "04 1F 4 7 7 0 7 7 6 0\n" \ 211 "04 1F 4 7 7 0 7 7 6 0\n" \
212 "0F 00 1 7 4 0 0 0 6 7" 212 "0F 00 1 7 4 0 0 0 6 7"
213 213
214static int pitft_init_sequence[] = { 214static s16 pitft_init_sequence[] = {
215 -1, MIPI_DCS_SOFT_RESET, 215 -1, MIPI_DCS_SOFT_RESET,
216 -2, 5, 216 -2, 5,
217 -1, MIPI_DCS_SET_DISPLAY_OFF, 217 -1, MIPI_DCS_SET_DISPLAY_OFF,
@@ -242,7 +242,7 @@ static int pitft_init_sequence[] = {
242 -3 242 -3
243}; 243};
244 244
245static int waveshare32b_init_sequence[] = { 245static s16 waveshare32b_init_sequence[] = {
246 -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, 246 -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
247 -1, 0xCF, 0x00, 0xC1, 0x30, 247 -1, 0xCF, 0x00, 0xC1, 0x30,
248 -1, 0xE8, 0x85, 0x00, 0x78, 248 -1, 0xE8, 0x85, 0x00, 0x78,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index ce0d254148e4..ded10718712b 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -38,9 +38,9 @@ static unsigned int height;
38module_param(height, uint, 0); 38module_param(height, uint, 0);
39MODULE_PARM_DESC(height, "Display height"); 39MODULE_PARM_DESC(height, "Display height");
40 40
41static int init[512]; 41static s16 init[512];
42static int init_num; 42static int init_num;
43module_param_array(init, int, &init_num, 0); 43module_param_array(init, short, &init_num, 0);
44MODULE_PARM_DESC(init, "Init sequence"); 44MODULE_PARM_DESC(init, "Init sequence");
45 45
46static unsigned int setaddrwin; 46static unsigned int setaddrwin;
@@ -63,68 +63,316 @@ static bool latched;
63module_param(latched, bool, 0); 63module_param(latched, bool, 0);
64MODULE_PARM_DESC(latched, "Use with latched 16-bit databus"); 64MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
65 65
66static int *initp; 66static s16 *initp;
67static int initp_num; 67static int initp_num;
68 68
69/* default init sequences */ 69/* default init sequences */
70static int st7735r_init[] = { 70static s16 st7735r_init[] = {
71-1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D, 71 -1, 0x01,
72-1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E, 72 -2, 150,
73-1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10, 73 -1, 0x11,
74-1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 }; 74 -2, 500,
75 75 -1, 0xB1, 0x01, 0x2C, 0x2D,
76static int ssd1289_init[] = { 76 -1, 0xB2, 0x01, 0x2C, 0x2D,
77-1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600, 77 -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
78-1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000, 78 -1, 0xB4, 0x07,
79-1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00, 79 -1, 0xC0, 0xA2, 0x02, 0x84,
80-1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204, 80 -1, 0xC1, 0xC5,
81-1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 0x8000, -1, 0x4f, 0x0000, 81 -1, 0xC2, 0x0A, 0x00,
82-1, 0x4e, 0x0000, -1, 0x22, -3 }; 82 -1, 0xC3, 0x8A, 0x2A,
83 83 -1, 0xC4, 0x8A, 0xEE,
84static int hx8340bn_init[] = { 84 -1, 0xC5, 0x0E,
85-1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11, 85 -1, 0x20,
86-1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A, 86 -1, 0x36, 0xC0,
87-1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10, 87 -1, 0x3A, 0x05,
88-1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 }; 88 -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
89 89 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
90static int ili9225_init[] = { 90 -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
91-1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000, 91 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
92-1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F, 92 -1, 0x29,
93-1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF, 93 -2, 100,
94-1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303, 94 -1, 0x13,
95-1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50, 95 -2, 10,
96-1, 0x0007, 0x1017, -2, 50, -3 }; 96 -3
97 97};
98static int ili9320_init[] = { 98
99-1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202, 99static s16 ssd1289_init[] = {
100-1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007, 100 -1, 0x00, 0x0001,
101-1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800, 101 -1, 0x03, 0xA8A4,
102-1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004, 102 -1, 0x0C, 0x0000,
103-1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807, 103 -1, 0x0D, 0x080C,
104-1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000, 104 -1, 0x0E, 0x2B00,
105-1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010, 105 -1, 0x1E, 0x00B7,
106-1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 }; 106 -1, 0x01, 0x2B3F,
107 107 -1, 0x02, 0x0600,
108static int ili9325_init[] = { 108 -1, 0x10, 0x0000,
109-1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, 109 -1, 0x11, 0x6070,
110-1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, 110 -1, 0x05, 0x0000,
111-1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50, 111 -1, 0x06, 0x0000,
112-1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, 112 -1, 0x16, 0xEF1C,
113-1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707, 113 -1, 0x17, 0x0003,
114-1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700, 114 -1, 0x07, 0x0233,
115-1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, 115 -1, 0x0B, 0x0000,
116-1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 }; 116 -1, 0x0F, 0x0000,
117 117 -1, 0x41, 0x0000,
118static int ili9341_init[] = { 118 -1, 0x42, 0x0000,
119-1, 0x28, -2, 20, -1, 0xCF, 0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79, 119 -1, 0x48, 0x0000,
120-1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11, 120 -1, 0x49, 0x013F,
121-1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07, 121 -1, 0x4A, 0x0000,
122-1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 }; 122 -1, 0x4B, 0x0000,
123 123 -1, 0x44, 0xEF00,
124static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74, 124 -1, 0x45, 0x0000,
125 -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00, 125 -1, 0x46, 0x013F,
126 -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05, 126 -1, 0x30, 0x0707,
127 -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 }; 127 -1, 0x31, 0x0204,
128 -1, 0x32, 0x0204,
129 -1, 0x33, 0x0502,
130 -1, 0x34, 0x0507,
131 -1, 0x35, 0x0204,
132 -1, 0x36, 0x0204,
133 -1, 0x37, 0x0502,
134 -1, 0x3A, 0x0302,
135 -1, 0x3B, 0x0302,
136 -1, 0x23, 0x0000,
137 -1, 0x24, 0x0000,
138 -1, 0x25, 0x8000,
139 -1, 0x4f, 0x0000,
140 -1, 0x4e, 0x0000,
141 -1, 0x22,
142 -3
143};
144
145static s16 hx8340bn_init[] = {
146 -1, 0xC1, 0xFF, 0x83, 0x40,
147 -1, 0x11,
148 -2, 150,
149 -1, 0xCA, 0x70, 0x00, 0xD9,
150 -1, 0xB0, 0x01, 0x11,
151 -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
152 -2, 20,
153 -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
154 -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
155 -2, 10,
156 -1, 0xB5, 0x35, 0x20, 0x45,
157 -1, 0xB4, 0x33, 0x25, 0x4C,
158 -2, 10,
159 -1, 0x3A, 0x05,
160 -1, 0x29,
161 -2, 10,
162 -3
163};
164
165static s16 ili9225_init[] = {
166 -1, 0x0001, 0x011C,
167 -1, 0x0002, 0x0100,
168 -1, 0x0003, 0x1030,
169 -1, 0x0008, 0x0808,
170 -1, 0x000C, 0x0000,
171 -1, 0x000F, 0x0A01,
172 -1, 0x0020, 0x0000,
173 -1, 0x0021, 0x0000,
174 -2, 50,
175 -1, 0x0010, 0x0A00,
176 -1, 0x0011, 0x1038,
177 -2, 50,
178 -1, 0x0012, 0x1121,
179 -1, 0x0013, 0x004E,
180 -1, 0x0014, 0x676F,
181 -1, 0x0030, 0x0000,
182 -1, 0x0031, 0x00DB,
183 -1, 0x0032, 0x0000,
184 -1, 0x0033, 0x0000,
185 -1, 0x0034, 0x00DB,
186 -1, 0x0035, 0x0000,
187 -1, 0x0036, 0x00AF,
188 -1, 0x0037, 0x0000,
189 -1, 0x0038, 0x00DB,
190 -1, 0x0039, 0x0000,
191 -1, 0x0050, 0x0000,
192 -1, 0x0051, 0x060A,
193 -1, 0x0052, 0x0D0A,
194 -1, 0x0053, 0x0303,
195 -1, 0x0054, 0x0A0D,
196 -1, 0x0055, 0x0A06,
197 -1, 0x0056, 0x0000,
198 -1, 0x0057, 0x0303,
199 -1, 0x0058, 0x0000,
200 -1, 0x0059, 0x0000,
201 -2, 50,
202 -1, 0x0007, 0x1017,
203 -2, 50,
204 -3
205};
206
207static s16 ili9320_init[] = {
208 -1, 0x00E5, 0x8000,
209 -1, 0x0000, 0x0001,
210 -1, 0x0001, 0x0100,
211 -1, 0x0002, 0x0700,
212 -1, 0x0003, 0x1030,
213 -1, 0x0004, 0x0000,
214 -1, 0x0008, 0x0202,
215 -1, 0x0009, 0x0000,
216 -1, 0x000A, 0x0000,
217 -1, 0x000C, 0x0000,
218 -1, 0x000D, 0x0000,
219 -1, 0x000F, 0x0000,
220 -1, 0x0010, 0x0000,
221 -1, 0x0011, 0x0007,
222 -1, 0x0012, 0x0000,
223 -1, 0x0013, 0x0000,
224 -2, 200,
225 -1, 0x0010, 0x17B0,
226 -1, 0x0011, 0x0031,
227 -2, 50,
228 -1, 0x0012, 0x0138,
229 -2, 50,
230 -1, 0x0013, 0x1800,
231 -1, 0x0029, 0x0008,
232 -2, 50,
233 -1, 0x0020, 0x0000,
234 -1, 0x0021, 0x0000,
235 -1, 0x0030, 0x0000,
236 -1, 0x0031, 0x0505,
237 -1, 0x0032, 0x0004,
238 -1, 0x0035, 0x0006,
239 -1, 0x0036, 0x0707,
240 -1, 0x0037, 0x0105,
241 -1, 0x0038, 0x0002,
242 -1, 0x0039, 0x0707,
243 -1, 0x003C, 0x0704,
244 -1, 0x003D, 0x0807,
245 -1, 0x0050, 0x0000,
246 -1, 0x0051, 0x00EF,
247 -1, 0x0052, 0x0000,
248 -1, 0x0053, 0x013F,
249 -1, 0x0060, 0x2700,
250 -1, 0x0061, 0x0001,
251 -1, 0x006A, 0x0000,
252 -1, 0x0080, 0x0000,
253 -1, 0x0081, 0x0000,
254 -1, 0x0082, 0x0000,
255 -1, 0x0083, 0x0000,
256 -1, 0x0084, 0x0000,
257 -1, 0x0085, 0x0000,
258 -1, 0x0090, 0x0010,
259 -1, 0x0092, 0x0000,
260 -1, 0x0093, 0x0003,
261 -1, 0x0095, 0x0110,
262 -1, 0x0097, 0x0000,
263 -1, 0x0098, 0x0000,
264 -1, 0x0007, 0x0173,
265 -3
266};
267
268static s16 ili9325_init[] = {
269 -1, 0x00E3, 0x3008,
270 -1, 0x00E7, 0x0012,
271 -1, 0x00EF, 0x1231,
272 -1, 0x0001, 0x0100,
273 -1, 0x0002, 0x0700,
274 -1, 0x0003, 0x1030,
275 -1, 0x0004, 0x0000,
276 -1, 0x0008, 0x0207,
277 -1, 0x0009, 0x0000,
278 -1, 0x000A, 0x0000,
279 -1, 0x000C, 0x0000,
280 -1, 0x000D, 0x0000,
281 -1, 0x000F, 0x0000,
282 -1, 0x0010, 0x0000,
283 -1, 0x0011, 0x0007,
284 -1, 0x0012, 0x0000,
285 -1, 0x0013, 0x0000,
286 -2, 200,
287 -1, 0x0010, 0x1690,
288 -1, 0x0011, 0x0223,
289 -2, 50,
290 -1, 0x0012, 0x000D,
291 -2, 50,
292 -1, 0x0013, 0x1200,
293 -1, 0x0029, 0x000A,
294 -1, 0x002B, 0x000C,
295 -2, 50,
296 -1, 0x0020, 0x0000,
297 -1, 0x0021, 0x0000,
298 -1, 0x0030, 0x0000,
299 -1, 0x0031, 0x0506,
300 -1, 0x0032, 0x0104,
301 -1, 0x0035, 0x0207,
302 -1, 0x0036, 0x000F,
303 -1, 0x0037, 0x0306,
304 -1, 0x0038, 0x0102,
305 -1, 0x0039, 0x0707,
306 -1, 0x003C, 0x0702,
307 -1, 0x003D, 0x1604,
308 -1, 0x0050, 0x0000,
309 -1, 0x0051, 0x00EF,
310 -1, 0x0052, 0x0000,
311 -1, 0x0053, 0x013F,
312 -1, 0x0060, 0xA700,
313 -1, 0x0061, 0x0001,
314 -1, 0x006A, 0x0000,
315 -1, 0x0080, 0x0000,
316 -1, 0x0081, 0x0000,
317 -1, 0x0082, 0x0000,
318 -1, 0x0083, 0x0000,
319 -1, 0x0084, 0x0000,
320 -1, 0x0085, 0x0000,
321 -1, 0x0090, 0x0010,
322 -1, 0x0092, 0x0600,
323 -1, 0x0007, 0x0133,
324 -3
325};
326
327static s16 ili9341_init[] = {
328 -1, 0x28,
329 -2, 20,
330 -1, 0xCF, 0x00, 0x83, 0x30,
331 -1, 0xED, 0x64, 0x03, 0x12, 0x81,
332 -1, 0xE8, 0x85, 0x01, 0x79,
333 -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
334 -1, 0xF7, 0x20,
335 -1, 0xEA, 0x00, 0x00,
336 -1, 0xC0, 0x26,
337 -1, 0xC1, 0x11,
338 -1, 0xC5, 0x35, 0x3E,
339 -1, 0xC7, 0xBE,
340 -1, 0xB1, 0x00, 0x1B,
341 -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
342 -1, 0xB7, 0x07,
343 -1, 0x3A, 0x55,
344 -1, 0x36, 0x48,
345 -1, 0x11,
346 -2, 120,
347 -1, 0x29,
348 -2, 20,
349 -3
350};
351
352static s16 ssd1351_init[] = {
353 -1, 0xfd, 0x12,
354 -1, 0xfd, 0xb1,
355 -1, 0xae,
356 -1, 0xb3, 0xf1,
357 -1, 0xca, 0x7f,
358 -1, 0xa0, 0x74,
359 -1, 0x15, 0x00, 0x7f,
360 -1, 0x75, 0x00, 0x7f,
361 -1, 0xa1, 0x00,
362 -1, 0xa2, 0x00,
363 -1, 0xb5, 0x00,
364 -1, 0xab, 0x01,
365 -1, 0xb1, 0x32,
366 -1, 0xb4, 0xa0, 0xb5, 0x55,
367 -1, 0xbb, 0x17,
368 -1, 0xbe, 0x05,
369 -1, 0xc1, 0xc8, 0x80, 0xc8,
370 -1, 0xc7, 0x0f,
371 -1, 0xb6, 0x01,
372 -1, 0xa6,
373 -1, 0xaf,
374 -3
375};
128 376
129/** 377/**
130 * struct flexfb_lcd_controller - Describes the LCD controller properties 378 * struct flexfb_lcd_controller - Describes the LCD controller properties
@@ -142,7 +390,7 @@ struct flexfb_lcd_controller {
142 unsigned int height; 390 unsigned int height;
143 unsigned int setaddrwin; 391 unsigned int setaddrwin;
144 unsigned int regwidth; 392 unsigned int regwidth;
145 int *init_seq; 393 s16 *init_seq;
146 int init_seq_sz; 394 int init_seq_sz;
147}; 395};
148 396
@@ -582,6 +830,7 @@ static const struct platform_device_id flexfb_platform_ids[] = {
582 { "flexpfb", 0 }, 830 { "flexpfb", 0 },
583 { }, 831 { },
584}; 832};
833MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
585 834
586static struct platform_driver flexfb_platform_driver = { 835static struct platform_driver flexfb_platform_driver = {
587 .driver = { 836 .driver = {
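For reference, the init-sequence arrays reflowed above (in fbtft_device.c and flexfb.c) follow the fbtft encoding in which a -1 entry starts a controller command followed by its data bytes, a -2 entry inserts a delay in milliseconds, and -3 terminates the sequence; the int-to-s16 conversion keeps that encoding while matching the module_param_array(..., short, ...) storage. A minimal sketch of a sequence in that format, assuming the -1/-2/-3 meanings as used by these drivers (the commands chosen here mirror the st7735r sequence above and are illustrative only):

/* Illustration only -- not part of the patch above.
 * -1 <cmd> <data...> : write command plus its parameters
 * -2 <ms>            : delay in milliseconds
 * -3                 : end of sequence
 */
static s16 example_init_sequence[] = {
	-1, 0x01,		/* software reset */
	-2, 150,		/* wait 150 ms */
	-1, 0x11,		/* sleep out */
	-2, 500,
	-1, 0x3A, 0x05,		/* pixel format: 16 bits per pixel */
	-1, 0x29,		/* display on */
	-3			/* end of sequence */
};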
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 1f959339c671..5c009ab48f00 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -1,25 +1,17 @@
1# 1#
2# Freescale Management Complex (MC) bus drivers 2# DPAA2 fsl-mc bus
3# 3#
4# Copyright (C) 2014 Freescale Semiconductor, Inc. 4# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5# 5#
6# This file is released under the GPLv2 6# This file is released under the GPLv2
7# 7#
8 8
9config FSL_MC_BUS 9config FSL_MC_BUS
10 bool "Freescale Management Complex (MC) bus driver" 10 bool "QorIQ DPAA2 fsl-mc bus driver"
11 depends on OF && ARM64 11 depends on OF && ARCH_LAYERSCAPE
12 select GENERIC_MSI_IRQ_DOMAIN 12 select GENERIC_MSI_IRQ_DOMAIN
13 help 13 help
14 Driver to enable the bus infrastructure for the Freescale 14 Driver to enable the bus infrastructure for the QorIQ DPAA2
15 QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware 15 architecture. The fsl-mc bus driver handles discovery of
16 module of the QorIQ LS2 SoCs, that does resource management 16 DPAA2 objects (which are represented as Linux devices) and
17 for hardware building-blocks in the SoC that can be used 17 binding objects to drivers.
18 to dynamically create networking hardware objects such as
19 network interfaces (NICs), crypto accelerator instances,
20 or L2 switches.
21
22 Only enable this option when building the kernel for
23 Freescale QorQIQ LS2xxxx SoCs.
24
25
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
index 2860411ddb51..7d86539b5414 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -33,37 +33,48 @@
33#define _FSL_DPBP_CMD_H 33#define _FSL_DPBP_CMD_H
34 34
35/* DPBP Version */ 35/* DPBP Version */
36#define DPBP_VER_MAJOR 2 36#define DPBP_VER_MAJOR 3
37#define DPBP_VER_MINOR 2 37#define DPBP_VER_MINOR 2
38 38
39/* Command versioning */
40#define DPBP_CMD_BASE_VERSION 1
41#define DPBP_CMD_ID_OFFSET 4
42
43#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
44
39/* Command IDs */ 45/* Command IDs */
40#define DPBP_CMDID_CLOSE 0x800 46#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
41#define DPBP_CMDID_OPEN 0x804 47#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
42#define DPBP_CMDID_CREATE 0x904 48#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
43#define DPBP_CMDID_DESTROY 0x900 49#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
44 50#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
45#define DPBP_CMDID_ENABLE 0x002 51
46#define DPBP_CMDID_DISABLE 0x003 52#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
47#define DPBP_CMDID_GET_ATTR 0x004 53#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
48#define DPBP_CMDID_RESET 0x005 54#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
49#define DPBP_CMDID_IS_ENABLED 0x006 55#define DPBP_CMDID_RESET DPBP_CMD(0x005)
50 56#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
51#define DPBP_CMDID_SET_IRQ 0x010 57
52#define DPBP_CMDID_GET_IRQ 0x011 58#define DPBP_CMDID_SET_IRQ DPBP_CMD(0x010)
53#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 59#define DPBP_CMDID_GET_IRQ DPBP_CMD(0x011)
54#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 60#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
55#define DPBP_CMDID_SET_IRQ_MASK 0x014 61#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
56#define DPBP_CMDID_GET_IRQ_MASK 0x015 62#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
57#define DPBP_CMDID_GET_IRQ_STATUS 0x016 63#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
58#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 64#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
59 65#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
60#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 66
61#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 67#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x01b0)
68#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x01b1)
62 69
63struct dpbp_cmd_open { 70struct dpbp_cmd_open {
64 __le32 dpbp_id; 71 __le32 dpbp_id;
65}; 72};
66 73
74struct dpbp_cmd_destroy {
75 __le32 object_id;
76};
77
67#define DPBP_ENABLE 0x1 78#define DPBP_ENABLE 0x1
68 79
69struct dpbp_rsp_is_enabled { 80struct dpbp_rsp_is_enabled {
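The new DPBP_CMD() wrapper implements the command-versioning scheme introduced by this hunk: the raw command id is shifted left by DPBP_CMD_ID_OFFSET (4) and OR'd with DPBP_CMD_BASE_VERSION (1), so the command version travels in the low nibble of the command field. The following values are simply the macro from the hunk expanded by hand, shown for illustration:

/* Illustration only: expansion of the DPBP_CMD() macro defined above. */
#define DPBP_CMD_BASE_VERSION	1
#define DPBP_CMD_ID_OFFSET	4
#define DPBP_CMD(id)	((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)

/* DPBP_CMDID_OPEN            = DPBP_CMD(0x804) = (0x804 << 4) | 1 = 0x8041 */
/* DPBP_CMDID_CLOSE           = DPBP_CMD(0x800) = (0x800 << 4) | 1 = 0x8001 */
/* DPBP_CMDID_GET_API_VERSION = DPBP_CMD(0xa04) = (0xa04 << 4) | 1 = 0xa041 */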
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 5d4cd812a400..cf4782f6a049 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -32,7 +32,8 @@
32#include "../include/mc-sys.h" 32#include "../include/mc-sys.h"
33#include "../include/mc-cmd.h" 33#include "../include/mc-cmd.h"
34#include "../include/dpbp.h" 34#include "../include/dpbp.h"
35#include "../include/dpbp-cmd.h" 35
36#include "dpbp-cmd.h"
36 37
37/** 38/**
38 * dpbp_open() - Open a control session for the specified object. 39 * dpbp_open() - Open a control session for the specified object.
@@ -107,28 +108,26 @@ EXPORT_SYMBOL(dpbp_close);
107/** 108/**
108 * dpbp_create() - Create the DPBP object. 109 * dpbp_create() - Create the DPBP object.
109 * @mc_io: Pointer to MC portal's I/O object 110 * @mc_io: Pointer to MC portal's I/O object
111 * @dprc_token: Parent container token; '0' for default container
110 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 112 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
111 * @cfg: Configuration structure 113 * @cfg: Configuration structure
112 * @token: Returned token; use in subsequent API calls 114 * @obj_id: Returned object id; use in subsequent API calls
113 * 115 *
114 * Create the DPBP object, allocate required resources and 116 * Create the DPBP object, allocate required resources and
115 * perform required initialization. 117 * perform required initialization.
116 * 118 *
117 * The object can be created either by declaring it in the 119 * This function accepts an authentication token of a parent
118 * DPL file, or by calling this function. 120 * container that this object should be assigned to and returns
119 * This function returns a unique authentication token, 121 * an object id. This object_id will be used in all subsequent calls to
120 * associated with the specific object ID and the specific MC 122 * this specific object.
121 * portal; this token must be used in all subsequent calls to
122 * this specific object. For objects that are created using the
123 * DPL file, call dpbp_open function to get an authentication
124 * token first.
125 * 123 *
126 * Return: '0' on Success; Error code otherwise. 124 * Return: '0' on Success; Error code otherwise.
127 */ 125 */
128int dpbp_create(struct fsl_mc_io *mc_io, 126int dpbp_create(struct fsl_mc_io *mc_io,
127 u16 dprc_token,
129 u32 cmd_flags, 128 u32 cmd_flags,
130 const struct dpbp_cfg *cfg, 129 const struct dpbp_cfg *cfg,
131 u16 *token) 130 u32 *obj_id)
132{ 131{
133 struct mc_command cmd = { 0 }; 132 struct mc_command cmd = { 0 };
134 int err; 133 int err;
@@ -137,7 +136,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
137 136
138 /* prepare command */ 137 /* prepare command */
139 cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, 138 cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
140 cmd_flags, 0); 139 cmd_flags, dprc_token);
141 140
142 /* send command to mc*/ 141 /* send command to mc*/
143 err = mc_send_command(mc_io, &cmd); 142 err = mc_send_command(mc_io, &cmd);
@@ -145,7 +144,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
145 return err; 144 return err;
146 145
147 /* retrieve response parameters */ 146 /* retrieve response parameters */
148 *token = mc_cmd_hdr_read_token(&cmd); 147 *obj_id = mc_cmd_read_object_id(&cmd);
149 148
150 return 0; 149 return 0;
151} 150}
@@ -153,20 +152,25 @@ int dpbp_create(struct fsl_mc_io *mc_io,
153/** 152/**
154 * dpbp_destroy() - Destroy the DPBP object and release all its resources. 153 * dpbp_destroy() - Destroy the DPBP object and release all its resources.
155 * @mc_io: Pointer to MC portal's I/O object 154 * @mc_io: Pointer to MC portal's I/O object
155 * @dprc_token: Parent container token; '0' for default container
156 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 156 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
157 * @token: Token of DPBP object 157 * @obj_id: ID of DPBP object
158 * 158 *
159 * Return: '0' on Success; error code otherwise. 159 * Return: '0' on Success; error code otherwise.
160 */ 160 */
161int dpbp_destroy(struct fsl_mc_io *mc_io, 161int dpbp_destroy(struct fsl_mc_io *mc_io,
162 u16 dprc_token,
162 u32 cmd_flags, 163 u32 cmd_flags,
163 u16 token) 164 u32 obj_id)
164{ 165{
166 struct dpbp_cmd_destroy *cmd_params;
165 struct mc_command cmd = { 0 }; 167 struct mc_command cmd = { 0 };
166 168
167 /* prepare command */ 169 /* prepare command */
168 cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, 170 cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
169 cmd_flags, token); 171 cmd_flags, dprc_token);
172 cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
173 cmd_params->object_id = cpu_to_le32(obj_id);
170 174
171 /* send command to mc*/ 175 /* send command to mc*/
172 return mc_send_command(mc_io, &cmd); 176 return mc_send_command(mc_io, &cmd);
@@ -609,8 +613,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
609 rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params; 613 rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
610 attr->bpid = le16_to_cpu(rsp_params->bpid); 614 attr->bpid = le16_to_cpu(rsp_params->bpid);
611 attr->id = le32_to_cpu(rsp_params->id); 615 attr->id = le32_to_cpu(rsp_params->id);
612 attr->version.major = le16_to_cpu(rsp_params->version_major);
613 attr->version.minor = le16_to_cpu(rsp_params->version_minor);
614 616
615 return 0; 617 return 0;
616} 618}
@@ -689,3 +691,35 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
689 691
690 return 0; 692 return 0;
691} 693}
694
695/**
696 * dpbp_get_api_version - Get Data Path Buffer Pool API version
697 * @mc_io: Pointer to Mc portal's I/O object
698 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
699 * @major_ver: Major version of Buffer Pool API
700 * @minor_ver: Minor version of Buffer Pool API
701 *
702 * Return: '0' on Success; Error code otherwise.
703 */
704int dpbp_get_api_version(struct fsl_mc_io *mc_io,
705 u32 cmd_flags,
706 u16 *major_ver,
707 u16 *minor_ver)
708{
709 struct mc_command cmd = { 0 };
710 int err;
711
712 /* prepare command */
713 cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
714 cmd_flags, 0);
715
716 /* send command to mc */
717 err = mc_send_command(mc_io, &cmd);
718 if (err)
719 return err;
720
721 /* retrieve response parameters */
722 mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
723
724 return 0;
725}
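Taken together, the dpbp_create()/dpbp_destroy() changes in this file move from token-based to object-id-based creation: the caller now passes the parent container token and receives an object id rather than an authentication token, and dpbp_get_api_version() replaces the version fields dropped from dpbp_get_attributes(). A rough caller sketch under the new signatures; the surrounding names (mc_io, the zero-initialized dpbp_cfg, the open/close pairing and error handling) are assumptions for illustration, not taken from the patch:

/* Illustrative sketch only; assumes a struct fsl_mc_io *mc_io obtained
 * elsewhere and that a zero-initialized struct dpbp_cfg is acceptable.
 */
static int example_dpbp_lifecycle(struct fsl_mc_io *mc_io, u16 dprc_token)
{
	struct dpbp_cfg cfg = { 0 };
	u16 major, minor, token;
	u32 obj_id;
	int err;

	err = dpbp_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	/* create now returns an object id in the parent container ... */
	err = dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);
	if (err)
		return err;

	/* ... which is then opened to obtain an authentication token */
	err = dpbp_open(mc_io, 0, obj_id, &token);
	if (err)
		goto destroy;

	/* use the DPBP object here, then release it */
	dpbp_close(mc_io, 0, token);

destroy:
	return dpbp_destroy(mc_io, dprc_token, 0, obj_id);
}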
diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
index 536b2ef13507..d0a5e194c5e1 100644
--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index d098a6d8f6bc..7cb514963c26 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -33,25 +33,32 @@
33#define _FSL_DPMCP_CMD_H 33#define _FSL_DPMCP_CMD_H
34 34
35/* Minimal supported DPMCP Version */ 35/* Minimal supported DPMCP Version */
36#define DPMCP_MIN_VER_MAJOR 3 36#define DPMCP_MIN_VER_MAJOR 3
37#define DPMCP_MIN_VER_MINOR 0 37#define DPMCP_MIN_VER_MINOR 0
38
39/* Command versioning */
40#define DPMCP_CMD_BASE_VERSION 1
41#define DPMCP_CMD_ID_OFFSET 4
42
43#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
38 44
39/* Command IDs */ 45/* Command IDs */
40#define DPMCP_CMDID_CLOSE 0x800 46#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
41#define DPMCP_CMDID_OPEN 0x80b 47#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
42#define DPMCP_CMDID_CREATE 0x90b 48#define DPMCP_CMDID_CREATE DPMCP_CMD(0x90b)
43#define DPMCP_CMDID_DESTROY 0x900 49#define DPMCP_CMDID_DESTROY DPMCP_CMD(0x98b)
44 50#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
45#define DPMCP_CMDID_GET_ATTR 0x004 51
46#define DPMCP_CMDID_RESET 0x005 52#define DPMCP_CMDID_GET_ATTR DPMCP_CMD(0x004)
47 53#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
48#define DPMCP_CMDID_SET_IRQ 0x010 54
49#define DPMCP_CMDID_GET_IRQ 0x011 55#define DPMCP_CMDID_SET_IRQ DPMCP_CMD(0x010)
50#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 56#define DPMCP_CMDID_GET_IRQ DPMCP_CMD(0x011)
51#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 57#define DPMCP_CMDID_SET_IRQ_ENABLE DPMCP_CMD(0x012)
52#define DPMCP_CMDID_SET_IRQ_MASK 0x014 58#define DPMCP_CMDID_GET_IRQ_ENABLE DPMCP_CMD(0x013)
53#define DPMCP_CMDID_GET_IRQ_MASK 0x015 59#define DPMCP_CMDID_SET_IRQ_MASK DPMCP_CMD(0x014)
54#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 60#define DPMCP_CMDID_GET_IRQ_MASK DPMCP_CMD(0x015)
61#define DPMCP_CMDID_GET_IRQ_STATUS DPMCP_CMD(0x016)
55 62
56struct dpmcp_cmd_open { 63struct dpmcp_cmd_open {
57 __le32 dpmcp_id; 64 __le32 dpmcp_id;
@@ -61,6 +68,10 @@ struct dpmcp_cmd_create {
61 __le32 portal_id; 68 __le32 portal_id;
62}; 69};
63 70
71struct dpmcp_cmd_destroy {
72 __le32 object_id;
73};
74
64struct dpmcp_cmd_set_irq { 75struct dpmcp_cmd_set_irq {
65 /* cmd word 0 */ 76 /* cmd word 0 */
66 u8 irq_index; 77 u8 irq_index;
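
For readers tracing the new command-id encoding introduced above: DPMCP_CMD(id) shifts the raw id left by four bits and packs a 4-bit base version into the low nibble, so every DPMCP_CMDID_* is now a versioned 16-bit value (DPRC and DPMNG adopt the same scheme further down in this diff). A standalone arithmetic check, outside the kernel tree and with the macro argument parenthesized defensively here, would be:

#include <assert.h>

#define DPMCP_CMD_BASE_VERSION	1
#define DPMCP_CMD_ID_OFFSET	4
#define DPMCP_CMD(id)	(((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)

int main(void)
{
	/* 0x80b << 4 = 0x80b0, OR'ed with version 1 -> 0x80b1 */
	assert(DPMCP_CMD(0x80b) == 0x80b1);	/* DPMCP_CMDID_OPEN  */
	assert(DPMCP_CMD(0x005) == 0x0051);	/* DPMCP_CMDID_RESET */
	return 0;
}
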
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index 55766f78a528..e4d16519bcb4 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -106,28 +106,29 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
106/** 106/**
107 * dpmcp_create() - Create the DPMCP object. 107 * dpmcp_create() - Create the DPMCP object.
108 * @mc_io: Pointer to MC portal's I/O object 108 * @mc_io: Pointer to MC portal's I/O object
109 * @dprc_token: Parent container token; '0' for default container
109 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 110 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
110 * @cfg: Configuration structure 111 * @cfg: Configuration structure
111 * @token: Returned token; use in subsequent API calls 112 * @obj_id: Returned object id; use in subsequent API calls
112 * 113 *
113 * Create the DPMCP object, allocate required resources and 114 * Create the DPMCP object, allocate required resources and
114 * perform required initialization. 115 * perform required initialization.
115 * 116 *
116 * The object can be created either by declaring it in the 117 * The object can be created either by declaring it in the
117 * DPL file, or by calling this function. 118 * DPL file, or by calling this function.
118 * This function returns a unique authentication token, 119
119 * associated with the specific object ID and the specific MC 120 * This function accepts an authentication token of a parent
120 * portal; this token must be used in all subsequent calls to 121 * container that this object should be assigned to and returns
121 * this specific object. For objects that are created using the 122 * an object id. This object_id will be used in all subsequent calls to
122 * DPL file, call dpmcp_open function to get an authentication 123 * this specific object.
123 * token first.
124 * 124 *
125 * Return: '0' on Success; Error code otherwise. 125 * Return: '0' on Success; Error code otherwise.
126 */ 126 */
127int dpmcp_create(struct fsl_mc_io *mc_io, 127int dpmcp_create(struct fsl_mc_io *mc_io,
128 u16 dprc_token,
128 u32 cmd_flags, 129 u32 cmd_flags,
129 const struct dpmcp_cfg *cfg, 130 const struct dpmcp_cfg *cfg,
130 u16 *token) 131 u32 *obj_id)
131{ 132{
132 struct mc_command cmd = { 0 }; 133 struct mc_command cmd = { 0 };
133 struct dpmcp_cmd_create *cmd_params; 134 struct dpmcp_cmd_create *cmd_params;
@@ -136,7 +137,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
136 137
137 /* prepare command */ 138 /* prepare command */
138 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, 139 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
139 cmd_flags, 0); 140 cmd_flags, dprc_token);
140 cmd_params = (struct dpmcp_cmd_create *)cmd.params; 141 cmd_params = (struct dpmcp_cmd_create *)cmd.params;
141 cmd_params->portal_id = cpu_to_le32(cfg->portal_id); 142 cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
142 143
@@ -146,7 +147,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
146 return err; 147 return err;
147 148
148 /* retrieve response parameters */ 149 /* retrieve response parameters */
149 *token = mc_cmd_hdr_read_token(&cmd); 150 *obj_id = mc_cmd_read_object_id(&cmd);
150 151
151 return 0; 152 return 0;
152} 153}
@@ -154,20 +155,25 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
154/** 155/**
155 * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. 156 * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
156 * @mc_io: Pointer to MC portal's I/O object 157 * @mc_io: Pointer to MC portal's I/O object
158 * @dprc_token: Parent container token; '0' for default container
157 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 159 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
158 * @token: Token of DPMCP object 160 * @obj_id: ID of DPMCP object
159 * 161 *
160 * Return: '0' on Success; error code otherwise. 162 * Return: '0' on Success; error code otherwise.
161 */ 163 */
162int dpmcp_destroy(struct fsl_mc_io *mc_io, 164int dpmcp_destroy(struct fsl_mc_io *mc_io,
165 u16 dprc_token,
163 u32 cmd_flags, 166 u32 cmd_flags,
164 u16 token) 167 u32 obj_id)
165{ 168{
166 struct mc_command cmd = { 0 }; 169 struct mc_command cmd = { 0 };
170 struct dpmcp_cmd_destroy *cmd_params;
167 171
168 /* prepare command */ 172 /* prepare command */
169 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, 173 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
170 cmd_flags, token); 174 cmd_flags, dprc_token);
175 cmd_params = (struct dpmcp_cmd_destroy *)cmd.params;
176 cmd_params->object_id = cpu_to_le32(obj_id);
171 177
172 /* send command to mc*/ 178 /* send command to mc*/
173 return mc_send_command(mc_io, &cmd); 179 return mc_send_command(mc_io, &cmd);
@@ -497,8 +503,38 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
497 /* retrieve response parameters */ 503 /* retrieve response parameters */
498 rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params; 504 rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
499 attr->id = le32_to_cpu(rsp_params->id); 505 attr->id = le32_to_cpu(rsp_params->id);
500 attr->version.major = le16_to_cpu(rsp_params->version_major); 506
501 attr->version.minor = le16_to_cpu(rsp_params->version_minor); 507 return 0;
508}
509
510/**
511 * dpmcp_get_api_version - Get Data Path Management Command Portal API version
 512 * @mc_io: Pointer to MC portal's I/O object
513 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
514 * @major_ver: Major version of Data Path Management Command Portal API
515 * @minor_ver: Minor version of Data Path Management Command Portal API
516 *
517 * Return: '0' on Success; Error code otherwise.
518 */
519int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
520 u32 cmd_flags,
521 u16 *major_ver,
522 u16 *minor_ver)
523{
524 struct mc_command cmd = { 0 };
525 int err;
526
527 /* prepare command */
528 cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
529 cmd_flags, 0);
530
531 /* send command to mc */
532 err = mc_send_command(mc_io, &cmd);
533 if (err)
534 return err;
535
536 /* retrieve response parameters */
537 mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
502 538
503 return 0; 539 return 0;
504} 540}
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index fe79d4d9293d..98a100d543f6 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -32,23 +32,24 @@
32#ifndef __FSL_DPMCP_H 32#ifndef __FSL_DPMCP_H
33#define __FSL_DPMCP_H 33#define __FSL_DPMCP_H
34 34
35/* Data Path Management Command Portal API 35/*
36 * Data Path Management Command Portal API
36 * Contains initialization APIs and runtime control APIs for DPMCP 37 * Contains initialization APIs and runtime control APIs for DPMCP
37 */ 38 */
38 39
39struct fsl_mc_io; 40struct fsl_mc_io;
40 41
41int dpmcp_open(struct fsl_mc_io *mc_io, 42int dpmcp_open(struct fsl_mc_io *mc_io,
42 uint32_t cmd_flags, 43 u32 cmd_flags,
43 int dpmcp_id, 44 int dpmcp_id,
44 uint16_t *token); 45 u16 *token);
45 46
46/* Get portal ID from pool */ 47/* Get portal ID from pool */
47#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) 48#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
48 49
49int dpmcp_close(struct fsl_mc_io *mc_io, 50int dpmcp_close(struct fsl_mc_io *mc_io,
50 uint32_t cmd_flags, 51 u32 cmd_flags,
51 uint16_t token); 52 u16 token);
52 53
53/** 54/**
54 * struct dpmcp_cfg - Structure representing DPMCP configuration 55 * struct dpmcp_cfg - Structure representing DPMCP configuration
@@ -59,18 +60,20 @@ struct dpmcp_cfg {
59 int portal_id; 60 int portal_id;
60}; 61};
61 62
62int dpmcp_create(struct fsl_mc_io *mc_io, 63int dpmcp_create(struct fsl_mc_io *mc_io,
63 uint32_t cmd_flags, 64 u16 dprc_token,
64 const struct dpmcp_cfg *cfg, 65 u32 cmd_flags,
65 uint16_t *token); 66 const struct dpmcp_cfg *cfg,
67 u32 *obj_id);
66 68
67int dpmcp_destroy(struct fsl_mc_io *mc_io, 69int dpmcp_destroy(struct fsl_mc_io *mc_io,
68 uint32_t cmd_flags, 70 u16 dprc_token,
69 uint16_t token); 71 u32 cmd_flags,
72 u32 obj_id);
70 73
71int dpmcp_reset(struct fsl_mc_io *mc_io, 74int dpmcp_reset(struct fsl_mc_io *mc_io,
72 uint32_t cmd_flags, 75 u32 cmd_flags,
73 uint16_t token); 76 u16 token);
74 77
75/* IRQ */ 78/* IRQ */
76/* IRQ Index */ 79/* IRQ Index */
@@ -85,75 +88,65 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
85 * @irq_num: A user defined number associated with this IRQ 88 * @irq_num: A user defined number associated with this IRQ
86 */ 89 */
87struct dpmcp_irq_cfg { 90struct dpmcp_irq_cfg {
88 uint64_t paddr; 91 u64 paddr;
89 uint32_t val; 92 u32 val;
90 int irq_num; 93 int irq_num;
91}; 94};
92 95
93int dpmcp_set_irq(struct fsl_mc_io *mc_io, 96int dpmcp_set_irq(struct fsl_mc_io *mc_io,
94 uint32_t cmd_flags, 97 u32 cmd_flags,
95 uint16_t token, 98 u16 token,
96 uint8_t irq_index, 99 u8 irq_index,
97 struct dpmcp_irq_cfg *irq_cfg); 100 struct dpmcp_irq_cfg *irq_cfg);
98 101
99int dpmcp_get_irq(struct fsl_mc_io *mc_io, 102int dpmcp_get_irq(struct fsl_mc_io *mc_io,
100 uint32_t cmd_flags, 103 u32 cmd_flags,
101 uint16_t token, 104 u16 token,
102 uint8_t irq_index, 105 u8 irq_index,
103 int *type, 106 int *type,
104 struct dpmcp_irq_cfg *irq_cfg); 107 struct dpmcp_irq_cfg *irq_cfg);
105 108
106int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, 109int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
107 uint32_t cmd_flags, 110 u32 cmd_flags,
108 uint16_t token, 111 u16 token,
109 uint8_t irq_index, 112 u8 irq_index,
110 uint8_t en); 113 u8 en);
111 114
112int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, 115int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
113 uint32_t cmd_flags, 116 u32 cmd_flags,
114 uint16_t token, 117 u16 token,
115 uint8_t irq_index, 118 u8 irq_index,
116 uint8_t *en); 119 u8 *en);
117 120
118int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, 121int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
119 uint32_t cmd_flags, 122 u32 cmd_flags,
120 uint16_t token, 123 u16 token,
121 uint8_t irq_index, 124 u8 irq_index,
122 uint32_t mask); 125 u32 mask);
123 126
124int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, 127int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
125 uint32_t cmd_flags, 128 u32 cmd_flags,
126 uint16_t token, 129 u16 token,
127 uint8_t irq_index, 130 u8 irq_index,
128 uint32_t *mask); 131 u32 *mask);
129 132
130int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, 133int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
131 uint32_t cmd_flags, 134 u32 cmd_flags,
132 uint16_t token, 135 u16 token,
133 uint8_t irq_index, 136 u8 irq_index,
134 uint32_t *status); 137 u32 *status);
135 138
136/** 139/**
137 * struct dpmcp_attr - Structure representing DPMCP attributes 140 * struct dpmcp_attr - Structure representing DPMCP attributes
138 * @id: DPMCP object ID 141 * @id: DPMCP object ID
139 * @version: DPMCP version
140 */ 142 */
141struct dpmcp_attr { 143struct dpmcp_attr {
142 int id; 144 int id;
143 /**
144 * struct version - Structure representing DPMCP version
145 * @major: DPMCP major version
146 * @minor: DPMCP minor version
147 */
148 struct {
149 uint16_t major;
150 uint16_t minor;
151 } version;
152}; 145};
153 146
154int dpmcp_get_attributes(struct fsl_mc_io *mc_io, 147int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
155 uint32_t cmd_flags, 148 u32 cmd_flags,
156 uint16_t token, 149 u16 token,
157 struct dpmcp_attr *attr); 150 struct dpmcp_attr *attr);
158 151
159#endif /* __FSL_DPMCP_H */ 152#endif /* __FSL_DPMCP_H */
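
With the header changes above, DPMCP creation is addressed by a parent-container token and returns an object id rather than an authentication token. A hedged sketch of the resulting call pattern, assuming only the prototypes in this header; the wrapper function and its error handling are illustrative:

static int example_dpmcp_roundtrip(struct fsl_mc_io *mc_io)
{
	struct dpmcp_cfg cfg = { .portal_id = DPMCP_GET_PORTAL_ID_FROM_POOL };
	u32 obj_id;
	int err;

	/* dprc_token == 0 targets the caller's own (default) container */
	err = dpmcp_create(mc_io, 0, 0, &cfg, &obj_id);
	if (err)
		return err;

	/* runtime access still goes through dpmcp_open()/dpmcp_close() */

	return dpmcp_destroy(mc_io, 0, 0, obj_id);
}
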
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index a7b77d58c8cd..cdddfb80eecc 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -12,7 +12,6 @@
12 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
13 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
14 * 14 *
15 *
16 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
18 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -41,13 +40,14 @@
41#ifndef __FSL_DPMNG_CMD_H 40#ifndef __FSL_DPMNG_CMD_H
42#define __FSL_DPMNG_CMD_H 41#define __FSL_DPMNG_CMD_H
43 42
44/* Command IDs */ 43/* Command versioning */
45#define DPMNG_CMDID_GET_CONT_ID 0x830 44#define DPMNG_CMD_BASE_VERSION 1
46#define DPMNG_CMDID_GET_VERSION 0x831 45#define DPMNG_CMD_ID_OFFSET 4
47 46
48struct dpmng_rsp_get_container_id { 47#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
49 __le32 container_id; 48
50}; 49/* Command IDs */
50#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
51 51
52struct dpmng_rsp_get_version { 52struct dpmng_rsp_get_version {
53 __le32 revision; 53 __le32 revision;
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index 96b1d67756fa..ad5d5bbec529 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
72} 72}
73EXPORT_SYMBOL(mc_get_version); 73EXPORT_SYMBOL(mc_get_version);
74 74
75/**
76 * dpmng_get_container_id() - Get container ID associated with a given portal.
77 * @mc_io: Pointer to MC portal's I/O object
78 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
79 * @container_id: Requested container ID
80 *
81 * Return: '0' on Success; Error code otherwise.
82 */
83int dpmng_get_container_id(struct fsl_mc_io *mc_io,
84 u32 cmd_flags,
85 int *container_id)
86{
87 struct mc_command cmd = { 0 };
88 struct dpmng_rsp_get_container_id *rsp_params;
89 int err;
90
91 /* prepare command */
92 cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
93 cmd_flags,
94 0);
95
96 /* send command to mc*/
97 err = mc_send_command(mc_io, &cmd);
98 if (err)
99 return err;
100
101 /* retrieve response parameters */
102 rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
103 *container_id = le32_to_cpu(rsp_params->container_id);
104
105 return 0;
106}
107
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 009d65673155..588b8cafdbc7 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -12,7 +12,6 @@
12 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
13 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
14 * 14 *
15 *
16 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
18 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -42,48 +41,56 @@
42#define _FSL_DPRC_CMD_H 41#define _FSL_DPRC_CMD_H
43 42
44/* Minimal supported DPRC Version */ 43/* Minimal supported DPRC Version */
45#define DPRC_MIN_VER_MAJOR 5 44#define DPRC_MIN_VER_MAJOR 6
46#define DPRC_MIN_VER_MINOR 0 45#define DPRC_MIN_VER_MINOR 0
47 46
47/* Command versioning */
48#define DPRC_CMD_BASE_VERSION 1
49#define DPRC_CMD_ID_OFFSET 4
50
51#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
52
48/* Command IDs */ 53/* Command IDs */
49#define DPRC_CMDID_CLOSE 0x800 54#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
50#define DPRC_CMDID_OPEN 0x805 55#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
51#define DPRC_CMDID_CREATE 0x905 56#define DPRC_CMDID_CREATE DPRC_CMD(0x905)
52 57#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
53#define DPRC_CMDID_GET_ATTR 0x004 58
54#define DPRC_CMDID_RESET_CONT 0x005 59#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
55 60#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
56#define DPRC_CMDID_SET_IRQ 0x010 61
57#define DPRC_CMDID_GET_IRQ 0x011 62#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
58#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 63#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
59#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 64#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
60#define DPRC_CMDID_SET_IRQ_MASK 0x014 65#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
61#define DPRC_CMDID_GET_IRQ_MASK 0x015 66#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
62#define DPRC_CMDID_GET_IRQ_STATUS 0x016 67#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
63#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 68#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
64 69#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
65#define DPRC_CMDID_CREATE_CONT 0x151 70
66#define DPRC_CMDID_DESTROY_CONT 0x152 71#define DPRC_CMDID_CREATE_CONT DPRC_CMD(0x151)
67#define DPRC_CMDID_SET_RES_QUOTA 0x155 72#define DPRC_CMDID_DESTROY_CONT DPRC_CMD(0x152)
68#define DPRC_CMDID_GET_RES_QUOTA 0x156 73#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
69#define DPRC_CMDID_ASSIGN 0x157 74#define DPRC_CMDID_SET_RES_QUOTA DPRC_CMD(0x155)
70#define DPRC_CMDID_UNASSIGN 0x158 75#define DPRC_CMDID_GET_RES_QUOTA DPRC_CMD(0x156)
71#define DPRC_CMDID_GET_OBJ_COUNT 0x159 76#define DPRC_CMDID_ASSIGN DPRC_CMD(0x157)
72#define DPRC_CMDID_GET_OBJ 0x15A 77#define DPRC_CMDID_UNASSIGN DPRC_CMD(0x158)
73#define DPRC_CMDID_GET_RES_COUNT 0x15B 78#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
74#define DPRC_CMDID_GET_RES_IDS 0x15C 79#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
75#define DPRC_CMDID_GET_OBJ_REG 0x15E 80#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
76#define DPRC_CMDID_SET_OBJ_IRQ 0x15F 81#define DPRC_CMDID_GET_RES_IDS DPRC_CMD(0x15C)
77#define DPRC_CMDID_GET_OBJ_IRQ 0x160 82#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
78#define DPRC_CMDID_SET_OBJ_LABEL 0x161 83#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
79#define DPRC_CMDID_GET_OBJ_DESC 0x162 84#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
80 85#define DPRC_CMDID_SET_OBJ_LABEL DPRC_CMD(0x161)
81#define DPRC_CMDID_CONNECT 0x167 86#define DPRC_CMDID_GET_OBJ_DESC DPRC_CMD(0x162)
82#define DPRC_CMDID_DISCONNECT 0x168 87
83#define DPRC_CMDID_GET_POOL 0x169 88#define DPRC_CMDID_CONNECT DPRC_CMD(0x167)
84#define DPRC_CMDID_GET_POOL_COUNT 0x16A 89#define DPRC_CMDID_DISCONNECT DPRC_CMD(0x168)
85 90#define DPRC_CMDID_GET_POOL DPRC_CMD(0x169)
86#define DPRC_CMDID_GET_CONNECTION 0x16C 91#define DPRC_CMDID_GET_POOL_COUNT DPRC_CMD(0x16A)
92
93#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
87 94
88struct dprc_cmd_open { 95struct dprc_cmd_open {
89 __le32 container_id; 96 __le32 container_id;
@@ -199,9 +206,6 @@ struct dprc_rsp_get_attributes {
199 /* response word 1 */ 206 /* response word 1 */
200 __le32 options; 207 __le32 options;
201 __le32 portal_id; 208 __le32 portal_id;
202 /* response word 2 */
203 __le16 version_major;
204 __le16 version_minor;
205}; 209};
206 210
207struct dprc_cmd_set_res_quota { 211struct dprc_cmd_set_res_quota {
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index c5ee4639682b..4e416d89b736 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale data path resource container (DPRC) driver 2 * Freescale data path resource container (DPRC) driver
3 * 3 *
4 * Copyright (C) 2014 Freescale Semiconductor, Inc. 4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
@@ -505,7 +505,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
505 dprc_irq0_handler, 505 dprc_irq0_handler,
506 dprc_irq0_handler_thread, 506 dprc_irq0_handler_thread,
507 IRQF_NO_SUSPEND | IRQF_ONESHOT, 507 IRQF_NO_SUSPEND | IRQF_ONESHOT,
508 "FSL MC DPRC irq0", 508 dev_name(&mc_dev->dev),
509 &mc_dev->dev); 509 &mc_dev->dev);
510 if (error < 0) { 510 if (error < 0) {
511 dev_err(&mc_dev->dev, 511 dev_err(&mc_dev->dev,
@@ -597,6 +597,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
597 struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); 597 struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
598 bool mc_io_created = false; 598 bool mc_io_created = false;
599 bool msi_domain_set = false; 599 bool msi_domain_set = false;
600 u16 major_ver, minor_ver;
600 601
601 if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) 602 if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
602 return -EINVAL; 603 return -EINVAL;
@@ -669,13 +670,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
669 goto error_cleanup_open; 670 goto error_cleanup_open;
670 } 671 }
671 672
672 if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || 673 error = dprc_get_api_version(mc_dev->mc_io, 0,
673 (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && 674 &major_ver,
674 mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { 675 &minor_ver);
676 if (error < 0) {
677 dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
678 error);
679 goto error_cleanup_open;
680 }
681
682 if (major_ver < DPRC_MIN_VER_MAJOR ||
683 (major_ver == DPRC_MIN_VER_MAJOR &&
684 minor_ver < DPRC_MIN_VER_MINOR)) {
675 dev_err(&mc_dev->dev, 685 dev_err(&mc_dev->dev,
676 "ERROR: DPRC version %d.%d not supported\n", 686 "ERROR: DPRC version %d.%d not supported\n",
677 mc_bus->dprc_attr.version.major, 687 major_ver, minor_ver);
678 mc_bus->dprc_attr.version.minor);
679 error = -ENOTSUPP; 688 error = -ENOTSUPP;
680 goto error_cleanup_open; 689 goto error_cleanup_open;
681 } 690 }
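
dprc_probe() now gates on a version obtained at runtime instead of the one formerly embedded in the DPRC attributes. The comparison it applies reduces to the predicate below; the helper name is hypothetical and merely restates the check in the hunk above:

static bool dprc_api_supported(u16 major_ver, u16 minor_ver)
{
	if (major_ver != DPRC_MIN_VER_MAJOR)
		return major_ver > DPRC_MIN_VER_MAJOR;

	return minor_ver >= DPRC_MIN_VER_MINOR;
}
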
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 9fea3def6041..572edd4c066e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
@@ -565,8 +565,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
565 attr->icid = le16_to_cpu(rsp_params->icid); 565 attr->icid = le16_to_cpu(rsp_params->icid);
566 attr->options = le32_to_cpu(rsp_params->options); 566 attr->options = le32_to_cpu(rsp_params->options);
567 attr->portal_id = le32_to_cpu(rsp_params->portal_id); 567 attr->portal_id = le32_to_cpu(rsp_params->portal_id);
568 attr->version.major = le16_to_cpu(rsp_params->version_major);
569 attr->version.minor = le16_to_cpu(rsp_params->version_minor);
570 568
571 return 0; 569 return 0;
572} 570}
@@ -1386,3 +1384,66 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
1386 1384
1387 return 0; 1385 return 0;
1388} 1386}
1387
1388/**
1389 * dprc_get_api_version - Get Data Path Resource Container API version
 1390 * @mc_io: Pointer to MC portal's I/O object
1391 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1392 * @major_ver: Major version of Data Path Resource Container API
1393 * @minor_ver: Minor version of Data Path Resource Container API
1394 *
1395 * Return: '0' on Success; Error code otherwise.
1396 */
1397int dprc_get_api_version(struct fsl_mc_io *mc_io,
1398 u32 cmd_flags,
1399 u16 *major_ver,
1400 u16 *minor_ver)
1401{
1402 struct mc_command cmd = { 0 };
1403 int err;
1404
1405 /* prepare command */
1406 cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
1407 cmd_flags, 0);
1408
1409 /* send command to mc */
1410 err = mc_send_command(mc_io, &cmd);
1411 if (err)
1412 return err;
1413
1414 /* retrieve response parameters */
1415 mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
1416
1417 return 0;
1418}
1419
1420/**
1421 * dprc_get_container_id - Get container ID associated with a given portal.
 1422 * @mc_io: Pointer to MC portal's I/O object
1423 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1424 * @container_id: Requested container id
1425 *
1426 * Return: '0' on Success; Error code otherwise.
1427 */
1428int dprc_get_container_id(struct fsl_mc_io *mc_io,
1429 u32 cmd_flags,
1430 int *container_id)
1431{
1432 struct mc_command cmd = { 0 };
1433 int err;
1434
1435 /* prepare command */
1436 cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
1437 cmd_flags,
1438 0);
1439
1440 /* send command to mc*/
1441 err = mc_send_command(mc_io, &cmd);
1442 if (err)
1443 return err;
1444
1445 /* retrieve response parameters */
1446 *container_id = (int)mc_cmd_read_object_id(&cmd);
1447
1448 return 0;
1449}
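
The container-id lookup removed from dpmng.c earlier in this diff reappears here as a DPRC command, so callers switch from dpmng_get_container_id() to dprc_get_container_id() with the same argument shape. A one-line illustrative wrapper (the function name is hypothetical):

static int example_get_root_container(struct fsl_mc_io *mc_io, int *container_id)
{
	/* replaces the removed dpmng_get_container_id() */
	return dprc_get_container_id(mc_io, 0, container_id);
}
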
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index e93ab53bae67..ce07096c3b1f 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale MC object device allocator driver 2 * fsl-mc object allocator driver
3 * 3 *
4 * Copyright (C) 2013 Freescale Semiconductor, Inc. 4 * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
5 * 5 *
6 * This file is licensed under the terms of the GNU General Public 6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any 7 * License version 2. This program is licensed "as is" without any
@@ -12,9 +12,9 @@
12#include <linux/msi.h> 12#include <linux/msi.h>
13#include "../include/mc-bus.h" 13#include "../include/mc-bus.h"
14#include "../include/mc-sys.h" 14#include "../include/mc-sys.h"
15#include "../include/dpbp-cmd.h"
16#include "../include/dpcon-cmd.h"
17 15
16#include "dpbp-cmd.h"
17#include "dpcon-cmd.h"
18#include "fsl-mc-private.h" 18#include "fsl-mc-private.h"
19 19
20#define FSL_MC_IS_ALLOCATABLE(_obj_type) \ 20#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
@@ -23,15 +23,12 @@
23 strcmp(_obj_type, "dpcon") == 0) 23 strcmp(_obj_type, "dpcon") == 0)
24 24
25/** 25/**
26 * fsl_mc_resource_pool_add_device - add allocatable device to a resource 26 * fsl_mc_resource_pool_add_device - add allocatable object to a resource
27 * pool of a given MC bus 27 * pool of a given fsl-mc bus
28 * 28 *
29 * @mc_bus: pointer to the MC bus 29 * @mc_bus: pointer to the fsl-mc bus
30 * @pool_type: MC bus pool type 30 * @pool_type: pool type
31 * @mc_dev: Pointer to allocatable MC object device 31 * @mc_dev: pointer to allocatable fsl-mc device
32 *
33 * It adds an allocatable MC object device to a container's resource pool of
34 * the given resource type
35 */ 32 */
36static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus 33static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
37 *mc_bus, 34 *mc_bus,
@@ -95,10 +92,10 @@ out:
95 * fsl_mc_resource_pool_remove_device - remove an allocatable device from a 92 * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
96 * resource pool 93 * resource pool
97 * 94 *
98 * @mc_dev: Pointer to allocatable MC object device 95 * @mc_dev: pointer to allocatable fsl-mc device
99 * 96 *
100 * It permanently removes an allocatable MC object device from the resource 97 * It permanently removes an allocatable fsl-mc device from the resource
101 * pool, the device is currently in, as long as it is in the pool's free list. 98 * pool. It's an error if the device is in use.
102 */ 99 */
103static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device 100static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
104 *mc_dev) 101 *mc_dev)
@@ -255,17 +252,18 @@ out_unlock:
255EXPORT_SYMBOL_GPL(fsl_mc_resource_free); 252EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
256 253
257/** 254/**
258 * fsl_mc_object_allocate - Allocates a MC object device of the given 255 * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
259 * pool type from a given MC bus 256 * pool type from a given fsl-mc bus instance
260 * 257 *
261 * @mc_dev: MC device for which the MC object device is to be allocated 258 * @mc_dev: fsl-mc device which is used in conjunction with the
262 * @pool_type: MC bus resource pool type 259 * allocated object
263 * @new_mc_dev: Pointer to area where the pointer to the allocated 260 * @pool_type: pool type
264 * MC object device is to be returned 261 * @new_mc_dev: pointer to area where the pointer to the allocated device
262 * is to be returned
265 * 263 *
266 * This function allocates a MC object device from the device's parent DPRC, 264 * Allocatable objects are always used in conjunction with some functional
267 * from the corresponding MC bus' pool of allocatable MC object devices of 265 * device. This function allocates an object of the specified type from
268 * the given resource type. mc_dev cannot be a DPRC itself. 266 * the DPRC containing the functional device.
269 * 267 *
270 * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC 268 * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
271 * portals are allocated using fsl_mc_portal_allocate(), instead of 269 * portals are allocated using fsl_mc_portal_allocate(), instead of
@@ -312,10 +310,9 @@ error:
312EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); 310EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
313 311
314/** 312/**
315 * fsl_mc_object_free - Returns an allocatable MC object device to the 313 * fsl_mc_object_free - Returns an fsl-mc object to the resource
316 * corresponding resource pool of a given MC bus. 314 * pool where it came from.
317 * 315 * @mc_adev: Pointer to the fsl-mc device
318 * @mc_adev: Pointer to the MC object device
319 */ 316 */
320void fsl_mc_object_free(struct fsl_mc_device *mc_adev) 317void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
321{ 318{
@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
332EXPORT_SYMBOL_GPL(fsl_mc_object_free); 329EXPORT_SYMBOL_GPL(fsl_mc_object_free);
333 330
334/* 331/*
335 * Initialize the interrupt pool associated with a MC bus. 332 * A DPRC and the devices in the DPRC all share the same GIC-ITS device
336 * It allocates a block of IRQs from the GIC-ITS 333 * ID. A block of IRQs is pre-allocated and maintained in a pool
334 * from which devices can allocate them when needed.
335 */
336
337/*
338 * Initialize the interrupt pool associated with an fsl-mc bus.
339 * It allocates a block of IRQs from the GIC-ITS.
337 */ 340 */
338int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, 341int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
339 unsigned int irq_count) 342 unsigned int irq_count)
@@ -395,7 +398,7 @@ cleanup_msi_irqs:
395EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); 398EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
396 399
397/** 400/**
398 * Teardown the interrupt pool associated with an MC bus. 401 * Teardown the interrupt pool associated with an fsl-mc bus.
399 * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. 402 * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
400 */ 403 */
401void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) 404void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
422EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); 425EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
423 426
424/** 427/**
425 * It allocates the IRQs required by a given MC object device. The 428 * Allocate the IRQs required by a given fsl-mc device.
426 * IRQs are allocated from the interrupt pool associated with the
427 * MC bus that contains the device, if the device is not a DPRC device.
428 * Otherwise, the IRQs are allocated from the interrupt pool associated
429 * with the MC bus that represents the DPRC device itself.
430 */ 429 */
431int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) 430int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
432{ 431{
@@ -495,8 +494,7 @@ error_resource_alloc:
495EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); 494EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
496 495
497/* 496/*
498 * It frees the IRQs that were allocated for a MC object device, by 497 * Frees the IRQs that were allocated for an fsl-mc device.
499 * returning them to the corresponding interrupt pool.
500 */ 498 */
501void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) 499void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
502{ 500{
@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
605 return error; 603 return error;
606 604
607 dev_dbg(&mc_dev->dev, 605 dev_dbg(&mc_dev->dev,
608 "Allocatable MC object device bound to fsl_mc_allocator driver"); 606 "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
609 return 0; 607 return 0;
610} 608}
611 609
@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
627 } 625 }
628 626
629 dev_dbg(&mc_dev->dev, 627 dev_dbg(&mc_dev->dev,
630 "Allocatable MC object device unbound from fsl_mc_allocator driver"); 628 "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
631 return 0; 629 return 0;
632} 630}
633 631
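
The reworded kernel-doc above describes allocatable objects as companions of a functional device, drawn from the pool of the DPRC containing that device. A hedged sketch of the usual pairing of fsl_mc_object_allocate() with fsl_mc_object_free(); the wrapper and the FSL_MC_POOL_DPBP pool type are illustrative assumptions based on how the allocator is used elsewhere in this driver:

static int example_grab_dpbp(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_device *dpbp_dev;
	int err;

	err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
	if (err)
		return err;

	/* ... configure and use the DPBP alongside mc_dev ... */

	fsl_mc_object_free(dpbp_dev);
	return 0;
}
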
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 44f64b6f0fc9..5ac373c0c716 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale Management Complex (MC) bus driver 2 * Freescale Management Complex (MC) bus driver
3 * 3 *
4 * Copyright (C) 2014 Freescale Semiconductor, Inc. 4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
@@ -9,6 +9,8 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#define pr_fmt(fmt) "fsl-mc: " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/of_device.h> 15#include <linux/of_device.h>
14#include <linux/of_address.h> 16#include <linux/of_address.h>
@@ -34,7 +36,7 @@ static struct kmem_cache *mc_dev_cache;
34 36
35/** 37/**
36 * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device 38 * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
37 * @root_mc_bus_dev: MC object device representing the root DPRC 39 * @root_mc_bus_dev: fsl-mc device representing the root DPRC
38 * @num_translation_ranges: number of entries in addr_translation_ranges 40 * @num_translation_ranges: number of entries in addr_translation_ranges
39 * @translation_ranges: array of bus to system address translation ranges 41 * @translation_ranges: array of bus to system address translation ranges
40 */ 42 */
@@ -62,8 +64,8 @@ struct fsl_mc_addr_translation_range {
62 64
63/** 65/**
64 * fsl_mc_bus_match - device to driver matching callback 66 * fsl_mc_bus_match - device to driver matching callback
65 * @dev: the MC object device structure to match against 67 * @dev: the fsl-mc device to match against
66 * @drv: the device driver to search for matching MC object device id 68 * @drv: the device driver to search for matching fsl-mc object type
67 * structures 69 * structures
68 * 70 *
69 * Returns 1 on success, 0 otherwise. 71 * Returns 1 on success, 0 otherwise.
@@ -91,7 +93,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
91 93
92 /* 94 /*
93 * Traverse the match_id table of the given driver, trying to find 95 * Traverse the match_id table of the given driver, trying to find
94 * a matching for the given MC object device. 96 * a matching for the given device.
95 */ 97 */
96 for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { 98 for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
97 if (id->vendor == mc_dev->obj_desc.vendor && 99 if (id->vendor == mc_dev->obj_desc.vendor &&
@@ -164,8 +166,7 @@ static int fsl_mc_driver_probe(struct device *dev)
164 166
165 error = mc_drv->probe(mc_dev); 167 error = mc_drv->probe(mc_dev);
166 if (error < 0) { 168 if (error < 0) {
167 dev_err(dev, "MC object device probe callback failed: %d\n", 169 dev_err(dev, "%s failed: %d\n", __func__, error);
168 error);
169 return error; 170 return error;
170 } 171 }
171 172
@@ -183,9 +184,7 @@ static int fsl_mc_driver_remove(struct device *dev)
183 184
184 error = mc_drv->remove(mc_dev); 185 error = mc_drv->remove(mc_dev);
185 if (error < 0) { 186 if (error < 0) {
186 dev_err(dev, 187 dev_err(dev, "%s failed: %d\n", __func__, error);
187 "MC object device remove callback failed: %d\n",
188 error);
189 return error; 188 return error;
190 } 189 }
191 190
@@ -232,8 +231,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
232 return error; 231 return error;
233 } 232 }
234 233
235 pr_info("MC object device driver %s registered\n",
236 mc_driver->driver.name);
237 return 0; 234 return 0;
238} 235}
239EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); 236EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
@@ -315,21 +312,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
315 return error; 312 return error;
316} 313}
317 314
318static int get_dprc_version(struct fsl_mc_io *mc_io,
319 int container_id, u16 *major, u16 *minor)
320{
321 struct dprc_attributes attr;
322 int error;
323
324 error = get_dprc_attr(mc_io, container_id, &attr);
325 if (error == 0) {
326 *major = attr.version.major;
327 *minor = attr.version.minor;
328 }
329
330 return error;
331}
332
333static int translate_mc_addr(struct fsl_mc_device *mc_dev, 315static int translate_mc_addr(struct fsl_mc_device *mc_dev,
334 enum dprc_region_type mc_region_type, 316 enum dprc_region_type mc_region_type,
335 u64 mc_offset, phys_addr_t *phys_addr) 317 u64 mc_offset, phys_addr_t *phys_addr)
@@ -452,7 +434,7 @@ bool fsl_mc_is_root_dprc(struct device *dev)
452} 434}
453 435
454/** 436/**
455 * Add a newly discovered MC object device to be visible in Linux 437 * Add a newly discovered fsl-mc device to be visible in Linux
456 */ 438 */
457int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, 439int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
458 struct fsl_mc_io *mc_io, 440 struct fsl_mc_io *mc_io,
@@ -533,8 +515,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
533 goto error_cleanup_dev; 515 goto error_cleanup_dev;
534 } else { 516 } else {
535 /* 517 /*
536 * A non-DPRC MC object device has to be a child of another 518 * A non-DPRC object has to be a child of a DPRC, use the
537 * MC object (specifically a DPRC object) 519 * parent's ICID and interrupt domain.
538 */ 520 */
539 mc_dev->icid = parent_mc_dev->icid; 521 mc_dev->icid = parent_mc_dev->icid;
540 mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; 522 mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
@@ -572,8 +554,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
572 } 554 }
573 555
574 (void)get_device(&mc_dev->dev); 556 (void)get_device(&mc_dev->dev);
575 dev_dbg(parent_dev, "Added MC object device %s\n", 557 dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
576 dev_name(&mc_dev->dev));
577 558
578 *new_mc_dev = mc_dev; 559 *new_mc_dev = mc_dev;
579 return 0; 560 return 0;
@@ -590,10 +571,10 @@ error_cleanup_dev:
590EXPORT_SYMBOL_GPL(fsl_mc_device_add); 571EXPORT_SYMBOL_GPL(fsl_mc_device_add);
591 572
592/** 573/**
593 * fsl_mc_device_remove - Remove a MC object device from being visible to 574 * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
594 * Linux 575 * Linux
595 * 576 *
596 * @mc_dev: Pointer to a MC object device object 577 * @mc_dev: Pointer to an fsl-mc device
597 */ 578 */
598void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) 579void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
599{ 580{
@@ -749,8 +730,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
749 struct mc_version mc_version; 730 struct mc_version mc_version;
750 struct resource res; 731 struct resource res;
751 732
752 dev_info(&pdev->dev, "Root MC bus device probed");
753
754 mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); 733 mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
755 if (!mc) 734 if (!mc)
756 return -ENOMEM; 735 return -ENOMEM;
@@ -783,8 +762,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
783 goto error_cleanup_mc_io; 762 goto error_cleanup_mc_io;
784 } 763 }
785 764
786 dev_info(&pdev->dev, 765 dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
787 "Freescale Management Complex Firmware version: %u.%u.%u\n",
788 mc_version.major, mc_version.minor, mc_version.revision); 766 mc_version.major, mc_version.minor, mc_version.revision);
789 767
790 error = get_mc_addr_translation_ranges(&pdev->dev, 768 error = get_mc_addr_translation_ranges(&pdev->dev,
@@ -793,7 +771,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
793 if (error < 0) 771 if (error < 0)
794 goto error_cleanup_mc_io; 772 goto error_cleanup_mc_io;
795 773
796 error = dpmng_get_container_id(mc_io, 0, &container_id); 774 error = dprc_get_container_id(mc_io, 0, &container_id);
797 if (error < 0) { 775 if (error < 0) {
798 dev_err(&pdev->dev, 776 dev_err(&pdev->dev,
799 "dpmng_get_container_id() failed: %d\n", error); 777 "dpmng_get_container_id() failed: %d\n", error);
@@ -801,8 +779,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
801 } 779 }
802 780
803 memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); 781 memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
804 error = get_dprc_version(mc_io, container_id, 782 error = dprc_get_api_version(mc_io, 0,
805 &obj_desc.ver_major, &obj_desc.ver_minor); 783 &obj_desc.ver_major,
784 &obj_desc.ver_minor);
806 if (error < 0) 785 if (error < 0)
807 goto error_cleanup_mc_io; 786 goto error_cleanup_mc_io;
808 787
@@ -840,7 +819,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
840 fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); 819 fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
841 mc->root_mc_bus_dev->mc_io = NULL; 820 mc->root_mc_bus_dev->mc_io = NULL;
842 821
843 dev_info(&pdev->dev, "Root MC bus device removed");
844 return 0; 822 return 0;
845} 823}
846 824
@@ -875,12 +853,10 @@ static int __init fsl_mc_bus_driver_init(void)
875 853
876 error = bus_register(&fsl_mc_bus_type); 854 error = bus_register(&fsl_mc_bus_type);
877 if (error < 0) { 855 if (error < 0) {
878 pr_err("fsl-mc bus type registration failed: %d\n", error); 856 pr_err("bus type registration failed: %d\n", error);
879 goto error_cleanup_cache; 857 goto error_cleanup_cache;
880 } 858 }
881 859
882 pr_info("fsl-mc bus type registered\n");
883
884 error = platform_driver_register(&fsl_mc_bus_driver); 860 error = platform_driver_register(&fsl_mc_bus_driver);
885 if (error < 0) { 861 if (error < 0) {
886 pr_err("platform_driver_register() failed: %d\n", error); 862 pr_err("platform_driver_register() failed: %d\n", error);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b1fa18..7975c6e6fee3 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale Management Complex (MC) bus driver MSI support 2 * Freescale Management Complex (MC) bus driver MSI support
3 * 3 *
4 * Copyright (C) 2015 Freescale Semiconductor, Inc. 4 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index d459c2673f39..5c49c9d2df6a 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -10,6 +10,9 @@
10#ifndef _FSL_MC_PRIVATE_H_ 10#ifndef _FSL_MC_PRIVATE_H_
11#define _FSL_MC_PRIVATE_H_ 11#define _FSL_MC_PRIVATE_H_
12 12
13#include "../include/mc.h"
14#include "../include/mc-bus.h"
15
13int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, 16int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
14 struct fsl_mc_io *mc_io, 17 struct fsl_mc_io *mc_io,
15 struct device *parent_dev, 18 struct device *parent_dev,
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac640752f..6b1cd574644f 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale Management Complex (MC) bus driver MSI support 2 * Freescale Management Complex (MC) bus driver MSI support
3 * 3 *
4 * Copyright (C) 2015 Freescale Semiconductor, Inc. 4 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
@@ -19,7 +19,7 @@
19#include "../include/mc-bus.h" 19#include "../include/mc-bus.h"
20 20
21static struct irq_chip its_msi_irq_chip = { 21static struct irq_chip its_msi_irq_chip = {
22 .name = "fsl-mc-bus-msi", 22 .name = "ITS-fMSI",
23 .irq_mask = irq_chip_mask_parent, 23 .irq_mask = irq_chip_mask_parent,
24 .irq_unmask = irq_chip_unmask_parent, 24 .irq_unmask = irq_chip_unmask_parent,
25 .irq_eoi = irq_chip_eoi_parent, 25 .irq_eoi = irq_chip_eoi_parent,
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index 798c965fe203..d66b87f0903b 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2016 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
11 * names of any contributors may be used to endorse or promote products 12 * names of any contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission. 13 * derived from this software without specific prior written permission.
13 * 14 *
14 *
15 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software 16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any 17 * Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 285917c7c8e4..4d82802b384d 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -1,4 +1,5 @@
1/* Copyright 2013-2014 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * I/O services to send MC commands to the MC hardware 4 * I/O services to send MC commands to the MC hardware
4 * 5 *
@@ -13,7 +14,6 @@
13 * names of any contributors may be used to endorse or promote products 14 * names of any contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission. 15 * derived from this software without specific prior written permission.
15 * 16 *
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the 17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") as published by the Free Software 18 * GNU General Public License ("GPL") as published by the Free Software
19 * Foundation, either version 2 of that License or (at your option) any 19 * Foundation, either version 2 of that License or (at your option) any
@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
67 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; 67 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
68 u16 cmd_id = le16_to_cpu(hdr->cmd_id); 68 u16 cmd_id = le16_to_cpu(hdr->cmd_id);
69 69
70 return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT; 70 return cmd_id;
71} 71}
72 72
73static int mc_status_to_error(enum mc_cmd_status status) 73static int mc_status_to_error(enum mc_cmd_status status)
@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
200 200
201 if (time_after_eq(jiffies, jiffies_until_timeout)) { 201 if (time_after_eq(jiffies, jiffies_until_timeout)) {
202 dev_dbg(mc_io->dev, 202 dev_dbg(mc_io->dev,
203 "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", 203 "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
204 mc_io->portal_phys_addr, 204 mc_io->portal_phys_addr,
205 (unsigned int)mc_cmd_hdr_read_token(cmd), 205 (unsigned int)mc_cmd_hdr_read_token(cmd),
206 (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); 206 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
240 timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; 240 timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
241 if (timeout_usecs == 0) { 241 if (timeout_usecs == 0) {
242 dev_dbg(mc_io->dev, 242 dev_dbg(mc_io->dev,
243 "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", 243 "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
244 mc_io->portal_phys_addr, 244 mc_io->portal_phys_addr,
245 (unsigned int)mc_cmd_hdr_read_token(cmd), 245 (unsigned int)mc_cmd_hdr_read_token(cmd),
246 (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); 246 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
294 294
295 if (status != MC_CMD_STATUS_OK) { 295 if (status != MC_CMD_STATUS_OK) {
296 dev_dbg(mc_io->dev, 296 dev_dbg(mc_io->dev,
297 "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", 297 "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
298 mc_io->portal_phys_addr, 298 mc_io->portal_phys_addr,
299 (unsigned int)mc_cmd_hdr_read_token(cmd), 299 (unsigned int)mc_cmd_hdr_read_token(cmd),
300 (unsigned int)mc_cmd_hdr_read_cmdid(cmd), 300 (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index e14e85a5d6df..bf34b1e0e730 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
32#ifndef __FSL_DPBP_H 33#ifndef __FSL_DPBP_H
33#define __FSL_DPBP_H 34#define __FSL_DPBP_H
34 35
35/* Data Path Buffer Pool API 36/*
37 * Data Path Buffer Pool API
36 * Contains initialization APIs and runtime control APIs for DPBP 38 * Contains initialization APIs and runtime control APIs for DPBP
37 */ 39 */
38 40
@@ -44,8 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
44 u16 *token); 46 u16 *token);
45 47
46int dpbp_close(struct fsl_mc_io *mc_io, 48int dpbp_close(struct fsl_mc_io *mc_io,
47 u32 cmd_flags, 49 u32 cmd_flags,
48 u16 token); 50 u16 token);
49 51
50/** 52/**
51 * struct dpbp_cfg - Structure representing DPBP configuration 53 * struct dpbp_cfg - Structure representing DPBP configuration
@@ -55,14 +57,16 @@ struct dpbp_cfg {
55 u32 options; 57 u32 options;
56}; 58};
57 59
58int dpbp_create(struct fsl_mc_io *mc_io, 60int dpbp_create(struct fsl_mc_io *mc_io,
59 u32 cmd_flags, 61 u16 dprc_token,
60 const struct dpbp_cfg *cfg, 62 u32 cmd_flags,
61 u16 *token); 63 const struct dpbp_cfg *cfg,
64 u32 *obj_id);
62 65
63int dpbp_destroy(struct fsl_mc_io *mc_io, 66int dpbp_destroy(struct fsl_mc_io *mc_io,
67 u16 dprc_token,
64 u32 cmd_flags, 68 u32 cmd_flags,
65 u16 token); 69 u32 obj_id);
66 70
67int dpbp_enable(struct fsl_mc_io *mc_io, 71int dpbp_enable(struct fsl_mc_io *mc_io,
68 u32 cmd_flags, 72 u32 cmd_flags,
@@ -88,85 +92,75 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
88 * @irq_num: A user defined number associated with this IRQ 92 * @irq_num: A user defined number associated with this IRQ
89 */ 93 */
90struct dpbp_irq_cfg { 94struct dpbp_irq_cfg {
91 u64 addr; 95 u64 addr;
92 u32 val; 96 u32 val;
93 int irq_num; 97 int irq_num;
94}; 98};
95 99
96int dpbp_set_irq(struct fsl_mc_io *mc_io, 100int dpbp_set_irq(struct fsl_mc_io *mc_io,
97 u32 cmd_flags, 101 u32 cmd_flags,
98 u16 token, 102 u16 token,
99 u8 irq_index, 103 u8 irq_index,
100 struct dpbp_irq_cfg *irq_cfg); 104 struct dpbp_irq_cfg *irq_cfg);
101 105
102int dpbp_get_irq(struct fsl_mc_io *mc_io, 106int dpbp_get_irq(struct fsl_mc_io *mc_io,
103 u32 cmd_flags, 107 u32 cmd_flags,
104 u16 token, 108 u16 token,
105 u8 irq_index, 109 u8 irq_index,
106 int *type, 110 int *type,
107 struct dpbp_irq_cfg *irq_cfg); 111 struct dpbp_irq_cfg *irq_cfg);
108 112
109int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, 113int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
110 u32 cmd_flags, 114 u32 cmd_flags,
111 u16 token, 115 u16 token,
112 u8 irq_index, 116 u8 irq_index,
113 u8 en); 117 u8 en);
114 118
115int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, 119int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
116 u32 cmd_flags, 120 u32 cmd_flags,
117 u16 token, 121 u16 token,
118 u8 irq_index, 122 u8 irq_index,
119 u8 *en); 123 u8 *en);
120 124
121int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, 125int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
122 u32 cmd_flags, 126 u32 cmd_flags,
123 u16 token, 127 u16 token,
124 u8 irq_index, 128 u8 irq_index,
125 u32 mask); 129 u32 mask);
126 130
127int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, 131int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
128 u32 cmd_flags, 132 u32 cmd_flags,
129 u16 token, 133 u16 token,
130 u8 irq_index, 134 u8 irq_index,
131 u32 *mask); 135 u32 *mask);
132 136
133int dpbp_get_irq_status(struct fsl_mc_io *mc_io, 137int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
134 u32 cmd_flags, 138 u32 cmd_flags,
135 u16 token, 139 u16 token,
136 u8 irq_index, 140 u8 irq_index,
137 u32 *status); 141 u32 *status);
138 142
139int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, 143int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
140 u32 cmd_flags, 144 u32 cmd_flags,
141 u16 token, 145 u16 token,
142 u8 irq_index, 146 u8 irq_index,
143 u32 status); 147 u32 status);
144 148
145/** 149/**
146 * struct dpbp_attr - Structure representing DPBP attributes 150 * struct dpbp_attr - Structure representing DPBP attributes
147 * @id: DPBP object ID 151 * @id: DPBP object ID
148 * @version: DPBP version
149 * @bpid: Hardware buffer pool ID; should be used as an argument in 152 * @bpid: Hardware buffer pool ID; should be used as an argument in
150 * acquire/release operations on buffers 153 * acquire/release operations on buffers
151 */ 154 */
152struct dpbp_attr { 155struct dpbp_attr {
153 int id; 156 int id;
154 /**
155 * struct version - Structure representing DPBP version
156 * @major: DPBP major version
157 * @minor: DPBP minor version
158 */
159 struct {
160 u16 major;
161 u16 minor;
162 } version;
163 u16 bpid; 157 u16 bpid;
164}; 158};
165 159
166int dpbp_get_attributes(struct fsl_mc_io *mc_io, 160int dpbp_get_attributes(struct fsl_mc_io *mc_io,
167 u32 cmd_flags, 161 u32 cmd_flags,
168 u16 token, 162 u16 token,
169 struct dpbp_attr *attr); 163 struct dpbp_attr *attr);
170 164
171/** 165/**
172 * DPBP notifications options 166 * DPBP notifications options
@@ -196,24 +190,29 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
196 * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values 190 * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
197 */ 191 */
198struct dpbp_notification_cfg { 192struct dpbp_notification_cfg {
199 u32 depletion_entry; 193 u32 depletion_entry;
200 u32 depletion_exit; 194 u32 depletion_exit;
201 u32 surplus_entry; 195 u32 surplus_entry;
202 u32 surplus_exit; 196 u32 surplus_exit;
203 u64 message_iova; 197 u64 message_iova;
204 u64 message_ctx; 198 u64 message_ctx;
205 u16 options; 199 u16 options;
206}; 200};
207 201
208int dpbp_set_notifications(struct fsl_mc_io *mc_io, 202int dpbp_set_notifications(struct fsl_mc_io *mc_io,
209 u32 cmd_flags, 203 u32 cmd_flags,
210 u16 token, 204 u16 token,
211 struct dpbp_notification_cfg *cfg); 205 struct dpbp_notification_cfg *cfg);
206
207int dpbp_get_notifications(struct fsl_mc_io *mc_io,
208 u32 cmd_flags,
209 u16 token,
210 struct dpbp_notification_cfg *cfg);
212 211
213int dpbp_get_notifications(struct fsl_mc_io *mc_io, 212int dpbp_get_api_version(struct fsl_mc_io *mc_io,
214 u32 cmd_flags, 213 u32 cmd_flags,
215 u16 token, 214 u16 *major_ver,
216 struct dpbp_notification_cfg *cfg); 215 u16 *minor_ver);
217 216
218/** @} */ 217/** @} */
219 218
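With this change a freshly created DPBP is addressed by an object id owned by the parent DPRC rather than by an open token, so create and destroy now take the parent's dprc_token, and the version fields dropped from struct dpbp_attr are replaced by dpbp_get_api_version(). A minimal sketch of the new lifecycle, assuming the dpbp.h declarations above are in scope; the exact middle parameters of dpbp_open() are an assumption based on the existing driver, not part of this hunk:

/* sketch: create a DPBP under a parent DPRC, query it, then destroy it */
static int example_dpbp_lifecycle(struct fsl_mc_io *mc_io, u16 dprc_token)
{
        struct dpbp_cfg cfg = { .options = 0 };
        struct dpbp_attr attr;
        u32 obj_id;
        u16 token;
        int err;

        /* dpbp_create() now reports the new object id instead of a token */
        err = dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);
        if (err)
                return err;

        /* an explicit open is still needed to get a token for runtime commands */
        err = dpbp_open(mc_io, 0, obj_id, &token);
        if (err)
                goto out_destroy;

        err = dpbp_get_attributes(mc_io, 0, token, &attr);
        if (!err)
                pr_info("dpbp %d bpid %d\n", attr.id, attr.bpid);

        dpbp_close(mc_io, 0, token);

out_destroy:
        /* destroy also addresses the object by id, through the parent DPRC */
        dpbp_destroy(mc_io, dprc_token, 0, obj_id);
        return err;
}

Callers that previously read the version out of struct dpbp_attr would instead call dpbp_get_api_version(mc_io, 0, &major, &minor).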
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
index e5cfd017f9a5..7d8e255da578 100644
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ b/drivers/staging/fsl-mc/include/dpmng.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
32#ifndef __FSL_DPMNG_H 33#ifndef __FSL_DPMNG_H
33#define __FSL_DPMNG_H 34#define __FSL_DPMNG_H
34 35
35/* Management Complex General API 36/*
37 * Management Complex General API
36 * Contains general API for the Management Complex firmware 38 * Contains general API for the Management Complex firmware
37 */ 39 */
38 40
@@ -58,12 +60,12 @@ struct mc_version {
58 u32 revision; 60 u32 revision;
59}; 61};
60 62
61int mc_get_version(struct fsl_mc_io *mc_io, 63int mc_get_version(struct fsl_mc_io *mc_io,
62 u32 cmd_flags, 64 u32 cmd_flags,
63 struct mc_version *mc_ver_info); 65 struct mc_version *mc_ver_info);
64 66
65int dpmng_get_container_id(struct fsl_mc_io *mc_io, 67int dpmng_get_container_id(struct fsl_mc_io *mc_io,
66 u32 cmd_flags, 68 u32 cmd_flags,
67 int *container_id); 69 int *container_id);
68 70
69#endif /* __FSL_DPMNG_H */ 71#endif /* __FSL_DPMNG_H */
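For reference, a minimal sketch of how the two management-complex queries declared above are typically driven from a probe path; the field names of struct mc_version follow the header shown here, and the printed format is illustrative only:

static void example_report_mc(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int container_id;

        if (!mc_get_version(mc_io, 0, &ver))
                pr_info("MC firmware %u.%u (revision %u)\n",
                        ver.major, ver.minor, ver.revision);

        if (!dpmng_get_container_id(mc_io, 0, &container_id))
                pr_info("root DPRC container id: %d\n", container_id);
}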
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 593b2bbe7f71..f9ea769ccfab 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -34,7 +35,8 @@
34 35
35#include "mc-cmd.h" 36#include "mc-cmd.h"
36 37
37/* Data Path Resource Container API 38/*
39 * Data Path Resource Container API
38 * Contains DPRC API for managing and querying DPAA resources 40 * Contains DPRC API for managing and querying DPAA resources
39 */ 41 */
40 42
@@ -70,12 +72,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
70 * and can be retrieved using dprc_get_attributes() 72 * and can be retrieved using dprc_get_attributes()
71 */ 73 */
72 74
73/* Spawn Policy Option allowed - Indicates that the new container is allowed 75/*
76 * Spawn Policy Option allowed - Indicates that the new container is allowed
74 * to spawn and have its own child containers. 77 * to spawn and have its own child containers.
75 */ 78 */
76#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 79#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
77 80
78/* General Container allocation policy - Indicates that the new container is 81/*
82 * General Container allocation policy - Indicates that the new container is
79 * allowed to allocate requested resources from its parent container; if not 83 * allowed to allocate requested resources from its parent container; if not
80 * set, the container is only allowed to use resources in its own pools; Note 84 * set, the container is only allowed to use resources in its own pools; Note
81 * that this is a container's global policy, but the parent container may 85 * that this is a container's global policy, but the parent container may
@@ -83,12 +87,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
83 */ 87 */
84#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 88#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
85 89
86/* Object initialization allowed - software context associated with this 90/*
91 * Object initialization allowed - software context associated with this
87 * container is allowed to invoke object initialization operations. 92 * container is allowed to invoke object initialization operations.
88 */ 93 */
89#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 94#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
90 95
91/* Topology change allowed - software context associated with this 96/*
97 * Topology change allowed - software context associated with this
92 * container is allowed to invoke topology operations, such as attach/detach 98 * container is allowed to invoke topology operations, such as attach/detach
93 * of network objects. 99 * of network objects.
94 */ 100 */
@@ -116,17 +122,17 @@ struct dprc_cfg {
116 char label[16]; 122 char label[16];
117}; 123};
118 124
119int dprc_create_container(struct fsl_mc_io *mc_io, 125int dprc_create_container(struct fsl_mc_io *mc_io,
120 u32 cmd_flags, 126 u32 cmd_flags,
121 u16 token, 127 u16 token,
122 struct dprc_cfg *cfg, 128 struct dprc_cfg *cfg,
123 int *child_container_id, 129 int *child_container_id,
124 u64 *child_portal_offset); 130 u64 *child_portal_offset);
125 131
126int dprc_destroy_container(struct fsl_mc_io *mc_io, 132int dprc_destroy_container(struct fsl_mc_io *mc_io,
127 u32 cmd_flags, 133 u32 cmd_flags,
128 u16 token, 134 u16 token,
129 int child_container_id); 135 int child_container_id);
130 136
131int dprc_reset_container(struct fsl_mc_io *mc_io, 137int dprc_reset_container(struct fsl_mc_io *mc_io,
132 u32 cmd_flags, 138 u32 cmd_flags,
@@ -139,7 +145,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
139#define DPRC_IRQ_INDEX 0 145#define DPRC_IRQ_INDEX 0
140 146
141/* Number of dprc's IRQs */ 147/* Number of dprc's IRQs */
142#define DPRC_NUM_OF_IRQS 1 148#define DPRC_NUM_OF_IRQS 1
143 149
144/* DPRC IRQ events */ 150/* DPRC IRQ events */
145 151
@@ -151,12 +157,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
151#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 157#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
152/* IRQ event - Indicates that resources removed from the container */ 158/* IRQ event - Indicates that resources removed from the container */
153#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 159#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
154/* IRQ event - Indicates that one of the descendant containers that opened by 160/*
161 * IRQ event - Indicates that one of the descendant containers that opened by
155 * this container is destroyed 162 * this container is destroyed
156 */ 163 */
157#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 164#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
158 165
159/* IRQ event - Indicates that on one of the container's opened object is 166/*
167 * IRQ event - Indicates that on one of the container's opened object is
160 * destroyed 168 * destroyed
161 */ 169 */
162#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 170#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
@@ -171,59 +179,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
171 * @irq_num: A user defined number associated with this IRQ 179 * @irq_num: A user defined number associated with this IRQ
172 */ 180 */
173struct dprc_irq_cfg { 181struct dprc_irq_cfg {
174 phys_addr_t paddr; 182 phys_addr_t paddr;
175 u32 val; 183 u32 val;
176 int irq_num; 184 int irq_num;
177}; 185};
178 186
179int dprc_set_irq(struct fsl_mc_io *mc_io, 187int dprc_set_irq(struct fsl_mc_io *mc_io,
180 u32 cmd_flags, 188 u32 cmd_flags,
181 u16 token, 189 u16 token,
182 u8 irq_index, 190 u8 irq_index,
183 struct dprc_irq_cfg *irq_cfg); 191 struct dprc_irq_cfg *irq_cfg);
184 192
185int dprc_get_irq(struct fsl_mc_io *mc_io, 193int dprc_get_irq(struct fsl_mc_io *mc_io,
186 u32 cmd_flags, 194 u32 cmd_flags,
187 u16 token, 195 u16 token,
188 u8 irq_index, 196 u8 irq_index,
189 int *type, 197 int *type,
190 struct dprc_irq_cfg *irq_cfg); 198 struct dprc_irq_cfg *irq_cfg);
191 199
192int dprc_set_irq_enable(struct fsl_mc_io *mc_io, 200int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
193 u32 cmd_flags, 201 u32 cmd_flags,
194 u16 token, 202 u16 token,
195 u8 irq_index, 203 u8 irq_index,
196 u8 en); 204 u8 en);
197 205
198int dprc_get_irq_enable(struct fsl_mc_io *mc_io, 206int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
199 u32 cmd_flags, 207 u32 cmd_flags,
200 u16 token, 208 u16 token,
201 u8 irq_index, 209 u8 irq_index,
202 u8 *en); 210 u8 *en);
203 211
204int dprc_set_irq_mask(struct fsl_mc_io *mc_io, 212int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
205 u32 cmd_flags, 213 u32 cmd_flags,
206 u16 token, 214 u16 token,
207 u8 irq_index, 215 u8 irq_index,
208 u32 mask); 216 u32 mask);
209 217
210int dprc_get_irq_mask(struct fsl_mc_io *mc_io, 218int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
211 u32 cmd_flags, 219 u32 cmd_flags,
212 u16 token, 220 u16 token,
213 u8 irq_index, 221 u8 irq_index,
214 u32 *mask); 222 u32 *mask);
215 223
216int dprc_get_irq_status(struct fsl_mc_io *mc_io, 224int dprc_get_irq_status(struct fsl_mc_io *mc_io,
217 u32 cmd_flags, 225 u32 cmd_flags,
218 u16 token, 226 u16 token,
219 u8 irq_index, 227 u8 irq_index,
220 u32 *status); 228 u32 *status);
221 229
222int dprc_clear_irq_status(struct fsl_mc_io *mc_io, 230int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
223 u32 cmd_flags, 231 u32 cmd_flags,
224 u16 token, 232 u16 token,
225 u8 irq_index, 233 u8 irq_index,
226 u32 status); 234 u32 status);
227 235
228/** 236/**
229 * struct dprc_attributes - Container attributes 237 * struct dprc_attributes - Container attributes
@@ -231,63 +239,56 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
231 * @icid: Container's ICID 239 * @icid: Container's ICID
232 * @portal_id: Container's portal ID 240 * @portal_id: Container's portal ID
233 * @options: Container's options as set at container's creation 241 * @options: Container's options as set at container's creation
234 * @version: DPRC version
235 */ 242 */
236struct dprc_attributes { 243struct dprc_attributes {
237 int container_id; 244 int container_id;
238 u16 icid; 245 u16 icid;
239 int portal_id; 246 int portal_id;
240 u64 options; 247 u64 options;
241 /**
242 * struct version - DPRC version
243 * @major: DPRC major version
244 * @minor: DPRC minor version
245 */
246 struct {
247 u16 major;
248 u16 minor;
249 } version;
250}; 248};
251 249
252int dprc_get_attributes(struct fsl_mc_io *mc_io, 250int dprc_get_attributes(struct fsl_mc_io *mc_io,
253 u32 cmd_flags, 251 u32 cmd_flags,
254 u16 token, 252 u16 token,
255 struct dprc_attributes *attributes); 253 struct dprc_attributes *attributes);
256 254
257int dprc_set_res_quota(struct fsl_mc_io *mc_io, 255int dprc_set_res_quota(struct fsl_mc_io *mc_io,
258 u32 cmd_flags, 256 u32 cmd_flags,
259 u16 token, 257 u16 token,
260 int child_container_id, 258 int child_container_id,
261 char *type, 259 char *type,
262 u16 quota); 260 u16 quota);
263 261
264int dprc_get_res_quota(struct fsl_mc_io *mc_io, 262int dprc_get_res_quota(struct fsl_mc_io *mc_io,
265 u32 cmd_flags, 263 u32 cmd_flags,
266 u16 token, 264 u16 token,
267 int child_container_id, 265 int child_container_id,
268 char *type, 266 char *type,
269 u16 *quota); 267 u16 *quota);
270 268
271/* Resource request options */ 269/* Resource request options */
272 270
273/* Explicit resource ID request - The requested objects/resources 271/*
272 * Explicit resource ID request - The requested objects/resources
274 * are explicit and sequential (in case of resources). 273 * are explicit and sequential (in case of resources).
275 * The base ID is given at res_req at base_align field 274 * The base ID is given at res_req at base_align field
276 */ 275 */
277#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 276#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
278 277
279/* Aligned resources request - Relevant only for resources 278/*
279 * Aligned resources request - Relevant only for resources
280 * request (and not objects). Indicates that resources base ID should be 280 * request (and not objects). Indicates that resources base ID should be
281 * sequential and aligned to the value given at dprc_res_req base_align field 281 * sequential and aligned to the value given at dprc_res_req base_align field
282 */ 282 */
283#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 283#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
284 284
285/* Plugged Flag - Relevant only for object assignment request. 285/*
286 * Plugged Flag - Relevant only for object assignment request.
286 * Indicates that after all objects assigned. An interrupt will be invoked at 287 * Indicates that after all objects assigned. An interrupt will be invoked at
287 * the relevant GPP. The assigned object will be marked as plugged. 288 * the relevant GPP. The assigned object will be marked as plugged.
288 * plugged objects can't be assigned from their container 289 * plugged objects can't be assigned from their container
289 */ 290 */
290#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 291#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
291 292
292/** 293/**
293 * struct dprc_res_req - Resource request descriptor, to be used in assignment 294 * struct dprc_res_req - Resource request descriptor, to be used in assignment
@@ -312,33 +313,33 @@ struct dprc_res_req {
312 int id_base_align; 313 int id_base_align;
313}; 314};
314 315
315int dprc_assign(struct fsl_mc_io *mc_io, 316int dprc_assign(struct fsl_mc_io *mc_io,
316 u32 cmd_flags, 317 u32 cmd_flags,
317 u16 token, 318 u16 token,
318 int container_id, 319 int container_id,
319 struct dprc_res_req *res_req); 320 struct dprc_res_req *res_req);
320 321
321int dprc_unassign(struct fsl_mc_io *mc_io, 322int dprc_unassign(struct fsl_mc_io *mc_io,
322 u32 cmd_flags, 323 u32 cmd_flags,
323 u16 token, 324 u16 token,
324 int child_container_id, 325 int child_container_id,
325 struct dprc_res_req *res_req); 326 struct dprc_res_req *res_req);
326 327
327int dprc_get_pool_count(struct fsl_mc_io *mc_io, 328int dprc_get_pool_count(struct fsl_mc_io *mc_io,
328 u32 cmd_flags, 329 u32 cmd_flags,
329 u16 token, 330 u16 token,
330 int *pool_count); 331 int *pool_count);
331 332
332int dprc_get_pool(struct fsl_mc_io *mc_io, 333int dprc_get_pool(struct fsl_mc_io *mc_io,
333 u32 cmd_flags, 334 u32 cmd_flags,
334 u16 token, 335 u16 token,
335 int pool_index, 336 int pool_index,
336 char *type); 337 char *type);
337 338
338int dprc_get_obj_count(struct fsl_mc_io *mc_io, 339int dprc_get_obj_count(struct fsl_mc_io *mc_io,
339 u32 cmd_flags, 340 u32 cmd_flags,
340 u16 token, 341 u16 token,
341 int *obj_count); 342 int *obj_count);
342 343
343/* Objects Attributes Flags */ 344/* Objects Attributes Flags */
344 345
@@ -353,7 +354,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
353 * masters; 354 * masters;
354 * user is responsible for proper memory handling through IOMMU configuration. 355 * user is responsible for proper memory handling through IOMMU configuration.
355 */ 356 */
356#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 357#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
357 358
358/** 359/**
359 * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() 360 * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
@@ -381,41 +382,41 @@ struct dprc_obj_desc {
381 u16 flags; 382 u16 flags;
382}; 383};
383 384
384int dprc_get_obj(struct fsl_mc_io *mc_io, 385int dprc_get_obj(struct fsl_mc_io *mc_io,
385 u32 cmd_flags, 386 u32 cmd_flags,
386 u16 token, 387 u16 token,
387 int obj_index, 388 int obj_index,
388 struct dprc_obj_desc *obj_desc); 389 struct dprc_obj_desc *obj_desc);
389 390
390int dprc_get_obj_desc(struct fsl_mc_io *mc_io, 391int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
391 u32 cmd_flags, 392 u32 cmd_flags,
392 u16 token, 393 u16 token,
393 char *obj_type, 394 char *obj_type,
394 int obj_id, 395 int obj_id,
395 struct dprc_obj_desc *obj_desc); 396 struct dprc_obj_desc *obj_desc);
396 397
397int dprc_set_obj_irq(struct fsl_mc_io *mc_io, 398int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
398 u32 cmd_flags, 399 u32 cmd_flags,
399 u16 token, 400 u16 token,
400 char *obj_type, 401 char *obj_type,
401 int obj_id, 402 int obj_id,
402 u8 irq_index, 403 u8 irq_index,
403 struct dprc_irq_cfg *irq_cfg); 404 struct dprc_irq_cfg *irq_cfg);
404 405
405int dprc_get_obj_irq(struct fsl_mc_io *mc_io, 406int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
406 u32 cmd_flags, 407 u32 cmd_flags,
407 u16 token, 408 u16 token,
408 char *obj_type, 409 char *obj_type,
409 int obj_id, 410 int obj_id,
410 u8 irq_index, 411 u8 irq_index,
411 int *type, 412 int *type,
412 struct dprc_irq_cfg *irq_cfg); 413 struct dprc_irq_cfg *irq_cfg);
413 414
414int dprc_get_res_count(struct fsl_mc_io *mc_io, 415int dprc_get_res_count(struct fsl_mc_io *mc_io,
415 u32 cmd_flags, 416 u32 cmd_flags,
416 u16 token, 417 u16 token,
417 char *type, 418 char *type,
418 int *res_count); 419 int *res_count);
419 420
420/** 421/**
421 * enum dprc_iter_status - Iteration status 422 * enum dprc_iter_status - Iteration status
@@ -444,11 +445,11 @@ struct dprc_res_ids_range_desc {
444 enum dprc_iter_status iter_status; 445 enum dprc_iter_status iter_status;
445}; 446};
446 447
447int dprc_get_res_ids(struct fsl_mc_io *mc_io, 448int dprc_get_res_ids(struct fsl_mc_io *mc_io,
448 u32 cmd_flags, 449 u32 cmd_flags,
449 u16 token, 450 u16 token,
450 char *type, 451 char *type,
451 struct dprc_res_ids_range_desc *range_desc); 452 struct dprc_res_ids_range_desc *range_desc);
452 453
453/* Region flags */ 454/* Region flags */
454/* Cacheable - Indicates that region should be mapped as cacheable */ 455/* Cacheable - Indicates that region should be mapped as cacheable */
@@ -481,20 +482,20 @@ struct dprc_region_desc {
481 enum dprc_region_type type; 482 enum dprc_region_type type;
482}; 483};
483 484
484int dprc_get_obj_region(struct fsl_mc_io *mc_io, 485int dprc_get_obj_region(struct fsl_mc_io *mc_io,
485 u32 cmd_flags, 486 u32 cmd_flags,
486 u16 token, 487 u16 token,
487 char *obj_type, 488 char *obj_type,
488 int obj_id, 489 int obj_id,
489 u8 region_index, 490 u8 region_index,
490 struct dprc_region_desc *region_desc); 491 struct dprc_region_desc *region_desc);
491 492
492int dprc_set_obj_label(struct fsl_mc_io *mc_io, 493int dprc_set_obj_label(struct fsl_mc_io *mc_io,
493 u32 cmd_flags, 494 u32 cmd_flags,
494 u16 token, 495 u16 token,
495 char *obj_type, 496 char *obj_type,
496 int obj_id, 497 int obj_id,
497 char *label); 498 char *label);
498 499
499/** 500/**
500 * struct dprc_endpoint - Endpoint description for link connect/disconnect 501 * struct dprc_endpoint - Endpoint description for link connect/disconnect
@@ -521,24 +522,33 @@ struct dprc_connection_cfg {
521 u32 max_rate; 522 u32 max_rate;
522}; 523};
523 524
524int dprc_connect(struct fsl_mc_io *mc_io, 525int dprc_connect(struct fsl_mc_io *mc_io,
525 u32 cmd_flags, 526 u32 cmd_flags,
526 u16 token, 527 u16 token,
527 const struct dprc_endpoint *endpoint1, 528 const struct dprc_endpoint *endpoint1,
528 const struct dprc_endpoint *endpoint2, 529 const struct dprc_endpoint *endpoint2,
529 const struct dprc_connection_cfg *cfg); 530 const struct dprc_connection_cfg *cfg);
530 531
531int dprc_disconnect(struct fsl_mc_io *mc_io, 532int dprc_disconnect(struct fsl_mc_io *mc_io,
532 u32 cmd_flags, 533 u32 cmd_flags,
533 u16 token, 534 u16 token,
534 const struct dprc_endpoint *endpoint); 535 const struct dprc_endpoint *endpoint);
535 536
536int dprc_get_connection(struct fsl_mc_io *mc_io, 537int dprc_get_connection(struct fsl_mc_io *mc_io,
537 u32 cmd_flags, 538 u32 cmd_flags,
538 u16 token, 539 u16 token,
539 const struct dprc_endpoint *endpoint1, 540 const struct dprc_endpoint *endpoint1,
540 struct dprc_endpoint *endpoint2, 541 struct dprc_endpoint *endpoint2,
541 int *state); 542 int *state);
543
544int dprc_get_api_version(struct fsl_mc_io *mc_io,
545 u32 cmd_flags,
546 u16 *major_ver,
547 u16 *minor_ver);
548
549int dprc_get_container_id(struct fsl_mc_io *mc_io,
550 u32 cmd_flags,
551 int *container_id);
542 552
543#endif /* _FSL_DPRC_H */ 553#endif /* _FSL_DPRC_H */
544 554
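The two new DPRC queries follow the pattern used by the other dp* objects: the API version is a property of the object type (no open token needed), and the container id can now be read directly through the DPRC API. A usage sketch, assuming the dprc.h declarations above are in scope:

static int example_check_dprc(struct fsl_mc_io *mc_io)
{
        u16 major, minor;
        int container_id;
        int err;

        err = dprc_get_api_version(mc_io, 0, &major, &minor);
        if (err)
                return err;
        pr_info("DPRC API %u.%u\n", major, minor);

        err = dprc_get_container_id(mc_io, 0, &container_id);
        if (err)
                return err;
        pr_info("running in container %d\n", container_id);

        return 0;
}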
diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
index 170684a57ca2..42700de94d59 100644
--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ b/drivers/staging/fsl-mc/include/mc-bus.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale Management Complex (MC) bus declarations 2 * Freescale Management Complex (MC) bus declarations
3 * 3 *
4 * Copyright (C) 2014 Freescale Semiconductor, Inc. 4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
@@ -42,8 +42,8 @@ struct msi_domain_info;
42 */ 42 */
43struct fsl_mc_resource_pool { 43struct fsl_mc_resource_pool {
44 enum fsl_mc_pool_type type; 44 enum fsl_mc_pool_type type;
45 int16_t max_count; 45 int max_count;
46 int16_t free_count; 46 int free_count;
47 struct mutex mutex; /* serializes access to free_list */ 47 struct mutex mutex; /* serializes access to free_list */
48 struct list_head free_list; 48 struct list_head free_list;
49 struct fsl_mc_bus *mc_bus; 49 struct fsl_mc_bus *mc_bus;
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 5decb9890c31..2e08aa31b084 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2015 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met: 5 * modification, are permitted provided that the following conditions are met:
@@ -48,6 +49,15 @@ struct mc_command {
48 u64 params[MC_CMD_NUM_OF_PARAMS]; 49 u64 params[MC_CMD_NUM_OF_PARAMS];
49}; 50};
50 51
52struct mc_rsp_create {
53 __le32 object_id;
54};
55
56struct mc_rsp_api_ver {
57 __le16 major_ver;
58 __le16 minor_ver;
59};
60
51enum mc_cmd_status { 61enum mc_cmd_status {
52 MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ 62 MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
53 MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ 63 MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -72,11 +82,6 @@ enum mc_cmd_status {
72/* Command completion flag */ 82/* Command completion flag */
73#define MC_CMD_FLAG_INTR_DIS 0x01 83#define MC_CMD_FLAG_INTR_DIS 0x01
74 84
75#define MC_CMD_HDR_CMDID_MASK 0xFFF0
76#define MC_CMD_HDR_CMDID_SHIFT 4
77#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
78#define MC_CMD_HDR_TOKEN_SHIFT 6
79
80static inline u64 mc_encode_cmd_header(u16 cmd_id, 85static inline u64 mc_encode_cmd_header(u16 cmd_id,
81 u32 cmd_flags, 86 u32 cmd_flags,
82 u16 token) 87 u16 token)
@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
84 u64 header = 0; 89 u64 header = 0;
85 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; 90 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
86 91
87 hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) & 92 hdr->cmd_id = cpu_to_le16(cmd_id);
88 MC_CMD_HDR_CMDID_MASK); 93 hdr->token = cpu_to_le16(token);
89 hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
90 MC_CMD_HDR_TOKEN_MASK);
91 hdr->status = MC_CMD_STATUS_READY; 94 hdr->status = MC_CMD_STATUS_READY;
92 if (cmd_flags & MC_CMD_FLAG_PRI) 95 if (cmd_flags & MC_CMD_FLAG_PRI)
93 hdr->flags_hw = MC_CMD_FLAG_PRI; 96 hdr->flags_hw = MC_CMD_FLAG_PRI;
@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
102 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; 105 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
103 u16 token = le16_to_cpu(hdr->token); 106 u16 token = le16_to_cpu(hdr->token);
104 107
105 return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT; 108 return token;
109}
110
111static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
112{
113 struct mc_rsp_create *rsp_params;
114
115 rsp_params = (struct mc_rsp_create *)cmd->params;
116 return le32_to_cpu(rsp_params->object_id);
117}
118
119static inline void mc_cmd_read_api_version(struct mc_command *cmd,
120 u16 *major_ver,
121 u16 *minor_ver)
122{
123 struct mc_rsp_api_ver *rsp_params;
124
125 rsp_params = (struct mc_rsp_api_ver *)cmd->params;
126 *major_ver = le16_to_cpu(rsp_params->major_ver);
127 *minor_ver = le16_to_cpu(rsp_params->minor_ver);
106} 128}
107 129
108#endif /* __FSL_MC_CMD_H */ 130#endif /* __FSL_MC_CMD_H */
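Taken together, the header changes mean cmd_id and token are carried verbatim (no shift/mask), and the new mc_cmd_read_object_id()/mc_cmd_read_api_version() helpers decode the typed responses of create and get_api_version commands. A sketch of the shape a dp* create wrapper takes on top of these helpers, assuming mc-cmd.h and mc-sys.h are in scope; EXAMPLE_CMDID_CREATE is a placeholder for whatever command id the real object code defines, not something introduced by this patch:

static int example_obj_create(struct fsl_mc_io *mc_io, u16 dprc_token,
                              u32 cmd_flags, u32 *obj_id)
{
        struct mc_command cmd = { 0 };
        int err;

        /* cmd_id and token are now placed in the header unmodified */
        cmd.header = mc_encode_cmd_header(EXAMPLE_CMDID_CREATE /* placeholder */,
                                          cmd_flags, dprc_token);

        err = mc_send_command(mc_io, &cmd);
        if (err)
                return err;

        /* the create response carries the new object id in the first parameter */
        *obj_id = mc_cmd_read_object_id(&cmd);
        return 0;
}

A get_api_version wrapper would end the same way with mc_cmd_read_api_version(&cmd, &major, &minor) instead.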
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
index 89ad0cf54702..dca7f9084e05 100644
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ b/drivers/staging/fsl-mc/include/mc-sys.h
@@ -1,4 +1,5 @@
1/* Copyright 2013-2014 Freescale Semiconductor Inc. 1/*
2 * Copyright 2013-2016 Freescale Semiconductor Inc.
2 * 3 *
3 * Interface of the I/O services to send MC commands to the MC hardware 4 * Interface of the I/O services to send MC commands to the MC hardware
4 * 5 *
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index f6e720e84460..1c46c0c2a895 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale Management Complex (MC) bus public interface 2 * Freescale Management Complex (MC) bus public interface
3 * 3 *
4 * Copyright (C) 2014 Freescale Semiconductor, Inc. 4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5 * Author: German Rivera <German.Rivera@freescale.com> 5 * Author: German Rivera <German.Rivera@freescale.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
81 */ 81 */
82struct fsl_mc_resource { 82struct fsl_mc_resource {
83 enum fsl_mc_pool_type type; 83 enum fsl_mc_pool_type type;
84 int32_t id; 84 s32 id;
85 void *data; 85 void *data;
86 struct fsl_mc_resource_pool *parent_pool; 86 struct fsl_mc_resource_pool *parent_pool;
87 struct list_head node; 87 struct list_head node;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 49c718b91e55..41a49c8194e5 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1667,12 +1667,6 @@ static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
1667 pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); 1667 pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
1668} 1668}
1669 1669
1670static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
1671{
1672 pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
1673 pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
1674}
1675
1676static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt) 1670static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
1677{ 1671{
1678 pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK); 1672 pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 88414e5a70cc..7ddeabc0e50a 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -47,15 +47,15 @@ struct phy_dev {
47 void *priv_dev; 47 void *priv_dev;
48 struct net_device *dev[MAX_NIC_TYPE]; 48 struct net_device *dev[MAX_NIC_TYPE];
49 int (*send_hci_func)(void *priv_dev, void *data, int len, 49 int (*send_hci_func)(void *priv_dev, void *data, int len,
50 void (*cb)(void *cb_data), void *cb_data); 50 void (*cb)(void *cb_data), void *cb_data);
51 int (*send_sdu_func)(void *priv_dev, void *data, int len, 51 int (*send_sdu_func)(void *priv_dev, void *data, int len,
52 unsigned int dftEpsId, unsigned int epsId, 52 unsigned int dftEpsId, unsigned int epsId,
53 void (*cb)(void *cb_data), void *cb_data, 53 void (*cb)(void *cb_data), void *cb_data,
54 int dev_idx, int nic_type); 54 int dev_idx, int nic_type);
55 int (*rcv_func)(void *priv_dev, 55 int (*rcv_func)(void *priv_dev,
56 int (*cb)(void *cb_data, void *data, int len, 56 int (*cb)(void *cb_data, void *data, int len,
57 int context), 57 int context),
58 void *cb_data, int context); 58 void *cb_data, int context);
59 struct gdm_endian * (*get_endian)(void *priv_dev); 59 struct gdm_endian * (*get_endian)(void *priv_dev);
60}; 60};
61 61
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
index 297438b4ddcb..195c5902989f 100644
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -17,7 +17,6 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/tty.h> 18#include <linux/tty.h>
19 19
20
21#define TTY_MAX_COUNT 2 20#define TTY_MAX_COUNT 2
22 21
23#define MAX_ISSUE_NUM 3 22#define MAX_ISSUE_NUM 3
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index 7cf979b3f826..5ebd73157f5a 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -18,7 +18,8 @@
18#include <net/sock.h> 18#include <net/sock.h>
19 19
20struct sock *netlink_init(int unit, 20struct sock *netlink_init(int unit,
21 void (*cb)(struct net_device *dev, u16 type, void *msg, int len)); 21 void (*cb)(struct net_device *dev,
22 u16 type, void *msg, int len));
22int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len); 23int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
23 24
24#endif /* _NETLINK_K_H_ */ 25#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 70323aa11f24..3fda0cd6bb42 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -183,7 +183,7 @@ static int standby_boot_seq(struct platform_device *pdev)
183 * Pasted from WDM spec, 183 * Pasted from WDM spec,
184 * - A falling edge on POWEROFF_L is detected (a) 184 * - A falling edge on POWEROFF_L is detected (a)
185 * - WDM enters standby mode, but no output signals are changed 185 * - WDM enters standby mode, but no output signals are changed
186 * */ 186 */
187 187
188 /* TODO: POWEROFF_L is input to WDM module */ 188 /* TODO: POWEROFF_L is input to WDM module */
189 apb->state = ARCHE_PLATFORM_STATE_STANDBY; 189 apb->state = ARCHE_PLATFORM_STATE_STANDBY;
@@ -285,8 +285,10 @@ static ssize_t state_store(struct device *dev,
285 if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING) 285 if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
286 return count; 286 return count;
287 287
288 /* First we want to make sure we power off everything 288 /*
289 * and then enter FW flashing state */ 289 * First we want to make sure we power off everything
290 * and then enter FW flashing state
291 */
290 poweroff_seq(pdev); 292 poweroff_seq(pdev);
291 ret = fw_flashing_seq(pdev); 293 ret = fw_flashing_seq(pdev);
292 } else { 294 } else {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index d33d6fe078ad..338c2d3ee842 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -457,7 +457,8 @@ retry:
457 goto exit; 457 goto exit;
458 458
459 /* First we want to make sure we power off everything 459 /* First we want to make sure we power off everything
460 * and then activate back again */ 460 * and then activate back again
461 */
461 device_for_each_child(arche_pdata->dev, NULL, apb_poweroff); 462 device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
462 arche_platform_poweroff_seq(arche_pdata); 463 arche_platform_poweroff_seq(arche_pdata);
463 464
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 8a0744b58a32..f8862c6d7102 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -405,7 +405,6 @@ static void gbcodec_shutdown(struct snd_pcm_substream *substream,
405 params->state = GBAUDIO_CODEC_SHUTDOWN; 405 params->state = GBAUDIO_CODEC_SHUTDOWN;
406 mutex_unlock(&codec->lock); 406 mutex_unlock(&codec->lock);
407 pm_relax(dai->dev); 407 pm_relax(dai->dev);
408 return;
409} 408}
410 409
411static int gbcodec_hw_params(struct snd_pcm_substream *substream, 410static int gbcodec_hw_params(struct snd_pcm_substream *substream,
@@ -655,8 +654,10 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
655 ret = gb_audio_apbridgea_shutdown_rx(data->connection, 654 ret = gb_audio_apbridgea_shutdown_rx(data->connection,
656 0); 655 0);
657 params->state = GBAUDIO_CODEC_STOP; 656 params->state = GBAUDIO_CODEC_STOP;
658 } else 657 } else {
659 ret = -EINVAL; 658 ret = -EINVAL;
659 }
660
660 if (ret) 661 if (ret)
661 dev_err_ratelimited(dai->dev, 662 dev_err_ratelimited(dai->dev,
662 "%s:Error during %s %s stream:%d\n", 663 "%s:Error during %s %s stream:%d\n",
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index ca027bd99ad7..62fd93939a1f 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -158,7 +158,6 @@ struct gbaudio_module_info {
158 int dev_id; /* check if it should be bundle_id/hd_cport_id */ 158 int dev_id; /* check if it should be bundle_id/hd_cport_id */
159 int vid; 159 int vid;
160 int pid; 160 int pid;
161 int slot;
162 int type; 161 int type;
163 int set_uevent; 162 int set_uevent;
164 char vstr[NAME_SIZE]; 163 char vstr[NAME_SIZE];
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index c4ca09754a6a..5ab8f5e0ed3f 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -18,10 +18,9 @@
18 18
19struct gb_audio_manager_module_descriptor { 19struct gb_audio_manager_module_descriptor {
20 char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN]; 20 char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
21 int slot;
22 int vid; 21 int vid;
23 int pid; 22 int pid;
24 int cport; 23 int intf_id;
25 unsigned int ip_devices; 24 unsigned int ip_devices;
26 unsigned int op_devices; 25 unsigned int op_devices;
27}; 26};
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index a10e96ad79c1..adc16977452d 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -81,16 +81,6 @@ static ssize_t gb_audio_module_name_show(
81static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute = 81static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
82 __ATTR(name, 0664, gb_audio_module_name_show, NULL); 82 __ATTR(name, 0664, gb_audio_module_name_show, NULL);
83 83
84static ssize_t gb_audio_module_slot_show(
85 struct gb_audio_manager_module *module,
86 struct gb_audio_manager_module_attribute *attr, char *buf)
87{
88 return sprintf(buf, "%d", module->desc.slot);
89}
90
91static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
92 __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
93
94static ssize_t gb_audio_module_vid_show( 84static ssize_t gb_audio_module_vid_show(
95 struct gb_audio_manager_module *module, 85 struct gb_audio_manager_module *module,
96 struct gb_audio_manager_module_attribute *attr, char *buf) 86 struct gb_audio_manager_module_attribute *attr, char *buf)
@@ -111,16 +101,16 @@ static ssize_t gb_audio_module_pid_show(
111static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute = 101static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
112 __ATTR(pid, 0664, gb_audio_module_pid_show, NULL); 102 __ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
113 103
114static ssize_t gb_audio_module_cport_show( 104static ssize_t gb_audio_module_intf_id_show(
115 struct gb_audio_manager_module *module, 105 struct gb_audio_manager_module *module,
116 struct gb_audio_manager_module_attribute *attr, char *buf) 106 struct gb_audio_manager_module_attribute *attr, char *buf)
117{ 107{
118 return sprintf(buf, "%d", module->desc.cport); 108 return sprintf(buf, "%d", module->desc.intf_id);
119} 109}
120 110
121static struct gb_audio_manager_module_attribute 111static struct gb_audio_manager_module_attribute
122 gb_audio_module_cport_attribute = 112 gb_audio_module_intf_id_attribute =
123 __ATTR(cport, 0664, gb_audio_module_cport_show, NULL); 113 __ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL);
124 114
125static ssize_t gb_audio_module_ip_devices_show( 115static ssize_t gb_audio_module_ip_devices_show(
126 struct gb_audio_manager_module *module, 116 struct gb_audio_manager_module *module,
@@ -146,10 +136,9 @@ static struct gb_audio_manager_module_attribute
146 136
147static struct attribute *gb_audio_module_default_attrs[] = { 137static struct attribute *gb_audio_module_default_attrs[] = {
148 &gb_audio_module_name_attribute.attr, 138 &gb_audio_module_name_attribute.attr,
149 &gb_audio_module_slot_attribute.attr,
150 &gb_audio_module_vid_attribute.attr, 139 &gb_audio_module_vid_attribute.attr,
151 &gb_audio_module_pid_attribute.attr, 140 &gb_audio_module_pid_attribute.attr,
152 &gb_audio_module_cport_attribute.attr, 141 &gb_audio_module_intf_id_attribute.attr,
153 &gb_audio_module_ip_devices_attribute.attr, 142 &gb_audio_module_ip_devices_attribute.attr,
154 &gb_audio_module_op_devices_attribute.attr, 143 &gb_audio_module_op_devices_attribute.attr,
155 NULL, /* need to NULL terminate the list of attributes */ 144 NULL, /* need to NULL terminate the list of attributes */
@@ -164,29 +153,26 @@ static struct kobj_type gb_audio_module_type = {
164static void send_add_uevent(struct gb_audio_manager_module *module) 153static void send_add_uevent(struct gb_audio_manager_module *module)
165{ 154{
166 char name_string[128]; 155 char name_string[128];
167 char slot_string[64];
168 char vid_string[64]; 156 char vid_string[64];
169 char pid_string[64]; 157 char pid_string[64];
170 char cport_string[64]; 158 char intf_id_string[64];
171 char ip_devices_string[64]; 159 char ip_devices_string[64];
172 char op_devices_string[64]; 160 char op_devices_string[64];
173 161
174 char *envp[] = { 162 char *envp[] = {
175 name_string, 163 name_string,
176 slot_string,
177 vid_string, 164 vid_string,
178 pid_string, 165 pid_string,
179 cport_string, 166 intf_id_string,
180 ip_devices_string, 167 ip_devices_string,
181 op_devices_string, 168 op_devices_string,
182 NULL 169 NULL
183 }; 170 };
184 171
185 snprintf(name_string, 128, "NAME=%s", module->desc.name); 172 snprintf(name_string, 128, "NAME=%s", module->desc.name);
186 snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
187 snprintf(vid_string, 64, "VID=%d", module->desc.vid); 173 snprintf(vid_string, 64, "VID=%d", module->desc.vid);
188 snprintf(pid_string, 64, "PID=%d", module->desc.pid); 174 snprintf(pid_string, 64, "PID=%d", module->desc.pid);
189 snprintf(cport_string, 64, "CPORT=%d", module->desc.cport); 175 snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id);
190 snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X", 176 snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
191 module->desc.ip_devices); 177 module->desc.ip_devices);
192 snprintf(op_devices_string, 64, "O/P DEVICES=0x%X", 178 snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
@@ -246,13 +232,12 @@ int gb_audio_manager_module_create(
246 232
247void gb_audio_manager_module_dump(struct gb_audio_manager_module *module) 233void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
248{ 234{
249 pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n", 235 pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n",
250 module->id, 236 module->id,
251 module->desc.name, 237 module->desc.name,
252 module->desc.slot,
253 module->desc.vid, 238 module->desc.vid,
254 module->desc.pid, 239 module->desc.pid,
255 module->desc.cport, 240 module->desc.intf_id,
256 module->desc.ip_devices, 241 module->desc.ip_devices,
257 module->desc.op_devices); 242 module->desc.op_devices);
258} 243}
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index d8bf8591ff9e..34ebd147052f 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -20,10 +20,9 @@ static ssize_t manager_sysfs_add_store(
20 20
21 int num = sscanf(buf, 21 int num = sscanf(buf,
22 "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s " 22 "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
23 "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X" 23 "vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
24 "o/p devices=0x%X", 24 desc.name, &desc.vid, &desc.pid, &desc.intf_id,
25 desc.name, &desc.slot, &desc.vid, &desc.pid, 25 &desc.ip_devices, &desc.op_devices);
26 &desc.cport, &desc.ip_devices, &desc.op_devices);
27 26
28 if (num != 7) 27 if (num != 7)
29 return -EINVAL; 28 return -EINVAL;
@@ -44,7 +43,7 @@ static ssize_t manager_sysfs_remove_store(
44{ 43{
45 int id; 44 int id;
46 45
47 int num = sscanf(buf, "%d", &id); 46 int num = kstrtoint(buf, 10, &id);
48 47
49 if (num != 1) 48 if (num != 1)
50 return -EINVAL; 49 return -EINVAL;
@@ -65,16 +64,17 @@ static ssize_t manager_sysfs_dump_store(
65{ 64{
66 int id; 65 int id;
67 66
68 int num = sscanf(buf, "%d", &id); 67 int num = kstrtoint(buf, 10, &id);
69 68
70 if (num == 1) { 69 if (num == 1) {
71 num = gb_audio_manager_dump_module(id); 70 num = gb_audio_manager_dump_module(id);
72 if (num) 71 if (num)
73 return num; 72 return num;
74 } else if (!strncmp("all", buf, 3)) 73 } else if (!strncmp("all", buf, 3)) {
75 gb_audio_manager_dump_all(); 74 gb_audio_manager_dump_all();
76 else 75 } else {
77 return -EINVAL; 76 return -EINVAL;
77 }
78 78
79 return count; 79 return count;
80} 80}
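One behavioral note on the sscanf() to kstrtoint() conversion above: kstrtoint() returns 0 on success and a negative errno on failure, while sscanf() returns the count of converted fields, so the success check differs between the two. A minimal sketch of the usual kstrtoint() store-handler idiom, with names that are illustrative only:

static ssize_t example_id_store(const char *buf, size_t count)
{
        int id;
        int err;

        err = kstrtoint(buf, 10, &id);  /* 0 on success, -EINVAL/-ERANGE on failure */
        if (err)
                return err;

        /* ... act on id ... */
        return count;
}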
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index ae1c0fa85752..17a9948b1ba1 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -207,10 +207,8 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
207 struct gbaudio_data_connection *dai; 207 struct gbaudio_data_connection *dai;
208 208
209 dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL); 209 dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
210 if (!dai) { 210 if (!dai)
211 dev_err(gbmodule->dev, "DAI Malloc failure\n");
212 return -ENOMEM; 211 return -ENOMEM;
213 }
214 212
215 connection = gb_connection_create_offloaded(bundle, 213 connection = gb_connection_create_offloaded(bundle,
216 le16_to_cpu(cport_desc->id), 214 le16_to_cpu(cport_desc->id),
@@ -345,10 +343,9 @@ static int gb_audio_probe(struct gb_bundle *bundle,
345 dev_dbg(dev, "Inform set_event:%d to above layer\n", 1); 343 dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
346 /* prepare for the audio manager */ 344 /* prepare for the audio manager */
347 strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN); 345 strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
348 desc.slot = 1; /* todo */
349 desc.vid = 2; /* todo */ 346 desc.vid = 2; /* todo */
350 desc.pid = 3; /* todo */ 347 desc.pid = 3; /* todo */
351 desc.cport = gbmodule->dev_id; 348 desc.intf_id = gbmodule->dev_id;
352 desc.op_devices = gbmodule->op_devices; 349 desc.op_devices = gbmodule->op_devices;
353 desc.ip_devices = gbmodule->ip_devices; 350 desc.ip_devices = gbmodule->ip_devices;
354 gbmodule->manager_id = gb_audio_manager_add(&desc); 351 gbmodule->manager_id = gb_audio_manager_add(&desc);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index b6251691a33d..8b216ca99cf9 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -114,6 +114,7 @@ static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
114 const char *name) 114 const char *name)
115{ 115{
116 struct gbaudio_widget *widget; 116 struct gbaudio_widget *widget;
117
117 list_for_each_entry(widget, &module->widget_list, list) { 118 list_for_each_entry(widget, &module->widget_list, list) {
118 if (!strncmp(widget->name, name, NAME_SIZE)) 119 if (!strncmp(widget->name, name, NAME_SIZE))
119 return widget->id; 120 return widget->id;
@@ -1044,8 +1045,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
1044 control->texts = (const char * const *) 1045 control->texts = (const char * const *)
1045 gb_generate_enum_strings(module, gbenum); 1046 gb_generate_enum_strings(module, gbenum);
1046 control->items = gbenum->items; 1047 control->items = gbenum->items;
1047 } else 1048 } else {
1048 csize = sizeof(struct gb_audio_control); 1049 csize = sizeof(struct gb_audio_control);
1050 }
1051
1049 *w_size += csize; 1052 *w_size += csize;
1050 curr = (void *)curr + csize; 1053 curr = (void *)curr + csize;
1051 list_add(&control->list, &module->widget_ctl_list); 1054 list_add(&control->list, &module->widget_ctl_list);
@@ -1190,8 +1193,9 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
1190 control->texts = (const char * const *) 1193 control->texts = (const char * const *)
1191 gb_generate_enum_strings(module, gbenum); 1194 gb_generate_enum_strings(module, gbenum);
1192 control->items = gbenum->items; 1195 control->items = gbenum->items;
1193 } else 1196 } else {
1194 csize = sizeof(struct gb_audio_control); 1197 csize = sizeof(struct gb_audio_control);
1198 }
1195 1199
1196 list_add(&control->list, &module->ctl_list); 1200 list_add(&control->list, &module->ctl_list);
1197 dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id, 1201 dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 491bdd720c0c..1c5b41ae6774 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -289,6 +289,7 @@ static const int gb_camera_configure_streams_validate_response(
289 289
290 for (i = 0; i < resp->num_streams; i++) { 290 for (i = 0; i < resp->num_streams; i++) {
291 struct gb_camera_stream_config_response *cfg = &resp->config[i]; 291 struct gb_camera_stream_config_response *cfg = &resp->config[i];
292
292 if (cfg->padding) { 293 if (cfg->padding) {
293 gcam_err(gcam, "stream #%u padding != 0\n", i); 294 gcam_err(gcam, "stream #%u padding != 0\n", i);
294 return -EIO; 295 return -EIO;
@@ -796,7 +797,7 @@ static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
796 if (gb_nstreams > GB_CAMERA_MAX_STREAMS) 797 if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
797 return -EINVAL; 798 return -EINVAL;
798 799
799 gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL); 800 gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL);
800 if (!gb_streams) 801 if (!gb_streams)
801 return -ENOMEM; 802 return -ENOMEM;
802 803
@@ -937,7 +938,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
937 return ret; 938 return ret;
938 939
939 /* For each stream to configure parse width, height and format */ 940 /* For each stream to configure parse width, height and format */
940 streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL); 941 streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
941 if (!streams) 942 if (!streams)
942 return -ENOMEM; 943 return -ENOMEM;
943 944
@@ -1118,7 +1119,7 @@ static ssize_t gb_camera_debugfs_write(struct file *file,
1118 char *kbuf; 1119 char *kbuf;
1119 1120
1120 if (len > 1024) 1121 if (len > 1024)
1121 return -EINVAL; 1122 return -EINVAL;
1122 1123
1123 kbuf = kmalloc(len + 1, GFP_KERNEL); 1124 kbuf = kmalloc(len + 1, GFP_KERNEL);
1124 if (!kbuf) 1125 if (!kbuf)
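Editor's note: the camera.c hunks above convert open-coded kzalloc(n * size) allocations to kcalloc(). Both zero the memory; the difference is that kcalloc() fails cleanly (returns NULL) if the element count times the element size would overflow, while the open-coded multiplication can wrap and silently under-allocate. A minimal sketch, with hypothetical names that are not part of the patch:

	struct stream_cfg *streams;                     /* hypothetical element type */
	unsigned int nstreams = count_from_userspace;   /* hypothetical, untrusted count */

	/* Open-coded multiply: wraps if nstreams is absurdly large. */
	streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);

	/* kcalloc() checks the multiplication and returns NULL on overflow. */
	streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
	if (!streams)
		return -ENOMEM;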
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index baab460eeaa3..f1d256df06d5 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -175,10 +175,9 @@ static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
175 u8 *data; 175 u8 *data;
176 int retval; 176 int retval;
177 177
178 data = kmalloc(size, GFP_KERNEL); 178 data = kmemdup(req, size, GFP_KERNEL);
179 if (!data) 179 if (!data)
180 return -ENOMEM; 180 return -ENOMEM;
181 memcpy(data, req, size);
182 181
183 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 182 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
184 cmd, 183 cmd,
@@ -1034,7 +1033,7 @@ static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
1034 goto err_free_req; 1033 goto err_free_req;
1035 1034
1036 rpc->req->type = type; 1035 rpc->req->type = type;
1037 rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size); 1036 rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
1038 memcpy(rpc->req->data, payload, size); 1037 memcpy(rpc->req->data, payload, size);
1039 1038
1040 init_completion(&rpc->response_received); 1039 init_completion(&rpc->response_received);
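Editor's note: the two es2.c hunks above make small but instructive fixes. The first folds a kmalloc() plus memcpy() pair into kmemdup(), which allocates and copies in one call and returns NULL on failure. The second corrects a sizeof operand: rpc->req is a pointer, so sizeof(rpc->req) yields the size of the pointer rather than the size of the request header that belongs in the wire-format length field. A sketch of the distinction, using stand-in types rather than the real greybus structures:

	struct req_hdr {                 /* stand-in for the request header */
		__le16 size;
		u8 type;
		u8 data[];
	};
	struct rpc_ctx {                 /* stand-in for struct arpc */
		struct req_hdr *req;
	} *ctx;

	sizeof(ctx->req);                /* size of the pointer: 4 or 8 bytes */
	sizeof(*ctx->req);               /* size of struct req_hdr: what the length field needs */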
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 70dd9e5a1cf2..1a18ab1ff8aa 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -55,8 +55,10 @@ static int gb_log_request_handler(struct gb_operation *op)
55 /* Ensure the buffer is 0 terminated */ 55 /* Ensure the buffer is 0 terminated */
56 receive->msg[len - 1] = '\0'; 56 receive->msg[len - 1] = '\0';
57 57
58 /* Print with dev_dbg() so that it can be easily turned off using 58 /*
59 * dynamic debugging (and prevent any DoS) */ 59 * Print with dev_dbg() so that it can be easily turned off using
60 * dynamic debugging (and prevent any DoS)
61 */
60 dev_dbg(dev, "%s", receive->msg); 62 dev_dbg(dev, "%s", receive->msg);
61 63
62 return 0; 64 return 0;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 5649ef1e379d..66b37ea29ef0 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -191,9 +191,8 @@ static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
191 state_changed = 1; 191 state_changed = 1;
192 } 192 }
193 193
194 if (event & GB_SDIO_WP) { 194 if (event & GB_SDIO_WP)
195 host->read_only = true; 195 host->read_only = true;
196 }
197 196
198 if (state_changed) { 197 if (state_changed) {
199 dev_info(mmc_dev(host->mmc), "card %s now event\n", 198 dev_info(mmc_dev(host->mmc), "card %s now event\n",
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
index 2e68af7dea6d..e586627f4bbc 100644
--- a/drivers/staging/greybus/timesync.c
+++ b/drivers/staging/greybus/timesync.c
@@ -807,11 +807,11 @@ static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
807 return -EINVAL; 807 return -EINVAL;
808 808
809 mutex_lock(&timesync_svc->mutex); 809 mutex_lock(&timesync_svc->mutex);
810 if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) { 810 if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID)
811 gb_timesync_set_state_atomic(timesync_svc, state); 811 gb_timesync_set_state_atomic(timesync_svc, state);
812 } else { 812 else
813 ret = -ENODEV; 813 ret = -ENODEV;
814 } 814
815 mutex_unlock(&timesync_svc->mutex); 815 mutex_unlock(&timesync_svc->mutex);
816 return ret; 816 return ret;
817} 817}
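Editor's note: several hunks above (audio_topology.c, sdio.c, timesync.c) adjust braces to follow the kernel coding style: a single-statement branch takes no braces, but if any branch of an if/else needs braces, every branch gets them. A condensed illustration built from the hunks above; the is_enumerated condition is a stand-in, as the real test is not visible in the excerpt:

	/* Single statement: no braces. */
	if (event & GB_SDIO_WP)
		host->read_only = true;

	/* One branch is multi-line, so both branches are braced. */
	if (is_enumerated) {
		control->texts = gb_generate_enum_strings(module, gbenum);
		control->items = gbenum->items;
	} else {
		csize = sizeof(struct gb_audio_control);
	}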
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 2633d2bfb1b4..6d39f4a04754 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -623,9 +623,6 @@ static int get_serial_info(struct gb_tty *gb_tty,
623{ 623{
624 struct serial_struct tmp; 624 struct serial_struct tmp;
625 625
626 if (!info)
627 return -EINVAL;
628
629 memset(&tmp, 0, sizeof(tmp)); 626 memset(&tmp, 0, sizeof(tmp));
630 tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST; 627 tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
631 tmp.type = PORT_16550A; 628 tmp.type = PORT_16550A;
@@ -711,25 +708,20 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
711 return retval; 708 return retval;
712} 709}
713 710
714static int get_serial_usage(struct gb_tty *gb_tty, 711static int gb_tty_get_icount(struct tty_struct *tty,
715 struct serial_icounter_struct __user *count) 712 struct serial_icounter_struct *icount)
716{ 713{
717 struct serial_icounter_struct icount; 714 struct gb_tty *gb_tty = tty->driver_data;
718 int retval = 0;
719
720 memset(&icount, 0, sizeof(icount));
721 icount.dsr = gb_tty->iocount.dsr;
722 icount.rng = gb_tty->iocount.rng;
723 icount.dcd = gb_tty->iocount.dcd;
724 icount.frame = gb_tty->iocount.frame;
725 icount.overrun = gb_tty->iocount.overrun;
726 icount.parity = gb_tty->iocount.parity;
727 icount.brk = gb_tty->iocount.brk;
728 715
729 if (copy_to_user(count, &icount, sizeof(icount)) > 0) 716 icount->dsr = gb_tty->iocount.dsr;
730 retval = -EFAULT; 717 icount->rng = gb_tty->iocount.rng;
718 icount->dcd = gb_tty->iocount.dcd;
719 icount->frame = gb_tty->iocount.frame;
720 icount->overrun = gb_tty->iocount.overrun;
721 icount->parity = gb_tty->iocount.parity;
722 icount->brk = gb_tty->iocount.brk;
731 723
732 return retval; 724 return 0;
733} 725}
734 726
735static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd, 727static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
@@ -746,9 +738,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
746 (struct serial_struct __user *)arg); 738 (struct serial_struct __user *)arg);
747 case TIOCMIWAIT: 739 case TIOCMIWAIT:
748 return wait_serial_change(gb_tty, arg); 740 return wait_serial_change(gb_tty, arg);
749 case TIOCGICOUNT:
750 return get_serial_usage(gb_tty,
751 (struct serial_icounter_struct __user *)arg);
752 } 741 }
753 742
754 return -ENOIOCTLCMD; 743 return -ENOIOCTLCMD;
@@ -830,9 +819,10 @@ static const struct tty_operations gb_ops = {
830 .set_termios = gb_tty_set_termios, 819 .set_termios = gb_tty_set_termios,
831 .tiocmget = gb_tty_tiocmget, 820 .tiocmget = gb_tty_tiocmget,
832 .tiocmset = gb_tty_tiocmset, 821 .tiocmset = gb_tty_tiocmset,
822 .get_icount = gb_tty_get_icount,
833}; 823};
834 824
835static struct tty_port_operations gb_port_ops = { 825static const struct tty_port_operations gb_port_ops = {
836 .dtr_rts = gb_tty_dtr_rts, 826 .dtr_rts = gb_tty_dtr_rts,
837 .activate = gb_tty_port_activate, 827 .activate = gb_tty_port_activate,
838 .shutdown = gb_tty_port_shutdown, 828 .shutdown = gb_tty_port_shutdown,
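Editor's note: the uart.c change above drops the driver-private TIOCGICOUNT handling, including the locally built serial_icounter_struct and the copy_to_user(), and instead wires up the tty core's .get_icount operation. The core dispatches TIOCGICOUNT itself, passes a kernel-space structure to the driver, and copies the result back to user space. A minimal sketch of the pattern with placeholder driver types; only fields touched in the hunk are shown:

	#include <linux/tty.h>
	#include <linux/serial.h>

	struct example_port {                    /* placeholder driver state */
		struct {
			unsigned int dsr, rng, dcd, frame, overrun, parity, brk;
		} iocount;
	};

	static int example_get_icount(struct tty_struct *tty,
				      struct serial_icounter_struct *icount)
	{
		struct example_port *port = tty->driver_data;

		icount->dsr     = port->iocount.dsr;
		icount->rng     = port->iocount.rng;
		icount->dcd     = port->iocount.dcd;
		icount->frame   = port->iocount.frame;
		icount->overrun = port->iocount.overrun;
		icount->parity  = port->iocount.parity;
		icount->brk     = port->iocount.brk;
		return 0;        /* no copy_to_user() here; the tty core handles it */
	}

	static const struct tty_operations example_ops = {
		.get_icount = example_get_icount,    /* other operations omitted */
	};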
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 8ed4d395be58..19b550fff04b 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -38,7 +38,7 @@ static u8 bits_magic[] = {
38static struct platform_device *firmware_pdev; 38static struct platform_device *firmware_pdev;
39 39
40static char *file = "xlinx_fpga_firmware.bit"; 40static char *file = "xlinx_fpga_firmware.bit";
41module_param(file, charp, S_IRUGO); 41module_param(file, charp, 0444);
42MODULE_PARM_DESC(file, "Xilinx FPGA firmware file."); 42MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
43 43
44static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize) 44static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
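Editor's note: the gs_fpgaboot hunk above replaces S_IRUGO with its octal value. checkpatch prefers plain octal permissions because 0444 is readable at a glance, while S_IRUGO is the same value spelled through macros (S_IRUSR | S_IRGRP | S_IROTH = 0400 | 0040 | 0004). The two forms are equivalent:

	module_param(file, charp, S_IRUGO);   /* symbolic, expands to 0444 */
	module_param(file, charp, 0444);      /* preferred: world-readable, not writable */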
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index ad7a0391369f..76ff5de65781 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -259,6 +259,7 @@ act2000_isa_receive(act2000_card *card)
259 "act2000_isa_receive: Invalid CAPI msg\n"); 259 "act2000_isa_receive: Invalid CAPI msg\n");
260 { 260 {
261 int i; __u8 *p; __u8 *t; __u8 tmp[30]; 261 int i; __u8 *p; __u8 *t; __u8 tmp[30];
262
262 for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++) 263 for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++)
263 t += sprintf(t, "%02x ", *(p++)); 264 t += sprintf(t, "%02x ", *(p++));
264 printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp); 265 printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp);
diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 62f56294853c..61386a78fb91 100644
--- a/drivers/staging/i4l/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
@@ -99,7 +99,7 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr)
99 for (i = 0; i < num_valid_imsg; i++) 99 for (i = 0; i < num_valid_imsg; i++)
100 if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) && 100 if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) &&
101 (hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) { 101 (hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) {
102 return (i ? 1 : 2); 102 return i ? 1 : 2;
103 } 103 }
104 return 0; 104 return 0;
105} 105}
@@ -506,6 +506,7 @@ static int
506new_plci(act2000_card *card, __u16 plci) 506new_plci(act2000_card *card, __u16 plci)
507{ 507{
508 int i; 508 int i;
509
509 for (i = 0; i < ACT2000_BCH; i++) 510 for (i = 0; i < ACT2000_BCH; i++)
510 if (card->bch[i].plci == 0x8000) { 511 if (card->bch[i].plci == 0x8000) {
511 card->bch[i].plci = plci; 512 card->bch[i].plci = plci;
@@ -518,6 +519,7 @@ static int
518find_plci(act2000_card *card, __u16 plci) 519find_plci(act2000_card *card, __u16 plci)
519{ 520{
520 int i; 521 int i;
522
521 for (i = 0; i < ACT2000_BCH; i++) 523 for (i = 0; i < ACT2000_BCH; i++)
522 if (card->bch[i].plci == plci) 524 if (card->bch[i].plci == plci)
523 return i; 525 return i;
@@ -528,6 +530,7 @@ static int
528find_ncci(act2000_card *card, __u16 ncci) 530find_ncci(act2000_card *card, __u16 ncci)
529{ 531{
530 int i; 532 int i;
533
531 for (i = 0; i < ACT2000_BCH; i++) 534 for (i = 0; i < ACT2000_BCH; i++)
532 if (card->bch[i].ncci == ncci) 535 if (card->bch[i].ncci == ncci)
533 return i; 536 return i;
@@ -538,6 +541,7 @@ static int
538find_dialing(act2000_card *card, __u16 callref) 541find_dialing(act2000_card *card, __u16 callref)
539{ 542{
540 int i; 543 int i;
544
541 for (i = 0; i < ACT2000_BCH; i++) 545 for (i = 0; i < ACT2000_BCH; i++)
542 if ((card->bch[i].callref == callref) && 546 if ((card->bch[i].callref == callref) &&
543 (card->bch[i].fsm_state == ACT2000_STATE_OCALL)) 547 (card->bch[i].fsm_state == ACT2000_STATE_OCALL))
@@ -1088,6 +1092,7 @@ actcapi_debug_msg(struct sk_buff *skb, int direction)
1088 int l = msg->hdr.len - 12; 1092 int l = msg->hdr.len - 12;
1089 int j; 1093 int j;
1090 char *p = tmp; 1094 char *p = tmp;
1095
1091 for (j = 0; j < l; j++) 1096 for (j = 0; j < l; j++)
1092 p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]); 1097 p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]);
1093 printk(KERN_DEBUG " D = '%s'\n", tmp); 1098 printk(KERN_DEBUG " D = '%s'\n", tmp);
diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 99c9c0a1c63e..6aa120319e52 100644
--- a/drivers/staging/i4l/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
@@ -19,8 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/init.h> 20#include <linux/init.h>
21 21
22static unsigned short act2000_isa_ports[] = 22static unsigned short act2000_isa_ports[] = {
23{
24 0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380, 23 0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
25 0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60, 24 0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
26}; 25};
@@ -95,7 +94,7 @@ act2000_find_msn(act2000_card *card, char *msn, int ia5)
95 p = p->next; 94 p = p->next;
96 } 95 }
97 if (!ia5) 96 if (!ia5)
98 return (1 << (eaz - '0')); 97 return 1 << (eaz - '0');
99 else 98 else
100 return eaz; 99 return eaz;
101} 100}
@@ -111,10 +110,10 @@ act2000_find_eaz(act2000_card *card, char eaz)
111 110
112 while (p) { 111 while (p) {
113 if (p->eaz == eaz) 112 if (p->eaz == eaz)
114 return (p->msn); 113 return p->msn;
115 p = p->next; 114 p = p->next;
116 } 115 }
117 return ("\0"); 116 return "\0";
118} 117}
119 118
120/* 119/*
@@ -293,7 +292,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
293 if (ret) 292 if (ret)
294 return ret; 293 return ret;
295 if (card->flags & ACT2000_FLAGS_RUNNING) 294 if (card->flags & ACT2000_FLAGS_RUNNING)
296 return (actcapi_manufacturer_req_msn(card)); 295 return actcapi_manufacturer_req_msn(card);
297 return 0; 296 return 0;
298 case ACT2000_IOCTL_ADDCARD: 297 case ACT2000_IOCTL_ADDCARD:
299 if (copy_from_user(&cdef, arg, 298 if (copy_from_user(&cdef, arg,
@@ -377,6 +376,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
377 } 376 }
378 if (card->ptype == ISDN_PTYPE_1TR6) { 377 if (card->ptype == ISDN_PTYPE_1TR6) {
379 int i; 378 int i;
379
380 chan->eazmask = 0; 380 chan->eazmask = 0;
381 for (i = 0; i < strlen(c->parm.num); i++) 381 for (i = 0; i < strlen(c->parm.num); i++)
382 if (isdigit(c->parm.num[i])) 382 if (isdigit(c->parm.num[i]))
@@ -512,7 +512,7 @@ if_command(isdn_ctrl *c)
512 act2000_card *card = act2000_findcard(c->driver); 512 act2000_card *card = act2000_findcard(c->driver);
513 513
514 if (card) 514 if (card)
515 return (act2000_command(card, c)); 515 return act2000_command(card, c);
516 printk(KERN_ERR 516 printk(KERN_ERR
517 "act2000: if_command %d called with invalid driverId %d!\n", 517 "act2000: if_command %d called with invalid driverId %d!\n",
518 c->command, c->driver); 518 c->command, c->driver);
@@ -527,7 +527,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
527 if (card) { 527 if (card) {
528 if (!(card->flags & ACT2000_FLAGS_RUNNING)) 528 if (!(card->flags & ACT2000_FLAGS_RUNNING))
529 return -ENODEV; 529 return -ENODEV;
530 return (len); 530 return len;
531 } 531 }
532 printk(KERN_ERR 532 printk(KERN_ERR
533 "act2000: if_writecmd called with invalid driverId!\n"); 533 "act2000: if_writecmd called with invalid driverId!\n");
@@ -542,7 +542,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
542 if (card) { 542 if (card) {
543 if (!(card->flags & ACT2000_FLAGS_RUNNING)) 543 if (!(card->flags & ACT2000_FLAGS_RUNNING))
544 return -ENODEV; 544 return -ENODEV;
545 return (act2000_readstatus(buf, len, card)); 545 return act2000_readstatus(buf, len, card);
546 } 546 }
547 printk(KERN_ERR 547 printk(KERN_ERR
548 "act2000: if_readstatus called with invalid driverId!\n"); 548 "act2000: if_readstatus called with invalid driverId!\n");
@@ -557,7 +557,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
557 if (card) { 557 if (card) {
558 if (!(card->flags & ACT2000_FLAGS_RUNNING)) 558 if (!(card->flags & ACT2000_FLAGS_RUNNING))
559 return -ENODEV; 559 return -ENODEV;
560 return (act2000_sendbuf(card, channel, ack, skb)); 560 return act2000_sendbuf(card, channel, ack, skb);
561 } 561 }
562 printk(KERN_ERR 562 printk(KERN_ERR
563 "act2000: if_sendbuf called with invalid driverId!\n"); 563 "act2000: if_sendbuf called with invalid driverId!\n");
@@ -574,6 +574,7 @@ act2000_alloccard(int bus, int port, int irq, char *id)
574{ 574{
575 int i; 575 int i;
576 act2000_card *card; 576 act2000_card *card;
577
577 if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) { 578 if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
578 printk(KERN_WARNING 579 printk(KERN_WARNING
579 "act2000: (%s) Could not allocate card-struct.\n", id); 580 "act2000: (%s) Could not allocate card-struct.\n", id);
@@ -776,7 +777,7 @@ act2000_addcard(int bus, int port, int irq, char *id)
776 failed++; 777 failed++;
777 } 778 }
778 } 779 }
779 return (added - failed); 780 return added - failed;
780} 781}
781 782
782#define DRIVERNAME "IBM Active 2000 ISDN driver" 783#define DRIVERNAME "IBM Active 2000 ISDN driver"
@@ -795,6 +796,7 @@ static void __exit act2000_exit(void)
795{ 796{
796 act2000_card *card = cards; 797 act2000_card *card = cards;
797 act2000_card *last; 798 act2000_card *last;
799
798 while (card) { 800 while (card) {
799 unregister_card(card); 801 unregister_card(card);
800 del_timer_sync(&card->ptimer); 802 del_timer_sync(&card->ptimer);
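Editor's note: the act2000 hunks above are checkpatch-driven cleanups that recur throughout this pull: insert a blank line after local variable declarations, and drop the parentheses around return values, since return is a statement rather than a function. A condensed before/after fragment with placeholder names (n, added, failed, do_work):

	/* before */
	int i;
	for (i = 0; i < n; i++)
		do_work(i);
	return (added - failed);

	/* after: blank line follows the declarations, no parentheses on return */
	int i;

	for (i = 0; i < n; i++)
		do_work(i);
	return added - failed;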
diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 514bfc2c5b53..3750ba38adc5 100644
--- a/drivers/staging/i4l/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -411,8 +411,7 @@ typedef struct icn_stat {
411 int action; 411 int action;
412} icn_stat; 412} icn_stat;
413/* *INDENT-OFF* */ 413/* *INDENT-OFF* */
414static icn_stat icn_stat_table[] = 414static icn_stat icn_stat_table[] = {
415{
416 {"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */ 415 {"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
417 {"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */ 416 {"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
418 /* 417 /*
diff --git a/drivers/staging/i4l/icn/icn.h b/drivers/staging/i4l/icn/icn.h
index f8f2e76d34bf..07e2e0196527 100644
--- a/drivers/staging/i4l/icn/icn.h
+++ b/drivers/staging/i4l/icn/icn.h
@@ -54,7 +54,7 @@ typedef struct icn_cdef {
54 54
55/* some useful macros for debugging */ 55/* some useful macros for debugging */
56#ifdef ICN_DEBUG_PORT 56#ifdef ICN_DEBUG_PORT
57#define OUTB_P(v, p) {printk(KERN_DEBUG "icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);} 57#define OUTB_P(v, p) {pr_debug("icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
58#else 58#else
59#define OUTB_P outb 59#define OUTB_P outb
60#endif 60#endif
@@ -186,8 +186,7 @@ typedef icn_dev *icn_devptr;
186#ifdef __KERNEL__ 186#ifdef __KERNEL__
187 187
188static icn_card *cards = (icn_card *) 0; 188static icn_card *cards = (icn_card *) 0;
189static u_char chan2bank[] = 189static u_char chan2bank[] = {0, 4, 8, 12}; /* for icn_map_channel() */
190{0, 4, 8, 12}; /* for icn_map_channel() */
191 190
192static icn_dev dev; 191static icn_dev dev;
193 192
diff --git a/drivers/staging/i4l/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c
index efb6d6a3639a..212ab0b229d4 100644
--- a/drivers/staging/i4l/pcbit/callbacks.c
+++ b/drivers/staging/i4l/pcbit/callbacks.c
@@ -22,7 +22,7 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24 24
25#include <asm/io.h> 25#include <linux/io.h>
26 26
27#include <linux/isdnif.h> 27#include <linux/isdnif.h>
28 28
diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 373f90feda5a..a6c4e00dc726 100644
--- a/drivers/staging/i4l/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
@@ -27,7 +27,6 @@
27 * encode our number in CallerPN and ConnectedPN 27 * encode our number in CallerPN and ConnectedPN
28 */ 28 */
29 29
30#include <linux/string.h>
31#include <linux/kernel.h> 30#include <linux/kernel.h>
32 31
33#include <linux/types.h> 32#include <linux/types.h>
@@ -36,8 +35,8 @@
36 35
37#include <linux/skbuff.h> 36#include <linux/skbuff.h>
38 37
39#include <asm/io.h> 38#include <linux/io.h>
40#include <asm/string.h> 39#include <linux/string.h>
41 40
42#include <linux/isdnif.h> 41#include <linux/isdnif.h>
43 42
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index d417df5efb5f..89b0b5b94ce5 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -27,12 +27,11 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/string.h>
31#include <linux/skbuff.h> 30#include <linux/skbuff.h>
32 31
33#include <linux/isdnif.h> 32#include <linux/isdnif.h>
34#include <asm/string.h> 33#include <linux/string.h>
35#include <asm/io.h> 34#include <linux/io.h>
36#include <linux/ioport.h> 35#include <linux/ioport.h>
37 36
38#include "pcbit.h" 37#include "pcbit.h"
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index 6d291d548423..5980d1b5da95 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -23,7 +23,7 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24 24
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <asm/io.h> 26#include <linux/io.h>
27 27
28#include <linux/isdnif.h> 28#include <linux/isdnif.h>
29 29
diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index a136c72547e5..0592bf6ee9c9 100644
--- a/drivers/staging/i4l/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
@@ -36,7 +36,7 @@
36 36
37#include <linux/isdnif.h> 37#include <linux/isdnif.h>
38 38
39#include <asm/io.h> 39#include <linux/io.h>
40 40
41 41
42#include "pcbit.h" 42#include "pcbit.h"
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 470f7ad9c073..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,6 +0,0 @@
1What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
2KernelVersion: 2.6.37
3Contact: linux-iio@vger.kernel.org
4Description:
5 This property causes an internal calibration of the als gain trim
6 value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 93a896883e37..4922402e2e98 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,76 +1,8 @@
12009 8/18 12016 10/09
2
3Core:
41) Get reviews
52) Additional testing
63) Ensure all desirable features present by adding more devices.
7 Major changes not expected except in response to comments
8
9Max1363 core:
101) Possibly add sysfs exports of constant useful to userspace.
11Would be nice
122) Support hardware generated interrupts
133) Expand device set. Lots of other maxim adc's have very
14 similar interfaces.
15
16MXS LRADC driver:
17This is a classic MFD device as it combines the following subdevices
18 - touchscreen controller (input subsystem related device)
19 - general purpose ADC channels
20 - battery voltage monitor (power subsystem related device)
21 - die temperature monitor (thermal management)
22
23At least the battery voltage and die temperature feature is required in-kernel
24by a driver of the SoC's battery charging unit to avoid any damage to the
25silicon and the battery.
26
27TSL2561
28Would be nice
291) Open question of userspace vs kernel space balance when
30converting to useful light measurements from device ones.
312) Add sysfs elements necessary to allow device agnostic
32unit conversion.
33
34LIS3L02DQ core
35
36LIS3L02DQ ring
37
38KXSD9
39Currently minimal driver, would be nice to add:
401) Support for all chip generated interrupts (events),
41basically get support up to level of lis3l02dq driver.
42
43Ring buffer core
44
45SCA3000
46Would be nice
471) Testing on devices other than sca3000-e05
48
49Trigger core support
501) Discussion of approach. Is it general enough?
51
52Ring Buffer:
531) Discussion of approach.
54There are probably better ways of doing this. The
55intention is to allow for more than one software ring
56buffer implementation as different users will have
57different requirements. This one suits mid range
58frequencies (100Hz - 4kHz).
592) Lots of testing
60
61GPIO trigger
621) Add control over the type of interrupt etc. This will
63necessitate a header that is also visible from arch board
64files. (avoided at the moment to keep the driver set
65contained in staging).
66 2
67ADI Drivers: 3ADI Drivers:
68CC the device-drivers-devel@blackfin.uclinux.org mailing list when 4CC the device-drivers-devel@blackfin.uclinux.org mailing list when
69e-mailing the normal IIO list (see below). 5e-mailing the normal IIO list (see below).
70 6
71Documentation
721) Lots of cleanup and expansion.
732) Some device require individual docs.
74
75Contact: Jonathan Cameron <jic23@kernel.org>. 7Contact: Jonathan Cameron <jic23@kernel.org>.
76Mailing list: linux-iio@vger.kernel.org 8Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 1c994b57c7d2..c6b0f5eae7ab 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,14 +51,4 @@ config ADIS16240
51 To compile this driver as a module, say M here: the module will be 51 To compile this driver as a module, say M here: the module will be
52 called adis16240. 52 called adis16240.
53 53
54config SCA3000
55 depends on IIO_BUFFER
56 depends on SPI
57 tristate "VTI SCA3000 series accelerometers"
58 help
59 Say Y here to build support for the VTI SCA3000 series of SPI
60 accelerometers. These devices use a hardware ring buffer.
61
62 To compile this driver as a module, say M here: the module will be
63 called sca3000.
64endmenu 54endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1810a434a755..febb137b60c4 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -13,6 +13,3 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
13 13
14adis16240-y := adis16240_core.o 14adis16240-y := adis16240_core.o
15obj-$(CONFIG_ADIS16240) += adis16240.o 15obj-$(CONFIG_ADIS16240) += adis16240.o
16
17sca3000-y := sca3000_core.o sca3000_ring.o
18obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
deleted file mode 100644
index 4dcc8575cbe3..000000000000
--- a/drivers/staging/iio/accel/sca3000.h
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * sca3000.c -- support VTI sca3000 series accelerometers
3 * via SPI
4 *
5 * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
6 *
7 * Partly based upon tle62x0.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Initial mode is direct measurement.
14 *
15 * Untested things
16 *
17 * Temperature reading (the e05 I'm testing with doesn't have a sensor)
18 *
19 * Free fall detection mode - supported but untested as I'm not droping my
20 * dubious wire rig far enough to test it.
21 *
22 * Unsupported as yet
23 *
24 * Time stamping of data from ring. Various ideas on how to do this but none
25 * are remotely simple. Suggestions welcome.
26 *
27 * Individual enabling disabling of channels going into ring buffer
28 *
29 * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
30 *
31 * Motion detector using AND combinations of signals.
32 *
33 * Note: Be very careful about not touching an register bytes marked
34 * as reserved on the data sheet. They really mean it as changing convents of
35 * some will cause the device to lock up.
36 *
37 * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
38 * Can probably alleviate this by reading the interrupt register on start, but
39 * that is really just brushing the problem under the carpet.
40 */
41#ifndef _SCA3000
42#define _SCA3000
43
44#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
45#define SCA3000_READ_REG(a) ((a) << 2)
46
47#define SCA3000_REG_ADDR_REVID 0x00
48#define SCA3000_REVID_MAJOR_MASK 0xf0
49#define SCA3000_REVID_MINOR_MASK 0x0f
50
51#define SCA3000_REG_ADDR_STATUS 0x02
52#define SCA3000_LOCKED 0x20
53#define SCA3000_EEPROM_CS_ERROR 0x02
54#define SCA3000_SPI_FRAME_ERROR 0x01
55
56/* All reads done using register decrement so no need to directly access LSBs */
57#define SCA3000_REG_ADDR_X_MSB 0x05
58#define SCA3000_REG_ADDR_Y_MSB 0x07
59#define SCA3000_REG_ADDR_Z_MSB 0x09
60
61#define SCA3000_REG_ADDR_RING_OUT 0x0f
62
63/* Temp read untested - the e05 doesn't have the sensor */
64#define SCA3000_REG_ADDR_TEMP_MSB 0x13
65
66#define SCA3000_REG_ADDR_MODE 0x14
67#define SCA3000_MODE_PROT_MASK 0x28
68
69#define SCA3000_RING_BUF_ENABLE 0x80
70#define SCA3000_RING_BUF_8BIT 0x40
71/*
72 * Free fall detection triggers an interrupt if the acceleration
73 * is below a threshold for equivalent of 25cm drop
74 */
75#define SCA3000_FREE_FALL_DETECT 0x10
76#define SCA3000_MEAS_MODE_NORMAL 0x00
77#define SCA3000_MEAS_MODE_OP_1 0x01
78#define SCA3000_MEAS_MODE_OP_2 0x02
79
80/*
81 * In motion detection mode the accelerations are band pass filtered
82 * (approx 1 - 25Hz) and then a programmable threshold used to trigger
83 * and interrupt.
84 */
85#define SCA3000_MEAS_MODE_MOT_DET 0x03
86
87#define SCA3000_REG_ADDR_BUF_COUNT 0x15
88
89#define SCA3000_REG_ADDR_INT_STATUS 0x16
90
91#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80
92#define SCA3000_INT_STATUS_HALF 0x40
93
94#define SCA3000_INT_STATUS_FREE_FALL 0x08
95#define SCA3000_INT_STATUS_Y_TRIGGER 0x04
96#define SCA3000_INT_STATUS_X_TRIGGER 0x02
97#define SCA3000_INT_STATUS_Z_TRIGGER 0x01
98
99/* Used to allow access to multiplexed registers */
100#define SCA3000_REG_ADDR_CTRL_SEL 0x18
101/* Only available for SCA3000-D03 and SCA3000-D01 */
102#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
103#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
104#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
105#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
106#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
107/*
108 * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
109 * will not function
110 */
111#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
112#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
113#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
114#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
115#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
116#define SCA3000_OUT_CTRL_BUF_DIV_MASK 0x03
117#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
118#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
119
120/*
121 * Control which motion detector interrupts are on.
122 * For now only OR combinations are supported.
123 */
124#define SCA3000_MD_CTRL_PROT_MASK 0xC0
125#define SCA3000_MD_CTRL_OR_Y 0x01
126#define SCA3000_MD_CTRL_OR_X 0x02
127#define SCA3000_MD_CTRL_OR_Z 0x04
128/* Currently unsupported */
129#define SCA3000_MD_CTRL_AND_Y 0x08
130#define SCA3000_MD_CTRL_AND_X 0x10
131#define SAC3000_MD_CTRL_AND_Z 0x20
132
133/*
134 * Some control registers of complex access methods requiring this register to
135 * be used to remove a lock.
136 */
137#define SCA3000_REG_ADDR_UNLOCK 0x1e
138
139#define SCA3000_REG_ADDR_INT_MASK 0x21
140#define SCA3000_INT_MASK_PROT_MASK 0x1C
141
142#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80
143#define SCA3000_INT_MASK_RING_HALF 0x40
144
145#define SCA3000_INT_MASK_ALL_INTS 0x02
146#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
147#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
148
149/* Values of multiplexed registers (write to ctrl_data after select) */
150#define SCA3000_REG_ADDR_CTRL_DATA 0x22
151
152/*
153 * Measurement modes available on some sca3000 series chips. Code assumes others
154 * may become available in the future.
155 *
156 * Bypass - Bypass the low-pass filter in the signal channel so as to increase
157 * signal bandwidth.
158 *
159 * Narrow - Narrow low-pass filtering of the signal channel and half output
160 * data rate by decimation.
161 *
162 * Wide - Widen low-pass filtering of signal channel to increase bandwidth
163 */
164#define SCA3000_OP_MODE_BYPASS 0x01
165#define SCA3000_OP_MODE_NARROW 0x02
166#define SCA3000_OP_MODE_WIDE 0x04
167#define SCA3000_MAX_TX 6
168#define SCA3000_MAX_RX 2
169
170/**
171 * struct sca3000_state - device instance state information
172 * @us: the associated spi device
173 * @info: chip variant information
174 * @interrupt_handler_ws: event interrupt handler for all events
175 * @last_timestamp: the timestamp of the last event
176 * @mo_det_use_count: reference counter for the motion detection unit
177 * @lock: lock used to protect elements of sca3000_state
178 * and the underlying device state.
179 * @bpse: number of bits per scan element
180 * @tx: dma-able transmit buffer
181 * @rx: dma-able receive buffer
182 **/
183struct sca3000_state {
184 struct spi_device *us;
185 const struct sca3000_chip_info *info;
186 struct work_struct interrupt_handler_ws;
187 s64 last_timestamp;
188 int mo_det_use_count;
189 struct mutex lock;
190 int bpse;
191 /* Can these share a cacheline ? */
192 u8 rx[2] ____cacheline_aligned;
193 u8 tx[6] ____cacheline_aligned;
194};
195
196/**
197 * struct sca3000_chip_info - model dependent parameters
198 * @scale: scale * 10^-6
199 * @temp_output: some devices have temperature sensors.
200 * @measurement_mode_freq: normal mode sampling frequency
201 * @option_mode_1: first optional mode. Not all models have one
202 * @option_mode_1_freq: option mode 1 sampling frequency
203 * @option_mode_2: second optional mode. Not all chips have one
204 * @option_mode_2_freq: option mode 2 sampling frequency
205 *
206 * This structure is used to hold information about the functionality of a given
207 * sca3000 variant.
208 **/
209struct sca3000_chip_info {
210 unsigned int scale;
211 bool temp_output;
212 int measurement_mode_freq;
213 int option_mode_1;
214 int option_mode_1_freq;
215 int option_mode_2;
216 int option_mode_2_freq;
217 int mot_det_mult_xz[6];
218 int mot_det_mult_y[7];
219};
220
221int sca3000_read_data_short(struct sca3000_state *st,
222 u8 reg_address_high,
223 int len);
224
225/**
226 * sca3000_write_reg() write a single register
227 * @address: address of register on chip
228 * @val: value to be written to register
229 *
230 * The main lock must be held.
231 **/
232int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
233
234#ifdef CONFIG_IIO_BUFFER
235/**
236 * sca3000_register_ring_funcs() setup the ring state change functions
237 **/
238void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
239
240/**
241 * sca3000_configure_ring() - allocate and configure ring buffer
242 * @indio_dev: iio-core device whose ring is to be configured
243 *
244 * The hardware ring buffer needs far fewer ring buffer functions than
245 * a software one as a lot of things are handled automatically.
246 * This function also tells the iio core that our device supports a
247 * hardware ring buffer mode.
248 **/
249int sca3000_configure_ring(struct iio_dev *indio_dev);
250
251/**
252 * sca3000_unconfigure_ring() - deallocate the ring buffer
253 * @indio_dev: iio-core device whose ring we are freeing
254 **/
255void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
256
257/**
258 * sca3000_ring_int_process() handles ring related event pushing and escalation
259 * @val: the event code
260 **/
261void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
262
263#else
264static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
265{
266}
267
268static inline
269int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
270{
271 return 0;
272}
273
274static inline void sca3000_ring_int_process(u8 val, void *ring)
275{
276}
277
278#endif
279#endif /* _SCA3000 */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
deleted file mode 100644
index 564b36d4f648..000000000000
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ /dev/null
@@ -1,1210 +0,0 @@
1/*
2 * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
9 *
10 * See industrialio/accels/sca3000.h for comments.
11 */
12
13#include <linux/interrupt.h>
14#include <linux/fs.h>
15#include <linux/device.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/spi/spi.h>
19#include <linux/sysfs.h>
20#include <linux/module.h>
21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h>
23#include <linux/iio/events.h>
24#include <linux/iio/buffer.h>
25
26#include "sca3000.h"
27
28enum sca3000_variant {
29 d01,
30 e02,
31 e04,
32 e05,
33};
34
35/*
36 * Note where option modes are not defined, the chip simply does not
37 * support any.
38 * Other chips in the sca3000 series use i2c and are not included here.
39 *
40 * Some of these devices are only listed in the family data sheet and
41 * do not actually appear to be available.
42 */
43static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
44 [d01] = {
45 .scale = 7357,
46 .temp_output = true,
47 .measurement_mode_freq = 250,
48 .option_mode_1 = SCA3000_OP_MODE_BYPASS,
49 .option_mode_1_freq = 250,
50 .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
51 .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
52 },
53 [e02] = {
54 .scale = 9810,
55 .measurement_mode_freq = 125,
56 .option_mode_1 = SCA3000_OP_MODE_NARROW,
57 .option_mode_1_freq = 63,
58 .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
59 .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
60 },
61 [e04] = {
62 .scale = 19620,
63 .measurement_mode_freq = 100,
64 .option_mode_1 = SCA3000_OP_MODE_NARROW,
65 .option_mode_1_freq = 50,
66 .option_mode_2 = SCA3000_OP_MODE_WIDE,
67 .option_mode_2_freq = 400,
68 .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
69 .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
70 },
71 [e05] = {
72 .scale = 61313,
73 .measurement_mode_freq = 200,
74 .option_mode_1 = SCA3000_OP_MODE_NARROW,
75 .option_mode_1_freq = 50,
76 .option_mode_2 = SCA3000_OP_MODE_WIDE,
77 .option_mode_2_freq = 400,
78 .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
79 .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
80 },
81};
82
83int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
84{
85 st->tx[0] = SCA3000_WRITE_REG(address);
86 st->tx[1] = val;
87 return spi_write(st->us, st->tx, 2);
88}
89
90int sca3000_read_data_short(struct sca3000_state *st,
91 u8 reg_address_high,
92 int len)
93{
94 struct spi_transfer xfer[2] = {
95 {
96 .len = 1,
97 .tx_buf = st->tx,
98 }, {
99 .len = len,
100 .rx_buf = st->rx,
101 }
102 };
103 st->tx[0] = SCA3000_READ_REG(reg_address_high);
104
105 return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
106}
107
108/**
109 * sca3000_reg_lock_on() test if the ctrl register lock is on
110 *
111 * Lock must be held.
112 **/
113static int sca3000_reg_lock_on(struct sca3000_state *st)
114{
115 int ret;
116
117 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
118 if (ret < 0)
119 return ret;
120
121 return !(st->rx[0] & SCA3000_LOCKED);
122}
123
124/**
125 * __sca3000_unlock_reg_lock() unlock the control registers
126 *
127 * Note the device does not appear to support doing this in a single transfer.
128 * This should only ever be used as part of ctrl reg read.
129 * Lock must be held before calling this
130 **/
131static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
132{
133 struct spi_transfer xfer[3] = {
134 {
135 .len = 2,
136 .cs_change = 1,
137 .tx_buf = st->tx,
138 }, {
139 .len = 2,
140 .cs_change = 1,
141 .tx_buf = st->tx + 2,
142 }, {
143 .len = 2,
144 .tx_buf = st->tx + 4,
145 },
146 };
147 st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
148 st->tx[1] = 0x00;
149 st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
150 st->tx[3] = 0x50;
151 st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
152 st->tx[5] = 0xA0;
153
154 return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
155}
156
157/**
158 * sca3000_write_ctrl_reg() write to a lock protect ctrl register
159 * @sel: selects which registers we wish to write to
160 * @val: the value to be written
161 *
162 * Certain control registers are protected against overwriting by the lock
163 * register and use a shared write address. This function allows writing of
164 * these registers.
165 * Lock must be held.
166 **/
167static int sca3000_write_ctrl_reg(struct sca3000_state *st,
168 u8 sel,
169 uint8_t val)
170{
171 int ret;
172
173 ret = sca3000_reg_lock_on(st);
174 if (ret < 0)
175 goto error_ret;
176 if (ret) {
177 ret = __sca3000_unlock_reg_lock(st);
178 if (ret)
179 goto error_ret;
180 }
181
182 /* Set the control select register */
183 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
184 if (ret)
185 goto error_ret;
186
187 /* Write the actual value into the register */
188 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
189
190error_ret:
191 return ret;
192}
193
194/**
195 * sca3000_read_ctrl_reg() read from lock protected control register.
196 *
197 * Lock must be held.
198 **/
199static int sca3000_read_ctrl_reg(struct sca3000_state *st,
200 u8 ctrl_reg)
201{
202 int ret;
203
204 ret = sca3000_reg_lock_on(st);
205 if (ret < 0)
206 goto error_ret;
207 if (ret) {
208 ret = __sca3000_unlock_reg_lock(st);
209 if (ret)
210 goto error_ret;
211 }
212 /* Set the control select register */
213 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
214 if (ret)
215 goto error_ret;
216 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
217 if (ret)
218 goto error_ret;
219 return st->rx[0];
220error_ret:
221 return ret;
222}
223
224/**
225 * sca3000_show_rev() - sysfs interface to read the chip revision number
226 **/
227static ssize_t sca3000_show_rev(struct device *dev,
228 struct device_attribute *attr,
229 char *buf)
230{
231 int len = 0, ret;
232 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
233 struct sca3000_state *st = iio_priv(indio_dev);
234
235 mutex_lock(&st->lock);
236 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
237 if (ret < 0)
238 goto error_ret;
239 len += sprintf(buf + len,
240 "major=%d, minor=%d\n",
241 st->rx[0] & SCA3000_REVID_MAJOR_MASK,
242 st->rx[0] & SCA3000_REVID_MINOR_MASK);
243error_ret:
244 mutex_unlock(&st->lock);
245
246 return ret ? ret : len;
247}
248
249/**
250 * sca3000_show_available_measurement_modes() display available modes
251 *
252 * This is all read from chip specific data in the driver. Not all
253 * of the sca3000 series support modes other than normal.
254 **/
255static ssize_t
256sca3000_show_available_measurement_modes(struct device *dev,
257 struct device_attribute *attr,
258 char *buf)
259{
260 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
261 struct sca3000_state *st = iio_priv(indio_dev);
262 int len = 0;
263
264 len += sprintf(buf + len, "0 - normal mode");
265 switch (st->info->option_mode_1) {
266 case SCA3000_OP_MODE_NARROW:
267 len += sprintf(buf + len, ", 1 - narrow mode");
268 break;
269 case SCA3000_OP_MODE_BYPASS:
270 len += sprintf(buf + len, ", 1 - bypass mode");
271 break;
272 }
273 switch (st->info->option_mode_2) {
274 case SCA3000_OP_MODE_WIDE:
275 len += sprintf(buf + len, ", 2 - wide mode");
276 break;
277 }
278 /* always supported */
279 len += sprintf(buf + len, " 3 - motion detection\n");
280
281 return len;
282}
283
284/**
285 * sca3000_show_measurement_mode() sysfs read of current mode
286 **/
287static ssize_t
288sca3000_show_measurement_mode(struct device *dev,
289 struct device_attribute *attr,
290 char *buf)
291{
292 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
293 struct sca3000_state *st = iio_priv(indio_dev);
294 int len = 0, ret;
295
296 mutex_lock(&st->lock);
297 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
298 if (ret)
299 goto error_ret;
300 /* mask bottom 2 bits - only ones that are relevant */
301 st->rx[0] &= 0x03;
302 switch (st->rx[0]) {
303 case SCA3000_MEAS_MODE_NORMAL:
304 len += sprintf(buf + len, "0 - normal mode\n");
305 break;
306 case SCA3000_MEAS_MODE_MOT_DET:
307 len += sprintf(buf + len, "3 - motion detection\n");
308 break;
309 case SCA3000_MEAS_MODE_OP_1:
310 switch (st->info->option_mode_1) {
311 case SCA3000_OP_MODE_NARROW:
312 len += sprintf(buf + len, "1 - narrow mode\n");
313 break;
314 case SCA3000_OP_MODE_BYPASS:
315 len += sprintf(buf + len, "1 - bypass mode\n");
316 break;
317 }
318 break;
319 case SCA3000_MEAS_MODE_OP_2:
320 switch (st->info->option_mode_2) {
321 case SCA3000_OP_MODE_WIDE:
322 len += sprintf(buf + len, "2 - wide mode\n");
323 break;
324 }
325 break;
326 }
327
328error_ret:
329 mutex_unlock(&st->lock);
330
331 return ret ? ret : len;
332}
333
334/**
335 * sca3000_store_measurement_mode() set the current mode
336 **/
337static ssize_t
338sca3000_store_measurement_mode(struct device *dev,
339 struct device_attribute *attr,
340 const char *buf,
341 size_t len)
342{
343 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
344 struct sca3000_state *st = iio_priv(indio_dev);
345 int ret;
346 u8 mask = 0x03;
347 u8 val;
348
349 mutex_lock(&st->lock);
350 ret = kstrtou8(buf, 10, &val);
351 if (ret)
352 goto error_ret;
353 if (val > 3) {
354 ret = -EINVAL;
355 goto error_ret;
356 }
357 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
358 if (ret)
359 goto error_ret;
360 st->rx[0] &= ~mask;
361 st->rx[0] |= (val & mask);
362 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]);
363 if (ret)
364 goto error_ret;
365 mutex_unlock(&st->lock);
366
367 return len;
368
369error_ret:
370 mutex_unlock(&st->lock);
371
372 return ret;
373}
374
375/*
376 * Not even vaguely standard attributes so defined here rather than
377 * in the relevant IIO core headers
378 */
379static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
380 sca3000_show_available_measurement_modes,
381 NULL, 0);
382
383static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
384 sca3000_show_measurement_mode,
385 sca3000_store_measurement_mode,
386 0);
387
388/* More standard attributes */
389
390static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
391
392static const struct iio_event_spec sca3000_event = {
393 .type = IIO_EV_TYPE_MAG,
394 .dir = IIO_EV_DIR_RISING,
395 .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
396};
397
398#define SCA3000_CHAN(index, mod) \
399 { \
400 .type = IIO_ACCEL, \
401 .modified = 1, \
402 .channel2 = mod, \
403 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
404 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
405 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
406 .address = index, \
407 .scan_index = index, \
408 .scan_type = { \
409 .sign = 's', \
410 .realbits = 11, \
411 .storagebits = 16, \
412 .shift = 5, \
413 }, \
414 .event_spec = &sca3000_event, \
415 .num_event_specs = 1, \
416 }
417
418static const struct iio_chan_spec sca3000_channels[] = {
419 SCA3000_CHAN(0, IIO_MOD_X),
420 SCA3000_CHAN(1, IIO_MOD_Y),
421 SCA3000_CHAN(2, IIO_MOD_Z),
422};
423
424static const struct iio_chan_spec sca3000_channels_with_temp[] = {
425 SCA3000_CHAN(0, IIO_MOD_X),
426 SCA3000_CHAN(1, IIO_MOD_Y),
427 SCA3000_CHAN(2, IIO_MOD_Z),
428 {
429 .type = IIO_TEMP,
430 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
431 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
432 BIT(IIO_CHAN_INFO_OFFSET),
433 /* No buffer support */
434 .scan_index = -1,
435 },
436};
437
438static u8 sca3000_addresses[3][3] = {
439 [0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
440 SCA3000_MD_CTRL_OR_X},
441 [1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH,
442 SCA3000_MD_CTRL_OR_Y},
443 [2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH,
444 SCA3000_MD_CTRL_OR_Z},
445};
446
447/**
448 * __sca3000_get_base_freq() obtain mode specific base frequency
449 *
450 * lock must be held
451 **/
452static inline int __sca3000_get_base_freq(struct sca3000_state *st,
453 const struct sca3000_chip_info *info,
454 int *base_freq)
455{
456 int ret;
457
458 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
459 if (ret)
460 goto error_ret;
461 switch (0x03 & st->rx[0]) {
462 case SCA3000_MEAS_MODE_NORMAL:
463 *base_freq = info->measurement_mode_freq;
464 break;
465 case SCA3000_MEAS_MODE_OP_1:
466 *base_freq = info->option_mode_1_freq;
467 break;
468 case SCA3000_MEAS_MODE_OP_2:
469 *base_freq = info->option_mode_2_freq;
470 break;
471 default:
472 ret = -EINVAL;
473 }
474error_ret:
475 return ret;
476}
477
478/**
479 * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
480 *
481 * lock must be held
482 **/
483static int read_raw_samp_freq(struct sca3000_state *st, int *val)
484{
485 int ret;
486
487 ret = __sca3000_get_base_freq(st, st->info, val);
488 if (ret)
489 return ret;
490
491 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
492 if (ret < 0)
493 return ret;
494
495 if (*val > 0) {
496 ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK;
497 switch (ret) {
498 case SCA3000_OUT_CTRL_BUF_DIV_2:
499 *val /= 2;
500 break;
501 case SCA3000_OUT_CTRL_BUF_DIV_4:
502 *val /= 4;
503 break;
504 }
505 }
506
507 return 0;
508}
509
510/**
511 * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
512 *
513 * lock must be held
514 **/
515static int write_raw_samp_freq(struct sca3000_state *st, int val)
516{
517 int ret, base_freq, ctrlval;
518
519 ret = __sca3000_get_base_freq(st, st->info, &base_freq);
520 if (ret)
521 return ret;
522
523 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
524 if (ret < 0)
525 return ret;
526
527 ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK;
528
529 if (val == base_freq / 2)
530 ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
531 if (val == base_freq / 4)
532 ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
533 else if (val != base_freq)
534 return -EINVAL;
535
536 return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
537 ctrlval);
538}
539
540static int sca3000_read_raw(struct iio_dev *indio_dev,
541 struct iio_chan_spec const *chan,
542 int *val,
543 int *val2,
544 long mask)
545{
546 struct sca3000_state *st = iio_priv(indio_dev);
547 int ret;
548 u8 address;
549
550 switch (mask) {
551 case IIO_CHAN_INFO_RAW:
552 mutex_lock(&st->lock);
553 if (chan->type == IIO_ACCEL) {
554 if (st->mo_det_use_count) {
555 mutex_unlock(&st->lock);
556 return -EBUSY;
557 }
558 address = sca3000_addresses[chan->address][0];
559 ret = sca3000_read_data_short(st, address, 2);
560 if (ret < 0) {
561 mutex_unlock(&st->lock);
562 return ret;
563 }
564 *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
565 *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
566 (sizeof(*val) * 8 - 13);
567 } else {
568 /* get the temperature when available */
569 ret = sca3000_read_data_short(st,
570 SCA3000_REG_ADDR_TEMP_MSB,
571 2);
572 if (ret < 0) {
573 mutex_unlock(&st->lock);
574 return ret;
575 }
576 *val = ((st->rx[0] & 0x3F) << 3) |
577 ((st->rx[1] & 0xE0) >> 5);
578 }
579 mutex_unlock(&st->lock);
580 return IIO_VAL_INT;
581 case IIO_CHAN_INFO_SCALE:
582 *val = 0;
583 if (chan->type == IIO_ACCEL)
584 *val2 = st->info->scale;
585 else /* temperature */
586 *val2 = 555556;
587 return IIO_VAL_INT_PLUS_MICRO;
588 case IIO_CHAN_INFO_OFFSET:
589 *val = -214;
590 *val2 = 600000;
591 return IIO_VAL_INT_PLUS_MICRO;
592 case IIO_CHAN_INFO_SAMP_FREQ:
593 mutex_lock(&st->lock);
594 ret = read_raw_samp_freq(st, val);
595 mutex_unlock(&st->lock);
596 return ret ? ret : IIO_VAL_INT;
597 default:
598 return -EINVAL;
599 }
600}
601
602static int sca3000_write_raw(struct iio_dev *indio_dev,
603 struct iio_chan_spec const *chan,
604 int val, int val2, long mask)
605{
606 struct sca3000_state *st = iio_priv(indio_dev);
607 int ret;
608
609 switch (mask) {
610 case IIO_CHAN_INFO_SAMP_FREQ:
611 if (val2)
612 return -EINVAL;
613 mutex_lock(&st->lock);
614 ret = write_raw_samp_freq(st, val);
615 mutex_unlock(&st->lock);
616 return ret;
617 default:
618 return -EINVAL;
619 }
620
621 return ret;
622}
623
624/**
625 * sca3000_read_av_freq() sysfs function to get available frequencies
626 *
627 * The later modes are only relevant to the ring buffer - and depend on current
628 * mode. Note that data sheet gives rather wide tolerances for these so integer
629 * division will give good enough answer and not all chips have them specified
630 * at all.
631 **/
632static ssize_t sca3000_read_av_freq(struct device *dev,
633 struct device_attribute *attr,
634 char *buf)
635{
636 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
637 struct sca3000_state *st = iio_priv(indio_dev);
638 int len = 0, ret, val;
639
640 mutex_lock(&st->lock);
641 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
642 val = st->rx[0];
643 mutex_unlock(&st->lock);
644 if (ret)
645 goto error_ret;
646
647 switch (val & 0x03) {
648 case SCA3000_MEAS_MODE_NORMAL:
649 len += sprintf(buf + len, "%d %d %d\n",
650 st->info->measurement_mode_freq,
651 st->info->measurement_mode_freq / 2,
652 st->info->measurement_mode_freq / 4);
653 break;
654 case SCA3000_MEAS_MODE_OP_1:
655 len += sprintf(buf + len, "%d %d %d\n",
656 st->info->option_mode_1_freq,
657 st->info->option_mode_1_freq / 2,
658 st->info->option_mode_1_freq / 4);
659 break;
660 case SCA3000_MEAS_MODE_OP_2:
661 len += sprintf(buf + len, "%d %d %d\n",
662 st->info->option_mode_2_freq,
663 st->info->option_mode_2_freq / 2,
664 st->info->option_mode_2_freq / 4);
665 break;
666 }
667 return len;
668error_ret:
669 return ret;
670}
671
672/*
673 * Should only really be registered if ring buffer support is compiled in.
674 * Does no harm however and doing it right would add a fair bit of complexity
675 */
676static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
677
678/**
679 * sca3000_read_thresh() - query of a threshold
680 **/
681static int sca3000_read_thresh(struct iio_dev *indio_dev,
682 const struct iio_chan_spec *chan,
683 enum iio_event_type type,
684 enum iio_event_direction dir,
685 enum iio_event_info info,
686 int *val, int *val2)
687{
688 int ret, i;
689 struct sca3000_state *st = iio_priv(indio_dev);
690 int num = chan->channel2;
691
692 mutex_lock(&st->lock);
693 ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
694 mutex_unlock(&st->lock);
695 if (ret < 0)
696 return ret;
697 *val = 0;
698 if (num == 1)
699 for_each_set_bit(i, (unsigned long *)&ret,
700 ARRAY_SIZE(st->info->mot_det_mult_y))
701 *val += st->info->mot_det_mult_y[i];
702 else
703 for_each_set_bit(i, (unsigned long *)&ret,
704 ARRAY_SIZE(st->info->mot_det_mult_xz))
705 *val += st->info->mot_det_mult_xz[i];
706
707 return IIO_VAL_INT;
708}
709
710/**
711 * sca3000_write_thresh() control of threshold
712 **/
713static int sca3000_write_thresh(struct iio_dev *indio_dev,
714 const struct iio_chan_spec *chan,
715 enum iio_event_type type,
716 enum iio_event_direction dir,
717 enum iio_event_info info,
718 int val, int val2)
719{
720 struct sca3000_state *st = iio_priv(indio_dev);
721 int num = chan->channel2;
722 int ret;
723 int i;
724 u8 nonlinear = 0;
725
726 if (num == 1) {
727 i = ARRAY_SIZE(st->info->mot_det_mult_y);
728 while (i > 0)
729 if (val >= st->info->mot_det_mult_y[--i]) {
730 nonlinear |= (1 << i);
731 val -= st->info->mot_det_mult_y[i];
732 }
733 } else {
734 i = ARRAY_SIZE(st->info->mot_det_mult_xz);
735 while (i > 0)
736 if (val >= st->info->mot_det_mult_xz[--i]) {
737 nonlinear |= (1 << i);
738 val -= st->info->mot_det_mult_xz[i];
739 }
740 }
741
742 mutex_lock(&st->lock);
743 ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear);
744 mutex_unlock(&st->lock);
745
746 return ret;
747}
748
749static struct attribute *sca3000_attributes[] = {
750 &iio_dev_attr_revision.dev_attr.attr,
751 &iio_dev_attr_measurement_mode_available.dev_attr.attr,
752 &iio_dev_attr_measurement_mode.dev_attr.attr,
753 &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
754 NULL,
755};
756
757static const struct attribute_group sca3000_attribute_group = {
758 .attrs = sca3000_attributes,
759};
760
761/**
762 * sca3000_event_handler() - handling ring and non ring events
763 *
764 * Ring related interrupt handler. Depending on event, push to
765 * the ring buffer event chrdev or the event one.
766 *
767 * This function is complicated by the fact that the devices can signify ring
768 * and non ring events via the same interrupt line and they can only
769 * be distinguished via a read of the relevant status register.
770 **/
771static irqreturn_t sca3000_event_handler(int irq, void *private)
772{
773 struct iio_dev *indio_dev = private;
774 struct sca3000_state *st = iio_priv(indio_dev);
775 int ret, val;
776 s64 last_timestamp = iio_get_time_ns(indio_dev);
777
778 /*
779	 * If badly timed, this could lead to an extra read of the status reg,
780	 * but it ensures no interrupt is missed.
781 */
782 mutex_lock(&st->lock);
783 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
784 val = st->rx[0];
785 mutex_unlock(&st->lock);
786 if (ret)
787 goto done;
788
789 sca3000_ring_int_process(val, indio_dev->buffer);
790
791 if (val & SCA3000_INT_STATUS_FREE_FALL)
792 iio_push_event(indio_dev,
793 IIO_MOD_EVENT_CODE(IIO_ACCEL,
794 0,
795 IIO_MOD_X_AND_Y_AND_Z,
796 IIO_EV_TYPE_MAG,
797 IIO_EV_DIR_FALLING),
798 last_timestamp);
799
800 if (val & SCA3000_INT_STATUS_Y_TRIGGER)
801 iio_push_event(indio_dev,
802 IIO_MOD_EVENT_CODE(IIO_ACCEL,
803 0,
804 IIO_MOD_Y,
805 IIO_EV_TYPE_MAG,
806 IIO_EV_DIR_RISING),
807 last_timestamp);
808
809 if (val & SCA3000_INT_STATUS_X_TRIGGER)
810 iio_push_event(indio_dev,
811 IIO_MOD_EVENT_CODE(IIO_ACCEL,
812 0,
813 IIO_MOD_X,
814 IIO_EV_TYPE_MAG,
815 IIO_EV_DIR_RISING),
816 last_timestamp);
817
818 if (val & SCA3000_INT_STATUS_Z_TRIGGER)
819 iio_push_event(indio_dev,
820 IIO_MOD_EVENT_CODE(IIO_ACCEL,
821 0,
822 IIO_MOD_Z,
823 IIO_EV_TYPE_MAG,
824 IIO_EV_DIR_RISING),
825 last_timestamp);
826
827done:
828 return IRQ_HANDLED;
829}
830
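The events pushed above reach user space through the standard IIO event interface. A minimal user-space sketch (device number assumed to be 0, error handling omitted) that obtains the event fd and prints the raw event codes:

#if 0	/* user-space illustration, builds against the IIO uapi headers */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/events.h>

int main(void)
{
	struct iio_event_data ev;
	int fd = open("/dev/iio:device0", O_RDONLY);
	int event_fd;

	ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
	while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event id 0x%llx at %lld ns\n",
		       (unsigned long long)ev.id, (long long)ev.timestamp);
	close(event_fd);
	close(fd);
	return 0;
}
#endif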
831/**
832 * sca3000_read_event_config() - query which events are enabled
833 **/
834static int sca3000_read_event_config(struct iio_dev *indio_dev,
835 const struct iio_chan_spec *chan,
836 enum iio_event_type type,
837 enum iio_event_direction dir)
838{
839 struct sca3000_state *st = iio_priv(indio_dev);
840 int ret;
841 u8 protect_mask = 0x03;
842 int num = chan->channel2;
843
844 /* read current value of mode register */
845 mutex_lock(&st->lock);
846 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
847 if (ret)
848 goto error_ret;
849
850 if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET) {
851 ret = 0;
852 } else {
853 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
854 if (ret < 0)
855 goto error_ret;
856		/* only supporting logical ORs for now */
857 ret = !!(ret & sca3000_addresses[num][2]);
858 }
859error_ret:
860 mutex_unlock(&st->lock);
861
862 return ret;
863}
864
865/**
866 * sca3000_query_free_fall_mode() - query whether free fall mode is enabled
867 **/
868static ssize_t sca3000_query_free_fall_mode(struct device *dev,
869 struct device_attribute *attr,
870 char *buf)
871{
872 int ret;
873 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
874 struct sca3000_state *st = iio_priv(indio_dev);
875 int val;
876
877 mutex_lock(&st->lock);
878 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
879 val = st->rx[0];
880 mutex_unlock(&st->lock);
881 if (ret < 0)
882 return ret;
883 return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT));
884}
885
886/**
887 * sca3000_set_free_fall_mode() - simple on/off control for the free fall interrupt
888 *
889 * In these chips the free fall detector should send an interrupt if
890 * the device falls more than 25 cm. This has not been tested due
891 * to fragile wiring.
892 **/
893static ssize_t sca3000_set_free_fall_mode(struct device *dev,
894 struct device_attribute *attr,
895 const char *buf,
896 size_t len)
897{
898 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
899 struct sca3000_state *st = iio_priv(indio_dev);
900 u8 val;
901 int ret;
902 u8 protect_mask = SCA3000_FREE_FALL_DETECT;
903
904 mutex_lock(&st->lock);
905 ret = kstrtou8(buf, 10, &val);
906 if (ret)
907 goto error_ret;
908
909 /* read current value of mode register */
910 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
911 if (ret)
912 goto error_ret;
913
914 /* if off and should be on */
915 if (val && !(st->rx[0] & protect_mask))
916 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
917 (st->rx[0] | SCA3000_FREE_FALL_DETECT));
918 /* if on and should be off */
919 else if (!val && (st->rx[0] & protect_mask))
920 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
921 (st->rx[0] & ~protect_mask));
922error_ret:
923 mutex_unlock(&st->lock);
924
925 return ret ? ret : len;
926}
927
928/**
929 * sca3000_write_event_config() - simple on/off control for the motion detector
930 *
931 * This is a per-axis control, but enabling any axis will result in the
932 * motion detector unit being enabled.
933 * N.B. enabling the motion detector stops normal data acquisition.
934 * There is a complexity in knowing which mode to return to when
935 * this mode is disabled. Currently normal mode is assumed.
936 **/
937static int sca3000_write_event_config(struct iio_dev *indio_dev,
938 const struct iio_chan_spec *chan,
939 enum iio_event_type type,
940 enum iio_event_direction dir,
941 int state)
942{
943 struct sca3000_state *st = iio_priv(indio_dev);
944 int ret, ctrlval;
945 u8 protect_mask = 0x03;
946 int num = chan->channel2;
947
948 mutex_lock(&st->lock);
949 /*
950 * First read the motion detector config to find out if
951 * this axis is on
952 */
953 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
954 if (ret < 0)
955 goto exit_point;
956 ctrlval = ret;
957 /* if off and should be on */
958 if (state && !(ctrlval & sca3000_addresses[num][2])) {
959 ret = sca3000_write_ctrl_reg(st,
960 SCA3000_REG_CTRL_SEL_MD_CTRL,
961 ctrlval |
962 sca3000_addresses[num][2]);
963 if (ret)
964 goto exit_point;
965 st->mo_det_use_count++;
966 } else if (!state && (ctrlval & sca3000_addresses[num][2])) {
967 ret = sca3000_write_ctrl_reg(st,
968 SCA3000_REG_CTRL_SEL_MD_CTRL,
969 ctrlval &
970 ~(sca3000_addresses[num][2]));
971 if (ret)
972 goto exit_point;
973 st->mo_det_use_count--;
974 }
975
976 /* read current value of mode register */
977 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
978 if (ret)
979 goto exit_point;
980 /* if off and should be on */
981 if ((st->mo_det_use_count) &&
982 ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
983 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
984 (st->rx[0] & ~protect_mask)
985 | SCA3000_MEAS_MODE_MOT_DET);
986 /* if on and should be off */
987 else if (!(st->mo_det_use_count) &&
988 ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
989 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
990 (st->rx[0] & ~protect_mask));
991exit_point:
992 mutex_unlock(&st->lock);
993
994 return ret;
995}
996
997/* Free fall detector related event attribute */
998static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
999			     in_accel_x&y&z_mag_falling_en,
1000 S_IRUGO | S_IWUSR,
1001 sca3000_query_free_fall_mode,
1002 sca3000_set_free_fall_mode,
1003 0);
1004
1005static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
1006			    in_accel_x&y&z_mag_falling_period,
1007 "0.226");
1008
1009static struct attribute *sca3000_event_attributes[] = {
1010 &iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr,
1011 &iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr,
1012 NULL,
1013};
1014
1015static struct attribute_group sca3000_event_attribute_group = {
1016 .attrs = sca3000_event_attributes,
1017 .name = "events",
1018};
1019
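The free fall enable attribute above appears under the device's events directory in sysfs; user space simply writes 0 or 1 to it. A small sketch (device number and sysfs path assumed, the '&' characters are part of the ABI name):

#if 0	/* user-space illustration */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/events/"
			"in_accel_x&y&z_mag_falling_en", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}
#endif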
1020/**
1021 * sca3000_clean_setup() - get the device into a predictable state
1022 *
1023 * Devices use flash memory to store many of the register values
1024 * and can therefore come up in somewhat unpredictable states.
1025 * Reset everything on driver load to be safe.
1026 **/
1027static int sca3000_clean_setup(struct sca3000_state *st)
1028{
1029 int ret;
1030
1031 mutex_lock(&st->lock);
1032 /* Ensure all interrupts have been acknowledged */
1033 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
1034 if (ret)
1035 goto error_ret;
1036
1037 /* Turn off all motion detection channels */
1038 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
1039 if (ret < 0)
1040 goto error_ret;
1041 ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
1042 ret & SCA3000_MD_CTRL_PROT_MASK);
1043 if (ret)
1044 goto error_ret;
1045
1046 /* Disable ring buffer */
1047 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
1048 if (ret < 0)
1049 goto error_ret;
1050 ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
1051 (ret & SCA3000_OUT_CTRL_PROT_MASK)
1052 | SCA3000_OUT_CTRL_BUF_X_EN
1053 | SCA3000_OUT_CTRL_BUF_Y_EN
1054 | SCA3000_OUT_CTRL_BUF_Z_EN
1055 | SCA3000_OUT_CTRL_BUF_DIV_4);
1056 if (ret)
1057 goto error_ret;
1058	/* Enable interrupts relevant to the mode and set them up as active low */
1059 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
1060 if (ret)
1061 goto error_ret;
1062 ret = sca3000_write_reg(st,
1063 SCA3000_REG_ADDR_INT_MASK,
1064 (ret & SCA3000_INT_MASK_PROT_MASK)
1065 | SCA3000_INT_MASK_ACTIVE_LOW);
1066 if (ret)
1067 goto error_ret;
1068 /*
1069 * Select normal measurement mode, free fall off, ring off
1070 * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
1071	 * as that occurs in one of the examples in the datasheet
1072 */
1073 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
1074 if (ret)
1075 goto error_ret;
1076 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
1077 (st->rx[0] & SCA3000_MODE_PROT_MASK));
1078 st->bpse = 11;
1079
1080error_ret:
1081 mutex_unlock(&st->lock);
1082 return ret;
1083}
1084
1085static const struct iio_info sca3000_info = {
1086 .attrs = &sca3000_attribute_group,
1087 .read_raw = &sca3000_read_raw,
1088 .write_raw = &sca3000_write_raw,
1089 .event_attrs = &sca3000_event_attribute_group,
1090 .read_event_value = &sca3000_read_thresh,
1091 .write_event_value = &sca3000_write_thresh,
1092 .read_event_config = &sca3000_read_event_config,
1093 .write_event_config = &sca3000_write_event_config,
1094 .driver_module = THIS_MODULE,
1095};
1096
1097static int sca3000_probe(struct spi_device *spi)
1098{
1099 int ret;
1100 struct sca3000_state *st;
1101 struct iio_dev *indio_dev;
1102
1103 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
1104 if (!indio_dev)
1105 return -ENOMEM;
1106
1107 st = iio_priv(indio_dev);
1108 spi_set_drvdata(spi, indio_dev);
1109 st->us = spi;
1110 mutex_init(&st->lock);
1111 st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
1112 ->driver_data];
1113
1114 indio_dev->dev.parent = &spi->dev;
1115 indio_dev->name = spi_get_device_id(spi)->name;
1116 indio_dev->info = &sca3000_info;
1117 if (st->info->temp_output) {
1118 indio_dev->channels = sca3000_channels_with_temp;
1119 indio_dev->num_channels =
1120 ARRAY_SIZE(sca3000_channels_with_temp);
1121 } else {
1122 indio_dev->channels = sca3000_channels;
1123 indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
1124 }
1125 indio_dev->modes = INDIO_DIRECT_MODE;
1126
1127 sca3000_configure_ring(indio_dev);
1128 ret = iio_device_register(indio_dev);
1129 if (ret < 0)
1130 return ret;
1131
1132 if (spi->irq) {
1133 ret = request_threaded_irq(spi->irq,
1134 NULL,
1135 &sca3000_event_handler,
1136 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1137 "sca3000",
1138 indio_dev);
1139 if (ret)
1140 goto error_unregister_dev;
1141 }
1142 sca3000_register_ring_funcs(indio_dev);
1143 ret = sca3000_clean_setup(st);
1144 if (ret)
1145 goto error_free_irq;
1146 return 0;
1147
1148error_free_irq:
1149 if (spi->irq)
1150 free_irq(spi->irq, indio_dev);
1151error_unregister_dev:
1152 iio_device_unregister(indio_dev);
1153 return ret;
1154}
1155
1156static int sca3000_stop_all_interrupts(struct sca3000_state *st)
1157{
1158 int ret;
1159
1160 mutex_lock(&st->lock);
1161 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
1162 if (ret)
1163 goto error_ret;
1164 ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
1165 (st->rx[0] &
1166 ~(SCA3000_INT_MASK_RING_THREE_QUARTER |
1167 SCA3000_INT_MASK_RING_HALF |
1168 SCA3000_INT_MASK_ALL_INTS)));
1169error_ret:
1170 mutex_unlock(&st->lock);
1171 return ret;
1172}
1173
1174static int sca3000_remove(struct spi_device *spi)
1175{
1176 struct iio_dev *indio_dev = spi_get_drvdata(spi);
1177 struct sca3000_state *st = iio_priv(indio_dev);
1178
1179 /* Must ensure no interrupts can be generated after this! */
1180 sca3000_stop_all_interrupts(st);
1181 if (spi->irq)
1182 free_irq(spi->irq, indio_dev);
1183 iio_device_unregister(indio_dev);
1184 sca3000_unconfigure_ring(indio_dev);
1185
1186 return 0;
1187}
1188
1189static const struct spi_device_id sca3000_id[] = {
1190 {"sca3000_d01", d01},
1191 {"sca3000_e02", e02},
1192 {"sca3000_e04", e04},
1193 {"sca3000_e05", e05},
1194 {}
1195};
1196MODULE_DEVICE_TABLE(spi, sca3000_id);
1197
1198static struct spi_driver sca3000_driver = {
1199 .driver = {
1200 .name = "sca3000",
1201 },
1202 .probe = sca3000_probe,
1203 .remove = sca3000_remove,
1204 .id_table = sca3000_id,
1205};
1206module_spi_driver(sca3000_driver);
1207
1208MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
1209MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
1210MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
deleted file mode 100644
index d1cb9b9cf22b..000000000000
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
9 *
10 */
11
12#include <linux/interrupt.h>
13#include <linux/fs.h>
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/spi/spi.h>
17#include <linux/sysfs.h>
18#include <linux/sched.h>
19#include <linux/poll.h>
20
21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h>
23#include <linux/iio/buffer.h>
24#include "../ring_hw.h"
25#include "sca3000.h"
26
27/* RFC / future work
28 *
29 * The internal ring buffer doesn't actually change what it holds depending
30 * on which signals are enabled etc, merely whether you can read them.
31 * As such the scan mode selection is somewhat different than for a software
32 * ring buffer and changing it actually covers any data already in the buffer.
33 * Currently scan elements aren't configured so it doesn't matter.
34 */
35
36static int sca3000_read_data(struct sca3000_state *st,
37 u8 reg_address_high,
38 u8 **rx_p,
39 int len)
40{
41 int ret;
42 struct spi_transfer xfer[2] = {
43 {
44 .len = 1,
45 .tx_buf = st->tx,
46 }, {
47 .len = len,
48 }
49 };
50 *rx_p = kmalloc(len, GFP_KERNEL);
51 if (!*rx_p) {
52 ret = -ENOMEM;
53 goto error_ret;
54 }
55 xfer[1].rx_buf = *rx_p;
56 st->tx[0] = SCA3000_READ_REG(reg_address_high);
57 ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
58 if (ret) {
59 dev_err(get_device(&st->us->dev), "problem reading register");
60 goto error_free_rx;
61 }
62
63 return 0;
64error_free_rx:
65 kfree(*rx_p);
66error_ret:
67 return ret;
68}
69
70/**
71 * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
72 * @r: the ring
73 * @count: number of bytes to try and pull
74 * @buf: userspace buffer into which the pulled samples are copied
75 *
76 * Currently does not provide timestamps. As the hardware doesn't add them they
77 * can only be inferred approximately from ring buffer events such as 50% full
78 * and knowledge of when the buffer was last emptied. This is left to userspace.
79 **/
80static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
81 size_t count, char __user *buf)
82{
83 struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
84 struct iio_dev *indio_dev = hw_ring->private;
85 struct sca3000_state *st = iio_priv(indio_dev);
86 u8 *rx;
87 int ret, i, num_available, num_read = 0;
88 int bytes_per_sample = 1;
89
90 if (st->bpse == 11)
91 bytes_per_sample = 2;
92
93 mutex_lock(&st->lock);
94 if (count % bytes_per_sample) {
95 ret = -EINVAL;
96 goto error_ret;
97 }
98
99 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
100 if (ret)
101 goto error_ret;
102 num_available = st->rx[0];
103 /*
104 * num_available is the total number of samples available
105 * i.e. number of time points * number of channels.
106 */
107 if (count > num_available * bytes_per_sample)
108 num_read = num_available * bytes_per_sample;
109 else
110 num_read = count;
111
112 ret = sca3000_read_data(st,
113 SCA3000_REG_ADDR_RING_OUT,
114 &rx, num_read);
115 if (ret)
116 goto error_ret;
117
118 for (i = 0; i < num_read / sizeof(u16); i++)
119 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
120
121 if (copy_to_user(buf, rx, num_read))
122 ret = -EFAULT;
123 kfree(rx);
124 r->stufftoread = 0;
125error_ret:
126 mutex_unlock(&st->lock);
127
128 return ret ? ret : num_read;
129}
130
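Data pulled from this hardware ring reaches user space through the usual IIO buffer character device once the buffer has been enabled via sysfs. A minimal sketch (device number assumed, buffer/enable step omitted; the handler above already converts the samples to native-endian 16-bit values):

#if 0	/* user-space illustration */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int16_t samples[64];
	int fd = open("/dev/iio:device0", O_RDONLY);
	ssize_t i, n = read(fd, samples, sizeof(samples));

	for (i = 0; i < n / (ssize_t)sizeof(samples[0]); i++)
		printf("%d\n", samples[i]);
	close(fd);
	return 0;
}
#endif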
131static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
132{
133 return r->stufftoread ? r->watermark : 0;
134}
135
136/**
137 * sca3000_query_ring_int() - query whether the hardware ring status interrupt is enabled
138 **/
139static ssize_t sca3000_query_ring_int(struct device *dev,
140 struct device_attribute *attr,
141 char *buf)
142{
143 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
144 int ret, val;
145 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
146 struct sca3000_state *st = iio_priv(indio_dev);
147
148 mutex_lock(&st->lock);
149 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
150 val = st->rx[0];
151 mutex_unlock(&st->lock);
152 if (ret)
153 return ret;
154
155 return sprintf(buf, "%d\n", !!(val & this_attr->address));
156}
157
158/**
159 * sca3000_set_ring_int() - set the state of the ring status interrupt
160 **/
161static ssize_t sca3000_set_ring_int(struct device *dev,
162 struct device_attribute *attr,
163 const char *buf,
164 size_t len)
165{
166 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
167 struct sca3000_state *st = iio_priv(indio_dev);
168 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
169 u8 val;
170 int ret;
171
172 mutex_lock(&st->lock);
173 ret = kstrtou8(buf, 10, &val);
174 if (ret)
175 goto error_ret;
176 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
177 if (ret)
178 goto error_ret;
179 if (val)
180 ret = sca3000_write_reg(st,
181 SCA3000_REG_ADDR_INT_MASK,
182 st->rx[0] | this_attr->address);
183 else
184 ret = sca3000_write_reg(st,
185 SCA3000_REG_ADDR_INT_MASK,
186 st->rx[0] & ~this_attr->address);
187error_ret:
188 mutex_unlock(&st->lock);
189
190 return ret ? ret : len;
191}
192
193static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
194 sca3000_query_ring_int,
195 sca3000_set_ring_int,
196 SCA3000_INT_MASK_RING_HALF);
197
198static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
199 sca3000_query_ring_int,
200 sca3000_set_ring_int,
201 SCA3000_INT_MASK_RING_THREE_QUARTER);
202
203static ssize_t sca3000_show_buffer_scale(struct device *dev,
204 struct device_attribute *attr,
205 char *buf)
206{
207 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
208 struct sca3000_state *st = iio_priv(indio_dev);
209
210 return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
211}
212
213static IIO_DEVICE_ATTR(in_accel_scale,
214 S_IRUGO,
215 sca3000_show_buffer_scale,
216 NULL,
217 0);
218
219/*
220 * Ring buffer attributes
221 * This device is a bit unusual in that the sampling frequency and bpse
222 * only apply to the ring buffer. At all times full rate and accuracy
223 * are available via direct reading from the registers.
224 */
225static const struct attribute *sca3000_ring_attributes[] = {
226 &iio_dev_attr_50_percent.dev_attr.attr,
227 &iio_dev_attr_75_percent.dev_attr.attr,
228 &iio_dev_attr_in_accel_scale.dev_attr.attr,
229 NULL,
230};
231
232static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
233{
234 struct iio_buffer *buf;
235 struct iio_hw_buffer *ring;
236
237 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
238 if (!ring)
239 return NULL;
240
241 ring->private = indio_dev;
242 buf = &ring->buf;
243 buf->stufftoread = 0;
244 buf->length = 64;
245 buf->attrs = sca3000_ring_attributes;
246 iio_buffer_init(buf);
247
248 return buf;
249}
250
251static void sca3000_ring_release(struct iio_buffer *r)
252{
253 kfree(iio_to_hw_buf(r));
254}
255
256static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
257 .read_first_n = &sca3000_read_first_n_hw_rb,
258 .data_available = sca3000_ring_buf_data_available,
259 .release = sca3000_ring_release,
260
261 .modes = INDIO_BUFFER_HARDWARE,
262};
263
264int sca3000_configure_ring(struct iio_dev *indio_dev)
265{
266 struct iio_buffer *buffer;
267
268 buffer = sca3000_rb_allocate(indio_dev);
269 if (!buffer)
270 return -ENOMEM;
271 indio_dev->modes |= INDIO_BUFFER_HARDWARE;
272
273 indio_dev->buffer->access = &sca3000_ring_access_funcs;
274
275 iio_device_attach_buffer(indio_dev, buffer);
276
277 return 0;
278}
279
280void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
281{
282 iio_buffer_put(indio_dev->buffer);
283}
284
285static inline
286int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
287{
288 struct sca3000_state *st = iio_priv(indio_dev);
289 int ret;
290
291 mutex_lock(&st->lock);
292 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
293 if (ret)
294 goto error_ret;
295 if (state) {
296 dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
297 ret = sca3000_write_reg(st,
298 SCA3000_REG_ADDR_MODE,
299 (st->rx[0] | SCA3000_RING_BUF_ENABLE));
300 } else
301 ret = sca3000_write_reg(st,
302 SCA3000_REG_ADDR_MODE,
303 (st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
304error_ret:
305 mutex_unlock(&st->lock);
306
307 return ret;
308}
309
310/**
311 * sca3000_hw_ring_preenable() - hw ring buffer preenable function
312 *
313 * Very simple enable function as the chip allows normal reads
314 * during ring buffer operation, so as long as it is indeed running
315 * before we notify the core, the precise ordering does not matter.
316 **/
317static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
318{
319 return __sca3000_hw_ring_state_set(indio_dev, 1);
320}
321
322static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
323{
324 return __sca3000_hw_ring_state_set(indio_dev, 0);
325}
326
327static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
328 .preenable = &sca3000_hw_ring_preenable,
329 .postdisable = &sca3000_hw_ring_postdisable,
330};
331
332void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
333{
334 indio_dev->setup_ops = &sca3000_ring_setup_ops;
335}
336
337/**
338 * sca3000_ring_int_process() - ring-specific interrupt handling
339 *
340 * This is only split from the main interrupt handler so as to
341 * reduce the amount of code if the ring buffer is not enabled.
342 **/
343void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
344{
345 if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
346 SCA3000_INT_STATUS_HALF)) {
347 ring->stufftoread = true;
348 wake_up_interruptible(&ring->pollq);
349 }
350}
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 3cdd83ccec8e..ac09485923b6 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -2,7 +2,6 @@
2# Makefile for industrial I/O ADC drivers 2# Makefile for industrial I/O ADC drivers
3# 3#
4 4
5ad7606-y := ad7606_core.o ad7606_ring.o
6obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o 5obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
7obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o 6obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
8obj-$(CONFIG_AD7606) += ad7606.o 7obj-$(CONFIG_AD7606) += ad7606.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1cf6b79801a9..1fb68c01abd5 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -152,7 +152,8 @@
152 */ 152 */
153 153
154struct ad7192_state { 154struct ad7192_state {
155 struct regulator *reg; 155 struct regulator *avdd;
156 struct regulator *dvdd;
156 u16 int_vref_mv; 157 u16 int_vref_mv;
157 u32 mclk; 158 u32 mclk;
158 u32 f_order; 159 u32 f_order;
@@ -322,57 +323,6 @@ out:
322 return ret; 323 return ret;
323} 324}
324 325
325static ssize_t ad7192_read_frequency(struct device *dev,
326 struct device_attribute *attr,
327 char *buf)
328{
329 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
330 struct ad7192_state *st = iio_priv(indio_dev);
331
332 return sprintf(buf, "%d\n", st->mclk /
333 (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
334}
335
336static ssize_t ad7192_write_frequency(struct device *dev,
337 struct device_attribute *attr,
338 const char *buf,
339 size_t len)
340{
341 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
342 struct ad7192_state *st = iio_priv(indio_dev);
343 unsigned long lval;
344 int div, ret;
345
346 ret = kstrtoul(buf, 10, &lval);
347 if (ret)
348 return ret;
349 if (lval == 0)
350 return -EINVAL;
351
352 ret = iio_device_claim_direct_mode(indio_dev);
353 if (ret)
354 return ret;
355
356 div = st->mclk / (lval * st->f_order * 1024);
357 if (div < 1 || div > 1023) {
358 ret = -EINVAL;
359 goto out;
360 }
361
362 st->mode &= ~AD7192_MODE_RATE(-1);
363 st->mode |= AD7192_MODE_RATE(div);
364 ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
365
366out:
367 iio_device_release_direct_mode(indio_dev);
368
369 return ret ? ret : len;
370}
371
372static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
373 ad7192_read_frequency,
374 ad7192_write_frequency);
375
376static ssize_t 326static ssize_t
377ad7192_show_scale_available(struct device *dev, 327ad7192_show_scale_available(struct device *dev,
378 struct device_attribute *attr, char *buf) 328 struct device_attribute *attr, char *buf)
@@ -471,7 +421,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
471 AD7192_REG_MODE); 421 AD7192_REG_MODE);
472 422
473static struct attribute *ad7192_attributes[] = { 423static struct attribute *ad7192_attributes[] = {
474 &iio_dev_attr_sampling_frequency.dev_attr.attr,
475 &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr, 424 &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
476 &iio_dev_attr_in_voltage_scale_available.dev_attr.attr, 425 &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
477 &iio_dev_attr_bridge_switch_en.dev_attr.attr, 426 &iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -484,7 +433,6 @@ static const struct attribute_group ad7192_attribute_group = {
484}; 433};
485 434
486static struct attribute *ad7195_attributes[] = { 435static struct attribute *ad7195_attributes[] = {
487 &iio_dev_attr_sampling_frequency.dev_attr.attr,
488 &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr, 436 &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
489 &iio_dev_attr_in_voltage_scale_available.dev_attr.attr, 437 &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
490 &iio_dev_attr_bridge_switch_en.dev_attr.attr, 438 &iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -536,6 +484,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
536 if (chan->type == IIO_TEMP) 484 if (chan->type == IIO_TEMP)
537 *val -= 273 * ad7192_get_temp_scale(unipolar); 485 *val -= 273 * ad7192_get_temp_scale(unipolar);
538 return IIO_VAL_INT; 486 return IIO_VAL_INT;
487 case IIO_CHAN_INFO_SAMP_FREQ:
488 *val = st->mclk /
489 (st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
490 return IIO_VAL_INT;
539 } 491 }
540 492
541 return -EINVAL; 493 return -EINVAL;
@@ -548,7 +500,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
548 long mask) 500 long mask)
549{ 501{
550 struct ad7192_state *st = iio_priv(indio_dev); 502 struct ad7192_state *st = iio_priv(indio_dev);
551 int ret, i; 503 int ret, i, div;
552 unsigned int tmp; 504 unsigned int tmp;
553 505
554 ret = iio_device_claim_direct_mode(indio_dev); 506 ret = iio_device_claim_direct_mode(indio_dev);
@@ -572,6 +524,22 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
572 break; 524 break;
573 } 525 }
574 break; 526 break;
527 case IIO_CHAN_INFO_SAMP_FREQ:
528 if (!val) {
529 ret = -EINVAL;
530 break;
531 }
532
533 div = st->mclk / (val * st->f_order * 1024);
534 if (div < 1 || div > 1023) {
535 ret = -EINVAL;
536 break;
537 }
538
539 st->mode &= ~AD7192_MODE_RATE(-1);
540 st->mode |= AD7192_MODE_RATE(div);
541 ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
542 break;
575 default: 543 default:
576 ret = -EINVAL; 544 ret = -EINVAL;
577 } 545 }
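Worked example of the rate conversion used in both directions above, assuming the typical 4.9152 MHz internal clock and f_order == 1:

/*
 * f_out = f_mclk / (f_order * 1024 * div)
 * Writing 50 to sampling_frequency gives div = 4915200 / (50 * 1024) = 96,
 * and reading back gives 4915200 / (1024 * 96) = 50 Hz exactly.
 */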
@@ -585,7 +553,14 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
585 struct iio_chan_spec const *chan, 553 struct iio_chan_spec const *chan,
586 long mask) 554 long mask)
587{ 555{
588 return IIO_VAL_INT_PLUS_NANO; 556 switch (mask) {
557 case IIO_CHAN_INFO_SCALE:
558 return IIO_VAL_INT_PLUS_NANO;
559 case IIO_CHAN_INFO_SAMP_FREQ:
560 return IIO_VAL_INT;
561 default:
562 return -EINVAL;
563 }
589} 564}
590 565
591static const struct iio_info ad7192_info = { 566static const struct iio_info ad7192_info = {
@@ -659,15 +634,30 @@ static int ad7192_probe(struct spi_device *spi)
659 634
660 st = iio_priv(indio_dev); 635 st = iio_priv(indio_dev);
661 636
662 st->reg = devm_regulator_get(&spi->dev, "vcc"); 637 st->avdd = devm_regulator_get(&spi->dev, "avdd");
663 if (!IS_ERR(st->reg)) { 638 if (IS_ERR(st->avdd))
664 ret = regulator_enable(st->reg); 639 return PTR_ERR(st->avdd);
665 if (ret)
666 return ret;
667 640
668 voltage_uv = regulator_get_voltage(st->reg); 641 ret = regulator_enable(st->avdd);
642 if (ret) {
643 dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
644 return ret;
645 }
646
647 st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
648 if (IS_ERR(st->dvdd)) {
649 ret = PTR_ERR(st->dvdd);
650 goto error_disable_avdd;
669 } 651 }
670 652
653 ret = regulator_enable(st->dvdd);
654 if (ret) {
655 dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
656 goto error_disable_avdd;
657 }
658
659 voltage_uv = regulator_get_voltage(st->avdd);
660
671 if (pdata->vref_mv) 661 if (pdata->vref_mv)
672 st->int_vref_mv = pdata->vref_mv; 662 st->int_vref_mv = pdata->vref_mv;
673 else if (voltage_uv) 663 else if (voltage_uv)
@@ -701,7 +691,7 @@ static int ad7192_probe(struct spi_device *spi)
701 691
702 ret = ad_sd_setup_buffer_and_trigger(indio_dev); 692 ret = ad_sd_setup_buffer_and_trigger(indio_dev);
703 if (ret) 693 if (ret)
704 goto error_disable_reg; 694 goto error_disable_dvdd;
705 695
706 ret = ad7192_setup(st, pdata); 696 ret = ad7192_setup(st, pdata);
707 if (ret) 697 if (ret)
@@ -714,9 +704,10 @@ static int ad7192_probe(struct spi_device *spi)
714 704
715error_remove_trigger: 705error_remove_trigger:
716 ad_sd_cleanup_buffer_and_trigger(indio_dev); 706 ad_sd_cleanup_buffer_and_trigger(indio_dev);
717error_disable_reg: 707error_disable_dvdd:
718 if (!IS_ERR(st->reg)) 708 regulator_disable(st->dvdd);
719 regulator_disable(st->reg); 709error_disable_avdd:
710 regulator_disable(st->avdd);
720 711
721 return ret; 712 return ret;
722} 713}
@@ -729,8 +720,8 @@ static int ad7192_remove(struct spi_device *spi)
729 iio_device_unregister(indio_dev); 720 iio_device_unregister(indio_dev);
730 ad_sd_cleanup_buffer_and_trigger(indio_dev); 721 ad_sd_cleanup_buffer_and_trigger(indio_dev);
731 722
732 if (!IS_ERR(st->reg)) 723 regulator_disable(st->dvdd);
733 regulator_disable(st->reg); 724 regulator_disable(st->avdd);
734 725
735 return 0; 726 return 0;
736} 727}
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b460dda7eb65..ee679ac0368f 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -777,7 +777,7 @@ static struct attribute *ad7280_event_attributes[] = {
777 NULL, 777 NULL,
778}; 778};
779 779
780static struct attribute_group ad7280_event_attrs_group = { 780static const struct attribute_group ad7280_event_attrs_group = {
781 .attrs = ad7280_event_attributes, 781 .attrs = ad7280_event_attributes,
782}; 782};
783 783
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606.c
index f79ee61851f6..453190864b2f 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -13,7 +13,7 @@
13#include <linux/sysfs.h> 13#include <linux/sysfs.h>
14#include <linux/regulator/consumer.h> 14#include <linux/regulator/consumer.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/gpio.h> 16#include <linux/gpio/consumer.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h> 19#include <linux/module.h>
@@ -21,58 +21,109 @@
21#include <linux/iio/iio.h> 21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h> 22#include <linux/iio/sysfs.h>
23#include <linux/iio/buffer.h> 23#include <linux/iio/buffer.h>
24#include <linux/iio/trigger_consumer.h>
25#include <linux/iio/triggered_buffer.h>
24 26
25#include "ad7606.h" 27#include "ad7606.h"
26 28
27int ad7606_reset(struct ad7606_state *st) 29static int ad7606_reset(struct ad7606_state *st)
28{ 30{
29 if (gpio_is_valid(st->pdata->gpio_reset)) { 31 if (st->gpio_reset) {
30 gpio_set_value(st->pdata->gpio_reset, 1); 32 gpiod_set_value(st->gpio_reset, 1);
31 ndelay(100); /* t_reset >= 100ns */ 33 ndelay(100); /* t_reset >= 100ns */
32 gpio_set_value(st->pdata->gpio_reset, 0); 34 gpiod_set_value(st->gpio_reset, 0);
33 return 0; 35 return 0;
34 } 36 }
35 37
36 return -ENODEV; 38 return -ENODEV;
37} 39}
38 40
41static int ad7606_read_samples(struct ad7606_state *st)
42{
43 unsigned int num = st->chip_info->num_channels;
44 u16 *data = st->data;
45 int ret;
46
47 /*
48 * The frstdata signal is set to high while and after reading the sample
49 * of the first channel and low for all other channels. This can be used
50 * to check that the incoming data is correctly aligned. During normal
51 * operation the data should never become unaligned, but some glitch or
52 * electrostatic discharge might cause an extra read or clock cycle.
 53	 * Monitoring the frstdata signal allows recovery from such failure
54 * situations.
55 */
56
57 if (st->gpio_frstdata) {
58 ret = st->bops->read_block(st->dev, 1, data);
59 if (ret)
60 return ret;
61
62 if (!gpiod_get_value(st->gpio_frstdata)) {
63 ad7606_reset(st);
64 return -EIO;
65 }
66
67 data++;
68 num--;
69 }
70
71 return st->bops->read_block(st->dev, num, data);
72}
73
74static irqreturn_t ad7606_trigger_handler(int irq, void *p)
75{
76 struct iio_poll_func *pf = p;
77 struct ad7606_state *st = iio_priv(pf->indio_dev);
78
79 gpiod_set_value(st->gpio_convst, 1);
80
81 return IRQ_HANDLED;
82}
83
84/**
 85 * ad7606_poll_bh_to_ring() - bh of trigger-launched polling to ring buffer
 86 * @work_s: the work struct through which this was scheduled
 87 *
 88 * Currently there is no option in this driver to disable the saving of
 89 * timestamps within the ring.
 90 * I think limiting this to one copy at a time was to avoid problems if the
 91 * trigger rate was set far too high and the reads then locked up the computer.
92 **/
93static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
94{
95 struct ad7606_state *st = container_of(work_s, struct ad7606_state,
96 poll_work);
97 struct iio_dev *indio_dev = iio_priv_to_dev(st);
98 int ret;
99
100 ret = ad7606_read_samples(st);
101 if (ret == 0)
102 iio_push_to_buffers_with_timestamp(indio_dev, st->data,
103 iio_get_time_ns(indio_dev));
104
105 gpiod_set_value(st->gpio_convst, 0);
106 iio_trigger_notify_done(indio_dev->trig);
107}
108
39static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch) 109static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
40{ 110{
41 struct ad7606_state *st = iio_priv(indio_dev); 111 struct ad7606_state *st = iio_priv(indio_dev);
42 int ret; 112 int ret;
43 113
44 st->done = false; 114 st->done = false;
45 gpio_set_value(st->pdata->gpio_convst, 1); 115 gpiod_set_value(st->gpio_convst, 1);
46 116
47 ret = wait_event_interruptible(st->wq_data_avail, st->done); 117 ret = wait_event_interruptible(st->wq_data_avail, st->done);
48 if (ret) 118 if (ret)
49 goto error_ret; 119 goto error_ret;
50 120
51 if (gpio_is_valid(st->pdata->gpio_frstdata)) { 121 ret = ad7606_read_samples(st);
52 ret = st->bops->read_block(st->dev, 1, st->data); 122 if (ret == 0)
53 if (ret) 123 ret = st->data[ch];
54 goto error_ret;
55 if (!gpio_get_value(st->pdata->gpio_frstdata)) {
56 /* This should never happen */
57 ad7606_reset(st);
58 ret = -EIO;
59 goto error_ret;
60 }
61 ret = st->bops->read_block(st->dev,
62 st->chip_info->num_channels - 1, &st->data[1]);
63 if (ret)
64 goto error_ret;
65 } else {
66 ret = st->bops->read_block(st->dev,
67 st->chip_info->num_channels, st->data);
68 if (ret)
69 goto error_ret;
70 }
71
72 ret = st->data[ch];
73 124
74error_ret: 125error_ret:
75 gpio_set_value(st->pdata->gpio_convst, 0); 126 gpiod_set_value(st->gpio_convst, 0);
76 127
77 return ret; 128 return ret;
78} 129}
@@ -103,6 +154,9 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
103 *val = st->range * 2; 154 *val = st->range * 2;
104 *val2 = st->chip_info->channels[0].scan_type.realbits; 155 *val2 = st->chip_info->channels[0].scan_type.realbits;
105 return IIO_VAL_FRACTIONAL_LOG2; 156 return IIO_VAL_FRACTIONAL_LOG2;
157 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
158 *val = st->oversampling;
159 return IIO_VAL_INT;
106 } 160 }
107 return -EINVAL; 161 return -EINVAL;
108} 162}
@@ -129,12 +183,11 @@ static ssize_t ad7606_store_range(struct device *dev,
129 if (ret) 183 if (ret)
130 return ret; 184 return ret;
131 185
132 if (!(lval == 5000 || lval == 10000)) { 186 if (!(lval == 5000 || lval == 10000))
133 dev_err(dev, "range is not supported\n");
134 return -EINVAL; 187 return -EINVAL;
135 } 188
136 mutex_lock(&indio_dev->mlock); 189 mutex_lock(&indio_dev->mlock);
137 gpio_set_value(st->pdata->gpio_range, lval == 10000); 190 gpiod_set_value(st->gpio_range, lval == 10000);
138 st->range = lval; 191 st->range = lval;
139 mutex_unlock(&indio_dev->mlock); 192 mutex_unlock(&indio_dev->mlock);
140 193
@@ -145,19 +198,9 @@ static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
145 ad7606_show_range, ad7606_store_range, 0); 198 ad7606_show_range, ad7606_store_range, 0);
146static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000"); 199static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
147 200
148static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
149 struct device_attribute *attr,
150 char *buf)
151{
152 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
153 struct ad7606_state *st = iio_priv(indio_dev);
154
155 return sprintf(buf, "%u\n", st->oversampling);
156}
157
158static int ad7606_oversampling_get_index(unsigned int val) 201static int ad7606_oversampling_get_index(unsigned int val)
159{ 202{
160 unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64}; 203 unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};
161 int i; 204 int i;
162 205
163 for (i = 0; i < ARRAY_SIZE(supported); i++) 206 for (i = 0; i < ARRAY_SIZE(supported); i++)
@@ -167,44 +210,45 @@ static int ad7606_oversampling_get_index(unsigned int val)
167 return -EINVAL; 210 return -EINVAL;
168} 211}
169 212
170static ssize_t ad7606_store_oversampling_ratio(struct device *dev, 213static int ad7606_write_raw(struct iio_dev *indio_dev,
171 struct device_attribute *attr, 214 struct iio_chan_spec const *chan,
172 const char *buf, size_t count) 215 int val,
216 int val2,
217 long mask)
173{ 218{
174 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
175 struct ad7606_state *st = iio_priv(indio_dev); 219 struct ad7606_state *st = iio_priv(indio_dev);
176 unsigned long lval; 220 int values[3];
177 int ret; 221 int ret;
178 222
179 ret = kstrtoul(buf, 10, &lval); 223 switch (mask) {
180 if (ret) 224 case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
181 return ret; 225 if (val2)
226 return -EINVAL;
227 ret = ad7606_oversampling_get_index(val);
228 if (ret < 0)
229 return ret;
182 230
183 ret = ad7606_oversampling_get_index(lval); 231 values[0] = (ret >> 0) & 1;
184 if (ret < 0) { 232 values[1] = (ret >> 1) & 1;
185 dev_err(dev, "oversampling %lu is not supported\n", lval); 233 values[2] = (ret >> 2) & 1;
186 return ret;
187 }
188 234
189 mutex_lock(&indio_dev->mlock); 235 mutex_lock(&indio_dev->mlock);
190 gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); 236 gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
191 gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); 237 values);
192 gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); 238 st->oversampling = val;
193 st->oversampling = lval; 239 mutex_unlock(&indio_dev->mlock);
194 mutex_unlock(&indio_dev->mlock);
195 240
196 return count; 241 return 0;
242 default:
243 return -EINVAL;
244 }
197} 245}
198 246
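A concrete instance of the mapping above, assuming the OS pins follow the usual AD7606 encoding: requesting an oversampling ratio of 8 selects index 3 in the supported table, so the bits driven onto the pins are:

/*
 * index 3 -> values[0] = 1, values[1] = 1, values[2] = 0,
 * i.e. OS2..OS0 = 0b011, the AD7606 setting for 8x oversampling.
 */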
199static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR, 247static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
200 ad7606_show_oversampling_ratio,
201 ad7606_store_oversampling_ratio, 0);
202static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
203 248
204static struct attribute *ad7606_attributes_os_and_range[] = { 249static struct attribute *ad7606_attributes_os_and_range[] = {
205 &iio_dev_attr_in_voltage_range.dev_attr.attr, 250 &iio_dev_attr_in_voltage_range.dev_attr.attr,
206 &iio_const_attr_in_voltage_range_available.dev_attr.attr, 251 &iio_const_attr_in_voltage_range_available.dev_attr.attr,
207 &iio_dev_attr_oversampling_ratio.dev_attr.attr,
208 &iio_const_attr_oversampling_ratio_available.dev_attr.attr, 252 &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
209 NULL, 253 NULL,
210}; 254};
@@ -214,7 +258,6 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = {
214}; 258};
215 259
216static struct attribute *ad7606_attributes_os[] = { 260static struct attribute *ad7606_attributes_os[] = {
217 &iio_dev_attr_oversampling_ratio.dev_attr.attr,
218 &iio_const_attr_oversampling_ratio_available.dev_attr.attr, 261 &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
219 NULL, 262 NULL,
220}; 263};
@@ -241,6 +284,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
241 .address = num, \ 284 .address = num, \
242 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ 285 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
243 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\ 286 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
287 .info_mask_shared_by_all = \
288 BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
244 .scan_index = num, \ 289 .scan_index = num, \
245 .scan_type = { \ 290 .scan_type = { \
246 .sign = 's', \ 291 .sign = 's', \
@@ -267,20 +312,14 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
267 * More devices added in future 312 * More devices added in future
268 */ 313 */
269 [ID_AD7606_8] = { 314 [ID_AD7606_8] = {
270 .name = "ad7606",
271 .int_vref_mv = 2500,
272 .channels = ad7606_channels, 315 .channels = ad7606_channels,
273 .num_channels = 9, 316 .num_channels = 9,
274 }, 317 },
275 [ID_AD7606_6] = { 318 [ID_AD7606_6] = {
276 .name = "ad7606-6",
277 .int_vref_mv = 2500,
278 .channels = ad7606_channels, 319 .channels = ad7606_channels,
279 .num_channels = 7, 320 .num_channels = 7,
280 }, 321 },
281 [ID_AD7606_4] = { 322 [ID_AD7606_4] = {
282 .name = "ad7606-4",
283 .int_vref_mv = 2500,
284 .channels = ad7606_channels, 323 .channels = ad7606_channels,
285 .num_channels = 5, 324 .num_channels = 5,
286 }, 325 },
@@ -288,119 +327,34 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
288 327
289static int ad7606_request_gpios(struct ad7606_state *st) 328static int ad7606_request_gpios(struct ad7606_state *st)
290{ 329{
291 struct gpio gpio_array[3] = { 330 struct device *dev = st->dev;
292 [0] = { 331
293 .gpio = st->pdata->gpio_os0, 332 st->gpio_convst = devm_gpiod_get(dev, "conversion-start",
294 .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ? 333 GPIOD_OUT_LOW);
295 GPIOF_INIT_HIGH : GPIOF_INIT_LOW), 334 if (IS_ERR(st->gpio_convst))
296 .label = "AD7606_OS0", 335 return PTR_ERR(st->gpio_convst);
297 }, 336
298 [1] = { 337 st->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
299 .gpio = st->pdata->gpio_os1, 338 if (IS_ERR(st->gpio_reset))
300 .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ? 339 return PTR_ERR(st->gpio_reset);
301 GPIOF_INIT_HIGH : GPIOF_INIT_LOW), 340
302 .label = "AD7606_OS1", 341 st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW);
303 }, 342 if (IS_ERR(st->gpio_range))
304 [2] = { 343 return PTR_ERR(st->gpio_range);
305 .gpio = st->pdata->gpio_os2, 344
306 .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ? 345 st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
307 GPIOF_INIT_HIGH : GPIOF_INIT_LOW), 346 GPIOD_OUT_HIGH);
308 .label = "AD7606_OS2", 347 if (IS_ERR(st->gpio_standby))
309 }, 348 return PTR_ERR(st->gpio_standby);
310 }; 349
311 int ret; 350 st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data",
312 351 GPIOD_IN);
313 if (gpio_is_valid(st->pdata->gpio_convst)) { 352 if (IS_ERR(st->gpio_frstdata))
314 ret = gpio_request_one(st->pdata->gpio_convst, 353 return PTR_ERR(st->gpio_frstdata);
315 GPIOF_OUT_INIT_LOW, 354
316 "AD7606_CONVST"); 355 st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio",
317 if (ret) { 356 GPIOD_OUT_LOW);
318 dev_err(st->dev, "failed to request GPIO CONVST\n"); 357 return PTR_ERR_OR_ZERO(st->gpio_os);
319 goto error_ret;
320 }
321 } else {
322 ret = -EIO;
323 goto error_ret;
324 }
325
326 if (gpio_is_valid(st->pdata->gpio_os0) &&
327 gpio_is_valid(st->pdata->gpio_os1) &&
328 gpio_is_valid(st->pdata->gpio_os2)) {
329 ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
330 if (ret < 0)
331 goto error_free_convst;
332 }
333
334 if (gpio_is_valid(st->pdata->gpio_reset)) {
335 ret = gpio_request_one(st->pdata->gpio_reset,
336 GPIOF_OUT_INIT_LOW,
337 "AD7606_RESET");
338 if (ret < 0)
339 goto error_free_os;
340 }
341
342 if (gpio_is_valid(st->pdata->gpio_range)) {
343 ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
344 ((st->range == 10000) ? GPIOF_INIT_HIGH :
345 GPIOF_INIT_LOW), "AD7606_RANGE");
346 if (ret < 0)
347 goto error_free_reset;
348 }
349 if (gpio_is_valid(st->pdata->gpio_stby)) {
350 ret = gpio_request_one(st->pdata->gpio_stby,
351 GPIOF_OUT_INIT_HIGH,
352 "AD7606_STBY");
353 if (ret < 0)
354 goto error_free_range;
355 }
356
357 if (gpio_is_valid(st->pdata->gpio_frstdata)) {
358 ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
359 "AD7606_FRSTDATA");
360 if (ret < 0)
361 goto error_free_stby;
362 }
363
364 return 0;
365
366error_free_stby:
367 if (gpio_is_valid(st->pdata->gpio_stby))
368 gpio_free(st->pdata->gpio_stby);
369error_free_range:
370 if (gpio_is_valid(st->pdata->gpio_range))
371 gpio_free(st->pdata->gpio_range);
372error_free_reset:
373 if (gpio_is_valid(st->pdata->gpio_reset))
374 gpio_free(st->pdata->gpio_reset);
375error_free_os:
376 if (gpio_is_valid(st->pdata->gpio_os0) &&
377 gpio_is_valid(st->pdata->gpio_os1) &&
378 gpio_is_valid(st->pdata->gpio_os2))
379 gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
380error_free_convst:
381 gpio_free(st->pdata->gpio_convst);
382error_ret:
383 return ret;
384}
385
386static void ad7606_free_gpios(struct ad7606_state *st)
387{
388 if (gpio_is_valid(st->pdata->gpio_frstdata))
389 gpio_free(st->pdata->gpio_frstdata);
390 if (gpio_is_valid(st->pdata->gpio_stby))
391 gpio_free(st->pdata->gpio_stby);
392 if (gpio_is_valid(st->pdata->gpio_range))
393 gpio_free(st->pdata->gpio_range);
394 if (gpio_is_valid(st->pdata->gpio_reset))
395 gpio_free(st->pdata->gpio_reset);
396 if (gpio_is_valid(st->pdata->gpio_os0) &&
397 gpio_is_valid(st->pdata->gpio_os1) &&
398 gpio_is_valid(st->pdata->gpio_os2)) {
399 gpio_free(st->pdata->gpio_os2);
400 gpio_free(st->pdata->gpio_os1);
401 gpio_free(st->pdata->gpio_os0);
402 }
403 gpio_free(st->pdata->gpio_convst);
404} 358}
405 359
406/** 360/**
@@ -429,12 +383,14 @@ static const struct iio_info ad7606_info_no_os_or_range = {
429static const struct iio_info ad7606_info_os_and_range = { 383static const struct iio_info ad7606_info_os_and_range = {
430 .driver_module = THIS_MODULE, 384 .driver_module = THIS_MODULE,
431 .read_raw = &ad7606_read_raw, 385 .read_raw = &ad7606_read_raw,
386 .write_raw = &ad7606_write_raw,
432 .attrs = &ad7606_attribute_group_os_and_range, 387 .attrs = &ad7606_attribute_group_os_and_range,
433}; 388};
434 389
435static const struct iio_info ad7606_info_os = { 390static const struct iio_info ad7606_info_os = {
436 .driver_module = THIS_MODULE, 391 .driver_module = THIS_MODULE,
437 .read_raw = &ad7606_read_raw, 392 .read_raw = &ad7606_read_raw,
393 .write_raw = &ad7606_write_raw,
438 .attrs = &ad7606_attribute_group_os, 394 .attrs = &ad7606_attribute_group_os,
439}; 395};
440 396
@@ -444,81 +400,73 @@ static const struct iio_info ad7606_info_range = {
444 .attrs = &ad7606_attribute_group_range, 400 .attrs = &ad7606_attribute_group_range,
445}; 401};
446 402
447struct iio_dev *ad7606_probe(struct device *dev, int irq, 403int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
448 void __iomem *base_address, 404 const char *name, unsigned int id,
449 unsigned int id, 405 const struct ad7606_bus_ops *bops)
450 const struct ad7606_bus_ops *bops)
451{ 406{
452 struct ad7606_platform_data *pdata = dev->platform_data;
453 struct ad7606_state *st; 407 struct ad7606_state *st;
454 int ret; 408 int ret;
455 struct iio_dev *indio_dev; 409 struct iio_dev *indio_dev;
456 410
457 indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); 411 indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
458 if (!indio_dev) 412 if (!indio_dev)
459 return ERR_PTR(-ENOMEM); 413 return -ENOMEM;
460 414
461 st = iio_priv(indio_dev); 415 st = iio_priv(indio_dev);
462 416
463 st->dev = dev; 417 st->dev = dev;
464 st->bops = bops; 418 st->bops = bops;
465 st->base_address = base_address; 419 st->base_address = base_address;
466 st->range = pdata->default_range == 10000 ? 10000 : 5000; 420 st->range = 5000;
421 st->oversampling = 1;
422 INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
467 423
468 ret = ad7606_oversampling_get_index(pdata->default_os); 424 st->reg = devm_regulator_get(dev, "avcc");
469 if (ret < 0) { 425 if (IS_ERR(st->reg))
470 dev_warn(dev, "oversampling %d is not supported\n", 426 return PTR_ERR(st->reg);
471 pdata->default_os);
472 st->oversampling = 0;
473 } else {
474 st->oversampling = pdata->default_os;
475 }
476 427
477 st->reg = devm_regulator_get(dev, "vcc"); 428 ret = regulator_enable(st->reg);
478 if (!IS_ERR(st->reg)) { 429 if (ret) {
479 ret = regulator_enable(st->reg); 430 dev_err(dev, "Failed to enable specified AVcc supply\n");
480 if (ret) 431 return ret;
481 return ERR_PTR(ret);
482 } 432 }
483 433
484 st->pdata = pdata; 434 ret = ad7606_request_gpios(st);
435 if (ret)
436 goto error_disable_reg;
437
485 st->chip_info = &ad7606_chip_info_tbl[id]; 438 st->chip_info = &ad7606_chip_info_tbl[id];
486 439
487 indio_dev->dev.parent = dev; 440 indio_dev->dev.parent = dev;
488 if (gpio_is_valid(st->pdata->gpio_os0) && 441 if (st->gpio_os) {
489 gpio_is_valid(st->pdata->gpio_os1) && 442 if (st->gpio_range)
490 gpio_is_valid(st->pdata->gpio_os2)) {
491 if (gpio_is_valid(st->pdata->gpio_range))
492 indio_dev->info = &ad7606_info_os_and_range; 443 indio_dev->info = &ad7606_info_os_and_range;
493 else 444 else
494 indio_dev->info = &ad7606_info_os; 445 indio_dev->info = &ad7606_info_os;
495 } else { 446 } else {
496 if (gpio_is_valid(st->pdata->gpio_range)) 447 if (st->gpio_range)
497 indio_dev->info = &ad7606_info_range; 448 indio_dev->info = &ad7606_info_range;
498 else 449 else
499 indio_dev->info = &ad7606_info_no_os_or_range; 450 indio_dev->info = &ad7606_info_no_os_or_range;
500 } 451 }
501 indio_dev->modes = INDIO_DIRECT_MODE; 452 indio_dev->modes = INDIO_DIRECT_MODE;
502 indio_dev->name = st->chip_info->name; 453 indio_dev->name = name;
503 indio_dev->channels = st->chip_info->channels; 454 indio_dev->channels = st->chip_info->channels;
504 indio_dev->num_channels = st->chip_info->num_channels; 455 indio_dev->num_channels = st->chip_info->num_channels;
505 456
506 init_waitqueue_head(&st->wq_data_avail); 457 init_waitqueue_head(&st->wq_data_avail);
507 458
508 ret = ad7606_request_gpios(st);
509 if (ret)
510 goto error_disable_reg;
511
512 ret = ad7606_reset(st); 459 ret = ad7606_reset(st);
513 if (ret) 460 if (ret)
514 dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n"); 461 dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
515 462
516 ret = request_irq(irq, ad7606_interrupt, 463 ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name,
517 IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev); 464 indio_dev);
518 if (ret) 465 if (ret)
519 goto error_free_gpios; 466 goto error_disable_reg;
520 467
521 ret = ad7606_register_ring_funcs_and_init(indio_dev); 468 ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler,
469 NULL, NULL);
522 if (ret) 470 if (ret)
523 goto error_free_irq; 471 goto error_free_irq;
524 472
@@ -526,35 +474,31 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
526 if (ret) 474 if (ret)
527 goto error_unregister_ring; 475 goto error_unregister_ring;
528 476
529 return indio_dev; 477 dev_set_drvdata(dev, indio_dev);
478
479 return 0;
530error_unregister_ring: 480error_unregister_ring:
531 ad7606_ring_cleanup(indio_dev); 481 iio_triggered_buffer_cleanup(indio_dev);
532 482
533error_free_irq: 483error_free_irq:
534 free_irq(irq, indio_dev); 484 free_irq(irq, indio_dev);
535 485
536error_free_gpios:
537 ad7606_free_gpios(st);
538
539error_disable_reg: 486error_disable_reg:
540 if (!IS_ERR(st->reg)) 487 regulator_disable(st->reg);
541 regulator_disable(st->reg); 488 return ret;
542 return ERR_PTR(ret);
543} 489}
544EXPORT_SYMBOL_GPL(ad7606_probe); 490EXPORT_SYMBOL_GPL(ad7606_probe);
545 491
546int ad7606_remove(struct iio_dev *indio_dev, int irq) 492int ad7606_remove(struct device *dev, int irq)
547{ 493{
494 struct iio_dev *indio_dev = dev_get_drvdata(dev);
548 struct ad7606_state *st = iio_priv(indio_dev); 495 struct ad7606_state *st = iio_priv(indio_dev);
549 496
550 iio_device_unregister(indio_dev); 497 iio_device_unregister(indio_dev);
551 ad7606_ring_cleanup(indio_dev); 498 iio_triggered_buffer_cleanup(indio_dev);
552 499
553 free_irq(irq, indio_dev); 500 free_irq(irq, indio_dev);
554 if (!IS_ERR(st->reg)) 501 regulator_disable(st->reg);
555 regulator_disable(st->reg);
556
557 ad7606_free_gpios(st);
558 502
559 return 0; 503 return 0;
560} 504}
@@ -567,10 +511,9 @@ static int ad7606_suspend(struct device *dev)
567 struct iio_dev *indio_dev = dev_get_drvdata(dev); 511 struct iio_dev *indio_dev = dev_get_drvdata(dev);
568 struct ad7606_state *st = iio_priv(indio_dev); 512 struct ad7606_state *st = iio_priv(indio_dev);
569 513
570 if (gpio_is_valid(st->pdata->gpio_stby)) { 514 if (st->gpio_standby) {
571 if (gpio_is_valid(st->pdata->gpio_range)) 515 gpiod_set_value(st->gpio_range, 1);
572 gpio_set_value(st->pdata->gpio_range, 1); 516 gpiod_set_value(st->gpio_standby, 0);
573 gpio_set_value(st->pdata->gpio_stby, 0);
574 } 517 }
575 518
576 return 0; 519 return 0;
@@ -581,12 +524,9 @@ static int ad7606_resume(struct device *dev)
581 struct iio_dev *indio_dev = dev_get_drvdata(dev); 524 struct iio_dev *indio_dev = dev_get_drvdata(dev);
582 struct ad7606_state *st = iio_priv(indio_dev); 525 struct ad7606_state *st = iio_priv(indio_dev);
583 526
584 if (gpio_is_valid(st->pdata->gpio_stby)) { 527 if (st->gpio_standby) {
585 if (gpio_is_valid(st->pdata->gpio_range)) 528 gpiod_set_value(st->gpio_range, st->range == 10000);
586 gpio_set_value(st->pdata->gpio_range, 529 gpiod_set_value(st->gpio_standby, 1);
587 st->range == 10000);
588
589 gpio_set_value(st->pdata->gpio_stby, 1);
590 ad7606_reset(st); 530 ad7606_reset(st);
591 } 531 }
592 532
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 39f50440d915..746f9553d2ba 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -9,48 +9,14 @@
9#ifndef IIO_ADC_AD7606_H_ 9#ifndef IIO_ADC_AD7606_H_
10#define IIO_ADC_AD7606_H_ 10#define IIO_ADC_AD7606_H_
11 11
12/*
13 * TODO: struct ad7606_platform_data needs to go into include/linux/iio
14 */
15
16/**
17 * struct ad7606_platform_data - platform/board specific information
18 * @default_os: default oversampling value {0, 2, 4, 8, 16, 32, 64}
19 * @default_range: default range +/-{5000, 10000} mVolt
20 * @gpio_convst: number of gpio connected to the CONVST pin
21 * @gpio_reset: gpio connected to the RESET pin, if not used set to -1
22 * @gpio_range: gpio connected to the RANGE pin, if not used set to -1
23 * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1
24 * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1
25 * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1
26 * @gpio_frstdata: gpio connected to the FRSTDAT pin, if not used set to -1
27 * @gpio_stby: gpio connected to the STBY pin, if not used set to -1
28 */
29
30struct ad7606_platform_data {
31 unsigned int default_os;
32 unsigned int default_range;
33 unsigned int gpio_convst;
34 unsigned int gpio_reset;
35 unsigned int gpio_range;
36 unsigned int gpio_os0;
37 unsigned int gpio_os1;
38 unsigned int gpio_os2;
39 unsigned int gpio_frstdata;
40 unsigned int gpio_stby;
41};
42
43/** 12/**
44 * struct ad7606_chip_info - chip specific information 13 * struct ad7606_chip_info - chip specific information
45 * @name: identification string for chip 14 * @name: identification string for chip
46 * @int_vref_mv: the internal reference voltage
47 * @channels: channel specification 15 * @channels: channel specification
48 * @num_channels: number of channels 16 * @num_channels: number of channels
49 */ 17 */
50 18
51struct ad7606_chip_info { 19struct ad7606_chip_info {
52 const char *name;
53 u16 int_vref_mv;
54 const struct iio_chan_spec *channels; 20 const struct iio_chan_spec *channels;
55 unsigned int num_channels; 21 unsigned int num_channels;
56}; 22};
@@ -62,7 +28,6 @@ struct ad7606_chip_info {
62struct ad7606_state { 28struct ad7606_state {
63 struct device *dev; 29 struct device *dev;
64 const struct ad7606_chip_info *chip_info; 30 const struct ad7606_chip_info *chip_info;
65 struct ad7606_platform_data *pdata;
66 struct regulator *reg; 31 struct regulator *reg;
67 struct work_struct poll_work; 32 struct work_struct poll_work;
68 wait_queue_head_t wq_data_avail; 33 wait_queue_head_t wq_data_avail;
@@ -72,12 +37,19 @@ struct ad7606_state {
72 bool done; 37 bool done;
73 void __iomem *base_address; 38 void __iomem *base_address;
74 39
40 struct gpio_desc *gpio_convst;
41 struct gpio_desc *gpio_reset;
42 struct gpio_desc *gpio_range;
43 struct gpio_desc *gpio_standby;
44 struct gpio_desc *gpio_frstdata;
45 struct gpio_descs *gpio_os;
46
75 /* 47 /*
76 * DMA (thus cache coherency maintenance) requires the 48 * DMA (thus cache coherency maintenance) requires the
77 * transfer buffers to live in their own cache lines. 49 * transfer buffers to live in their own cache lines.
50 * 8 * 16-bit samples + 64-bit timestamp
78 */ 51 */
79 52 unsigned short data[12] ____cacheline_aligned;
80 unsigned short data[8] ____cacheline_aligned;
81}; 53};
82 54
83struct ad7606_bus_ops { 55struct ad7606_bus_ops {
@@ -85,11 +57,10 @@ struct ad7606_bus_ops {
85 int (*read_block)(struct device *, int, void *); 57 int (*read_block)(struct device *, int, void *);
86}; 58};
87 59
88struct iio_dev *ad7606_probe(struct device *dev, int irq, 60int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
89 void __iomem *base_address, unsigned int id, 61 const char *name, unsigned int id,
90 const struct ad7606_bus_ops *bops); 62 const struct ad7606_bus_ops *bops);
91int ad7606_remove(struct iio_dev *indio_dev, int irq); 63int ad7606_remove(struct device *dev, int irq);
92int ad7606_reset(struct ad7606_state *st);
93 64
94enum ad7606_supported_device_ids { 65enum ad7606_supported_device_ids {
95 ID_AD7606_8, 66 ID_AD7606_8,
@@ -97,9 +68,6 @@ enum ad7606_supported_device_ids {
97 ID_AD7606_4 68 ID_AD7606_4
98}; 69};
99 70
100int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
101void ad7606_ring_cleanup(struct iio_dev *indio_dev);
102
103#ifdef CONFIG_PM_SLEEP 71#ifdef CONFIG_PM_SLEEP
104extern const struct dev_pm_ops ad7606_pm_ops; 72extern const struct dev_pm_ops ad7606_pm_ops;
105#define AD7606_PM_OPS (&ad7606_pm_ops) 73#define AD7606_PM_OPS (&ad7606_pm_ops)
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 84d23930fdde..cd6c410c0484 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -49,8 +49,8 @@ static const struct ad7606_bus_ops ad7606_par8_bops = {
49 49
50static int ad7606_par_probe(struct platform_device *pdev) 50static int ad7606_par_probe(struct platform_device *pdev)
51{ 51{
52 const struct platform_device_id *id = platform_get_device_id(pdev);
52 struct resource *res; 53 struct resource *res;
53 struct iio_dev *indio_dev;
54 void __iomem *addr; 54 void __iomem *addr;
55 resource_size_t remap_size; 55 resource_size_t remap_size;
56 int irq; 56 int irq;
@@ -68,26 +68,15 @@ static int ad7606_par_probe(struct platform_device *pdev)
68 68
69 remap_size = resource_size(res); 69 remap_size = resource_size(res);
70 70
71 indio_dev = ad7606_probe(&pdev->dev, irq, addr, 71 return ad7606_probe(&pdev->dev, irq, addr,
72 platform_get_device_id(pdev)->driver_data, 72 id->name, id->driver_data,
73 remap_size > 1 ? &ad7606_par16_bops : 73 remap_size > 1 ? &ad7606_par16_bops :
74 &ad7606_par8_bops); 74 &ad7606_par8_bops);
75
76 if (IS_ERR(indio_dev))
77 return PTR_ERR(indio_dev);
78
79 platform_set_drvdata(pdev, indio_dev);
80
81 return 0;
82} 75}
83 76
84static int ad7606_par_remove(struct platform_device *pdev) 77static int ad7606_par_remove(struct platform_device *pdev)
85{ 78{
86 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 79 return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0));
87
88 ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
89
90 return 0;
91} 80}
92 81
93static const struct platform_device_id ad7606_driver_ids[] = { 82static const struct platform_device_id ad7606_driver_ids[] = {
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
deleted file mode 100644
index 0572df9aad85..000000000000
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright 2011-2012 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2.
5 *
6 */
7
8#include <linux/interrupt.h>
9#include <linux/gpio.h>
10#include <linux/device.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13
14#include <linux/iio/iio.h>
15#include <linux/iio/buffer.h>
16#include <linux/iio/trigger_consumer.h>
17#include <linux/iio/triggered_buffer.h>
18
19#include "ad7606.h"
20
21/**
22 * ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
23 *
24 **/
25static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
26{
27 struct iio_poll_func *pf = p;
28 struct ad7606_state *st = iio_priv(pf->indio_dev);
29
30 gpio_set_value(st->pdata->gpio_convst, 1);
31
32 return IRQ_HANDLED;
33}
34
35/**
36 * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
37 * @work_s: the work struct through which this was scheduled
38 *
39 * Currently there is no option in this driver to disable the saving of
40 * timestamps within the ring.
 41 * The one-copy-at-a-time limit was probably meant to avoid problems if the
 42 * trigger was set far too high and the reads then locked up the computer.
43 **/
44static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
45{
46 struct ad7606_state *st = container_of(work_s, struct ad7606_state,
47 poll_work);
48 struct iio_dev *indio_dev = iio_priv_to_dev(st);
49 __u8 *buf;
50 int ret;
51
52 buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
53 if (!buf)
54 return;
55
56 if (gpio_is_valid(st->pdata->gpio_frstdata)) {
57 ret = st->bops->read_block(st->dev, 1, buf);
58 if (ret)
59 goto done;
60 if (!gpio_get_value(st->pdata->gpio_frstdata)) {
 61 /* This should never happen. However,
 62 * a signal glitch caused by bad PCB design or
 63 * electrostatic discharge could cause an extra read
 64 * or clock. This allows recovery.
65 */
66 ad7606_reset(st);
67 goto done;
68 }
69 ret = st->bops->read_block(st->dev,
70 st->chip_info->num_channels - 1, buf + 2);
71 if (ret)
72 goto done;
73 } else {
74 ret = st->bops->read_block(st->dev,
75 st->chip_info->num_channels, buf);
76 if (ret)
77 goto done;
78 }
79
80 iio_push_to_buffers_with_timestamp(indio_dev, buf,
81 iio_get_time_ns(indio_dev));
82done:
83 gpio_set_value(st->pdata->gpio_convst, 0);
84 iio_trigger_notify_done(indio_dev->trig);
85 kfree(buf);
86}
87
88int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
89{
90 struct ad7606_state *st = iio_priv(indio_dev);
91
92 INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
93
94 return iio_triggered_buffer_setup(indio_dev,
95 &ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh,
96 NULL);
97}
98
99void ad7606_ring_cleanup(struct iio_dev *indio_dev)
100{
101 iio_triggered_buffer_cleanup(indio_dev);
102}
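With ad7606_ring.c removed, the two wrappers it exported collapse into direct core calls: setup becomes a plain iio_triggered_buffer_setup() in ad7606_probe() and, as the ad7606_remove() hunk earlier shows, teardown becomes iio_triggered_buffer_cleanup(). A bare-bones sketch of that pairing with a hypothetical handler (the real driver fills the buffer in its bottom half; that part is omitted here):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

/* assumed threaded trigger handler, not part of this patch */
static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	/* ... capture a sample and push it to the buffer here ... */
	iio_trigger_notify_done(pf->indio_dev->trig);
	return IRQ_HANDLED;
}

static int my_setup_buffer(struct iio_dev *indio_dev)
{
	/* NULL top half: the core simply schedules the threaded handler */
	return iio_triggered_buffer_setup(indio_dev, NULL,
					  my_trigger_handler, NULL);
}

static void my_teardown_buffer(struct iio_dev *indio_dev)
{
	iio_triggered_buffer_cleanup(indio_dev);
}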
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 9587fa86dc69..c9b1f26685f4 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -42,25 +42,16 @@ static const struct ad7606_bus_ops ad7606_spi_bops = {
42 42
43static int ad7606_spi_probe(struct spi_device *spi) 43static int ad7606_spi_probe(struct spi_device *spi)
44{ 44{
45 struct iio_dev *indio_dev; 45 const struct spi_device_id *id = spi_get_device_id(spi);
46 46
47 indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL, 47 return ad7606_probe(&spi->dev, spi->irq, NULL,
48 spi_get_device_id(spi)->driver_data, 48 id->name, id->driver_data,
49 &ad7606_spi_bops); 49 &ad7606_spi_bops);
50
51 if (IS_ERR(indio_dev))
52 return PTR_ERR(indio_dev);
53
54 spi_set_drvdata(spi, indio_dev);
55
56 return 0;
57} 50}
58 51
59static int ad7606_spi_remove(struct spi_device *spi) 52static int ad7606_spi_remove(struct spi_device *spi)
60{ 53{
61 struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev); 54 return ad7606_remove(&spi->dev, spi->irq);
62
63 return ad7606_remove(indio_dev, spi->irq);
64} 55}
65 56
66static const struct spi_device_id ad7606_id[] = { 57static const struct spi_device_id ad7606_id[] = {
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index c9a0c2aa602f..e14960038d3e 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -173,14 +173,16 @@ static int ad7780_probe(struct spi_device *spi)
173 173
174 ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info); 174 ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
175 175
176 st->reg = devm_regulator_get(&spi->dev, "vcc"); 176 st->reg = devm_regulator_get(&spi->dev, "avdd");
177 if (!IS_ERR(st->reg)) { 177 if (IS_ERR(st->reg))
178 ret = regulator_enable(st->reg); 178 return PTR_ERR(st->reg);
179 if (ret) 179
180 return ret; 180 ret = regulator_enable(st->reg);
181 181 if (ret) {
182 voltage_uv = regulator_get_voltage(st->reg); 182 dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
183 return ret;
183 } 184 }
185 voltage_uv = regulator_get_voltage(st->reg);
184 186
185 st->chip_info = 187 st->chip_info =
186 &ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data]; 188 &ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -222,8 +224,7 @@ static int ad7780_probe(struct spi_device *spi)
222error_cleanup_buffer_and_trigger: 224error_cleanup_buffer_and_trigger:
223 ad_sd_cleanup_buffer_and_trigger(indio_dev); 225 ad_sd_cleanup_buffer_and_trigger(indio_dev);
224error_disable_reg: 226error_disable_reg:
225 if (!IS_ERR(st->reg)) 227 regulator_disable(st->reg);
226 regulator_disable(st->reg);
227 228
228 return ret; 229 return ret;
229} 230}
@@ -236,8 +237,7 @@ static int ad7780_remove(struct spi_device *spi)
236 iio_device_unregister(indio_dev); 237 iio_device_unregister(indio_dev);
237 ad_sd_cleanup_buffer_and_trigger(indio_dev); 238 ad_sd_cleanup_buffer_and_trigger(indio_dev);
238 239
239 if (!IS_ERR(st->reg)) 240 regulator_disable(st->reg);
240 regulator_disable(st->reg);
241 241
242 return 0; 242 return 0;
243} 243}
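The ad7780 hunks make the renamed "avdd" supply mandatory: a failed devm_regulator_get() now aborts probe, and the remove/error paths call regulator_disable() unconditionally instead of guarding it with IS_ERR(). A condensed sketch of that probe/remove pairing under hypothetical names (the real probe does more in between):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int my_enable_supply(struct device *dev, struct regulator **reg)
{
	int ret;

	*reg = devm_regulator_get(dev, "avdd");	/* mandatory supply */
	if (IS_ERR(*reg))
		return PTR_ERR(*reg);

	ret = regulator_enable(*reg);
	if (ret)
		dev_err(dev, "Failed to enable AVdd supply\n");
	return ret;
}

/* remove path: the pointer is known to be valid, so no IS_ERR() guard */
static void my_disable_supply(struct regulator *reg)
{
	regulator_disable(reg);
}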
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 5e8115b01011..72551f827382 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -327,7 +327,7 @@ static struct attribute *ad7816_event_attributes[] = {
327 NULL, 327 NULL,
328}; 328};
329 329
330static struct attribute_group ad7816_event_attribute_group = { 330static const struct attribute_group ad7816_event_attribute_group = {
331 .attrs = ad7816_event_attributes, 331 .attrs = ad7816_event_attributes,
332 .name = "events", 332 .name = "events",
333}; 333};
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe59c933..a7d90c8bac5e 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2039,7 +2039,7 @@ static struct attribute *adt7316_event_attributes[] = {
2039 NULL, 2039 NULL,
2040}; 2040};
2041 2041
2042static struct attribute_group adt7316_event_attribute_group = { 2042static const struct attribute_group adt7316_event_attribute_group = {
2043 .attrs = adt7316_event_attributes, 2043 .attrs = adt7316_event_attributes,
2044 .name = "events", 2044 .name = "events",
2045}; 2045};
@@ -2060,7 +2060,7 @@ static struct attribute *adt7516_event_attributes[] = {
2060 NULL, 2060 NULL,
2061}; 2061};
2062 2062
2063static struct attribute_group adt7516_event_attribute_group = { 2063static const struct attribute_group adt7516_event_attribute_group = {
2064 .attrs = adt7516_event_attributes, 2064 .attrs = adt7516_event_attributes,
2065 .name = "events", 2065 .name = "events",
2066}; 2066};
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a077fcfb..6998c3ddfb6a 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -562,7 +562,7 @@ static struct attribute *ad7150_event_attributes[] = {
562 NULL, 562 NULL,
563}; 563};
564 564
565static struct attribute_group ad7150_event_attribute_group = { 565static const struct attribute_group ad7150_event_attribute_group = {
566 .attrs = ad7150_event_attributes, 566 .attrs = ad7150_event_attributes,
567 .name = "events", 567 .name = "events",
568}; 568};
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 485d0a5af53c..b91b50f345bd 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -89,6 +89,7 @@ struct ad7152_chip_info {
89 */ 89 */
90 u8 filter_rate_setup; 90 u8 filter_rate_setup;
91 u8 setup[2]; 91 u8 setup[2];
92 struct mutex state_lock; /* protect hardware state */
92}; 93};
93 94
94static inline ssize_t ad7152_start_calib(struct device *dev, 95static inline ssize_t ad7152_start_calib(struct device *dev,
@@ -115,10 +116,10 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
115 else 116 else
116 regval |= AD7152_CONF_CH2EN; 117 regval |= AD7152_CONF_CH2EN;
117 118
118 mutex_lock(&indio_dev->mlock); 119 mutex_lock(&chip->state_lock);
119 ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval); 120 ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
120 if (ret < 0) { 121 if (ret < 0) {
121 mutex_unlock(&indio_dev->mlock); 122 mutex_unlock(&chip->state_lock);
122 return ret; 123 return ret;
123 } 124 }
124 125
@@ -126,14 +127,15 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
126 mdelay(20); 127 mdelay(20);
127 ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG); 128 ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
128 if (ret < 0) { 129 if (ret < 0) {
129 mutex_unlock(&indio_dev->mlock); 130 mutex_unlock(&chip->state_lock);
130 return ret; 131 return ret;
131 } 132 }
132 } while ((ret == regval) && timeout--); 133 } while ((ret == regval) && timeout--);
133 134
134 mutex_unlock(&indio_dev->mlock); 135 mutex_unlock(&chip->state_lock);
135 return len; 136 return len;
136} 137}
138
137static ssize_t ad7152_start_offset_calib(struct device *dev, 139static ssize_t ad7152_start_offset_calib(struct device *dev,
138 struct device_attribute *attr, 140 struct device_attribute *attr,
139 const char *buf, 141 const char *buf,
@@ -142,6 +144,7 @@ static ssize_t ad7152_start_offset_calib(struct device *dev,
142 return ad7152_start_calib(dev, attr, buf, len, 144 return ad7152_start_calib(dev, attr, buf, len,
143 AD7152_CONF_MODE_OFFS_CAL); 145 AD7152_CONF_MODE_OFFS_CAL);
144} 146}
147
145static ssize_t ad7152_start_gain_calib(struct device *dev, 148static ssize_t ad7152_start_gain_calib(struct device *dev,
146 struct device_attribute *attr, 149 struct device_attribute *attr,
147 const char *buf, 150 const char *buf,
@@ -165,63 +168,12 @@ static const unsigned char ad7152_filter_rate_table[][2] = {
165 {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1}, 168 {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
166}; 169};
167 170
168static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
169 struct device_attribute *attr,
170 char *buf)
171{
172 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
173 struct ad7152_chip_info *chip = iio_priv(indio_dev);
174
175 return sprintf(buf, "%d\n",
176 ad7152_filter_rate_table[chip->filter_rate_setup][0]);
177}
178
179static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
180 struct device_attribute *attr,
181 const char *buf,
182 size_t len)
183{
184 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
185 struct ad7152_chip_info *chip = iio_priv(indio_dev);
186 u8 data;
187 int ret, i;
188
189 ret = kstrtou8(buf, 10, &data);
190 if (ret < 0)
191 return ret;
192
193 for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
194 if (data >= ad7152_filter_rate_table[i][0])
195 break;
196
197 if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
198 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
199
200 mutex_lock(&indio_dev->mlock);
201 ret = i2c_smbus_write_byte_data(chip->client,
202 AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
203 if (ret < 0) {
204 mutex_unlock(&indio_dev->mlock);
205 return ret;
206 }
207
208 chip->filter_rate_setup = i;
209 mutex_unlock(&indio_dev->mlock);
210
211 return len;
212}
213
214static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
215 ad7152_show_filter_rate_setup,
216 ad7152_store_filter_rate_setup);
217
218static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17"); 171static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
219 172
220static IIO_CONST_ATTR(in_capacitance_scale_available, 173static IIO_CONST_ATTR(in_capacitance_scale_available,
221 "0.000061050 0.000030525 0.000015263 0.000007631"); 174 "0.000061050 0.000030525 0.000015263 0.000007631");
222 175
223static struct attribute *ad7152_attributes[] = { 176static struct attribute *ad7152_attributes[] = {
224 &iio_dev_attr_sampling_frequency.dev_attr.attr,
225 &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr, 177 &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
226 &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr, 178 &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
227 &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr, 179 &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
@@ -247,6 +199,51 @@ static const int ad7152_scale_table[] = {
247 30525, 7631, 15263, 61050 199 30525, 7631, 15263, 61050
248}; 200};
249 201
202/**
203 * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
204 *
205 * lock must be held
206 **/
207static int ad7152_read_raw_samp_freq(struct device *dev, int *val)
208{
209 struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
210
211 *val = ad7152_filter_rate_table[chip->filter_rate_setup][0];
212
213 return 0;
214}
215
216/**
217 * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
218 *
219 * lock must be held
220 **/
221static int ad7152_write_raw_samp_freq(struct device *dev, int val)
222{
223 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
224 struct ad7152_chip_info *chip = iio_priv(indio_dev);
225 int ret, i;
226
227 for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
228 if (val >= ad7152_filter_rate_table[i][0])
229 break;
230
231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
233
234 mutex_lock(&chip->state_lock);
235 ret = i2c_smbus_write_byte_data(chip->client,
236 AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
237 if (ret < 0) {
238 mutex_unlock(&chip->state_lock);
239 return ret;
240 }
241
242 chip->filter_rate_setup = i;
243 mutex_unlock(&chip->state_lock);
244
245 return ret;
246}
250static int ad7152_write_raw(struct iio_dev *indio_dev, 247static int ad7152_write_raw(struct iio_dev *indio_dev,
251 struct iio_chan_spec const *chan, 248 struct iio_chan_spec const *chan,
252 int val, 249 int val,
@@ -256,7 +253,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
256 struct ad7152_chip_info *chip = iio_priv(indio_dev); 253 struct ad7152_chip_info *chip = iio_priv(indio_dev);
257 int ret, i; 254 int ret, i;
258 255
259 mutex_lock(&indio_dev->mlock); 256 mutex_lock(&chip->state_lock);
260 257
261 switch (mask) { 258 switch (mask) {
262 case IIO_CHAN_INFO_CALIBSCALE: 259 case IIO_CHAN_INFO_CALIBSCALE:
@@ -309,14 +306,26 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
309 306
310 ret = 0; 307 ret = 0;
311 break; 308 break;
309 case IIO_CHAN_INFO_SAMP_FREQ:
310 if (val2) {
311 ret = -EINVAL;
312 goto out;
313 }
314 ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val);
315 if (ret < 0)
316 goto out;
317
318 ret = 0;
319 break;
312 default: 320 default:
313 ret = -EINVAL; 321 ret = -EINVAL;
314 } 322 }
315 323
316out: 324out:
317 mutex_unlock(&indio_dev->mlock); 325 mutex_unlock(&chip->state_lock);
318 return ret; 326 return ret;
319} 327}
328
320static int ad7152_read_raw(struct iio_dev *indio_dev, 329static int ad7152_read_raw(struct iio_dev *indio_dev,
321 struct iio_chan_spec const *chan, 330 struct iio_chan_spec const *chan,
322 int *val, int *val2, 331 int *val, int *val2,
@@ -326,7 +335,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
326 int ret; 335 int ret;
327 u8 regval = 0; 336 u8 regval = 0;
328 337
329 mutex_lock(&indio_dev->mlock); 338 mutex_lock(&chip->state_lock);
330 339
331 switch (mask) { 340 switch (mask) {
332 case IIO_CHAN_INFO_RAW: 341 case IIO_CHAN_INFO_RAW:
@@ -403,11 +412,18 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
403 412
404 ret = IIO_VAL_INT_PLUS_NANO; 413 ret = IIO_VAL_INT_PLUS_NANO;
405 break; 414 break;
415 case IIO_CHAN_INFO_SAMP_FREQ:
416 ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val);
417 if (ret < 0)
418 goto out;
419
420 ret = IIO_VAL_INT;
421 break;
406 default: 422 default:
407 ret = -EINVAL; 423 ret = -EINVAL;
408 } 424 }
409out: 425out:
410 mutex_unlock(&indio_dev->mlock); 426 mutex_unlock(&chip->state_lock);
411 return ret; 427 return ret;
412} 428}
413 429
@@ -440,6 +456,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
440 BIT(IIO_CHAN_INFO_CALIBSCALE) | 456 BIT(IIO_CHAN_INFO_CALIBSCALE) |
441 BIT(IIO_CHAN_INFO_CALIBBIAS) | 457 BIT(IIO_CHAN_INFO_CALIBBIAS) |
442 BIT(IIO_CHAN_INFO_SCALE), 458 BIT(IIO_CHAN_INFO_SCALE),
459 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
443 }, { 460 }, {
444 .type = IIO_CAPACITANCE, 461 .type = IIO_CAPACITANCE,
445 .differential = 1, 462 .differential = 1,
@@ -450,6 +467,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
450 BIT(IIO_CHAN_INFO_CALIBSCALE) | 467 BIT(IIO_CHAN_INFO_CALIBSCALE) |
451 BIT(IIO_CHAN_INFO_CALIBBIAS) | 468 BIT(IIO_CHAN_INFO_CALIBBIAS) |
452 BIT(IIO_CHAN_INFO_SCALE), 469 BIT(IIO_CHAN_INFO_SCALE),
470 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
453 }, { 471 }, {
454 .type = IIO_CAPACITANCE, 472 .type = IIO_CAPACITANCE,
455 .indexed = 1, 473 .indexed = 1,
@@ -458,6 +476,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
458 BIT(IIO_CHAN_INFO_CALIBSCALE) | 476 BIT(IIO_CHAN_INFO_CALIBSCALE) |
459 BIT(IIO_CHAN_INFO_CALIBBIAS) | 477 BIT(IIO_CHAN_INFO_CALIBBIAS) |
460 BIT(IIO_CHAN_INFO_SCALE), 478 BIT(IIO_CHAN_INFO_SCALE),
479 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
461 }, { 480 }, {
462 .type = IIO_CAPACITANCE, 481 .type = IIO_CAPACITANCE,
463 .differential = 1, 482 .differential = 1,
@@ -468,8 +487,10 @@ static const struct iio_chan_spec ad7152_channels[] = {
468 BIT(IIO_CHAN_INFO_CALIBSCALE) | 487 BIT(IIO_CHAN_INFO_CALIBSCALE) |
469 BIT(IIO_CHAN_INFO_CALIBBIAS) | 488 BIT(IIO_CHAN_INFO_CALIBBIAS) |
470 BIT(IIO_CHAN_INFO_SCALE), 489 BIT(IIO_CHAN_INFO_SCALE),
490 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
471 } 491 }
472}; 492};
493
473/* 494/*
474 * device probe and remove 495 * device probe and remove
475 */ 496 */
@@ -489,6 +510,7 @@ static int ad7152_probe(struct i2c_client *client,
489 i2c_set_clientdata(client, indio_dev); 510 i2c_set_clientdata(client, indio_dev);
490 511
491 chip->client = client; 512 chip->client = client;
513 mutex_init(&chip->state_lock);
492 514
493 /* Establish that the iio_dev is a child of the i2c device */ 515 /* Establish that the iio_dev is a child of the i2c device */
494 indio_dev->name = id->name; 516 indio_dev->name = id->name;
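The ad7152 hunks retire the hand-rolled sampling_frequency sysfs attribute in favour of the core-managed IIO_CHAN_INFO_SAMP_FREQ path, and replace indio_dev->mlock with a driver-private state_lock. A minimal sketch of the resulting wiring, using an assumed my_ prefix (the real read_raw/write_raw handle several more cases):

#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>

struct my_chip {
	struct mutex state_lock;	/* protects cached hardware state */
	int samp_freq;			/* cached rate in Hz */
};

static const struct iio_chan_spec my_channels[] = {
	{
		.type = IIO_CAPACITANCE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		/* the core now creates one shared "sampling_frequency" file */
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};

static int my_read_raw(struct iio_dev *indio_dev,
		       struct iio_chan_spec const *chan,
		       int *val, int *val2, long mask)
{
	struct my_chip *chip = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&chip->state_lock);
		*val = chip->samp_freq;
		mutex_unlock(&chip->state_lock);
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}

From userspace only the attribute's owner changes: the rate is still read and written through .../iio:deviceX/sampling_frequency.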
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 5771d4ee8ef1..81f8b9ee1120 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -70,8 +70,10 @@
70#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0) 70#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
71 71
72/* Config Register Bit Designations (AD7746_REG_CFG) */ 72/* Config Register Bit Designations (AD7746_REG_CFG) */
73#define AD7746_CONF_VTFS(x) ((x) << 6) 73#define AD7746_CONF_VTFS_SHIFT 6
74#define AD7746_CONF_CAPFS(x) ((x) << 3) 74#define AD7746_CONF_CAPFS_SHIFT 3
75#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
76#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
75#define AD7746_CONF_MODE_IDLE (0 << 0) 77#define AD7746_CONF_MODE_IDLE (0 << 0)
76#define AD7746_CONF_MODE_CONT_CONV (1 << 0) 78#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
77#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0) 79#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
@@ -122,7 +124,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
122 .indexed = 1, 124 .indexed = 1,
123 .channel = 0, 125 .channel = 0,
124 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 126 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
125 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 127 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
128 BIT(IIO_CHAN_INFO_SAMP_FREQ),
126 .address = AD7746_REG_VT_DATA_HIGH << 8 | 129 .address = AD7746_REG_VT_DATA_HIGH << 8 |
127 AD7746_VTSETUP_VTMD_EXT_VIN, 130 AD7746_VTSETUP_VTMD_EXT_VIN,
128 }, 131 },
@@ -132,7 +135,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
132 .channel = 1, 135 .channel = 1,
133 .extend_name = "supply", 136 .extend_name = "supply",
134 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 137 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
135 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 138 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
139 BIT(IIO_CHAN_INFO_SAMP_FREQ),
136 .address = AD7746_REG_VT_DATA_HIGH << 8 | 140 .address = AD7746_REG_VT_DATA_HIGH << 8 |
137 AD7746_VTSETUP_VTMD_VDD_MON, 141 AD7746_VTSETUP_VTMD_VDD_MON,
138 }, 142 },
@@ -159,7 +163,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
159 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 163 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
160 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), 164 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
161 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | 165 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
162 BIT(IIO_CHAN_INFO_SCALE), 166 BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
163 .address = AD7746_REG_CAP_DATA_HIGH << 8, 167 .address = AD7746_REG_CAP_DATA_HIGH << 8,
164 }, 168 },
165 [CIN1_DIFF] = { 169 [CIN1_DIFF] = {
@@ -171,7 +175,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
171 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 175 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
172 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), 176 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
173 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | 177 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
174 BIT(IIO_CHAN_INFO_SCALE), 178 BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
175 .address = AD7746_REG_CAP_DATA_HIGH << 8 | 179 .address = AD7746_REG_CAP_DATA_HIGH << 8 |
176 AD7746_CAPSETUP_CAPDIFF 180 AD7746_CAPSETUP_CAPDIFF
177 }, 181 },
@@ -182,7 +186,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
182 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 186 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
183 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), 187 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
184 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | 188 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
185 BIT(IIO_CHAN_INFO_SCALE), 189 BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
186 .address = AD7746_REG_CAP_DATA_HIGH << 8 | 190 .address = AD7746_REG_CAP_DATA_HIGH << 8 |
187 AD7746_CAPSETUP_CIN2, 191 AD7746_CAPSETUP_CIN2,
188 }, 192 },
@@ -195,7 +199,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
195 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 199 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
196 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), 200 BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
197 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | 201 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
198 BIT(IIO_CHAN_INFO_SCALE), 202 BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
199 .address = AD7746_REG_CAP_DATA_HIGH << 8 | 203 .address = AD7746_REG_CAP_DATA_HIGH << 8 |
200 AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2, 204 AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
201 } 205 }
@@ -215,15 +219,16 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
215 struct iio_chan_spec const *chan) 219 struct iio_chan_spec const *chan)
216{ 220{
217 struct ad7746_chip_info *chip = iio_priv(indio_dev); 221 struct ad7746_chip_info *chip = iio_priv(indio_dev);
218 int ret, delay; 222 int ret, delay, idx;
219 u8 vt_setup, cap_setup; 223 u8 vt_setup, cap_setup;
220 224
221 switch (chan->type) { 225 switch (chan->type) {
222 case IIO_CAPACITANCE: 226 case IIO_CAPACITANCE:
223 cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN; 227 cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
224 vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN; 228 vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
225 delay = ad7746_cap_filter_rate_table[(chip->config >> 3) & 229 idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
226 0x7][1]; 230 AD7746_CONF_CAPFS_SHIFT;
231 delay = ad7746_cap_filter_rate_table[idx][1];
227 232
228 if (chip->capdac_set != chan->channel) { 233 if (chip->capdac_set != chan->channel) {
229 ret = i2c_smbus_write_byte_data(chip->client, 234 ret = i2c_smbus_write_byte_data(chip->client,
@@ -244,8 +249,9 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
244 case IIO_TEMP: 249 case IIO_TEMP:
245 vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN; 250 vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
246 cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN; 251 cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
247 delay = ad7746_cap_filter_rate_table[(chip->config >> 6) & 252 idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
248 0x3][1]; 253 AD7746_CONF_VTFS_SHIFT;
254 delay = ad7746_cap_filter_rate_table[idx][1];
249 break; 255 break;
250 default: 256 default:
251 return -EINVAL; 257 return -EINVAL;
@@ -355,101 +361,47 @@ static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
355static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration, 361static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
356 S_IWUSR, NULL, ad7746_start_gain_calib, VIN); 362 S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
357 363
358static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev, 364static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
359 struct device_attribute *attr, 365 int val)
360 char *buf)
361{ 366{
362 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 367 int i;
363 struct ad7746_chip_info *chip = iio_priv(indio_dev);
364
365 return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
366 (chip->config >> 3) & 0x7][0]);
367}
368
369static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
370 struct device_attribute *attr,
371 const char *buf,
372 size_t len)
373{
374 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
375 struct ad7746_chip_info *chip = iio_priv(indio_dev);
376 u8 data;
377 int ret, i;
378
379 ret = kstrtou8(buf, 10, &data);
380 if (ret < 0)
381 return ret;
382 368
383 for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++) 369 for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
384 if (data >= ad7746_cap_filter_rate_table[i][0]) 370 if (val >= ad7746_cap_filter_rate_table[i][0])
385 break; 371 break;
386 372
387 if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table)) 373 if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
388 i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1; 374 i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
389 375
390 mutex_lock(&indio_dev->mlock); 376 chip->config &= ~AD7746_CONF_CAPFS_MASK;
391 chip->config &= ~AD7746_CONF_CAPFS(0x7); 377 chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
392 chip->config |= AD7746_CONF_CAPFS(i);
393 mutex_unlock(&indio_dev->mlock);
394 378
395 return len; 379 return 0;
396} 380}
397 381
398static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev, 382static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
399 struct device_attribute *attr, 383 int val)
400 char *buf)
401{ 384{
402 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 385 int i;
403 struct ad7746_chip_info *chip = iio_priv(indio_dev);
404
405 return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
406 (chip->config >> 6) & 0x3][0]);
407}
408
409static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
410 struct device_attribute *attr,
411 const char *buf,
412 size_t len)
413{
414 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
415 struct ad7746_chip_info *chip = iio_priv(indio_dev);
416 u8 data;
417 int ret, i;
418
419 ret = kstrtou8(buf, 10, &data);
420 if (ret < 0)
421 return ret;
422 386
423 for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++) 387 for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
424 if (data >= ad7746_vt_filter_rate_table[i][0]) 388 if (val >= ad7746_vt_filter_rate_table[i][0])
425 break; 389 break;
426 390
427 if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table)) 391 if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
428 i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1; 392 i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
429 393
430 mutex_lock(&indio_dev->mlock); 394 chip->config &= ~AD7746_CONF_VTFS_MASK;
431 chip->config &= ~AD7746_CONF_VTFS(0x3); 395 chip->config |= i << AD7746_CONF_VTFS_SHIFT;
432 chip->config |= AD7746_CONF_VTFS(i);
433 mutex_unlock(&indio_dev->mlock);
434 396
435 return len; 397 return 0;
436} 398}
437 399
438static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
439 S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
440 ad7746_store_cap_filter_rate_setup, 0);
441
442static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
443 S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
444 ad7746_store_vt_filter_rate_setup, 0);
445
446static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8"); 400static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
447static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available, 401static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
448 "91 84 50 26 16 13 11 9"); 402 "91 84 50 26 16 13 11 9");
449 403
450static struct attribute *ad7746_attributes[] = { 404static struct attribute *ad7746_attributes[] = {
451 &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
452 &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
453 &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr, 405 &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
454 &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr, 406 &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
455 &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr, 407 &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
@@ -547,6 +499,23 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
547 499
548 ret = 0; 500 ret = 0;
549 break; 501 break;
502 case IIO_CHAN_INFO_SAMP_FREQ:
503 if (val2) {
504 ret = -EINVAL;
505 goto out;
506 }
507
508 switch (chan->type) {
509 case IIO_CAPACITANCE:
510 ret = ad7746_store_cap_filter_rate_setup(chip, val);
511 break;
512 case IIO_VOLTAGE:
513 ret = ad7746_store_vt_filter_rate_setup(chip, val);
514 break;
515 default:
516 ret = -EINVAL;
517 }
518 break;
550 default: 519 default:
551 ret = -EINVAL; 520 ret = -EINVAL;
552 } 521 }
@@ -562,7 +531,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
562 long mask) 531 long mask)
563{ 532{
564 struct ad7746_chip_info *chip = iio_priv(indio_dev); 533 struct ad7746_chip_info *chip = iio_priv(indio_dev);
565 int ret, delay; 534 int ret, delay, idx;
566 u8 regval, reg; 535 u8 regval, reg;
567 536
568 mutex_lock(&indio_dev->mlock); 537 mutex_lock(&indio_dev->mlock);
@@ -667,6 +636,24 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
667 } 636 }
668 637
669 break; 638 break;
639 case IIO_CHAN_INFO_SAMP_FREQ:
640 switch (chan->type) {
641 case IIO_CAPACITANCE:
642 idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
643 AD7746_CONF_CAPFS_SHIFT;
644 *val = ad7746_cap_filter_rate_table[idx][0];
645 ret = IIO_VAL_INT;
646 break;
647 case IIO_VOLTAGE:
648 idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
649 AD7746_CONF_VTFS_SHIFT;
650 *val = ad7746_vt_filter_rate_table[idx][0];
651 ret = IIO_VAL_INT;
652 break;
653 default:
654 ret = -EINVAL;
655 }
656 break;
670 default: 657 default:
671 ret = -EINVAL; 658 ret = -EINVAL;
672 } 659 }
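Alongside the same IIO_CHAN_INFO_SAMP_FREQ migration, the ad7746 hunks replace open-coded extractions such as (chip->config >> 3) & 0x7 with named GENMASK()/shift macros. A small illustration of that idiom, mirroring the CAPFS field but under assumed my_ names:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_CONF_CAPFS_SHIFT	3
#define MY_CONF_CAPFS_MASK	GENMASK(5, 3)	/* bits 5..3 of the config byte */

/* mask first, then shift, so the field name documents the arithmetic */
static unsigned int my_get_capfs(u8 config)
{
	return (config & MY_CONF_CAPFS_MASK) >> MY_CONF_CAPFS_SHIFT;
}

static u8 my_set_capfs(u8 config, unsigned int idx)
{
	config &= ~MY_CONF_CAPFS_MASK;
	return config | (idx << MY_CONF_CAPFS_SHIFT);
}

The result is identical to the old (config >> 3) & 0x7, but the mask and shift now carry the field's name and width in one place.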
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 358400b22d33..a5b2f068168d 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -204,7 +204,6 @@ static int ad9832_probe(struct spi_device *spi)
204 struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev); 204 struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
205 struct iio_dev *indio_dev; 205 struct iio_dev *indio_dev;
206 struct ad9832_state *st; 206 struct ad9832_state *st;
207 struct regulator *reg;
208 int ret; 207 int ret;
209 208
210 if (!pdata) { 209 if (!pdata) {
@@ -212,21 +211,35 @@ static int ad9832_probe(struct spi_device *spi)
212 return -ENODEV; 211 return -ENODEV;
213 } 212 }
214 213
215 reg = devm_regulator_get(&spi->dev, "vcc");
216 if (!IS_ERR(reg)) {
217 ret = regulator_enable(reg);
218 if (ret)
219 return ret;
220 }
221
222 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 214 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
223 if (!indio_dev) { 215 if (!indio_dev)
224 ret = -ENOMEM; 216 return -ENOMEM;
225 goto error_disable_reg; 217
226 }
227 spi_set_drvdata(spi, indio_dev); 218 spi_set_drvdata(spi, indio_dev);
228 st = iio_priv(indio_dev); 219 st = iio_priv(indio_dev);
229 st->reg = reg; 220
221 st->avdd = devm_regulator_get(&spi->dev, "avdd");
222 if (IS_ERR(st->avdd))
223 return PTR_ERR(st->avdd);
224
225 ret = regulator_enable(st->avdd);
226 if (ret) {
227 dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
228 return ret;
229 }
230
231 st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
232 if (IS_ERR(st->dvdd)) {
233 ret = PTR_ERR(st->dvdd);
234 goto error_disable_avdd;
235 }
236
237 ret = regulator_enable(st->dvdd);
238 if (ret) {
239 dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
240 goto error_disable_avdd;
241 }
242
230 st->mclk = pdata->mclk; 243 st->mclk = pdata->mclk;
231 st->spi = spi; 244 st->spi = spi;
232 245
@@ -277,42 +290,43 @@ static int ad9832_probe(struct spi_device *spi)
277 ret = spi_sync(st->spi, &st->msg); 290 ret = spi_sync(st->spi, &st->msg);
278 if (ret) { 291 if (ret) {
279 dev_err(&spi->dev, "device init failed\n"); 292 dev_err(&spi->dev, "device init failed\n");
280 goto error_disable_reg; 293 goto error_disable_dvdd;
281 } 294 }
282 295
283 ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0); 296 ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
284 if (ret) 297 if (ret)
285 goto error_disable_reg; 298 goto error_disable_dvdd;
286 299
287 ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1); 300 ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
288 if (ret) 301 if (ret)
289 goto error_disable_reg; 302 goto error_disable_dvdd;
290 303
291 ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0); 304 ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
292 if (ret) 305 if (ret)
293 goto error_disable_reg; 306 goto error_disable_dvdd;
294 307
295 ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1); 308 ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
296 if (ret) 309 if (ret)
297 goto error_disable_reg; 310 goto error_disable_dvdd;
298 311
299 ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2); 312 ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
300 if (ret) 313 if (ret)
301 goto error_disable_reg; 314 goto error_disable_dvdd;
302 315
303 ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3); 316 ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
304 if (ret) 317 if (ret)
305 goto error_disable_reg; 318 goto error_disable_dvdd;
306 319
307 ret = iio_device_register(indio_dev); 320 ret = iio_device_register(indio_dev);
308 if (ret) 321 if (ret)
309 goto error_disable_reg; 322 goto error_disable_dvdd;
310 323
311 return 0; 324 return 0;
312 325
313error_disable_reg: 326error_disable_dvdd:
314 if (!IS_ERR(reg)) 327 regulator_disable(st->dvdd);
315 regulator_disable(reg); 328error_disable_avdd:
329 regulator_disable(st->avdd);
316 330
317 return ret; 331 return ret;
318} 332}
@@ -323,8 +337,8 @@ static int ad9832_remove(struct spi_device *spi)
323 struct ad9832_state *st = iio_priv(indio_dev); 337 struct ad9832_state *st = iio_priv(indio_dev);
324 338
325 iio_device_unregister(indio_dev); 339 iio_device_unregister(indio_dev);
326 if (!IS_ERR(st->reg)) 340 regulator_disable(st->dvdd);
327 regulator_disable(st->reg); 341 regulator_disable(st->avdd);
328 342
329 return 0; 343 return 0;
330} 344}
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index d32323b46be6..1b08b04482a4 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -58,7 +58,8 @@
58/** 58/**
59 * struct ad9832_state - driver instance specific data 59 * struct ad9832_state - driver instance specific data
60 * @spi: spi_device 60 * @spi: spi_device
61 * @reg: supply regulator 61 * @avdd: supply regulator for the analog section
62 * @dvdd: supply regulator for the digital section
62 * @mclk: external master clock 63 * @mclk: external master clock
63 * @ctrl_fp: cached frequency/phase control word 64 * @ctrl_fp: cached frequency/phase control word
64 * @ctrl_ss: cached sync/selsrc control word 65 * @ctrl_ss: cached sync/selsrc control word
@@ -76,7 +77,8 @@
76 77
77struct ad9832_state { 78struct ad9832_state {
78 struct spi_device *spi; 79 struct spi_device *spi;
79 struct regulator *reg; 80 struct regulator *avdd;
81 struct regulator *dvdd;
80 unsigned long mclk; 82 unsigned long mclk;
81 unsigned short ctrl_fp; 83 unsigned short ctrl_fp;
82 unsigned short ctrl_ss; 84 unsigned short ctrl_ss;
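The ad9832 hunks split the old catch-all "vcc" supply into separate avdd and dvdd regulators and, since the error paths can no longer rely on a single IS_ERR() guard, unwind them in reverse order. A compressed sketch of that enable/unwind shape with a hypothetical state struct (the real probe registers the device in between):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct my_state {
	struct regulator *avdd;
	struct regulator *dvdd;
};

static int my_enable_supplies(struct device *dev, struct my_state *st)
{
	int ret;

	st->avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(st->avdd))
		return PTR_ERR(st->avdd);
	ret = regulator_enable(st->avdd);
	if (ret)
		return ret;

	st->dvdd = devm_regulator_get(dev, "dvdd");
	if (IS_ERR(st->dvdd)) {
		ret = PTR_ERR(st->dvdd);
		goto err_disable_avdd;
	}
	ret = regulator_enable(st->dvdd);
	if (ret)
		goto err_disable_avdd;

	return 0;

err_disable_avdd:
	regulator_disable(st->avdd);	/* undo in reverse order of enabling */
	return ret;
}

Remove follows the same reverse order: dvdd is disabled before avdd, matching the new ad9832_remove().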
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6366216e4f37..19216af1dfc9 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -329,11 +329,14 @@ static int ad9834_probe(struct spi_device *spi)
329 return -ENODEV; 329 return -ENODEV;
330 } 330 }
331 331
332 reg = devm_regulator_get(&spi->dev, "vcc"); 332 reg = devm_regulator_get(&spi->dev, "avdd");
333 if (!IS_ERR(reg)) { 333 if (IS_ERR(reg))
334 ret = regulator_enable(reg); 334 return PTR_ERR(reg);
335 if (ret) 335
336 return ret; 336 ret = regulator_enable(reg);
337 if (ret) {
338 dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
339 return ret;
337 } 340 }
338 341
339 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 342 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -416,8 +419,7 @@ static int ad9834_probe(struct spi_device *spi)
416 return 0; 419 return 0;
417 420
418error_disable_reg: 421error_disable_reg:
419 if (!IS_ERR(reg)) 422 regulator_disable(reg);
420 regulator_disable(reg);
421 423
422 return ret; 424 return ret;
423} 425}
@@ -428,8 +430,7 @@ static int ad9834_remove(struct spi_device *spi)
428 struct ad9834_state *st = iio_priv(indio_dev); 430 struct ad9834_state *st = iio_priv(indio_dev);
429 431
430 iio_device_unregister(indio_dev); 432 iio_device_unregister(indio_dev);
431 if (!IS_ERR(st->reg)) 433 regulator_disable(st->reg);
432 regulator_disable(st->reg);
433 434
434 return 0; 435 return 0;
435} 436}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3892a7470410..944789843938 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -726,13 +726,16 @@ static int ad5933_probe(struct i2c_client *client,
726 if (!pdata) 726 if (!pdata)
727 pdata = &ad5933_default_pdata; 727 pdata = &ad5933_default_pdata;
728 728
729 st->reg = devm_regulator_get(&client->dev, "vcc"); 729 st->reg = devm_regulator_get(&client->dev, "vdd");
730 if (!IS_ERR(st->reg)) { 730 if (IS_ERR(st->reg))
731 ret = regulator_enable(st->reg); 731 return PTR_ERR(st->reg);
732 if (ret) 732
733 return ret; 733 ret = regulator_enable(st->reg);
734 voltage_uv = regulator_get_voltage(st->reg); 734 if (ret) {
735 dev_err(&client->dev, "Failed to enable specified VDD supply\n");
736 return ret;
735 } 737 }
738 voltage_uv = regulator_get_voltage(st->reg);
736 739
737 if (voltage_uv) 740 if (voltage_uv)
738 st->vref_mv = voltage_uv / 1000; 741 st->vref_mv = voltage_uv / 1000;
@@ -775,8 +778,7 @@ static int ad5933_probe(struct i2c_client *client,
775error_unreg_ring: 778error_unreg_ring:
776 iio_kfifo_free(indio_dev->buffer); 779 iio_kfifo_free(indio_dev->buffer);
777error_disable_reg: 780error_disable_reg:
778 if (!IS_ERR(st->reg)) 781 regulator_disable(st->reg);
779 regulator_disable(st->reg);
780 782
781 return ret; 783 return ret;
782} 784}
@@ -788,8 +790,7 @@ static int ad5933_remove(struct i2c_client *client)
788 790
789 iio_device_unregister(indio_dev); 791 iio_device_unregister(indio_dev);
790 iio_kfifo_free(indio_dev->buffer); 792 iio_kfifo_free(indio_dev->buffer);
791 if (!IS_ERR(st->reg)) 793 regulator_disable(st->reg);
792 regulator_disable(st->reg);
793 794
794 return 0; 795 return 0;
795} 796}
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index ca8d6e66c899..4fbf6298c0f3 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,18 +3,6 @@
3# 3#
4menu "Light sensors" 4menu "Light sensors"
5 5
6config SENSORS_ISL29018
7 tristate "ISL 29018 light and proximity sensor"
8 depends on I2C
9 select REGMAP_I2C
10 default n
11 help
12 If you say yes here you get support for ambient light sensing and
13 proximity infrared sensing from Intersil ISL29018.
14 This driver will provide the measurements of ambient light intensity
15 in lux, proximity infrared sensing and normal infrared sensing.
16 Data from sensor is accessible via sysfs.
17
18config SENSORS_ISL29028 6config SENSORS_ISL29028
19 tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor" 7 tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
20 depends on I2C 8 depends on I2C
@@ -25,13 +13,6 @@ config SENSORS_ISL29028
25 Proximity value via iio. The ISL29028 provides the concurrent sensing 13 Proximity value via iio. The ISL29028 provides the concurrent sensing
26 of ambient light and proximity. 14 of ambient light and proximity.
27 15
28config TSL2583
29 tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
30 depends on I2C
31 help
32 Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
33 Access ALS data via iio, sysfs.
34
35config TSL2x7x 16config TSL2x7x
36 tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors" 17 tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
37 depends on I2C 18 depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 9960fdf7c15b..f8693e9fdc94 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,7 +2,5 @@
2# Makefile for industrial I/O Light sensors 2# Makefile for industrial I/O Light sensors
3# 3#
4 4
5obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
6obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o 5obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
7obj-$(CONFIG_TSL2583) += tsl2583.o
8obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o 6obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
deleted file mode 100644
index 08f1583ee34e..000000000000
--- a/drivers/staging/iio/light/tsl2583.c
+++ /dev/null
@@ -1,963 +0,0 @@
1/*
2 * Device driver for monitoring ambient light intensity (lux)
3 * within the TAOS tsl258x family of devices (tsl2580, tsl2581).
4 *
5 * Copyright (c) 2011, TAOS Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/i2c.h>
24#include <linux/errno.h>
25#include <linux/delay.h>
26#include <linux/string.h>
27#include <linux/mutex.h>
28#include <linux/unistd.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31#include <linux/iio/iio.h>
32
33#define TSL258X_MAX_DEVICE_REGS 32
34
35/* Triton register offsets */
36#define TSL258X_REG_MAX 8
37
38/* Device Registers and Masks */
39#define TSL258X_CNTRL 0x00
40#define TSL258X_ALS_TIME 0X01
41#define TSL258X_INTERRUPT 0x02
42#define TSL258X_GAIN 0x07
43#define TSL258X_REVID 0x11
44#define TSL258X_CHIPID 0x12
45#define TSL258X_ALS_CHAN0LO 0x14
46#define TSL258X_ALS_CHAN0HI 0x15
47#define TSL258X_ALS_CHAN1LO 0x16
48#define TSL258X_ALS_CHAN1HI 0x17
49#define TSL258X_TMR_LO 0x18
50#define TSL258X_TMR_HI 0x19
51
52/* tsl2583 cmd reg masks */
53#define TSL258X_CMD_REG 0x80
54#define TSL258X_CMD_SPL_FN 0x60
55#define TSL258X_CMD_ALS_INT_CLR 0X01
56
57/* tsl2583 cntrl reg masks */
58#define TSL258X_CNTL_ADC_ENBL 0x02
59#define TSL258X_CNTL_PWR_ON 0x01
60
61/* tsl2583 status reg masks */
62#define TSL258X_STA_ADC_VALID 0x01
63#define TSL258X_STA_ADC_INTR 0x10
64
65/* Lux calculation constants */
66#define TSL258X_LUX_CALC_OVER_FLOW 65535
67
68enum {
69 TSL258X_CHIP_UNKNOWN = 0,
70 TSL258X_CHIP_WORKING = 1,
71 TSL258X_CHIP_SUSPENDED = 2
72};
73
74/* Per-device data */
75struct taos_als_info {
76 u16 als_ch0;
77 u16 als_ch1;
78 u16 lux;
79};
80
81struct taos_settings {
82 int als_time;
83 int als_gain;
84 int als_gain_trim;
85 int als_cal_target;
86};
87
88struct tsl2583_chip {
89 struct mutex als_mutex;
90 struct i2c_client *client;
91 struct taos_als_info als_cur_info;
92 struct taos_settings taos_settings;
93 int als_time_scale;
94 int als_saturation;
95 int taos_chip_status;
96 u8 taos_config[8];
97};
98
99/*
 100 * Initial values for the device - these values can/will be changed by the
 101 * driver and applications as needed.
102 * These values are dynamic.
103 */
104static const u8 taos_config[8] = {
105 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00
106}; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */
107
108struct taos_lux {
109 unsigned int ratio;
110 unsigned int ch0;
111 unsigned int ch1;
112};
113
114/* This structure is intentionally large to accommodate updates via sysfs. */
115/* Sized to 11 = max 10 segments + 1 termination segment */
116/* Assumption is one and only one type of glass used */
117static struct taos_lux taos_device_lux[11] = {
118 { 9830, 8520, 15729 },
119 { 12452, 10807, 23344 },
120 { 14746, 6383, 11705 },
121 { 17695, 4063, 6554 },
122};
123
124struct gainadj {
125 s16 ch0;
126 s16 ch1;
127};
128
129/* Index = (0 - 3) Used to validate the gain selection index */
130static const struct gainadj gainadj[] = {
131 { 1, 1 },
132 { 8, 8 },
133 { 16, 16 },
134 { 107, 115 }
135};
136
137/*
138 * Provides initial operational parameter defaults.
139 * These defaults may be changed through the device's sysfs files.
140 */
141static void taos_defaults(struct tsl2583_chip *chip)
142{
143 /* Operational parameters */
144 chip->taos_settings.als_time = 100;
 145 /* must be a multiple of 50 ms */
146 chip->taos_settings.als_gain = 0;
147 /* this is actually an index into the gain table */
148 /* assume clear glass as default */
149 chip->taos_settings.als_gain_trim = 1000;
150 /* default gain trim to account for aperture effects */
151 chip->taos_settings.als_cal_target = 130;
152 /* Known external ALS reading used for calibration */
153}
154
155/*
156 * Read a number of bytes starting at register (reg) location.
157 * Return 0, or i2c_smbus_write_byte ERROR code.
158 */
159static int
160taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
161{
162 int i, ret;
163
164 for (i = 0; i < len; i++) {
165 /* select register to write */
166 ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg));
167 if (ret < 0) {
168 dev_err(&client->dev,
169 "taos_i2c_read failed to write register %x\n",
170 reg);
171 return ret;
172 }
173 /* read the data */
174 *val = i2c_smbus_read_byte(client);
175 val++;
176 reg++;
177 }
178 return 0;
179}
180
181/*
182 * Reads and calculates current lux value.
183 * The raw ch0 and ch1 values of the ambient light sensed in the last
184 * integration cycle are read from the device.
185 * Time scale factor array values are adjusted based on the integration time.
186 * The raw values are multiplied by a scale factor, and device gain is obtained
 187 * using the gain index. Limit checks are done next, then the ratio of a multiple
 188 * of the ch1 value to the ch0 value is calculated. The array taos_device_lux[]
189 * declared above is then scanned to find the first ratio value that is just
190 * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
191 * the array are then used along with the time scale factor array values, to
192 * calculate the lux.
193 */
194static int taos_get_lux(struct iio_dev *indio_dev)
195{
196 u16 ch0, ch1; /* separated ch0/ch1 data from device */
197 u32 lux; /* raw lux calculated from device data */
198 u64 lux64;
199 u32 ratio;
200 u8 buf[5];
201 struct taos_lux *p;
202 struct tsl2583_chip *chip = iio_priv(indio_dev);
203 int i, ret;
204 u32 ch0lux = 0;
205 u32 ch1lux = 0;
206
207 if (mutex_trylock(&chip->als_mutex) == 0) {
208 dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
209 return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
210 }
211
212 if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
213 /* device is not enabled */
214 dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
215 ret = -EBUSY;
216 goto out_unlock;
217 }
218
219 ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
220 if (ret < 0) {
221 dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
222 goto out_unlock;
223 }
224 /* is data new & valid */
225 if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
226 dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
227 ret = chip->als_cur_info.lux; /* return LAST VALUE */
228 goto out_unlock;
229 }
230
231 for (i = 0; i < 4; i++) {
232 int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
233
234 ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
235 if (ret < 0) {
236 dev_err(&chip->client->dev,
237 "taos_get_lux failed to read register %x\n",
238 reg);
239 goto out_unlock;
240 }
241 }
242
243 /*
244 * clear status, really interrupt status (interrupts are off), but
245 * we use the bit anyway - don't forget 0x80 - this is a command
246 */
247 ret = i2c_smbus_write_byte(chip->client,
248 (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
249 TSL258X_CMD_ALS_INT_CLR));
250
251 if (ret < 0) {
252 dev_err(&chip->client->dev,
253 "taos_i2c_write_command failed in taos_get_lux, err = %d\n",
254 ret);
255 goto out_unlock; /* have no data, so return failure */
256 }
257
258 /* extract ALS/lux data */
259 ch0 = le16_to_cpup((const __le16 *)&buf[0]);
260 ch1 = le16_to_cpup((const __le16 *)&buf[2]);
261
262 chip->als_cur_info.als_ch0 = ch0;
263 chip->als_cur_info.als_ch1 = ch1;
264
265 if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
266 goto return_max;
267
268 if (!ch0) {
269 /* have no data, so return LAST VALUE */
270 ret = 0;
271 chip->als_cur_info.lux = 0;
272 goto out_unlock;
273 }
274 /* calculate ratio */
275 ratio = (ch1 << 15) / ch0;
276 /* convert to unscaled lux using the pointer to the table */
277 for (p = (struct taos_lux *)taos_device_lux;
278 p->ratio != 0 && p->ratio < ratio; p++)
279 ;
280
281 if (p->ratio == 0) {
282 lux = 0;
283 } else {
284 ch0lux = ((ch0 * p->ch0) +
285 (gainadj[chip->taos_settings.als_gain].ch0 >> 1))
286 / gainadj[chip->taos_settings.als_gain].ch0;
287 ch1lux = ((ch1 * p->ch1) +
288 (gainadj[chip->taos_settings.als_gain].ch1 >> 1))
289 / gainadj[chip->taos_settings.als_gain].ch1;
290 lux = ch0lux - ch1lux;
291 }
292
293 /* note: lux is 31 bit max at this point */
294 if (ch1lux > ch0lux) {
295 dev_dbg(&chip->client->dev, "No Data - Return last value\n");
296 ret = 0;
297 chip->als_cur_info.lux = 0;
298 goto out_unlock;
299 }
300
301 /* adjust for active time scale */
302 if (chip->als_time_scale == 0)
303 lux = 0;
304 else
305 lux = (lux + (chip->als_time_scale >> 1)) /
306 chip->als_time_scale;
307
308 /* Adjust for active gain scale.
309 * The taos_device_lux tables above have a factor of 8192 built in,
310 * so we need to shift right.
311 * User-specified gain provides a multiplier.
312 * Apply user-specified gain before shifting right to retain precision.
313 * Use 64 bits to avoid overflow on multiplication.
314 * Then go back to 32 bits before division to avoid using div_u64().
315 */
316 lux64 = lux;
317 lux64 = lux64 * chip->taos_settings.als_gain_trim;
318 lux64 >>= 13;
319 lux = lux64;
320 lux = (lux + 500) / 1000;
321 if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
322return_max:
323 lux = TSL258X_LUX_CALC_OVER_FLOW;
324 }
325
326 /* Update the structure with the latest VALID lux. */
327 chip->als_cur_info.lux = lux;
328 ret = lux;
329
330out_unlock:
331 mutex_unlock(&chip->als_mutex);
332 return ret;
333}
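
/*
 * Illustrative sketch, not part of the driver: the ch0/ch1 -> lux arithmetic
 * performed by taos_get_lux() above, pulled out as a standalone helper so the
 * scaling steps are easier to follow.  coef0/coef1 stand in for the table
 * entry selected by the ch1/ch0 ratio, gain_div for the gainadj[] divisor,
 * and gain_trim is the user trim in thousandths (nominally 1000).
 */
static unsigned int tsl2583_lux_sketch(unsigned int ch0, unsigned int ch1,
				       unsigned int coef0, unsigned int coef1,
				       unsigned int gain_div,
				       unsigned int time_scale,
				       unsigned int gain_trim)
{
	unsigned int ch0lux, ch1lux, lux;
	unsigned long long lux64;

	if (!ch0 || !time_scale || !gain_div)
		return 0;

	/* per-channel contribution, rounded, divided by the gain adjustment */
	ch0lux = (ch0 * coef0 + (gain_div >> 1)) / gain_div;
	ch1lux = (ch1 * coef1 + (gain_div >> 1)) / gain_div;
	if (ch1lux > ch0lux)
		return 0;		/* nonsensical reading */
	lux = ch0lux - ch1lux;

	/* normalize for the integration time actually used */
	lux = (lux + (time_scale >> 1)) / time_scale;

	/* apply trim in 64 bits, drop the 8192 table factor, round to lux */
	lux64 = (unsigned long long)lux * gain_trim;
	lux = (unsigned int)(lux64 >> 13);
	return (lux + 500) / 1000;
}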
334
335/*
 336 * Obtain a single reading and calculate the als_gain_trim (later used
337 * to derive actual lux).
338 * Return updated gain_trim value.
339 */
340static int taos_als_calibrate(struct iio_dev *indio_dev)
341{
342 struct tsl2583_chip *chip = iio_priv(indio_dev);
343 u8 reg_val;
344 unsigned int gain_trim_val;
345 int ret;
346 int lux_val;
347
348 ret = i2c_smbus_write_byte(chip->client,
349 (TSL258X_CMD_REG | TSL258X_CNTRL));
350 if (ret < 0) {
351 dev_err(&chip->client->dev,
352 "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
353 ret);
354 return ret;
355 }
356
357 reg_val = i2c_smbus_read_byte(chip->client);
358 if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
359 != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
360 dev_err(&chip->client->dev,
361 "taos_als_calibrate failed: device not powered on with ADC enabled\n");
362 return -1;
363 }
364
365 ret = i2c_smbus_write_byte(chip->client,
366 (TSL258X_CMD_REG | TSL258X_CNTRL));
367 if (ret < 0) {
368 dev_err(&chip->client->dev,
369 "taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
370 ret);
371 return ret;
372 }
373 reg_val = i2c_smbus_read_byte(chip->client);
374
375 if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
376 dev_err(&chip->client->dev,
377 "taos_als_calibrate failed: STATUS - ADC not valid.\n");
378 return -ENODATA;
379 }
380 lux_val = taos_get_lux(indio_dev);
381 if (lux_val < 0) {
382 dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
383 return lux_val;
384 }
385 gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
386 * chip->taos_settings.als_gain_trim) / lux_val);
387
388 if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
389 dev_err(&chip->client->dev,
390 "taos_als_calibrate failed: trim_val of %d is out of range\n",
391 gain_trim_val);
392 return -ENODATA;
393 }
394 chip->taos_settings.als_gain_trim = (int)gain_trim_val;
395
396 return (int)gain_trim_val;
397}
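
/*
 * Illustrative sketch, not from the driver: the trim rescaling done by
 * taos_als_calibrate().  A known target illuminance divided by what the chip
 * currently reports scales als_gain_trim (in thousandths); results outside
 * 250..4000 are rejected, matching the bounds used above.  The zero-lux
 * check is added here only to keep the standalone helper safe.
 */
static int tsl2583_recalc_trim_sketch(unsigned int cal_target,
				      unsigned int cur_trim,
				      unsigned int measured_lux)
{
	unsigned int trim;

	if (!measured_lux)
		return -ENODATA;	/* nothing to scale against */

	trim = (cal_target * cur_trim) / measured_lux;
	if (trim < 250 || trim > 4000)
		return -ENODATA;

	return trim;
}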
398
399/*
400 * Turn the device on.
401 * Configuration must be set before calling this function.
402 */
403static int taos_chip_on(struct iio_dev *indio_dev)
404{
405 int i;
406 int ret;
407 u8 *uP;
408 u8 utmp;
409 int als_count;
410 int als_time;
411 struct tsl2583_chip *chip = iio_priv(indio_dev);
412
 413 /* make sure the device is not already on */
414 if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
 415 /* to force a register update, turn the device off, then back on */
416 dev_info(&chip->client->dev, "device is already enabled\n");
417 return -EINVAL;
418 }
419
420 /* determine als integration register */
421 als_count = (chip->taos_settings.als_time * 100 + 135) / 270;
422 if (!als_count)
423 als_count = 1; /* ensure at least one cycle */
424
425 /* convert back to time (encompasses overrides) */
426 als_time = (als_count * 27 + 5) / 10;
427 chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count;
428
429 /* Set the gain based on taos_settings struct */
430 chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain;
431
 432 /* record the scaling and saturation values in the chip struct */
433 chip->als_saturation = als_count * 922; /* 90% of full scale */
434 chip->als_time_scale = (als_time + 25) / 50;
435
436 /*
437 * TSL258x Specific power-on / adc enable sequence
438 * Power on the device 1st.
439 */
440 utmp = TSL258X_CNTL_PWR_ON;
441 ret = i2c_smbus_write_byte_data(chip->client,
442 TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
443 if (ret < 0) {
444 dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
445 return ret;
446 }
447
448 /*
 449 * Write the shadow copy (taos_config) out to all registers, then
 450 * wait before enabling the ADC.
451 */
452 for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
453 ret = i2c_smbus_write_byte_data(chip->client,
454 TSL258X_CMD_REG + i,
455 *uP++);
456 if (ret < 0) {
457 dev_err(&chip->client->dev,
458 "taos_chip_on failed on reg %d.\n", i);
459 return ret;
460 }
461 }
462
463 usleep_range(3000, 3500);
464 /*
465 * NOW enable the ADC
466 * initialize the desired mode of operation
467 */
468 utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
469 ret = i2c_smbus_write_byte_data(chip->client,
470 TSL258X_CMD_REG | TSL258X_CNTRL,
471 utmp);
472 if (ret < 0) {
473 dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
474 return ret;
475 }
476 chip->taos_chip_status = TSL258X_CHIP_WORKING;
477
478 return ret;
479}
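
/*
 * Illustrative sketch, not part of the driver: the integration-time
 * bookkeeping from taos_chip_on().  For a requested 100 ms it yields
 * 37 cycles, an ALS_TIME register value of 219, an effective 100 ms,
 * a saturation threshold of 34114 counts and a time scale of 2.
 */
static void tsl2583_timing_sketch(int als_time_ms, int *reg_val,
				  int *saturation, int *time_scale)
{
	int count = (als_time_ms * 100 + 135) / 270;	/* ~2.7 ms per cycle */
	int effective_ms;

	if (!count)
		count = 1;				/* at least one cycle */

	effective_ms = (count * 27 + 5) / 10;
	*reg_val = 256 - count;				/* TSL258X_ALS_TIME */
	*saturation = count * 922;			/* ~90% of full scale */
	*time_scale = (effective_ms + 25) / 50;
}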
480
481static int taos_chip_off(struct iio_dev *indio_dev)
482{
483 struct tsl2583_chip *chip = iio_priv(indio_dev);
484
485 /* turn device off */
486 chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
487 return i2c_smbus_write_byte_data(chip->client,
488 TSL258X_CMD_REG | TSL258X_CNTRL,
489 0x00);
490}
491
492/* Sysfs Interface Functions */
493
494static ssize_t taos_power_state_show(struct device *dev,
495 struct device_attribute *attr, char *buf)
496{
497 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
498 struct tsl2583_chip *chip = iio_priv(indio_dev);
499
500 return sprintf(buf, "%d\n", chip->taos_chip_status);
501}
502
503static ssize_t taos_power_state_store(struct device *dev,
504 struct device_attribute *attr,
505 const char *buf, size_t len)
506{
507 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
508 int value;
509
510 if (kstrtoint(buf, 0, &value))
511 return -EINVAL;
512
513 if (!value)
514 taos_chip_off(indio_dev);
515 else
516 taos_chip_on(indio_dev);
517
518 return len;
519}
520
521static ssize_t taos_gain_show(struct device *dev,
522 struct device_attribute *attr, char *buf)
523{
524 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
525 struct tsl2583_chip *chip = iio_priv(indio_dev);
526 char gain[4] = {0};
527
528 switch (chip->taos_settings.als_gain) {
529 case 0:
530 strcpy(gain, "001");
531 break;
532 case 1:
533 strcpy(gain, "008");
534 break;
535 case 2:
536 strcpy(gain, "016");
537 break;
538 case 3:
539 strcpy(gain, "111");
540 break;
541 }
542
543 return sprintf(buf, "%s\n", gain);
544}
545
546static ssize_t taos_gain_store(struct device *dev,
547 struct device_attribute *attr,
548 const char *buf, size_t len)
549{
550 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
551 struct tsl2583_chip *chip = iio_priv(indio_dev);
552 int value;
553
554 if (kstrtoint(buf, 0, &value))
555 return -EINVAL;
556
557 switch (value) {
558 case 1:
559 chip->taos_settings.als_gain = 0;
560 break;
561 case 8:
562 chip->taos_settings.als_gain = 1;
563 break;
564 case 16:
565 chip->taos_settings.als_gain = 2;
566 break;
567 case 111:
568 chip->taos_settings.als_gain = 3;
569 break;
570 default:
571 dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n");
572 return -1;
573 }
574
575 return len;
576}
577
578static ssize_t taos_gain_available_show(struct device *dev,
579 struct device_attribute *attr,
580 char *buf)
581{
582 return sprintf(buf, "%s\n", "1 8 16 111");
583}
584
585static ssize_t taos_als_time_show(struct device *dev,
586 struct device_attribute *attr, char *buf)
587{
588 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
589 struct tsl2583_chip *chip = iio_priv(indio_dev);
590
591 return sprintf(buf, "%d\n", chip->taos_settings.als_time);
592}
593
594static ssize_t taos_als_time_store(struct device *dev,
595 struct device_attribute *attr,
596 const char *buf, size_t len)
597{
598 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
599 struct tsl2583_chip *chip = iio_priv(indio_dev);
600 int value;
601
602 if (kstrtoint(buf, 0, &value))
603 return -EINVAL;
604
605 if ((value < 50) || (value > 650))
606 return -EINVAL;
607
608 if (value % 50)
609 return -EINVAL;
610
611 chip->taos_settings.als_time = value;
612
613 return len;
614}
615
616static ssize_t taos_als_time_available_show(struct device *dev,
617 struct device_attribute *attr,
618 char *buf)
619{
620 return sprintf(buf, "%s\n",
621 "50 100 150 200 250 300 350 400 450 500 550 600 650");
622}
623
624static ssize_t taos_als_trim_show(struct device *dev,
625 struct device_attribute *attr, char *buf)
626{
627 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
628 struct tsl2583_chip *chip = iio_priv(indio_dev);
629
630 return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
631}
632
633static ssize_t taos_als_trim_store(struct device *dev,
634 struct device_attribute *attr,
635 const char *buf, size_t len)
636{
637 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
638 struct tsl2583_chip *chip = iio_priv(indio_dev);
639 int value;
640
641 if (kstrtoint(buf, 0, &value))
642 return -EINVAL;
643
644 if (value)
645 chip->taos_settings.als_gain_trim = value;
646
647 return len;
648}
649
650static ssize_t taos_als_cal_target_show(struct device *dev,
651 struct device_attribute *attr,
652 char *buf)
653{
654 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
655 struct tsl2583_chip *chip = iio_priv(indio_dev);
656
657 return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
658}
659
660static ssize_t taos_als_cal_target_store(struct device *dev,
661 struct device_attribute *attr,
662 const char *buf, size_t len)
663{
664 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
665 struct tsl2583_chip *chip = iio_priv(indio_dev);
666 int value;
667
668 if (kstrtoint(buf, 0, &value))
669 return -EINVAL;
670
671 if (value)
672 chip->taos_settings.als_cal_target = value;
673
674 return len;
675}
676
677static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
678 char *buf)
679{
680 int ret;
681
682 ret = taos_get_lux(dev_to_iio_dev(dev));
683 if (ret < 0)
684 return ret;
685
686 return sprintf(buf, "%d\n", ret);
687}
688
689static ssize_t taos_do_calibrate(struct device *dev,
690 struct device_attribute *attr,
691 const char *buf, size_t len)
692{
693 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
694 int value;
695
696 if (kstrtoint(buf, 0, &value))
697 return -EINVAL;
698
699 if (value == 1)
700 taos_als_calibrate(indio_dev);
701
702 return len;
703}
704
705static ssize_t taos_luxtable_show(struct device *dev,
706 struct device_attribute *attr, char *buf)
707{
708 int i;
709 int offset = 0;
710
711 for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
712 offset += sprintf(buf + offset, "%u,%u,%u,",
713 taos_device_lux[i].ratio,
714 taos_device_lux[i].ch0,
715 taos_device_lux[i].ch1);
716 if (taos_device_lux[i].ratio == 0) {
717 /*
718 * We just printed the first "0" entry.
719 * Now get rid of the extra "," and break.
720 */
721 offset--;
722 break;
723 }
724 }
725
726 offset += sprintf(buf + offset, "\n");
727 return offset;
728}
729
730static ssize_t taos_luxtable_store(struct device *dev,
731 struct device_attribute *attr,
732 const char *buf, size_t len)
733{
734 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
735 struct tsl2583_chip *chip = iio_priv(indio_dev);
736 int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
737 int n;
738
739 get_options(buf, ARRAY_SIZE(value), value);
740
 741 /* get_options() leaves the number of integers in value[0] and the
 742 * integers themselves starting at value[1].
 743 * Each group of three ints is one table entry, and the last
 744 * entry must be all zeros.
745 */
746 n = value[0];
747 if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) {
748 dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
749 return -EINVAL;
750 }
751 if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
752 dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
753 return -EINVAL;
754 }
755
756 if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
757 taos_chip_off(indio_dev);
758
759 /* Zero out the table */
760 memset(taos_device_lux, 0, sizeof(taos_device_lux));
761 memcpy(taos_device_lux, &value[1], (value[0] * 4));
762
763 taos_chip_on(indio_dev);
764
765 return len;
766}
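
/*
 * Illustrative sketch, not from the driver: the shape check applied to an
 * illuminance0_lux_table write.  Parsing is assumed to have placed the n
 * integers in vals[0..n-1]; entries come in groups of three (ratio, ch0,
 * ch1) and the list must finish with an all-zero terminator.  max_entries
 * stands in for ARRAY_SIZE(taos_device_lux).
 */
static bool tsl2583_lux_table_valid_sketch(const int *vals, int n,
					   int max_entries)
{
	if ((n % 3) || n < 6 || n > (max_entries - 1) * 3)
		return false;

	/* last group must be the 0,0,0 terminator */
	return (vals[n - 3] | vals[n - 2] | vals[n - 1]) == 0;
}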
767
768static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
769 taos_power_state_show, taos_power_state_store);
770
771static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR,
772 taos_gain_show, taos_gain_store);
773static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO,
774 taos_gain_available_show, NULL);
775
776static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
777 taos_als_time_show, taos_als_time_store);
778static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO,
779 taos_als_time_available_show, NULL);
780
781static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR,
782 taos_als_trim_show, taos_als_trim_store);
783
784static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR,
785 taos_als_cal_target_show, taos_als_cal_target_store);
786
787static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL);
788static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate);
789static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
790 taos_luxtable_show, taos_luxtable_store);
791
792static struct attribute *sysfs_attrs_ctrl[] = {
793 &dev_attr_power_state.attr,
794 &dev_attr_illuminance0_calibscale.attr, /* Gain */
795 &dev_attr_illuminance0_calibscale_available.attr,
 796 &dev_attr_illuminance0_integration_time.attr, /* I time */
797 &dev_attr_illuminance0_integration_time_available.attr,
798 &dev_attr_illuminance0_calibbias.attr, /* trim */
799 &dev_attr_illuminance0_input_target.attr,
800 &dev_attr_illuminance0_input.attr,
801 &dev_attr_illuminance0_calibrate.attr,
802 &dev_attr_illuminance0_lux_table.attr,
803 NULL
804};
805
806static const struct attribute_group tsl2583_attribute_group = {
807 .attrs = sysfs_attrs_ctrl,
808};
809
810/* Use the default register values to identify the Taos device */
811static int taos_tsl258x_device(unsigned char *bufp)
812{
813 return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90);
814}
815
816static const struct iio_info tsl2583_info = {
817 .attrs = &tsl2583_attribute_group,
818 .driver_module = THIS_MODULE,
819};
820
821/*
 822 * Client probe function: when a valid device is found, the driver's
 823 * device data structure is populated and initialization completes.
824 */
825static int taos_probe(struct i2c_client *clientp,
826 const struct i2c_device_id *idp)
827{
828 int i, ret;
829 unsigned char buf[TSL258X_MAX_DEVICE_REGS];
830 struct tsl2583_chip *chip;
831 struct iio_dev *indio_dev;
832
833 if (!i2c_check_functionality(clientp->adapter,
834 I2C_FUNC_SMBUS_BYTE_DATA)) {
835 dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
836 return -EOPNOTSUPP;
837 }
838
839 indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
840 if (!indio_dev)
841 return -ENOMEM;
842 chip = iio_priv(indio_dev);
843 chip->client = clientp;
844 i2c_set_clientdata(clientp, indio_dev);
845
846 mutex_init(&chip->als_mutex);
847 chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
848 memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config));
849
850 for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) {
851 ret = i2c_smbus_write_byte(clientp,
852 (TSL258X_CMD_REG | (TSL258X_CNTRL + i)));
853 if (ret < 0) {
854 dev_err(&clientp->dev,
855 "i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n",
856 ret);
857 return ret;
858 }
859 ret = i2c_smbus_read_byte(clientp);
860 if (ret < 0) {
861 dev_err(&clientp->dev,
862 "i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n",
863 ret);
864 return ret;
865 }
866 buf[i] = ret;
867 }
868
869 if (!taos_tsl258x_device(buf)) {
870 dev_info(&clientp->dev,
871 "i2c device found but does not match expected id in taos_probe()\n");
872 return -EINVAL;
873 }
874
875 ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
876 if (ret < 0) {
877 dev_err(&clientp->dev,
878 "i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n",
879 ret);
880 return ret;
881 }
882
883 indio_dev->info = &tsl2583_info;
884 indio_dev->dev.parent = &clientp->dev;
885 indio_dev->modes = INDIO_DIRECT_MODE;
886 indio_dev->name = chip->client->name;
887 ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
888 if (ret) {
889 dev_err(&clientp->dev, "iio registration failed\n");
890 return ret;
891 }
892
 893 /* Load up the V2 defaults (hard-coded for now) */
894 taos_defaults(chip);
895
896 /* Make sure the chip is on */
897 taos_chip_on(indio_dev);
898
899 dev_info(&clientp->dev, "Light sensor found.\n");
900 return 0;
901}
902
903#ifdef CONFIG_PM_SLEEP
904static int taos_suspend(struct device *dev)
905{
906 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
907 struct tsl2583_chip *chip = iio_priv(indio_dev);
908 int ret = 0;
909
910 mutex_lock(&chip->als_mutex);
911
912 if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
913 ret = taos_chip_off(indio_dev);
914 chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
915 }
916
917 mutex_unlock(&chip->als_mutex);
918 return ret;
919}
920
921static int taos_resume(struct device *dev)
922{
923 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
924 struct tsl2583_chip *chip = iio_priv(indio_dev);
925 int ret = 0;
926
927 mutex_lock(&chip->als_mutex);
928
929 if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
930 ret = taos_chip_on(indio_dev);
931
932 mutex_unlock(&chip->als_mutex);
933 return ret;
934}
935
936static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
937#define TAOS_PM_OPS (&taos_pm_ops)
938#else
939#define TAOS_PM_OPS NULL
940#endif
941
942static struct i2c_device_id taos_idtable[] = {
943 { "tsl2580", 0 },
944 { "tsl2581", 1 },
945 { "tsl2583", 2 },
946 {}
947};
948MODULE_DEVICE_TABLE(i2c, taos_idtable);
949
950/* Driver definition */
951static struct i2c_driver taos_driver = {
952 .driver = {
953 .name = "tsl2583",
954 .pm = TAOS_PM_OPS,
955 },
956 .id_table = taos_idtable,
957 .probe = taos_probe,
958};
959module_i2c_driver(taos_driver);
960
961MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
962MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
963MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index ebb8a1993303..3af8f77b8e41 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -465,38 +465,26 @@ err_ret:
465 return ret; 465 return ret;
466} 466}
467 467
468static ssize_t ade7758_read_frequency(struct device *dev, 468static int ade7758_read_samp_freq(struct device *dev, int *val)
469 struct device_attribute *attr, char *buf)
470{ 469{
471 int ret; 470 int ret;
472 u8 t; 471 u8 t;
473 int sps;
474 472
475 ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t); 473 ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
476 if (ret) 474 if (ret)
477 return ret; 475 return ret;
478 476
479 t = (t >> 5) & 0x3; 477 t = (t >> 5) & 0x3;
480 sps = 26040 / (1 << t); 478 *val = 26040 / (1 << t);
481 479
482 return sprintf(buf, "%d SPS\n", sps); 480 return 0;
483} 481}
484 482
485static ssize_t ade7758_write_frequency(struct device *dev, 483static int ade7758_write_samp_freq(struct device *dev, int val)
486 struct device_attribute *attr,
487 const char *buf, size_t len)
488{ 484{
489 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
490 u16 val;
491 int ret; 485 int ret;
492 u8 reg, t; 486 u8 reg, t;
493 487
494 ret = kstrtou16(buf, 10, &val);
495 if (ret)
496 return ret;
497
498 mutex_lock(&indio_dev->mlock);
499
500 switch (val) { 488 switch (val) {
501 case 26040: 489 case 26040:
502 t = 0; 490 t = 0;
@@ -525,9 +513,49 @@ static ssize_t ade7758_write_frequency(struct device *dev,
525 ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg); 513 ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
526 514
527out: 515out:
528 mutex_unlock(&indio_dev->mlock); 516 return ret;
517}
529 518
530 return ret ? ret : len; 519static int ade7758_read_raw(struct iio_dev *indio_dev,
520 struct iio_chan_spec const *chan,
521 int *val,
522 int *val2,
523 long mask)
524{
525 int ret;
526
527 switch (mask) {
528 case IIO_CHAN_INFO_SAMP_FREQ:
529 mutex_lock(&indio_dev->mlock);
530 ret = ade7758_read_samp_freq(&indio_dev->dev, val);
531 mutex_unlock(&indio_dev->mlock);
532 return ret;
533 default:
534 return -EINVAL;
535 }
536
537 return ret;
538}
539
540static int ade7758_write_raw(struct iio_dev *indio_dev,
541 struct iio_chan_spec const *chan,
542 int val, int val2, long mask)
543{
544 int ret;
545
546 switch (mask) {
547 case IIO_CHAN_INFO_SAMP_FREQ:
548 if (val2)
549 return -EINVAL;
550 mutex_lock(&indio_dev->mlock);
551 ret = ade7758_write_samp_freq(&indio_dev->dev, val);
552 mutex_unlock(&indio_dev->mlock);
553 return ret;
554 default:
555 return -EINVAL;
556 }
557
558 return ret;
531} 559}
532 560
533static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit); 561static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
@@ -553,17 +581,12 @@ static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
553static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit, 581static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
554 ADE7758_CVAHR); 582 ADE7758_CVAHR);
555 583
556static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
557 ade7758_read_frequency,
558 ade7758_write_frequency);
559
560static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255"); 584static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
561 585
562static struct attribute *ade7758_attributes[] = { 586static struct attribute *ade7758_attributes[] = {
563 &iio_dev_attr_in_temp_raw.dev_attr.attr, 587 &iio_dev_attr_in_temp_raw.dev_attr.attr,
564 &iio_const_attr_in_temp_offset.dev_attr.attr, 588 &iio_const_attr_in_temp_offset.dev_attr.attr,
565 &iio_const_attr_in_temp_scale.dev_attr.attr, 589 &iio_const_attr_in_temp_scale.dev_attr.attr,
566 &iio_dev_attr_sampling_frequency.dev_attr.attr,
567 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 590 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
568 &iio_dev_attr_awatthr.dev_attr.attr, 591 &iio_dev_attr_awatthr.dev_attr.attr,
569 &iio_dev_attr_bwatthr.dev_attr.attr, 592 &iio_dev_attr_bwatthr.dev_attr.attr,
@@ -611,6 +634,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
611 .type = IIO_VOLTAGE, 634 .type = IIO_VOLTAGE,
612 .indexed = 1, 635 .indexed = 1,
613 .channel = 0, 636 .channel = 0,
637 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
614 .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), 638 .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
615 .scan_index = 0, 639 .scan_index = 0,
616 .scan_type = { 640 .scan_type = {
@@ -622,6 +646,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
622 .type = IIO_CURRENT, 646 .type = IIO_CURRENT,
623 .indexed = 1, 647 .indexed = 1,
624 .channel = 0, 648 .channel = 0,
649 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
625 .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), 650 .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
626 .scan_index = 1, 651 .scan_index = 1,
627 .scan_type = { 652 .scan_type = {
@@ -634,6 +659,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
634 .indexed = 1, 659 .indexed = 1,
635 .channel = 0, 660 .channel = 0,
636 .extend_name = "apparent", 661 .extend_name = "apparent",
662 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
637 .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), 663 .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
638 .scan_index = 2, 664 .scan_index = 2,
639 .scan_type = { 665 .scan_type = {
@@ -646,6 +672,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
646 .indexed = 1, 672 .indexed = 1,
647 .channel = 0, 673 .channel = 0,
648 .extend_name = "active", 674 .extend_name = "active",
675 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
649 .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), 676 .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
650 .scan_index = 3, 677 .scan_index = 3,
651 .scan_type = { 678 .scan_type = {
@@ -658,6 +685,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
658 .indexed = 1, 685 .indexed = 1,
659 .channel = 0, 686 .channel = 0,
660 .extend_name = "reactive", 687 .extend_name = "reactive",
688 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
661 .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), 689 .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
662 .scan_index = 4, 690 .scan_index = 4,
663 .scan_type = { 691 .scan_type = {
@@ -669,6 +697,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
669 .type = IIO_VOLTAGE, 697 .type = IIO_VOLTAGE,
670 .indexed = 1, 698 .indexed = 1,
671 .channel = 1, 699 .channel = 1,
700 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
672 .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), 701 .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
673 .scan_index = 5, 702 .scan_index = 5,
674 .scan_type = { 703 .scan_type = {
@@ -680,6 +709,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
680 .type = IIO_CURRENT, 709 .type = IIO_CURRENT,
681 .indexed = 1, 710 .indexed = 1,
682 .channel = 1, 711 .channel = 1,
712 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
683 .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), 713 .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
684 .scan_index = 6, 714 .scan_index = 6,
685 .scan_type = { 715 .scan_type = {
@@ -692,6 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
692 .indexed = 1, 722 .indexed = 1,
693 .channel = 1, 723 .channel = 1,
694 .extend_name = "apparent", 724 .extend_name = "apparent",
725 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
695 .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), 726 .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
696 .scan_index = 7, 727 .scan_index = 7,
697 .scan_type = { 728 .scan_type = {
@@ -704,6 +735,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
704 .indexed = 1, 735 .indexed = 1,
705 .channel = 1, 736 .channel = 1,
706 .extend_name = "active", 737 .extend_name = "active",
738 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
707 .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), 739 .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
708 .scan_index = 8, 740 .scan_index = 8,
709 .scan_type = { 741 .scan_type = {
@@ -716,6 +748,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
716 .indexed = 1, 748 .indexed = 1,
717 .channel = 1, 749 .channel = 1,
718 .extend_name = "reactive", 750 .extend_name = "reactive",
751 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
719 .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), 752 .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
720 .scan_index = 9, 753 .scan_index = 9,
721 .scan_type = { 754 .scan_type = {
@@ -727,6 +760,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
727 .type = IIO_VOLTAGE, 760 .type = IIO_VOLTAGE,
728 .indexed = 1, 761 .indexed = 1,
729 .channel = 2, 762 .channel = 2,
763 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
730 .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), 764 .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
731 .scan_index = 10, 765 .scan_index = 10,
732 .scan_type = { 766 .scan_type = {
@@ -738,6 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
738 .type = IIO_CURRENT, 772 .type = IIO_CURRENT,
739 .indexed = 1, 773 .indexed = 1,
740 .channel = 2, 774 .channel = 2,
775 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
741 .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), 776 .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
742 .scan_index = 11, 777 .scan_index = 11,
743 .scan_type = { 778 .scan_type = {
@@ -750,6 +785,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
750 .indexed = 1, 785 .indexed = 1,
751 .channel = 2, 786 .channel = 2,
752 .extend_name = "apparent", 787 .extend_name = "apparent",
788 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
753 .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), 789 .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
754 .scan_index = 12, 790 .scan_index = 12,
755 .scan_type = { 791 .scan_type = {
@@ -762,6 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
762 .indexed = 1, 798 .indexed = 1,
763 .channel = 2, 799 .channel = 2,
764 .extend_name = "active", 800 .extend_name = "active",
801 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
765 .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), 802 .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
766 .scan_index = 13, 803 .scan_index = 13,
767 .scan_type = { 804 .scan_type = {
@@ -774,6 +811,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
774 .indexed = 1, 811 .indexed = 1,
775 .channel = 2, 812 .channel = 2,
776 .extend_name = "reactive", 813 .extend_name = "reactive",
814 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
777 .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), 815 .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
778 .scan_index = 14, 816 .scan_index = 14,
779 .scan_type = { 817 .scan_type = {
@@ -787,6 +825,8 @@ static const struct iio_chan_spec ade7758_channels[] = {
787 825
788static const struct iio_info ade7758_info = { 826static const struct iio_info ade7758_info = {
789 .attrs = &ade7758_attribute_group, 827 .attrs = &ade7758_attribute_group,
828 .read_raw = &ade7758_read_raw,
829 .write_raw = &ade7758_write_raw,
790 .driver_module = THIS_MODULE, 830 .driver_module = THIS_MODULE,
791}; 831};
792 832
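
/*
 * Illustrative sketch, not part of the patch: WAVMODE bits [6:5] and the
 * sampling frequencies exposed above are related by freq = 26040 / (1 << t),
 * so the four advertised rates map onto t = 0..3.  A hypothetical reverse
 * lookup of the kind ade7758_write_samp_freq() performs:
 */
static int ade7758_freq_to_wavmode_sketch(int freq)
{
	switch (freq) {
	case 26040:
		return 0;
	case 13020:
		return 1;
	case 6510:
		return 2;
	case 3255:
		return 3;
	default:
		return -EINVAL;
	}
}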
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
deleted file mode 100644
index 75bf47bfee78..000000000000
--- a/drivers/staging/iio/ring_hw.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * ring_hw.h - common functionality for iio hardware ring buffers
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
9 *
10 */
11
12#ifndef _RING_HW_H_
13#define _RING_HW_H_
14
15/**
16 * struct iio_hw_ring_buffer- hardware ring buffer
17 * @buf: generic ring buffer elements
18 * @private: device specific data
19 */
20struct iio_hw_buffer {
21 struct iio_buffer buf;
22 void *private;
23};
24
25#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
26
27#endif /* _RING_HW_H_ */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 81c46f4d0935..a604c83c957e 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
35/* macro */ 35/* macro */
36 36
37#define inc_txqhead(priv) \ 37#define inc_txqhead(priv) \
38 (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE) 38 (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
39#define inc_txqtail(priv) \ 39#define inc_txqtail(priv) \
40 (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE) 40 (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
41#define cnt_txqbody(priv) \ 41#define cnt_txqbody(priv) \
42 (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE) 42 (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
43 43
44#define inc_rxqhead(priv) \ 44#define inc_rxqhead(priv) \
45 (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE) 45 (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
46#define inc_rxqtail(priv) \ 46#define inc_rxqtail(priv) \
47 (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE) 47 (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
48#define cnt_rxqbody(priv) \ 48#define cnt_rxqbody(priv) \
49 (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE) 49 (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
50 50
51static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address, 51static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
52 unsigned char *buffer, int length) 52 unsigned char *buffer, int length)
@@ -76,10 +76,9 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
76 card = priv->ks_wlan_hw.sdio_card; 76 card = priv->ks_wlan_hw.sdio_card;
77 77
78 if (length == 1) /* CMD52 */ 78 if (length == 1) /* CMD52 */
79 sdio_writeb(card->func, *buffer, (unsigned int)address, &rc); 79 sdio_writeb(card->func, *buffer, address, &rc);
80 else /* CMD53 */ 80 else /* CMD53 */
81 rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer, 81 rc = sdio_memcpy_toio(card->func, address, buffer, length);
82 length);
83 82
84 if (rc != 0) 83 if (rc != 0)
85 DPRINTK(1, "sdio error=%d size=%d\n", rc, length); 84 DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
@@ -255,7 +254,7 @@ int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
255 254
256static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p, 255static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
257 unsigned long size, 256 unsigned long size,
258 void (*complete_handler) (void *arg1, void *arg2), 257 void (*complete_handler)(void *arg1, void *arg2),
259 void *arg1, void *arg2) 258 void *arg1, void *arg2)
260{ 259{
261 struct tx_device_buffer *sp; 260 struct tx_device_buffer *sp;
@@ -294,6 +293,7 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
294 int retval; 293 int retval;
295 unsigned char rw_data; 294 unsigned char rw_data;
296 struct hostif_hdr *hdr; 295 struct hostif_hdr *hdr;
296
297 hdr = (struct hostif_hdr *)buffer; 297 hdr = (struct hostif_hdr *)buffer;
298 298
299 DPRINTK(4, "size=%d\n", hdr->size); 299 DPRINTK(4, "size=%d\n", hdr->size);
@@ -353,11 +353,12 @@ static void tx_device_task(void *dev)
353} 353}
354 354
355int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size, 355int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
356 void (*complete_handler) (void *arg1, void *arg2), 356 void (*complete_handler)(void *arg1, void *arg2),
357 void *arg1, void *arg2) 357 void *arg1, void *arg2)
358{ 358{
359 int result = 0; 359 int result = 0;
360 struct hostif_hdr *hdr; 360 struct hostif_hdr *hdr;
361
361 hdr = (struct hostif_hdr *)p; 362 hdr = (struct hostif_hdr *)p;
362 363
363 if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) { 364 if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -412,7 +413,7 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
412 /* receive data */ 413 /* receive data */
413 if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) { 414 if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
414 /* in case of buffer overflow */ 415 /* in case of buffer overflow */
415 DPRINTK(1, "rx buffer overflow \n"); 416 DPRINTK(1, "rx buffer overflow\n");
416 goto error_out; 417 goto error_out;
417 } 418 }
418 rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail]; 419 rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
@@ -658,10 +659,12 @@ static void ks_sdio_interrupt(struct sdio_func *func)
658static int trx_device_init(struct ks_wlan_private *priv) 659static int trx_device_init(struct ks_wlan_private *priv)
659{ 660{
660 /* initialize values (tx) */ 661 /* initialize values (tx) */
661 priv->tx_dev.qtail = priv->tx_dev.qhead = 0; 662 priv->tx_dev.qhead = 0;
663 priv->tx_dev.qtail = 0;
662 664
663 /* initialize values (rx) */ 665 /* initialize values (rx) */
664 priv->rx_dev.qtail = priv->rx_dev.qhead = 0; 666 priv->rx_dev.qhead = 0;
667 priv->rx_dev.qtail = 0;
665 668
666 /* initialize spinLock (tx,rx) */ 669 /* initialize spinLock (tx,rx) */
667 spin_lock_init(&priv->tx_dev.tx_dev_lock); 670 spin_lock_init(&priv->tx_dev.tx_dev_lock);
@@ -718,7 +721,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
718 return rc; 721 return rc;
719} 722}
720 723
721#define ROM_BUFF_SIZE (64*1024) 724#define ROM_BUFF_SIZE (64 * 1024)
722static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address, 725static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
723 unsigned char *data, unsigned int size) 726 unsigned char *data, unsigned int size)
724{ 727{
@@ -955,7 +958,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
955 priv = NULL; 958 priv = NULL;
956 netdev = NULL; 959 netdev = NULL;
957 960
958 /* initilize ks_sdio_card */ 961 /* initialize ks_sdio_card */
959 card = kzalloc(sizeof(*card), GFP_KERNEL); 962 card = kzalloc(sizeof(*card), GFP_KERNEL);
960 if (!card) 963 if (!card)
961 return -ENOMEM; 964 return -ENOMEM;
@@ -1117,6 +1120,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
1117 int ret; 1120 int ret;
1118 struct ks_sdio_card *card; 1121 struct ks_sdio_card *card;
1119 struct ks_wlan_private *priv; 1122 struct ks_wlan_private *priv;
1123
1120 DPRINTK(1, "ks7010_sdio_remove()\n"); 1124 DPRINTK(1, "ks7010_sdio_remove()\n");
1121 1125
1122 card = sdio_get_drvdata(func); 1126 card = sdio_get_drvdata(func);
@@ -1142,6 +1146,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
1142 /* send stop request to MAC */ 1146 /* send stop request to MAC */
1143 { 1147 {
1144 struct hostif_stop_request_t *pp; 1148 struct hostif_stop_request_t *pp;
1149
1145 pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL); 1150 pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
1146 if (!pp) { 1151 if (!pp) {
1147 DPRINTK(3, "allocate memory failed..\n"); 1152 DPRINTK(3, "allocate memory failed..\n");
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
index c72064b48bd8..0f5fd848e23d 100644
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for KeyStream, KS7010 based SDIO cards. 2 * Driver for KeyStream, KS7010 based SDIO cards.
3 * 3 *
4 * Copyright (C) 2006-2008 KeyStream Corp. 4 * Copyright (C) 2006-2008 KeyStream Corp.
5 * Copyright (C) 2009 Renesas Technology Corp. 5 * Copyright (C) 2009 Renesas Technology Corp.
@@ -41,7 +41,7 @@
41/* Write Index Register */ 41/* Write Index Register */
42#define WRITE_INDEX 0x000010 42#define WRITE_INDEX 0x000010
43 43
44/* Write Status/Read Data Size Register 44/* Write Status/Read Data Size Register
45 * for network packet (less than 2048 bytes data) 45 * for network packet (less than 2048 bytes data)
46 */ 46 */
47#define WSTATUS_RSIZE 0x000014 47#define WSTATUS_RSIZE 0x000014
@@ -53,14 +53,14 @@
53/* ARM to SD interrupt Pending */ 53/* ARM to SD interrupt Pending */
54#define INT_PENDING 0x000024 54#define INT_PENDING 0x000024
55 55
56#define INT_GCR_B (1<<7) 56#define INT_GCR_B BIT(7)
57#define INT_GCR_A (1<<6) 57#define INT_GCR_A BIT(6)
58#define INT_WRITE_STATUS (1<<5) 58#define INT_WRITE_STATUS BIT(5)
59#define INT_WRITE_INDEX (1<<4) 59#define INT_WRITE_INDEX BIT(4)
60#define INT_WRITE_SIZE (1<<3) 60#define INT_WRITE_SIZE BIT(3)
61#define INT_READ_STATUS (1<<2) 61#define INT_READ_STATUS BIT(2)
62#define INT_READ_INDEX (1<<1) 62#define INT_READ_INDEX BIT(1)
63#define INT_READ_SIZE (1<<0) 63#define INT_READ_SIZE BIT(0)
64 64
65/* General Communication Register A */ 65/* General Communication Register A */
66#define GCR_A 0x000028 66#define GCR_A 0x000028
@@ -100,7 +100,7 @@ struct hw_info_t {
100struct ks_sdio_packet { 100struct ks_sdio_packet {
101 struct ks_sdio_packet *next; 101 struct ks_sdio_packet *next;
102 u16 nb; 102 u16 nb;
103 u8 buffer[0] __attribute__ ((aligned(4))); 103 u8 buffer[0] __aligned(4);
104}; 104};
105 105
106struct ks_sdio_card { 106struct ks_sdio_card {
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index c57ca581550a..1fbd495e5e63 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -23,11 +23,11 @@
23 23
24/* macro */ 24/* macro */
25#define inc_smeqhead(priv) \ 25#define inc_smeqhead(priv) \
26 ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE ) 26 (priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE)
27#define inc_smeqtail(priv) \ 27#define inc_smeqtail(priv) \
28 ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE ) 28 (priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE)
29#define cnt_smeqbody(priv) \ 29#define cnt_smeqbody(priv) \
30 (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE ) 30 (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE)
31 31
32#define KS_WLAN_MEM_FLAG (GFP_ATOMIC) 32#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
33 33
@@ -97,11 +97,10 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
97{ 97{
98 DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status)); 98 DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
99 99
100 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { 100 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
101 hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); 101 hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
102 } else { 102 else
103 priv->dev_state = DEVICE_STATE_READY; 103 priv->dev_state = DEVICE_STATE_READY;
104 }
105 return 0; 104 return 0;
106} 105}
107 106
@@ -187,13 +186,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
187 memcpy(wrqu.ap_addr.sa_data, 186 memcpy(wrqu.ap_addr.sa_data,
188 &(priv->current_ap.bssid[0]), ETH_ALEN); 187 &(priv->current_ap.bssid[0]), ETH_ALEN);
189 DPRINTK(3, 188 DPRINTK(3,
190 "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n", 189 "IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
191 (unsigned char)wrqu.ap_addr.sa_data[0],
192 (unsigned char)wrqu.ap_addr.sa_data[1],
193 (unsigned char)wrqu.ap_addr.sa_data[2],
194 (unsigned char)wrqu.ap_addr.sa_data[3],
195 (unsigned char)wrqu.ap_addr.sa_data[4],
196 (unsigned char)wrqu.ap_addr.sa_data[5]);
197 wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL); 190 wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
198 } 191 }
199 DPRINTK(4, "\n Link AP\n"); 192 DPRINTK(4, "\n Link AP\n");
@@ -420,16 +413,11 @@ void hostif_data_indication(struct ks_wlan_private *priv)
420 /* needed parameters: count, keyid, key type, TSC */ 413 /* needed parameters: count, keyid, key type, TSC */
421 sprintf(buf, 414 sprintf(buf,
422 "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=" 415 "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
423 "%02x:%02x:%02x:%02x:%02x:%02x)", 416 "%pM)",
424 auth_type - 1, 417 auth_type - 1,
425 eth_hdr-> 418 eth_hdr->
426 h_dest[0] & 0x01 ? "broad" : 419 h_dest[0] & 0x01 ? "broad" :
427 "uni", eth_hdr->h_source[0], 420 "uni", eth_hdr->h_source);
428 eth_hdr->h_source[1],
429 eth_hdr->h_source[2],
430 eth_hdr->h_source[3],
431 eth_hdr->h_source[4],
432 eth_hdr->h_source[5]);
433 memset(&wrqu, 0, sizeof(wrqu)); 421 memset(&wrqu, 0, sizeof(wrqu));
434 wrqu.data.length = strlen(buf); 422 wrqu.data.length = strlen(buf);
435 DPRINTK(4, 423 DPRINTK(4,
@@ -476,8 +464,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
476 skb->dev->last_rx = jiffies; 464 skb->dev->last_rx = jiffies;
477 netif_rx(skb); 465 netif_rx(skb);
478 } else { 466 } else {
479 printk(KERN_WARNING
480 "ks_wlan: Memory squeeze, dropping packet.\n");
481 priv->nstats.rx_dropped++; 467 priv->nstats.rx_dropped++;
482 } 468 }
483 break; 469 break;
@@ -511,8 +497,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
511 skb->dev->last_rx = jiffies; 497 skb->dev->last_rx = jiffies;
512 netif_rx(skb); 498 netif_rx(skb);
513 } else { 499 } else {
514 printk(KERN_WARNING
515 "ks_wlan: Memory squeeze, dropping packet.\n");
516 priv->nstats.rx_dropped++; 500 priv->nstats.rx_dropped++;
517 } 501 }
518 break; 502 break;
@@ -560,10 +544,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
560 dev->dev_addr[5] = priv->eth_addr[5]; 544 dev->dev_addr[5] = priv->eth_addr[5];
561 dev->dev_addr[6] = 0x00; 545 dev->dev_addr[6] = 0x00;
562 dev->dev_addr[7] = 0x00; 546 dev->dev_addr[7] = 0x00;
563 printk(KERN_INFO 547 netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
564 "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
565 priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
566 priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
567 break; 548 break;
568 case DOT11_PRODUCT_VERSION: 549 case DOT11_PRODUCT_VERSION:
569 /* firmware version */ 550 /* firmware version */
@@ -571,8 +552,8 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
571 priv->version_size = priv->rx_size; 552 priv->version_size = priv->rx_size;
572 memcpy(priv->firmware_version, priv->rxp, priv->rx_size); 553 memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
573 priv->firmware_version[priv->rx_size] = '\0'; 554 priv->firmware_version[priv->rx_size] = '\0';
574 printk(KERN_INFO "ks_wlan: firmware ver. = %s\n", 555 netdev_info(dev, "firmware ver. = %s\n",
575 priv->firmware_version); 556 priv->firmware_version);
576 hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION); 557 hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
577 /* wake_up_interruptible_all(&priv->confirm_wait); */ 558 /* wake_up_interruptible_all(&priv->confirm_wait); */
578 complete(&priv->confirm_wait); 559 complete(&priv->confirm_wait);
@@ -592,12 +573,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
592 } else if (priv->eeprom_sum.type == 1) { 573 } else if (priv->eeprom_sum.type == 1) {
593 if (priv->eeprom_sum.result == 0) { 574 if (priv->eeprom_sum.result == 0) {
594 priv->eeprom_checksum = EEPROM_NG; 575 priv->eeprom_checksum = EEPROM_NG;
595 printk("LOCAL_EEPROM_SUM NG\n"); 576 netdev_info(dev, "LOCAL_EEPROM_SUM NG\n");
596 } else if (priv->eeprom_sum.result == 1) { 577 } else if (priv->eeprom_sum.result == 1) {
597 priv->eeprom_checksum = EEPROM_OK; 578 priv->eeprom_checksum = EEPROM_OK;
598 } 579 }
599 } else { 580 } else {
600 printk("LOCAL_EEPROM_SUM error!\n"); 581 netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
601 } 582 }
602 break; 583 break;
603 default: 584 default:
@@ -705,15 +686,13 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
705 break; 686 break;
706 case DOT11_GMK1_TSC: 687 case DOT11_GMK1_TSC:
707 DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status); 688 DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
708 if (atomic_read(&priv->psstatus.snooze_guard)) { 689 if (atomic_read(&priv->psstatus.snooze_guard))
709 atomic_set(&priv->psstatus.snooze_guard, 0); 690 atomic_set(&priv->psstatus.snooze_guard, 0);
710 }
711 break; 691 break;
712 case DOT11_GMK2_TSC: 692 case DOT11_GMK2_TSC:
713 DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status); 693 DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
714 if (atomic_read(&priv->psstatus.snooze_guard)) { 694 if (atomic_read(&priv->psstatus.snooze_guard))
715 atomic_set(&priv->psstatus.snooze_guard, 0); 695 atomic_set(&priv->psstatus.snooze_guard, 0);
716 }
717 break; 696 break;
718 case LOCAL_PMK: 697 case LOCAL_PMK:
719 DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status); 698 DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
@@ -766,8 +745,9 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
766static 745static
767void hostif_start_confirm(struct ks_wlan_private *priv) 746void hostif_start_confirm(struct ks_wlan_private *priv)
768{ 747{
769#ifdef WPS 748#ifdef WPS
770 union iwreq_data wrqu; 749 union iwreq_data wrqu;
750
771 wrqu.data.length = 0; 751 wrqu.data.length = 0;
772 wrqu.data.flags = 0; 752 wrqu.data.flags = 0;
773 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 753 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
@@ -789,6 +769,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
789 unsigned int old_status = priv->connect_status; 769 unsigned int old_status = priv->connect_status;
790 struct net_device *netdev = priv->net_dev; 770 struct net_device *netdev = priv->net_dev;
791 union iwreq_data wrqu0; 771 union iwreq_data wrqu0;
772
792 connect_code = get_WORD(priv); 773 connect_code = get_WORD(priv);
793 774
794 switch (connect_code) { 775 switch (connect_code) {
@@ -894,7 +875,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
894 netif_carrier_off(netdev); 875 netif_carrier_off(netdev);
895 tmp = FORCE_DISCONNECT & priv->connect_status; 876 tmp = FORCE_DISCONNECT & priv->connect_status;
896 priv->connect_status = tmp | DISCONNECT_STATUS; 877 priv->connect_status = tmp | DISCONNECT_STATUS;
897 printk("IWEVENT: disconnect\n"); 878 netdev_info(netdev, "IWEVENT: disconnect\n");
898 879
899 wrqu0.data.length = 0; 880 wrqu0.data.length = 0;
900 wrqu0.data.flags = 0; 881 wrqu0.data.flags = 0;
@@ -904,7 +885,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
904 && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { 885 && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
905 eth_zero_addr(wrqu0.ap_addr.sa_data); 886 eth_zero_addr(wrqu0.ap_addr.sa_data);
906 DPRINTK(3, "IWEVENT: disconnect\n"); 887 DPRINTK(3, "IWEVENT: disconnect\n");
907 printk("IWEVENT: disconnect\n"); 888 netdev_info(netdev, "IWEVENT: disconnect\n");
908 DPRINTK(3, "disconnect :: scan_ind_count=%d\n", 889 DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
909 priv->scan_ind_count); 890 priv->scan_ind_count);
910 wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL); 891 wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
@@ -928,6 +909,7 @@ static
928void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv) 909void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
929{ 910{
930 uint16_t result_code; 911 uint16_t result_code;
912
931 DPRINTK(3, "\n"); 913 DPRINTK(3, "\n");
932 result_code = get_WORD(priv); 914 result_code = get_WORD(priv);
933 DPRINTK(3, "result code = %d\n", result_code); 915 DPRINTK(3, "result code = %d\n", result_code);
@@ -993,6 +975,7 @@ void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
993 unsigned int result_code; 975 unsigned int result_code;
994 struct net_device *dev = priv->net_dev; 976 struct net_device *dev = priv->net_dev;
995 union iwreq_data wrqu; 977 union iwreq_data wrqu;
978
996 result_code = get_DWORD(priv); 979 result_code = get_DWORD(priv);
997 DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code, 980 DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
998 priv->scan_ind_count); 981 priv->scan_ind_count);
@@ -1110,7 +1093,7 @@ void hostif_event_check(struct ks_wlan_private *priv)
1110 case HIF_AP_SET_CONF: 1093 case HIF_AP_SET_CONF:
1111 default: 1094 default:
1112 //DPRINTK(1, "undefined event[%04X]\n", event); 1095 //DPRINTK(1, "undefined event[%04X]\n", event);
1113 printk("undefined event[%04X]\n", event); 1096 netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
1114 /* wake_up_all(&priv->confirm_wait); */ 1097 /* wake_up_all(&priv->confirm_wait); */
1115 complete(&priv->confirm_wait); 1098 complete(&priv->confirm_wait);
1116 break; 1099 break;
@@ -1184,9 +1167,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
1184 eth = (struct ethhdr *)packet->data; 1167 eth = (struct ethhdr *)packet->data;
1185 if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) { 1168 if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
1186 DPRINTK(1, "invalid mac address !!\n"); 1169 DPRINTK(1, "invalid mac address !!\n");
1187 DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n", 1170 DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source);
1188 eth->h_source[0], eth->h_source[1], eth->h_source[2],
1189 eth->h_source[3], eth->h_source[4], eth->h_source[5]);
1190 dev_kfree_skb(packet); 1171 dev_kfree_skb(packet);
1191 kfree(pp); 1172 kfree(pp);
1192 return -3; 1173 return -3;
@@ -1244,7 +1225,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
1244 pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */ 1225 pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
1245 } else { 1226 } else {
1246 if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) { 1227 if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
1247 MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */ 1228 MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
1248 (uint8_t *) michel_mic. 1229 (uint8_t *) michel_mic.
1249 Result); 1230 Result);
1250 memcpy(p, michel_mic.Result, 8); 1231 memcpy(p, michel_mic.Result, 8);
@@ -1294,10 +1275,11 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
1294 return result; 1275 return result;
1295} 1276}
1296 1277
1297#define ps_confirm_wait_inc(priv) do{if(atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET){ \ 1278#define ps_confirm_wait_inc(priv) do { \
1298 atomic_inc(&priv->psstatus.confirm_wait); \ 1279 if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \
1299 /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \ 1280 atomic_inc(&priv->psstatus.confirm_wait); \
1300 } }while(0) 1281 /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
1282 } } while (0)
1301 1283
1302static 1284static
1303void hostif_mib_get_request(struct ks_wlan_private *priv, 1285void hostif_mib_get_request(struct ks_wlan_private *priv,
@@ -1891,6 +1873,7 @@ static
1891void hostif_sme_set_wep(struct ks_wlan_private *priv, int type) 1873void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
1892{ 1874{
1893 uint32_t val; 1875 uint32_t val;
1876
1894 switch (type) { 1877 switch (type) {
1895 case SME_WEP_INDEX_REQUEST: 1878 case SME_WEP_INDEX_REQUEST:
1896 val = cpu_to_le32((uint32_t) (priv->reg.wep_index)); 1879 val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
@@ -1936,18 +1919,17 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
1936 break; 1919 break;
1937 } 1920 }
1938 1921
1939 return;
1940} 1922}
1941 1923
1942struct wpa_suite_t { 1924struct wpa_suite_t {
1943 unsigned short size; 1925 unsigned short size;
1944 unsigned char suite[4][CIPHER_ID_LEN]; 1926 unsigned char suite[4][CIPHER_ID_LEN];
1945} __attribute__ ((packed)); 1927} __packed;
1946 1928
1947struct rsn_mode_t { 1929struct rsn_mode_t {
1948 uint32_t rsn_mode; 1930 uint32_t rsn_mode;
1949 uint16_t rsn_capability; 1931 uint16_t rsn_capability;
1950} __attribute__ ((packed)); 1932} __packed;
1951 1933
1952static 1934static
1953void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type) 1935void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
@@ -2125,7 +2107,6 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
2125 break; 2107 break;
2126 2108
2127 } 2109 }
2128 return;
2129} 2110}
2130 2111
2131static 2112static
@@ -2216,10 +2197,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
2216 } else { 2197 } else {
2217 hostif_infrastructure_set2_request(priv); 2198 hostif_infrastructure_set2_request(priv);
2218 DPRINTK(2, 2199 DPRINTK(2,
2219 "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", 2200 "Infra bssid = %pM\n", priv->reg.bssid);
2220 priv->reg.bssid[0], priv->reg.bssid[1],
2221 priv->reg.bssid[2], priv->reg.bssid[3],
2222 priv->reg.bssid[4], priv->reg.bssid[5]);
2223 } 2201 }
2224 break; 2202 break;
2225 case MODE_ADHOC: 2203 case MODE_ADHOC:
@@ -2229,17 +2207,13 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
2229 } else { 2207 } else {
2230 hostif_adhoc_set2_request(priv); 2208 hostif_adhoc_set2_request(priv);
2231 DPRINTK(2, 2209 DPRINTK(2,
2232 "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", 2210 "Adhoc bssid = %pM\n", priv->reg.bssid);
2233 priv->reg.bssid[0], priv->reg.bssid[1],
2234 priv->reg.bssid[2], priv->reg.bssid[3],
2235 priv->reg.bssid[4], priv->reg.bssid[5]);
2236 } 2211 }
2237 break; 2212 break;
2238 default: 2213 default:
2239 break; 2214 break;
2240 } 2215 }
2241 2216
2242 return;
2243} 2217}
2244 2218
2245static 2219static
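The two hunks above replace the hand-rolled "%02x:%02x:..." BSSID dumps with the kernel's %pM printk extension, which prints a 6-byte MAC address from a single pointer argument. A minimal sketch of the idiom follows; the helper function is illustrative only and is not part of the patch.

#include <linux/types.h>
#include <linux/printk.h>

/* Illustrative only: %pM expands a 6-byte buffer to aa:bb:cc:dd:ee:ff,
 * so the caller passes the address of the MAC array instead of six
 * separate byte arguments. */
static void debug_print_bssid(const u8 *bssid)
{
	pr_debug("Infra bssid = %pM\n", bssid);
}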
@@ -2340,7 +2314,6 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
2340 } 2314 }
2341 hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs); 2315 hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
2342 2316
2343 return;
2344} 2317}
2345 2318
2346static 2319static
@@ -2358,13 +2331,13 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
2358 break; 2331 break;
2359 } 2332 }
2360 2333
2361 return;
2362} 2334}
2363 2335
2364static 2336static
2365void hostif_sme_set_key(struct ks_wlan_private *priv, int type) 2337void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
2366{ 2338{
2367 uint32_t val; 2339 uint32_t val;
2340
2368 switch (type) { 2341 switch (type) {
2369 case SME_SET_FLAG: 2342 case SME_SET_FLAG:
2370 val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked)); 2343 val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
@@ -2416,7 +2389,6 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
2416 &priv->wpa.key[2].rx_seq[0]); 2389 &priv->wpa.key[2].rx_seq[0]);
2417 break; 2390 break;
2418 } 2391 }
2419 return;
2420} 2392}
2421 2393
2422static 2394static
@@ -2427,16 +2399,14 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
2427 struct { 2399 struct {
2428 uint8_t bssid[ETH_ALEN]; 2400 uint8_t bssid[ETH_ALEN];
2429 uint8_t pmkid[IW_PMKID_LEN]; 2401 uint8_t pmkid[IW_PMKID_LEN];
2430 } __attribute__ ((packed)) list[PMK_LIST_MAX]; 2402 } __packed list[PMK_LIST_MAX];
2431 } __attribute__ ((packed)) pmkcache; 2403 } __packed pmkcache;
2432 struct pmk_t *pmk; 2404 struct pmk_t *pmk;
2433 struct list_head *ptr;
2434 int i; 2405 int i;
2435 2406
2436 DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size); 2407 DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
2437 i = 0; 2408 i = 0;
2438 list_for_each(ptr, &priv->pmklist.head) { 2409 list_for_each_entry(pmk, &priv->pmklist.head, list) {
2439 pmk = list_entry(ptr, struct pmk_t, list);
2440 if (i < PMK_LIST_MAX) { 2410 if (i < PMK_LIST_MAX) {
2441 memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN); 2411 memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
2442 memcpy(pmkcache.list[i].pmkid, pmk->pmkid, 2412 memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
@@ -2461,9 +2431,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
2461 DPRINTK(3, "event=%d\n", event); 2431 DPRINTK(3, "event=%d\n", event);
2462 switch (event) { 2432 switch (event) {
2463 case SME_START: 2433 case SME_START:
2464 if (priv->dev_state == DEVICE_STATE_BOOT) { 2434 if (priv->dev_state == DEVICE_STATE_BOOT)
2465 hostif_mib_get_request(priv, DOT11_MAC_ADDRESS); 2435 hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
2466 }
2467 break; 2436 break;
2468 case SME_MULTICAST_REQUEST: 2437 case SME_MULTICAST_REQUEST:
2469 hostif_sme_multicast_set(priv); 2438 hostif_sme_multicast_set(priv);
@@ -2508,14 +2477,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
2508 } 2477 }
2509 break; 2478 break;
2510 case SME_GET_MAC_ADDRESS: 2479 case SME_GET_MAC_ADDRESS:
2511 if (priv->dev_state == DEVICE_STATE_BOOT) { 2480 if (priv->dev_state == DEVICE_STATE_BOOT)
2512 hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION); 2481 hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
2513 }
2514 break; 2482 break;
2515 case SME_GET_PRODUCT_VERSION: 2483 case SME_GET_PRODUCT_VERSION:
2516 if (priv->dev_state == DEVICE_STATE_BOOT) { 2484 if (priv->dev_state == DEVICE_STATE_BOOT)
2517 priv->dev_state = DEVICE_STATE_PREINIT; 2485 priv->dev_state = DEVICE_STATE_PREINIT;
2518 }
2519 break; 2486 break;
2520 case SME_STOP_REQUEST: 2487 case SME_STOP_REQUEST:
2521 hostif_stop_request(priv); 2488 hostif_stop_request(priv);
@@ -2594,9 +2561,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
2594 /* for power save */ 2561 /* for power save */
2595 atomic_set(&priv->psstatus.snooze_guard, 0); 2562 atomic_set(&priv->psstatus.snooze_guard, 0);
2596 atomic_set(&priv->psstatus.confirm_wait, 0); 2563 atomic_set(&priv->psstatus.confirm_wait, 0);
2597 if (priv->dev_state == DEVICE_STATE_PREINIT) { 2564 if (priv->dev_state == DEVICE_STATE_PREINIT)
2598 priv->dev_state = DEVICE_STATE_INIT; 2565 priv->dev_state = DEVICE_STATE_INIT;
2599 }
2600 /* wake_up_interruptible_all(&priv->confirm_wait); */ 2566 /* wake_up_interruptible_all(&priv->confirm_wait); */
2601 complete(&priv->confirm_wait); 2567 complete(&priv->confirm_wait);
2602 break; 2568 break;
@@ -2652,7 +2618,6 @@ void hostif_sme_task(unsigned long dev)
2652 tasklet_schedule(&priv->sme_task); 2618 tasklet_schedule(&priv->sme_task);
2653 } 2619 }
2654 } 2620 }
2655 return;
2656} 2621}
2657 2622
2658/* send to Station Management Entity module */ 2623/* send to Station Management Entity module */
@@ -2672,7 +2637,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
2672 } else { 2637 } else {
2673 /* in case of buffer overflow */ 2638 /* in case of buffer overflow */
2674 //DPRINTK(2,"sme queue buffer overflow\n"); 2639 //DPRINTK(2,"sme queue buffer overflow\n");
2675 printk("sme queue buffer overflow\n"); 2640 netdev_err(priv->net_dev, "sme queue buffer overflow\n");
2676 } 2641 }
2677 2642
2678 tasklet_schedule(&priv->sme_task); 2643 tasklet_schedule(&priv->sme_task);
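The hunk above swaps a bare printk() for netdev_err(), which tags the message with the owning network interface and logs at a defined severity. A sketch of the pattern, assuming priv->net_dev points at the driver's registered struct net_device:

#include <linux/netdevice.h>

/* Sketch: netdev_err() prefixes the message with the driver and
 * interface name (e.g. "wlan0: ...") and logs at KERN_ERR, neither of
 * which the bare printk() provided. */
static void report_sme_overflow(struct net_device *ndev)
{
	netdev_err(ndev, "sme queue buffer overflow\n");
}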
@@ -2736,5 +2701,4 @@ int hostif_init(struct ks_wlan_private *priv)
2736void hostif_exit(struct ks_wlan_private *priv) 2701void hostif_exit(struct ks_wlan_private *priv)
2737{ 2702{
2738 tasklet_kill(&priv->sme_task); 2703 tasklet_kill(&priv->sme_task);
2739 return;
2740} 2704}
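Taken together, the hostif.c hunks are checkpatch-style cleanups: trailing `return;` statements at the end of void functions are dropped, single-statement if bodies lose their braces, __attribute__ ((packed)) becomes __packed, and the open-coded list walk in hostif_sme_set_pmksa() is converted to list_for_each_entry(). A before/after sketch of that last conversion; the node type here is simplified for illustration (in the driver it is struct pmk_t hanging off priv->pmklist.head):

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/types.h>

struct pmk_node {
	struct list_head list;
	u8 bssid[6];
};

/* Old style: separate cursor variable plus an explicit list_entry()
 * cast on every iteration. */
static void walk_old(struct list_head *head)
{
	struct list_head *ptr;
	struct pmk_node *pmk;

	list_for_each(ptr, head) {
		pmk = list_entry(ptr, struct pmk_node, list);
		pr_debug("bssid = %pM\n", pmk->bssid);
	}
}

/* New style: list_for_each_entry() hides the cursor and the
 * container_of() lookup, removing one local variable. */
static void walk_new(struct list_head *head)
{
	struct pmk_node *pmk;

	list_for_each_entry(pmk, head, list)
		pr_debug("bssid = %pM\n", pmk->bssid);
}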
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index c2cc288ae899..279e9b06fc4b 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -14,7 +14,6 @@
14 14
15#define WPS 15#define WPS
16 16
17#include <linux/version.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/module.h> 19#include <linux/module.h>
@@ -25,13 +24,13 @@
25#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */ 24#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
26#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
27#include <linux/wireless.h> 26#include <linux/wireless.h>
28#include <asm/atomic.h> /* struct atmic_t */ 27#include <linux/atomic.h> /* struct atomic_t */
29#include <linux/timer.h> /* struct timer_list */ 28#include <linux/timer.h> /* struct timer_list */
30#include <linux/string.h> 29#include <linux/string.h>
31#include <linux/completion.h> /* struct completion */ 30#include <linux/completion.h> /* struct completion */
32#include <linux/workqueue.h> 31#include <linux/workqueue.h>
33 32
34#include <asm/io.h> 33#include <linux/io.h>
35 34
36#include "ks7010_sdio.h" 35#include "ks7010_sdio.h"
37 36
@@ -43,36 +42,36 @@
43#endif 42#endif
44 43
45struct ks_wlan_parameter { 44struct ks_wlan_parameter {
46 uint8_t operation_mode; /* Operation Mode */ 45 u8 operation_mode; /* Operation Mode */
47 uint8_t channel; /* Channel */ 46 u8 channel; /* Channel */
48 uint8_t tx_rate; /* Transmit Rate */ 47 u8 tx_rate; /* Transmit Rate */
49 struct { 48 struct {
50 uint8_t size; 49 u8 size;
51 uint8_t body[16]; 50 u8 body[16];
52 } rate_set; 51 } rate_set;
53 uint8_t bssid[ETH_ALEN]; /* BSSID */ 52 u8 bssid[ETH_ALEN]; /* BSSID */
54 struct { 53 struct {
55 uint8_t size; 54 u8 size;
56 uint8_t body[32 + 1]; 55 u8 body[32 + 1];
57 } ssid; /* SSID */ 56 } ssid; /* SSID */
58 uint8_t preamble; /* Preamble */ 57 u8 preamble; /* Preamble */
59 uint8_t powermgt; /* PowerManagementMode */ 58 u8 powermgt; /* PowerManagementMode */
60 uint32_t scan_type; /* AP List Scan Type */ 59 u32 scan_type; /* AP List Scan Type */
61#define BEACON_LOST_COUNT_MIN 0 60#define BEACON_LOST_COUNT_MIN 0
62#define BEACON_LOST_COUNT_MAX 65535 61#define BEACON_LOST_COUNT_MAX 65535
63 uint32_t beacon_lost_count; /* Beacon Lost Count */ 62 u32 beacon_lost_count; /* Beacon Lost Count */
64 uint32_t rts; /* RTS Threashold */ 63 u32 rts; /* RTS Threashold */
65 uint32_t fragment; /* Fragmentation Threashold */ 64 u32 fragment; /* Fragmentation Threashold */
66 uint32_t privacy_invoked; 65 u32 privacy_invoked;
67 uint32_t wep_index; 66 u32 wep_index;
68 struct { 67 struct {
69 uint8_t size; 68 u8 size;
70 uint8_t val[13 * 2 + 1]; 69 u8 val[13 * 2 + 1];
71 } wep_key[4]; 70 } wep_key[4];
72 uint16_t authenticate_type; 71 u16 authenticate_type;
73 uint16_t phy_type; /* 11b/11g/11bg mode type */ 72 u16 phy_type; /* 11b/11g/11bg mode type */
74 uint16_t cts_mode; /* for 11g/11bg mode cts mode */ 73 u16 cts_mode; /* for 11g/11bg mode cts mode */
75 uint16_t phy_info_timer; /* phy information timer */ 74 u16 phy_info_timer; /* phy information timer */
76}; 75};
77 76
78enum { 77enum {
@@ -216,37 +215,37 @@ struct hostt_t {
216 215
217#define RSN_IE_BODY_MAX 64 216#define RSN_IE_BODY_MAX 64
218struct rsn_ie_t { 217struct rsn_ie_t {
219 uint8_t id; /* 0xdd = WPA or 0x30 = RSN */ 218 u8 id; /* 0xdd = WPA or 0x30 = RSN */
220 uint8_t size; /* max ? 255 ? */ 219 u8 size; /* max ? 255 ? */
221 uint8_t body[RSN_IE_BODY_MAX]; 220 u8 body[RSN_IE_BODY_MAX];
222} __packed; 221} __packed;
223 222
224#ifdef WPS 223#ifdef WPS
225#define WPS_IE_BODY_MAX 255 224#define WPS_IE_BODY_MAX 255
226struct wps_ie_t { 225struct wps_ie_t {
227 uint8_t id; /* 221 'dd <len> 00 50 F2 04' */ 226 u8 id; /* 221 'dd <len> 00 50 F2 04' */
228 uint8_t size; /* max ? 255 ? */ 227 u8 size; /* max ? 255 ? */
229 uint8_t body[WPS_IE_BODY_MAX]; 228 u8 body[WPS_IE_BODY_MAX];
230} __packed; 229} __packed;
231#endif /* WPS */ 230#endif /* WPS */
232 231
233struct local_ap_t { 232struct local_ap_t {
234 uint8_t bssid[6]; 233 u8 bssid[6];
235 uint8_t rssi; 234 u8 rssi;
236 uint8_t sq; 235 u8 sq;
237 struct { 236 struct {
238 uint8_t size; 237 u8 size;
239 uint8_t body[32]; 238 u8 body[32];
240 uint8_t ssid_pad; 239 u8 ssid_pad;
241 } ssid; 240 } ssid;
242 struct { 241 struct {
243 uint8_t size; 242 u8 size;
244 uint8_t body[16]; 243 u8 body[16];
245 uint8_t rate_pad; 244 u8 rate_pad;
246 } rate_set; 245 } rate_set;
247 uint16_t capability; 246 u16 capability;
248 uint8_t channel; 247 u8 channel;
249 uint8_t noise; 248 u8 noise;
250 struct rsn_ie_t wpa_ie; 249 struct rsn_ie_t wpa_ie;
251 struct rsn_ie_t rsn_ie; 250 struct rsn_ie_t rsn_ie;
252#ifdef WPS 251#ifdef WPS
@@ -262,15 +261,15 @@ struct local_aplist_t {
262}; 261};
263 262
264struct local_gain_t { 263struct local_gain_t {
265 uint8_t TxMode; 264 u8 TxMode;
266 uint8_t RxMode; 265 u8 RxMode;
267 uint8_t TxGain; 266 u8 TxGain;
268 uint8_t RxGain; 267 u8 RxGain;
269}; 268};
270 269
271struct local_eeprom_sum_t { 270struct local_eeprom_sum_t {
272 uint8_t type; 271 u8 type;
273 uint8_t result; 272 u8 result;
274}; 273};
275 274
276enum { 275enum {
@@ -352,25 +351,25 @@ enum {
352#define MIC_KEY_SIZE 8 351#define MIC_KEY_SIZE 8
353 352
354struct wpa_key_t { 353struct wpa_key_t {
355 uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */ 354 u32 ext_flags; /* IW_ENCODE_EXT_xxx */
356 uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ 355 u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
357 uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ 356 u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
358 struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast 357 struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
359 * (group) keys or unicast address for 358 * (group) keys or unicast address for
360 * individual keys */ 359 * individual keys */
361 uint16_t alg; 360 u16 alg;
362 uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */ 361 u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
363 uint8_t key_val[IW_ENCODING_TOKEN_MAX]; 362 u8 key_val[IW_ENCODING_TOKEN_MAX];
364 uint8_t tx_mic_key[MIC_KEY_SIZE]; 363 u8 tx_mic_key[MIC_KEY_SIZE];
365 uint8_t rx_mic_key[MIC_KEY_SIZE]; 364 u8 rx_mic_key[MIC_KEY_SIZE];
366}; 365};
367#define WPA_KEY_INDEX_MAX 4 366#define WPA_KEY_INDEX_MAX 4
368#define WPA_RX_SEQ_LEN 6 367#define WPA_RX_SEQ_LEN 6
369 368
370struct mic_failure_t { 369struct mic_failure_t {
371 uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */ 370 u16 failure; /* MIC Failure counter 0 or 1 or 2 */
372 uint16_t counter; /* 1sec counter 0-60 */ 371 u16 counter; /* 1sec counter 0-60 */
373 uint32_t last_failure_time; 372 u32 last_failure_time;
374 int stop; /* stop flag */ 373 int stop; /* stop flag */
375}; 374};
376 375
@@ -391,12 +390,12 @@ struct wpa_status_t {
391#include <linux/list.h> 390#include <linux/list.h>
392#define PMK_LIST_MAX 8 391#define PMK_LIST_MAX 8
393struct pmk_list_t { 392struct pmk_list_t {
394 uint16_t size; 393 u16 size;
395 struct list_head head; 394 struct list_head head;
396 struct pmk_t { 395 struct pmk_t {
397 struct list_head list; 396 struct list_head list;
398 uint8_t bssid[ETH_ALEN]; 397 u8 bssid[ETH_ALEN];
399 uint8_t pmkid[IW_PMKID_LEN]; 398 u8 pmkid[IW_PMKID_LEN];
400 } pmk[PMK_LIST_MAX]; 399 } pmk[PMK_LIST_MAX];
401}; 400};
402 401
@@ -404,7 +403,7 @@ struct pmk_list_t {
404struct wps_status_t { 403struct wps_status_t {
405 int wps_enabled; 404 int wps_enabled;
406 int ielen; 405 int ielen;
407 uint8_t ie[255]; 406 u8 ie[255];
408}; 407};
409#endif /* WPS */ 408#endif /* WPS */
410 409
@@ -439,7 +438,7 @@ struct ks_wlan_private {
439 struct pmk_list_t pmklist; 438 struct pmk_list_t pmklist;
440 /* wireless parameter */ 439 /* wireless parameter */
441 struct ks_wlan_parameter reg; 440 struct ks_wlan_parameter reg;
442 uint8_t current_rate; 441 u8 current_rate;
443 442
444 char nick[IW_ESSID_MAX_SIZE + 1]; 443 char nick[IW_ESSID_MAX_SIZE + 1];
445 444
@@ -472,24 +471,24 @@ struct ks_wlan_private {
472 /* spinlock_t lock; */ 471 /* spinlock_t lock; */
473#define FORCE_DISCONNECT 0x80000000 472#define FORCE_DISCONNECT 0x80000000
474#define CONNECT_STATUS_MASK 0x7FFFFFFF 473#define CONNECT_STATUS_MASK 0x7FFFFFFF
475 uint32_t connect_status; /* connect status */ 474 u32 connect_status; /* connect status */
476 int infra_status; /* Infractructure status */ 475 int infra_status; /* Infractructure status */
477 476
478 uint8_t data_buff[0x1000]; 477 u8 data_buff[0x1000];
479 478
480 uint8_t scan_ssid_len; 479 u8 scan_ssid_len;
481 uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1]; 480 u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
482 struct local_gain_t gain; 481 struct local_gain_t gain;
483#ifdef WPS 482#ifdef WPS
484 struct net_device *l2_dev; 483 struct net_device *l2_dev;
485 int l2_fd; 484 int l2_fd;
486 struct wps_status_t wps; 485 struct wps_status_t wps;
487#endif /* WPS */ 486#endif /* WPS */
488 uint8_t sleep_mode; 487 u8 sleep_mode;
489 488
490 uint8_t region; 489 u8 region;
491 struct local_eeprom_sum_t eeprom_sum; 490 struct local_eeprom_sum_t eeprom_sum;
492 uint8_t eeprom_checksum; 491 u8 eeprom_checksum;
493 492
494 struct hostt_t hostt; 493 struct hostt_t hostt;
495 494
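The ks_wlan.h hunks are a mechanical conversion to kernel-native fixed-width types (uint8_t/uint16_t/uint32_t become u8/u16/u32), the __packed shorthand, and <linux/...> headers in place of the <asm/...> and <linux/version.h> includes. A sketch of the target style, using an invented structure purely for illustration (kernel build context assumed, since u8/u16/u32 and __packed come from the kernel headers):

#include <linux/types.h>

/* Illustrative struct, not from the driver: fixed-width kernel types
 * plus the __packed shorthand instead of the raw GCC attribute. */
struct example_ie {
	u8  id;
	u8  size;
	u8  body[64];
} __packed;

struct example_counters {
	u16 capability;		/* was uint16_t */
	u32 beacon_lost_count;	/* was uint32_t */
};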
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index b2b4fa4c3834..e5d04adaeb1a 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -24,9 +24,9 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/ctype.h> 25#include <linux/ctype.h>
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <asm/uaccess.h> 29#include <linux/uaccess.h>
30 30
31static int wep_on_off; 31static int wep_on_off;
32#define WEP_OFF 0 32#define WEP_OFF 0
@@ -50,10 +50,10 @@ static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
50/* A few details needed for WEP (Wireless Equivalent Privacy) */ 50/* A few details needed for WEP (Wireless Equivalent Privacy) */
51#define MAX_KEY_SIZE 13 /* 128 (?) bits */ 51#define MAX_KEY_SIZE 13 /* 128 (?) bits */
52#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */ 52#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
53typedef struct wep_key_t { 53struct wep_key {
54 u16 len; 54 u16 len;
55 u8 key[16]; /* 40-bit and 104-bit keys */ 55 u8 key[16]; /* 40-bit and 104-bit keys */
56} wep_key_t; 56};
57 57
58/* Backward compatibility */ 58/* Backward compatibility */
59#ifndef IW_ENCODE_NOKEY 59#ifndef IW_ENCODE_NOKEY
@@ -88,9 +88,9 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
88 88
89 DPRINTK(4, "in_interrupt = %ld\n", in_interrupt()); 89 DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
90 90
91 if (priv->dev_state < DEVICE_STATE_READY) { 91 if (priv->dev_state < DEVICE_STATE_READY)
92 return -1; /* not finished initialize */ 92 return -1; /* not finished initialize */
93 } 93
94 if (atomic_read(&update_phyinfo)) 94 if (atomic_read(&update_phyinfo))
95 return 1; 95 return 1;
96 96
@@ -182,19 +182,18 @@ static int ks_wlan_get_name(struct net_device *dev,
182 struct ks_wlan_private *priv = 182 struct ks_wlan_private *priv =
183 (struct ks_wlan_private *)netdev_priv(dev); 183 (struct ks_wlan_private *)netdev_priv(dev);
184 184
185 if (priv->sleep_mode == SLP_SLEEP) { 185 if (priv->sleep_mode == SLP_SLEEP)
186 return -EPERM; 186 return -EPERM;
187 } 187
188 /* for SLEEP MODE */ 188 /* for SLEEP MODE */
189 if (priv->dev_state < DEVICE_STATE_READY) { 189 if (priv->dev_state < DEVICE_STATE_READY)
190 strcpy(cwrq, "NOT READY!"); 190 strcpy(cwrq, "NOT READY!");
191 } else if (priv->reg.phy_type == D_11B_ONLY_MODE) { 191 else if (priv->reg.phy_type == D_11B_ONLY_MODE)
192 strcpy(cwrq, "IEEE 802.11b"); 192 strcpy(cwrq, "IEEE 802.11b");
193 } else if (priv->reg.phy_type == D_11G_ONLY_MODE) { 193 else if (priv->reg.phy_type == D_11G_ONLY_MODE)
194 strcpy(cwrq, "IEEE 802.11g"); 194 strcpy(cwrq, "IEEE 802.11g");
195 } else { 195 else
196 strcpy(cwrq, "IEEE 802.11b/g"); 196 strcpy(cwrq, "IEEE 802.11b/g");
197 }
198 197
199 return 0; 198 return 0;
200} 199}
@@ -209,9 +208,8 @@ static int ks_wlan_set_freq(struct net_device *dev,
209 (struct ks_wlan_private *)netdev_priv(dev); 208 (struct ks_wlan_private *)netdev_priv(dev);
210 int rc = -EINPROGRESS; /* Call commit handler */ 209 int rc = -EINPROGRESS; /* Call commit handler */
211 210
212 if (priv->sleep_mode == SLP_SLEEP) { 211 if (priv->sleep_mode == SLP_SLEEP)
213 return -EPERM; 212 return -EPERM;
214 }
215 213
216 /* for SLEEP MODE */ 214 /* for SLEEP MODE */
217 /* If setting by frequency, convert to a channel */ 215 /* If setting by frequency, convert to a channel */
@@ -219,6 +217,7 @@ static int ks_wlan_set_freq(struct net_device *dev,
219 (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) { 217 (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
220 int f = fwrq->m / 100000; 218 int f = fwrq->m / 100000;
221 int c = 0; 219 int c = 0;
220
222 while ((c < 14) && (f != frequency_list[c])) 221 while ((c < 14) && (f != frequency_list[c]))
223 c++; 222 c++;
224 /* Hack to fall through... */ 223 /* Hack to fall through... */
@@ -257,13 +256,13 @@ static int ks_wlan_get_freq(struct net_device *dev,
257 (struct ks_wlan_private *)netdev_priv(dev); 256 (struct ks_wlan_private *)netdev_priv(dev);
258 int f; 257 int f;
259 258
260 if (priv->sleep_mode == SLP_SLEEP) { 259 if (priv->sleep_mode == SLP_SLEEP)
261 return -EPERM; 260 return -EPERM;
262 } 261
263 /* for SLEEP MODE */ 262 /* for SLEEP MODE */
264 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { 263 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
265 f = (int)priv->current_ap.channel; 264 f = (int)priv->current_ap.channel;
266 } else 265 else
267 f = (int)priv->reg.channel; 266 f = (int)priv->reg.channel;
268 fwrq->m = frequency_list[f - 1] * 100000; 267 fwrq->m = frequency_list[f - 1] * 100000;
269 fwrq->e = 1; 268 fwrq->e = 1;
@@ -283,9 +282,9 @@ static int ks_wlan_set_essid(struct net_device *dev,
283 282
284 DPRINTK(2, " %d\n", dwrq->flags); 283 DPRINTK(2, " %d\n", dwrq->flags);
285 284
286 if (priv->sleep_mode == SLP_SLEEP) { 285 if (priv->sleep_mode == SLP_SLEEP)
287 return -EPERM; 286 return -EPERM;
288 } 287
289 288
290 /* for SLEEP MODE */ 289 /* for SLEEP MODE */
291 /* Check if we asked for `any' */ 290 /* Check if we asked for `any' */
@@ -301,14 +300,14 @@ static int ks_wlan_set_essid(struct net_device *dev,
301 len--; 300 len--;
302 301
303 /* Check the size of the string */ 302 /* Check the size of the string */
304 if (len > IW_ESSID_MAX_SIZE) { 303 if (len > IW_ESSID_MAX_SIZE)
305 return -EINVAL; 304 return -EINVAL;
306 } 305
307#else 306#else
308 /* Check the size of the string */ 307 /* Check the size of the string */
309 if (dwrq->length > IW_ESSID_MAX_SIZE + 1) { 308 if (dwrq->length > IW_ESSID_MAX_SIZE + 1)
310 return -E2BIG; 309 return -E2BIG;
311 } 310
312#endif 311#endif
313 312
314 /* Set the SSID */ 313 /* Set the SSID */
@@ -340,9 +339,9 @@ static int ks_wlan_get_essid(struct net_device *dev,
340 struct ks_wlan_private *priv = 339 struct ks_wlan_private *priv =
341 (struct ks_wlan_private *)netdev_priv(dev); 340 (struct ks_wlan_private *)netdev_priv(dev);
342 341
343 if (priv->sleep_mode == SLP_SLEEP) { 342 if (priv->sleep_mode == SLP_SLEEP)
344 return -EPERM; 343 return -EPERM;
345 } 344
346 345
347 /* for SLEEP MODE */ 346 /* for SLEEP MODE */
348 /* Note : if dwrq->flags != 0, we should 347 /* Note : if dwrq->flags != 0, we should
@@ -385,25 +384,23 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
385 384
386 DPRINTK(2, "\n"); 385 DPRINTK(2, "\n");
387 386
388 if (priv->sleep_mode == SLP_SLEEP) { 387 if (priv->sleep_mode == SLP_SLEEP)
389 return -EPERM; 388 return -EPERM;
390 } 389
391 /* for SLEEP MODE */ 390 /* for SLEEP MODE */
392 if (priv->reg.operation_mode == MODE_ADHOC || 391 if (priv->reg.operation_mode == MODE_ADHOC ||
393 priv->reg.operation_mode == MODE_INFRASTRUCTURE) { 392 priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
394 memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN); 393 memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN);
395 394
396 if (is_valid_ether_addr((u8 *) priv->reg.bssid)) { 395 if (is_valid_ether_addr((u8 *)priv->reg.bssid))
397 priv->need_commit |= SME_MODE_SET; 396 priv->need_commit |= SME_MODE_SET;
398 } 397
399 } else { 398 } else {
400 eth_zero_addr(priv->reg.bssid); 399 eth_zero_addr(priv->reg.bssid);
401 return -EOPNOTSUPP; 400 return -EOPNOTSUPP;
402 } 401 }
403 402
404 DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", 403 DPRINTK(2, "bssid = %pM\n", priv->reg.bssid);
405 priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
406 priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
407 404
408 /* Write it to the card */ 405 /* Write it to the card */
409 if (priv->need_commit) { 406 if (priv->need_commit) {
@@ -421,15 +418,14 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
421 struct ks_wlan_private *priv = 418 struct ks_wlan_private *priv =
422 (struct ks_wlan_private *)netdev_priv(dev); 419 (struct ks_wlan_private *)netdev_priv(dev);
423 420
424 if (priv->sleep_mode == SLP_SLEEP) { 421 if (priv->sleep_mode == SLP_SLEEP)
425 return -EPERM; 422 return -EPERM;
426 } 423
427 /* for SLEEP MODE */ 424 /* for SLEEP MODE */
428 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { 425 if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
429 memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN); 426 memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
430 } else { 427 else
431 eth_zero_addr(awrq->sa_data); 428 eth_zero_addr(awrq->sa_data);
432 }
433 429
434 awrq->sa_family = ARPHRD_ETHER; 430 awrq->sa_family = ARPHRD_ETHER;
435 431
@@ -445,15 +441,14 @@ static int ks_wlan_set_nick(struct net_device *dev,
445 struct ks_wlan_private *priv = 441 struct ks_wlan_private *priv =
446 (struct ks_wlan_private *)netdev_priv(dev); 442 (struct ks_wlan_private *)netdev_priv(dev);
447 443
448 if (priv->sleep_mode == SLP_SLEEP) { 444 if (priv->sleep_mode == SLP_SLEEP)
449 return -EPERM; 445 return -EPERM;
450 }
451 446
452 /* for SLEEP MODE */ 447 /* for SLEEP MODE */
453 /* Check the size of the string */ 448 /* Check the size of the string */
454 if (dwrq->length > 16 + 1) { 449 if (dwrq->length > 16 + 1)
455 return -E2BIG; 450 return -E2BIG;
456 } 451
457 memset(priv->nick, 0, sizeof(priv->nick)); 452 memset(priv->nick, 0, sizeof(priv->nick));
458 memcpy(priv->nick, extra, dwrq->length); 453 memcpy(priv->nick, extra, dwrq->length);
459 454
@@ -469,9 +464,9 @@ static int ks_wlan_get_nick(struct net_device *dev,
469 struct ks_wlan_private *priv = 464 struct ks_wlan_private *priv =
470 (struct ks_wlan_private *)netdev_priv(dev); 465 (struct ks_wlan_private *)netdev_priv(dev);
471 466
472 if (priv->sleep_mode == SLP_SLEEP) { 467 if (priv->sleep_mode == SLP_SLEEP)
473 return -EPERM; 468 return -EPERM;
474 } 469
475 /* for SLEEP MODE */ 470 /* for SLEEP MODE */
476 strncpy(extra, priv->nick, 16); 471 strncpy(extra, priv->nick, 16);
477 extra[16] = '\0'; 472 extra[16] = '\0';
@@ -490,9 +485,9 @@ static int ks_wlan_set_rate(struct net_device *dev,
490 (struct ks_wlan_private *)netdev_priv(dev); 485 (struct ks_wlan_private *)netdev_priv(dev);
491 int i = 0; 486 int i = 0;
492 487
493 if (priv->sleep_mode == SLP_SLEEP) { 488 if (priv->sleep_mode == SLP_SLEEP)
494 return -EPERM; 489 return -EPERM;
495 } 490
496 /* for SLEEP MODE */ 491 /* for SLEEP MODE */
497 if (priv->reg.phy_type == D_11B_ONLY_MODE) { 492 if (priv->reg.phy_type == D_11B_ONLY_MODE) {
498 if (vwrq->fixed == 1) { 493 if (vwrq->fixed == 1) {
@@ -727,13 +722,13 @@ static int ks_wlan_get_rate(struct net_device *dev,
727 DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n", 722 DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
728 in_interrupt(), atomic_read(&update_phyinfo)); 723 in_interrupt(), atomic_read(&update_phyinfo));
729 724
730 if (priv->sleep_mode == SLP_SLEEP) { 725 if (priv->sleep_mode == SLP_SLEEP)
731 return -EPERM; 726 return -EPERM;
732 } 727
733 /* for SLEEP MODE */ 728 /* for SLEEP MODE */
734 if (!atomic_read(&update_phyinfo)) { 729 if (!atomic_read(&update_phyinfo))
735 ks_wlan_update_phy_information(priv); 730 ks_wlan_update_phy_information(priv);
736 } 731
737 vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000; 732 vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
738 if (priv->reg.tx_rate == TX_RATE_FIXED) 733 if (priv->reg.tx_rate == TX_RATE_FIXED)
739 vwrq->fixed = 1; 734 vwrq->fixed = 1;
@@ -752,15 +747,15 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
752 (struct ks_wlan_private *)netdev_priv(dev); 747 (struct ks_wlan_private *)netdev_priv(dev);
753 int rthr = vwrq->value; 748 int rthr = vwrq->value;
754 749
755 if (priv->sleep_mode == SLP_SLEEP) { 750 if (priv->sleep_mode == SLP_SLEEP)
756 return -EPERM; 751 return -EPERM;
757 } 752
758 /* for SLEEP MODE */ 753 /* for SLEEP MODE */
759 if (vwrq->disabled) 754 if (vwrq->disabled)
760 rthr = 2347; 755 rthr = 2347;
761 if ((rthr < 0) || (rthr > 2347)) { 756 if ((rthr < 0) || (rthr > 2347))
762 return -EINVAL; 757 return -EINVAL;
763 } 758
764 priv->reg.rts = rthr; 759 priv->reg.rts = rthr;
765 priv->need_commit |= SME_RTS; 760 priv->need_commit |= SME_RTS;
766 761
@@ -775,9 +770,9 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
775 struct ks_wlan_private *priv = 770 struct ks_wlan_private *priv =
776 (struct ks_wlan_private *)netdev_priv(dev); 771 (struct ks_wlan_private *)netdev_priv(dev);
777 772
778 if (priv->sleep_mode == SLP_SLEEP) { 773 if (priv->sleep_mode == SLP_SLEEP)
779 return -EPERM; 774 return -EPERM;
780 } 775
781 /* for SLEEP MODE */ 776 /* for SLEEP MODE */
782 vwrq->value = priv->reg.rts; 777 vwrq->value = priv->reg.rts;
783 vwrq->disabled = (vwrq->value >= 2347); 778 vwrq->disabled = (vwrq->value >= 2347);
@@ -796,15 +791,15 @@ static int ks_wlan_set_frag(struct net_device *dev,
796 (struct ks_wlan_private *)netdev_priv(dev); 791 (struct ks_wlan_private *)netdev_priv(dev);
797 int fthr = vwrq->value; 792 int fthr = vwrq->value;
798 793
799 if (priv->sleep_mode == SLP_SLEEP) { 794 if (priv->sleep_mode == SLP_SLEEP)
800 return -EPERM; 795 return -EPERM;
801 } 796
802 /* for SLEEP MODE */ 797 /* for SLEEP MODE */
803 if (vwrq->disabled) 798 if (vwrq->disabled)
804 fthr = 2346; 799 fthr = 2346;
805 if ((fthr < 256) || (fthr > 2346)) { 800 if ((fthr < 256) || (fthr > 2346))
806 return -EINVAL; 801 return -EINVAL;
807 } 802
808 fthr &= ~0x1; /* Get an even value - is it really needed ??? */ 803 fthr &= ~0x1; /* Get an even value - is it really needed ??? */
809 priv->reg.fragment = fthr; 804 priv->reg.fragment = fthr;
810 priv->need_commit |= SME_FRAG; 805 priv->need_commit |= SME_FRAG;
@@ -821,9 +816,9 @@ static int ks_wlan_get_frag(struct net_device *dev,
821 struct ks_wlan_private *priv = 816 struct ks_wlan_private *priv =
822 (struct ks_wlan_private *)netdev_priv(dev); 817 (struct ks_wlan_private *)netdev_priv(dev);
823 818
824 if (priv->sleep_mode == SLP_SLEEP) { 819 if (priv->sleep_mode == SLP_SLEEP)
825 return -EPERM; 820 return -EPERM;
826 } 821
827 /* for SLEEP MODE */ 822 /* for SLEEP MODE */
828 vwrq->value = priv->reg.fragment; 823 vwrq->value = priv->reg.fragment;
829 vwrq->disabled = (vwrq->value >= 2346); 824 vwrq->disabled = (vwrq->value >= 2346);
@@ -835,7 +830,7 @@ static int ks_wlan_get_frag(struct net_device *dev,
835/*------------------------------------------------------------------*/ 830/*------------------------------------------------------------------*/
836/* Wireless Handler : set Mode of Operation */ 831/* Wireless Handler : set Mode of Operation */
837static int ks_wlan_set_mode(struct net_device *dev, 832static int ks_wlan_set_mode(struct net_device *dev,
838 struct iw_request_info *info, __u32 * uwrq, 833 struct iw_request_info *info, __u32 *uwrq,
839 char *extra) 834 char *extra)
840{ 835{
841 struct ks_wlan_private *priv = 836 struct ks_wlan_private *priv =
@@ -843,9 +838,9 @@ static int ks_wlan_set_mode(struct net_device *dev,
843 838
844 DPRINTK(2, "mode=%d\n", *uwrq); 839 DPRINTK(2, "mode=%d\n", *uwrq);
845 840
846 if (priv->sleep_mode == SLP_SLEEP) { 841 if (priv->sleep_mode == SLP_SLEEP)
847 return -EPERM; 842 return -EPERM;
848 } 843
849 /* for SLEEP MODE */ 844 /* for SLEEP MODE */
850 switch (*uwrq) { 845 switch (*uwrq) {
851 case IW_MODE_ADHOC: 846 case IW_MODE_ADHOC:
@@ -871,15 +866,14 @@ static int ks_wlan_set_mode(struct net_device *dev,
871/*------------------------------------------------------------------*/ 866/*------------------------------------------------------------------*/
872/* Wireless Handler : get Mode of Operation */ 867/* Wireless Handler : get Mode of Operation */
873static int ks_wlan_get_mode(struct net_device *dev, 868static int ks_wlan_get_mode(struct net_device *dev,
874 struct iw_request_info *info, __u32 * uwrq, 869 struct iw_request_info *info, __u32 *uwrq,
875 char *extra) 870 char *extra)
876{ 871{
877 struct ks_wlan_private *priv = 872 struct ks_wlan_private *priv =
878 (struct ks_wlan_private *)netdev_priv(dev); 873 (struct ks_wlan_private *)netdev_priv(dev);
879 874
880 if (priv->sleep_mode == SLP_SLEEP) { 875 if (priv->sleep_mode == SLP_SLEEP)
881 return -EPERM; 876 return -EPERM;
882 }
883 877
884 /* for SLEEP MODE */ 878 /* for SLEEP MODE */
885 /* If not managed, assume it's ad-hoc */ 879 /* If not managed, assume it's ad-hoc */
@@ -906,16 +900,15 @@ static int ks_wlan_set_encode(struct net_device *dev,
906 struct ks_wlan_private *priv = 900 struct ks_wlan_private *priv =
907 (struct ks_wlan_private *)netdev_priv(dev); 901 (struct ks_wlan_private *)netdev_priv(dev);
908 902
909 wep_key_t key; 903 struct wep_key key;
910 int index = (dwrq->flags & IW_ENCODE_INDEX); 904 int index = (dwrq->flags & IW_ENCODE_INDEX);
911 int current_index = priv->reg.wep_index; 905 int current_index = priv->reg.wep_index;
912 int i; 906 int i;
913 907
914 DPRINTK(2, "flags=%04X\n", dwrq->flags); 908 DPRINTK(2, "flags=%04X\n", dwrq->flags);
915 909
916 if (priv->sleep_mode == SLP_SLEEP) { 910 if (priv->sleep_mode == SLP_SLEEP)
917 return -EPERM; 911 return -EPERM;
918 }
919 912
920 /* for SLEEP MODE */ 913 /* for SLEEP MODE */
921 /* index check */ 914 /* index check */
@@ -959,9 +952,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
959 } 952 }
960 /* Send the key to the card */ 953 /* Send the key to the card */
961 priv->reg.wep_key[index].size = key.len; 954 priv->reg.wep_key[index].size = key.len;
962 for (i = 0; i < (priv->reg.wep_key[index].size); i++) { 955 for (i = 0; i < (priv->reg.wep_key[index].size); i++)
963 priv->reg.wep_key[index].val[i] = key.key[i]; 956 priv->reg.wep_key[index].val[i] = key.key[i];
964 } 957
965 priv->need_commit |= (SME_WEP_VAL1 << index); 958 priv->need_commit |= (SME_WEP_VAL1 << index);
966 priv->reg.wep_index = index; 959 priv->reg.wep_index = index;
967 priv->need_commit |= SME_WEP_INDEX; 960 priv->need_commit |= SME_WEP_INDEX;
@@ -973,9 +966,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
973 priv->reg.wep_key[2].size = 0; 966 priv->reg.wep_key[2].size = 0;
974 priv->reg.wep_key[3].size = 0; 967 priv->reg.wep_key[3].size = 0;
975 priv->reg.privacy_invoked = 0x00; 968 priv->reg.privacy_invoked = 0x00;
976 if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) { 969 if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
977 priv->need_commit |= SME_MODE_SET; 970 priv->need_commit |= SME_MODE_SET;
978 } 971
979 priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; 972 priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
980 wep_on_off = WEP_OFF; 973 wep_on_off = WEP_OFF;
981 priv->need_commit |= SME_WEP_FLAG; 974 priv->need_commit |= SME_WEP_FLAG;
@@ -997,14 +990,14 @@ static int ks_wlan_set_encode(struct net_device *dev,
997 priv->need_commit |= SME_WEP_FLAG; 990 priv->need_commit |= SME_WEP_FLAG;
998 991
999 if (dwrq->flags & IW_ENCODE_OPEN) { 992 if (dwrq->flags & IW_ENCODE_OPEN) {
1000 if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) { 993 if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
1001 priv->need_commit |= SME_MODE_SET; 994 priv->need_commit |= SME_MODE_SET;
1002 } 995
1003 priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; 996 priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
1004 } else if (dwrq->flags & IW_ENCODE_RESTRICTED) { 997 } else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1005 if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) { 998 if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
1006 priv->need_commit |= SME_MODE_SET; 999 priv->need_commit |= SME_MODE_SET;
1007 } 1000
1008 priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY; 1001 priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
1009 } 1002 }
1010// return -EINPROGRESS; /* Call commit handler */ 1003// return -EINPROGRESS; /* Call commit handler */
@@ -1026,9 +1019,9 @@ static int ks_wlan_get_encode(struct net_device *dev,
1026 char zeros[16]; 1019 char zeros[16];
1027 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1020 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1028 1021
1029 if (priv->sleep_mode == SLP_SLEEP) { 1022 if (priv->sleep_mode == SLP_SLEEP)
1030 return -EPERM; 1023 return -EPERM;
1031 } 1024
1032 /* for SLEEP MODE */ 1025 /* for SLEEP MODE */
1033 dwrq->flags = IW_ENCODE_DISABLED; 1026 dwrq->flags = IW_ENCODE_DISABLED;
1034 1027
@@ -1056,9 +1049,8 @@ static int ks_wlan_get_encode(struct net_device *dev,
1056 /* Copy the key to the user buffer */ 1049 /* Copy the key to the user buffer */
1057 if ((index >= 0) && (index < 4)) 1050 if ((index >= 0) && (index < 4))
1058 dwrq->length = priv->reg.wep_key[index].size; 1051 dwrq->length = priv->reg.wep_key[index].size;
1059 if (dwrq->length > 16) { 1052 if (dwrq->length > 16)
1060 dwrq->length = 0; 1053 dwrq->length = 0;
1061 }
1062#if 1 /* IW_ENCODE_NOKEY; */ 1054#if 1 /* IW_ENCODE_NOKEY; */
1063 if (dwrq->length) { 1055 if (dwrq->length) {
1064 if ((index >= 0) && (index < 4)) 1056 if ((index >= 0) && (index < 4))
@@ -1086,9 +1078,8 @@ static int ks_wlan_get_txpow(struct net_device *dev,
1086 struct iw_request_info *info, 1078 struct iw_request_info *info,
1087 struct iw_param *vwrq, char *extra) 1079 struct iw_param *vwrq, char *extra)
1088{ 1080{
1089 if (priv->sleep_mode == SLP_SLEEP) { 1081 if (priv->sleep_mode == SLP_SLEEP)
1090 return -EPERM; 1082 return -EPERM;
1091 }
1092 1083
1093 /* for SLEEP MODE */ 1084 /* for SLEEP MODE */
1094 /* Not Support */ 1085 /* Not Support */
@@ -1113,9 +1104,8 @@ static int ks_wlan_get_retry(struct net_device *dev,
1113 struct iw_request_info *info, 1104 struct iw_request_info *info,
1114 struct iw_param *vwrq, char *extra) 1105 struct iw_param *vwrq, char *extra)
1115{ 1106{
1116 if (priv->sleep_mode == SLP_SLEEP) { 1107 if (priv->sleep_mode == SLP_SLEEP)
1117 return -EPERM; 1108 return -EPERM;
1118 }
1119 1109
1120 /* for SLEEP MODE */ 1110 /* for SLEEP MODE */
1121 /* Not Support */ 1111 /* Not Support */
@@ -1139,9 +1129,9 @@ static int ks_wlan_get_range(struct net_device *dev,
1139 1129
1140 DPRINTK(2, "\n"); 1130 DPRINTK(2, "\n");
1141 1131
1142 if (priv->sleep_mode == SLP_SLEEP) { 1132 if (priv->sleep_mode == SLP_SLEEP)
1143 return -EPERM; 1133 return -EPERM;
1144 } 1134
1145 /* for SLEEP MODE */ 1135 /* for SLEEP MODE */
1146 dwrq->length = sizeof(struct iw_range); 1136 dwrq->length = sizeof(struct iw_range);
1147 memset(range, 0, sizeof(*range)); 1137 memset(range, 0, sizeof(*range));
@@ -1267,9 +1257,9 @@ static int ks_wlan_set_power(struct net_device *dev,
1267 (struct ks_wlan_private *)netdev_priv(dev); 1257 (struct ks_wlan_private *)netdev_priv(dev);
1268 short enabled; 1258 short enabled;
1269 1259
1270 if (priv->sleep_mode == SLP_SLEEP) { 1260 if (priv->sleep_mode == SLP_SLEEP)
1271 return -EPERM; 1261 return -EPERM;
1272 } 1262
1273 /* for SLEEP MODE */ 1263 /* for SLEEP MODE */
1274 enabled = vwrq->disabled ? 0 : 1; 1264 enabled = vwrq->disabled ? 0 : 1;
1275 if (enabled == 0) { /* 0 */ 1265 if (enabled == 0) { /* 0 */
@@ -1301,9 +1291,8 @@ static int ks_wlan_get_power(struct net_device *dev,
1301 struct ks_wlan_private *priv = 1291 struct ks_wlan_private *priv =
1302 (struct ks_wlan_private *)netdev_priv(dev); 1292 (struct ks_wlan_private *)netdev_priv(dev);
1303 1293
1304 if (priv->sleep_mode == SLP_SLEEP) { 1294 if (priv->sleep_mode == SLP_SLEEP)
1305 return -EPERM; 1295 return -EPERM;
1306 }
1307 /* for SLEEP MODE */ 1296 /* for SLEEP MODE */
1308 if (priv->reg.powermgt > 0) 1297 if (priv->reg.powermgt > 0)
1309 vwrq->disabled = 0; 1298 vwrq->disabled = 0;
@@ -1322,9 +1311,8 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
1322 struct ks_wlan_private *priv = 1311 struct ks_wlan_private *priv =
1323 (struct ks_wlan_private *)netdev_priv(dev); 1312 (struct ks_wlan_private *)netdev_priv(dev);
1324 1313
1325 if (priv->sleep_mode == SLP_SLEEP) { 1314 if (priv->sleep_mode == SLP_SLEEP)
1326 return -EPERM; 1315 return -EPERM;
1327 }
1328 /* for SLEEP MODE */ 1316 /* for SLEEP MODE */
1329 vwrq->qual = 0; /* not supported */ 1317 vwrq->qual = 0; /* not supported */
1330 vwrq->level = priv->wstats.qual.level; 1318 vwrq->level = priv->wstats.qual.level;
@@ -1372,9 +1360,8 @@ static int ks_wlan_get_aplist(struct net_device *dev,
1372 1360
1373 int i; 1361 int i;
1374 1362
1375 if (priv->sleep_mode == SLP_SLEEP) { 1363 if (priv->sleep_mode == SLP_SLEEP)
1376 return -EPERM; 1364 return -EPERM;
1377 }
1378 /* for SLEEP MODE */ 1365 /* for SLEEP MODE */
1379 for (i = 0; i < priv->aplist.size; i++) { 1366 for (i = 0; i < priv->aplist.size; i++) {
1380 memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]), 1367 memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
@@ -1404,11 +1391,11 @@ static int ks_wlan_set_scan(struct net_device *dev,
1404 struct ks_wlan_private *priv = 1391 struct ks_wlan_private *priv =
1405 (struct ks_wlan_private *)netdev_priv(dev); 1392 (struct ks_wlan_private *)netdev_priv(dev);
1406 struct iw_scan_req *req = NULL; 1393 struct iw_scan_req *req = NULL;
1394
1407 DPRINTK(2, "\n"); 1395 DPRINTK(2, "\n");
1408 1396
1409 if (priv->sleep_mode == SLP_SLEEP) { 1397 if (priv->sleep_mode == SLP_SLEEP)
1410 return -EPERM; 1398 return -EPERM;
1411 }
1412 1399
1413 /* for SLEEP MODE */ 1400 /* for SLEEP MODE */
1414 /* specified SSID SCAN */ 1401 /* specified SSID SCAN */
@@ -1598,11 +1585,11 @@ static int ks_wlan_get_scan(struct net_device *dev,
1598 (struct ks_wlan_private *)netdev_priv(dev); 1585 (struct ks_wlan_private *)netdev_priv(dev);
1599 int i; 1586 int i;
1600 char *current_ev = extra; 1587 char *current_ev = extra;
1588
1601 DPRINTK(2, "\n"); 1589 DPRINTK(2, "\n");
1602 1590
1603 if (priv->sleep_mode == SLP_SLEEP) { 1591 if (priv->sleep_mode == SLP_SLEEP)
1604 return -EPERM; 1592 return -EPERM;
1605 }
1606 /* for SLEEP MODE */ 1593 /* for SLEEP MODE */
1607 if (priv->sme_i.sme_flag & SME_AP_SCAN) { 1594 if (priv->sme_i.sme_flag & SME_AP_SCAN) {
1608 DPRINTK(2, "flag AP_SCAN\n"); 1595 DPRINTK(2, "flag AP_SCAN\n");
@@ -1675,9 +1662,8 @@ static int ks_wlan_set_genie(struct net_device *dev,
1675 1662
1676 DPRINTK(2, "\n"); 1663 DPRINTK(2, "\n");
1677 1664
1678 if (priv->sleep_mode == SLP_SLEEP) { 1665 if (priv->sleep_mode == SLP_SLEEP)
1679 return -EPERM; 1666 return -EPERM;
1680 }
1681 /* for SLEEP MODE */ 1667 /* for SLEEP MODE */
1682 return 0; 1668 return 0;
1683// return -EOPNOTSUPP; 1669// return -EOPNOTSUPP;
@@ -1696,26 +1682,23 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
1696 1682
1697 DPRINTK(2, "index=%d:value=%08X\n", index, value); 1683 DPRINTK(2, "index=%d:value=%08X\n", index, value);
1698 1684
1699 if (priv->sleep_mode == SLP_SLEEP) { 1685 if (priv->sleep_mode == SLP_SLEEP)
1700 return -EPERM; 1686 return -EPERM;
1701 }
1702 /* for SLEEP MODE */ 1687 /* for SLEEP MODE */
1703 switch (index) { 1688 switch (index) {
1704 case IW_AUTH_WPA_VERSION: /* 0 */ 1689 case IW_AUTH_WPA_VERSION: /* 0 */
1705 switch (value) { 1690 switch (value) {
1706 case IW_AUTH_WPA_VERSION_DISABLED: 1691 case IW_AUTH_WPA_VERSION_DISABLED:
1707 priv->wpa.version = value; 1692 priv->wpa.version = value;
1708 if (priv->wpa.rsn_enabled) { 1693 if (priv->wpa.rsn_enabled)
1709 priv->wpa.rsn_enabled = 0; 1694 priv->wpa.rsn_enabled = 0;
1710 }
1711 priv->need_commit |= SME_RSN; 1695 priv->need_commit |= SME_RSN;
1712 break; 1696 break;
1713 case IW_AUTH_WPA_VERSION_WPA: 1697 case IW_AUTH_WPA_VERSION_WPA:
1714 case IW_AUTH_WPA_VERSION_WPA2: 1698 case IW_AUTH_WPA_VERSION_WPA2:
1715 priv->wpa.version = value; 1699 priv->wpa.version = value;
1716 if (!(priv->wpa.rsn_enabled)) { 1700 if (!(priv->wpa.rsn_enabled))
1717 priv->wpa.rsn_enabled = 1; 1701 priv->wpa.rsn_enabled = 1;
1718 }
1719 priv->need_commit |= SME_RSN; 1702 priv->need_commit |= SME_RSN;
1720 break; 1703 break;
1721 default: 1704 default:
@@ -1832,11 +1815,11 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
1832 struct ks_wlan_private *priv = 1815 struct ks_wlan_private *priv =
1833 (struct ks_wlan_private *)netdev_priv(dev); 1816 (struct ks_wlan_private *)netdev_priv(dev);
1834 int index = (vwrq->flags & IW_AUTH_INDEX); 1817 int index = (vwrq->flags & IW_AUTH_INDEX);
1818
1835 DPRINTK(2, "index=%d\n", index); 1819 DPRINTK(2, "index=%d\n", index);
1836 1820
1837 if (priv->sleep_mode == SLP_SLEEP) { 1821 if (priv->sleep_mode == SLP_SLEEP)
1838 return -EPERM; 1822 return -EPERM;
1839 }
1840 1823
1841 /* for SLEEP MODE */ 1824 /* for SLEEP MODE */
1842 /* WPA (not used ?? wpa_supplicant) */ 1825 /* WPA (not used ?? wpa_supplicant) */
@@ -1886,18 +1869,17 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
1886 DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags, 1869 DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
1887 enc->ext_flags); 1870 enc->ext_flags);
1888 1871
1889 if (priv->sleep_mode == SLP_SLEEP) { 1872 if (priv->sleep_mode == SLP_SLEEP)
1890 return -EPERM; 1873 return -EPERM;
1891 } 1874
1892 /* for SLEEP MODE */ 1875 /* for SLEEP MODE */
1893 if (index < 1 || index > 4) 1876 if (index < 1 || index > 4)
1894 return -EINVAL; 1877 return -EINVAL;
1895 else 1878 else
1896 index--; 1879 index--;
1897 1880
1898 if (dwrq->flags & IW_ENCODE_DISABLED) { 1881 if (dwrq->flags & IW_ENCODE_DISABLED)
1899 priv->wpa.key[index].key_len = 0; 1882 priv->wpa.key[index].key_len = 0;
1900 }
1901 1883
1902 if (enc) { 1884 if (enc) {
1903 priv->wpa.key[index].ext_flags = enc->ext_flags; 1885 priv->wpa.key[index].ext_flags = enc->ext_flags;
@@ -1986,9 +1968,8 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
1986 struct ks_wlan_private *priv = 1968 struct ks_wlan_private *priv =
1987 (struct ks_wlan_private *)netdev_priv(dev); 1969 (struct ks_wlan_private *)netdev_priv(dev);
1988 1970
1989 if (priv->sleep_mode == SLP_SLEEP) { 1971 if (priv->sleep_mode == SLP_SLEEP)
1990 return -EPERM; 1972 return -EPERM;
1991 }
1992 1973
1993 /* for SLEEP MODE */ 1974 /* for SLEEP MODE */
1994 /* WPA (not used ?? wpa_supplicant) 1975 /* WPA (not used ?? wpa_supplicant)
@@ -2015,13 +1996,13 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
2015 1996
2016 DPRINTK(2, "\n"); 1997 DPRINTK(2, "\n");
2017 1998
2018 if (priv->sleep_mode == SLP_SLEEP) { 1999 if (priv->sleep_mode == SLP_SLEEP)
2019 return -EPERM; 2000 return -EPERM;
2020 } 2001
2021 /* for SLEEP MODE */ 2002 /* for SLEEP MODE */
2022 if (!extra) { 2003 if (!extra)
2023 return -EINVAL; 2004 return -EINVAL;
2024 } 2005
2025 pmksa = (struct iw_pmksa *)extra; 2006 pmksa = (struct iw_pmksa *)extra;
2026 DPRINTK(2, "cmd=%d\n", pmksa->cmd); 2007 DPRINTK(2, "cmd=%d\n", pmksa->cmd);
2027 2008
@@ -2141,16 +2122,16 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
2141/*------------------------------------------------------------------*/ 2122/*------------------------------------------------------------------*/
2142/* Private handler : set stop request */ 2123/* Private handler : set stop request */
2143static int ks_wlan_set_stop_request(struct net_device *dev, 2124static int ks_wlan_set_stop_request(struct net_device *dev,
2144 struct iw_request_info *info, __u32 * uwrq, 2125 struct iw_request_info *info, __u32 *uwrq,
2145 char *extra) 2126 char *extra)
2146{ 2127{
2147 struct ks_wlan_private *priv = 2128 struct ks_wlan_private *priv =
2148 (struct ks_wlan_private *)netdev_priv(dev); 2129 (struct ks_wlan_private *)netdev_priv(dev);
2149 DPRINTK(2, "\n"); 2130 DPRINTK(2, "\n");
2150 2131
2151 if (priv->sleep_mode == SLP_SLEEP) { 2132 if (priv->sleep_mode == SLP_SLEEP)
2152 return -EPERM; 2133 return -EPERM;
2153 } 2134
2154 /* for SLEEP MODE */ 2135 /* for SLEEP MODE */
2155 if (!(*uwrq)) 2136 if (!(*uwrq))
2156 return -EINVAL; 2137 return -EINVAL;
@@ -2173,15 +2154,14 @@ static int ks_wlan_set_mlme(struct net_device *dev,
2173 2154
2174 DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code); 2155 DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
2175 2156
2176 if (priv->sleep_mode == SLP_SLEEP) { 2157 if (priv->sleep_mode == SLP_SLEEP)
2177 return -EPERM; 2158 return -EPERM;
2178 } 2159
2179 /* for SLEEP MODE */ 2160 /* for SLEEP MODE */
2180 switch (mlme->cmd) { 2161 switch (mlme->cmd) {
2181 case IW_MLME_DEAUTH: 2162 case IW_MLME_DEAUTH:
2182 if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) { 2163 if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
2183 return 0; 2164 return 0;
2184 }
2185 case IW_MLME_DISASSOC: 2165 case IW_MLME_DISASSOC:
2186 mode = 1; 2166 mode = 1;
2187 return ks_wlan_set_stop_request(dev, NULL, &mode, NULL); 2167 return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2207,14 +2187,14 @@ static int ks_wlan_get_firmware_version(struct net_device *dev,
2207/*------------------------------------------------------------------*/ 2187/*------------------------------------------------------------------*/
2208/* Private handler : set force disconnect status */ 2188/* Private handler : set force disconnect status */
2209static int ks_wlan_set_detach(struct net_device *dev, 2189static int ks_wlan_set_detach(struct net_device *dev,
2210 struct iw_request_info *info, __u32 * uwrq, 2190 struct iw_request_info *info, __u32 *uwrq,
2211 char *extra) 2191 char *extra)
2212{ 2192{
2213 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2193 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2214 2194
2215 if (priv->sleep_mode == SLP_SLEEP) { 2195 if (priv->sleep_mode == SLP_SLEEP)
2216 return -EPERM; 2196 return -EPERM;
2217 } 2197
2218 /* for SLEEP MODE */ 2198 /* for SLEEP MODE */
2219 if (*uwrq == CONNECT_STATUS) { /* 0 */ 2199 if (*uwrq == CONNECT_STATUS) { /* 0 */
2220 priv->connect_status &= ~FORCE_DISCONNECT; 2200 priv->connect_status &= ~FORCE_DISCONNECT;
@@ -2232,14 +2212,14 @@ static int ks_wlan_set_detach(struct net_device *dev,
2232/*------------------------------------------------------------------*/ 2212/*------------------------------------------------------------------*/
2233/* Private handler : get force disconnect status */ 2213/* Private handler : get force disconnect status */
2234static int ks_wlan_get_detach(struct net_device *dev, 2214static int ks_wlan_get_detach(struct net_device *dev,
2235 struct iw_request_info *info, __u32 * uwrq, 2215 struct iw_request_info *info, __u32 *uwrq,
2236 char *extra) 2216 char *extra)
2237{ 2217{
2238 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2218 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2239 2219
2240 if (priv->sleep_mode == SLP_SLEEP) { 2220 if (priv->sleep_mode == SLP_SLEEP)
2241 return -EPERM; 2221 return -EPERM;
2242 } 2222
2243 /* for SLEEP MODE */ 2223 /* for SLEEP MODE */
2244 *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0); 2224 *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
2245 return 0; 2225 return 0;
@@ -2248,14 +2228,14 @@ static int ks_wlan_get_detach(struct net_device *dev,
2248/*------------------------------------------------------------------*/ 2228/*------------------------------------------------------------------*/
2249/* Private handler : get connect status */ 2229/* Private handler : get connect status */
2250static int ks_wlan_get_connect(struct net_device *dev, 2230static int ks_wlan_get_connect(struct net_device *dev,
2251 struct iw_request_info *info, __u32 * uwrq, 2231 struct iw_request_info *info, __u32 *uwrq,
2252 char *extra) 2232 char *extra)
2253{ 2233{
2254 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2234 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2255 2235
2256 if (priv->sleep_mode == SLP_SLEEP) { 2236 if (priv->sleep_mode == SLP_SLEEP)
2257 return -EPERM; 2237 return -EPERM;
2258 } 2238
2259 /* for SLEEP MODE */ 2239 /* for SLEEP MODE */
2260 *uwrq = (priv->connect_status & CONNECT_STATUS_MASK); 2240 *uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
2261 return 0; 2241 return 0;
@@ -2265,15 +2245,15 @@ static int ks_wlan_get_connect(struct net_device *dev,
2265/*------------------------------------------------------------------*/ 2245/*------------------------------------------------------------------*/
2266/* Private handler : set preamble */ 2246/* Private handler : set preamble */
2267static int ks_wlan_set_preamble(struct net_device *dev, 2247static int ks_wlan_set_preamble(struct net_device *dev,
2268 struct iw_request_info *info, __u32 * uwrq, 2248 struct iw_request_info *info, __u32 *uwrq,
2269 char *extra) 2249 char *extra)
2270{ 2250{
2271 struct ks_wlan_private *priv = 2251 struct ks_wlan_private *priv =
2272 (struct ks_wlan_private *)netdev_priv(dev); 2252 (struct ks_wlan_private *)netdev_priv(dev);
2273 2253
2274 if (priv->sleep_mode == SLP_SLEEP) { 2254 if (priv->sleep_mode == SLP_SLEEP)
2275 return -EPERM; 2255 return -EPERM;
2276 } 2256
2277 /* for SLEEP MODE */ 2257 /* for SLEEP MODE */
2278 if (*uwrq == LONG_PREAMBLE) { /* 0 */ 2258 if (*uwrq == LONG_PREAMBLE) { /* 0 */
2279 priv->reg.preamble = LONG_PREAMBLE; 2259 priv->reg.preamble = LONG_PREAMBLE;
@@ -2290,15 +2270,15 @@ static int ks_wlan_set_preamble(struct net_device *dev,
2290/*------------------------------------------------------------------*/ 2270/*------------------------------------------------------------------*/
2291/* Private handler : get preamble */ 2271/* Private handler : get preamble */
2292static int ks_wlan_get_preamble(struct net_device *dev, 2272static int ks_wlan_get_preamble(struct net_device *dev,
2293 struct iw_request_info *info, __u32 * uwrq, 2273 struct iw_request_info *info, __u32 *uwrq,
2294 char *extra) 2274 char *extra)
2295{ 2275{
2296 struct ks_wlan_private *priv = 2276 struct ks_wlan_private *priv =
2297 (struct ks_wlan_private *)netdev_priv(dev); 2277 (struct ks_wlan_private *)netdev_priv(dev);
2298 2278
2299 if (priv->sleep_mode == SLP_SLEEP) { 2279 if (priv->sleep_mode == SLP_SLEEP)
2300 return -EPERM; 2280 return -EPERM;
2301 } 2281
2302 /* for SLEEP MODE */ 2282 /* for SLEEP MODE */
2303 *uwrq = priv->reg.preamble; 2283 *uwrq = priv->reg.preamble;
2304 return 0; 2284 return 0;
@@ -2307,15 +2287,15 @@ static int ks_wlan_get_preamble(struct net_device *dev,
2307/*------------------------------------------------------------------*/ 2287/*------------------------------------------------------------------*/
2308/* Private handler : set power save mode */ 2288/* Private handler : set power save mode */
2309static int ks_wlan_set_powermgt(struct net_device *dev, 2289static int ks_wlan_set_powermgt(struct net_device *dev,
2310 struct iw_request_info *info, __u32 * uwrq, 2290 struct iw_request_info *info, __u32 *uwrq,
2311 char *extra) 2291 char *extra)
2312{ 2292{
2313 struct ks_wlan_private *priv = 2293 struct ks_wlan_private *priv =
2314 (struct ks_wlan_private *)netdev_priv(dev); 2294 (struct ks_wlan_private *)netdev_priv(dev);
2315 2295
2316 if (priv->sleep_mode == SLP_SLEEP) { 2296 if (priv->sleep_mode == SLP_SLEEP)
2317 return -EPERM; 2297 return -EPERM;
2318 } 2298
2319 /* for SLEEP MODE */ 2299 /* for SLEEP MODE */
2320 if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */ 2300 if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
2321 priv->reg.powermgt = POWMGT_ACTIVE_MODE; 2301 priv->reg.powermgt = POWMGT_ACTIVE_MODE;
@@ -2340,15 +2320,15 @@ static int ks_wlan_set_powermgt(struct net_device *dev,
2340/*------------------------------------------------------------------*/ 2320/*------------------------------------------------------------------*/
2341/* Private handler : get power save made */ 2321/* Private handler : get power save made */
2342static int ks_wlan_get_powermgt(struct net_device *dev, 2322static int ks_wlan_get_powermgt(struct net_device *dev,
2343 struct iw_request_info *info, __u32 * uwrq, 2323 struct iw_request_info *info, __u32 *uwrq,
2344 char *extra) 2324 char *extra)
2345{ 2325{
2346 struct ks_wlan_private *priv = 2326 struct ks_wlan_private *priv =
2347 (struct ks_wlan_private *)netdev_priv(dev); 2327 (struct ks_wlan_private *)netdev_priv(dev);
2348 2328
2349 if (priv->sleep_mode == SLP_SLEEP) { 2329 if (priv->sleep_mode == SLP_SLEEP)
2350 return -EPERM; 2330 return -EPERM;
2351 } 2331
2352 /* for SLEEP MODE */ 2332 /* for SLEEP MODE */
2353 *uwrq = priv->reg.powermgt; 2333 *uwrq = priv->reg.powermgt;
2354 return 0; 2334 return 0;
@@ -2357,15 +2337,14 @@ static int ks_wlan_get_powermgt(struct net_device *dev,
2357/*------------------------------------------------------------------*/ 2337/*------------------------------------------------------------------*/
2358/* Private handler : set scan type */ 2338/* Private handler : set scan type */
2359static int ks_wlan_set_scan_type(struct net_device *dev, 2339static int ks_wlan_set_scan_type(struct net_device *dev,
2360 struct iw_request_info *info, __u32 * uwrq, 2340 struct iw_request_info *info, __u32 *uwrq,
2361 char *extra) 2341 char *extra)
2362{ 2342{
2363 struct ks_wlan_private *priv = 2343 struct ks_wlan_private *priv =
2364 (struct ks_wlan_private *)netdev_priv(dev); 2344 (struct ks_wlan_private *)netdev_priv(dev);
2365 2345
2366 if (priv->sleep_mode == SLP_SLEEP) { 2346 if (priv->sleep_mode == SLP_SLEEP)
2367 return -EPERM; 2347 return -EPERM;
2368 }
2369 /* for SLEEP MODE */ 2348 /* for SLEEP MODE */
2370 if (*uwrq == ACTIVE_SCAN) { /* 0 */ 2349 if (*uwrq == ACTIVE_SCAN) { /* 0 */
2371 priv->reg.scan_type = ACTIVE_SCAN; 2350 priv->reg.scan_type = ACTIVE_SCAN;
@@ -2380,15 +2359,14 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
2380/*------------------------------------------------------------------*/ 2359/*------------------------------------------------------------------*/
2381/* Private handler : get scan type */ 2360/* Private handler : get scan type */
2382static int ks_wlan_get_scan_type(struct net_device *dev, 2361static int ks_wlan_get_scan_type(struct net_device *dev,
2383 struct iw_request_info *info, __u32 * uwrq, 2362 struct iw_request_info *info, __u32 *uwrq,
2384 char *extra) 2363 char *extra)
2385{ 2364{
2386 struct ks_wlan_private *priv = 2365 struct ks_wlan_private *priv =
2387 (struct ks_wlan_private *)netdev_priv(dev); 2366 (struct ks_wlan_private *)netdev_priv(dev);
2388 2367
2389 if (priv->sleep_mode == SLP_SLEEP) { 2368 if (priv->sleep_mode == SLP_SLEEP)
2390 return -EPERM; 2369 return -EPERM;
2391 }
2392 /* for SLEEP MODE */ 2370 /* for SLEEP MODE */
2393 *uwrq = priv->reg.scan_type; 2371 *uwrq = priv->reg.scan_type;
2394 return 0; 2372 return 0;
@@ -2404,9 +2382,8 @@ static int ks_wlan_data_write(struct net_device *dev,
2404 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2382 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2405 unsigned char *wbuff = NULL; 2383 unsigned char *wbuff = NULL;
2406 2384
2407 if (priv->sleep_mode == SLP_SLEEP) { 2385 if (priv->sleep_mode == SLP_SLEEP)
2408 return -EPERM; 2386 return -EPERM;
2409 }
2410 /* for SLEEP MODE */ 2387 /* for SLEEP MODE */
2411 wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC); 2388 wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
2412 if (!wbuff) 2389 if (!wbuff)
@@ -2428,9 +2405,8 @@ static int ks_wlan_data_read(struct net_device *dev,
2428 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2405 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2429 unsigned short read_length; 2406 unsigned short read_length;
2430 2407
2431 if (priv->sleep_mode == SLP_SLEEP) { 2408 if (priv->sleep_mode == SLP_SLEEP)
2432 return -EPERM; 2409 return -EPERM;
2433 }
2434 /* for SLEEP MODE */ 2410 /* for SLEEP MODE */
2435 if (!atomic_read(&priv->event_count)) { 2411 if (!atomic_read(&priv->event_count)) {
2436 if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */ 2412 if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
@@ -2488,9 +2464,8 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
2488 int i, j, len = 0; 2464 int i, j, len = 0;
2489 char tmp[WEP_ASCII_BUFF_SIZE]; 2465 char tmp[WEP_ASCII_BUFF_SIZE];
2490 2466
2491 if (priv->sleep_mode == SLP_SLEEP) { 2467 if (priv->sleep_mode == SLP_SLEEP)
2492 return -EPERM; 2468 return -EPERM;
2493 }
2494 /* for SLEEP MODE */ 2469 /* for SLEEP MODE */
2495 strcpy(tmp, " WEP keys ASCII \n"); 2470 strcpy(tmp, " WEP keys ASCII \n");
2496 len += strlen(" WEP keys ASCII \n"); 2471 len += strlen(" WEP keys ASCII \n");
@@ -2531,19 +2506,18 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
2531/*------------------------------------------------------------------*/ 2506/*------------------------------------------------------------------*/
2532/* Private handler : set beacon lost count */ 2507/* Private handler : set beacon lost count */
2533static int ks_wlan_set_beacon_lost(struct net_device *dev, 2508static int ks_wlan_set_beacon_lost(struct net_device *dev,
2534 struct iw_request_info *info, __u32 * uwrq, 2509 struct iw_request_info *info, __u32 *uwrq,
2535 char *extra) 2510 char *extra)
2536{ 2511{
2537 struct ks_wlan_private *priv = 2512 struct ks_wlan_private *priv =
2538 (struct ks_wlan_private *)netdev_priv(dev); 2513 (struct ks_wlan_private *)netdev_priv(dev);
2539 2514
2540 if (priv->sleep_mode == SLP_SLEEP) { 2515 if (priv->sleep_mode == SLP_SLEEP)
2541 return -EPERM; 2516 return -EPERM;
2542 }
2543 /* for SLEEP MODE */ 2517 /* for SLEEP MODE */
2544 if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) { 2518 if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX)
2545 priv->reg.beacon_lost_count = *uwrq; 2519 priv->reg.beacon_lost_count = *uwrq;
2546 } else 2520 else
2547 return -EINVAL; 2521 return -EINVAL;
2548 2522
2549 if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) { 2523 if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
@@ -2556,15 +2530,14 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
2556/*------------------------------------------------------------------*/ 2530/*------------------------------------------------------------------*/
2557/* Private handler : get beacon lost count */ 2531/* Private handler : get beacon lost count */
2558static int ks_wlan_get_beacon_lost(struct net_device *dev, 2532static int ks_wlan_get_beacon_lost(struct net_device *dev,
2559 struct iw_request_info *info, __u32 * uwrq, 2533 struct iw_request_info *info, __u32 *uwrq,
2560 char *extra) 2534 char *extra)
2561{ 2535{
2562 struct ks_wlan_private *priv = 2536 struct ks_wlan_private *priv =
2563 (struct ks_wlan_private *)netdev_priv(dev); 2537 (struct ks_wlan_private *)netdev_priv(dev);
2564 2538
2565 if (priv->sleep_mode == SLP_SLEEP) { 2539 if (priv->sleep_mode == SLP_SLEEP)
2566 return -EPERM; 2540 return -EPERM;
2567 }
2568 /* for SLEEP MODE */ 2541 /* for SLEEP MODE */
2569 *uwrq = priv->reg.beacon_lost_count; 2542 *uwrq = priv->reg.beacon_lost_count;
2570 return 0; 2543 return 0;
@@ -2573,15 +2546,14 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
2573/*------------------------------------------------------------------*/ 2546/*------------------------------------------------------------------*/
2574/* Private handler : set phy type */ 2547/* Private handler : set phy type */
2575static int ks_wlan_set_phy_type(struct net_device *dev, 2548static int ks_wlan_set_phy_type(struct net_device *dev,
2576 struct iw_request_info *info, __u32 * uwrq, 2549 struct iw_request_info *info, __u32 *uwrq,
2577 char *extra) 2550 char *extra)
2578{ 2551{
2579 struct ks_wlan_private *priv = 2552 struct ks_wlan_private *priv =
2580 (struct ks_wlan_private *)netdev_priv(dev); 2553 (struct ks_wlan_private *)netdev_priv(dev);
2581 2554
2582 if (priv->sleep_mode == SLP_SLEEP) { 2555 if (priv->sleep_mode == SLP_SLEEP)
2583 return -EPERM; 2556 return -EPERM;
2584 }
2585 /* for SLEEP MODE */ 2557 /* for SLEEP MODE */
2586 if (*uwrq == D_11B_ONLY_MODE) { /* 0 */ 2558 if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
2587 priv->reg.phy_type = D_11B_ONLY_MODE; 2559 priv->reg.phy_type = D_11B_ONLY_MODE;
@@ -2599,15 +2571,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
2599/*------------------------------------------------------------------*/ 2571/*------------------------------------------------------------------*/
2600/* Private handler : get phy type */ 2572/* Private handler : get phy type */
2601static int ks_wlan_get_phy_type(struct net_device *dev, 2573static int ks_wlan_get_phy_type(struct net_device *dev,
2602 struct iw_request_info *info, __u32 * uwrq, 2574 struct iw_request_info *info, __u32 *uwrq,
2603 char *extra) 2575 char *extra)
2604{ 2576{
2605 struct ks_wlan_private *priv = 2577 struct ks_wlan_private *priv =
2606 (struct ks_wlan_private *)netdev_priv(dev); 2578 (struct ks_wlan_private *)netdev_priv(dev);
2607 2579
2608 if (priv->sleep_mode == SLP_SLEEP) { 2580 if (priv->sleep_mode == SLP_SLEEP)
2609 return -EPERM; 2581 return -EPERM;
2610 }
2611 /* for SLEEP MODE */ 2582 /* for SLEEP MODE */
2612 *uwrq = priv->reg.phy_type; 2583 *uwrq = priv->reg.phy_type;
2613 return 0; 2584 return 0;
@@ -2616,15 +2587,14 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
2616/*------------------------------------------------------------------*/ 2587/*------------------------------------------------------------------*/
2617/* Private handler : set cts mode */ 2588/* Private handler : set cts mode */
2618static int ks_wlan_set_cts_mode(struct net_device *dev, 2589static int ks_wlan_set_cts_mode(struct net_device *dev,
2619 struct iw_request_info *info, __u32 * uwrq, 2590 struct iw_request_info *info, __u32 *uwrq,
2620 char *extra) 2591 char *extra)
2621{ 2592{
2622 struct ks_wlan_private *priv = 2593 struct ks_wlan_private *priv =
2623 (struct ks_wlan_private *)netdev_priv(dev); 2594 (struct ks_wlan_private *)netdev_priv(dev);
2624 2595
2625 if (priv->sleep_mode == SLP_SLEEP) { 2596 if (priv->sleep_mode == SLP_SLEEP)
2626 return -EPERM; 2597 return -EPERM;
2627 }
2628 /* for SLEEP MODE */ 2598 /* for SLEEP MODE */
2629 if (*uwrq == CTS_MODE_FALSE) { /* 0 */ 2599 if (*uwrq == CTS_MODE_FALSE) { /* 0 */
2630 priv->reg.cts_mode = CTS_MODE_FALSE; 2600 priv->reg.cts_mode = CTS_MODE_FALSE;
@@ -2644,15 +2614,14 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
2644/*------------------------------------------------------------------*/ 2614/*------------------------------------------------------------------*/
2645/* Private handler : get cts mode */ 2615/* Private handler : get cts mode */
2646static int ks_wlan_get_cts_mode(struct net_device *dev, 2616static int ks_wlan_get_cts_mode(struct net_device *dev,
2647 struct iw_request_info *info, __u32 * uwrq, 2617 struct iw_request_info *info, __u32 *uwrq,
2648 char *extra) 2618 char *extra)
2649{ 2619{
2650 struct ks_wlan_private *priv = 2620 struct ks_wlan_private *priv =
2651 (struct ks_wlan_private *)netdev_priv(dev); 2621 (struct ks_wlan_private *)netdev_priv(dev);
2652 2622
2653 if (priv->sleep_mode == SLP_SLEEP) { 2623 if (priv->sleep_mode == SLP_SLEEP)
2654 return -EPERM; 2624 return -EPERM;
2655 }
2656 /* for SLEEP MODE */ 2625 /* for SLEEP MODE */
2657 *uwrq = priv->reg.cts_mode; 2626 *uwrq = priv->reg.cts_mode;
2658 return 0; 2627 return 0;
@@ -2662,7 +2631,7 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
2662/* Private handler : set sleep mode */ 2631/* Private handler : set sleep mode */
2663static int ks_wlan_set_sleep_mode(struct net_device *dev, 2632static int ks_wlan_set_sleep_mode(struct net_device *dev,
2664 struct iw_request_info *info, 2633 struct iw_request_info *info,
2665 __u32 * uwrq, char *extra) 2634 __u32 *uwrq, char *extra)
2666{ 2635{
2667 struct ks_wlan_private *priv = 2636 struct ks_wlan_private *priv =
2668 (struct ks_wlan_private *)netdev_priv(dev); 2637 (struct ks_wlan_private *)netdev_priv(dev);
@@ -2692,7 +2661,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
2692/* Private handler : get sleep mode */ 2661/* Private handler : get sleep mode */
2693static int ks_wlan_get_sleep_mode(struct net_device *dev, 2662static int ks_wlan_get_sleep_mode(struct net_device *dev,
2694 struct iw_request_info *info, 2663 struct iw_request_info *info,
2695 __u32 * uwrq, char *extra) 2664 __u32 *uwrq, char *extra)
2696{ 2665{
2697 struct ks_wlan_private *priv = 2666 struct ks_wlan_private *priv =
2698 (struct ks_wlan_private *)netdev_priv(dev); 2667 (struct ks_wlan_private *)netdev_priv(dev);
@@ -2708,16 +2677,15 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
2708/* Private handler : set phy information timer */ 2677/* Private handler : set phy information timer */
2709static int ks_wlan_set_phy_information_timer(struct net_device *dev, 2678static int ks_wlan_set_phy_information_timer(struct net_device *dev,
2710 struct iw_request_info *info, 2679 struct iw_request_info *info,
2711 __u32 * uwrq, char *extra) 2680 __u32 *uwrq, char *extra)
2712{ 2681{
2713 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2682 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2714 2683
2715 if (priv->sleep_mode == SLP_SLEEP) { 2684 if (priv->sleep_mode == SLP_SLEEP)
2716 return -EPERM; 2685 return -EPERM;
2717 }
2718 /* for SLEEP MODE */ 2686 /* for SLEEP MODE */
2719 if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */ 2687 if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
2720 priv->reg.phy_info_timer = (uint16_t) * uwrq; 2688 priv->reg.phy_info_timer = (uint16_t)*uwrq;
2721 else 2689 else
2722 return -EINVAL; 2690 return -EINVAL;
2723 2691
@@ -2730,13 +2698,12 @@ static int ks_wlan_set_phy_information_timer(struct net_device *dev,
2730/* Private handler : get phy information timer */ 2698/* Private handler : get phy information timer */
2731static int ks_wlan_get_phy_information_timer(struct net_device *dev, 2699static int ks_wlan_get_phy_information_timer(struct net_device *dev,
2732 struct iw_request_info *info, 2700 struct iw_request_info *info,
2733 __u32 * uwrq, char *extra) 2701 __u32 *uwrq, char *extra)
2734{ 2702{
2735 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2703 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2736 2704
2737 if (priv->sleep_mode == SLP_SLEEP) { 2705 if (priv->sleep_mode == SLP_SLEEP)
2738 return -EPERM; 2706 return -EPERM;
2739 }
2740 /* for SLEEP MODE */ 2707 /* for SLEEP MODE */
2741 *uwrq = priv->reg.phy_info_timer; 2708 *uwrq = priv->reg.phy_info_timer;
2742 return 0; 2709 return 0;
@@ -2747,16 +2714,15 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev,
2747/*------------------------------------------------------------------*/ 2714/*------------------------------------------------------------------*/
2748/* Private handler : set WPS enable */ 2715/* Private handler : set WPS enable */
2749static int ks_wlan_set_wps_enable(struct net_device *dev, 2716static int ks_wlan_set_wps_enable(struct net_device *dev,
2750 struct iw_request_info *info, __u32 * uwrq, 2717 struct iw_request_info *info, __u32 *uwrq,
2751 char *extra) 2718 char *extra)
2752{ 2719{
2753 struct ks_wlan_private *priv = 2720 struct ks_wlan_private *priv =
2754 (struct ks_wlan_private *)netdev_priv(dev); 2721 (struct ks_wlan_private *)netdev_priv(dev);
2755 DPRINTK(2, "\n"); 2722 DPRINTK(2, "\n");
2756 2723
2757 if (priv->sleep_mode == SLP_SLEEP) { 2724 if (priv->sleep_mode == SLP_SLEEP)
2758 return -EPERM; 2725 return -EPERM;
2759 }
2760 /* for SLEEP MODE */ 2726 /* for SLEEP MODE */
2761 if (*uwrq == 0 || *uwrq == 1) 2727 if (*uwrq == 0 || *uwrq == 1)
2762 priv->wps.wps_enabled = *uwrq; 2728 priv->wps.wps_enabled = *uwrq;
@@ -2771,16 +2737,15 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
2771/*------------------------------------------------------------------*/ 2737/*------------------------------------------------------------------*/
2772/* Private handler : get WPS enable */ 2738/* Private handler : get WPS enable */
2773static int ks_wlan_get_wps_enable(struct net_device *dev, 2739static int ks_wlan_get_wps_enable(struct net_device *dev,
2774 struct iw_request_info *info, __u32 * uwrq, 2740 struct iw_request_info *info, __u32 *uwrq,
2775 char *extra) 2741 char *extra)
2776{ 2742{
2777 struct ks_wlan_private *priv = 2743 struct ks_wlan_private *priv =
2778 (struct ks_wlan_private *)netdev_priv(dev); 2744 (struct ks_wlan_private *)netdev_priv(dev);
2779 DPRINTK(2, "\n"); 2745 DPRINTK(2, "\n");
2780 2746
2781 if (priv->sleep_mode == SLP_SLEEP) { 2747 if (priv->sleep_mode == SLP_SLEEP)
2782 return -EPERM; 2748 return -EPERM;
2783 }
2784 /* for SLEEP MODE */ 2749 /* for SLEEP MODE */
2785 *uwrq = priv->wps.wps_enabled; 2750 *uwrq = priv->wps.wps_enabled;
2786 netdev_info(dev, "return=%d\n", *uwrq); 2751 netdev_info(dev, "return=%d\n", *uwrq);
@@ -2801,16 +2766,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
2801 2766
2802 DPRINTK(2, "\n"); 2767 DPRINTK(2, "\n");
2803 2768
2804 if (priv->sleep_mode == SLP_SLEEP) { 2769 if (priv->sleep_mode == SLP_SLEEP)
2805 return -EPERM; 2770 return -EPERM;
2806 }
2807 /* for SLEEP MODE */ 2771 /* for SLEEP MODE */
2808 DPRINTK(2, "dwrq->length=%d\n", dwrq->length); 2772 DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
2809 2773
2810 /* length check */ 2774 /* length check */
2811 if (p[1] + 2 != dwrq->length || dwrq->length > 256) { 2775 if (p[1] + 2 != dwrq->length || dwrq->length > 256)
2812 return -EINVAL; 2776 return -EINVAL;
2813 }
2814 2777
2815 priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */ 2778 priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
2816 len = p[1] + 2; /* IE header + IE */ 2779 len = p[1] + 2; /* IE header + IE */
@@ -2833,14 +2796,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
2833/* Private handler : get WPS probe req */ 2796/* Private handler : get WPS probe req */
2834static int ks_wlan_get_wps_probe_req(struct net_device *dev, 2797static int ks_wlan_get_wps_probe_req(struct net_device *dev,
2835 struct iw_request_info *info, 2798 struct iw_request_info *info,
2836 __u32 * uwrq, char *extra) 2799 __u32 *uwrq, char *extra)
2837{ 2800{
2838 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2801 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2802
2839 DPRINTK(2, "\n"); 2803 DPRINTK(2, "\n");
2840 2804
2841 if (priv->sleep_mode == SLP_SLEEP) { 2805 if (priv->sleep_mode == SLP_SLEEP)
2842 return -EPERM; 2806 return -EPERM;
2843 }
2844 /* for SLEEP MODE */ 2807 /* for SLEEP MODE */
2845 return 0; 2808 return 0;
2846} 2809}
@@ -2850,18 +2813,17 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev,
2850/*------------------------------------------------------------------*/ 2813/*------------------------------------------------------------------*/
2851/* Private handler : set tx gain control value */ 2814/* Private handler : set tx gain control value */
2852static int ks_wlan_set_tx_gain(struct net_device *dev, 2815static int ks_wlan_set_tx_gain(struct net_device *dev,
2853 struct iw_request_info *info, __u32 * uwrq, 2816 struct iw_request_info *info, __u32 *uwrq,
2854 char *extra) 2817 char *extra)
2855{ 2818{
2856 struct ks_wlan_private *priv = 2819 struct ks_wlan_private *priv =
2857 (struct ks_wlan_private *)netdev_priv(dev); 2820 (struct ks_wlan_private *)netdev_priv(dev);
2858 2821
2859 if (priv->sleep_mode == SLP_SLEEP) { 2822 if (priv->sleep_mode == SLP_SLEEP)
2860 return -EPERM; 2823 return -EPERM;
2861 }
2862 /* for SLEEP MODE */ 2824 /* for SLEEP MODE */
2863 if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */ 2825 if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
2864 priv->gain.TxGain = (uint8_t) * uwrq; 2826 priv->gain.TxGain = (uint8_t)*uwrq;
2865 else 2827 else
2866 return -EINVAL; 2828 return -EINVAL;
2867 2829
@@ -2877,15 +2839,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
2877/*------------------------------------------------------------------*/ 2839/*------------------------------------------------------------------*/
2878/* Private handler : get tx gain control value */ 2840/* Private handler : get tx gain control value */
2879static int ks_wlan_get_tx_gain(struct net_device *dev, 2841static int ks_wlan_get_tx_gain(struct net_device *dev,
2880 struct iw_request_info *info, __u32 * uwrq, 2842 struct iw_request_info *info, __u32 *uwrq,
2881 char *extra) 2843 char *extra)
2882{ 2844{
2883 struct ks_wlan_private *priv = 2845 struct ks_wlan_private *priv =
2884 (struct ks_wlan_private *)netdev_priv(dev); 2846 (struct ks_wlan_private *)netdev_priv(dev);
2885 2847
2886 if (priv->sleep_mode == SLP_SLEEP) { 2848 if (priv->sleep_mode == SLP_SLEEP)
2887 return -EPERM; 2849 return -EPERM;
2888 }
2889 /* for SLEEP MODE */ 2850 /* for SLEEP MODE */
2890 *uwrq = priv->gain.TxGain; 2851 *uwrq = priv->gain.TxGain;
2891 hostif_sme_enqueue(priv, SME_GET_GAIN); 2852 hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2895,18 +2856,17 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
2895/*------------------------------------------------------------------*/ 2856/*------------------------------------------------------------------*/
2896/* Private handler : set rx gain control value */ 2857/* Private handler : set rx gain control value */
2897static int ks_wlan_set_rx_gain(struct net_device *dev, 2858static int ks_wlan_set_rx_gain(struct net_device *dev,
2898 struct iw_request_info *info, __u32 * uwrq, 2859 struct iw_request_info *info, __u32 *uwrq,
2899 char *extra) 2860 char *extra)
2900{ 2861{
2901 struct ks_wlan_private *priv = 2862 struct ks_wlan_private *priv =
2902 (struct ks_wlan_private *)netdev_priv(dev); 2863 (struct ks_wlan_private *)netdev_priv(dev);
2903 2864
2904 if (priv->sleep_mode == SLP_SLEEP) { 2865 if (priv->sleep_mode == SLP_SLEEP)
2905 return -EPERM; 2866 return -EPERM;
2906 }
2907 /* for SLEEP MODE */ 2867 /* for SLEEP MODE */
2908 if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */ 2868 if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
2909 priv->gain.RxGain = (uint8_t) * uwrq; 2869 priv->gain.RxGain = (uint8_t)*uwrq;
2910 else 2870 else
2911 return -EINVAL; 2871 return -EINVAL;
2912 2872
@@ -2922,15 +2882,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
2922/*------------------------------------------------------------------*/ 2882/*------------------------------------------------------------------*/
2923/* Private handler : get rx gain control value */ 2883/* Private handler : get rx gain control value */
2924static int ks_wlan_get_rx_gain(struct net_device *dev, 2884static int ks_wlan_get_rx_gain(struct net_device *dev,
2925 struct iw_request_info *info, __u32 * uwrq, 2885 struct iw_request_info *info, __u32 *uwrq,
2926 char *extra) 2886 char *extra)
2927{ 2887{
2928 struct ks_wlan_private *priv = 2888 struct ks_wlan_private *priv =
2929 (struct ks_wlan_private *)netdev_priv(dev); 2889 (struct ks_wlan_private *)netdev_priv(dev);
2930 2890
2931 if (priv->sleep_mode == SLP_SLEEP) { 2891 if (priv->sleep_mode == SLP_SLEEP)
2932 return -EPERM; 2892 return -EPERM;
2933 }
2934 /* for SLEEP MODE */ 2893 /* for SLEEP MODE */
2935 *uwrq = priv->gain.RxGain; 2894 *uwrq = priv->gain.RxGain;
2936 hostif_sme_enqueue(priv, SME_GET_GAIN); 2895 hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2941,17 +2900,16 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
2941/*------------------------------------------------------------------*/ 2900/*------------------------------------------------------------------*/
2942/* Private handler : set region value */ 2901/* Private handler : set region value */
2943static int ks_wlan_set_region(struct net_device *dev, 2902static int ks_wlan_set_region(struct net_device *dev,
2944 struct iw_request_info *info, __u32 * uwrq, 2903 struct iw_request_info *info, __u32 *uwrq,
2945 char *extra) 2904 char *extra)
2946{ 2905{
2947 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; 2906 struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
2948 2907
2949 if (priv->sleep_mode == SLP_SLEEP) { 2908 if (priv->sleep_mode == SLP_SLEEP)
2950 return -EPERM; 2909 return -EPERM;
2951 }
2952 /* for SLEEP MODE */ 2910 /* for SLEEP MODE */
2953 if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */ 2911 if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
2954 priv->region = (uint8_t) * uwrq; 2912 priv->region = (uint8_t)*uwrq;
2955 else 2913 else
2956 return -EINVAL; 2914 return -EINVAL;
2957 2915
@@ -2963,7 +2921,7 @@ static int ks_wlan_set_region(struct net_device *dev,
2963/*------------------------------------------------------------------*/ 2921/*------------------------------------------------------------------*/
2964/* Private handler : get eeprom checksum result */ 2922/* Private handler : get eeprom checksum result */
2965static int ks_wlan_get_eeprom_cksum(struct net_device *dev, 2923static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
2966 struct iw_request_info *info, __u32 * uwrq, 2924 struct iw_request_info *info, __u32 *uwrq,
2967 char *extra) 2925 char *extra)
2968{ 2926{
2969 struct ks_wlan_private *priv = 2927 struct ks_wlan_private *priv =
@@ -3090,7 +3048,7 @@ static void print_hif_event(struct net_device *dev, int event)
3090/*------------------------------------------------------------------*/ 3048/*------------------------------------------------------------------*/
3091/* Private handler : get host command history */ 3049/* Private handler : get host command history */
3092static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info, 3050static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
3093 __u32 * uwrq, char *extra) 3051 __u32 *uwrq, char *extra)
3094{ 3052{
3095 int i, event; 3053 int i, event;
3096 struct ks_wlan_private *priv = 3054 struct ks_wlan_private *priv =
@@ -3293,6 +3251,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
3293{ 3251{
3294 int rc = 0; 3252 int rc = 0;
3295 struct iwreq *wrq = (struct iwreq *)rq; 3253 struct iwreq *wrq = (struct iwreq *)rq;
3254
3296 switch (cmd) { 3255 switch (cmd) {
3297 case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */ 3256 case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
3298 rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL); 3257 rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
@@ -3311,9 +3270,8 @@ struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
3311{ 3270{
3312 struct ks_wlan_private *priv = netdev_priv(dev); 3271 struct ks_wlan_private *priv = netdev_priv(dev);
3313 3272
3314 if (priv->dev_state < DEVICE_STATE_READY) { 3273 if (priv->dev_state < DEVICE_STATE_READY)
3315 return NULL; /* not finished initialize */ 3274 return NULL; /* not finished initialize */
3316 }
3317 3275
3318 return &priv->nstats; 3276 return &priv->nstats;
3319} 3277}
@@ -3323,6 +3281,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
3323{ 3281{
3324 struct ks_wlan_private *priv = netdev_priv(dev); 3282 struct ks_wlan_private *priv = netdev_priv(dev);
3325 struct sockaddr *mac_addr = (struct sockaddr *)addr; 3283 struct sockaddr *mac_addr = (struct sockaddr *)addr;
3284
3326 if (netif_running(dev)) 3285 if (netif_running(dev))
3327 return -EBUSY; 3286 return -EBUSY;
3328 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 3287 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
@@ -3330,10 +3289,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
3330 3289
3331 priv->mac_address_valid = 0; 3290 priv->mac_address_valid = 0;
3332 hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST); 3291 hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
3333 netdev_info(dev, 3292 netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
3334 "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
3335 priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
3336 priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
3337 return 0; 3293 return 0;
3338} 3294}
3339 3295
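The hunk above collapses the hand-rolled six-%02x format into the kernel's %pM printk extension, which prints a 6-byte MAC address in colon-separated form. A minimal sketch of the idiom, with a hypothetical helper name and buffer (not taken from the driver):

	/* Illustrative only: %pM expects a pointer to a 6-byte MAC address. */
	static void example_print_mac(struct net_device *dev, const u8 *mac)
	{
		netdev_info(dev, "MAC ADDRESS = %pM\n", mac);	/* e.g. 00:11:22:33:44:55 */
	}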
@@ -3344,9 +3300,8 @@ void ks_wlan_tx_timeout(struct net_device *dev)
3344 3300
3345 DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead, 3301 DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
3346 priv->tx_dev.qtail); 3302 priv->tx_dev.qtail);
3347 if (!netif_queue_stopped(dev)) { 3303 if (!netif_queue_stopped(dev))
3348 netif_stop_queue(dev); 3304 netif_stop_queue(dev);
3349 }
3350 priv->nstats.tx_errors++; 3305 priv->nstats.tx_errors++;
3351 netif_wake_queue(dev); 3306 netif_wake_queue(dev);
3352} 3307}
@@ -3375,9 +3330,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
3375 netif_trans_update(dev); 3330 netif_trans_update(dev);
3376 3331
3377 DPRINTK(4, "rc=%d\n", rc); 3332 DPRINTK(4, "rc=%d\n", rc);
3378 if (rc) { 3333 if (rc)
3379 rc = 0; 3334 rc = 0;
3380 }
3381 3335
3382 return rc; 3336 return rc;
3383} 3337}
@@ -3410,9 +3364,8 @@ void ks_wlan_set_multicast_list(struct net_device *dev)
3410 struct ks_wlan_private *priv = netdev_priv(dev); 3364 struct ks_wlan_private *priv = netdev_priv(dev);
3411 3365
3412 DPRINTK(4, "\n"); 3366 DPRINTK(4, "\n");
3413 if (priv->dev_state < DEVICE_STATE_READY) { 3367 if (priv->dev_state < DEVICE_STATE_READY)
3414 return; /* not finished initialize */ 3368 return; /* not finished initialize */
3415 }
3416 hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST); 3369 hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
3417} 3370}
3418 3371
@@ -3426,8 +3379,8 @@ int ks_wlan_open(struct net_device *dev)
3426 if (!priv->mac_address_valid) { 3379 if (!priv->mac_address_valid) {
3427 netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name); 3380 netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
3428 return -EBUSY; 3381 return -EBUSY;
3429 } else 3382 }
3430 netif_start_queue(dev); 3383 netif_start_queue(dev);
3431 3384
3432 return 0; 3385 return 0;
3433} 3386}
@@ -3474,9 +3427,8 @@ int ks_wlan_net_start(struct net_device *dev)
3474 3427
3475 /* phy information update timer */ 3428 /* phy information update timer */
3476 atomic_set(&update_phyinfo, 0); 3429 atomic_set(&update_phyinfo, 0);
3477 init_timer(&update_phyinfo_timer); 3430 setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
3478 update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout; 3431 (unsigned long)priv);
3479 update_phyinfo_timer.data = (unsigned long)priv;
3480 3432
3481 /* dummy address set */ 3433 /* dummy address set */
3482 memcpy(priv->eth_addr, dummy_addr, ETH_ALEN); 3434 memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
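The timer hunk in ks_wlan_net_start() replaces the open-coded init_timer() plus manual .function/.data assignments with a single setup_timer() call. A minimal sketch of the equivalence, assuming a hypothetical my_timer and my_timeout_fn:

	/* Before: three statements */
	init_timer(&my_timer);
	my_timer.function = my_timeout_fn;
	my_timer.data = (unsigned long)priv;

	/* After: one call performing the same initialisation */
	setup_timer(&my_timer, my_timeout_fn, (unsigned long)priv);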
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index 78ae2b8fb7f3..2f535c08e172 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -14,10 +14,11 @@
14#include "michael_mic.h" 14#include "michael_mic.h"
15 15
16// Rotation functions on 32 bit values 16// Rotation functions on 32 bit values
17#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) ) 17#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
18#define ROR32( A, n ) ROL32( (A), 32-(n) ) 18#define ROR32(A, n) ROL32((A), 32-(n))
19// Convert from Byte[] to UInt32 in a portable way 19// Convert from Byte[] to UInt32 in a portable way
20#define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24) 20#define getUInt32(A, B) ((uint32_t)(A[B+0] << 0) \
21 + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))
21 22
22// Convert from UInt32 to Byte[] in a portable way 23// Convert from UInt32 to Byte[] in a portable way
23#define putUInt32(A, B, C) \ 24#define putUInt32(A, B, C) \
@@ -48,21 +49,22 @@ void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
48} 49}
49 50
50#define MichaelBlockFunction(L, R) \ 51#define MichaelBlockFunction(L, R) \
51do{ \ 52do { \
52 R ^= ROL32( L, 17 ); \ 53 R ^= ROL32(L, 17); \
53 L += R; \ 54 L += R; \
54 R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \ 55 R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \
55 L += R; \ 56 L += R; \
56 R ^= ROL32( L, 3 ); \ 57 R ^= ROL32(L, 3); \
57 L += R; \ 58 L += R; \
58 R ^= ROR32( L, 2 ); \ 59 R ^= ROR32(L, 2); \
59 L += R; \ 60 L += R; \
60}while(0) 61} while (0)
61 62
62static 63static
63void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes) 64void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
64{ 65{
65 int addlen; 66 int addlen;
67
66 if (Mic->nBytesInM) { 68 if (Mic->nBytesInM) {
67 addlen = 4 - Mic->nBytesInM; 69 addlen = 4 - Mic->nBytesInM;
68 if (addlen > nBytes) 70 if (addlen > nBytes)
@@ -96,7 +98,8 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
96static 98static
97void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst) 99void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
98{ 100{
99 uint8_t *data = Mic->M; 101 u8 *data = Mic->M;
102
100 switch (Mic->nBytesInM) { 103 switch (Mic->nBytesInM) {
101 case 0: 104 case 0:
102 Mic->L ^= 0x5a; 105 Mic->L ^= 0x5a;
@@ -122,11 +125,11 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
122 MichaelClear(Mic); 125 MichaelClear(Mic);
123} 126}
124 127
125void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key, 128void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
126 uint8_t *Data, int Len, uint8_t priority, 129 u8 *Data, int Len, u8 priority,
127 uint8_t *Result) 130 u8 *Result)
128{ 131{
129 uint8_t pad_data[4] = { priority, 0, 0, 0 }; 132 u8 pad_data[4] = { priority, 0, 0, 0 };
130 // Compute the MIC value 133 // Compute the MIC value
131 /* 134 /*
132 * IEEE802.11i page 47 135 * IEEE802.11i page 47
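The reformatted ROL32/ROR32 macros above are plain 32-bit left/right rotations used by the Michael MIC block function. A small standalone check of what they compute, assuming n is in 1..31 and values are treated as 32-bit:

	#include <stdint.h>
	#include <assert.h>

	#define ROL32(A, n) (((A) << (n)) | (((A) >> (32 - (n))) & ((1UL << (n)) - 1)))
	#define ROR32(A, n) ROL32((A), 32 - (n))

	int main(void)
	{
		uint32_t x = 0x80000001u;
		assert(ROL32(x, 1) == 0x00000003u);	/* top bit wraps around to bit 0 */
		assert(ROR32(x, 1) == 0xC0000000u);	/* low bit wraps around to bit 31 */
		return 0;
	}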
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index efaa21788fc7..248f849fc4a5 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -11,15 +11,15 @@
11 11
12/* MichelMIC routine define */ 12/* MichelMIC routine define */
13struct michel_mic_t { 13struct michel_mic_t {
14 uint32_t K0; // Key 14 u32 K0; // Key
15 uint32_t K1; // Key 15 u32 K1; // Key
16 uint32_t L; // Current state 16 u32 L; // Current state
17 uint32_t R; // Current state 17 u32 R; // Current state
18 uint8_t M[4]; // Message accumulator (single word) 18 u8 M[4]; // Message accumulator (single word)
19 int nBytesInM; // # bytes in M 19 int nBytesInM; // # bytes in M
20 uint8_t Result[8]; 20 u8 Result[8];
21}; 21};
22 22
23void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key, 23void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
24 uint8_t *Data, int Len, uint8_t priority, 24 u8 *Data, int Len, u8 priority,
25 uint8_t *Result); 25 u8 *Result);
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index be0675d8ff5e..1ea27c9e3708 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -53,7 +53,7 @@
53#define current_pid() (current->pid) 53#define current_pid() (current->pid)
54#define current_comm() (current->comm) 54#define current_comm() (current->comm)
55 55
56typedef __u32 cfs_cap_t; 56typedef u32 cfs_cap_t;
57 57
58#define CFS_CAP_CHOWN 0 58#define CFS_CAP_CHOWN 0
59#define CFS_CAP_DAC_OVERRIDE 1 59#define CFS_CAP_DAC_OVERRIDE 1
@@ -65,15 +65,15 @@ typedef __u32 cfs_cap_t;
65#define CFS_CAP_SYS_BOOT 23 65#define CFS_CAP_SYS_BOOT 23
66#define CFS_CAP_SYS_RESOURCE 24 66#define CFS_CAP_SYS_RESOURCE 24
67 67
68#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \ 68#define CFS_CAP_FS_MASK (BIT(CFS_CAP_CHOWN) | \
69 (1 << CFS_CAP_DAC_OVERRIDE) | \ 69 BIT(CFS_CAP_DAC_OVERRIDE) | \
70 (1 << CFS_CAP_DAC_READ_SEARCH) | \ 70 BIT(CFS_CAP_DAC_READ_SEARCH) | \
71 (1 << CFS_CAP_FOWNER) | \ 71 BIT(CFS_CAP_FOWNER) | \
72 (1 << CFS_CAP_FSETID) | \ 72 BIT(CFS_CAP_FSETID) | \
73 (1 << CFS_CAP_LINUX_IMMUTABLE) | \ 73 BIT(CFS_CAP_LINUX_IMMUTABLE) | \
74 (1 << CFS_CAP_SYS_ADMIN) | \ 74 BIT(CFS_CAP_SYS_ADMIN) | \
75 (1 << CFS_CAP_SYS_BOOT) | \ 75 BIT(CFS_CAP_SYS_BOOT) | \
76 (1 << CFS_CAP_SYS_RESOURCE)) 76 BIT(CFS_CAP_SYS_RESOURCE))
77 77
78void cfs_cap_raise(cfs_cap_t cap); 78void cfs_cap_raise(cfs_cap_t cap);
79void cfs_cap_lower(cfs_cap_t cap); 79void cfs_cap_lower(cfs_cap_t cap);
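The CFS_CAP_FS_MASK hunk above swaps open-coded (1 << n) shifts for the kernel's BIT() helper, which expands to 1UL << (nr) in the bitops headers, so the resulting mask bits are unchanged. A minimal sketch of the equivalence (illustrative macro names only):

	/* Both forms set the same capability bits; BIT() just yields unsigned long. */
	#define OLD_MASK ((1 << CFS_CAP_CHOWN) | (1 << CFS_CAP_DAC_OVERRIDE))
	#define NEW_MASK (BIT(CFS_CAP_CHOWN) | BIT(CFS_CAP_DAC_OVERRIDE))
	/* With CFS_CAP_CHOWN == 0 and CFS_CAP_DAC_OVERRIDE == 1, both evaluate to 0x3. */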
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 3b92d38d37e2..cc2c0e97bb7e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -61,7 +61,7 @@
61sigset_t cfs_block_allsigs(void); 61sigset_t cfs_block_allsigs(void);
62sigset_t cfs_block_sigs(unsigned long sigs); 62sigset_t cfs_block_sigs(unsigned long sigs);
63sigset_t cfs_block_sigsinv(unsigned long sigs); 63sigset_t cfs_block_sigsinv(unsigned long sigs);
64void cfs_restore_sigs(sigset_t); 64void cfs_restore_sigs(sigset_t sigset);
65void cfs_clear_sigpending(void); 65void cfs_clear_sigpending(void);
66 66
67/* 67/*
@@ -71,7 +71,7 @@ void cfs_clear_sigpending(void);
71/* returns a random 32-bit integer */ 71/* returns a random 32-bit integer */
72unsigned int cfs_rand(void); 72unsigned int cfs_rand(void);
73/* seed the generator */ 73/* seed the generator */
74void cfs_srand(unsigned int, unsigned int); 74void cfs_srand(unsigned int seed1, unsigned int seed2);
75void cfs_get_random_bytes(void *buf, int size); 75void cfs_get_random_bytes(void *buf, int size);
76 76
77#include "libcfs_debug.h" 77#include "libcfs_debug.h"
@@ -125,7 +125,6 @@ extern struct miscdevice libcfs_dev;
125/** 125/**
126 * The path of debug log dump upcall script. 126 * The path of debug log dump upcall script.
127 */ 127 */
128extern char lnet_upcall[1024];
129extern char lnet_debug_log_upcall[1024]; 128extern char lnet_debug_log_upcall[1024];
130 129
131extern struct cfs_wi_sched *cfs_sched_rehash; 130extern struct cfs_wi_sched *cfs_sched_rehash;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 81d8079e3b5e..6d8752a368fa 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -92,7 +92,7 @@ struct cfs_cpt_table {
92 /* node mask */ 92 /* node mask */
93 nodemask_t ctb_nodemask; 93 nodemask_t ctb_nodemask;
94 /* version */ 94 /* version */
95 __u64 ctb_version; 95 u64 ctb_version;
96}; 96};
97 97
98static inline cpumask_t * 98static inline cpumask_t *
@@ -211,7 +211,7 @@ int cfs_cpu_ht_nsiblings(int cpu);
211 */ 211 */
212void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size); 212void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
213/* 213/*
214 * destory per-cpu-partition variable 214 * destroy per-cpu-partition variable
215 */ 215 */
216void cfs_percpt_free(void *vars); 216void cfs_percpt_free(void *vars);
217int cfs_percpt_number(void *vars); 217int cfs_percpt_number(void *vars);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 02be7d7608a5..8f34c5ddc63e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -29,10 +29,12 @@
29#define _LIBCFS_CRYPTO_H 29#define _LIBCFS_CRYPTO_H
30 30
31struct cfs_crypto_hash_type { 31struct cfs_crypto_hash_type {
32 char *cht_name; /**< hash algorithm name, equal to 32 char *cht_name; /*< hash algorithm name, equal to
33 * format name for crypto api */ 33 * format name for crypto api
34 unsigned int cht_key; /**< init key by default (valid for 34 */
35 * 4 bytes context like crc32, adler */ 35 unsigned int cht_key; /*< init key by default (valid for
36 * 4 bytes context like crc32, adler
37 */
36 unsigned int cht_size; /**< hash digest size */ 38 unsigned int cht_size; /**< hash digest size */
37}; 39};
38 40
@@ -135,7 +137,7 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname)
135 enum cfs_crypto_hash_alg hash_alg; 137 enum cfs_crypto_hash_alg hash_alg;
136 138
137 for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++) 139 for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
138 if (strcmp(hash_types[hash_alg].cht_name, algname) == 0) 140 if (!strcmp(hash_types[hash_alg].cht_name, algname))
139 return hash_alg; 141 return hash_alg;
140 142
141 return CFS_HASH_ALG_UNKNOWN; 143 return CFS_HASH_ALG_UNKNOWN;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index bdbbe934584c..fedb46dff696 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -39,8 +39,8 @@ extern int cfs_fail_err;
39extern wait_queue_head_t cfs_race_waitq; 39extern wait_queue_head_t cfs_race_waitq;
40extern int cfs_race_state; 40extern int cfs_race_state;
41 41
42int __cfs_fail_check_set(__u32 id, __u32 value, int set); 42int __cfs_fail_check_set(u32 id, u32 value, int set);
43int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set); 43int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set);
44 44
45enum { 45enum {
46 CFS_FAIL_LOC_NOSET = 0, 46 CFS_FAIL_LOC_NOSET = 0,
@@ -55,11 +55,11 @@ enum {
55 55
56#define CFS_FAILED_BIT 30 56#define CFS_FAILED_BIT 30
57/* CFS_FAILED is 0x40000000 */ 57/* CFS_FAILED is 0x40000000 */
58#define CFS_FAILED (1 << CFS_FAILED_BIT) 58#define CFS_FAILED BIT(CFS_FAILED_BIT)
59 59
60#define CFS_FAIL_ONCE_BIT 31 60#define CFS_FAIL_ONCE_BIT 31
61/* CFS_FAIL_ONCE is 0x80000000 */ 61/* CFS_FAIL_ONCE is 0x80000000 */
62#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT) 62#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT)
63 63
64/* The following flags aren't made to be combined */ 64/* The following flags aren't made to be combined */
65#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */ 65#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
@@ -69,14 +69,14 @@ enum {
69 69
70#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */ 70#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
71 71
72static inline bool CFS_FAIL_PRECHECK(__u32 id) 72static inline bool CFS_FAIL_PRECHECK(u32 id)
73{ 73{
74 return cfs_fail_loc != 0 && 74 return cfs_fail_loc &&
75 ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) || 75 ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
76 (cfs_fail_loc & id & CFS_FAULT)); 76 (cfs_fail_loc & id & CFS_FAULT));
77} 77}
78 78
79static inline int cfs_fail_check_set(__u32 id, __u32 value, 79static inline int cfs_fail_check_set(u32 id, u32 value,
80 int set, int quiet) 80 int set, int quiet)
81{ 81{
82 int ret = 0; 82 int ret = 0;
@@ -103,28 +103,34 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value,
103#define CFS_FAIL_CHECK_QUIET(id) \ 103#define CFS_FAIL_CHECK_QUIET(id) \
104 cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1) 104 cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
105 105
106/* If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1, 106/*
107 * otherwise return 0 */ 107 * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
108 * otherwise return 0
109 */
108#define CFS_FAIL_CHECK_VALUE(id, value) \ 110#define CFS_FAIL_CHECK_VALUE(id, value) \
109 cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0) 111 cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
110#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \ 112#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
111 cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1) 113 cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
112 114
113/* If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1, 115/*
114 * otherwise return 0 */ 116 * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
117 * otherwise return 0
118 */
115#define CFS_FAIL_CHECK_ORSET(id, value) \ 119#define CFS_FAIL_CHECK_ORSET(id, value) \
116 cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0) 120 cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
117#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \ 121#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
118 cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1) 122 cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
119 123
120/* If id hit cfs_fail_loc, cfs_fail_loc = value and return 1, 124/*
121 * otherwise return 0 */ 125 * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
126 * otherwise return 0
127 */
122#define CFS_FAIL_CHECK_RESET(id, value) \ 128#define CFS_FAIL_CHECK_RESET(id, value) \
123 cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0) 129 cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
124#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \ 130#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
125 cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1) 131 cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
126 132
127static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) 133static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
128{ 134{
129 if (unlikely(CFS_FAIL_PRECHECK(id))) 135 if (unlikely(CFS_FAIL_PRECHECK(id)))
130 return __cfs_fail_timeout_set(id, value, ms, set); 136 return __cfs_fail_timeout_set(id, value, ms, set);
@@ -138,8 +144,10 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
138#define CFS_FAIL_TIMEOUT_MS(id, ms) \ 144#define CFS_FAIL_TIMEOUT_MS(id, ms) \
139 cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET) 145 cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
140 146
141/* If id hit cfs_fail_loc, cfs_fail_loc |= value and 147/*
142 * sleep seconds or milliseconds */ 148 * If id hit cfs_fail_loc, cfs_fail_loc |= value and
149 * sleep seconds or milliseconds
150 */
143#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \ 151#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
144 cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET) 152 cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
145 153
@@ -152,13 +160,14 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
152#define CFS_FAULT_CHECK(id) \ 160#define CFS_FAULT_CHECK(id) \
153 CFS_FAIL_CHECK(CFS_FAULT | (id)) 161 CFS_FAIL_CHECK(CFS_FAULT | (id))
154 162
155/* The idea here is to synchronise two threads to force a race. The 163/*
164 * The idea here is to synchronise two threads to force a race. The
156 * first thread that calls this with a matching fail_loc is put to 165 * first thread that calls this with a matching fail_loc is put to
157 * sleep. The next thread that calls with the same fail_loc wakes up 166 * sleep. The next thread that calls with the same fail_loc wakes up
158 * the first and continues. */ 167 * the first and continues.
159static inline void cfs_race(__u32 id) 168 */
169static inline void cfs_race(u32 id)
160{ 170{
161
162 if (CFS_FAIL_PRECHECK(id)) { 171 if (CFS_FAIL_PRECHECK(id)) {
163 if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) { 172 if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
164 int rc; 173 int rc;
@@ -166,7 +175,7 @@ static inline void cfs_race(__u32 id)
166 cfs_race_state = 0; 175 cfs_race_state = 0;
167 CERROR("cfs_race id %x sleeping\n", id); 176 CERROR("cfs_race id %x sleeping\n", id);
168 rc = wait_event_interruptible(cfs_race_waitq, 177 rc = wait_event_interruptible(cfs_race_waitq,
169 cfs_race_state != 0); 178 !!cfs_race_state);
170 CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc); 179 CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
171 } else { 180 } else {
172 CERROR("cfs_fail_race id %x waking\n", id); 181 CERROR("cfs_fail_race id %x waking\n", id);
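The rewritten comment above describes the cfs_race() rendezvous: the first thread that hits a matching fail_loc sleeps on cfs_race_waitq, and the next thread calling with the same fail_loc wakes it, so the two code paths collide deliberately. A rough caller-side sketch under that description (hypothetical fail_loc value and thread bodies, assuming the libcfs fail headers):

	/* Thread A and thread B both call cfs_race() with the same fail_loc id.
	 * Whichever arrives first blocks; the second caller releases it, so the
	 * two paths proceed back-to-back and exercise the race window. */
	static void thread_a_path(void)
	{
		cfs_race(0x80000401);	/* hypothetical fail_loc id */
		/* ... critical section under test ... */
	}

	static void thread_b_path(void)
	{
		cfs_race(0x80000401);	/* same id: wakes whoever slept first */
		/* ... competing critical section ... */
	}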
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 6949a1846635..0cc2fc465c1a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -57,8 +57,10 @@
57 57
58/** disable debug */ 58/** disable debug */
59#define CFS_HASH_DEBUG_NONE 0 59#define CFS_HASH_DEBUG_NONE 0
60/** record hash depth and output to console when it's too deep, 60/*
61 * computing overhead is low but consume more memory */ 61 * record hash depth and output to console when it's too deep,
62 * computing overhead is low but consume more memory
63 */
62#define CFS_HASH_DEBUG_1 1 64#define CFS_HASH_DEBUG_1 1
63/** expensive, check key validation */ 65/** expensive, check key validation */
64#define CFS_HASH_DEBUG_2 2 66#define CFS_HASH_DEBUG_2 2
@@ -87,8 +89,8 @@ union cfs_hash_lock {
87 */ 89 */
88struct cfs_hash_bucket { 90struct cfs_hash_bucket {
89 union cfs_hash_lock hsb_lock; /**< bucket lock */ 91 union cfs_hash_lock hsb_lock; /**< bucket lock */
90 __u32 hsb_count; /**< current entries */ 92 u32 hsb_count; /**< current entries */
91 __u32 hsb_version; /**< change version */ 93 u32 hsb_version; /**< change version */
92 unsigned int hsb_index; /**< index of bucket */ 94 unsigned int hsb_index; /**< index of bucket */
93 int hsb_depmax; /**< max depth on bucket */ 95 int hsb_depmax; /**< max depth on bucket */
94 long hsb_head[0]; /**< hash-head array */ 96 long hsb_head[0]; /**< hash-head array */
@@ -123,38 +125,40 @@ enum cfs_hash_tag {
123 * . Some functions will be disabled with this flag, i.e: 125 * . Some functions will be disabled with this flag, i.e:
124 * cfs_hash_for_each_empty, cfs_hash_rehash 126 * cfs_hash_for_each_empty, cfs_hash_rehash
125 */ 127 */
126 CFS_HASH_NO_LOCK = 1 << 0, 128 CFS_HASH_NO_LOCK = BIT(0),
127 /** no bucket lock, use one spinlock to protect the whole hash */ 129 /** no bucket lock, use one spinlock to protect the whole hash */
128 CFS_HASH_NO_BKTLOCK = 1 << 1, 130 CFS_HASH_NO_BKTLOCK = BIT(1),
129 /** rwlock to protect bucket */ 131 /** rwlock to protect bucket */
130 CFS_HASH_RW_BKTLOCK = 1 << 2, 132 CFS_HASH_RW_BKTLOCK = BIT(2),
131 /** spinlock to protect bucket */ 133 /** spinlock to protect bucket */
132 CFS_HASH_SPIN_BKTLOCK = 1 << 3, 134 CFS_HASH_SPIN_BKTLOCK = BIT(3),
133 /** always add new item to tail */ 135 /** always add new item to tail */
134 CFS_HASH_ADD_TAIL = 1 << 4, 136 CFS_HASH_ADD_TAIL = BIT(4),
135 /** hash-table doesn't have refcount on item */ 137 /** hash-table doesn't have refcount on item */
136 CFS_HASH_NO_ITEMREF = 1 << 5, 138 CFS_HASH_NO_ITEMREF = BIT(5),
137 /** big name for param-tree */ 139 /** big name for param-tree */
138 CFS_HASH_BIGNAME = 1 << 6, 140 CFS_HASH_BIGNAME = BIT(6),
139 /** track global count */ 141 /** track global count */
140 CFS_HASH_COUNTER = 1 << 7, 142 CFS_HASH_COUNTER = BIT(7),
141 /** rehash item by new key */ 143 /** rehash item by new key */
142 CFS_HASH_REHASH_KEY = 1 << 8, 144 CFS_HASH_REHASH_KEY = BIT(8),
143 /** Enable dynamic hash resizing */ 145 /** Enable dynamic hash resizing */
144 CFS_HASH_REHASH = 1 << 9, 146 CFS_HASH_REHASH = BIT(9),
145 /** can shrink hash-size */ 147 /** can shrink hash-size */
146 CFS_HASH_SHRINK = 1 << 10, 148 CFS_HASH_SHRINK = BIT(10),
147 /** assert hash is empty on exit */ 149 /** assert hash is empty on exit */
148 CFS_HASH_ASSERT_EMPTY = 1 << 11, 150 CFS_HASH_ASSERT_EMPTY = BIT(11),
149 /** record hlist depth */ 151 /** record hlist depth */
150 CFS_HASH_DEPTH = 1 << 12, 152 CFS_HASH_DEPTH = BIT(12),
151 /** 153 /**
152 * rehash is always scheduled in a different thread, so current 154 * rehash is always scheduled in a different thread, so current
153 * change on hash table is non-blocking 155 * change on hash table is non-blocking
154 */ 156 */
155 CFS_HASH_NBLK_CHANGE = 1 << 13, 157 CFS_HASH_NBLK_CHANGE = BIT(13),
156 /** NB, we typed hs_flags as __u16, please change it 158 /**
157 * if you need to extend >=16 flags */ 159 * NB, we typed hs_flags as u16, please change it
160 * if you need to extend >=16 flags
161 */
158}; 162};
159 163
160/** most used attributes */ 164/** most used attributes */
@@ -201,8 +205,10 @@ enum cfs_hash_tag {
201 */ 205 */
202 206
203struct cfs_hash { 207struct cfs_hash {
204 /** serialize with rehash, or serialize all operations if 208 /**
205 * the hash-table has CFS_HASH_NO_BKTLOCK */ 209 * serialize with rehash, or serialize all operations if
210 * the hash-table has CFS_HASH_NO_BKTLOCK
211 */
206 union cfs_hash_lock hs_lock; 212 union cfs_hash_lock hs_lock;
207 /** hash operations */ 213 /** hash operations */
208 struct cfs_hash_ops *hs_ops; 214 struct cfs_hash_ops *hs_ops;
@@ -215,31 +221,31 @@ struct cfs_hash {
215 /** total number of items on this hash-table */ 221 /** total number of items on this hash-table */
216 atomic_t hs_count; 222 atomic_t hs_count;
217 /** hash flags, see cfs_hash_tag for detail */ 223 /** hash flags, see cfs_hash_tag for detail */
218 __u16 hs_flags; 224 u16 hs_flags;
219 /** # of extra-bytes for bucket, for user saving extended attributes */ 225 /** # of extra-bytes for bucket, for user saving extended attributes */
220 __u16 hs_extra_bytes; 226 u16 hs_extra_bytes;
221 /** wants to iterate */ 227 /** wants to iterate */
222 __u8 hs_iterating; 228 u8 hs_iterating;
223 /** hash-table is dying */ 229 /** hash-table is dying */
224 __u8 hs_exiting; 230 u8 hs_exiting;
225 /** current hash bits */ 231 /** current hash bits */
226 __u8 hs_cur_bits; 232 u8 hs_cur_bits;
227 /** min hash bits */ 233 /** min hash bits */
228 __u8 hs_min_bits; 234 u8 hs_min_bits;
229 /** max hash bits */ 235 /** max hash bits */
230 __u8 hs_max_bits; 236 u8 hs_max_bits;
231 /** bits for rehash */ 237 /** bits for rehash */
232 __u8 hs_rehash_bits; 238 u8 hs_rehash_bits;
233 /** bits for each bucket */ 239 /** bits for each bucket */
234 __u8 hs_bkt_bits; 240 u8 hs_bkt_bits;
235 /** resize min threshold */ 241 /** resize min threshold */
236 __u16 hs_min_theta; 242 u16 hs_min_theta;
237 /** resize max threshold */ 243 /** resize max threshold */
238 __u16 hs_max_theta; 244 u16 hs_max_theta;
239 /** resize count */ 245 /** resize count */
240 __u32 hs_rehash_count; 246 u32 hs_rehash_count;
241 /** # of iterators (caller of cfs_hash_for_each_*) */ 247 /** # of iterators (caller of cfs_hash_for_each_*) */
242 __u32 hs_iterators; 248 u32 hs_iterators;
243 /** rehash workitem */ 249 /** rehash workitem */
244 struct cfs_workitem hs_rehash_wi; 250 struct cfs_workitem hs_rehash_wi;
245 /** refcount on this hash table */ 251 /** refcount on this hash table */
@@ -291,8 +297,8 @@ struct cfs_hash_hlist_ops {
291 297
292struct cfs_hash_ops { 298struct cfs_hash_ops {
293 /** return hashed value from @key */ 299 /** return hashed value from @key */
294 unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, 300 unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
295 unsigned mask); 301 unsigned int mask);
296 /** return key address of @hnode */ 302 /** return key address of @hnode */
297 void * (*hs_key)(struct hlist_node *hnode); 303 void * (*hs_key)(struct hlist_node *hnode);
298 /** copy key from @hnode to @key */ 304 /** copy key from @hnode to @key */
@@ -317,110 +323,112 @@ struct cfs_hash_ops {
317 323
318/** total number of buckets in @hs */ 324/** total number of buckets in @hs */
319#define CFS_HASH_NBKT(hs) \ 325#define CFS_HASH_NBKT(hs) \
320 (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits)) 326 BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits)
321 327
322/** total number of buckets in @hs while rehashing */ 328/** total number of buckets in @hs while rehashing */
323#define CFS_HASH_RH_NBKT(hs) \ 329#define CFS_HASH_RH_NBKT(hs) \
324 (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)) 330 BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)
325 331
326/** number of hlist for in bucket */ 332/** number of hlist for in bucket */
327#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits) 333#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits)
328 334
329/** total number of hlist in @hs */ 335/** total number of hlist in @hs */
330#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits) 336#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits)
331 337
332/** total number of hlist in @hs while rehashing */ 338/** total number of hlist in @hs while rehashing */
333#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits) 339#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits)
334 340
335static inline int 341static inline int
336cfs_hash_with_no_lock(struct cfs_hash *hs) 342cfs_hash_with_no_lock(struct cfs_hash *hs)
337{ 343{
338 /* caller will serialize all operations for this hash-table */ 344 /* caller will serialize all operations for this hash-table */
339 return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0; 345 return hs->hs_flags & CFS_HASH_NO_LOCK;
340} 346}
341 347
342static inline int 348static inline int
343cfs_hash_with_no_bktlock(struct cfs_hash *hs) 349cfs_hash_with_no_bktlock(struct cfs_hash *hs)
344{ 350{
345 /* no bucket lock, one single lock to protect the hash-table */ 351 /* no bucket lock, one single lock to protect the hash-table */
346 return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0; 352 return hs->hs_flags & CFS_HASH_NO_BKTLOCK;
347} 353}
348 354
349static inline int 355static inline int
350cfs_hash_with_rw_bktlock(struct cfs_hash *hs) 356cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
351{ 357{
352 /* rwlock to protect hash bucket */ 358 /* rwlock to protect hash bucket */
353 return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0; 359 return hs->hs_flags & CFS_HASH_RW_BKTLOCK;
354} 360}
355 361
356static inline int 362static inline int
357cfs_hash_with_spin_bktlock(struct cfs_hash *hs) 363cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
358{ 364{
359 /* spinlock to protect hash bucket */ 365 /* spinlock to protect hash bucket */
360 return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0; 366 return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK;
361} 367}
362 368
363static inline int 369static inline int
364cfs_hash_with_add_tail(struct cfs_hash *hs) 370cfs_hash_with_add_tail(struct cfs_hash *hs)
365{ 371{
366 return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0; 372 return hs->hs_flags & CFS_HASH_ADD_TAIL;
367} 373}
368 374
369static inline int 375static inline int
370cfs_hash_with_no_itemref(struct cfs_hash *hs) 376cfs_hash_with_no_itemref(struct cfs_hash *hs)
371{ 377{
372 /* hash-table doesn't keep refcount on item, 378 /*
379 * hash-table doesn't keep refcount on item,
373 * item can't be removed from hash unless it's 380 * item can't be removed from hash unless it's
374 * ZERO refcount */ 381 * ZERO refcount
375 return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0; 382 */
383 return hs->hs_flags & CFS_HASH_NO_ITEMREF;
376} 384}
377 385
378static inline int 386static inline int
379cfs_hash_with_bigname(struct cfs_hash *hs) 387cfs_hash_with_bigname(struct cfs_hash *hs)
380{ 388{
381 return (hs->hs_flags & CFS_HASH_BIGNAME) != 0; 389 return hs->hs_flags & CFS_HASH_BIGNAME;
382} 390}
383 391
384static inline int 392static inline int
385cfs_hash_with_counter(struct cfs_hash *hs) 393cfs_hash_with_counter(struct cfs_hash *hs)
386{ 394{
387 return (hs->hs_flags & CFS_HASH_COUNTER) != 0; 395 return hs->hs_flags & CFS_HASH_COUNTER;
388} 396}
389 397
390static inline int 398static inline int
391cfs_hash_with_rehash(struct cfs_hash *hs) 399cfs_hash_with_rehash(struct cfs_hash *hs)
392{ 400{
393 return (hs->hs_flags & CFS_HASH_REHASH) != 0; 401 return hs->hs_flags & CFS_HASH_REHASH;
394} 402}
395 403
396static inline int 404static inline int
397cfs_hash_with_rehash_key(struct cfs_hash *hs) 405cfs_hash_with_rehash_key(struct cfs_hash *hs)
398{ 406{
399 return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0; 407 return hs->hs_flags & CFS_HASH_REHASH_KEY;
400} 408}
401 409
402static inline int 410static inline int
403cfs_hash_with_shrink(struct cfs_hash *hs) 411cfs_hash_with_shrink(struct cfs_hash *hs)
404{ 412{
405 return (hs->hs_flags & CFS_HASH_SHRINK) != 0; 413 return hs->hs_flags & CFS_HASH_SHRINK;
406} 414}
407 415
408static inline int 416static inline int
409cfs_hash_with_assert_empty(struct cfs_hash *hs) 417cfs_hash_with_assert_empty(struct cfs_hash *hs)
410{ 418{
411 return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0; 419 return hs->hs_flags & CFS_HASH_ASSERT_EMPTY;
412} 420}
413 421
414static inline int 422static inline int
415cfs_hash_with_depth(struct cfs_hash *hs) 423cfs_hash_with_depth(struct cfs_hash *hs)
416{ 424{
417 return (hs->hs_flags & CFS_HASH_DEPTH) != 0; 425 return hs->hs_flags & CFS_HASH_DEPTH;
418} 426}
419 427
420static inline int 428static inline int
421cfs_hash_with_nblk_change(struct cfs_hash *hs) 429cfs_hash_with_nblk_change(struct cfs_hash *hs)
422{ 430{
423 return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0; 431 return hs->hs_flags & CFS_HASH_NBLK_CHANGE;
424} 432}
425 433
426static inline int 434static inline int
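
The hunk above makes two recurring cleanups: open-coded (1U << n) power-of-two expressions become the kernel's BIT() macro (which expands to a shifted 1UL, so the promoted type widens from unsigned int to unsigned long), and the flag helpers drop the redundant "!= 0" since the masked value is already usable as a truth value. A minimal standalone sketch of both patterns, using a local stand-in for BIT() and an illustrative flag value rather than the real cfs_hash definitions:

    #include <assert.h>

    #define BIT(nr)			(1UL << (nr))	/* local stand-in for the kernel macro */
    #define CFS_HASH_NO_LOCK	0x0001		/* illustrative flag value, not the real one */

    struct cfs_hash_demo {
    	unsigned int hs_flags;
    	unsigned char hs_cur_bits;
    	unsigned char hs_bkt_bits;
    };

    /* old style: explicit comparison against zero */
    static int with_no_lock_old(const struct cfs_hash_demo *hs)
    {
    	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
    }

    /* new style: the masked value itself is the truth value */
    static int with_no_lock_new(const struct cfs_hash_demo *hs)
    {
    	return hs->hs_flags & CFS_HASH_NO_LOCK;
    }

    int main(void)
    {
    	struct cfs_hash_demo hs = { CFS_HASH_NO_LOCK, 10, 7 };

    	/* 1U << n and BIT(n) agree in value; only the promoted type widens */
    	assert((1U << (hs.hs_cur_bits - hs.hs_bkt_bits)) ==
    	       BIT(hs.hs_cur_bits - hs.hs_bkt_bits));

    	/* both predicates agree once reduced to a boolean */
    	assert(!!with_no_lock_old(&hs) == !!with_no_lock_new(&hs));
    	return 0;
    }

The only behavioural caveat is that the new helpers return the raw masked bit rather than strictly 0 or 1, which matters only to a caller that compared the result against 1 instead of testing it for truth.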
@@ -434,14 +442,14 @@ static inline int
434cfs_hash_is_rehashing(struct cfs_hash *hs) 442cfs_hash_is_rehashing(struct cfs_hash *hs)
435{ 443{
436 /* rehash is launched */ 444 /* rehash is launched */
437 return hs->hs_rehash_bits != 0; 445 return !!hs->hs_rehash_bits;
438} 446}
439 447
440static inline int 448static inline int
441cfs_hash_is_iterating(struct cfs_hash *hs) 449cfs_hash_is_iterating(struct cfs_hash *hs)
442{ 450{
443 /* someone is calling cfs_hash_for_each_* */ 451 /* someone is calling cfs_hash_for_each_* */
444 return hs->hs_iterating || hs->hs_iterators != 0; 452 return hs->hs_iterating || hs->hs_iterators;
445} 453}
446 454
447static inline int 455static inline int
@@ -453,7 +461,7 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
453} 461}
454 462
455static inline unsigned 463static inline unsigned
456cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask) 464cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
457{ 465{
458 return hs->hs_ops->hs_hash(hs, key, mask); 466 return hs->hs_ops->hs_hash(hs, key, mask);
459} 467}
@@ -562,7 +570,7 @@ cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
562} 570}
563 571
564static inline void 572static inline void
565cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index, 573cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
566 struct cfs_hash_bd *bd) 574 struct cfs_hash_bd *bd)
567{ 575{
568 bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits]; 576 bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
@@ -576,14 +584,14 @@ cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
576 cfs_hash_bkt_size(hs) - hs->hs_extra_bytes; 584 cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
577} 585}
578 586
579static inline __u32 587static inline u32
580cfs_hash_bd_version_get(struct cfs_hash_bd *bd) 588cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
581{ 589{
582 /* need hold cfs_hash_bd_lock */ 590 /* need hold cfs_hash_bd_lock */
583 return bd->bd_bucket->hsb_version; 591 return bd->bd_bucket->hsb_version;
584} 592}
585 593
586static inline __u32 594static inline u32
587cfs_hash_bd_count_get(struct cfs_hash_bd *bd) 595cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
588{ 596{
589 /* need hold cfs_hash_bd_lock */ 597 /* need hold cfs_hash_bd_lock */
@@ -669,10 +677,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
669 677
670/* Hash init/cleanup functions */ 678/* Hash init/cleanup functions */
671struct cfs_hash * 679struct cfs_hash *
672cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, 680cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
673 unsigned bkt_bits, unsigned extra_bytes, 681 unsigned int bkt_bits, unsigned int extra_bytes,
674 unsigned min_theta, unsigned max_theta, 682 unsigned int min_theta, unsigned int max_theta,
675 struct cfs_hash_ops *ops, unsigned flags); 683 struct cfs_hash_ops *ops, unsigned int flags);
676 684
677struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs); 685struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
678void cfs_hash_putref(struct cfs_hash *hs); 686void cfs_hash_putref(struct cfs_hash *hs);
@@ -700,27 +708,28 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
700void * 708void *
701cfs_hash_lookup(struct cfs_hash *hs, const void *key); 709cfs_hash_lookup(struct cfs_hash *hs, const void *key);
702void 710void
703cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); 711cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data);
704void 712void
705cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); 713cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
714 void *data);
706int 715int
707cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t, 716cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
708 void *data); 717 void *data, int start);
709int 718int
710cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t, 719cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
711 void *data); 720 void *data);
712void 721void
713cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, 722cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
714 cfs_hash_for_each_cb_t, void *data); 723 cfs_hash_for_each_cb_t cb, void *data);
715typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data); 724typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
716void 725void
717cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data); 726cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data);
718 727
719void 728void
720cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, 729cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
721 cfs_hash_for_each_cb_t, void *data); 730 cfs_hash_for_each_cb_t cb, void *data);
722int cfs_hash_is_empty(struct cfs_hash *hs); 731int cfs_hash_is_empty(struct cfs_hash *hs);
723__u64 cfs_hash_size_get(struct cfs_hash *hs); 732u64 cfs_hash_size_get(struct cfs_hash *hs);
724 733
725/* 734/*
726 * Rehash - Theta is calculated to be the average chained 735 * Rehash - Theta is calculated to be the average chained
@@ -766,8 +775,8 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
766#endif /* CFS_HASH_DEBUG_LEVEL */ 775#endif /* CFS_HASH_DEBUG_LEVEL */
767 776
768#define CFS_HASH_THETA_BITS 10 777#define CFS_HASH_THETA_BITS 10
769#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1)) 778#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1)
770#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1)) 779#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1)
771 780
772/* Return integer component of theta */ 781/* Return integer component of theta */
773static inline int __cfs_hash_theta_int(int theta) 782static inline int __cfs_hash_theta_int(int theta)
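
CFS_HASH_THETA_BITS sets the number of fractional bits in the table's fixed-point load factor, so CFS_HASH_MIN_THETA = BIT(9) = 512 encodes 0.5 items per hash list and CFS_HASH_MAX_THETA = BIT(11) = 2048 encodes 2.0. The helper bodies fall outside this hunk, so the decomposition below into integer and per-mille fractional parts is an assumed sketch of that fixed-point convention, not the driver's exact code:

    #include <assert.h>

    #define CFS_HASH_THETA_BITS	10
    #define BIT(nr)			(1UL << (nr))	/* local stand-in */

    /* assumed decomposition of a 10-bit fixed-point theta value */
    static int theta_int(int theta)
    {
    	return theta >> CFS_HASH_THETA_BITS;
    }

    static int theta_frac(int theta)	/* fractional part in 1/1000ths */
    {
    	return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
    	       theta_int(theta) * 1000;
    }

    int main(void)
    {
    	int min_theta = BIT(CFS_HASH_THETA_BITS - 1);	/* 512  == 0.500 */
    	int max_theta = BIT(CFS_HASH_THETA_BITS + 1);	/* 2048 == 2.000 */

    	assert(theta_int(min_theta) == 0 && theta_frac(min_theta) == 500);
    	assert(theta_int(max_theta) == 2 && theta_frac(max_theta) == 0);
    	return 0;
    }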
@@ -792,8 +801,8 @@ static inline void
792__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max) 801__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
793{ 802{
794 LASSERT(min < max); 803 LASSERT(min < max);
795 hs->hs_min_theta = (__u16)min; 804 hs->hs_min_theta = (u16)min;
796 hs->hs_max_theta = (__u16)max; 805 hs->hs_max_theta = (u16)max;
797} 806}
798 807
799/* Generic debug formatting routines mainly for proc handler */ 808/* Generic debug formatting routines mainly for proc handler */
@@ -805,11 +814,11 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
805 * Generic djb2 hash algorithm for character arrays. 814 * Generic djb2 hash algorithm for character arrays.
806 */ 815 */
807static inline unsigned 816static inline unsigned
808cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask) 817cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
809{ 818{
810 unsigned i, hash = 5381; 819 unsigned int i, hash = 5381;
811 820
812 LASSERT(key != NULL); 821 LASSERT(key);
813 822
814 for (i = 0; i < size; i++) 823 for (i = 0; i < size; i++)
815 hash = hash * 33 + ((char *)key)[i]; 824 hash = hash * 33 + ((char *)key)[i];
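
cfs_hash_djb2_hash() above is the classic djb2 string hash: seed with 5381, fold in each byte as hash = hash * 33 + byte, and confine the result with the caller's mask, which is expected to be table_size - 1 for a power-of-two table. A self-contained sketch of the same arithmetic:

    #include <assert.h>
    #include <stddef.h>

    /* djb2: hash = hash * 33 + c, seeded with 5381, masked to the table size */
    static unsigned int djb2_hash(const void *key, size_t size, unsigned int mask)
    {
    	const char *bytes = key;
    	unsigned int i, hash = 5381;

    	for (i = 0; i < size; i++)
    		hash = hash * 33 + bytes[i];

    	return hash & mask;
    }

    int main(void)
    {
    	unsigned int mask = 127;	/* table with 128 hash lists */
    	unsigned int h = djb2_hash("lustre", 6, mask);

    	assert(h <= mask);	/* result always lands inside the table */
    	return 0;
    }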
@@ -821,7 +830,7 @@ cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
821 * Generic u32 hash algorithm. 830 * Generic u32 hash algorithm.
822 */ 831 */
823static inline unsigned 832static inline unsigned
824cfs_hash_u32_hash(const __u32 key, unsigned mask) 833cfs_hash_u32_hash(const u32 key, unsigned int mask)
825{ 834{
826 return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask); 835 return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
827} 836}
@@ -830,9 +839,9 @@ cfs_hash_u32_hash(const __u32 key, unsigned mask)
830 * Generic u64 hash algorithm. 839 * Generic u64 hash algorithm.
831 */ 840 */
832static inline unsigned 841static inline unsigned
833cfs_hash_u64_hash(const __u64 key, unsigned mask) 842cfs_hash_u64_hash(const u64 key, unsigned int mask)
834{ 843{
835 return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask); 844 return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
836} 845}
837 846
838/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */ 847/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
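
The u32/u64 helpers above are plain multiplicative (Fibonacci-style) hashing: multiply the key by a large golden-ratio-derived constant and mask off the low bits. CFS_GOLDEN_RATIO_PRIME_32/64 are defined elsewhere in libcfs and are not shown here, so the sketch below substitutes the historical kernel golden-ratio primes as stand-in values:

    #include <assert.h>
    #include <stdint.h>

    /* assumed stand-ins; CFS_GOLDEN_RATIO_PRIME_{32,64} live elsewhere in libcfs */
    #define DEMO_GOLDEN_RATIO_PRIME_32	0x9e370001U
    #define DEMO_GOLDEN_RATIO_PRIME_64	0x9e37fffffffc0001ULL

    static unsigned int hash_u32(uint32_t key, unsigned int mask)
    {
    	return (key * DEMO_GOLDEN_RATIO_PRIME_32) & mask;
    }

    static unsigned int hash_u64(uint64_t key, unsigned int mask)
    {
    	return (unsigned int)(key * DEMO_GOLDEN_RATIO_PRIME_64) & mask;
    }

    int main(void)
    {
    	unsigned int mask = 255;	/* 256 hash lists, mask = size - 1 */

    	assert(hash_u32(12345u, mask) <= mask);
    	assert(hash_u64(0x0123456789abcdefULL, mask) <= mask);
    	assert(hash_u32(7, mask) == hash_u32(7, mask));	/* deterministic */
    	return 0;
    }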
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index e0e1a5d0949d..aab15d8112a4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -75,7 +75,7 @@ do { \
75 75
76#define KLASSERT(e) LASSERT(e) 76#define KLASSERT(e) LASSERT(e)
77 77
78void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *); 78void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
79 79
80#define LBUG() \ 80#define LBUG() \
81do { \ 81do { \
@@ -96,7 +96,7 @@ do { \
96 96
97#define LIBCFS_ALLOC_POST(ptr, size) \ 97#define LIBCFS_ALLOC_POST(ptr, size) \
98do { \ 98do { \
99 if (unlikely((ptr) == NULL)) { \ 99 if (unlikely(!(ptr))) { \
100 CERROR("LNET: out of memory at %s:%d (tried to alloc '" \ 100 CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
101 #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \ 101 #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
102 } else { \ 102 } else { \
@@ -147,7 +147,7 @@ do { \
147 147
148#define LIBCFS_FREE(ptr, size) \ 148#define LIBCFS_FREE(ptr, size) \
149do { \ 149do { \
150 if (unlikely((ptr) == NULL)) { \ 150 if (unlikely(!(ptr))) { \
151 CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \ 151 CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
152 "%s:%d\n", (int)(size), __FILE__, __LINE__); \ 152 "%s:%d\n", (int)(size), __FILE__, __LINE__); \
153 break; \ 153 break; \
@@ -169,8 +169,6 @@ do { \
169#define ntohs(x) ___ntohs(x) 169#define ntohs(x) ___ntohs(x)
170#endif 170#endif
171 171
172void libcfs_run_upcall(char **argv);
173void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
174void libcfs_debug_dumplog(void); 172void libcfs_debug_dumplog(void);
175int libcfs_debug_init(unsigned long bufsize); 173int libcfs_debug_init(unsigned long bufsize);
176int libcfs_debug_cleanup(void); 174int libcfs_debug_cleanup(void);
@@ -280,7 +278,7 @@ do { \
280#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr))) 278#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
281 279
282/** Compile-time assertion. 280/** Compile-time assertion.
283 281 *
284 * Check an invariant described by a constant expression at compile time by 282 * Check an invariant described by a constant expression at compile time by
285 * forcing a compiler error if it does not hold. \a cond must be a constant 283 * forcing a compiler error if it does not hold. \a cond must be a constant
286 * expression as defined by the ISO C Standard: 284 * expression as defined by the ISO C Standard:
@@ -306,7 +304,8 @@ do { \
306/* -------------------------------------------------------------------- 304/* --------------------------------------------------------------------
307 * Light-weight trace 305 * Light-weight trace
308 * Support for temporary event tracing with minimal Heisenberg effect. 306 * Support for temporary event tracing with minimal Heisenberg effect.
309 * -------------------------------------------------------------------- */ 307 * --------------------------------------------------------------------
308 */
310 309
311#define MKSTR(ptr) ((ptr)) ? (ptr) : "" 310#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
312 311
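
The libcfs_private.h hunks apply the same checkpatch-driven style: pointer tests spelled (ptr) == NULL become !(ptr), block comments get their own opening and closing lines, and the prototypes for the removed upcall code disappear. A small userspace sketch of the allocation-failure reporting pattern, with demo_alloc_post() as a hypothetical stand-in for the LIBCFS_ALLOC_POST macro:

    #include <stdio.h>
    #include <stdlib.h>

    /* demo_alloc_post(): hypothetical stand-in for the LIBCFS_ALLOC_POST reporting */
    static void *demo_alloc_post(void *ptr, size_t size, const char *name)
    {
    	if (!ptr)	/* preferred kernel style over "ptr == NULL" */
    		fprintf(stderr, "out of memory (tried to alloc '%s' = %zu)\n",
    			name, size);
    	return ptr;
    }

    int main(void)
    {
    	int *p = demo_alloc_post(malloc(sizeof(*p)), sizeof(*p), "p");

    	if (p) {
    		*p = 42;
    		printf("allocated '%d'\n", *p);
    		free(p);
    	}
    	return 0;
    }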
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 0ee60ff336f2..41795d9b3b9b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -62,9 +62,9 @@ struct cfs_range_expr {
62 * Link to cfs_expr_list::el_exprs. 62 * Link to cfs_expr_list::el_exprs.
63 */ 63 */
64 struct list_head re_link; 64 struct list_head re_link;
65 __u32 re_lo; 65 u32 re_lo;
66 __u32 re_hi; 66 u32 re_hi;
67 __u32 re_stride; 67 u32 re_stride;
68}; 68};
69 69
70struct cfs_expr_list { 70struct cfs_expr_list {
@@ -74,24 +74,26 @@ struct cfs_expr_list {
74 74
75char *cfs_trimwhite(char *str); 75char *cfs_trimwhite(char *str);
76int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res); 76int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
77int cfs_str2num_check(char *str, int nob, unsigned *num, 77int cfs_str2num_check(char *str, int nob, unsigned int *num,
78 unsigned min, unsigned max); 78 unsigned int min, unsigned int max);
79int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list); 79int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list);
80int cfs_expr_list_print(char *buffer, int count, 80int cfs_expr_list_print(char *buffer, int count,
81 struct cfs_expr_list *expr_list); 81 struct cfs_expr_list *expr_list);
82int cfs_expr_list_values(struct cfs_expr_list *expr_list, 82int cfs_expr_list_values(struct cfs_expr_list *expr_list,
83 int max, __u32 **values); 83 int max, u32 **values);
84static inline void 84static inline void
85cfs_expr_list_values_free(__u32 *values, int num) 85cfs_expr_list_values_free(u32 *values, int num)
86{ 86{
87 /* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed 87 /*
88 * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
88 * by OBD_FREE() if it's called by module other than libcfs & LNet, 89 * by OBD_FREE() if it's called by module other than libcfs & LNet,
89 * otherwise we will see fake memory leak */ 90 * otherwise we will see fake memory leak
91 */
90 LIBCFS_FREE(values, num * sizeof(values[0])); 92 LIBCFS_FREE(values, num * sizeof(values[0]));
91} 93}
92 94
93void cfs_expr_list_free(struct cfs_expr_list *expr_list); 95void cfs_expr_list_free(struct cfs_expr_list *expr_list);
94int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, 96int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
95 struct cfs_expr_list **elpp); 97 struct cfs_expr_list **elpp);
96void cfs_expr_list_free_list(struct list_head *list); 98void cfs_expr_list_free_list(struct list_head *list);
97 99
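
The remaining header churn swaps the exported-ABI integer spellings __u16/__u32/__u64 for the kernel-internal u16/u32/u64 aliases and bare "unsigned" for "unsigned int"; widths and semantics are unchanged. A trivial sketch, using the C99 fixed-width types as stand-ins for the kernel typedefs and cfs_range_expr_demo as an illustrative copy of the structure above:

    #include <assert.h>
    #include <stdint.h>

    /* stand-ins: in the kernel, u16/u32/u64 alias the same widths as __u16/__u32/__u64 */
    typedef uint16_t u16;
    typedef uint32_t u32;
    typedef uint64_t u64;

    struct cfs_range_expr_demo {
    	u32 re_lo;
    	u32 re_hi;
    	u32 re_stride;
    };

    int main(void)
    {
    	struct cfs_range_expr_demo e = { .re_lo = 2, .re_hi = 10, .re_stride = 4 };

    	assert(sizeof(u16) == 2 && sizeof(u32) == 4 && sizeof(u64) == 8);
    	assert((e.re_hi - e.re_lo) % e.re_stride == 0);	/* 2, 6, 10 all match */
    	return 0;
    }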
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index a7e1340e69a1..2accd9a85472 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -62,9 +62,9 @@
62 62
63struct cfs_wi_sched; 63struct cfs_wi_sched;
64 64
65void cfs_wi_sched_destroy(struct cfs_wi_sched *); 65void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);
66int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt, 66int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
67 int nthrs, struct cfs_wi_sched **); 67 int nthrs, struct cfs_wi_sched **sched_pp);
68 68
69struct cfs_workitem; 69struct cfs_workitem;
70 70
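
The libcfs_workitem.h change only names the pointer parameters in the prototypes; parameter names in a declaration are not part of the function type, so this is pure documentation for the reader. A compilable sketch showing that both spellings declare the same function:

    /* both prototypes declare the same function; naming the parameter
     * is purely documentation, which is what the hunk above adds
     */
    struct cfs_wi_sched;

    void cfs_wi_sched_destroy(struct cfs_wi_sched *);	/* old style */
    void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);	/* new style */

    int main(void)
    {
    	return 0;	/* nothing to run: the point is that both declarations compile identically */
    }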
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index f63cb47bc309..dd0cd0442b86 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -52,17 +52,17 @@ struct cfs_cpu_partition {
52 /* nodes mask for this partition */ 52 /* nodes mask for this partition */
53 nodemask_t *cpt_nodemask; 53 nodemask_t *cpt_nodemask;
54 /* spread rotor for NUMA allocator */ 54 /* spread rotor for NUMA allocator */
55 unsigned cpt_spread_rotor; 55 unsigned int cpt_spread_rotor;
56}; 56};
57 57
58/** descriptor for CPU partitions */ 58/** descriptor for CPU partitions */
59struct cfs_cpt_table { 59struct cfs_cpt_table {
60 /* version, reserved for hotplug */ 60 /* version, reserved for hotplug */
61 unsigned ctb_version; 61 unsigned int ctb_version;
62 /* spread rotor for NUMA allocator */ 62 /* spread rotor for NUMA allocator */
63 unsigned ctb_spread_rotor; 63 unsigned int ctb_spread_rotor;
64 /* # of CPU partitions */ 64 /* # of CPU partitions */
65 unsigned ctb_nparts; 65 unsigned int ctb_nparts;
66 /* partitions tables */ 66 /* partitions tables */
67 struct cfs_cpu_partition *ctb_parts; 67 struct cfs_cpu_partition *ctb_parts;
68 /* shadow HW CPU to CPU partition ID */ 68 /* shadow HW CPU to CPU partition ID */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index b646acd1f7e7..709e1ce98d8d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -76,23 +76,23 @@ static inline long cfs_duration_sec(long d)
76 76
77#define cfs_time_current_64 get_jiffies_64 77#define cfs_time_current_64 get_jiffies_64
78 78
79static inline __u64 cfs_time_add_64(__u64 t, __u64 d) 79static inline u64 cfs_time_add_64(u64 t, u64 d)
80{ 80{
81 return t + d; 81 return t + d;
82} 82}
83 83
84static inline __u64 cfs_time_shift_64(int seconds) 84static inline u64 cfs_time_shift_64(int seconds)
85{ 85{
86 return cfs_time_add_64(cfs_time_current_64(), 86 return cfs_time_add_64(cfs_time_current_64(),
87 cfs_time_seconds(seconds)); 87 cfs_time_seconds(seconds));
88} 88}
89 89
90static inline int cfs_time_before_64(__u64 t1, __u64 t2) 90static inline int cfs_time_before_64(u64 t1, u64 t2)
91{ 91{
92 return (__s64)t2 - (__s64)t1 > 0; 92 return (__s64)t2 - (__s64)t1 > 0;
93} 93}
94 94
95static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2) 95static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
96{ 96{
97 return (__s64)t2 - (__s64)t1 >= 0; 97 return (__s64)t2 - (__s64)t1 >= 0;
98} 98}
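
cfs_time_before_64()/cfs_time_beforeq_64() above use the same wrap-safe trick as the kernel's time_before(): t1 is "before" t2 when the signed difference t2 - t1 is positive, which stays correct even after the 64-bit counter wraps. The sketch below expresses the test as a signed cast of the unsigned difference, a well-defined variant of the comparison shown in the hunk:

    #include <assert.h>
    #include <stdint.h>

    /* wrap-safe "before" test in the spirit of cfs_time_before_64() */
    static int before_64(uint64_t t1, uint64_t t2)
    {
    	return (int64_t)(t2 - t1) > 0;
    }

    int main(void)
    {
    	uint64_t near_wrap = UINT64_MAX - 5;

    	assert(before_64(near_wrap, near_wrap + 10));	/* still true across the wrap */
    	assert(!before_64(near_wrap + 10, near_wrap));
    	return 0;
    }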
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index 417044552d3f..8a84888635ff 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -244,7 +244,7 @@ typedef struct {
244 int lstio_ses_timeout; /* IN: session timeout */ 244 int lstio_ses_timeout; /* IN: session timeout */
245 int lstio_ses_force; /* IN: force create ? */ 245 int lstio_ses_force; /* IN: force create ? */
246 /** IN: session features */ 246 /** IN: session features */
247 unsigned lstio_ses_feats; 247 unsigned int lstio_ses_feats;
248 lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ 248 lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
249 int lstio_ses_nmlen; /* IN: name length */ 249 int lstio_ses_nmlen; /* IN: name length */
250 char __user *lstio_ses_namep; /* IN: session name */ 250 char __user *lstio_ses_namep; /* IN: session name */
@@ -255,7 +255,7 @@ typedef struct {
255 lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ 255 lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
256 int __user *lstio_ses_keyp; /* OUT: local key */ 256 int __user *lstio_ses_keyp; /* OUT: local key */
257 /** OUT: session features */ 257 /** OUT: session features */
258 unsigned __user *lstio_ses_featp; 258 unsigned int __user *lstio_ses_featp;
259 lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */ 259 lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */
260 int lstio_ses_nmlen; /* IN: name length */ 260 int lstio_ses_nmlen; /* IN: name length */
261 char __user *lstio_ses_namep; /* OUT: session name */ 261 char __user *lstio_ses_namep; /* OUT: session name */
@@ -328,7 +328,7 @@ typedef struct {
328 char __user *lstio_grp_namep; /* IN: group name */ 328 char __user *lstio_grp_namep; /* IN: group name */
329 int lstio_grp_count; /* IN: # of nodes */ 329 int lstio_grp_count; /* IN: # of nodes */
330 /** OUT: session features */ 330 /** OUT: session features */
331 unsigned __user *lstio_grp_featp; 331 unsigned int __user *lstio_grp_featp;
332 lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */ 332 lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */
333 struct list_head __user *lstio_grp_resultp; /* OUT: list head of 333 struct list_head __user *lstio_grp_resultp; /* OUT: list head of
334 result buffer */ 334 result buffer */
@@ -490,6 +490,8 @@ typedef struct {
490 int blk_size; /* size (bytes) */ 490 int blk_size; /* size (bytes) */
491 int blk_time; /* time of running the test*/ 491 int blk_time; /* time of running the test*/
492 int blk_flags; /* reserved flags */ 492 int blk_flags; /* reserved flags */
493 int blk_cli_off; /* bulk offset on client */
494 int blk_srv_off; /* reserved: bulk offset on server */
493} lst_test_bulk_param_t; 495} lst_test_bulk_param_t;
494 496
495typedef struct { 497typedef struct {
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 9e8802181452..7f761b327166 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1489,7 +1489,7 @@ out_fpo:
1489static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, 1489static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
1490 struct list_head *zombies) 1490 struct list_head *zombies)
1491{ 1491{
1492 if (!fps->fps_net) /* intialized? */ 1492 if (!fps->fps_net) /* initialized? */
1493 return; 1493 return;
1494 1494
1495 spin_lock(&fps->fps_lock); 1495 spin_lock(&fps->fps_lock);
@@ -1637,7 +1637,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
1637{ 1637{
1638 __u64 *pages = tx->tx_pages; 1638 __u64 *pages = tx->tx_pages;
1639 bool is_rx = (rd != tx->tx_rd); 1639 bool is_rx = (rd != tx->tx_rd);
1640 bool tx_pages_mapped = 0; 1640 bool tx_pages_mapped = false;
1641 struct kib_fmr_pool *fpo; 1641 struct kib_fmr_pool *fpo;
1642 int npages = 0; 1642 int npages = 0;
1643 __u64 version; 1643 __u64 version;
@@ -1812,7 +1812,7 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
1812 1812
1813static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) 1813static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
1814{ 1814{
1815 if (!ps->ps_net) /* intialized? */ 1815 if (!ps->ps_net) /* initialized? */
1816 return; 1816 return;
1817 1817
1818 spin_lock(&ps->ps_lock); 1818 spin_lock(&ps->ps_lock);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de8888149..c7917abf9944 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1912,12 +1912,12 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error)
1912 libcfs_nid2str(peer->ibp_nid)); 1912 libcfs_nid2str(peer->ibp_nid));
1913 } else { 1913 } else {
1914 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", 1914 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
1915 libcfs_nid2str(peer->ibp_nid), error, 1915 libcfs_nid2str(peer->ibp_nid), error,
1916 list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", 1916 list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
1917 list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", 1917 list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
1918 list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", 1918 list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
1919 list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", 1919 list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
1920 list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); 1920 list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
1921 } 1921 }
1922 1922
1923 dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev; 1923 dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -2643,7 +2643,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
2643 if (incarnation) 2643 if (incarnation)
2644 peer->ibp_incarnation = incarnation; 2644 peer->ibp_incarnation = incarnation;
2645out: 2645out:
2646 write_unlock_irqrestore(glock, flags); 2646 write_unlock_irqrestore(glock, flags);
2647 2647
2648 CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", 2648 CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2649 libcfs_nid2str(peer->ibp_nid), 2649 libcfs_nid2str(peer->ibp_nid),
@@ -2651,7 +2651,7 @@ out:
2651 reason, IBLND_MSG_VERSION, version, msg_size, 2651 reason, IBLND_MSG_VERSION, version, msg_size,
2652 conn->ibc_queue_depth, queue_dep, 2652 conn->ibc_queue_depth, queue_dep,
2653 conn->ibc_max_frags, frag_num); 2653 conn->ibc_max_frags, frag_num);
2654 /** 2654 /**
2655 * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer 2655 * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
2656 * while destroying the zombie 2656 * while destroying the zombie
2657 */ 2657 */
@@ -2976,7 +2976,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2976 case RDMA_CM_EVENT_ADDR_ERROR: 2976 case RDMA_CM_EVENT_ADDR_ERROR:
2977 peer = (struct kib_peer *)cmid->context; 2977 peer = (struct kib_peer *)cmid->context;
2978 CNETERR("%s: ADDR ERROR %d\n", 2978 CNETERR("%s: ADDR ERROR %d\n",
2979 libcfs_nid2str(peer->ibp_nid), event->status); 2979 libcfs_nid2str(peer->ibp_nid), event->status);
2980 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); 2980 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
2981 kiblnd_peer_decref(peer); 2981 kiblnd_peer_decref(peer);
2982 return -EHOSTUNREACH; /* rc destroys cmid */ 2982 return -EHOSTUNREACH; /* rc destroys cmid */
@@ -3021,7 +3021,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3021 return kiblnd_active_connect(cmid); 3021 return kiblnd_active_connect(cmid);
3022 3022
3023 CNETERR("Can't resolve route for %s: %d\n", 3023 CNETERR("Can't resolve route for %s: %d\n",
3024 libcfs_nid2str(peer->ibp_nid), event->status); 3024 libcfs_nid2str(peer->ibp_nid), event->status);
3025 kiblnd_peer_connect_failed(peer, 1, event->status); 3025 kiblnd_peer_connect_failed(peer, 1, event->status);
3026 kiblnd_peer_decref(peer); 3026 kiblnd_peer_decref(peer);
3027 return event->status; /* rc destroys cmid */ 3027 return event->status; /* rc destroys cmid */
@@ -3031,7 +3031,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3031 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || 3031 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3032 conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); 3032 conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3033 CNETERR("%s: UNREACHABLE %d\n", 3033 CNETERR("%s: UNREACHABLE %d\n",
3034 libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); 3034 libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3035 kiblnd_connreq_done(conn, -ENETDOWN); 3035 kiblnd_connreq_done(conn, -ENETDOWN);
3036 kiblnd_conn_decref(conn); 3036 kiblnd_conn_decref(conn);
3037 return 0; 3037 return 0;
@@ -3269,14 +3269,14 @@ kiblnd_disconnect_conn(struct kib_conn *conn)
3269#define KIB_RECONN_HIGH_RACE 10 3269#define KIB_RECONN_HIGH_RACE 10
3270/** 3270/**
3271 * Allow connd to take a break and handle other things after consecutive 3271 * Allow connd to take a break and handle other things after consecutive
3272 * reconnection attemps. 3272 * reconnection attempts.
3273 */ 3273 */
3274#define KIB_RECONN_BREAK 100 3274#define KIB_RECONN_BREAK 100
3275 3275
3276int 3276int
3277kiblnd_connd(void *arg) 3277kiblnd_connd(void *arg)
3278{ 3278{
3279 spinlock_t *lock= &kiblnd_data.kib_connd_lock; 3279 spinlock_t *lock = &kiblnd_data.kib_connd_lock;
3280 wait_queue_t wait; 3280 wait_queue_t wait;
3281 unsigned long flags; 3281 unsigned long flags;
3282 struct kib_conn *conn; 3282 struct kib_conn *conn;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cbc9a9c5385f..b74cf635faee 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
96} 96}
97 97
98static int 98static int
99ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id) 99ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
100 lnet_process_id_t id)
100{ 101{
101 int cpt = lnet_cpt_of_nid(id.nid); 102 int cpt = lnet_cpt_of_nid(id.nid);
102 struct ksock_net *net = ni->ni_data; 103 struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
319} 320}
320 321
321static void 322static void
322ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn) 323ksocknal_associate_route_conn_locked(struct ksock_route *route,
324 struct ksock_conn *conn)
323{ 325{
324 struct ksock_peer *peer = route->ksnr_peer; 326 struct ksock_peer *peer = route->ksnr_peer;
325 int type = conn->ksnc_type; 327 int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
821 if (k < peer->ksnp_n_passive_ips) /* using it already */ 823 if (k < peer->ksnp_n_passive_ips) /* using it already */
822 continue; 824 continue;
823 825
824 k = ksocknal_match_peerip(iface, peerips, n_peerips); 826 k = ksocknal_match_peerip(iface, peerips,
827 n_peerips);
825 xor = ip ^ peerips[k]; 828 xor = ip ^ peerips[k];
826 this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0; 829 this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
827 830
@@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
1302 1305
1303 /* Take packets blocking for this connection. */ 1306 /* Take packets blocking for this connection. */
1304 list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) { 1307 list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1305 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO) 1308 int match = conn->ksnc_proto->pro_match_tx(conn, tx,
1306 continue; 1309 tx->tx_nonblk);
1310
1311 if (match == SOCKNAL_MATCH_NO)
1312 continue;
1307 1313
1308 list_del(&tx->tx_list); 1314 list_del(&tx->tx_list);
1309 ksocknal_queue_tx_locked(tx, conn); 1315 ksocknal_queue_tx_locked(tx, conn);
@@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1493 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock); 1499 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1494 } 1500 }
1495 1501
1496 peer->ksnp_proto = NULL; /* renegotiate protocol version */ 1502 peer->ksnp_proto = NULL; /* renegotiate protocol version */
1497 peer->ksnp_error = error; /* stash last conn close reason */ 1503 peer->ksnp_error = error; /* stash last conn close reason */
1498 1504
1499 if (list_empty(&peer->ksnp_routes)) { 1505 if (list_empty(&peer->ksnp_routes)) {
1500 /* 1506 /*
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
1786 (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) 1792 (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1787 continue; 1793 continue;
1788 1794
1789 count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0); 1795 count += ksocknal_close_peer_conns_locked(peer, ipaddr,
1796 0);
1790 } 1797 }
1791 } 1798 }
1792 1799
@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
2026 } 2033 }
2027 2034
2028 rc = 0; 2035 rc = 0;
2029 /* NB only new connections will pay attention to the new interface! */ 2036 /*
2037 * NB only new connections will pay attention to the
2038 * new interface!
2039 */
2030 } 2040 }
2031 2041
2032 write_unlock_bh(&ksocknal_data.ksnd_global_lock); 2042 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2200 int txmem; 2210 int txmem;
2201 int rxmem; 2211 int rxmem;
2202 int nagle; 2212 int nagle;
2203 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); 2213 struct ksock_conn *conn;
2204 2214
2215 conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2205 if (!conn) 2216 if (!conn)
2206 return -ENOENT; 2217 return -ENOENT;
2207 2218
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6ca0cf52691..842c45393b38 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -84,7 +84,8 @@ struct ksock_sched { /* per scheduler state */
84 struct list_head kss_zombie_noop_txs; /* zombie noop tx list */ 84 struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
85 wait_queue_head_t kss_waitq; /* where scheduler sleeps */ 85 wait_queue_head_t kss_waitq; /* where scheduler sleeps */
86 int kss_nconns; /* # connections assigned to 86 int kss_nconns; /* # connections assigned to
87 * this scheduler */ 87 * this scheduler
88 */
88 struct ksock_sched_info *kss_info; /* owner of it */ 89 struct ksock_sched_info *kss_info; /* owner of it */
89}; 90};
90 91
@@ -110,15 +111,19 @@ struct ksock_interface { /* in-use interface */
110 111
111struct ksock_tunables { 112struct ksock_tunables {
112 int *ksnd_timeout; /* "stuck" socket timeout 113 int *ksnd_timeout; /* "stuck" socket timeout
113 * (seconds) */ 114 * (seconds)
115 */
114 int *ksnd_nscheds; /* # scheduler threads in each 116 int *ksnd_nscheds; /* # scheduler threads in each
115 * pool while starting */ 117 * pool while starting
118 */
116 int *ksnd_nconnds; /* # connection daemons */ 119 int *ksnd_nconnds; /* # connection daemons */
117 int *ksnd_nconnds_max; /* max # connection daemons */ 120 int *ksnd_nconnds_max; /* max # connection daemons */
118 int *ksnd_min_reconnectms; /* first connection retry after 121 int *ksnd_min_reconnectms; /* first connection retry after
119 * (ms)... */ 122 * (ms)...
123 */
120 int *ksnd_max_reconnectms; /* ...exponentially increasing to 124 int *ksnd_max_reconnectms; /* ...exponentially increasing to
121 * this */ 125 * this
126 */
122 int *ksnd_eager_ack; /* make TCP ack eagerly? */ 127 int *ksnd_eager_ack; /* make TCP ack eagerly? */
123 int *ksnd_typed_conns; /* drive sockets by type? */ 128 int *ksnd_typed_conns; /* drive sockets by type? */
124 int *ksnd_min_bulk; /* smallest "large" message */ 129 int *ksnd_min_bulk; /* smallest "large" message */
@@ -126,9 +131,11 @@ struct ksock_tunables {
126 int *ksnd_rx_buffer_size; /* socket rx buffer size */ 131 int *ksnd_rx_buffer_size; /* socket rx buffer size */
127 int *ksnd_nagle; /* enable NAGLE? */ 132 int *ksnd_nagle; /* enable NAGLE? */
128 int *ksnd_round_robin; /* round robin for multiple 133 int *ksnd_round_robin; /* round robin for multiple
129 * interfaces */ 134 * interfaces
135 */
130 int *ksnd_keepalive; /* # secs for sending keepalive 136 int *ksnd_keepalive; /* # secs for sending keepalive
131 * NOOP */ 137 * NOOP
138 */
132 int *ksnd_keepalive_idle; /* # idle secs before 1st probe 139 int *ksnd_keepalive_idle; /* # idle secs before 1st probe
133 */ 140 */
134 int *ksnd_keepalive_count; /* # probes */ 141 int *ksnd_keepalive_count; /* # probes */
@@ -137,20 +144,26 @@ struct ksock_tunables {
137 int *ksnd_peertxcredits; /* # concurrent sends to 1 peer 144 int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
138 */ 145 */
139 int *ksnd_peerrtrcredits; /* # per-peer router buffer 146 int *ksnd_peerrtrcredits; /* # per-peer router buffer
140 * credits */ 147 * credits
148 */
141 int *ksnd_peertimeout; /* seconds to consider peer dead 149 int *ksnd_peertimeout; /* seconds to consider peer dead
142 */ 150 */
143 int *ksnd_enable_csum; /* enable check sum */ 151 int *ksnd_enable_csum; /* enable check sum */
144 int *ksnd_inject_csum_error; /* set non-zero to inject 152 int *ksnd_inject_csum_error; /* set non-zero to inject
145 * checksum error */ 153 * checksum error
154 */
146 int *ksnd_nonblk_zcack; /* always send zc-ack on 155 int *ksnd_nonblk_zcack; /* always send zc-ack on
147 * non-blocking connection */ 156 * non-blocking connection
157 */
148 unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload 158 unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
149 * size */ 159 * size
160 */
150 int *ksnd_zc_recv; /* enable ZC receive (for 161 int *ksnd_zc_recv; /* enable ZC receive (for
151 * Chelsio TOE) */ 162 * Chelsio TOE)
163 */
152 int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to 164 int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
153 * enable ZC receive */ 165 * enable ZC receive
166 */
154}; 167};
155 168
156struct ksock_net { 169struct ksock_net {
@@ -174,9 +187,11 @@ struct ksock_nal_data {
174 int ksnd_nnets; /* # networks set up */ 187 int ksnd_nnets; /* # networks set up */
175 struct list_head ksnd_nets; /* list of nets */ 188 struct list_head ksnd_nets; /* list of nets */
176 rwlock_t ksnd_global_lock; /* stabilize peer/conn 189 rwlock_t ksnd_global_lock; /* stabilize peer/conn
177 * ops */ 190 * ops
191 */
178 struct list_head *ksnd_peers; /* hash table of all my 192 struct list_head *ksnd_peers; /* hash table of all my
179 * known peers */ 193 * known peers
194 */
180 int ksnd_peer_hash_size; /* size of ksnd_peers */ 195 int ksnd_peer_hash_size; /* size of ksnd_peers */
181 196
182 int ksnd_nthreads; /* # live threads */ 197 int ksnd_nthreads; /* # live threads */
@@ -187,11 +202,14 @@ struct ksock_nal_data {
187 atomic_t ksnd_nactive_txs; /* #active txs */ 202 atomic_t ksnd_nactive_txs; /* #active txs */
188 203
189 struct list_head ksnd_deathrow_conns; /* conns to close: 204 struct list_head ksnd_deathrow_conns; /* conns to close:
190 * reaper_lock*/ 205 * reaper_lock
206 */
191 struct list_head ksnd_zombie_conns; /* conns to free: 207 struct list_head ksnd_zombie_conns; /* conns to free:
192 * reaper_lock */ 208 * reaper_lock
209 */
193 struct list_head ksnd_enomem_conns; /* conns to retry: 210 struct list_head ksnd_enomem_conns; /* conns to retry:
194 * reaper_lock*/ 211 * reaper_lock
212 */
195 wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */ 213 wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
196 unsigned long ksnd_reaper_waketime; /* when reaper will wake 214 unsigned long ksnd_reaper_waketime; /* when reaper will wake
197 */ 215 */
@@ -201,30 +219,34 @@ struct ksock_nal_data {
201 int ksnd_stall_tx; /* test sluggish sender 219 int ksnd_stall_tx; /* test sluggish sender
202 */ 220 */
203 int ksnd_stall_rx; /* test sluggish 221 int ksnd_stall_rx; /* test sluggish
204 * receiver */ 222 * receiver
205 223 */
206 struct list_head ksnd_connd_connreqs; /* incoming connection 224 struct list_head ksnd_connd_connreqs; /* incoming connection
207 * requests */ 225 * requests
226 */
208 struct list_head ksnd_connd_routes; /* routes waiting to be 227 struct list_head ksnd_connd_routes; /* routes waiting to be
209 * connected */ 228 * connected
229 */
210 wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ 230 wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
211 int ksnd_connd_connecting; /* # connds connecting 231 int ksnd_connd_connecting; /* # connds connecting
212 */ 232 */
213 time64_t ksnd_connd_failed_stamp;/* time stamp of the 233 time64_t ksnd_connd_failed_stamp;/* time stamp of the
214 * last failed 234 * last failed
215 * connecting attempt */ 235 * connecting attempt
236 */
216 time64_t ksnd_connd_starting_stamp;/* time stamp of the 237 time64_t ksnd_connd_starting_stamp;/* time stamp of the
217 * last starting connd 238 * last starting connd
218 */ 239 */
219 unsigned ksnd_connd_starting; /* # starting connd */ 240 unsigned int ksnd_connd_starting; /* # starting connd */
220 unsigned ksnd_connd_running; /* # running connd */ 241 unsigned int ksnd_connd_running; /* # running connd */
221 spinlock_t ksnd_connd_lock; /* serialise */ 242 spinlock_t ksnd_connd_lock; /* serialise */
222 243
223 struct list_head ksnd_idle_noop_txs; /* list head for freed 244 struct list_head ksnd_idle_noop_txs; /* list head for freed
224 * noop tx */ 245 * noop tx
246 */
225 spinlock_t ksnd_tx_lock; /* serialise, g_lock 247 spinlock_t ksnd_tx_lock; /* serialise, g_lock
226 * unsafe */ 248 * unsafe
227 249 */
228}; 250};
229 251
230#define SOCKNAL_INIT_NOTHING 0 252#define SOCKNAL_INIT_NOTHING 0
@@ -304,18 +326,21 @@ struct ksock_conn {
304 struct list_head ksnc_list; /* stash on peer's conn list */ 326 struct list_head ksnc_list; /* stash on peer's conn list */
305 struct socket *ksnc_sock; /* actual socket */ 327 struct socket *ksnc_sock; /* actual socket */
306 void *ksnc_saved_data_ready; /* socket's original 328 void *ksnc_saved_data_ready; /* socket's original
307 * data_ready() callback */ 329 * data_ready() callback
330 */
308 void *ksnc_saved_write_space; /* socket's original 331 void *ksnc_saved_write_space; /* socket's original
309 * write_space() callback */ 332 * write_space() callback
333 */
310 atomic_t ksnc_conn_refcount;/* conn refcount */ 334 atomic_t ksnc_conn_refcount;/* conn refcount */
311 atomic_t ksnc_sock_refcount;/* sock refcount */ 335 atomic_t ksnc_sock_refcount;/* sock refcount */
312 struct ksock_sched *ksnc_scheduler; /* who schedules this connection 336 struct ksock_sched *ksnc_scheduler; /* who schedules this connection
313 */ 337 */
314 __u32 ksnc_myipaddr; /* my IP */ 338 __u32 ksnc_myipaddr; /* my IP */
315 __u32 ksnc_ipaddr; /* peer's IP */ 339 __u32 ksnc_ipaddr; /* peer's IP */
316 int ksnc_port; /* peer's port */ 340 int ksnc_port; /* peer's port */
317 signed int ksnc_type:3; /* type of connection, should be 341 signed int ksnc_type:3; /* type of connection, should be
318 * signed value */ 342 * signed value
343 */
319 unsigned int ksnc_closing:1; /* being shut down */ 344 unsigned int ksnc_closing:1; /* being shut down */
320 unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ 345 unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
321 unsigned int ksnc_zc_capable:1; /* enable to ZC */ 346 unsigned int ksnc_zc_capable:1; /* enable to ZC */
@@ -323,9 +348,11 @@ struct ksock_conn {
323 348
324 /* reader */ 349 /* reader */
325 struct list_head ksnc_rx_list; /* where I enq waiting input or a 350 struct list_head ksnc_rx_list; /* where I enq waiting input or a
326 * forwarding descriptor */ 351 * forwarding descriptor
352 */
327 unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times 353 unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
328 * out */ 354 * out
355 */
329 __u8 ksnc_rx_started; /* started receiving a message */ 356 __u8 ksnc_rx_started; /* started receiving a message */
330 __u8 ksnc_rx_ready; /* data ready to read */ 357 __u8 ksnc_rx_ready; /* data ready to read */
331 __u8 ksnc_rx_scheduled; /* being progressed */ 358 __u8 ksnc_rx_scheduled; /* being progressed */
@@ -338,7 +365,8 @@ struct ksock_conn {
338 lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ 365 lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
339 union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */ 366 union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
340 __u32 ksnc_rx_csum; /* partial checksum for incoming 367 __u32 ksnc_rx_csum; /* partial checksum for incoming
341 * data */ 368 * data
369 */
342 void *ksnc_cookie; /* rx lnet_finalize passthru arg 370 void *ksnc_cookie; /* rx lnet_finalize passthru arg
343 */ 371 */
344 ksock_msg_t ksnc_msg; /* incoming message buffer: 372 ksock_msg_t ksnc_msg; /* incoming message buffer:
@@ -346,14 +374,16 @@ struct ksock_conn {
346 * whole struct 374 * whole struct
347 * V1.x message is a bare 375 * V1.x message is a bare
348 * lnet_hdr_t, it's stored in 376 * lnet_hdr_t, it's stored in
349 * ksnc_msg.ksm_u.lnetmsg */ 377 * ksnc_msg.ksm_u.lnetmsg
350 378 */
351 /* WRITER */ 379 /* WRITER */
352 struct list_head ksnc_tx_list; /* where I enq waiting for output 380 struct list_head ksnc_tx_list; /* where I enq waiting for output
353 * space */ 381 * space
382 */
354 struct list_head ksnc_tx_queue; /* packets waiting to be sent */ 383 struct list_head ksnc_tx_queue; /* packets waiting to be sent */
355 struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet 384 struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
356 * message or ZC-ACK */ 385 * message or ZC-ACK
386 */
357 unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out 387 unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
358 */ 388 */
359 int ksnc_tx_bufnob; /* send buffer marker */ 389 int ksnc_tx_bufnob; /* send buffer marker */
@@ -361,7 +391,8 @@ struct ksock_conn {
361 int ksnc_tx_ready; /* write space */ 391 int ksnc_tx_ready; /* write space */
362 int ksnc_tx_scheduled; /* being progressed */ 392 int ksnc_tx_scheduled; /* being progressed */
363 unsigned long ksnc_tx_last_post; /* time stamp of the last posted 393 unsigned long ksnc_tx_last_post; /* time stamp of the last posted
364 * TX */ 394 * TX
395 */
365}; 396};
366 397
367struct ksock_route { 398struct ksock_route {
@@ -370,20 +401,24 @@ struct ksock_route {
370 struct ksock_peer *ksnr_peer; /* owning peer */ 401 struct ksock_peer *ksnr_peer; /* owning peer */
371 atomic_t ksnr_refcount; /* # users */ 402 atomic_t ksnr_refcount; /* # users */
372 unsigned long ksnr_timeout; /* when (in jiffies) reconnection 403 unsigned long ksnr_timeout; /* when (in jiffies) reconnection
373 * can happen next */ 404 * can happen next
405 */
374 long ksnr_retry_interval; /* how long between retries */ 406 long ksnr_retry_interval; /* how long between retries */
375 __u32 ksnr_myipaddr; /* my IP */ 407 __u32 ksnr_myipaddr; /* my IP */
376 __u32 ksnr_ipaddr; /* IP address to connect to */ 408 __u32 ksnr_ipaddr; /* IP address to connect to */
377 int ksnr_port; /* port to connect to */ 409 int ksnr_port; /* port to connect to */
378 unsigned int ksnr_scheduled:1; /* scheduled for attention */ 410 unsigned int ksnr_scheduled:1; /* scheduled for attention */
379 unsigned int ksnr_connecting:1; /* connection establishment in 411 unsigned int ksnr_connecting:1; /* connection establishment in
380 * progress */ 412 * progress
413 */
381 unsigned int ksnr_connected:4; /* connections established by 414 unsigned int ksnr_connected:4; /* connections established by
382 * type */ 415 * type
416 */
383 unsigned int ksnr_deleted:1; /* been removed from peer? */ 417 unsigned int ksnr_deleted:1; /* been removed from peer? */
384 unsigned int ksnr_share_count; /* created explicitly? */ 418 unsigned int ksnr_share_count; /* created explicitly? */
385 int ksnr_conn_count; /* # conns established by this 419 int ksnr_conn_count; /* # conns established by this
386 * route */ 420 * route
421 */
387}; 422};
388 423
389#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ 424#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
@@ -391,7 +426,8 @@ struct ksock_route {
391struct ksock_peer { 426struct ksock_peer {
392 struct list_head ksnp_list; /* stash on global peer list */ 427 struct list_head ksnp_list; /* stash on global peer list */
393 unsigned long ksnp_last_alive; /* when (in jiffies) I was last 428 unsigned long ksnp_last_alive; /* when (in jiffies) I was last
394 * alive */ 429 * alive
430 */
395 lnet_process_id_t ksnp_id; /* who's on the other end(s) */ 431 lnet_process_id_t ksnp_id; /* who's on the other end(s) */
396 atomic_t ksnp_refcount; /* # users */ 432 atomic_t ksnp_refcount; /* # users */
397 int ksnp_sharecount; /* lconf usage counter */ 433 int ksnp_sharecount; /* lconf usage counter */
@@ -408,7 +444,8 @@ struct ksock_peer {
408 struct list_head ksnp_tx_queue; /* waiting packets */ 444 struct list_head ksnp_tx_queue; /* waiting packets */
409 spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ 445 spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
410 struct list_head ksnp_zc_req_list; /* zero copy requests wait for 446 struct list_head ksnp_zc_req_list; /* zero copy requests wait for
411 * ACK */ 447 * ACK
448 */
412 unsigned long ksnp_send_keepalive; /* time to send keepalive */ 449 unsigned long ksnp_send_keepalive; /* time to send keepalive */
413 lnet_ni_t *ksnp_ni; /* which network */ 450 lnet_ni_t *ksnp_ni; /* which network */
414 int ksnp_n_passive_ips; /* # of... */ 451 int ksnp_n_passive_ips; /* # of... */
@@ -429,7 +466,8 @@ extern struct ksock_tunables ksocknal_tunables;
429#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ 466#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
430#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ 467#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
431#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not 468#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
432 * preferred */ 469 * preferred
470 */
433 471
434struct ksock_proto { 472struct ksock_proto {
435 /* version number of protocol */ 473 /* version number of protocol */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index c1c6f604e6ad..972f6094be75 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
620} 620}
621 621
622struct ksock_conn * 622struct ksock_conn *
623ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk) 623ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
624 int nonblk)
624{ 625{
625 struct list_head *tmp; 626 struct list_head *tmp;
626 struct ksock_conn *conn; 627 struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
630 int fnob = 0; 631 int fnob = 0;
631 632
632 list_for_each(tmp, &peer->ksnp_conns) { 633 list_for_each(tmp, &peer->ksnp_conns) {
633 struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list); 634 struct ksock_conn *c;
634 int nob = atomic_read(&c->ksnc_tx_nob) + 635 int nob, rc;
635 c->ksnc_sock->sk->sk_wmem_queued; 636
636 int rc; 637 c = list_entry(tmp, struct ksock_conn, ksnc_list);
638 nob = atomic_read(&c->ksnc_tx_nob) +
639 c->ksnc_sock->sk->sk_wmem_queued;
637 640
638 LASSERT(!c->ksnc_closing); 641 LASSERT(!c->ksnc_closing);
639 LASSERT(c->ksnc_proto && 642 LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
752 LASSERT(msg->ksm_zc_cookies[1]); 755 LASSERT(msg->ksm_zc_cookies[1]);
753 LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); 756 LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
754 757
758 /* ZC ACK piggybacked on ztx release tx later */
755 if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) 759 if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
756 ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ 760 ztx = tx;
757
758 } else { 761 } else {
759 /* 762 /*
760 * It's a normal packet - can it piggback a noop zc-ack that 763 * It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
796 799
797 LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); 800 LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
798 801
799 if (route->ksnr_scheduled) /* connections being established */ 802 /* connections being established */
803 if (route->ksnr_scheduled)
800 continue; 804 continue;
801 805
802 /* all route types connected ? */ 806 /* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
1514 rc = ksocknal_process_transmit(conn, tx); 1518 rc = ksocknal_process_transmit(conn, tx);
1515 1519
1516 if (rc == -ENOMEM || rc == -EAGAIN) { 1520 if (rc == -ENOMEM || rc == -EAGAIN) {
1517 /* Incomplete send: replace tx on HEAD of tx_queue */ 1521 /*
1522 * Incomplete send: replace tx on HEAD of
1523 * tx_queue
1524 */
1518 spin_lock_bh(&sched->kss_lock); 1525 spin_lock_bh(&sched->kss_lock);
1519 list_add(&tx->tx_list, &conn->ksnc_tx_queue); 1526 list_add(&tx->tx_list, &conn->ksnc_tx_queue);
1520 } else { 1527 } else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
1724 timeout = active ? *ksocknal_tunables.ksnd_timeout : 1731 timeout = active ? *ksocknal_tunables.ksnd_timeout :
1725 lnet_acceptor_timeout(); 1732 lnet_acceptor_timeout();
1726 1733
1727 rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout); 1734 rc = lnet_sock_read(sock, &hello->kshm_magic,
1735 sizeof(hello->kshm_magic), timeout);
1728 if (rc) { 1736 if (rc) {
1729 CERROR("Error %d reading HELLO from %pI4h\n", 1737 CERROR("Error %d reading HELLO from %pI4h\n",
1730 rc, &conn->ksnc_ipaddr); 1738 rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
1798 conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { 1806 conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
1799 /* Userspace NAL assigns peer process ID from socket */ 1807 /* Userspace NAL assigns peer process ID from socket */
1800 recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; 1808 recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
1801 recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr); 1809 recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
1810 conn->ksnc_ipaddr);
1802 } else { 1811 } else {
1803 recv_id.nid = hello->kshm_src_nid; 1812 recv_id.nid = hello->kshm_src_nid;
1804 recv_id.pid = hello->kshm_src_pid; 1813 recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
1882 if (peer->ksnp_accepting > 0) { 1891 if (peer->ksnp_accepting > 0) {
1883 CDEBUG(D_NET, 1892 CDEBUG(D_NET,
1884 "peer %s(%d) already connecting to me, retry later.\n", 1893 "peer %s(%d) already connecting to me, retry later.\n",
1885 libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting); 1894 libcfs_nid2str(peer->ksnp_id.nid),
1895 peer->ksnp_accepting);
1886 retry_later = 1; 1896 retry_later = 1;
1887 } 1897 }
1888 1898
@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)
2241 2251
2242 /* Nothing to do for 'timeout' */ 2252 /* Nothing to do for 'timeout' */
2243 set_current_state(TASK_INTERRUPTIBLE); 2253 set_current_state(TASK_INTERRUPTIBLE);
2244 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait); 2254 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
2255 &wait);
2245 spin_unlock_bh(connd_lock); 2256 spin_unlock_bh(connd_lock);
2246 2257
2247 nloops = 0; 2258 nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
2371 struct ksock_conn *conn; 2382 struct ksock_conn *conn;
2372 struct ksock_tx *tx; 2383 struct ksock_tx *tx;
2373 2384
2374 if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ 2385 /* last_alive will be updated by create_conn */
2386 if (list_empty(&peer->ksnp_conns))
2375 return 0; 2387 return 0;
2376 2388
2377 if (peer->ksnp_proto != &ksocknal_protocol_v3x) 2389 if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2473,8 +2485,8 @@ ksocknal_check_peer_timeouts(int idx)
2473 * holding only shared lock 2485 * holding only shared lock
2474 */ 2486 */
2475 if (!list_empty(&peer->ksnp_tx_queue)) { 2487 if (!list_empty(&peer->ksnp_tx_queue)) {
2476 struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next, 2488 tx = list_entry(peer->ksnp_tx_queue.next,
2477 struct ksock_tx, tx_list); 2489 struct ksock_tx, tx_list);
2478 2490
2479 if (cfs_time_aftereq(cfs_time_current(), 2491 if (cfs_time_aftereq(cfs_time_current(),
2480 tx->tx_deadline)) { 2492 tx->tx_deadline)) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6c95e989ca12..4bcab4bcc2de 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
202 fragnob = sum; 202 fragnob = sum;
203 203
204 conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, 204 conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
205 iov[i].iov_base, fragnob); 205 iov[i].iov_base,
206 fragnob);
206 } 207 }
207 conn->ksnc_msg.ksm_csum = saved_csum; 208 conn->ksnc_msg.ksm_csum = saved_csum;
208 } 209 }
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
291} 292}
292 293
293int 294int
294ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle) 295ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
296 int *rxmem, int *nagle)
295{ 297{
296 struct socket *sock = conn->ksnc_sock; 298 struct socket *sock = conn->ksnc_sock;
297 int len; 299 int len;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 82e174f6d9fe..8f0ff6ca1f39 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
194 } 194 }
195 195
196 if (!tx->tx_msg.ksm_zc_cookies[0]) { 196 if (!tx->tx_msg.ksm_zc_cookies[0]) {
197 /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ 197 /*
198 * NOOP tx has only one ZC-ACK cookie,
199 * can carry at least one more
200 */
198 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { 201 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
199 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; 202 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
200 tx->tx_msg.ksm_zc_cookies[1] = cookie; 203 tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
203 } 206 }
204 207
205 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { 208 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
206 /* not likely to carry more ACKs, skip it to simplify logic */ 209 /*
210 * not likely to carry more ACKs, skip it
211 * to simplify logic
212 */
207 ksocknal_next_tx_carrier(conn); 213 ksocknal_next_tx_carrier(conn);
208 } 214 }
209 215
@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
237 } 243 }
238 244
239 } else { 245 } else {
240 /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */ 246 /*
247 * ksm_zc_cookies[0] < ksm_zc_cookies[1],
248 * it is range of cookies
249 */
241 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && 250 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
242 cookie <= tx->tx_msg.ksm_zc_cookies[1]) { 251 cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
243 CWARN("%s: duplicated ZC cookie: %llu\n", 252 CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
425 tx_zc_list) { 434 tx_zc_list) {
426 __u64 c = tx->tx_msg.ksm_zc_cookies[0]; 435 __u64 c = tx->tx_msg.ksm_zc_cookies[0];
427 436
428 if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { 437 if (c == cookie1 || c == cookie2 ||
438 (cookie1 < c && c < cookie2)) {
429 tx->tx_msg.ksm_zc_cookies[0] = 0; 439 tx->tx_msg.ksm_zc_cookies[0] = 0;
430 list_del(&tx->tx_zc_list); 440 list_del(&tx->tx_zc_list);
431 list_add(&tx->tx_zc_list, &zlist); 441 list_add(&tx->tx_zc_list, &zlist);
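The condition reflowed above is the heart of ZC-ACK handling: an ACK carries a pair (cookie1, cookie2) and retires every queued zero-copy tx whose own cookie is one of the endpoints or lies strictly between them. A minimal standalone sketch of that test (names are illustrative, not the socklnd API):

#include <stdbool.h>

/* A ZC-ACK carrying (cookie1, cookie2) covers cookie c if c is an endpoint
 * or falls strictly inside the open interval (cookie1, cookie2). */
static bool zcack_covers(unsigned long long c,
			 unsigned long long cookie1,
			 unsigned long long cookie2)
{
	return c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2);
}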
@@ -639,7 +649,8 @@ out:
639} 649}
640 650
641static int 651static int
642ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout) 652ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
653 int timeout)
643{ 654{
644 struct socket *sock = conn->ksnc_sock; 655 struct socket *sock = conn->ksnc_sock;
645 int rc; 656 int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
737 tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); 748 tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
738 tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); 749 tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
739 } 750 }
740 /* Don't checksum before start sending, because packet can be piggybacked with ACK */ 751 /*
752 * Don't checksum before start sending, because packet can be
753 * piggybacked with ACK
754 */
741} 755}
742 756
743static void 757static void
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 23b36b890964..a38db2322225 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -57,7 +57,7 @@ static int libcfs_param_debug_mb_set(const char *val,
57 const struct kernel_param *kp) 57 const struct kernel_param *kp)
58{ 58{
59 int rc; 59 int rc;
60 unsigned num; 60 unsigned int num;
61 61
62 rc = kstrtouint(val, 0, &num); 62 rc = kstrtouint(val, 0, &num);
63 if (rc < 0) 63 if (rc < 0)
@@ -228,7 +228,8 @@ int libcfs_panic_in_progress;
228static const char * 228static const char *
229libcfs_debug_subsys2str(int subsys) 229libcfs_debug_subsys2str(int subsys)
230{ 230{
231 static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES; 231 static const char * const libcfs_debug_subsystems[] =
232 LIBCFS_DEBUG_SUBSYS_NAMES;
232 233
233 if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems)) 234 if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
234 return NULL; 235 return NULL;
@@ -240,7 +241,8 @@ libcfs_debug_subsys2str(int subsys)
240static const char * 241static const char *
241libcfs_debug_dbg2str(int debug) 242libcfs_debug_dbg2str(int debug)
242{ 243{
243 static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES; 244 static const char * const libcfs_debug_masks[] =
245 LIBCFS_DEBUG_MASKS_NAMES;
244 246
245 if (debug >= ARRAY_SIZE(libcfs_debug_masks)) 247 if (debug >= ARRAY_SIZE(libcfs_debug_masks))
246 return NULL; 248 return NULL;
@@ -253,17 +255,17 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
253{ 255{
254 const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : 256 const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
255 libcfs_debug_dbg2str; 257 libcfs_debug_dbg2str;
256 int len = 0; 258 int len = 0;
257 const char *token; 259 const char *token;
258 int i; 260 int i;
259 261
260 if (mask == 0) { /* "0" */ 262 if (!mask) { /* "0" */
261 if (size > 0) 263 if (size > 0)
262 str[0] = '0'; 264 str[0] = '0';
263 len = 1; 265 len = 1;
264 } else { /* space-separated tokens */ 266 } else { /* space-separated tokens */
265 for (i = 0; i < 32; i++) { 267 for (i = 0; i < 32; i++) {
266 if ((mask & (1 << i)) == 0) 268 if (!(mask & (1 << i)))
267 continue; 269 continue;
268 270
269 token = fn(i); 271 token = fn(i);
@@ -276,7 +278,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
276 len++; 278 len++;
277 } 279 }
278 280
279 while (*token != 0) { 281 while (*token) {
280 if (len < size) 282 if (len < size)
281 str[len] = *token; 283 str[len] = *token;
282 token++; 284 token++;
@@ -299,10 +301,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
299{ 301{
300 const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : 302 const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
301 libcfs_debug_dbg2str; 303 libcfs_debug_dbg2str;
302 int m = 0; 304 int m = 0;
303 int matched; 305 int matched;
304 int n; 306 int n;
305 int t; 307 int t;
306 308
307 /* Allow a number for backwards compatibility */ 309 /* Allow a number for backwards compatibility */
308 310
@@ -313,7 +315,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
313 t = sscanf(str, "%i%n", &m, &matched); 315 t = sscanf(str, "%i%n", &m, &matched);
314 if (t >= 1 && matched == n) { 316 if (t >= 1 && matched == n) {
315 /* don't print warning for lctl set_param debug=0 or -1 */ 317 /* don't print warning for lctl set_param debug=0 or -1 */
316 if (m != 0 && m != -1) 318 if (m && m != -1)
317 CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); 319 CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
318 *mask = m; 320 *mask = m;
319 return 0; 321 return 0;
@@ -387,8 +389,8 @@ EXPORT_SYMBOL(libcfs_debug_dumplog);
387 389
388int libcfs_debug_init(unsigned long bufsize) 390int libcfs_debug_init(unsigned long bufsize)
389{ 391{
390 int rc = 0;
391 unsigned int max = libcfs_debug_mb; 392 unsigned int max = libcfs_debug_mb;
393 int rc = 0;
392 394
393 init_waitqueue_head(&debug_ctlwq); 395 init_waitqueue_head(&debug_ctlwq);
394 396
@@ -414,9 +416,9 @@ int libcfs_debug_init(unsigned long bufsize)
414 max = max / num_possible_cpus(); 416 max = max / num_possible_cpus();
415 max <<= (20 - PAGE_SHIFT); 417 max <<= (20 - PAGE_SHIFT);
416 } 418 }
417 rc = cfs_tracefile_init(max);
418 419
419 if (rc == 0) { 420 rc = cfs_tracefile_init(max);
421 if (!rc) {
420 libcfs_register_panic_notifier(); 422 libcfs_register_panic_notifier();
421 libcfs_debug_mb = cfs_trace_get_debug_mb(); 423 libcfs_debug_mb = cfs_trace_get_debug_mb();
422 } 424 }
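For reference, the sizing above splits the requested debug buffer (libcfs_debug_mb, in MiB) across the possible CPUs and converts each share to pages by shifting by (20 - PAGE_SHIFT), since one MiB is 2^(20 - PAGE_SHIFT) pages. A rough standalone sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the helper name is hypothetical, not the libcfs API:

static unsigned int debug_pages_per_cpu(unsigned int total_mb, unsigned int ncpus)
{
	unsigned int max = total_mb / ncpus;	/* MiB available to each CPU */

	return max << (20 - 12);		/* MiB -> number of 4 KiB pages */
}

/* e.g. debug_pages_per_cpu(40, 4) == 10 << 8 == 2560 pages per CPU. */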
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index e4b1a0a86eae..12dd50ad4efb 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(cfs_race_waitq);
46int cfs_race_state; 46int cfs_race_state;
47EXPORT_SYMBOL(cfs_race_state); 47EXPORT_SYMBOL(cfs_race_state);
48 48
49int __cfs_fail_check_set(__u32 id, __u32 value, int set) 49int __cfs_fail_check_set(u32 id, u32 value, int set)
50{ 50{
51 static atomic_t cfs_fail_count = ATOMIC_INIT(0); 51 static atomic_t cfs_fail_count = ATOMIC_INIT(0);
52 52
@@ -113,6 +113,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
113 break; 113 break;
114 case CFS_FAIL_LOC_RESET: 114 case CFS_FAIL_LOC_RESET:
115 cfs_fail_loc = value; 115 cfs_fail_loc = value;
116 atomic_set(&cfs_fail_count, 0);
116 break; 117 break;
117 default: 118 default:
118 LASSERTF(0, "called with bad set %u\n", set); 119 LASSERTF(0, "called with bad set %u\n", set);
@@ -123,7 +124,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
123} 124}
124EXPORT_SYMBOL(__cfs_fail_check_set); 125EXPORT_SYMBOL(__cfs_fail_check_set);
125 126
126int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) 127int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
127{ 128{
128 int ret; 129 int ret;
129 130
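The one functional change in this file is the atomic_set() added under CFS_FAIL_LOC_RESET: re-arming cfs_fail_loc now also clears the static hit counter, so occurrences recorded against the previous fault-injection point cannot trigger the new one prematurely. A minimal sketch of the pattern, with illustrative names standing in for cfs_fail_loc and the kernel atomic_t counter:

#include <stdatomic.h>

static atomic_int hit_count;	/* stands in for the static cfs_fail_count */
static unsigned int fail_loc;	/* stands in for cfs_fail_loc */

static void fail_loc_reset(unsigned int value)
{
	fail_loc = value;
	/* start counting from zero for the newly armed location */
	atomic_store(&hit_count, 0);
}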
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 23283b6e09ab..c93c59d8fe6c 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -289,7 +289,7 @@ cfs_hash_hd_hhead_size(struct cfs_hash *hs)
289static struct hlist_head * 289static struct hlist_head *
290cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) 290cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
291{ 291{
292 struct cfs_hash_head_dep *head; 292 struct cfs_hash_head_dep *head;
293 293
294 head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; 294 head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
295 return &head[bd->bd_offset].hd_head; 295 return &head[bd->bd_offset].hd_head;
@@ -492,7 +492,7 @@ cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
492 cfs_hash_bd_from_key(hs, hs->hs_buckets, 492 cfs_hash_bd_from_key(hs, hs->hs_buckets,
493 hs->hs_cur_bits, key, bd); 493 hs->hs_cur_bits, key, bd);
494 } else { 494 } else {
495 LASSERT(hs->hs_rehash_bits != 0); 495 LASSERT(hs->hs_rehash_bits);
496 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, 496 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
497 hs->hs_rehash_bits, key, bd); 497 hs->hs_rehash_bits, key, bd);
498 } 498 }
@@ -507,14 +507,14 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
507 507
508 bd->bd_bucket->hsb_depmax = dep_cur; 508 bd->bd_bucket->hsb_depmax = dep_cur;
509# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 509# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
510 if (likely(warn_on_depth == 0 || 510 if (likely(!warn_on_depth ||
511 max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) 511 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
512 return; 512 return;
513 513
514 spin_lock(&hs->hs_dep_lock); 514 spin_lock(&hs->hs_dep_lock);
515 hs->hs_dep_max = dep_cur; 515 hs->hs_dep_max = dep_cur;
516 hs->hs_dep_bkt = bd->bd_bucket->hsb_index; 516 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
517 hs->hs_dep_off = bd->bd_offset; 517 hs->hs_dep_off = bd->bd_offset;
518 hs->hs_dep_bits = hs->hs_cur_bits; 518 hs->hs_dep_bits = hs->hs_cur_bits;
519 spin_unlock(&hs->hs_dep_lock); 519 spin_unlock(&hs->hs_dep_lock);
520 520
@@ -531,7 +531,7 @@ cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
531 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); 531 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
532 cfs_hash_bd_dep_record(hs, bd, rc); 532 cfs_hash_bd_dep_record(hs, bd, rc);
533 bd->bd_bucket->hsb_version++; 533 bd->bd_bucket->hsb_version++;
534 if (unlikely(bd->bd_bucket->hsb_version == 0)) 534 if (unlikely(!bd->bd_bucket->hsb_version))
535 bd->bd_bucket->hsb_version++; 535 bd->bd_bucket->hsb_version++;
536 bd->bd_bucket->hsb_count++; 536 bd->bd_bucket->hsb_count++;
537 537
@@ -551,7 +551,7 @@ cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
551 LASSERT(bd->bd_bucket->hsb_count > 0); 551 LASSERT(bd->bd_bucket->hsb_count > 0);
552 bd->bd_bucket->hsb_count--; 552 bd->bd_bucket->hsb_count--;
553 bd->bd_bucket->hsb_version++; 553 bd->bd_bucket->hsb_version++;
554 if (unlikely(bd->bd_bucket->hsb_version == 0)) 554 if (unlikely(!bd->bd_bucket->hsb_version))
555 bd->bd_bucket->hsb_version++; 555 bd->bd_bucket->hsb_version++;
556 556
557 if (cfs_hash_with_counter(hs)) { 557 if (cfs_hash_with_counter(hs)) {
@@ -571,7 +571,7 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
571 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; 571 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
572 int rc; 572 int rc;
573 573
574 if (cfs_hash_bd_compare(bd_old, bd_new) == 0) 574 if (!cfs_hash_bd_compare(bd_old, bd_new))
575 return; 575 return;
576 576
577 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops 577 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
@@ -584,11 +584,11 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
584 LASSERT(obkt->hsb_count > 0); 584 LASSERT(obkt->hsb_count > 0);
585 obkt->hsb_count--; 585 obkt->hsb_count--;
586 obkt->hsb_version++; 586 obkt->hsb_version++;
587 if (unlikely(obkt->hsb_version == 0)) 587 if (unlikely(!obkt->hsb_version))
588 obkt->hsb_version++; 588 obkt->hsb_version++;
589 nbkt->hsb_count++; 589 nbkt->hsb_count++;
590 nbkt->hsb_version++; 590 nbkt->hsb_version++;
591 if (unlikely(nbkt->hsb_version == 0)) 591 if (unlikely(!nbkt->hsb_version))
592 nbkt->hsb_version++; 592 nbkt->hsb_version++;
593} 593}
594 594
@@ -629,7 +629,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
629 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); 629 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
630 struct hlist_node *ehnode; 630 struct hlist_node *ehnode;
631 struct hlist_node *match; 631 struct hlist_node *match;
632 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; 632 int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
633 633
634 /* with this function, we can avoid a lot of useless refcount ops, 634 /* with this function, we can avoid a lot of useless refcount ops,
635 * which are expensive atomic operations most time. 635 * which are expensive atomic operations most time.
@@ -643,13 +643,13 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
643 continue; 643 continue;
644 644
645 /* match and ... */ 645 /* match and ... */
646 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) { 646 if (intent & CFS_HS_LOOKUP_MASK_DEL) {
647 cfs_hash_bd_del_locked(hs, bd, ehnode); 647 cfs_hash_bd_del_locked(hs, bd, ehnode);
648 return ehnode; 648 return ehnode;
649 } 649 }
650 650
651 /* caller wants refcount? */ 651 /* caller wants refcount? */
652 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0) 652 if (intent & CFS_HS_LOOKUP_MASK_REF)
653 cfs_hash_get(hs, ehnode); 653 cfs_hash_get(hs, ehnode);
654 return ehnode; 654 return ehnode;
655 } 655 }
@@ -682,7 +682,7 @@ EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
682 682
683static void 683static void
684cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, 684cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
685 unsigned n, int excl) 685 unsigned int n, int excl)
686{ 686{
687 struct cfs_hash_bucket *prev = NULL; 687 struct cfs_hash_bucket *prev = NULL;
688 int i; 688 int i;
@@ -704,7 +704,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
704 704
705static void 705static void
706cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, 706cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
707 unsigned n, int excl) 707 unsigned int n, int excl)
708{ 708{
709 struct cfs_hash_bucket *prev = NULL; 709 struct cfs_hash_bucket *prev = NULL;
710 int i; 710 int i;
@@ -719,10 +719,10 @@ cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
719 719
720static struct hlist_node * 720static struct hlist_node *
721cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, 721cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
722 unsigned n, const void *key) 722 unsigned int n, const void *key)
723{ 723{
724 struct hlist_node *ehnode; 724 struct hlist_node *ehnode;
725 unsigned i; 725 unsigned int i;
726 726
727 cfs_hash_for_each_bd(bds, n, i) { 727 cfs_hash_for_each_bd(bds, n, i) {
728 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, 728 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
@@ -735,12 +735,12 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
735 735
736static struct hlist_node * 736static struct hlist_node *
737cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, 737cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
738 unsigned n, const void *key, 738 unsigned int n, const void *key,
739 struct hlist_node *hnode, int noref) 739 struct hlist_node *hnode, int noref)
740{ 740{
741 struct hlist_node *ehnode; 741 struct hlist_node *ehnode;
742 int intent; 742 int intent;
743 unsigned i; 743 unsigned int i;
744 744
745 LASSERT(hnode); 745 LASSERT(hnode);
746 intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; 746 intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
@@ -766,7 +766,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
766 766
767static struct hlist_node * 767static struct hlist_node *
768cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, 768cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
769 unsigned n, const void *key, 769 unsigned int n, const void *key,
770 struct hlist_node *hnode) 770 struct hlist_node *hnode)
771{ 771{
772 struct hlist_node *ehnode; 772 struct hlist_node *ehnode;
@@ -815,7 +815,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
815 return; 815 return;
816 } 816 }
817 817
818 LASSERT(hs->hs_rehash_bits != 0); 818 LASSERT(hs->hs_rehash_bits);
819 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, 819 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
820 hs->hs_rehash_bits, key, &bds[1]); 820 hs->hs_rehash_bits, key, &bds[1]);
821 821
@@ -883,7 +883,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
883 struct cfs_hash_bucket **new_bkts; 883 struct cfs_hash_bucket **new_bkts;
884 int i; 884 int i;
885 885
886 LASSERT(old_size == 0 || old_bkts); 886 LASSERT(!old_size || old_bkts);
887 887
888 if (old_bkts && old_size == new_size) 888 if (old_bkts && old_size == new_size)
889 return old_bkts; 889 return old_bkts;
@@ -908,9 +908,9 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
908 return NULL; 908 return NULL;
909 } 909 }
910 910
911 new_bkts[i]->hsb_index = i; 911 new_bkts[i]->hsb_index = i;
912 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ 912 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
913 new_bkts[i]->hsb_depmax = -1; /* unknown */ 913 new_bkts[i]->hsb_depmax = -1; /* unknown */
914 bd.bd_bucket = new_bkts[i]; 914 bd.bd_bucket = new_bkts[i];
915 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) 915 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
916 INIT_HLIST_HEAD(hhead); 916 INIT_HLIST_HEAD(hhead);
@@ -950,9 +950,9 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
950 int bits; 950 int bits;
951 951
952 spin_lock(&hs->hs_dep_lock); 952 spin_lock(&hs->hs_dep_lock);
953 dep = hs->hs_dep_max; 953 dep = hs->hs_dep_max;
954 bkt = hs->hs_dep_bkt; 954 bkt = hs->hs_dep_bkt;
955 off = hs->hs_dep_off; 955 off = hs->hs_dep_off;
956 bits = hs->hs_dep_bits; 956 bits = hs->hs_dep_bits;
957 spin_unlock(&hs->hs_dep_lock); 957 spin_unlock(&hs->hs_dep_lock);
958 958
@@ -976,7 +976,7 @@ static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
976 return; 976 return;
977 977
978 spin_lock(&hs->hs_dep_lock); 978 spin_lock(&hs->hs_dep_lock);
979 while (hs->hs_dep_bits != 0) { 979 while (hs->hs_dep_bits) {
980 spin_unlock(&hs->hs_dep_lock); 980 spin_unlock(&hs->hs_dep_lock);
981 cond_resched(); 981 cond_resched();
982 spin_lock(&hs->hs_dep_lock); 982 spin_lock(&hs->hs_dep_lock);
@@ -992,10 +992,10 @@ static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
992#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ 992#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
993 993
994struct cfs_hash * 994struct cfs_hash *
995cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, 995cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
996 unsigned bkt_bits, unsigned extra_bytes, 996 unsigned int bkt_bits, unsigned int extra_bytes,
997 unsigned min_theta, unsigned max_theta, 997 unsigned int min_theta, unsigned int max_theta,
998 struct cfs_hash_ops *ops, unsigned flags) 998 struct cfs_hash_ops *ops, unsigned int flags)
999{ 999{
1000 struct cfs_hash *hs; 1000 struct cfs_hash *hs;
1001 int len; 1001 int len;
@@ -1010,18 +1010,17 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1010 LASSERT(ops->hs_get); 1010 LASSERT(ops->hs_get);
1011 LASSERT(ops->hs_put_locked); 1011 LASSERT(ops->hs_put_locked);
1012 1012
1013 if ((flags & CFS_HASH_REHASH) != 0) 1013 if (flags & CFS_HASH_REHASH)
1014 flags |= CFS_HASH_COUNTER; /* must have counter */ 1014 flags |= CFS_HASH_COUNTER; /* must have counter */
1015 1015
1016 LASSERT(cur_bits > 0); 1016 LASSERT(cur_bits > 0);
1017 LASSERT(cur_bits >= bkt_bits); 1017 LASSERT(cur_bits >= bkt_bits);
1018 LASSERT(max_bits >= cur_bits && max_bits < 31); 1018 LASSERT(max_bits >= cur_bits && max_bits < 31);
1019 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); 1019 LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
1020 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, 1020 LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
1021 (flags & CFS_HASH_NO_LOCK) == 0)); 1021 LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
1022 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
1023 1022
1024 len = (flags & CFS_HASH_BIGNAME) == 0 ? 1023 len = !(flags & CFS_HASH_BIGNAME) ?
1025 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; 1024 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1026 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); 1025 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1027 if (!hs) 1026 if (!hs)
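The rewritten assertions above lean on the libcfs ergo() helper, which expresses logical implication: ergo(a, b) fails only when a is true and b is false. The first line therefore reads "a table that cannot rehash must have cur_bits == max_bits" and the second "a rehashable table must not be lockless". As a plain-C sketch of the equivalent form (the real macro lives in the libcfs headers):

#define ergo(a, b) (!(a) || (b))	/* "a implies b" */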
@@ -1036,12 +1035,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1036 cfs_hash_lock_setup(hs); 1035 cfs_hash_lock_setup(hs);
1037 cfs_hash_hlist_setup(hs); 1036 cfs_hash_hlist_setup(hs);
1038 1037
1039 hs->hs_cur_bits = (__u8)cur_bits; 1038 hs->hs_cur_bits = (u8)cur_bits;
1040 hs->hs_min_bits = (__u8)cur_bits; 1039 hs->hs_min_bits = (u8)cur_bits;
1041 hs->hs_max_bits = (__u8)max_bits; 1040 hs->hs_max_bits = (u8)max_bits;
1042 hs->hs_bkt_bits = (__u8)bkt_bits; 1041 hs->hs_bkt_bits = (u8)bkt_bits;
1043 1042
1044 hs->hs_ops = ops; 1043 hs->hs_ops = ops;
1045 hs->hs_extra_bytes = extra_bytes; 1044 hs->hs_extra_bytes = extra_bytes;
1046 hs->hs_rehash_bits = 0; 1045 hs->hs_rehash_bits = 0;
1047 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker); 1046 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
@@ -1107,12 +1106,12 @@ cfs_hash_destroy(struct cfs_hash *hs)
1107 cfs_hash_exit(hs, hnode); 1106 cfs_hash_exit(hs, hnode);
1108 } 1107 }
1109 } 1108 }
1110 LASSERT(bd.bd_bucket->hsb_count == 0); 1109 LASSERT(!bd.bd_bucket->hsb_count);
1111 cfs_hash_bd_unlock(hs, &bd, 1); 1110 cfs_hash_bd_unlock(hs, &bd, 1);
1112 cond_resched(); 1111 cond_resched();
1113 } 1112 }
1114 1113
1115 LASSERT(atomic_read(&hs->hs_count) == 0); 1114 LASSERT(!atomic_read(&hs->hs_count));
1116 1115
1117 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), 1116 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1118 0, CFS_HASH_NBKT(hs)); 1117 0, CFS_HASH_NBKT(hs));
@@ -1216,7 +1215,7 @@ cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1216 struct cfs_hash_bd bds[2]; 1215 struct cfs_hash_bd bds[2];
1217 int bits = 0; 1216 int bits = 0;
1218 1217
1219 LASSERT(hlist_unhashed(hnode)); 1218 LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
1220 1219
1221 cfs_hash_lock(hs, 0); 1220 cfs_hash_lock(hs, 0);
1222 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); 1221 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1293,7 +1292,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1293 } 1292 }
1294 1293
1295 if (hnode) { 1294 if (hnode) {
1296 obj = cfs_hash_object(hs, hnode); 1295 obj = cfs_hash_object(hs, hnode);
1297 bits = cfs_hash_rehash_bits(hs); 1296 bits = cfs_hash_rehash_bits(hs);
1298 } 1297 }
1299 1298
@@ -1388,7 +1387,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
1388 bits = cfs_hash_rehash_bits(hs); 1387 bits = cfs_hash_rehash_bits(hs);
1389 cfs_hash_unlock(hs, 1); 1388 cfs_hash_unlock(hs, 1);
1390 /* NB: it's race on cfs_has_t::hs_iterating, see above */ 1389 /* NB: it's race on cfs_has_t::hs_iterating, see above */
1391 if (remained == 0) 1390 if (!remained)
1392 hs->hs_iterating = 0; 1391 hs->hs_iterating = 0;
1393 if (bits > 0) { 1392 if (bits > 0) {
1394 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < 1393 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
@@ -1406,14 +1405,14 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
1406 * . if @removal_safe is true, use can remove current item by 1405 * . if @removal_safe is true, use can remove current item by
1407 * cfs_hash_bd_del_locked 1406 * cfs_hash_bd_del_locked
1408 */ 1407 */
1409static __u64 1408static u64
1410cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, 1409cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1411 void *data, int remove_safe) 1410 void *data, int remove_safe)
1412{ 1411{
1413 struct hlist_node *hnode; 1412 struct hlist_node *hnode;
1414 struct hlist_node *pos; 1413 struct hlist_node *pos;
1415 struct cfs_hash_bd bd; 1414 struct cfs_hash_bd bd;
1416 __u64 count = 0; 1415 u64 count = 0;
1417 int excl = !!remove_safe; 1416 int excl = !!remove_safe;
1418 int loop = 0; 1417 int loop = 0;
1419 int i; 1418 int i;
@@ -1526,7 +1525,7 @@ cfs_hash_is_empty(struct cfs_hash *hs)
1526} 1525}
1527EXPORT_SYMBOL(cfs_hash_is_empty); 1526EXPORT_SYMBOL(cfs_hash_is_empty);
1528 1527
1529__u64 1528u64
1530cfs_hash_size_get(struct cfs_hash *hs) 1529cfs_hash_size_get(struct cfs_hash *hs)
1531{ 1530{
1532 return cfs_hash_with_counter(hs) ? 1531 return cfs_hash_with_counter(hs) ?
@@ -1552,26 +1551,33 @@ EXPORT_SYMBOL(cfs_hash_size_get);
1552 */ 1551 */
1553static int 1552static int
1554cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, 1553cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1555 void *data) 1554 void *data, int start)
1556{ 1555{
1557 struct hlist_node *hnode; 1556 struct hlist_node *hnode;
1558 struct hlist_node *tmp; 1557 struct hlist_node *tmp;
1559 struct cfs_hash_bd bd; 1558 struct cfs_hash_bd bd;
1560 __u32 version; 1559 u32 version;
1561 int count = 0; 1560 int count = 0;
1562 int stop_on_change; 1561 int stop_on_change;
1563 int rc; 1562 int end = -1;
1563 int rc = 0;
1564 int i; 1564 int i;
1565 1565
1566 stop_on_change = cfs_hash_with_rehash_key(hs) || 1566 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1567 !cfs_hash_with_no_itemref(hs) || 1567 !cfs_hash_with_no_itemref(hs) ||
1568 !hs->hs_ops->hs_put_locked; 1568 !hs->hs_ops->hs_put_locked;
1569 cfs_hash_lock(hs, 0); 1569 cfs_hash_lock(hs, 0);
1570again:
1570 LASSERT(!cfs_hash_is_rehashing(hs)); 1571 LASSERT(!cfs_hash_is_rehashing(hs));
1571 1572
1572 cfs_hash_for_each_bucket(hs, &bd, i) { 1573 cfs_hash_for_each_bucket(hs, &bd, i) {
1573 struct hlist_head *hhead; 1574 struct hlist_head *hhead;
1574 1575
1576 if (i < start)
1577 continue;
1578 else if (end > 0 && i >= end)
1579 break;
1580
1575 cfs_hash_bd_lock(hs, &bd, 0); 1581 cfs_hash_bd_lock(hs, &bd, 0);
1576 version = cfs_hash_bd_version_get(&bd); 1582 version = cfs_hash_bd_version_get(&bd);
1577 1583
@@ -1611,14 +1617,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1611 if (rc) /* callback wants to break iteration */ 1617 if (rc) /* callback wants to break iteration */
1612 break; 1618 break;
1613 } 1619 }
1614 cfs_hash_unlock(hs, 0); 1620 if (start > 0 && !rc) {
1621 end = start;
1622 start = 0;
1623 goto again;
1624 }
1615 1625
1626 cfs_hash_unlock(hs, 0);
1616 return count; 1627 return count;
1617} 1628}
1618 1629
1619int 1630int
1620cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, 1631cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1621 void *data) 1632 void *data, int start)
1622{ 1633{
1623 if (cfs_hash_with_no_lock(hs) || 1634 if (cfs_hash_with_no_lock(hs) ||
1624 cfs_hash_with_rehash_key(hs) || 1635 cfs_hash_with_rehash_key(hs) ||
@@ -1630,7 +1641,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1630 return -EOPNOTSUPP; 1641 return -EOPNOTSUPP;
1631 1642
1632 cfs_hash_for_each_enter(hs); 1643 cfs_hash_for_each_enter(hs);
1633 cfs_hash_for_each_relax(hs, func, data); 1644 cfs_hash_for_each_relax(hs, func, data, start);
1634 cfs_hash_for_each_exit(hs); 1645 cfs_hash_for_each_exit(hs);
1635 1646
1636 return 0; 1647 return 0;
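The new start argument threaded through cfs_hash_for_each_relax() and cfs_hash_for_each_nolock() above lets a caller begin the walk at an arbitrary bucket; if the callback never asks to stop, the loop wraps around so the buckets below start are still visited exactly once. A self-contained sketch of that wrap-around order (illustrative names, not the cfs_hash API):

typedef int (*visit_cb_t)(int bucket, void *data);

/* Visit buckets [start, nbkt) first; if nothing stopped the walk, cover
 * [0, start) on a second pass -- the same shape as the loop added above. */
static int walk_from(int nbkt, int start, visit_cb_t cb, void *data)
{
	int end = -1;		/* no upper bound on the first pass */
	int rc = 0;
	int i;

again:
	for (i = 0; i < nbkt; i++) {
		if (i < start)
			continue;		/* left for the wrap-around pass */
		else if (end > 0 && i >= end)
			break;			/* second pass stops where the first began */

		rc = cb(i, data);
		if (rc)				/* callback wants to break iteration */
			break;
	}

	if (start > 0 && !rc) {			/* wrap to cover [0, start) */
		end = start;
		start = 0;
		goto again;
	}
	return rc;
}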
@@ -1652,7 +1663,7 @@ int
1652cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, 1663cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1653 void *data) 1664 void *data)
1654{ 1665{
1655 unsigned i = 0; 1666 unsigned int i = 0;
1656 1667
1657 if (cfs_hash_with_no_lock(hs)) 1668 if (cfs_hash_with_no_lock(hs))
1658 return -EOPNOTSUPP; 1669 return -EOPNOTSUPP;
@@ -1662,7 +1673,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1662 return -EOPNOTSUPP; 1673 return -EOPNOTSUPP;
1663 1674
1664 cfs_hash_for_each_enter(hs); 1675 cfs_hash_for_each_enter(hs);
1665 while (cfs_hash_for_each_relax(hs, func, data)) { 1676 while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1666 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", 1677 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1667 hs->hs_name, i++); 1678 hs->hs_name, i++);
1668 } 1679 }
@@ -1672,7 +1683,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1672EXPORT_SYMBOL(cfs_hash_for_each_empty); 1683EXPORT_SYMBOL(cfs_hash_for_each_empty);
1673 1684
1674void 1685void
1675cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, 1686cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
1676 cfs_hash_for_each_cb_t func, void *data) 1687 cfs_hash_for_each_cb_t func, void *data)
1677{ 1688{
1678 struct hlist_head *hhead; 1689 struct hlist_head *hhead;
@@ -1704,7 +1715,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1704 * the passed callback @func and pass to it as an argument each hash 1715 * the passed callback @func and pass to it as an argument each hash
1705 * item and the private @data. During the callback the bucket lock 1716 * item and the private @data. During the callback the bucket lock
1706 * is held so the callback must never sleep. 1717 * is held so the callback must never sleep.
1707 */ 1718 */
1708void 1719void
1709cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, 1720cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1710 cfs_hash_for_each_cb_t func, void *data) 1721 cfs_hash_for_each_cb_t func, void *data)
@@ -1936,7 +1947,7 @@ out:
1936 /* can't refer to @hs anymore because it could be destroyed */ 1947 /* can't refer to @hs anymore because it could be destroyed */
1937 if (bkts) 1948 if (bkts)
1938 cfs_hash_buckets_free(bkts, bsize, new_size, old_size); 1949 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1939 if (rc != 0) 1950 if (rc)
1940 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); 1951 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1941 /* return 1 only if cfs_wi_exit is called */ 1952 /* return 1 only if cfs_wi_exit is called */
1942 return rc == -ESRCH; 1953 return rc == -ESRCH;
@@ -2005,7 +2016,7 @@ cfs_hash_full_bkts(struct cfs_hash *hs)
2005 if (!hs->hs_rehash_buckets) 2016 if (!hs->hs_rehash_buckets)
2006 return hs->hs_buckets; 2017 return hs->hs_buckets;
2007 2018
2008 LASSERT(hs->hs_rehash_bits != 0); 2019 LASSERT(hs->hs_rehash_bits);
2009 return hs->hs_rehash_bits > hs->hs_cur_bits ? 2020 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2010 hs->hs_rehash_buckets : hs->hs_buckets; 2021 hs->hs_rehash_buckets : hs->hs_buckets;
2011} 2022}
@@ -2017,7 +2028,7 @@ cfs_hash_full_nbkt(struct cfs_hash *hs)
2017 if (!hs->hs_rehash_buckets) 2028 if (!hs->hs_rehash_buckets)
2018 return CFS_HASH_NBKT(hs); 2029 return CFS_HASH_NBKT(hs);
2019 2030
2020 LASSERT(hs->hs_rehash_bits != 0); 2031 LASSERT(hs->hs_rehash_bits);
2021 return hs->hs_rehash_bits > hs->hs_cur_bits ? 2032 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2022 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); 2033 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2023} 2034}
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 33352af6c27f..55caa19def51 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free);
74int 74int
75cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) 75cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
76{ 76{
77 int rc; 77 int rc;
78 78
79 rc = snprintf(buf, len, "%d\t: %d\n", 0, 0); 79 rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
80 len -= rc; 80 len -= rc;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 83543f928279..1967b97c4afc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -52,9 +52,9 @@ struct cfs_percpt_lock *
52cfs_percpt_lock_create(struct cfs_cpt_table *cptab, 52cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
53 struct lock_class_key *keys) 53 struct lock_class_key *keys)
54{ 54{
55 struct cfs_percpt_lock *pcl; 55 struct cfs_percpt_lock *pcl;
56 spinlock_t *lock; 56 spinlock_t *lock;
57 int i; 57 int i;
58 58
59 /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ 59 /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
60 LIBCFS_ALLOC(pcl, sizeof(*pcl)); 60 LIBCFS_ALLOC(pcl, sizeof(*pcl));
@@ -73,7 +73,7 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
73 73
74 cfs_percpt_for_each(lock, i, pcl->pcl_locks) { 74 cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
75 spin_lock_init(lock); 75 spin_lock_init(lock);
76 if (keys != NULL) 76 if (keys)
77 lockdep_set_class(lock, &keys[i]); 77 lockdep_set_class(lock, &keys[i]);
78 } 78 }
79 79
@@ -94,8 +94,8 @@ void
94cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) 94cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
95 __acquires(pcl->pcl_locks) 95 __acquires(pcl->pcl_locks)
96{ 96{
97 int ncpt = cfs_cpt_number(pcl->pcl_cptab); 97 int ncpt = cfs_cpt_number(pcl->pcl_cptab);
98 int i; 98 int i;
99 99
100 LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); 100 LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
101 101
@@ -114,7 +114,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
114 /* exclusive lock request */ 114 /* exclusive lock request */
115 for (i = 0; i < ncpt; i++) { 115 for (i = 0; i < ncpt; i++) {
116 spin_lock(pcl->pcl_locks[i]); 116 spin_lock(pcl->pcl_locks[i]);
117 if (i == 0) { 117 if (!i) {
118 LASSERT(!pcl->pcl_locked); 118 LASSERT(!pcl->pcl_locked);
119 /* nobody should take private lock after this 119 /* nobody should take private lock after this
120 * so I wouldn't starve for too long time 120 * so I wouldn't starve for too long time
@@ -130,8 +130,8 @@ void
130cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) 130cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
131 __releases(pcl->pcl_locks) 131 __releases(pcl->pcl_locks)
132{ 132{
133 int ncpt = cfs_cpt_number(pcl->pcl_cptab); 133 int ncpt = cfs_cpt_number(pcl->pcl_cptab);
134 int i; 134 int i;
135 135
136 index = ncpt == 1 ? 0 : index; 136 index = ncpt == 1 ? 0 : index;
137 137
@@ -141,7 +141,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
141 } 141 }
142 142
143 for (i = ncpt - 1; i >= 0; i--) { 143 for (i = ncpt - 1; i >= 0; i--) {
144 if (i == 0) { 144 if (!i) {
145 LASSERT(pcl->pcl_locked); 145 LASSERT(pcl->pcl_locked);
146 pcl->pcl_locked = 0; 146 pcl->pcl_locked = 0;
147 } 147 }
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index d0e81bb41cdc..ef085ba23194 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -43,8 +43,8 @@ struct cfs_var_array {
43void 43void
44cfs_percpt_free(void *vars) 44cfs_percpt_free(void *vars)
45{ 45{
46 struct cfs_var_array *arr; 46 struct cfs_var_array *arr;
47 int i; 47 int i;
48 48
49 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); 49 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
50 50
@@ -72,9 +72,9 @@ EXPORT_SYMBOL(cfs_percpt_free);
72void * 72void *
73cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) 73cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
74{ 74{
75 struct cfs_var_array *arr; 75 struct cfs_var_array *arr;
76 int count; 76 int count;
77 int i; 77 int i;
78 78
79 count = cfs_cpt_number(cptab); 79 count = cfs_cpt_number(cptab);
80 80
@@ -120,8 +120,8 @@ EXPORT_SYMBOL(cfs_percpt_number);
120void 120void
121cfs_array_free(void *vars) 121cfs_array_free(void *vars)
122{ 122{
123 struct cfs_var_array *arr; 123 struct cfs_var_array *arr;
124 int i; 124 int i;
125 125
126 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); 126 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
127 127
@@ -144,15 +144,15 @@ EXPORT_SYMBOL(cfs_array_free);
144void * 144void *
145cfs_array_alloc(int count, unsigned int size) 145cfs_array_alloc(int count, unsigned int size)
146{ 146{
147 struct cfs_var_array *arr; 147 struct cfs_var_array *arr;
148 int i; 148 int i;
149 149
150 LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); 150 LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
151 if (!arr) 151 if (!arr)
152 return NULL; 152 return NULL;
153 153
154 arr->va_count = count; 154 arr->va_count = count;
155 arr->va_size = size; 155 arr->va_size = size;
156 156
157 for (i = 0; i < count; i++) { 157 for (i = 0; i < count; i++) {
158 LIBCFS_ALLOC(arr->va_ptrs[i], size); 158 LIBCFS_ALLOC(arr->va_ptrs[i], size);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 56a614d7713b..02de1ee720fd 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -79,7 +79,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
79 for (i = 0; i < 32; i++) { 79 for (i = 0; i < 32; i++) {
80 debugstr = bit2str(i); 80 debugstr = bit2str(i);
81 if (debugstr && strlen(debugstr) == len && 81 if (debugstr && strlen(debugstr) == len &&
82 strncasecmp(str, debugstr, len) == 0) { 82 !strncasecmp(str, debugstr, len)) {
83 if (op == '-') 83 if (op == '-')
84 newmask &= ~(1 << i); 84 newmask &= ~(1 << i);
85 else 85 else
@@ -89,7 +89,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
89 } 89 }
90 } 90 }
91 if (!found && len == 3 && 91 if (!found && len == 3 &&
92 (strncasecmp(str, "ALL", len) == 0)) { 92 !strncasecmp(str, "ALL", len)) {
93 if (op == '-') 93 if (op == '-')
94 newmask = minmask; 94 newmask = minmask;
95 else 95 else
@@ -112,7 +112,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
112char *cfs_firststr(char *str, size_t size) 112char *cfs_firststr(char *str, size_t size)
113{ 113{
114 size_t i = 0; 114 size_t i = 0;
115 char *end; 115 char *end;
116 116
117 /* trim leading spaces */ 117 /* trim leading spaces */
118 while (i < size && *str && isspace(*str)) { 118 while (i < size && *str && isspace(*str)) {
@@ -182,7 +182,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
182 next->ls_len--; 182 next->ls_len--;
183 } 183 }
184 184
185 if (next->ls_len == 0) /* whitespaces only */ 185 if (!next->ls_len) /* whitespaces only */
186 return 0; 186 return 0;
187 187
188 if (*next->ls_str == delim) { 188 if (*next->ls_str == delim) {
@@ -222,8 +222,8 @@ EXPORT_SYMBOL(cfs_gettok);
222 * \retval 0 otherwise 222 * \retval 0 otherwise
223 */ 223 */
224int 224int
225cfs_str2num_check(char *str, int nob, unsigned *num, 225cfs_str2num_check(char *str, int nob, unsigned int *num,
226 unsigned min, unsigned max) 226 unsigned int min, unsigned int max)
227{ 227{
228 bool all_numbers = true; 228 bool all_numbers = true;
229 char *endp, cache; 229 char *endp, cache;
@@ -273,11 +273,11 @@ EXPORT_SYMBOL(cfs_str2num_check);
273 * -ENOMEM will be returned. 273 * -ENOMEM will be returned.
274 */ 274 */
275static int 275static int
276cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max, 276cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
277 int bracketed, struct cfs_range_expr **expr) 277 int bracketed, struct cfs_range_expr **expr)
278{ 278{
279 struct cfs_range_expr *re; 279 struct cfs_range_expr *re;
280 struct cfs_lstr tok; 280 struct cfs_lstr tok;
281 281
282 LIBCFS_ALLOC(re, sizeof(*re)); 282 LIBCFS_ALLOC(re, sizeof(*re));
283 if (!re) 283 if (!re)
@@ -391,7 +391,7 @@ cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
391 i += scnprintf(buffer + i, count - i, "["); 391 i += scnprintf(buffer + i, count - i, "[");
392 392
393 list_for_each_entry(expr, &expr_list->el_exprs, re_link) { 393 list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
394 if (j++ != 0) 394 if (j++)
395 i += scnprintf(buffer + i, count - i, ","); 395 i += scnprintf(buffer + i, count - i, ",");
396 i += cfs_range_expr_print(buffer + i, count - i, expr, 396 i += cfs_range_expr_print(buffer + i, count - i, expr,
397 numexprs > 1); 397 numexprs > 1);
@@ -411,13 +411,13 @@ EXPORT_SYMBOL(cfs_expr_list_print);
411 * \retval 0 otherwise 411 * \retval 0 otherwise
412 */ 412 */
413int 413int
414cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list) 414cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list)
415{ 415{
416 struct cfs_range_expr *expr; 416 struct cfs_range_expr *expr;
417 417
418 list_for_each_entry(expr, &expr_list->el_exprs, re_link) { 418 list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
419 if (value >= expr->re_lo && value <= expr->re_hi && 419 if (value >= expr->re_lo && value <= expr->re_hi &&
420 ((value - expr->re_lo) % expr->re_stride) == 0) 420 !((value - expr->re_lo) % expr->re_stride))
421 return 1; 421 return 1;
422 } 422 }
423 423
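The predicate condensed above is the core of expression matching: a value belongs to a range expression when it lies within [re_lo, re_hi] and sits on the stride grid anchored at re_lo. A standalone sketch (the field meanings follow the struct used in the diff; everything else is illustrative):

struct range_expr { unsigned int lo, hi, stride; };

static int range_matches(unsigned int v, const struct range_expr *re)
{
	return v >= re->lo && v <= re->hi && !((v - re->lo) % re->stride);
}

/* e.g. { .lo = 0, .hi = 6, .stride = 2 } accepts 0, 2, 4 and 6 only. */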
@@ -433,21 +433,21 @@ EXPORT_SYMBOL(cfs_expr_list_match);
433 * \retval < 0 for failure 433 * \retval < 0 for failure
434 */ 434 */
435int 435int
436cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) 436cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
437{ 437{
438 struct cfs_range_expr *expr; 438 struct cfs_range_expr *expr;
439 __u32 *val; 439 u32 *val;
440 int count = 0; 440 int count = 0;
441 int i; 441 int i;
442 442
443 list_for_each_entry(expr, &expr_list->el_exprs, re_link) { 443 list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
444 for (i = expr->re_lo; i <= expr->re_hi; i++) { 444 for (i = expr->re_lo; i <= expr->re_hi; i++) {
445 if (((i - expr->re_lo) % expr->re_stride) == 0) 445 if (!((i - expr->re_lo) % expr->re_stride))
446 count++; 446 count++;
447 } 447 }
448 } 448 }
449 449
450 if (count == 0) /* empty expression list */ 450 if (!count) /* empty expression list */
451 return 0; 451 return 0;
452 452
453 if (count > max) { 453 if (count > max) {
@@ -463,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
463 count = 0; 463 count = 0;
464 list_for_each_entry(expr, &expr_list->el_exprs, re_link) { 464 list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
465 for (i = expr->re_lo; i <= expr->re_hi; i++) { 465 for (i = expr->re_lo; i <= expr->re_hi; i++) {
466 if (((i - expr->re_lo) % expr->re_stride) == 0) 466 if (!((i - expr->re_lo) % expr->re_stride))
467 val[count++] = i; 467 val[count++] = i;
468 } 468 }
469 } 469 }
@@ -501,13 +501,13 @@ EXPORT_SYMBOL(cfs_expr_list_free);
501 * \retval -errno otherwise 501 * \retval -errno otherwise
502 */ 502 */
503int 503int
504cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, 504cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
505 struct cfs_expr_list **elpp) 505 struct cfs_expr_list **elpp)
506{ 506{
507 struct cfs_expr_list *expr_list; 507 struct cfs_expr_list *expr_list;
508 struct cfs_range_expr *expr; 508 struct cfs_range_expr *expr;
509 struct cfs_lstr src; 509 struct cfs_lstr src;
510 int rc; 510 int rc;
511 511
512 LIBCFS_ALLOC(expr_list, sizeof(*expr_list)); 512 LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
513 if (!expr_list) 513 if (!expr_list)
@@ -533,18 +533,18 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
533 } 533 }
534 534
535 rc = cfs_range_expr_parse(&tok, min, max, 1, &expr); 535 rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
536 if (rc != 0) 536 if (rc)
537 break; 537 break;
538 538
539 list_add_tail(&expr->re_link, &expr_list->el_exprs); 539 list_add_tail(&expr->re_link, &expr_list->el_exprs);
540 } 540 }
541 } else { 541 } else {
542 rc = cfs_range_expr_parse(&src, min, max, 0, &expr); 542 rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
543 if (rc == 0) 543 if (!rc)
544 list_add_tail(&expr->re_link, &expr_list->el_exprs); 544 list_add_tail(&expr->re_link, &expr_list->el_exprs);
545 } 545 }
546 546
547 if (rc != 0) 547 if (rc)
548 cfs_expr_list_free(expr_list); 548 cfs_expr_list_free(expr_list);
549 else 549 else
550 *elpp = expr_list; 550 *elpp = expr_list;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index e8b1a61420de..6b9cf06e8df2 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -55,6 +55,8 @@ MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
55 * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket 55 * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
56 * are NUMA node ID, number before bracket is CPU partition ID. 56 * are NUMA node ID, number before bracket is CPU partition ID.
57 * 57 *
58 * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology
59 *
58 * NB: If user specified cpu_pattern, cpu_npartitions will be ignored 60 * NB: If user specified cpu_pattern, cpu_npartitions will be ignored
59 */ 61 */
60static char *cpu_pattern = ""; 62static char *cpu_pattern = "";
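The comment block extended above now documents both forms the cpu_pattern module parameter accepts. In the quoted pattern "N 0[0,1] 1[2,3]", the leading 'N' makes the bracketed numbers NUMA node IDs and the number before each bracket a CPU partition ID, so partition 0 maps to nodes 0-1 and partition 1 to nodes 2-3; the new bare "N" shortcut skips the explicit map and derives the partition table directly from the NUMA and CPU topology. Either way, a non-empty cpu_pattern causes cpu_npartitions to be ignored, as the NB line states.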
@@ -88,7 +90,7 @@ cfs_node_to_cpumask(int node, cpumask_t *mask)
88void 90void
89cfs_cpt_table_free(struct cfs_cpt_table *cptab) 91cfs_cpt_table_free(struct cfs_cpt_table *cptab)
90{ 92{
91 int i; 93 int i;
92 94
93 if (cptab->ctb_cpu2cpt) { 95 if (cptab->ctb_cpu2cpt) {
94 LIBCFS_FREE(cptab->ctb_cpu2cpt, 96 LIBCFS_FREE(cptab->ctb_cpu2cpt,
@@ -126,7 +128,7 @@ struct cfs_cpt_table *
126cfs_cpt_table_alloc(unsigned int ncpt) 128cfs_cpt_table_alloc(unsigned int ncpt)
127{ 129{
128 struct cfs_cpt_table *cptab; 130 struct cfs_cpt_table *cptab;
129 int i; 131 int i;
130 132
131 LIBCFS_ALLOC(cptab, sizeof(*cptab)); 133 LIBCFS_ALLOC(cptab, sizeof(*cptab));
132 if (!cptab) 134 if (!cptab)
@@ -177,10 +179,10 @@ EXPORT_SYMBOL(cfs_cpt_table_alloc);
177int 179int
178cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) 180cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
179{ 181{
180 char *tmp = buf; 182 char *tmp = buf;
181 int rc = 0; 183 int rc = 0;
182 int i; 184 int i;
183 int j; 185 int j;
184 186
185 for (i = 0; i < cptab->ctb_nparts; i++) { 187 for (i = 0; i < cptab->ctb_nparts; i++) {
186 if (len > 0) { 188 if (len > 0) {
@@ -271,7 +273,7 @@ EXPORT_SYMBOL(cfs_cpt_nodemask);
271int 273int
272cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) 274cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
273{ 275{
274 int node; 276 int node;
275 277
276 LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); 278 LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
277 279
@@ -311,8 +313,8 @@ EXPORT_SYMBOL(cfs_cpt_set_cpu);
311void 313void
312cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) 314cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
313{ 315{
314 int node; 316 int node;
315 int i; 317 int i;
316 318
317 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); 319 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
318 320
@@ -371,9 +373,9 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpu);
371int 373int
372cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) 374cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
373{ 375{
374 int i; 376 int i;
375 377
376 if (cpumask_weight(mask) == 0 || 378 if (!cpumask_weight(mask) ||
377 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { 379 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
378 CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", 380 CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
379 cpt); 381 cpt);
@@ -392,7 +394,7 @@ EXPORT_SYMBOL(cfs_cpt_set_cpumask);
392void 394void
393cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) 395cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
394{ 396{
395 int i; 397 int i;
396 398
397 for_each_cpu(i, mask) 399 for_each_cpu(i, mask)
398 cfs_cpt_unset_cpu(cptab, cpt, i); 400 cfs_cpt_unset_cpu(cptab, cpt, i);
@@ -402,8 +404,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
402int 404int
403cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) 405cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
404{ 406{
405 cpumask_t *mask; 407 cpumask_t *mask;
406 int rc; 408 int rc;
407 409
408 if (node < 0 || node >= MAX_NUMNODES) { 410 if (node < 0 || node >= MAX_NUMNODES) {
409 CDEBUG(D_INFO, 411 CDEBUG(D_INFO,
@@ -449,7 +451,7 @@ EXPORT_SYMBOL(cfs_cpt_unset_node);
449int 451int
450cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) 452cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
451{ 453{
452 int i; 454 int i;
453 455
454 for_each_node_mask(i, *mask) { 456 for_each_node_mask(i, *mask) {
455 if (!cfs_cpt_set_node(cptab, cpt, i)) 457 if (!cfs_cpt_set_node(cptab, cpt, i))
@@ -463,7 +465,7 @@ EXPORT_SYMBOL(cfs_cpt_set_nodemask);
463void 465void
464cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) 466cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
465{ 467{
466 int i; 468 int i;
467 469
468 for_each_node_mask(i, *mask) 470 for_each_node_mask(i, *mask)
469 cfs_cpt_unset_node(cptab, cpt, i); 471 cfs_cpt_unset_node(cptab, cpt, i);
@@ -473,8 +475,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
473void 475void
474cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) 476cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
475{ 477{
476 int last; 478 int last;
477 int i; 479 int i;
478 480
479 if (cpt == CFS_CPT_ANY) { 481 if (cpt == CFS_CPT_ANY) {
480 last = cptab->ctb_nparts - 1; 482 last = cptab->ctb_nparts - 1;
@@ -493,10 +495,10 @@ EXPORT_SYMBOL(cfs_cpt_clear);
493int 495int
494cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) 496cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
495{ 497{
496 nodemask_t *mask; 498 nodemask_t *mask;
497 int weight; 499 int weight;
498 int rotor; 500 int rotor;
499 int node; 501 int node;
500 502
501 /* convert CPU partition ID to HW node id */ 503 /* convert CPU partition ID to HW node id */
502 504
@@ -514,7 +516,7 @@ cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
514 rotor %= weight; 516 rotor %= weight;
515 517
516 for_each_node_mask(node, *mask) { 518 for_each_node_mask(node, *mask) {
517 if (rotor-- == 0) 519 if (!rotor--)
518 return node; 520 return node;
519 } 521 }
520 522
@@ -526,8 +528,8 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
526int 528int
527cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) 529cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
528{ 530{
529 int cpu = smp_processor_id(); 531 int cpu = smp_processor_id();
530 int cpt = cptab->ctb_cpu2cpt[cpu]; 532 int cpt = cptab->ctb_cpu2cpt[cpu];
531 533
532 if (cpt < 0) { 534 if (cpt < 0) {
533 if (!remap) 535 if (!remap)
@@ -555,10 +557,10 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
555int 557int
556cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) 558cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
557{ 559{
558 cpumask_t *cpumask; 560 cpumask_t *cpumask;
559 nodemask_t *nodemask; 561 nodemask_t *nodemask;
560 int rc; 562 int rc;
561 int i; 563 int i;
562 564
563 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); 565 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
564 566
@@ -582,7 +584,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
582 584
583 rc = set_cpus_allowed_ptr(current, cpumask); 585 rc = set_cpus_allowed_ptr(current, cpumask);
584 set_mems_allowed(*nodemask); 586 set_mems_allowed(*nodemask);
585 if (rc == 0) 587 if (!rc)
586 schedule(); /* switch to allowed CPU */ 588 schedule(); /* switch to allowed CPU */
587 589
588 return rc; 590 return rc;
@@ -601,10 +603,10 @@ static int
601cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, 603cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
602 cpumask_t *node, int number) 604 cpumask_t *node, int number)
603{ 605{
604 cpumask_t *socket = NULL; 606 cpumask_t *socket = NULL;
605 cpumask_t *core = NULL; 607 cpumask_t *core = NULL;
606 int rc = 0; 608 int rc = 0;
607 int cpu; 609 int cpu;
608 610
609 LASSERT(number > 0); 611 LASSERT(number > 0);
610 612
@@ -638,7 +640,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
638 LASSERT(!cpumask_empty(socket)); 640 LASSERT(!cpumask_empty(socket));
639 641
640 while (!cpumask_empty(socket)) { 642 while (!cpumask_empty(socket)) {
641 int i; 643 int i;
642 644
643 /* get cpumask for hts in the same core */ 645 /* get cpumask for hts in the same core */
644 cpumask_copy(core, topology_sibling_cpumask(cpu)); 646 cpumask_copy(core, topology_sibling_cpumask(cpu));
@@ -656,14 +658,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
656 goto out; 658 goto out;
657 } 659 }
658 660
659 if (--number == 0) 661 if (!--number)
660 goto out; 662 goto out;
661 } 663 }
662 cpu = cpumask_first(socket); 664 cpu = cpumask_first(socket);
663 } 665 }
664 } 666 }
665 667
666 out: 668out:
667 if (socket) 669 if (socket)
668 LIBCFS_FREE(socket, cpumask_size()); 670 LIBCFS_FREE(socket, cpumask_size());
669 if (core) 671 if (core)
@@ -676,9 +678,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
676static unsigned int 678static unsigned int
677cfs_cpt_num_estimate(void) 679cfs_cpt_num_estimate(void)
678{ 680{
679 unsigned nnode = num_online_nodes(); 681 unsigned int nnode = num_online_nodes();
680 unsigned ncpu = num_online_cpus(); 682 unsigned int ncpu = num_online_cpus();
681 unsigned ncpt; 683 unsigned int ncpt;
682 684
683 if (ncpu <= CPT_WEIGHT_MIN) { 685 if (ncpu <= CPT_WEIGHT_MIN) {
684 ncpt = 1; 686 ncpt = 1;
@@ -703,14 +705,14 @@ cfs_cpt_num_estimate(void)
703 705
704 ncpt = nnode; 706 ncpt = nnode;
705 707
706 out: 708out:
707#if (BITS_PER_LONG == 32) 709#if (BITS_PER_LONG == 32)
708 /* config many CPU partitions on 32-bit system could consume 710 /* config many CPU partitions on 32-bit system could consume
709 * too much memory 711 * too much memory
710 */ 712 */
711 ncpt = min(2U, ncpt); 713 ncpt = min(2U, ncpt);
712#endif 714#endif
713 while (ncpu % ncpt != 0) 715 while (ncpu % ncpt)
714 ncpt--; /* worst case is 1 */ 716 ncpt--; /* worst case is 1 */
715 717
716 return ncpt; 718 return ncpt;
@@ -720,11 +722,11 @@ static struct cfs_cpt_table *
720cfs_cpt_table_create(int ncpt) 722cfs_cpt_table_create(int ncpt)
721{ 723{
722 struct cfs_cpt_table *cptab = NULL; 724 struct cfs_cpt_table *cptab = NULL;
723 cpumask_t *mask = NULL; 725 cpumask_t *mask = NULL;
724 int cpt = 0; 726 int cpt = 0;
725 int num; 727 int num;
726 int rc; 728 int rc;
727 int i; 729 int i;
728 730
729 rc = cfs_cpt_num_estimate(); 731 rc = cfs_cpt_num_estimate();
730 if (ncpt <= 0) 732 if (ncpt <= 0)
@@ -735,7 +737,7 @@ cfs_cpt_table_create(int ncpt)
735 ncpt, rc); 737 ncpt, rc);
736 } 738 }
737 739
738 if (num_online_cpus() % ncpt != 0) { 740 if (num_online_cpus() % ncpt) {
739 CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", 741 CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
740 (int)num_online_cpus(), ncpt); 742 (int)num_online_cpus(), ncpt);
741 goto failed; 743 goto failed;
@@ -748,7 +750,7 @@ cfs_cpt_table_create(int ncpt)
748 } 750 }
749 751
750 num = num_online_cpus() / ncpt; 752 num = num_online_cpus() / ncpt;
751 if (num == 0) { 753 if (!num) {
752 CERROR("CPU changed while setting CPU partition\n"); 754 CERROR("CPU changed while setting CPU partition\n");
753 goto failed; 755 goto failed;
754 } 756 }
@@ -764,7 +766,7 @@ cfs_cpt_table_create(int ncpt)
764 766
765 while (!cpumask_empty(mask)) { 767 while (!cpumask_empty(mask)) {
766 struct cfs_cpu_partition *part; 768 struct cfs_cpu_partition *part;
767 int n; 769 int n;
768 770
769 /* 771 /*
770 * Each emulated NUMA node has all allowed CPUs in 772 * Each emulated NUMA node has all allowed CPUs in
@@ -817,27 +819,36 @@ cfs_cpt_table_create(int ncpt)
817static struct cfs_cpt_table * 819static struct cfs_cpt_table *
818cfs_cpt_table_create_pattern(char *pattern) 820cfs_cpt_table_create_pattern(char *pattern)
819{ 821{
820 struct cfs_cpt_table *cptab; 822 struct cfs_cpt_table *cptab;
821 char *str = pattern; 823 char *str;
822 int node = 0; 824 int node = 0;
823 int high; 825 int high;
824 int ncpt; 826 int ncpt = 0;
825 int c; 827 int cpt;
826 828 int rc;
827 for (ncpt = 0;; ncpt++) { /* quick scan bracket */ 829 int c;
828 str = strchr(str, '['); 830 int i;
829 if (!str)
830 break;
831 str++;
832 }
833 831
834 str = cfs_trimwhite(pattern); 832 str = cfs_trimwhite(pattern);
835 if (*str == 'n' || *str == 'N') { 833 if (*str == 'n' || *str == 'N') {
836 pattern = str + 1; 834 pattern = str + 1;
837 node = 1; 835 if (*pattern != '\0') {
836 node = 1;
837 } else { /* shortcut to create CPT from NUMA & CPU topology */
838 node = -1;
839 ncpt = num_online_nodes();
840 }
841 }
842
843 if (!ncpt) { /* scanning bracket which is mark of partition */
844 for (str = pattern;; str++, ncpt++) {
845 str = strchr(str, '[');
846 if (!str)
847 break;
848 }
838 } 849 }
839 850
840 if (ncpt == 0 || 851 if (!ncpt ||
841 (node && ncpt > num_online_nodes()) || 852 (node && ncpt > num_online_nodes()) ||
842 (!node && ncpt > num_online_cpus())) { 853 (!node && ncpt > num_online_cpus())) {
843 CERROR("Invalid pattern %s, or too many partitions %d\n", 854 CERROR("Invalid pattern %s, or too many partitions %d\n",
@@ -845,25 +856,39 @@ cfs_cpt_table_create_pattern(char *pattern)
845 return NULL; 856 return NULL;
846 } 857 }
847 858
848 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
849
850 cptab = cfs_cpt_table_alloc(ncpt); 859 cptab = cfs_cpt_table_alloc(ncpt);
851 if (!cptab) { 860 if (!cptab) {
852 CERROR("Failed to allocate cpu partition table\n"); 861 CERROR("Failed to allocate cpu partition table\n");
853 return NULL; 862 return NULL;
854 } 863 }
855 864
865 if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
866 cpt = 0;
867
868 for_each_online_node(i) {
869 if (cpt >= ncpt) {
870 CERROR("CPU changed while setting CPU partition table, %d/%d\n",
871 cpt, ncpt);
872 goto failed;
873 }
874
875 rc = cfs_cpt_set_node(cptab, cpt++, i);
876 if (!rc)
877 goto failed;
878 }
879 return cptab;
880 }
881
882 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
883
856 for (str = cfs_trimwhite(pattern), c = 0;; c++) { 884 for (str = cfs_trimwhite(pattern), c = 0;; c++) {
857 struct cfs_range_expr *range; 885 struct cfs_range_expr *range;
858 struct cfs_expr_list *el; 886 struct cfs_expr_list *el;
859 char *bracket = strchr(str, '['); 887 char *bracket = strchr(str, '[');
860 int cpt; 888 int n;
861 int rc;
862 int i;
863 int n;
864 889
865 if (!bracket) { 890 if (!bracket) {
866 if (*str != 0) { 891 if (*str) {
867 CERROR("Invalid pattern %s\n", str); 892 CERROR("Invalid pattern %s\n", str);
868 goto failed; 893 goto failed;
869 } 894 }
@@ -886,7 +911,7 @@ cfs_cpt_table_create_pattern(char *pattern)
886 goto failed; 911 goto failed;
887 } 912 }
888 913
889 if (cfs_cpt_weight(cptab, cpt) != 0) { 914 if (cfs_cpt_weight(cptab, cpt)) {
890 CERROR("Partition %d has already been set.\n", cpt); 915 CERROR("Partition %d has already been set.\n", cpt);
891 goto failed; 916 goto failed;
892 } 917 }
@@ -905,14 +930,14 @@ cfs_cpt_table_create_pattern(char *pattern)
905 } 930 }
906 931
907 if (cfs_expr_list_parse(str, (bracket - str) + 1, 932 if (cfs_expr_list_parse(str, (bracket - str) + 1,
908 0, high, &el) != 0) { 933 0, high, &el)) {
909 CERROR("Can't parse number range: %s\n", str); 934 CERROR("Can't parse number range: %s\n", str);
910 goto failed; 935 goto failed;
911 } 936 }
912 937
913 list_for_each_entry(range, &el->el_exprs, re_link) { 938 list_for_each_entry(range, &el->el_exprs, re_link) {
914 for (i = range->re_lo; i <= range->re_hi; i++) { 939 for (i = range->re_lo; i <= range->re_hi; i++) {
915 if ((i - range->re_lo) % range->re_stride != 0) 940 if ((i - range->re_lo) % range->re_stride)
916 continue; 941 continue;
917 942
918 rc = node ? cfs_cpt_set_node(cptab, cpt, i) : 943 rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
@@ -945,8 +970,8 @@ cfs_cpt_table_create_pattern(char *pattern)
945static int 970static int
946cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) 971cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
947{ 972{
948 unsigned int cpu = (unsigned long)hcpu; 973 unsigned int cpu = (unsigned long)hcpu;
949 bool warn; 974 bool warn;
950 975
951 switch (action) { 976 switch (action) {
952 case CPU_DEAD: 977 case CPU_DEAD:
@@ -1019,7 +1044,7 @@ cfs_cpu_init(void)
1019 register_hotcpu_notifier(&cfs_cpu_notifier); 1044 register_hotcpu_notifier(&cfs_cpu_notifier);
1020#endif 1045#endif
1021 1046
1022 if (*cpu_pattern != 0) { 1047 if (*cpu_pattern) {
1023 cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); 1048 cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
1024 if (!cfs_cpt_table) { 1049 if (!cfs_cpt_table) {
1025 CERROR("Failed to create cptab from pattern %s\n", 1050 CERROR("Failed to create cptab from pattern %s\n",
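
The hunks above rework how cfs_cpt_table_create_pattern() interprets the cpu_pattern module parameter: a leading "n"/"N" switches to NUMA-node numbering, a bare "N" now builds one partition per online node, and otherwise the number of '[' brackets gives the partition count. The following stand-alone C sketch mirrors only that counting logic in user space; the helper name count_partitions() and the example pattern strings are illustrative assumptions, not the exact libcfs grammar or parser.

/*
 * Simplified, user-space sketch of how the patched
 * cfs_cpt_table_create_pattern() decides how many CPU partitions a
 * cpu_pattern string describes.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* A bare "n"/"N" means "one partition per NUMA node"; otherwise every
 * '[' opens one "cpt[expr]" block. */
static int count_partitions(const char *pattern, int online_nodes)
{
	const char *str = pattern;
	int ncpt = 0;

	while (isspace((unsigned char)*str))
		str++;

	if (*str == 'n' || *str == 'N') {
		if (str[1] == '\0')
			return online_nodes;	/* NUMA shortcut */
		str++;				/* node-number mode */
	}

	for (;; ncpt++) {
		str = strchr(str, '[');
		if (!str)
			break;
		str++;
	}
	return ncpt;
}

int main(void)
{
	/* Example patterns; the exact syntax libcfs accepts may differ. */
	printf("%d\n", count_partitions("0[0-3] 1[4-7]", 2));	/* 2 */
	printf("%d\n", count_partitions("N 0[0] 1[1]", 2));	/* 2 */
	printf("%d\n", count_partitions("N", 2));		/* 2, one CPT per node */
	return 0;
}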
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 7f56d2c9dd00..68e34b4a76c9 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -64,7 +64,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
64 unsigned int key_len) 64 unsigned int key_len)
65{ 65{
66 struct crypto_ahash *tfm; 66 struct crypto_ahash *tfm;
67 int err = 0; 67 int err = 0;
68 68
69 *type = cfs_crypto_hash_type(hash_alg); 69 *type = cfs_crypto_hash_type(hash_alg);
70 70
@@ -93,12 +93,12 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
93 93
94 if (key) 94 if (key)
95 err = crypto_ahash_setkey(tfm, key, key_len); 95 err = crypto_ahash_setkey(tfm, key, key_len);
96 else if ((*type)->cht_key != 0) 96 else if ((*type)->cht_key)
97 err = crypto_ahash_setkey(tfm, 97 err = crypto_ahash_setkey(tfm,
98 (unsigned char *)&((*type)->cht_key), 98 (unsigned char *)&((*type)->cht_key),
99 (*type)->cht_size); 99 (*type)->cht_size);
100 100
101 if (err != 0) { 101 if (err) {
102 ahash_request_free(*req); 102 ahash_request_free(*req);
103 crypto_free_ahash(tfm); 103 crypto_free_ahash(tfm);
104 return err; 104 return err;
@@ -147,16 +147,16 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
147 unsigned char *key, unsigned int key_len, 147 unsigned char *key, unsigned int key_len,
148 unsigned char *hash, unsigned int *hash_len) 148 unsigned char *hash, unsigned int *hash_len)
149{ 149{
150 struct scatterlist sl; 150 struct scatterlist sl;
151 struct ahash_request *req; 151 struct ahash_request *req;
152 int err; 152 int err;
153 const struct cfs_crypto_hash_type *type; 153 const struct cfs_crypto_hash_type *type;
154 154
155 if (!buf || buf_len == 0 || !hash_len) 155 if (!buf || !buf_len || !hash_len)
156 return -EINVAL; 156 return -EINVAL;
157 157
158 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); 158 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
159 if (err != 0) 159 if (err)
160 return err; 160 return err;
161 161
162 if (!hash || *hash_len < type->cht_size) { 162 if (!hash || *hash_len < type->cht_size) {
@@ -177,7 +177,7 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
177EXPORT_SYMBOL(cfs_crypto_hash_digest); 177EXPORT_SYMBOL(cfs_crypto_hash_digest);
178 178
179/** 179/**
180 * Allocate and initialize desriptor for hash algorithm. 180 * Allocate and initialize descriptor for hash algorithm.
181 * 181 *
182 * This should be used to initialize a hash descriptor for multiple calls 182 * This should be used to initialize a hash descriptor for multiple calls
183 * to a single hash function when computing the hash across multiple 183 * to a single hash function when computing the hash across multiple
@@ -198,8 +198,8 @@ cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
198 unsigned char *key, unsigned int key_len) 198 unsigned char *key, unsigned int key_len)
199{ 199{
200 struct ahash_request *req; 200 struct ahash_request *req;
201 int err; 201 int err;
202 const struct cfs_crypto_hash_type *type; 202 const struct cfs_crypto_hash_type *type;
203 203
204 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); 204 err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
205 205
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(cfs_crypto_hash_update);
273int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, 273int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
274 unsigned char *hash, unsigned int *hash_len) 274 unsigned char *hash, unsigned int *hash_len)
275{ 275{
276 int err; 276 int err;
277 struct ahash_request *req = (void *)hdesc; 277 struct ahash_request *req = (void *)hdesc;
278 int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 278 int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
279 279
@@ -312,8 +312,8 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
312{ 312{
313 int buf_len = max(PAGE_SIZE, 1048576UL); 313 int buf_len = max(PAGE_SIZE, 1048576UL);
314 void *buf; 314 void *buf;
315 unsigned long start, end; 315 unsigned long start, end;
316 int bcount, err = 0; 316 int bcount, err = 0;
317 struct page *page; 317 struct page *page;
318 unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX]; 318 unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
319 unsigned int hash_len = sizeof(hash); 319 unsigned int hash_len = sizeof(hash);
@@ -358,7 +358,7 @@ out_err:
358 CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n", 358 CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
359 cfs_crypto_hash_name(hash_alg), err); 359 cfs_crypto_hash_name(hash_alg), err);
360 } else { 360 } else {
361 unsigned long tmp; 361 unsigned long tmp;
362 362
363 tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * 363 tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
364 1000) / (1024 * 1024); 364 1000) / (1024 * 1024);
@@ -440,6 +440,6 @@ int cfs_crypto_register(void)
440 */ 440 */
441void cfs_crypto_unregister(void) 441void cfs_crypto_unregister(void)
442{ 442{
443 if (adler32 == 0) 443 if (!adler32)
444 cfs_crypto_adler32_unregister(); 444 cfs_crypto_adler32_unregister();
445} 445}
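
The linux-crypto.c changes above are purely stylistic, but they sit on top of the kernel ahash API that cfs_crypto_hash_digest() wraps. Below is a minimal sketch of a one-shot digest with that API; the algorithm name "crc32c", the function name example_digest(), and the assumption that the transform completes synchronously (async implementations masked off, so no completion callback) are simplifications, not the libcfs code.

/*
 * Minimal one-shot hash with the kernel ahash API, in the style of
 * cfs_crypto_hash_digest(): allocate the tfm, bind a scatterlist and
 * output buffer to a request, and digest in a single call.
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>

static int example_digest(const void *buf, unsigned int len,
			  u8 *out, unsigned int outlen)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	if (outlen < crypto_ahash_digestsize(tfm)) {
		err = -ENOSPC;
		goto out_free_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);	/* init + update + final in one call */

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}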
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index 18e8cd4d8758..d0b3aa80cfa6 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,4 +1,4 @@
1 /* 1/*
2 * GPL HEADER START 2 * GPL HEADER START
3 * 3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 435b784c52f8..39a72e3f0c18 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -57,7 +57,6 @@
57 57
58#include <linux/kallsyms.h> 58#include <linux/kallsyms.h>
59 59
60char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
61char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; 60char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
62 61
63/** 62/**
@@ -68,11 +67,12 @@ char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
68void libcfs_run_debug_log_upcall(char *file) 67void libcfs_run_debug_log_upcall(char *file)
69{ 68{
70 char *argv[3]; 69 char *argv[3];
71 int rc; 70 int rc;
72 char *envp[] = { 71 static const char * const envp[] = {
73 "HOME=/", 72 "HOME=/",
74 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", 73 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
75 NULL}; 74 NULL
75 };
76 76
77 argv[0] = lnet_debug_log_upcall; 77 argv[0] = lnet_debug_log_upcall;
78 78
@@ -81,7 +81,7 @@ void libcfs_run_debug_log_upcall(char *file)
81 81
82 argv[2] = NULL; 82 argv[2] = NULL;
83 83
84 rc = call_usermodehelper(argv[0], argv, envp, 1); 84 rc = call_usermodehelper(argv[0], argv, (char **)envp, 1);
85 if (rc < 0 && rc != -ENOENT) { 85 if (rc < 0 && rc != -ENOENT) {
86 CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n", 86 CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
87 rc, argv[0], argv[1]); 87 rc, argv[0], argv[1]);
@@ -91,57 +91,6 @@ void libcfs_run_debug_log_upcall(char *file)
91 } 91 }
92} 92}
93 93
94void libcfs_run_upcall(char **argv)
95{
96 int rc;
97 int argc;
98 char *envp[] = {
99 "HOME=/",
100 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
101 NULL};
102
103 argv[0] = lnet_upcall;
104 argc = 1;
105 while (argv[argc])
106 argc++;
107
108 LASSERT(argc >= 2);
109
110 rc = call_usermodehelper(argv[0], argv, envp, 1);
111 if (rc < 0 && rc != -ENOENT) {
112 CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
113 rc, argv[0], argv[1],
114 argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
115 argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
116 argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
117 argc < 6 ? "" : ",...");
118 } else {
119 CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
120 argv[0], argv[1],
121 argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
122 argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
123 argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
124 argc < 6 ? "" : ",...");
125 }
126}
127
128void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
129{
130 char *argv[6];
131 char buf[32];
132
133 snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
134
135 argv[1] = "LBUG";
136 argv[2] = (char *)msgdata->msg_file;
137 argv[3] = (char *)msgdata->msg_fn;
138 argv[4] = buf;
139 argv[5] = NULL;
140
141 libcfs_run_upcall(argv);
142}
143EXPORT_SYMBOL(libcfs_run_lbug_upcall);
144
145/* coverity[+kill] */ 94/* coverity[+kill] */
146void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) 95void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
147{ 96{
@@ -156,7 +105,6 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
156 dump_stack(); 105 dump_stack();
157 if (!libcfs_panic_on_lbug) 106 if (!libcfs_panic_on_lbug)
158 libcfs_debug_dumplog(); 107 libcfs_debug_dumplog();
159 libcfs_run_lbug_upcall(msgdata);
160 if (libcfs_panic_on_lbug) 108 if (libcfs_panic_on_lbug)
161 panic("LBUG"); 109 panic("LBUG");
162 set_task_state(current, TASK_UNINTERRUPTIBLE); 110 set_task_state(current, TASK_UNINTERRUPTIBLE);
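
The linux-debug.c hunks drop the generic LNET upcall but keep the debug-log upcall, which now passes a static const environment cast to (char **). The sketch below shows that usermode-helper pattern in isolation; the helper path "/usr/sbin/example_upcall" is made up, and UMH_WAIT_EXEC is assumed to be the symbolic name for the literal 1 used in the retained code.

/*
 * Sketch of the usermode-helper call kept by the diff above: a const
 * environment, a small argv, and an error path that tolerates a
 * missing helper binary.
 */
#include <linux/kmod.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int run_example_upcall(char *dumpfile)
{
	static const char * const envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	char *argv[] = { "/usr/sbin/example_upcall", dumpfile, NULL };
	int rc;

	/* UMH_WAIT_EXEC (== 1): return once the helper has been exec'd. */
	rc = call_usermodehelper(argv[0], argv, (char **)envp, UMH_WAIT_EXEC);
	if (rc < 0 && rc != -ENOENT)
		pr_err("example upcall %s failed: %d\n", argv[0], rc);
	return rc;
}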
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 38308f8b6aae..3f5d58babc2f 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -83,7 +83,7 @@ static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
83 CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n"); 83 CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
84 return true; 84 return true;
85 } 85 }
86 if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { 86 if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
87 CERROR("LIBCFS ioctl: packlen != ioc_len\n"); 87 CERROR("LIBCFS ioctl: packlen != ioc_len\n");
88 return true; 88 return true;
89 } 89 }
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 291d286eab48..cf902154f0aa 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -45,8 +45,8 @@
45sigset_t 45sigset_t
46cfs_block_allsigs(void) 46cfs_block_allsigs(void)
47{ 47{
48 unsigned long flags; 48 unsigned long flags;
49 sigset_t old; 49 sigset_t old;
50 50
51 spin_lock_irqsave(&current->sighand->siglock, flags); 51 spin_lock_irqsave(&current->sighand->siglock, flags);
52 old = current->blocked; 52 old = current->blocked;
@@ -60,8 +60,8 @@ EXPORT_SYMBOL(cfs_block_allsigs);
60 60
61sigset_t cfs_block_sigs(unsigned long sigs) 61sigset_t cfs_block_sigs(unsigned long sigs)
62{ 62{
63 unsigned long flags; 63 unsigned long flags;
64 sigset_t old; 64 sigset_t old;
65 65
66 spin_lock_irqsave(&current->sighand->siglock, flags); 66 spin_lock_irqsave(&current->sighand->siglock, flags);
67 old = current->blocked; 67 old = current->blocked;
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(cfs_block_sigsinv);
91void 91void
92cfs_restore_sigs(sigset_t old) 92cfs_restore_sigs(sigset_t old)
93{ 93{
94 unsigned long flags; 94 unsigned long flags;
95 95
96 spin_lock_irqsave(&current->sighand->siglock, flags); 96 spin_lock_irqsave(&current->sighand->siglock, flags);
97 current->blocked = old; 97 current->blocked = old;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 8b551d2708ba..75eb84e7f0f8 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -49,8 +49,8 @@ static DECLARE_RWSEM(cfs_tracefile_sem);
49 49
50int cfs_tracefile_init_arch(void) 50int cfs_tracefile_init_arch(void)
51{ 51{
52 int i; 52 int i;
53 int j; 53 int j;
54 struct cfs_trace_cpu_data *tcd; 54 struct cfs_trace_cpu_data *tcd;
55 55
56 /* initialize trace_data */ 56 /* initialize trace_data */
@@ -85,14 +85,14 @@ int cfs_tracefile_init_arch(void)
85 85
86out: 86out:
87 cfs_tracefile_fini_arch(); 87 cfs_tracefile_fini_arch();
88 printk(KERN_ERR "lnet: Not enough memory\n"); 88 pr_err("lnet: Not enough memory\n");
89 return -ENOMEM; 89 return -ENOMEM;
90} 90}
91 91
92void cfs_tracefile_fini_arch(void) 92void cfs_tracefile_fini_arch(void)
93{ 93{
94 int i; 94 int i;
95 int j; 95 int j;
96 96
97 for (i = 0; i < num_possible_cpus(); i++) 97 for (i = 0; i < num_possible_cpus(); i++)
98 for (j = 0; j < 3; j++) { 98 for (j = 0; j < 3; j++) {
@@ -224,26 +224,26 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
224{ 224{
225 char *prefix = "Lustre", *ptype = NULL; 225 char *prefix = "Lustre", *ptype = NULL;
226 226
227 if ((mask & D_EMERG) != 0) { 227 if (mask & D_EMERG) {
228 prefix = dbghdr_to_err_string(hdr); 228 prefix = dbghdr_to_err_string(hdr);
229 ptype = KERN_EMERG; 229 ptype = KERN_EMERG;
230 } else if ((mask & D_ERROR) != 0) { 230 } else if (mask & D_ERROR) {
231 prefix = dbghdr_to_err_string(hdr); 231 prefix = dbghdr_to_err_string(hdr);
232 ptype = KERN_ERR; 232 ptype = KERN_ERR;
233 } else if ((mask & D_WARNING) != 0) { 233 } else if (mask & D_WARNING) {
234 prefix = dbghdr_to_info_string(hdr); 234 prefix = dbghdr_to_info_string(hdr);
235 ptype = KERN_WARNING; 235 ptype = KERN_WARNING;
236 } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) { 236 } else if (mask & (D_CONSOLE | libcfs_printk)) {
237 prefix = dbghdr_to_info_string(hdr); 237 prefix = dbghdr_to_info_string(hdr);
238 ptype = KERN_INFO; 238 ptype = KERN_INFO;
239 } 239 }
240 240
241 if ((mask & D_CONSOLE) != 0) { 241 if (mask & D_CONSOLE) {
242 printk("%s%s: %.*s", ptype, prefix, len, buf); 242 pr_info("%s%s: %.*s", ptype, prefix, len, buf);
243 } else { 243 } else {
244 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, 244 pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
245 hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, 245 hdr->ph_pid, hdr->ph_extern_pid, file,
246 fn, len, buf); 246 hdr->ph_line_num, fn, len, buf);
247 } 247 }
248} 248}
249 249
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 86b4d25cad46..161e04226521 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -183,12 +183,12 @@ EXPORT_SYMBOL(lprocfs_call_handler);
183static int __proc_dobitmasks(void *data, int write, 183static int __proc_dobitmasks(void *data, int write,
184 loff_t pos, void __user *buffer, int nob) 184 loff_t pos, void __user *buffer, int nob)
185{ 185{
186 const int tmpstrlen = 512; 186 const int tmpstrlen = 512;
187 char *tmpstr; 187 char *tmpstr;
188 int rc; 188 int rc;
189 unsigned int *mask = data; 189 unsigned int *mask = data;
190 int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; 190 int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
191 int is_printk = (mask == &libcfs_printk) ? 1 : 0; 191 int is_printk = (mask == &libcfs_printk) ? 1 : 0;
192 192
193 rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); 193 rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
194 if (rc < 0) 194 if (rc < 0)
@@ -293,8 +293,8 @@ static int __proc_cpt_table(void *data, int write,
293 loff_t pos, void __user *buffer, int nob) 293 loff_t pos, void __user *buffer, int nob)
294{ 294{
295 char *buf = NULL; 295 char *buf = NULL;
296 int len = 4096; 296 int len = 4096;
297 int rc = 0; 297 int rc = 0;
298 298
299 if (write) 299 if (write)
300 return -EPERM; 300 return -EPERM;
@@ -365,14 +365,6 @@ static struct ctl_table lnet_table[] = {
365 .mode = 0444, 365 .mode = 0444,
366 .proc_handler = &proc_cpt_table, 366 .proc_handler = &proc_cpt_table,
367 }, 367 },
368
369 {
370 .procname = "upcall",
371 .data = lnet_upcall,
372 .maxlen = sizeof(lnet_upcall),
373 .mode = 0644,
374 .proc_handler = &proc_dostring,
375 },
376 { 368 {
377 .procname = "debug_log_upcall", 369 .procname = "debug_log_upcall",
378 .data = lnet_debug_log_upcall, 370 .data = lnet_debug_log_upcall,
@@ -547,7 +539,7 @@ static int libcfs_init(void)
547 } 539 }
548 540
549 rc = cfs_cpu_init(); 541 rc = cfs_cpu_init();
550 if (rc != 0) 542 if (rc)
551 goto cleanup_debug; 543 goto cleanup_debug;
552 544
553 rc = misc_register(&libcfs_dev); 545 rc = misc_register(&libcfs_dev);
@@ -566,7 +558,7 @@ static int libcfs_init(void)
566 rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4); 558 rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
567 rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY, 559 rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
568 rc, &cfs_sched_rehash); 560 rc, &cfs_sched_rehash);
569 if (rc != 0) { 561 if (rc) {
570 CERROR("Startup workitem scheduler: error: %d\n", rc); 562 CERROR("Startup workitem scheduler: error: %d\n", rc);
571 goto cleanup_deregister; 563 goto cleanup_deregister;
572 } 564 }
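
The module.c hunk above removes the "upcall" sysctl entry but leaves the debug_log_upcall entry, a fixed-size string exposed through proc_dostring(). The fragment below sketches that wiring with example names; registration of the table into /proc/sys is left out, and the buffer size and default path are illustrative only.

/*
 * Sketch of a string sysctl in the style of the debug_log_upcall
 * entry that survives the hunk above: a static buffer handled by
 * proc_dostring(), terminated by an empty sentinel entry.
 */
#include <linux/sysctl.h>

static char example_upcall[1024] = "/usr/sbin/example_upcall";

static struct ctl_table example_table[] = {
	{
		.procname	= "example_upcall",
		.data		= example_upcall,
		.maxlen		= sizeof(example_upcall),
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{ }
};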
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index a9bdb284fd15..21d5a3912c5f 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -33,7 +33,7 @@
33 * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16, 33 * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
34 * number and carry packed within the same 32 bit integer. 34 * number and carry packed within the same 32 bit integer.
35 * algorithm recommended by Marsaglia 35 * algorithm recommended by Marsaglia
36*/ 36 */
37 37
38#include "../../include/linux/libcfs/libcfs.h" 38#include "../../include/linux/libcfs/libcfs.h"
39 39
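
The prng.c comment fixed above describes Marsaglia's multiply-with-carry generator: two 16-bit streams, each packing its state in the low half of a 32-bit word and the carry in the high half. The stand-alone sketch below uses the widely published multipliers 36969 and 18000 and arbitrary seeds; those constants are an assumption from Marsaglia's recipe and are not necessarily the exact values libcfs uses.

/*
 * Two-stream multiply-with-carry generator as described in the
 * prng.c comment: x(n) = a*x(n-1) + carry, with the carry kept in
 * the upper 16 bits of the same 32-bit word.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t seed_x = 521288629;
static uint32_t seed_y = 362436069;

static uint32_t mwc_rand(void)
{
	seed_x = 36969 * (seed_x & 0xffff) + (seed_x >> 16);
	seed_y = 18000 * (seed_y & 0xffff) + (seed_y >> 16);
	return (seed_x << 16) + (seed_y & 0xffff);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%08x\n", mwc_rand());
	return 0;
}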
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 1c7efdfaffcf..d7b29f8997c0 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -59,13 +59,13 @@ struct page_collection {
59 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, 59 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
60 * only ->tcd_pages are spilled. 60 * only ->tcd_pages are spilled.
61 */ 61 */
62 int pc_want_daemon_pages; 62 int pc_want_daemon_pages;
63}; 63};
64 64
65struct tracefiled_ctl { 65struct tracefiled_ctl {
66 struct completion tctl_start; 66 struct completion tctl_start;
67 struct completion tctl_stop; 67 struct completion tctl_stop;
68 wait_queue_head_t tctl_waitq; 68 wait_queue_head_t tctl_waitq;
69 pid_t tctl_pid; 69 pid_t tctl_pid;
70 atomic_t tctl_shutdown; 70 atomic_t tctl_shutdown;
71}; 71};
@@ -77,24 +77,24 @@ struct cfs_trace_page {
77 /* 77 /*
78 * page itself 78 * page itself
79 */ 79 */
80 struct page *page; 80 struct page *page;
81 /* 81 /*
82 * linkage into one of the lists in trace_data_union or 82 * linkage into one of the lists in trace_data_union or
83 * page_collection 83 * page_collection
84 */ 84 */
85 struct list_head linkage; 85 struct list_head linkage;
86 /* 86 /*
87 * number of bytes used within this page 87 * number of bytes used within this page
88 */ 88 */
89 unsigned int used; 89 unsigned int used;
90 /* 90 /*
91 * cpu that owns this page 91 * cpu that owns this page
92 */ 92 */
93 unsigned short cpu; 93 unsigned short cpu;
94 /* 94 /*
95 * type(context) of this page 95 * type(context) of this page
96 */ 96 */
97 unsigned short type; 97 unsigned short type;
98}; 98};
99 99
100static void put_pages_on_tcd_daemon_list(struct page_collection *pc, 100static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
@@ -108,7 +108,7 @@ cfs_tage_from_list(struct list_head *list)
108 108
109static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) 109static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
110{ 110{
111 struct page *page; 111 struct page *page;
112 struct cfs_trace_page *tage; 112 struct cfs_trace_page *tage;
113 113
114 /* My caller is trying to free memory */ 114 /* My caller is trying to free memory */
@@ -236,7 +236,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
236 INIT_LIST_HEAD(&pc.pc_pages); 236 INIT_LIST_HEAD(&pc.pc_pages);
237 237
238 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { 238 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
239 if (pgcount-- == 0) 239 if (!pgcount--)
240 break; 240 break;
241 241
242 list_move_tail(&tage->linkage, &pc.pc_pages); 242 list_move_tail(&tage->linkage, &pc.pc_pages);
@@ -278,7 +278,7 @@ int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
278 const char *format, ...) 278 const char *format, ...)
279{ 279{
280 va_list args; 280 va_list args;
281 int rc; 281 int rc;
282 282
283 va_start(args, format); 283 va_start(args, format);
284 rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); 284 rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
@@ -293,21 +293,21 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
293 const char *format2, ...) 293 const char *format2, ...)
294{ 294{
295 struct cfs_trace_cpu_data *tcd = NULL; 295 struct cfs_trace_cpu_data *tcd = NULL;
296 struct ptldebug_header header = {0}; 296 struct ptldebug_header header = { 0 };
297 struct cfs_trace_page *tage; 297 struct cfs_trace_page *tage;
298 /* string_buf is used only if tcd != NULL, and is always set then */ 298 /* string_buf is used only if tcd != NULL, and is always set then */
299 char *string_buf = NULL; 299 char *string_buf = NULL;
300 char *debug_buf; 300 char *debug_buf;
301 int known_size; 301 int known_size;
302 int needed = 85; /* average message length */ 302 int needed = 85; /* average message length */
303 int max_nob; 303 int max_nob;
304 va_list ap; 304 va_list ap;
305 int depth; 305 int depth;
306 int i; 306 int i;
307 int remain; 307 int remain;
308 int mask = msgdata->msg_mask; 308 int mask = msgdata->msg_mask;
309 const char *file = kbasename(msgdata->msg_file); 309 const char *file = kbasename(msgdata->msg_file);
310 struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; 310 struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
311 311
312 tcd = cfs_trace_get_tcd(); 312 tcd = cfs_trace_get_tcd();
313 313
@@ -320,7 +320,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
320 if (!tcd) /* arch may not log in IRQ context */ 320 if (!tcd) /* arch may not log in IRQ context */
321 goto console; 321 goto console;
322 322
323 if (tcd->tcd_cur_pages == 0) 323 if (!tcd->tcd_cur_pages)
324 header.ph_flags |= PH_FLAG_FIRST_RECORD; 324 header.ph_flags |= PH_FLAG_FIRST_RECORD;
325 325
326 if (tcd->tcd_shutting_down) { 326 if (tcd->tcd_shutting_down) {
@@ -423,7 +423,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
423 __LASSERT(tage->used <= PAGE_SIZE); 423 __LASSERT(tage->used <= PAGE_SIZE);
424 424
425console: 425console:
426 if ((mask & libcfs_printk) == 0) { 426 if (!(mask & libcfs_printk)) {
427 /* no console output requested */ 427 /* no console output requested */
428 if (tcd) 428 if (tcd)
429 cfs_trace_put_tcd(tcd); 429 cfs_trace_put_tcd(tcd);
@@ -432,7 +432,7 @@ console:
432 432
433 if (cdls) { 433 if (cdls) {
434 if (libcfs_console_ratelimit && 434 if (libcfs_console_ratelimit &&
435 cdls->cdls_next != 0 && /* not first time ever */ 435 cdls->cdls_next && /* not first time ever */
436 !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { 436 !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
437 /* skipping a console message */ 437 /* skipping a console message */
438 cdls->cdls_count++; 438 cdls->cdls_count++;
@@ -489,7 +489,7 @@ console:
489 put_cpu(); 489 put_cpu();
490 } 490 }
491 491
492 if (cdls && cdls->cdls_count != 0) { 492 if (cdls && cdls->cdls_count) {
493 string_buf = cfs_trace_get_console_buffer(); 493 string_buf = cfs_trace_get_console_buffer();
494 494
495 needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, 495 needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -535,9 +535,9 @@ panic_collect_pages(struct page_collection *pc)
535 * CPUs have been stopped during a panic. If this isn't true for some 535 * CPUs have been stopped during a panic. If this isn't true for some
536 * arch, this will have to be implemented separately in each arch. 536 * arch, this will have to be implemented separately in each arch.
537 */ 537 */
538 int i;
539 int j;
540 struct cfs_trace_cpu_data *tcd; 538 struct cfs_trace_cpu_data *tcd;
539 int i;
540 int j;
541 541
542 INIT_LIST_HEAD(&pc->pc_pages); 542 INIT_LIST_HEAD(&pc->pc_pages);
543 543
@@ -698,11 +698,11 @@ void cfs_trace_debug_print(void)
698 698
699int cfs_tracefile_dump_all_pages(char *filename) 699int cfs_tracefile_dump_all_pages(char *filename)
700{ 700{
701 struct page_collection pc; 701 struct page_collection pc;
702 struct file *filp; 702 struct file *filp;
703 struct cfs_trace_page *tage; 703 struct cfs_trace_page *tage;
704 struct cfs_trace_page *tmp; 704 struct cfs_trace_page *tmp;
705 char *buf; 705 char *buf;
706 mm_segment_t __oldfs; 706 mm_segment_t __oldfs;
707 int rc; 707 int rc;
708 708
@@ -778,7 +778,7 @@ void cfs_trace_flush_pages(void)
778int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, 778int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
779 const char __user *usr_buffer, int usr_buffer_nob) 779 const char __user *usr_buffer, int usr_buffer_nob)
780{ 780{
781 int nob; 781 int nob;
782 782
783 if (usr_buffer_nob > knl_buffer_nob) 783 if (usr_buffer_nob > knl_buffer_nob)
784 return -EOVERFLOW; 784 return -EOVERFLOW;
@@ -810,7 +810,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
810 * NB if 'append' != NULL, it's a single character to append to the 810 * NB if 'append' != NULL, it's a single character to append to the
811 * copied out string - usually "\n" or "" (i.e. a terminating zero byte) 811 * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
812 */ 812 */
813 int nob = strlen(knl_buffer); 813 int nob = strlen(knl_buffer);
814 814
815 if (nob > usr_buffer_nob) 815 if (nob > usr_buffer_nob)
816 nob = usr_buffer_nob; 816 nob = usr_buffer_nob;
@@ -843,16 +843,16 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
843 843
844int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) 844int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
845{ 845{
846 char *str; 846 char *str;
847 int rc; 847 int rc;
848 848
849 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); 849 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
850 if (rc != 0) 850 if (rc)
851 return rc; 851 return rc;
852 852
853 rc = cfs_trace_copyin_string(str, usr_str_nob + 1, 853 rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
854 usr_str, usr_str_nob); 854 usr_str, usr_str_nob);
855 if (rc != 0) 855 if (rc)
856 goto out; 856 goto out;
857 857
858 if (str[0] != '/') { 858 if (str[0] != '/') {
@@ -867,17 +867,17 @@ out:
867 867
868int cfs_trace_daemon_command(char *str) 868int cfs_trace_daemon_command(char *str)
869{ 869{
870 int rc = 0; 870 int rc = 0;
871 871
872 cfs_tracefile_write_lock(); 872 cfs_tracefile_write_lock();
873 873
874 if (strcmp(str, "stop") == 0) { 874 if (!strcmp(str, "stop")) {
875 cfs_tracefile_write_unlock(); 875 cfs_tracefile_write_unlock();
876 cfs_trace_stop_thread(); 876 cfs_trace_stop_thread();
877 cfs_tracefile_write_lock(); 877 cfs_tracefile_write_lock();
878 memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); 878 memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
879 879
880 } else if (strncmp(str, "size=", 5) == 0) { 880 } else if (!strncmp(str, "size=", 5)) {
881 unsigned long tmp; 881 unsigned long tmp;
882 882
883 rc = kstrtoul(str + 5, 10, &tmp); 883 rc = kstrtoul(str + 5, 10, &tmp);
@@ -909,15 +909,15 @@ int cfs_trace_daemon_command(char *str)
909int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) 909int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
910{ 910{
911 char *str; 911 char *str;
912 int rc; 912 int rc;
913 913
914 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); 914 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
915 if (rc != 0) 915 if (rc)
916 return rc; 916 return rc;
917 917
918 rc = cfs_trace_copyin_string(str, usr_str_nob + 1, 918 rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
919 usr_str, usr_str_nob); 919 usr_str, usr_str_nob);
920 if (rc == 0) 920 if (!rc)
921 rc = cfs_trace_daemon_command(str); 921 rc = cfs_trace_daemon_command(str);
922 922
923 kfree(str); 923 kfree(str);
@@ -1003,7 +1003,7 @@ static int tracefiled(void *arg)
1003 1003
1004 filp = NULL; 1004 filp = NULL;
1005 cfs_tracefile_read_lock(); 1005 cfs_tracefile_read_lock();
1006 if (cfs_tracefile[0] != 0) { 1006 if (cfs_tracefile[0]) {
1007 filp = filp_open(cfs_tracefile, 1007 filp = filp_open(cfs_tracefile,
1008 O_CREAT | O_RDWR | O_LARGEFILE, 1008 O_CREAT | O_RDWR | O_LARGEFILE,
1009 0600); 1009 0600);
@@ -1072,7 +1072,7 @@ static int tracefiled(void *arg)
1072 __LASSERT(list_empty(&pc.pc_pages)); 1072 __LASSERT(list_empty(&pc.pc_pages));
1073end_loop: 1073end_loop:
1074 if (atomic_read(&tctl->tctl_shutdown)) { 1074 if (atomic_read(&tctl->tctl_shutdown)) {
1075 if (last_loop == 0) { 1075 if (!last_loop) {
1076 last_loop = 1; 1076 last_loop = 1;
1077 continue; 1077 continue;
1078 } else { 1078 } else {
@@ -1135,13 +1135,13 @@ void cfs_trace_stop_thread(void)
1135int cfs_tracefile_init(int max_pages) 1135int cfs_tracefile_init(int max_pages)
1136{ 1136{
1137 struct cfs_trace_cpu_data *tcd; 1137 struct cfs_trace_cpu_data *tcd;
1138 int i; 1138 int i;
1139 int j; 1139 int j;
1140 int rc; 1140 int rc;
1141 int factor; 1141 int factor;
1142 1142
1143 rc = cfs_tracefile_init_arch(); 1143 rc = cfs_tracefile_init_arch();
1144 if (rc != 0) 1144 if (rc)
1145 return rc; 1145 return rc;
1146 1146
1147 cfs_tcd_for_each(tcd, i, j) { 1147 cfs_tcd_for_each(tcd, i, j) {
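
Among the tracefile.c cleanups above is cfs_trace_daemon_command(), which recognises commands such as "stop" and "size=<n>" via strncmp plus kstrtoul. The helper below sketches that prefix-match-and-parse style on its own; the 1024 MB upper bound is an illustrative limit, not the one libcfs enforces.

/*
 * Sketch of the "size=" command parsing used by
 * cfs_trace_daemon_command(): match the prefix, convert the rest
 * with kstrtoul(), and range-check the result.
 */
#include <linux/kernel.h>
#include <linux/string.h>

static int parse_size_command(const char *str, unsigned long *size_mb)
{
	unsigned long tmp;
	int rc;

	if (strncmp(str, "size=", 5))
		return -EINVAL;		/* not a size command */

	rc = kstrtoul(str + 5, 10, &tmp);
	if (rc)
		return rc;

	if (!tmp || tmp > 1024)
		return -ERANGE;		/* keep the trace file size sane */

	*size_mb = tmp;
	return 0;
}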
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index d878676bc375..f644cbc5a277 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -45,7 +45,7 @@ enum cfs_trace_buf_type {
45/* trace file lock routines */ 45/* trace file lock routines */
46 46
47#define TRACEFILE_NAME_SIZE 1024 47#define TRACEFILE_NAME_SIZE 1024
48extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; 48extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
49extern long long cfs_tracefile_size; 49extern long long cfs_tracefile_size;
50 50
51void libcfs_run_debug_log_upcall(char *file); 51void libcfs_run_debug_log_upcall(char *file);
@@ -80,7 +80,7 @@ int cfs_trace_get_debug_mb(void);
80void libcfs_debug_dumplog_internal(void *arg); 80void libcfs_debug_dumplog_internal(void *arg);
81void libcfs_register_panic_notifier(void); 81void libcfs_register_panic_notifier(void);
82void libcfs_unregister_panic_notifier(void); 82void libcfs_unregister_panic_notifier(void);
83extern int libcfs_panic_in_progress; 83extern int libcfs_panic_in_progress;
84int cfs_trace_max_debug_mb(void); 84int cfs_trace_max_debug_mb(void);
85 85
86#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) 86#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
@@ -113,14 +113,14 @@ union cfs_trace_data_union {
113 * tcd_for_each_type_lock 113 * tcd_for_each_type_lock
114 */ 114 */
115 spinlock_t tcd_lock; 115 spinlock_t tcd_lock;
116 unsigned long tcd_lock_flags; 116 unsigned long tcd_lock_flags;
117 117
118 /* 118 /*
119 * pages with trace records not yet processed by tracefiled. 119 * pages with trace records not yet processed by tracefiled.
120 */ 120 */
121 struct list_head tcd_pages; 121 struct list_head tcd_pages;
122 /* number of pages on ->tcd_pages */ 122 /* number of pages on ->tcd_pages */
123 unsigned long tcd_cur_pages; 123 unsigned long tcd_cur_pages;
124 124
125 /* 125 /*
126 * pages with trace records already processed by 126 * pages with trace records already processed by
@@ -132,9 +132,9 @@ union cfs_trace_data_union {
132 * (put_pages_on_daemon_list()). LRU pages from this list are 132 * (put_pages_on_daemon_list()). LRU pages from this list are
133 * discarded when list grows too large. 133 * discarded when list grows too large.
134 */ 134 */
135 struct list_head tcd_daemon_pages; 135 struct list_head tcd_daemon_pages;
136 /* number of pages on ->tcd_daemon_pages */ 136 /* number of pages on ->tcd_daemon_pages */
137 unsigned long tcd_cur_daemon_pages; 137 unsigned long tcd_cur_daemon_pages;
138 138
139 /* 139 /*
140 * Maximal number of pages allowed on ->tcd_pages and 140 * Maximal number of pages allowed on ->tcd_pages and
@@ -142,7 +142,7 @@ union cfs_trace_data_union {
142 * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current 142 * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
143 * implementation. 143 * implementation.
144 */ 144 */
145 unsigned long tcd_max_pages; 145 unsigned long tcd_max_pages;
146 146
147 /* 147 /*
148 * preallocated pages to write trace records into. Pages from 148 * preallocated pages to write trace records into. Pages from
@@ -166,15 +166,15 @@ union cfs_trace_data_union {
166 * TCD_STOCK_PAGES pagesful are consumed by trace records all 166 * TCD_STOCK_PAGES pagesful are consumed by trace records all
167 * emitted in non-blocking contexts. Which is quite unlikely. 167 * emitted in non-blocking contexts. Which is quite unlikely.
168 */ 168 */
169 struct list_head tcd_stock_pages; 169 struct list_head tcd_stock_pages;
170 /* number of pages on ->tcd_stock_pages */ 170 /* number of pages on ->tcd_stock_pages */
171 unsigned long tcd_cur_stock_pages; 171 unsigned long tcd_cur_stock_pages;
172 172
173 unsigned short tcd_shutting_down; 173 unsigned short tcd_shutting_down;
174 unsigned short tcd_cpu; 174 unsigned short tcd_cpu;
175 unsigned short tcd_type; 175 unsigned short tcd_type;
176 /* The factors to share debug memory. */ 176 /* The factors to share debug memory. */
177 unsigned short tcd_pages_factor; 177 unsigned short tcd_pages_factor;
178 } tcd; 178 } tcd;
179 char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; 179 char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
180}; 180};
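
The tracefile.h hunk ends on the union that pads each per-CPU trace descriptor out to a cache-line multiple, so that adjacent CPUs never false-share a line. The fragment below shows the same idiom with a made-up structure; the member names and the NR_CPUS-sized array are illustrative only.

/*
 * Cache-line padding idiom from cfs_trace_data_union: the char array
 * in the union rounds the element size up to L1_CACHE_BYTES, so each
 * per-CPU slot starts on its own cache line.
 */
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>

struct example_cpu_stats {
	spinlock_t	lock;
	unsigned long	events;
	unsigned long	drops;
};

union example_cpu_slot {
	struct example_cpu_stats	stats;
	char		__pad[L1_CACHE_ALIGN(sizeof(struct example_cpu_stats))];
};

/* One slot per possible CPU, indexed by smp_processor_id(). */
static union example_cpu_slot example_slots[NR_CPUS] __cacheline_aligned;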
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index e98c818a14fb..d0512da6bcde 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -45,7 +45,7 @@ struct cfs_wi_sched {
45 /* chain on global list */ 45 /* chain on global list */
46 struct list_head ws_list; 46 struct list_head ws_list;
47 /** serialised workitems */ 47 /** serialised workitems */
48 spinlock_t ws_lock; 48 spinlock_t ws_lock;
49 /** where schedulers sleep */ 49 /** where schedulers sleep */
50 wait_queue_head_t ws_waitq; 50 wait_queue_head_t ws_waitq;
51 /** concurrent workitems */ 51 /** concurrent workitems */
@@ -59,26 +59,26 @@ struct cfs_wi_sched {
59 */ 59 */
60 struct list_head ws_rerunq; 60 struct list_head ws_rerunq;
61 /** CPT-table for this scheduler */ 61 /** CPT-table for this scheduler */
62 struct cfs_cpt_table *ws_cptab; 62 struct cfs_cpt_table *ws_cptab;
63 /** CPT id for affinity */ 63 /** CPT id for affinity */
64 int ws_cpt; 64 int ws_cpt;
65 /** number of scheduled workitems */ 65 /** number of scheduled workitems */
66 int ws_nscheduled; 66 int ws_nscheduled;
67 /** started scheduler thread, protected by cfs_wi_data::wi_glock */ 67 /** started scheduler thread, protected by cfs_wi_data::wi_glock */
68 unsigned int ws_nthreads:30; 68 unsigned int ws_nthreads:30;
69 /** shutting down, protected by cfs_wi_data::wi_glock */ 69 /** shutting down, protected by cfs_wi_data::wi_glock */
70 unsigned int ws_stopping:1; 70 unsigned int ws_stopping:1;
71 /** serialize starting thread, protected by cfs_wi_data::wi_glock */ 71 /** serialize starting thread, protected by cfs_wi_data::wi_glock */
72 unsigned int ws_starting:1; 72 unsigned int ws_starting:1;
73 /** scheduler name */ 73 /** scheduler name */
74 char ws_name[CFS_WS_NAME_LEN]; 74 char ws_name[CFS_WS_NAME_LEN];
75}; 75};
76 76
77static struct cfs_workitem_data { 77static struct cfs_workitem_data {
78 /** serialize */ 78 /** serialize */
79 spinlock_t wi_glock; 79 spinlock_t wi_glock;
80 /** list of all schedulers */ 80 /** list of all schedulers */
81 struct list_head wi_scheds; 81 struct list_head wi_scheds;
82 /** WI module is initialized */ 82 /** WI module is initialized */
83 int wi_init; 83 int wi_init;
84 /** shutting down the whole WI module */ 84 /** shutting down the whole WI module */
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
136int 136int
137cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi) 137cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
138{ 138{
139 int rc; 139 int rc;
140 140
141 LASSERT(!in_interrupt()); /* because we use plain spinlock */ 141 LASSERT(!in_interrupt()); /* because we use plain spinlock */
142 LASSERT(!sched->ws_stopping); 142 LASSERT(!sched->ws_stopping);
@@ -202,13 +202,13 @@ EXPORT_SYMBOL(cfs_wi_schedule);
202 202
203static int cfs_wi_scheduler(void *arg) 203static int cfs_wi_scheduler(void *arg)
204{ 204{
205 struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; 205 struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
206 206
207 cfs_block_allsigs(); 207 cfs_block_allsigs();
208 208
209 /* CPT affinity scheduler? */ 209 /* CPT affinity scheduler? */
210 if (sched->ws_cptab) 210 if (sched->ws_cptab)
211 if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) 211 if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt))
212 CWARN("Failed to bind %s on CPT %d\n", 212 CWARN("Failed to bind %s on CPT %d\n",
213 sched->ws_name, sched->ws_cpt); 213 sched->ws_name, sched->ws_cpt);
214 214
@@ -223,8 +223,8 @@ static int cfs_wi_scheduler(void *arg)
223 spin_lock(&sched->ws_lock); 223 spin_lock(&sched->ws_lock);
224 224
225 while (!sched->ws_stopping) { 225 while (!sched->ws_stopping) {
226 int nloops = 0; 226 int nloops = 0;
227 int rc; 227 int rc;
228 struct cfs_workitem *wi; 228 struct cfs_workitem *wi;
229 229
230 while (!list_empty(&sched->ws_runq) && 230 while (!list_empty(&sched->ws_runq) &&
@@ -238,16 +238,16 @@ static int cfs_wi_scheduler(void *arg)
238 LASSERT(sched->ws_nscheduled > 0); 238 LASSERT(sched->ws_nscheduled > 0);
239 sched->ws_nscheduled--; 239 sched->ws_nscheduled--;
240 240
241 wi->wi_running = 1; 241 wi->wi_running = 1;
242 wi->wi_scheduled = 0; 242 wi->wi_scheduled = 0;
243 243
244 spin_unlock(&sched->ws_lock); 244 spin_unlock(&sched->ws_lock);
245 nloops++; 245 nloops++;
246 246
247 rc = (*wi->wi_action) (wi); 247 rc = (*wi->wi_action)(wi);
248 248
249 spin_lock(&sched->ws_lock); 249 spin_lock(&sched->ws_lock);
250 if (rc != 0) /* WI should be dead, even be freed! */ 250 if (rc) /* WI should be dead, even be freed! */
251 continue; 251 continue;
252 252
253 wi->wi_running = 0; 253 wi->wi_running = 0;
@@ -273,7 +273,7 @@ static int cfs_wi_scheduler(void *arg)
273 273
274 spin_unlock(&sched->ws_lock); 274 spin_unlock(&sched->ws_lock);
275 rc = wait_event_interruptible_exclusive(sched->ws_waitq, 275 rc = wait_event_interruptible_exclusive(sched->ws_waitq,
276 !cfs_wi_sched_cansleep(sched)); 276 !cfs_wi_sched_cansleep(sched));
277 spin_lock(&sched->ws_lock); 277 spin_lock(&sched->ws_lock);
278 } 278 }
279 279
@@ -289,7 +289,7 @@ static int cfs_wi_scheduler(void *arg)
289void 289void
290cfs_wi_sched_destroy(struct cfs_wi_sched *sched) 290cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
291{ 291{
292 int i; 292 int i;
293 293
294 LASSERT(cfs_wi_data.wi_init); 294 LASSERT(cfs_wi_data.wi_init);
295 LASSERT(!cfs_wi_data.wi_stopping); 295 LASSERT(!cfs_wi_data.wi_stopping);
@@ -325,7 +325,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
325 list_del(&sched->ws_list); 325 list_del(&sched->ws_list);
326 326
327 spin_unlock(&cfs_wi_data.wi_glock); 327 spin_unlock(&cfs_wi_data.wi_glock);
328 LASSERT(sched->ws_nscheduled == 0); 328 LASSERT(!sched->ws_nscheduled);
329 329
330 LIBCFS_FREE(sched, sizeof(*sched)); 330 LIBCFS_FREE(sched, sizeof(*sched));
331} 331}
@@ -335,8 +335,8 @@ int
335cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, 335cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
336 int cpt, int nthrs, struct cfs_wi_sched **sched_pp) 336 int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
337{ 337{
338 struct cfs_wi_sched *sched; 338 struct cfs_wi_sched *sched;
339 int rc; 339 int rc;
340 340
341 LASSERT(cfs_wi_data.wi_init); 341 LASSERT(cfs_wi_data.wi_init);
342 LASSERT(!cfs_wi_data.wi_stopping); 342 LASSERT(!cfs_wi_data.wi_stopping);
@@ -364,7 +364,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
364 364
365 rc = 0; 365 rc = 0;
366 while (nthrs > 0) { 366 while (nthrs > 0) {
367 char name[16]; 367 char name[16];
368 struct task_struct *task; 368 struct task_struct *task;
369 369
370 spin_lock(&cfs_wi_data.wi_glock); 370 spin_lock(&cfs_wi_data.wi_glock);
@@ -431,7 +431,7 @@ cfs_wi_startup(void)
431void 431void
432cfs_wi_shutdown(void) 432cfs_wi_shutdown(void)
433{ 433{
434 struct cfs_wi_sched *sched; 434 struct cfs_wi_sched *sched;
435 struct cfs_wi_sched *temp; 435 struct cfs_wi_sched *temp;
436 436
437 spin_lock(&cfs_wi_data.wi_glock); 437 spin_lock(&cfs_wi_data.wi_glock);
@@ -447,7 +447,7 @@ cfs_wi_shutdown(void)
447 list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { 447 list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
448 spin_lock(&cfs_wi_data.wi_glock); 448 spin_lock(&cfs_wi_data.wi_glock);
449 449
450 while (sched->ws_nthreads != 0) { 450 while (sched->ws_nthreads) {
451 spin_unlock(&cfs_wi_data.wi_glock); 451 spin_unlock(&cfs_wi_data.wi_glock);
452 set_current_state(TASK_UNINTERRUPTIBLE); 452 set_current_state(TASK_UNINTERRUPTIBLE);
453 schedule_timeout(cfs_time_seconds(1) / 20); 453 schedule_timeout(cfs_time_seconds(1) / 20);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4daf828198c3..b2ba10d59f84 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1551,16 +1551,16 @@ LNetNIInit(lnet_pid_t requested_pid)
1551 1551
1552 rc = lnet_check_routes(); 1552 rc = lnet_check_routes();
1553 if (rc) 1553 if (rc)
1554 goto err_destory_routes; 1554 goto err_destroy_routes;
1555 1555
1556 rc = lnet_rtrpools_alloc(im_a_router); 1556 rc = lnet_rtrpools_alloc(im_a_router);
1557 if (rc) 1557 if (rc)
1558 goto err_destory_routes; 1558 goto err_destroy_routes;
1559 } 1559 }
1560 1560
1561 rc = lnet_acceptor_start(); 1561 rc = lnet_acceptor_start();
1562 if (rc) 1562 if (rc)
1563 goto err_destory_routes; 1563 goto err_destroy_routes;
1564 1564
1565 the_lnet.ln_refcount = 1; 1565 the_lnet.ln_refcount = 1;
1566 /* Now I may use my own API functions... */ 1566 /* Now I may use my own API functions... */
@@ -1587,7 +1587,7 @@ err_stop_ping:
1587err_acceptor_stop: 1587err_acceptor_stop:
1588 the_lnet.ln_refcount = 0; 1588 the_lnet.ln_refcount = 0;
1589 lnet_acceptor_stop(); 1589 lnet_acceptor_stop();
1590err_destory_routes: 1590err_destroy_routes:
1591 if (!the_lnet.ln_nis_from_mod_params) 1591 if (!the_lnet.ln_nis_from_mod_params)
1592 lnet_destroy_routes(); 1592 lnet_destroy_routes();
1593err_shutdown_lndnis: 1593err_shutdown_lndnis:
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index b430046dc294..eb796a86e6ab 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -271,21 +271,3 @@ lnet_me_unlink(lnet_me_t *me)
271 lnet_res_lh_invalidate(&me->me_lh); 271 lnet_res_lh_invalidate(&me->me_lh);
272 lnet_me_free(me); 272 lnet_me_free(me);
273} 273}
274
275#if 0
276static void
277lib_me_dump(lnet_me_t *me)
278{
279 CWARN("Match Entry %p (%#llx)\n", me,
280 me->me_lh.lh_cookie);
281
282 CWARN("\tMatch/Ignore\t= %016lx / %016lx\n",
283 me->me_match_bits, me->me_ignore_bits);
284
285 CWARN("\tMD\t= %p\n", me->md);
286 CWARN("\tprev\t= %p\n",
287 list_entry(me->me_list.prev, lnet_me_t, me_list));
288 CWARN("\tnext\t= %p\n",
289 list_entry(me->me_list.next, lnet_me_t, me_list));
290}
291#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 48e6f8f2392f..f3dd6e42f4d4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -192,6 +192,7 @@ lnet_copy_iov2iter(struct iov_iter *to,
192 left = siov->iov_len - soffset; 192 left = siov->iov_len - soffset;
193 do { 193 do {
194 size_t n, copy = left; 194 size_t n, copy = left;
195
195 LASSERT(nsiov > 0); 196 LASSERT(nsiov > 0);
196 197
197 if (copy > nob) 198 if (copy > nob)
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index a6d7a6159b8f..a9fe3e69daae 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -193,7 +193,7 @@ add_nidrange(const struct cfs_lstr *src,
193 struct netstrfns *nf; 193 struct netstrfns *nf;
194 struct nidrange *nr; 194 struct nidrange *nr;
195 int endlen; 195 int endlen;
196 unsigned netnum; 196 unsigned int netnum;
197 197
198 if (src->ls_len >= LNET_NIDSTR_SIZE) 198 if (src->ls_len >= LNET_NIDSTR_SIZE)
199 return NULL; 199 return NULL;
@@ -247,10 +247,8 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
247{ 247{
248 struct cfs_lstr addrrange; 248 struct cfs_lstr addrrange;
249 struct cfs_lstr net; 249 struct cfs_lstr net;
250 struct cfs_lstr tmp;
251 struct nidrange *nr; 250 struct nidrange *nr;
252 251
253 tmp = *src;
254 if (!cfs_gettok(src, '@', &addrrange)) 252 if (!cfs_gettok(src, '@', &addrrange))
255 goto failed; 253 goto failed;
256 254
@@ -1156,7 +1154,7 @@ EXPORT_SYMBOL(libcfs_nid2str_r);
1156static struct netstrfns * 1154static struct netstrfns *
1157libcfs_str2net_internal(const char *str, __u32 *net) 1155libcfs_str2net_internal(const char *str, __u32 *net)
1158{ 1156{
1159 struct netstrfns *uninitialized_var(nf); 1157 struct netstrfns *nf = NULL;
1160 int nob; 1158 int nob;
1161 unsigned int netnum; 1159 unsigned int netnum;
1162 int i; 1160 int i;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 063ad55ec950..8afa0abf15cd 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -903,6 +903,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
903{ 903{
904 lnet_rc_data_t *rcd = NULL; 904 lnet_rc_data_t *rcd = NULL;
905 lnet_ping_info_t *pi; 905 lnet_ping_info_t *pi;
906 lnet_md_t md;
906 int rc; 907 int rc;
907 int i; 908 int i;
908 909
@@ -925,15 +926,15 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
925 } 926 }
926 rcd->rcd_pinginfo = pi; 927 rcd->rcd_pinginfo = pi;
927 928
929 md.start = pi;
930 md.user_ptr = rcd;
931 md.length = LNET_PINGINFO_SIZE;
932 md.threshold = LNET_MD_THRESH_INF;
933 md.options = LNET_MD_TRUNCATE;
934 md.eq_handle = the_lnet.ln_rc_eqh;
935
928 LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh)); 936 LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
929 rc = LNetMDBind((lnet_md_t){.start = pi, 937 rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
930 .user_ptr = rcd,
931 .length = LNET_PINGINFO_SIZE,
932 .threshold = LNET_MD_THRESH_INF,
933 .options = LNET_MD_TRUNCATE,
934 .eq_handle = the_lnet.ln_rc_eqh},
935 LNET_UNLINK,
936 &rcd->rcd_mdh);
937 if (rc < 0) { 938 if (rc < 0) {
938 CERROR("Can't bind MD: %d\n", rc); 939 CERROR("Can't bind MD: %d\n", rc);
939 goto out; 940 goto out;
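The router.c change introduces a named `lnet_md_t md` and fills it field by field instead of handing a compound literal to LNetMDBind(); the call itself shrinks to one line, as the hunk shows:

    md.start     = pi;
    md.user_ptr  = rcd;
    md.length    = LNET_PINGINFO_SIZE;
    md.threshold = LNET_MD_THRESH_INF;
    md.options   = LNET_MD_TRUNCATE;
    md.eq_handle = the_lnet.ln_rc_eqh;

    rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);

LNetMDBind() takes the descriptor by value, so the stack-local is enough, and the call site loses most of its nesting.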
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b20c5d394e3b..67b460f41d6e 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -44,6 +44,10 @@ static int brw_inject_errors;
44module_param(brw_inject_errors, int, 0644); 44module_param(brw_inject_errors, int, 0644);
45MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); 45MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
46 46
47#define BRW_POISON 0xbeefbeefbeefbeefULL
48#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
49#define BRW_MSIZE sizeof(u64)
50
47static void 51static void
48brw_client_fini(struct sfw_test_instance *tsi) 52brw_client_fini(struct sfw_test_instance *tsi)
49{ 53{
@@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi)
67{ 71{
68 struct sfw_session *sn = tsi->tsi_batch->bat_session; 72 struct sfw_session *sn = tsi->tsi_batch->bat_session;
69 int flags; 73 int flags;
74 int off;
70 int npg; 75 int npg;
71 int len; 76 int len;
72 int opc; 77 int opc;
@@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi)
87 * but we have to keep it for compatibility 92 * but we have to keep it for compatibility
88 */ 93 */
89 len = npg * PAGE_SIZE; 94 len = npg * PAGE_SIZE;
95 off = 0;
90 } else { 96 } else {
91 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; 97 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
92 98
@@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi)
99 opc = breq->blk_opc; 105 opc = breq->blk_opc;
100 flags = breq->blk_flags; 106 flags = breq->blk_flags;
101 len = breq->blk_len; 107 len = breq->blk_len;
102 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 108 off = breq->blk_offset & ~PAGE_MASK;
109 npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
103 } 110 }
104 111
112 if (off % BRW_MSIZE)
113 return -EINVAL;
114
105 if (npg > LNET_MAX_IOV || npg <= 0) 115 if (npg > LNET_MAX_IOV || npg <= 0)
106 return -EINVAL; 116 return -EINVAL;
107 117
@@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi)
114 124
115 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { 125 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
116 bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid), 126 bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
117 npg, len, opc == LST_BRW_READ); 127 off, npg, len, opc == LST_BRW_READ);
118 if (!bulk) { 128 if (!bulk) {
119 brw_client_fini(tsi); 129 brw_client_fini(tsi);
120 return -ENOMEM; 130 return -ENOMEM;
@@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi)
126 return 0; 136 return 0;
127} 137}
128 138
129#define BRW_POISON 0xbeefbeefbeefbeefULL 139int brw_inject_one_error(void)
130#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
131#define BRW_MSIZE sizeof(__u64)
132
133static int
134brw_inject_one_error(void)
135{ 140{
136 struct timespec64 ts; 141 struct timespec64 ts;
137 142
@@ -147,12 +152,13 @@ brw_inject_one_error(void)
147} 152}
148 153
149static void 154static void
150brw_fill_page(struct page *pg, int pattern, __u64 magic) 155brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
151{ 156{
152 char *addr = page_address(pg); 157 char *addr = page_address(pg) + off;
153 int i; 158 int i;
154 159
155 LASSERT(addr); 160 LASSERT(addr);
161 LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
156 162
157 if (pattern == LST_BRW_CHECK_NONE) 163 if (pattern == LST_BRW_CHECK_NONE)
158 return; 164 return;
@@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
162 168
163 if (pattern == LST_BRW_CHECK_SIMPLE) { 169 if (pattern == LST_BRW_CHECK_SIMPLE) {
164 memcpy(addr, &magic, BRW_MSIZE); 170 memcpy(addr, &magic, BRW_MSIZE);
165 addr += PAGE_SIZE - BRW_MSIZE; 171 if (len > BRW_MSIZE) {
166 memcpy(addr, &magic, BRW_MSIZE); 172 addr += PAGE_SIZE - BRW_MSIZE;
173 memcpy(addr, &magic, BRW_MSIZE);
174 }
167 return; 175 return;
168 } 176 }
169 177
170 if (pattern == LST_BRW_CHECK_FULL) { 178 if (pattern == LST_BRW_CHECK_FULL) {
171 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) 179 for (i = 0; i < len; i += BRW_MSIZE)
172 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); 180 memcpy(addr + i, &magic, BRW_MSIZE);
173 return; 181 return;
174 } 182 }
175 183
@@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
177} 185}
178 186
179static int 187static int
180brw_check_page(struct page *pg, int pattern, __u64 magic) 188brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
181{ 189{
182 char *addr = page_address(pg); 190 char *addr = page_address(pg) + off;
183 __u64 data = 0; /* make compiler happy */ 191 __u64 data = 0; /* make compiler happy */
184 int i; 192 int i;
185 193
186 LASSERT(addr); 194 LASSERT(addr);
195 LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
187 196
188 if (pattern == LST_BRW_CHECK_NONE) 197 if (pattern == LST_BRW_CHECK_NONE)
189 return 0; 198 return 0;
@@ -193,21 +202,21 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
193 if (data != magic) 202 if (data != magic)
194 goto bad_data; 203 goto bad_data;
195 204
196 addr += PAGE_SIZE - BRW_MSIZE; 205 if (len > BRW_MSIZE) {
197 data = *((__u64 *)addr); 206 addr += PAGE_SIZE - BRW_MSIZE;
198 if (data != magic) 207 data = *((__u64 *)addr);
199 goto bad_data; 208 if (data != magic)
200 209 goto bad_data;
210 }
201 return 0; 211 return 0;
202 } 212 }
203 213
204 if (pattern == LST_BRW_CHECK_FULL) { 214 if (pattern == LST_BRW_CHECK_FULL) {
205 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) { 215 for (i = 0; i < len; i += BRW_MSIZE) {
206 data = *(((__u64 *)addr) + i); 216 data = *(u64 *)(addr + i);
207 if (data != magic) 217 if (data != magic)
208 goto bad_data; 218 goto bad_data;
209 } 219 }
210
211 return 0; 220 return 0;
212 } 221 }
213 222
@@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
226 struct page *pg; 235 struct page *pg;
227 236
228 for (i = 0; i < bk->bk_niov; i++) { 237 for (i = 0; i < bk->bk_niov; i++) {
238 int off, len;
239
229 pg = bk->bk_iovs[i].bv_page; 240 pg = bk->bk_iovs[i].bv_page;
230 brw_fill_page(pg, pattern, magic); 241 off = bk->bk_iovs[i].bv_offset;
242 len = bk->bk_iovs[i].bv_len;
243 brw_fill_page(pg, off, len, pattern, magic);
231 } 244 }
232} 245}
233 246
@@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
238 struct page *pg; 251 struct page *pg;
239 252
240 for (i = 0; i < bk->bk_niov; i++) { 253 for (i = 0; i < bk->bk_niov; i++) {
254 int off, len;
255
241 pg = bk->bk_iovs[i].bv_page; 256 pg = bk->bk_iovs[i].bv_page;
242 if (brw_check_page(pg, pattern, magic)) { 257 off = bk->bk_iovs[i].bv_offset;
258 len = bk->bk_iovs[i].bv_len;
259 if (brw_check_page(pg, off, len, pattern, magic)) {
243 CERROR("Bulk page %p (%d/%d) is corrupted!\n", 260 CERROR("Bulk page %p (%d/%d) is corrupted!\n",
244 pg, i, bk->bk_niov); 261 pg, i, bk->bk_niov);
245 return 1; 262 return 1;
@@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
276 len = npg * PAGE_SIZE; 293 len = npg * PAGE_SIZE;
277 } else { 294 } else {
278 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; 295 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
296 int off;
279 297
280 /* 298 /*
281 * I should never get this step if it's unknown feature 299 * I should never get this step if it's unknown feature
@@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
286 opc = breq->blk_opc; 304 opc = breq->blk_opc;
287 flags = breq->blk_flags; 305 flags = breq->blk_flags;
288 len = breq->blk_len; 306 len = breq->blk_len;
289 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 307 off = breq->blk_offset;
308 npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
290 } 309 }
291 310
292 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); 311 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
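The brw_test.c hunks add support for a byte offset into the first bulk page: the offset must be BRW_MSIZE-aligned, it counts toward the number of pages needed, and brw_fill_page()/brw_check_page() now take (off, len) rather than assuming a whole page. A small self-contained sketch of the page-count arithmetic (hypothetical helper; the constants mirror the kernel's):

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define BRW_MSIZE  sizeof(unsigned long long)

    /* pages needed for 'len' bytes starting 'offset' bytes into the
     * first page; returns 0 for a misaligned offset (the caller would
     * report -EINVAL) */
    static size_t brw_pages_needed(size_t len, size_t offset)
    {
            size_t off = offset & (PAGE_SIZE - 1);

            if (off % BRW_MSIZE)
                    return 0;

            return (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }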
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index b786f8b4a73d..94383023c1be 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
315static int 315static int
316lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) 316lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
317{ 317{
318 unsigned feats; 318 unsigned int feats;
319 int rc; 319 int rc;
320 char *name; 320 char *name;
321 321
@@ -742,6 +742,10 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
742 PAGE_SIZE - sizeof(struct lstcon_test))) 742 PAGE_SIZE - sizeof(struct lstcon_test)))
743 return -EINVAL; 743 return -EINVAL;
744 744
745 /* Enforce zero parameter length if there's no parameter */
746 if (!args->lstio_tes_param && args->lstio_tes_param_len)
747 return -EINVAL;
748
745 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); 749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
746 if (!batch_name) 750 if (!batch_name)
747 return rc; 751 return rc;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 55afb53b0743..994422c62487 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -86,8 +86,9 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
86} 86}
87 87
88static int 88static int
89lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats, 89lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
90 int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc) 90 int bulk_npg, int bulk_len, int embedded,
91 struct lstcon_rpc *crpc)
91{ 92{
92 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, 93 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
93 feats, bulk_npg, bulk_len, 94 feats, bulk_npg, bulk_len,
@@ -111,7 +112,7 @@ lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
111} 112}
112 113
113static int 114static int
114lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats, 115lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
115 int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp) 116 int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
116{ 117{
117 struct lstcon_rpc *crpc = NULL; 118 struct lstcon_rpc *crpc = NULL;
@@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
292 293
293 spin_lock(&rpc->crpc_lock); 294 spin_lock(&rpc->crpc_lock);
294 295
295 if (!crpc->crp_posted || /* not posted */ 296 if (!crpc->crp_posted || /* not posted */
296 crpc->crp_stamp) { /* rpc done or aborted already */ 297 crpc->crp_stamp) { /* rpc done or aborted already */
297 if (!crpc->crp_stamp) { 298 if (!crpc->crp_stamp) {
298 crpc->crp_stamp = cfs_time_current(); 299 crpc->crp_stamp = cfs_time_current();
299 crpc->crp_status = -EINTR; 300 crpc->crp_status = -EINTR;
@@ -589,7 +590,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
589 590
590int 591int
591lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, 592lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
592 unsigned feats, struct lstcon_rpc **crpc) 593 unsigned int feats, struct lstcon_rpc **crpc)
593{ 594{
594 struct srpc_mksn_reqst *msrq; 595 struct srpc_mksn_reqst *msrq;
595 struct srpc_rmsn_reqst *rsrq; 596 struct srpc_rmsn_reqst *rsrq;
@@ -627,7 +628,8 @@ lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
627} 628}
628 629
629int 630int
630lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) 631lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats,
632 struct lstcon_rpc **crpc)
631{ 633{
632 struct srpc_debug_reqst *drq; 634 struct srpc_debug_reqst *drq;
633 int rc; 635 int rc;
@@ -645,7 +647,7 @@ lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **c
645} 647}
646 648
647int 649int
648lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, 650lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
649 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc) 651 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
650{ 652{
651 struct lstcon_batch *batch; 653 struct lstcon_batch *batch;
@@ -678,7 +680,8 @@ lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
678} 680}
679 681
680int 682int
681lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) 683lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
684 struct lstcon_rpc **crpc)
682{ 685{
683 struct srpc_stat_reqst *srq; 686 struct srpc_stat_reqst *srq;
684 int rc; 687 int rc;
@@ -776,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
776} 779}
777 780
778static int 781static int
779lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) 782lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
783 struct srpc_test_reqst *req)
780{ 784{
781 struct test_bulk_req *brq = &req->tsr_u.bulk_v0; 785 struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
782 786
@@ -789,20 +793,21 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req
789} 793}
790 794
791static int 795static int
792lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) 796lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client,
797 struct srpc_test_reqst *req)
793{ 798{
794 struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1; 799 struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
795 800
796 brq->blk_opc = param->blk_opc; 801 brq->blk_opc = param->blk_opc;
797 brq->blk_flags = param->blk_flags; 802 brq->blk_flags = param->blk_flags;
798 brq->blk_len = param->blk_size; 803 brq->blk_len = param->blk_size;
799 brq->blk_offset = 0; /* reserved */ 804 brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;
800 805
801 return 0; 806 return 0;
802} 807}
803 808
804int 809int
805lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, 810lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
806 struct lstcon_test *test, struct lstcon_rpc **crpc) 811 struct lstcon_test *test, struct lstcon_rpc **crpc)
807{ 812{
808 struct lstcon_group *sgrp = test->tes_src_grp; 813 struct lstcon_group *sgrp = test->tes_src_grp;
@@ -897,7 +902,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
897 &test->tes_param[0], trq); 902 &test->tes_param[0], trq);
898 } else { 903 } else {
899 rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *) 904 rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
900 &test->tes_param[0], trq); 905 &test->tes_param[0],
906 trq->tsr_is_client, trq);
901 } 907 }
902 908
903 break; 909 break;
@@ -1084,7 +1090,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
1084 struct lstcon_ndlink *ndl; 1090 struct lstcon_ndlink *ndl;
1085 struct lstcon_node *nd; 1091 struct lstcon_node *nd;
1086 struct lstcon_rpc *rpc; 1092 struct lstcon_rpc *rpc;
1087 unsigned feats; 1093 unsigned int feats;
1088 int rc; 1094 int rc;
1089 1095
1090 /* Creating session RPG for list of nodes */ 1096 /* Creating session RPG for list of nodes */
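In conrpc.c the v1 bulk request stops zeroing the reserved blk_offset field and instead carries the offset configured for whichever side the request is built for; the selection is the one-liner from the hunk:

    /* client and server may be given different starting offsets */
    brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;

The rest of the file is line wrapping and `unsigned` to `unsigned int` cleanups.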
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7ec6fc96959e..e629e87c461c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -78,8 +78,8 @@ struct lstcon_rpc_trans {
78 struct list_head tas_olink; /* link chain on owner list */ 78 struct list_head tas_olink; /* link chain on owner list */
79 struct list_head tas_link; /* link chain on global list */ 79 struct list_head tas_link; /* link chain on global list */
80 int tas_opc; /* operation code of transaction */ 80 int tas_opc; /* operation code of transaction */
81 unsigned tas_feats_updated; /* features mask is uptodate */ 81 unsigned int tas_feats_updated; /* features mask is uptodate */
82 unsigned tas_features; /* test features mask */ 82 unsigned int tas_features; /* test features mask */
83 wait_queue_head_t tas_waitq; /* wait queue head */ 83 wait_queue_head_t tas_waitq; /* wait queue head */
84 atomic_t tas_remaining; /* # of un-scheduled rpcs */ 84 atomic_t tas_remaining; /* # of un-scheduled rpcs */
85 struct list_head tas_rpcs_list; /* queued requests */ 85 struct list_head tas_rpcs_list; /* queued requests */
@@ -106,14 +106,16 @@ typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
106 lstcon_rpc_ent_t __user *); 106 lstcon_rpc_ent_t __user *);
107 107
108int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, 108int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
109 unsigned version, struct lstcon_rpc **crpc); 109 unsigned int version, struct lstcon_rpc **crpc);
110int lstcon_dbgrpc_prep(struct lstcon_node *nd, 110int lstcon_dbgrpc_prep(struct lstcon_node *nd,
111 unsigned version, struct lstcon_rpc **crpc); 111 unsigned int version, struct lstcon_rpc **crpc);
112int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 112int lstcon_batrpc_prep(struct lstcon_node *nd, int transop,
113 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc); 113 unsigned int version, struct lstcon_tsb_hdr *tsb,
114int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 114 struct lstcon_rpc **crpc);
115 struct lstcon_test *test, struct lstcon_rpc **crpc); 115int lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
116int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version, 116 unsigned int version, struct lstcon_test *test,
117 struct lstcon_rpc **crpc);
118int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version,
117 struct lstcon_rpc **crpc); 119 struct lstcon_rpc **crpc);
118void lstcon_rpc_put(struct lstcon_rpc *crpc); 120void lstcon_rpc_put(struct lstcon_rpc *crpc);
119int lstcon_rpc_trans_prep(struct list_head *translist, 121int lstcon_rpc_trans_prep(struct list_head *translist,
@@ -129,7 +131,8 @@ int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
129 lstcon_rpc_readent_func_t readent); 131 lstcon_rpc_readent_func_t readent);
130void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error); 132void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
131void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans); 133void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
132void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req); 134void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
135 struct lstcon_rpc *req);
133int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout); 136int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
134int lstcon_rpc_pinger_start(void); 137int lstcon_rpc_pinger_start(void);
135void lstcon_rpc_pinger_stop(void); 138void lstcon_rpc_pinger_stop(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a0fcbf3bcc95..1456d2395cc9 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
86 if (!create) 86 if (!create)
87 return -ENOENT; 87 return -ENOENT;
88 88
89 LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); 89 LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
90 if (!*ndpp) 90 if (!*ndpp)
91 return -ENOMEM; 91 return -ENOMEM;
92 92
@@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd)
131 list_del(&ndl->ndl_link); 131 list_del(&ndl->ndl_link);
132 list_del(&ndl->ndl_hlink); 132 list_del(&ndl->ndl_hlink);
133 133
134 LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); 134 LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
135} 135}
136 136
137static int 137static int
138lstcon_ndlink_find(struct list_head *hash, 138lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
139 lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create) 139 struct lstcon_ndlink **ndlpp, int create)
140{ 140{
141 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; 141 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
142 struct lstcon_ndlink *ndl; 142 struct lstcon_ndlink *ndl;
@@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp)
230 grp->grp_ref++; 230 grp->grp_ref++;
231} 231}
232 232
233static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *); 233static void lstcon_group_ndlink_release(struct lstcon_group *,
234 struct lstcon_ndlink *);
234 235
235static void 236static void
236lstcon_group_drain(struct lstcon_group *grp, int keep) 237lstcon_group_drain(struct lstcon_group *grp, int keep)
@@ -397,7 +398,8 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
397static int 398static int
398lstcon_group_nodes_add(struct lstcon_group *grp, 399lstcon_group_nodes_add(struct lstcon_group *grp,
399 int count, lnet_process_id_t __user *ids_up, 400 int count, lnet_process_id_t __user *ids_up,
400 unsigned *featp, struct list_head __user *result_up) 401 unsigned int *featp,
402 struct list_head __user *result_up)
401{ 403{
402 struct lstcon_rpc_trans *trans; 404 struct lstcon_rpc_trans *trans;
403 struct lstcon_ndlink *ndl; 405 struct lstcon_ndlink *ndl;
@@ -542,7 +544,8 @@ lstcon_group_add(char *name)
542 544
543int 545int
544lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, 546lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
545 unsigned *featp, struct list_head __user *result_up) 547 unsigned int *featp,
548 struct list_head __user *result_up)
546{ 549{
547 struct lstcon_group *grp; 550 struct lstcon_group *grp;
548 int rc; 551 int rc;
@@ -820,7 +823,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
820 823
821 lstcon_group_decref(grp); 824 lstcon_group_decref(grp);
822 825
823 return 0; 826 return rc;
824} 827}
825 828
826static int 829static int
@@ -1181,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
1181} 1184}
1182 1185
1183static int 1186static int
1184lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up) 1187lstcon_test_nodes_add(struct lstcon_test *test,
1188 struct list_head __user *result_up)
1185{ 1189{
1186 struct lstcon_rpc_trans *trans; 1190 struct lstcon_rpc_trans *trans;
1187 struct lstcon_group *grp; 1191 struct lstcon_group *grp;
@@ -1364,7 +1368,8 @@ out:
1364} 1368}
1365 1369
1366static int 1370static int
1367lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp) 1371lstcon_test_find(struct lstcon_batch *batch, int idx,
1372 struct lstcon_test **testpp)
1368{ 1373{
1369 struct lstcon_test *test; 1374 struct lstcon_test *test;
1370 1375
@@ -1702,7 +1707,7 @@ lstcon_new_session_id(lst_sid_t *sid)
1702} 1707}
1703 1708
1704int 1709int
1705lstcon_session_new(char *name, int key, unsigned feats, 1710lstcon_session_new(char *name, int key, unsigned int feats,
1706 int timeout, int force, lst_sid_t __user *sid_up) 1711 int timeout, int force, lst_sid_t __user *sid_up)
1707{ 1712{
1708 int rc = 0; 1713 int rc = 0;
@@ -1868,7 +1873,7 @@ lstcon_session_end(void)
1868} 1873}
1869 1874
1870int 1875int
1871lstcon_session_feats_check(unsigned feats) 1876lstcon_session_feats_check(unsigned int feats)
1872{ 1877{
1873 int rc = 0; 1878 int rc = 0;
1874 1879
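Besides the wrapping and `unsigned int` cleanups, console.c picks up two functional nits: allocation and free sizes switch to the sizeof(*ptr) idiom, and lstcon_group_info() stops discarding its status. Per the hunk, the tail of that function now reads:

    	lstcon_group_decref(grp);

    	/* was "return 0;", which threw away any error collected in rc */
    	return rc;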
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 78388a611c22..5dc1de48a10e 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -92,14 +92,16 @@ struct lstcon_batch {
92 int bat_ntest; /* # of test */ 92 int bat_ntest; /* # of test */
93 int bat_state; /* state of the batch */ 93 int bat_state; /* state of the batch */
94 int bat_arg; /* parameter for run|stop, timeout 94 int bat_arg; /* parameter for run|stop, timeout
95 * for run, force for stop */ 95 * for run, force for stop
96 */
96 char bat_name[LST_NAME_SIZE];/* name of batch */ 97 char bat_name[LST_NAME_SIZE];/* name of batch */
97 98
98 struct list_head bat_test_list; /* list head of tests (struct lstcon_test) 99 struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
99 */ 100 */
100 struct list_head bat_trans_list; /* list head of transaction */ 101 struct list_head bat_trans_list; /* list head of transaction */
101 struct list_head bat_cli_list; /* list head of client nodes 102 struct list_head bat_cli_list; /* list head of client nodes
102 * (struct lstcon_node) */ 103 * (struct lstcon_node)
104 */
103 struct list_head *bat_cli_hash; /* hash table of client nodes */ 105 struct list_head *bat_cli_hash; /* hash table of client nodes */
104 struct list_head bat_srv_list; /* list head of server nodes */ 106 struct list_head bat_srv_list; /* list head of server nodes */
105 struct list_head *bat_srv_hash; /* hash table of server nodes */ 107 struct list_head *bat_srv_hash; /* hash table of server nodes */
@@ -144,13 +146,14 @@ struct lstcon_session {
144 int ses_timeout; /* timeout in seconds */ 146 int ses_timeout; /* timeout in seconds */
145 time64_t ses_laststamp; /* last operation stamp (seconds) 147 time64_t ses_laststamp; /* last operation stamp (seconds)
146 */ 148 */
147 unsigned ses_features; /* tests features of the session 149 unsigned int ses_features; /* tests features of the session
148 */ 150 */
149 unsigned ses_feats_updated:1; /* features are synced with 151 unsigned int ses_feats_updated:1; /* features are synced with
150 * remote test nodes */ 152 * remote test nodes
151 unsigned ses_force:1; /* force creating */ 153 */
152 unsigned ses_shutdown:1; /* session is shutting down */ 154 unsigned int ses_force:1; /* force creating */
153 unsigned ses_expired:1; /* console is timedout */ 155 unsigned int ses_shutdown:1; /* session is shutting down */
156 unsigned int ses_expired:1; /* console is timedout */
154 __u64 ses_id_cookie; /* batch id cookie */ 157 __u64 ses_id_cookie; /* batch id cookie */
155 char ses_name[LST_NAME_SIZE];/* session name */ 158 char ses_name[LST_NAME_SIZE];/* session name */
156 struct lstcon_rpc_trans *ses_ping; /* session pinger */ 159 struct lstcon_rpc_trans *ses_ping; /* session pinger */
@@ -188,14 +191,14 @@ int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
188int lstcon_console_init(void); 191int lstcon_console_init(void);
189int lstcon_console_fini(void); 192int lstcon_console_fini(void);
190int lstcon_session_match(lst_sid_t sid); 193int lstcon_session_match(lst_sid_t sid);
191int lstcon_session_new(char *name, int key, unsigned version, 194int lstcon_session_new(char *name, int key, unsigned int version,
192 int timeout, int flags, lst_sid_t __user *sid_up); 195 int timeout, int flags, lst_sid_t __user *sid_up);
193int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key, 196int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
194 unsigned __user *verp, lstcon_ndlist_ent_t __user *entp, 197 unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
195 char __user *name_up, int len); 198 char __user *name_up, int len);
196int lstcon_session_end(void); 199int lstcon_session_end(void);
197int lstcon_session_debug(int timeout, struct list_head __user *result_up); 200int lstcon_session_debug(int timeout, struct list_head __user *result_up);
198int lstcon_session_feats_check(unsigned feats); 201int lstcon_session_feats_check(unsigned int feats);
199int lstcon_batch_debug(int timeout, char *name, 202int lstcon_batch_debug(int timeout, char *name,
200 int client, struct list_head __user *result_up); 203 int client, struct list_head __user *result_up);
201int lstcon_group_debug(int timeout, char *name, 204int lstcon_group_debug(int timeout, char *name,
@@ -207,7 +210,7 @@ int lstcon_group_del(char *name);
207int lstcon_group_clean(char *name, int args); 210int lstcon_group_clean(char *name, int args);
208int lstcon_group_refresh(char *name, struct list_head __user *result_up); 211int lstcon_group_refresh(char *name, struct list_head __user *result_up);
209int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up, 212int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
210 unsigned *featp, struct list_head __user *result_up); 213 unsigned int *featp, struct list_head __user *result_up);
211int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up, 214int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
212 struct list_head __user *result_up); 215 struct list_head __user *result_up);
213int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up, 216int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index abbd6287b4bd..48dcc330dc9b 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -131,7 +131,8 @@ sfw_find_test_case(int id)
131} 131}
132 132
133static int 133static int
134sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops) 134sfw_register_test(struct srpc_service *service,
135 struct sfw_test_client_ops *cliops)
135{ 136{
136 struct sfw_test_case *tsc; 137 struct sfw_test_case *tsc;
137 138
@@ -254,7 +255,7 @@ sfw_session_expired(void *data)
254 255
255static inline void 256static inline void
256sfw_init_session(struct sfw_session *sn, lst_sid_t sid, 257sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
257 unsigned features, const char *name) 258 unsigned int features, const char *name)
258{ 259{
259 struct stt_timer *timer = &sn->sn_timer; 260 struct stt_timer *timer = &sn->sn_timer;
260 261
@@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
469} 470}
470 471
471static int 472static int
472sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply) 473sfw_remove_session(struct srpc_rmsn_reqst *request,
474 struct srpc_rmsn_reply *reply)
473{ 475{
474 struct sfw_session *sn = sfw_data.fw_session; 476 struct sfw_session *sn = sfw_data.fw_session;
475 477
@@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl
501} 503}
502 504
503static int 505static int
504sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply) 506sfw_debug_session(struct srpc_debug_reqst *request,
507 struct srpc_debug_reply *reply)
505{ 508{
506 struct sfw_session *sn = sfw_data.fw_session; 509 struct sfw_session *sn = sfw_data.fw_session;
507 510
@@ -897,7 +900,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc)
897 900
898int 901int
899sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer, 902sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
900 unsigned features, int nblk, int blklen, 903 unsigned int features, int nblk, int blklen,
901 struct srpc_client_rpc **rpcpp) 904 struct srpc_client_rpc **rpcpp)
902{ 905{
903 struct srpc_client_rpc *rpc = NULL; 906 struct srpc_client_rpc *rpc = NULL;
@@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force)
1064} 1067}
1065 1068
1066static int 1069static int
1067sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply) 1070sfw_query_batch(struct sfw_batch *tsb, int testidx,
1071 struct srpc_batch_reply *reply)
1068{ 1072{
1069 struct sfw_test_instance *tsi; 1073 struct sfw_test_instance *tsi;
1070 1074
@@ -1101,7 +1105,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
1101 LASSERT(!rpc->srpc_bulk); 1105 LASSERT(!rpc->srpc_bulk);
1102 LASSERT(npages > 0 && npages <= LNET_MAX_IOV); 1106 LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
1103 1107
1104 rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); 1108 rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
1105 if (!rpc->srpc_bulk) 1109 if (!rpc->srpc_bulk)
1106 return -ENOMEM; 1110 return -ENOMEM;
1107 1111
@@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1179} 1183}
1180 1184
1181static int 1185static int
1182sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply) 1186sfw_control_batch(struct srpc_batch_reqst *request,
1187 struct srpc_batch_reply *reply)
1183{ 1188{
1184 struct sfw_session *sn = sfw_data.fw_session; 1189 struct sfw_session *sn = sfw_data.fw_session;
1185 int rc = 0; 1190 int rc = 0;
@@ -1225,7 +1230,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1225 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 1230 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1226 struct srpc_msg *reply = &rpc->srpc_replymsg; 1231 struct srpc_msg *reply = &rpc->srpc_replymsg;
1227 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg; 1232 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
1228 unsigned features = LST_FEATS_MASK; 1233 unsigned int features = LST_FEATS_MASK;
1229 int rc = 0; 1234 int rc = 0;
1230 1235
1231 LASSERT(!sfw_data.fw_active_srpc); 1236 LASSERT(!sfw_data.fw_active_srpc);
@@ -1375,7 +1380,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
1375 1380
1376struct srpc_client_rpc * 1381struct srpc_client_rpc *
1377sfw_create_rpc(lnet_process_id_t peer, int service, 1382sfw_create_rpc(lnet_process_id_t peer, int service,
1378 unsigned features, int nbulkiov, int bulklen, 1383 unsigned int features, int nbulkiov, int bulklen,
1379 void (*done)(struct srpc_client_rpc *), void *priv) 1384 void (*done)(struct srpc_client_rpc *), void *priv)
1380{ 1385{
1381 struct srpc_client_rpc *rpc = NULL; 1386 struct srpc_client_rpc *rpc = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9331ca4e3606..b9601b00a273 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -159,8 +159,8 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
159 159
160 ktime_get_real_ts64(&ts); 160 ktime_get_real_ts64(&ts);
161 CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, 161 CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
162 (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + 162 (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
163 (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); 163 (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
164} 164}
165 165
166static int 166static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index f5619d8744ef..ce9de8c9be57 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt)
84} 84}
85 85
86static int 86static int
87srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob) 87srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
88 int nob)
88{ 89{
89 nob = min_t(int, nob, PAGE_SIZE); 90 LASSERT(off < PAGE_SIZE);
91 LASSERT(nob > 0 && nob <= PAGE_SIZE);
90 92
91 LASSERT(nob > 0); 93 bk->bk_iovs[i].bv_offset = off;
92 LASSERT(i >= 0 && i < bk->bk_niov);
93
94 bk->bk_iovs[i].bv_offset = 0;
95 bk->bk_iovs[i].bv_page = pg; 94 bk->bk_iovs[i].bv_page = pg;
96 bk->bk_iovs[i].bv_len = nob; 95 bk->bk_iovs[i].bv_len = nob;
97 return nob; 96 return nob;
@@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk)
117} 116}
118 117
119struct srpc_bulk * 118struct srpc_bulk *
120srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) 119srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
120 unsigned int bulk_len, int sink)
121{ 121{
122 struct srpc_bulk *bk; 122 struct srpc_bulk *bk;
123 int i; 123 int i;
@@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
148 return NULL; 148 return NULL;
149 } 149 }
150 150
151 nob = srpc_add_bulk_page(bk, pg, i, bulk_len); 151 nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
152 bulk_off;
153 srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
152 bulk_len -= nob; 154 bulk_len -= nob;
155 bulk_off = 0;
153 } 156 }
154 157
155 return bk; 158 return bk;
@@ -693,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv)
693 696
694/* called with sv->sv_lock held */ 697/* called with sv->sv_lock held */
695static void 698static void
696srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) 699srpc_service_recycle_buffer(struct srpc_service_cd *scd,
700 struct srpc_buffer *buf)
697__must_hold(&scd->scd_lock) 701__must_hold(&scd->scd_lock)
698{ 702{
699 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { 703 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
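srpc_alloc_bulk() and srpc_add_bulk_page() now thread a starting offset through to the first iovec; only the first page carries it, and that page's length is clamped to the page boundary. A standalone sketch of the split the new loop performs (hypothetical function, not the kernel code):

    /* walk the per-page (offset, length) pairs produced for a transfer
     * of bulk_len bytes starting bulk_off bytes into the first page */
    static void split_bulk(unsigned int bulk_off, unsigned int bulk_len,
                           unsigned int page_size)
    {
            while (bulk_len) {
                    unsigned int end = bulk_off + bulk_len;
                    unsigned int nob = (end < page_size ? end : page_size)
                                       - bulk_off;

                    /* record (bulk_off, nob) for this page ... */
                    bulk_len -= nob;
                    bulk_off = 0;   /* all later pages start at offset 0 */
            }
    }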
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 4ab2ee264004..f353a634cc8e 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -113,7 +113,8 @@ struct srpc_join_reply {
113 __u32 join_status; /* returned status */ 113 __u32 join_status; /* returned status */
114 lst_sid_t join_sid; /* session id */ 114 lst_sid_t join_sid; /* session id */
115 __u32 join_timeout; /* # seconds' inactivity to 115 __u32 join_timeout; /* # seconds' inactivity to
116 * expire */ 116 * expire
117 */
117 char join_session[LST_NAME_SIZE]; /* session name */ 118 char join_session[LST_NAME_SIZE]; /* session name */
118} WIRE_ATTR; 119} WIRE_ATTR;
119 120
@@ -175,7 +176,7 @@ struct test_bulk_req_v1 {
175 __u16 blk_opc; /* bulk operation code */ 176 __u16 blk_opc; /* bulk operation code */
176 __u16 blk_flags; /* data check flags */ 177 __u16 blk_flags; /* data check flags */
177 __u32 blk_len; /* data length */ 178 __u32 blk_len; /* data length */
178 __u32 blk_offset; /* reserved: offset */ 179 __u32 blk_offset; /* offset */
179} WIRE_ATTR; 180} WIRE_ATTR;
180 181
181struct test_ping_req { 182struct test_ping_req {
@@ -190,7 +191,8 @@ struct srpc_test_reqst {
190 lst_bid_t tsr_bid; /* batch id */ 191 lst_bid_t tsr_bid; /* batch id */
191 __u32 tsr_service; /* test type: bulk|ping|... */ 192 __u32 tsr_service; /* test type: bulk|ping|... */
192 __u32 tsr_loop; /* test client loop count or 193 __u32 tsr_loop; /* test client loop count or
193 * # server buffers needed */ 194 * # server buffers needed
195 */
194 __u32 tsr_concur; /* concurrency of test */ 196 __u32 tsr_concur; /* concurrency of test */
195 __u8 tsr_is_client; /* is test client or not */ 197 __u8 tsr_is_client; /* is test client or not */
196 __u8 tsr_stop_onerr; /* stop on error */ 198 __u8 tsr_stop_onerr; /* stop on error */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index d033ac03d953..c8833a016b6d 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -131,7 +131,8 @@ srpc_service2reply(int service)
131 131
132enum srpc_event_type { 132enum srpc_event_type {
133 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) 133 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
134 * received */ 134 * received
135 */
135 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ 136 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
136 SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */ 137 SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
137 SRPC_REPLY_RCVD = 4, /* incoming reply received */ 138 SRPC_REPLY_RCVD = 4, /* incoming reply received */
@@ -295,7 +296,8 @@ struct srpc_service_cd {
295#define SFW_TEST_WI_MIN 256 296#define SFW_TEST_WI_MIN 256
296#define SFW_TEST_WI_MAX 2048 297#define SFW_TEST_WI_MAX 2048
297/* extra buffers for tolerating buggy peers, or unbalanced number 298/* extra buffers for tolerating buggy peers, or unbalanced number
298 * of peers between partitions */ 299 * of peers between partitions
300 */
299#define SFW_TEST_WI_EXTRA 64 301#define SFW_TEST_WI_EXTRA 64
300 302
301/* number of server workitems (mini-thread) for framework service */ 303/* number of server workitems (mini-thread) for framework service */
@@ -347,9 +349,11 @@ struct sfw_batch {
347 349
348struct sfw_test_client_ops { 350struct sfw_test_client_ops {
349 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test 351 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
350 * client */ 352 * client
353 */
351 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test 354 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
352 * client */ 355 * client
356 */
353 int (*tso_prep_rpc)(struct sfw_test_unit *tsu, 357 int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
354 lnet_process_id_t dest, 358 lnet_process_id_t dest,
355 struct srpc_client_rpc **rpc); /* prep a tests rpc */ 359 struct srpc_client_rpc **rpc); /* prep a tests rpc */
@@ -374,7 +378,8 @@ struct sfw_test_instance {
374 spinlock_t tsi_lock; /* serialize */ 378 spinlock_t tsi_lock; /* serialize */
375 unsigned int tsi_stopping:1; /* test is stopping */ 379 unsigned int tsi_stopping:1; /* test is stopping */
376 atomic_t tsi_nactive; /* # of active test 380 atomic_t tsi_nactive; /* # of active test
377 * unit */ 381 * unit
382 */
378 struct list_head tsi_units; /* test units */ 383 struct list_head tsi_units; /* test units */
379 struct list_head tsi_free_rpcs; /* free rpcs */ 384 struct list_head tsi_free_rpcs; /* free rpcs */
380 struct list_head tsi_active_rpcs; /* active rpcs */ 385 struct list_head tsi_active_rpcs; /* active rpcs */
@@ -386,8 +391,10 @@ struct sfw_test_instance {
386 } tsi_u; 391 } tsi_u;
387}; 392};
388 393
389/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of 394/*
390 * pages are not used */ 395 * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
396 * pages are not used
397 */
391#define SFW_MAX_CONCUR LST_MAX_CONCUR 398#define SFW_MAX_CONCUR LST_MAX_CONCUR
392#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t)) 399#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
393#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) 400#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
@@ -410,10 +417,10 @@ struct sfw_test_case {
410 417
411struct srpc_client_rpc * 418struct srpc_client_rpc *
412sfw_create_rpc(lnet_process_id_t peer, int service, 419sfw_create_rpc(lnet_process_id_t peer, int service,
413 unsigned features, int nbulkiov, int bulklen, 420 unsigned int features, int nbulkiov, int bulklen,
414 void (*done)(struct srpc_client_rpc *), void *priv); 421 void (*done)(struct srpc_client_rpc *), void *priv);
415int sfw_create_test_rpc(struct sfw_test_unit *tsu, 422int sfw_create_test_rpc(struct sfw_test_unit *tsu,
416 lnet_process_id_t peer, unsigned features, 423 lnet_process_id_t peer, unsigned int features,
417 int nblk, int blklen, struct srpc_client_rpc **rpc); 424 int nblk, int blklen, struct srpc_client_rpc **rpc);
418void sfw_abort_rpc(struct srpc_client_rpc *rpc); 425void sfw_abort_rpc(struct srpc_client_rpc *rpc);
419void sfw_post_rpc(struct srpc_client_rpc *rpc); 426void sfw_post_rpc(struct srpc_client_rpc *rpc);
@@ -434,8 +441,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
434void srpc_post_rpc(struct srpc_client_rpc *rpc); 441void srpc_post_rpc(struct srpc_client_rpc *rpc);
435void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why); 442void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
436void srpc_free_bulk(struct srpc_bulk *bk); 443void srpc_free_bulk(struct srpc_bulk *bk);
437struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg, 444struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
438 unsigned bulk_len, int sink); 445 unsigned int bulk_npg, unsigned int bulk_len,
446 int sink);
439int srpc_send_rpc(struct swi_workitem *wi); 447int srpc_send_rpc(struct swi_workitem *wi);
440int srpc_send_reply(struct srpc_server_rpc *rpc); 448int srpc_send_reply(struct srpc_server_rpc *rpc);
441int srpc_add_service(struct srpc_service *sv); 449int srpc_add_service(struct srpc_service *sv);
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index dcd22580b1f0..2fe692df19d0 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -46,16 +46,17 @@
46 * to cover a time period of 1024 seconds into the future before wrapping. 46 * to cover a time period of 1024 seconds into the future before wrapping.
47 */ 47 */
48#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ 48#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
49#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL) 49#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL)
50#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) 50#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
51#define STTIMER_NSLOTS (1 << 7) 51#define STTIMER_NSLOTS BIT(7)
52#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \ 52#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
53 (STTIMER_NSLOTS - 1))]) 53 (STTIMER_NSLOTS - 1))])
54 54
55static struct st_timer_data { 55static struct st_timer_data {
56 spinlock_t stt_lock; 56 spinlock_t stt_lock;
57 unsigned long stt_prev_slot; /* start time of the slot processed 57 unsigned long stt_prev_slot; /* start time of the slot processed
58 * previously */ 58 * previously
59 */
59 struct list_head stt_hash[STTIMER_NSLOTS]; 60 struct list_head stt_hash[STTIMER_NSLOTS];
60 int stt_shuttingdown; 61 int stt_shuttingdown;
61 wait_queue_head_t stt_waitq; 62 wait_queue_head_t stt_waitq;
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index edd72b926f81..999f250ceed0 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -74,7 +74,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
74 74
75 /* Zero out input range, this is not recovery yet. */ 75 /* Zero out input range, this is not recovery yet. */
76 in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE); 76 in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
77 range_init(in); 77 lu_seq_range_init(in);
78 78
79 ptlrpc_request_set_replen(req); 79 ptlrpc_request_set_replen(req);
80 80
@@ -112,25 +112,21 @@ static int seq_client_rpc(struct lu_client_seq *seq,
112 112
113 ptlrpc_at_set_req_timeout(req); 113 ptlrpc_at_set_req_timeout(req);
114 114
115 if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
116 mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
117 rc = ptlrpc_queue_wait(req); 115 rc = ptlrpc_queue_wait(req);
118 if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
119 mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
120 if (rc) 116 if (rc)
121 goto out_req; 117 goto out_req;
122 118
123 out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE); 119 out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
124 *output = *out; 120 *output = *out;
125 121
126 if (!range_is_sane(output)) { 122 if (!lu_seq_range_is_sane(output)) {
127 CERROR("%s: Invalid range received from server: " 123 CERROR("%s: Invalid range received from server: "
128 DRANGE "\n", seq->lcs_name, PRANGE(output)); 124 DRANGE "\n", seq->lcs_name, PRANGE(output));
129 rc = -EINVAL; 125 rc = -EINVAL;
130 goto out_req; 126 goto out_req;
131 } 127 }
132 128
133 if (range_is_exhausted(output)) { 129 if (lu_seq_range_is_exhausted(output)) {
134 CERROR("%s: Range received from server is exhausted: " 130 CERROR("%s: Range received from server is exhausted: "
135 DRANGE "]\n", seq->lcs_name, PRANGE(output)); 131 DRANGE "]\n", seq->lcs_name, PRANGE(output));
136 rc = -EINVAL; 132 rc = -EINVAL;
@@ -170,9 +166,9 @@ static int seq_client_alloc_seq(const struct lu_env *env,
170{ 166{
171 int rc; 167 int rc;
172 168
173 LASSERT(range_is_sane(&seq->lcs_space)); 169 LASSERT(lu_seq_range_is_sane(&seq->lcs_space));
174 170
175 if (range_is_exhausted(&seq->lcs_space)) { 171 if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
176 rc = seq_client_alloc_meta(env, seq); 172 rc = seq_client_alloc_meta(env, seq);
177 if (rc) { 173 if (rc) {
178 CERROR("%s: Can't allocate new meta-sequence, rc %d\n", 174 CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
@@ -185,7 +181,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
185 rc = 0; 181 rc = 0;
186 } 182 }
187 183
188 LASSERT(!range_is_exhausted(&seq->lcs_space)); 184 LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
189 *seqnr = seq->lcs_space.lsr_start; 185 *seqnr = seq->lcs_space.lsr_start;
190 seq->lcs_space.lsr_start += 1; 186 seq->lcs_space.lsr_start += 1;
191 187
@@ -320,7 +316,7 @@ void seq_client_flush(struct lu_client_seq *seq)
320 316
321 seq->lcs_space.lsr_index = -1; 317 seq->lcs_space.lsr_index = -1;
322 318
323 range_init(&seq->lcs_space); 319 lu_seq_range_init(&seq->lcs_space);
324 mutex_unlock(&seq->lcs_mutex); 320 mutex_unlock(&seq->lcs_mutex);
325} 321}
326EXPORT_SYMBOL(seq_client_flush); 322EXPORT_SYMBOL(seq_client_flush);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 3ed32d77f38b..97d4849c7199 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -83,7 +83,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
83 (unsigned long long *)&tmp.lsr_end); 83 (unsigned long long *)&tmp.lsr_end);
84 if (rc != 2) 84 if (rc != 2)
85 return -EINVAL; 85 return -EINVAL;
86 if (!range_is_sane(&tmp) || range_is_zero(&tmp) || 86 if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) ||
87 tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end) 87 tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
88 return -EINVAL; 88 return -EINVAL;
89 *range = tmp; 89 *range = tmp;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 0100a935f4ff..11f697496180 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -143,7 +143,7 @@ restart_fixup:
143 c_range = &f_curr->fce_range; 143 c_range = &f_curr->fce_range;
144 n_range = &f_next->fce_range; 144 n_range = &f_next->fce_range;
145 145
146 LASSERT(range_is_sane(c_range)); 146 LASSERT(lu_seq_range_is_sane(c_range));
147 if (&f_next->fce_list == head) 147 if (&f_next->fce_list == head)
148 break; 148 break;
149 149
@@ -358,7 +358,7 @@ struct fld_cache_entry
358{ 358{
359 struct fld_cache_entry *f_new; 359 struct fld_cache_entry *f_new;
360 360
361 LASSERT(range_is_sane(range)); 361 LASSERT(lu_seq_range_is_sane(range));
362 362
363 f_new = kzalloc(sizeof(*f_new), GFP_NOFS); 363 f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
364 if (!f_new) 364 if (!f_new)
@@ -503,7 +503,7 @@ int fld_cache_lookup(struct fld_cache *cache,
503 } 503 }
504 504
505 prev = flde; 505 prev = flde;
506 if (range_within(&flde->fce_range, seq)) { 506 if (lu_seq_range_within(&flde->fce_range, seq)) {
507 *range = flde->fce_range; 507 *range = flde->fce_range;
508 508
509 cache->fci_stat.fst_cache++; 509 cache->fci_stat.fst_cache++;
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 08eaec735d6f..4a7f0b71c48d 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -62,11 +62,6 @@
62#include "../include/lustre_req_layout.h" 62#include "../include/lustre_req_layout.h"
63#include "../include/lustre_fld.h" 63#include "../include/lustre_fld.h"
64 64
65enum {
66 LUSTRE_FLD_INIT = 1 << 0,
67 LUSTRE_FLD_RUN = 1 << 1
68};
69
70struct fld_stats { 65struct fld_stats {
71 __u64 fst_count; 66 __u64 fst_count;
72 __u64 fst_cache; 67 __u64 fst_cache;
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 0de72b717ce5..4cade7a16800 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -159,11 +159,6 @@ int fld_client_add_target(struct lu_client_fld *fld,
159 LASSERT(name); 159 LASSERT(name);
160 LASSERT(tar->ft_srv || tar->ft_exp); 160 LASSERT(tar->ft_srv || tar->ft_exp);
161 161
162 if (fld->lcf_flags != LUSTRE_FLD_INIT) {
163 CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
164 fld->lcf_name, name, tar->ft_idx);
165 return 0;
166 }
167 CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", 162 CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
168 fld->lcf_name, name, tar->ft_idx); 163 fld->lcf_name, name, tar->ft_idx);
169 164
@@ -282,7 +277,6 @@ int fld_client_init(struct lu_client_fld *fld,
282 fld->lcf_count = 0; 277 fld->lcf_count = 0;
283 spin_lock_init(&fld->lcf_lock); 278 spin_lock_init(&fld->lcf_lock);
284 fld->lcf_hash = &fld_hash[hash]; 279 fld->lcf_hash = &fld_hash[hash];
285 fld->lcf_flags = LUSTRE_FLD_INIT;
286 INIT_LIST_HEAD(&fld->lcf_targets); 280 INIT_LIST_HEAD(&fld->lcf_targets);
287 281
288 cache_size = FLD_CLIENT_CACHE_SIZE / 282 cache_size = FLD_CLIENT_CACHE_SIZE /
@@ -421,8 +415,6 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
421 struct lu_fld_target *target; 415 struct lu_fld_target *target;
422 int rc; 416 int rc;
423 417
424 fld->lcf_flags |= LUSTRE_FLD_RUN;
425
426 rc = fld_cache_lookup(fld->lcf_cache, seq, &res); 418 rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
427 if (rc == 0) { 419 if (rc == 0) {
428 *mds = res.lsr_index; 420 *mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 89292c93dcd5..dc685610c4c4 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -59,10 +59,6 @@
59 * read/write system call it is associated with the single user 59 * read/write system call it is associated with the single user
60 * thread, that issued the system call). 60 * thread, that issued the system call).
61 * 61 *
62 * - cl_req represents a collection of pages for a transfer. cl_req is
63 * constructed by req-forming engine that tries to saturate
64 * transport with large and continuous transfers.
65 *
66 * Terminology 62 * Terminology
67 * 63 *
68 * - to avoid confusion high-level I/O operation like read or write system 64 * - to avoid confusion high-level I/O operation like read or write system
@@ -103,11 +99,8 @@
103struct inode; 99struct inode;
104 100
105struct cl_device; 101struct cl_device;
106struct cl_device_operations;
107 102
108struct cl_object; 103struct cl_object;
109struct cl_object_page_operations;
110struct cl_object_lock_operations;
111 104
112struct cl_page; 105struct cl_page;
113struct cl_page_slice; 106struct cl_page_slice;
@@ -120,27 +113,7 @@ struct cl_page_operations;
120struct cl_io; 113struct cl_io;
121struct cl_io_slice; 114struct cl_io_slice;
122 115
123struct cl_req; 116struct cl_req_attr;
124struct cl_req_slice;
125
126/**
127 * Operations for each data device in the client stack.
128 *
129 * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
130 */
131struct cl_device_operations {
132 /**
133 * Initialize cl_req. This method is called top-to-bottom on all
134 * devices in the stack to get them a chance to allocate layer-private
135 * data, and to attach them to the cl_req by calling
136 * cl_req_slice_add().
137 *
138 * \see osc_req_init(), lov_req_init(), lovsub_req_init()
139 * \see vvp_req_init()
140 */
141 int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
142 struct cl_req *req);
143};
144 117
145/** 118/**
146 * Device in the client stack. 119 * Device in the client stack.
@@ -150,8 +123,6 @@ struct cl_device_operations {
150struct cl_device { 123struct cl_device {
151 /** Super-class. */ 124 /** Super-class. */
152 struct lu_device cd_lu_dev; 125 struct lu_device cd_lu_dev;
153 /** Per-layer operation vector. */
154 const struct cl_device_operations *cd_ops;
155}; 126};
156 127
157/** \addtogroup cl_object cl_object 128/** \addtogroup cl_object cl_object
@@ -267,7 +238,7 @@ struct cl_object_conf {
267 /** 238 /**
268 * Object layout. This is consumed by lov. 239 * Object layout. This is consumed by lov.
269 */ 240 */
270 struct lustre_md *coc_md; 241 struct lu_buf coc_layout;
271 /** 242 /**
272 * Description of particular stripe location in the 243 * Description of particular stripe location in the
273 * cluster. This is consumed by osc. 244 * cluster. This is consumed by osc.
@@ -301,6 +272,26 @@ enum {
301 OBJECT_CONF_WAIT = 2 272 OBJECT_CONF_WAIT = 2
302}; 273};
303 274
275enum {
276 CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
277 CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
278};
279
280struct cl_layout {
281 /** the buffer to return the layout in lov_mds_md format. */
282 struct lu_buf cl_buf;
283 /** size of layout in lov_mds_md format. */
284 size_t cl_size;
285 /** Layout generation. */
286 u32 cl_layout_gen;
287 /**
288 * True if this is a released file.
289 * Temporarily added for released file truncate in ll_setattr_raw().
290 * It will be removed later. -Jinshan
291 */
292 bool cl_is_released;
293};
294
304/** 295/**
305 * Operations implemented for each cl object layer. 296 * Operations implemented for each cl object layer.
306 * 297 *
@@ -400,6 +391,27 @@ struct cl_object_operations {
400 */ 391 */
401 int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, 392 int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
402 struct lov_user_md __user *lum); 393 struct lov_user_md __user *lum);
394 /**
395 * Get FIEMAP mapping from the object.
396 */
397 int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
398 struct ll_fiemap_info_key *fmkey,
399 struct fiemap *fiemap, size_t *buflen);
400 /**
401 * Get layout and generation of the object.
402 */
403 int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
404 struct cl_layout *layout);
405 /**
406 * Get maximum size of the object.
407 */
408 loff_t (*coo_maxbytes)(struct cl_object *obj);
409 /**
410 * Set request attributes.
411 */
412 void (*coo_req_attr_set)(const struct lu_env *env,
413 struct cl_object *obj,
414 struct cl_req_attr *attr);
403}; 415};
404 416
405/** 417/**
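The hunk above adds per-layer hooks for FIEMAP, layout retrieval, maximum object size and request-attribute setting. As a rough illustration (not part of the patch), a layer could wire one of them up as below; my_object_maxbytes and my_cl_obj_ops are hypothetical names, and MAX_LFS_FILESIZE merely stands in for whatever limit a real layer would report.

static loff_t my_object_maxbytes(struct cl_object *obj)
{
	/* report the largest offset this layer can address */
	return MAX_LFS_FILESIZE;
}

static const struct cl_object_operations my_cl_obj_ops = {
	.coo_maxbytes = my_object_maxbytes,
};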
@@ -591,7 +603,7 @@ enum cl_page_state {
591 * 603 *
592 * - [cl_page_state::CPS_PAGEOUT] page is dirty, the 604 * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
593 * req-formation engine decides that it wants to include this page 605 * req-formation engine decides that it wants to include this page
594 * into an cl_req being constructed, and yanks it from the cache; 606 * into an RPC being constructed, and yanks it from the cache;
595 * 607 *
596 * - [cl_page_state::CPS_FREEING] VM callback is executed to 608 * - [cl_page_state::CPS_FREEING] VM callback is executed to
597 * evict the page form the memory; 609 * evict the page form the memory;
@@ -660,7 +672,7 @@ enum cl_page_state {
660 * Page is being read in, as a part of a transfer. This is quite 672 * Page is being read in, as a part of a transfer. This is quite
661 * similar to the cl_page_state::CPS_PAGEOUT state, except that 673 * similar to the cl_page_state::CPS_PAGEOUT state, except that
662 * read-in is always "immediate"---there is no such thing a sudden 674 * read-in is always "immediate"---there is no such thing a sudden
663 * construction of read cl_req from cached, presumably not up to date, 675 * construction of read request from cached, presumably not up to date,
664 * pages. 676 * pages.
665 * 677 *
666 * Underlying VM page is locked for the duration of transfer. 678 * Underlying VM page is locked for the duration of transfer.
@@ -714,8 +726,6 @@ struct cl_page {
714 struct list_head cp_batch; 726 struct list_head cp_batch;
715 /** List of slices. Immutable after creation. */ 727 /** List of slices. Immutable after creation. */
716 struct list_head cp_layers; 728 struct list_head cp_layers;
717 /** Linkage of pages within cl_req. */
718 struct list_head cp_flight;
719 /** 729 /**
720 * Page state. This field is const to avoid accidental update, it is 730 * Page state. This field is const to avoid accidental update, it is
721 * modified only internally within cl_page.c. Protected by a VM lock. 731 * modified only internally within cl_page.c. Protected by a VM lock.
@@ -732,12 +742,6 @@ struct cl_page {
732 * by sub-io. Protected by a VM lock. 742 * by sub-io. Protected by a VM lock.
733 */ 743 */
734 struct cl_io *cp_owner; 744 struct cl_io *cp_owner;
735 /**
736 * Owning IO request in cl_page_state::CPS_PAGEOUT and
737 * cl_page_state::CPS_PAGEIN states. This field is maintained only in
738 * the top-level pages. Protected by a VM lock.
739 */
740 struct cl_req *cp_req;
741 /** List of references to this page, for debugging. */ 745 /** List of references to this page, for debugging. */
742 struct lu_ref cp_reference; 746 struct lu_ref cp_reference;
743 /** Link to an object, for debugging. */ 747 /** Link to an object, for debugging. */
@@ -779,7 +783,6 @@ enum cl_lock_mode {
779 783
780/** 784/**
781 * Requested transfer type. 785 * Requested transfer type.
782 * \ingroup cl_req
783 */ 786 */
784enum cl_req_type { 787enum cl_req_type {
785 CRT_READ, 788 CRT_READ,
@@ -884,26 +887,6 @@ struct cl_page_operations {
884 /** Destructor. Frees resources and slice itself. */ 887 /** Destructor. Frees resources and slice itself. */
885 void (*cpo_fini)(const struct lu_env *env, 888 void (*cpo_fini)(const struct lu_env *env,
886 struct cl_page_slice *slice); 889 struct cl_page_slice *slice);
887
888 /**
889 * Checks whether the page is protected by a cl_lock. This is a
890 * per-layer method, because certain layers have ways to check for the
891 * lock much more efficiently than through the generic locks scan, or
892 * implement locking mechanisms separate from cl_lock, e.g.,
893 * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
894 * being canceled, or scheduled for cancellation as soon as the last
895 * user goes away, too.
896 *
897 * \retval -EBUSY: page is protected by a lock of a given mode;
898 * \retval -ENODATA: page is not protected by a lock;
899 * \retval 0: this layer cannot decide.
900 *
901 * \see cl_page_is_under_lock()
902 */
903 int (*cpo_is_under_lock)(const struct lu_env *env,
904 const struct cl_page_slice *slice,
905 struct cl_io *io, pgoff_t *max);
906
907 /** 890 /**
908 * Optional debugging helper. Prints given page slice. 891 * Optional debugging helper. Prints given page slice.
909 * 892 *
@@ -915,8 +898,7 @@ struct cl_page_operations {
915 /** 898 /**
916 * \name transfer 899 * \name transfer
917 * 900 *
918 * Transfer methods. See comment on cl_req for a description of 901 * Transfer methods.
919 * transfer formation and life-cycle.
920 * 902 *
921 * @{ 903 * @{
922 */ 904 */
@@ -962,7 +944,7 @@ struct cl_page_operations {
962 int ioret); 944 int ioret);
963 /** 945 /**
964 * Called when cached page is about to be added to the 946 * Called when cached page is about to be added to the
965 * cl_req as a part of req formation. 947 * ptlrpc request as a part of req formation.
966 * 948 *
967 * \return 0 : proceed with this page; 949 * \return 0 : proceed with this page;
968 * \return -EAGAIN : skip this page; 950 * \return -EAGAIN : skip this page;
@@ -1365,7 +1347,6 @@ struct cl_2queue {
1365 * (3) sort all locks to avoid dead-locks, and acquire them 1347 * (3) sort all locks to avoid dead-locks, and acquire them
1366 * 1348 *
1367 * (4) process the chunk: call per-page methods 1349 * (4) process the chunk: call per-page methods
1368 * (cl_io_operations::cio_read_page() for read,
1369 * cl_io_operations::cio_prepare_write(), 1350 * cl_io_operations::cio_prepare_write(),
1370 * cl_io_operations::cio_commit_write() for write) 1351 * cl_io_operations::cio_commit_write() for write)
1371 * 1352 *
@@ -1388,6 +1369,8 @@ enum cl_io_type {
1388 CIT_WRITE, 1369 CIT_WRITE,
1389 /** truncate, utime system calls */ 1370 /** truncate, utime system calls */
1390 CIT_SETATTR, 1371 CIT_SETATTR,
1372 /** get data version */
1373 CIT_DATA_VERSION,
1391 /** 1374 /**
1392 * page fault handling 1375 * page fault handling
1393 */ 1376 */
@@ -1467,6 +1450,31 @@ struct cl_io_slice {
1467 1450
1468typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, 1451typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
1469 struct cl_page *); 1452 struct cl_page *);
1453
1454struct cl_read_ahead {
1455 /*
 1456	 * Maximum page index at which the readahead window will end.
 1457	 * This is determined by DLM lock coverage, RPC and stripe boundaries.
1458 * cra_end is included.
1459 */
1460 pgoff_t cra_end;
1461 /*
1462 * Release routine. If readahead holds resources underneath, this
 1463	 * function should be called to release them.
1464 */
1465 void (*cra_release)(const struct lu_env *env, void *cbdata);
1466 /* Callback data for cra_release routine */
1467 void *cra_cbdata;
1468};
1469
1470static inline void cl_read_ahead_release(const struct lu_env *env,
1471 struct cl_read_ahead *ra)
1472{
1473 if (ra->cra_release)
1474 ra->cra_release(env, ra->cra_cbdata);
1475 memset(ra, 0, sizeof(*ra));
1476}
1477
1470/** 1478/**
1471 * Per-layer io operations. 1479 * Per-layer io operations.
1472 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops 1480 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
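To show how the new struct cl_read_ahead is meant to be consumed, here is a minimal caller-side sketch (not from the patch); my_use_readahead_window is a hypothetical helper, and cl_io_read_ahead() is the top-level entry declared further down in this header.

static void my_use_readahead_window(const struct lu_env *env,
				    struct cl_io *io, pgoff_t index)
{
	struct cl_read_ahead ra = { 0 };

	if (cl_io_read_ahead(env, io, index, &ra) == 0) {
		/* read ahead pages in [index, ra.cra_end], inclusive */
		cl_read_ahead_release(env, &ra);	/* drop layer resources */
	}
}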
@@ -1573,16 +1581,13 @@ struct cl_io_operations {
1573 struct cl_page_list *queue, int from, int to, 1581 struct cl_page_list *queue, int from, int to,
1574 cl_commit_cbt cb); 1582 cl_commit_cbt cb);
1575 /** 1583 /**
1576 * Read missing page. 1584 * Decide maximum read ahead extent
1577 *
1578 * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
1579 * method, when it hits not-up-to-date page in the range. Optional.
1580 * 1585 *
1581 * \pre io->ci_type == CIT_READ 1586 * \pre io->ci_type == CIT_READ
1582 */ 1587 */
1583 int (*cio_read_page)(const struct lu_env *env, 1588 int (*cio_read_ahead)(const struct lu_env *env,
1584 const struct cl_io_slice *slice, 1589 const struct cl_io_slice *slice,
1585 const struct cl_page_slice *page); 1590 pgoff_t start, struct cl_read_ahead *ra);
1586 /** 1591 /**
1587 * Optional debugging helper. Print given io slice. 1592 * Optional debugging helper. Print given io slice.
1588 */ 1593 */
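On the implementation side, a per-layer cio_read_ahead method only has to report how far the window may extend and what to release afterwards. A hedged sketch follows (my_io_read_ahead is an illustrative name and the 256-page bound is arbitrary); a real layer would derive cra_end from its lock, RPC or stripe geometry.

static int my_io_read_ahead(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    pgoff_t start, struct cl_read_ahead *ra)
{
	ra->cra_end = start + 255;	/* window covers the next 256 pages */
	ra->cra_release = NULL;		/* this layer pins nothing */
	ra->cra_cbdata = NULL;
	return 0;
}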
@@ -1765,10 +1770,15 @@ struct cl_io {
1765 struct cl_io_rw_common ci_rw; 1770 struct cl_io_rw_common ci_rw;
1766 struct cl_setattr_io { 1771 struct cl_setattr_io {
1767 struct ost_lvb sa_attr; 1772 struct ost_lvb sa_attr;
1773 unsigned int sa_attr_flags;
1768 unsigned int sa_valid; 1774 unsigned int sa_valid;
1769 int sa_stripe_index; 1775 int sa_stripe_index;
1770 struct lu_fid *sa_parent_fid; 1776 const struct lu_fid *sa_parent_fid;
1771 } ci_setattr; 1777 } ci_setattr;
1778 struct cl_data_version_io {
1779 u64 dv_data_version;
1780 int dv_flags;
1781 } ci_data_version;
1772 struct cl_fault_io { 1782 struct cl_fault_io {
1773 /** page index within file. */ 1783 /** page index within file. */
1774 pgoff_t ft_index; 1784 pgoff_t ft_index;
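For the new CIT_DATA_VERSION io type, a caller is expected to run an ordinary cl_io cycle and read the result back out of ci_data_version. Below is a hedged sketch only: it assumes the union is reached as io->u (as for ci_setattr) and that cl_io_init()/cl_io_loop()/cl_io_fini() are used exactly as for the other io types; my_get_data_version is a hypothetical name.

static int my_get_data_version(const struct lu_env *env, struct cl_io *io,
			       struct cl_object *obj, u64 *version)
{
	int rc;

	rc = cl_io_init(env, io, CIT_DATA_VERSION, obj);
	if (rc == 0) {
		io->u.ci_data_version.dv_flags = 0;
		rc = cl_io_loop(env, io);
		*version = io->u.ci_data_version.dv_data_version;
	}
	cl_io_fini(env, io);	/* fini is required even if init failed */
	return rc;
}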
@@ -1836,179 +1846,20 @@ struct cl_io {
1836 1846
1837/** @} cl_io */ 1847/** @} cl_io */
1838 1848
1839/** \addtogroup cl_req cl_req
1840 * @{
1841 */
1842/** \struct cl_req
1843 * Transfer.
1844 *
1845 * There are two possible modes of transfer initiation on the client:
1846 *
1847 * - immediate transfer: this is started when a high level io wants a page
1848 * or a collection of pages to be transferred right away. Examples:
1849 * read-ahead, synchronous read in the case of non-page aligned write,
1850 * page write-out as a part of extent lock cancellation, page write-out
1851 * as a part of memory cleansing. Immediate transfer can be both
1852 * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
1853 *
1854 * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
1855 * when io wants to transfer a page to the server some time later, when
1856 * it can be done efficiently. Example: pages dirtied by the write(2)
1857 * path.
1858 *
1859 * In any case, transfer takes place in the form of a cl_req, which is a
1860 * representation for a network RPC.
1861 *
1862 * Pages queued for an opportunistic transfer are cached until it is decided
1863 * that efficient RPC can be composed of them. This decision is made by "a
1864 * req-formation engine", currently implemented as a part of osc
1865 * layer. Req-formation depends on many factors: the size of the resulting
1866 * RPC, whether or not multi-object RPCs are supported by the server,
1867 * max-rpc-in-flight limitations, size of the dirty cache, etc.
1868 *
1869 * For the immediate transfer io submits a cl_page_list, that req-formation
1870 * engine slices into cl_req's, possibly adding cached pages to some of
1871 * the resulting req's.
1872 *
1873 * Whenever a page from cl_page_list is added to a newly constructed req, its
1874 * cl_page_operations::cpo_prep() layer methods are called. At that moment,
1875 * page state is atomically changed from cl_page_state::CPS_OWNED to
1876 * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
1877 * is zeroed, and cl_page::cp_req is set to the
1878 * req. cl_page_operations::cpo_prep() method at the particular layer might
1879 * return -EALREADY to indicate that it does not need to submit this page
1880 * at all. This is possible, for example, if page, submitted for read,
1881 * became up-to-date in the meantime; and for write, the page don't have
1882 * dirty bit marked. \see cl_io_submit_rw()
1883 *
1884 * Whenever a cached page is added to a newly constructed req, its
1885 * cl_page_operations::cpo_make_ready() layer methods are called. At that
1886 * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
1887 * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
1888 * req. cl_page_operations::cpo_make_ready() method at the particular layer
1889 * might return -EAGAIN to indicate that this page is not eligible for the
1890 * transfer right now.
1891 *
1892 * FUTURE
1893 *
1894 * Plan is to divide transfers into "priority bands" (indicated when
1895 * submitting cl_page_list, and queuing a page for the opportunistic transfer)
1896 * and allow glueing of cached pages to immediate transfers only within single
1897 * band. This would make high priority transfers (like lock cancellation or
1898 * memory pressure induced write-out) really high priority.
1899 *
1900 */
1901
1902/** 1849/**
1903 * Per-transfer attributes. 1850 * Per-transfer attributes.
1904 */ 1851 */
1905struct cl_req_attr { 1852struct cl_req_attr {
1853 enum cl_req_type cra_type;
1854 u64 cra_flags;
1855 struct cl_page *cra_page;
1856
1906 /** Generic attributes for the server consumption. */ 1857 /** Generic attributes for the server consumption. */
1907 struct obdo *cra_oa; 1858 struct obdo *cra_oa;
1908 /** Jobid */ 1859 /** Jobid */
1909 char cra_jobid[LUSTRE_JOBID_SIZE]; 1860 char cra_jobid[LUSTRE_JOBID_SIZE];
1910}; 1861};
1911 1862
1912/**
1913 * Transfer request operations definable at every layer.
1914 *
1915 * Concurrency: transfer formation engine synchronizes calls to all transfer
1916 * methods.
1917 */
1918struct cl_req_operations {
1919 /**
1920 * Invoked top-to-bottom by cl_req_prep() when transfer formation is
1921 * complete (all pages are added).
1922 *
1923 * \see osc_req_prep()
1924 */
1925 int (*cro_prep)(const struct lu_env *env,
1926 const struct cl_req_slice *slice);
1927 /**
1928 * Called top-to-bottom to fill in \a oa fields. This is called twice
1929 * with different flags, see bug 10150 and osc_build_req().
1930 *
1931 * \param obj an object from cl_req which attributes are to be set in
1932 * \a oa.
1933 *
1934 * \param oa struct obdo where attributes are placed
1935 *
1936 * \param flags \a oa fields to be filled.
1937 */
1938 void (*cro_attr_set)(const struct lu_env *env,
1939 const struct cl_req_slice *slice,
1940 const struct cl_object *obj,
1941 struct cl_req_attr *attr, u64 flags);
1942 /**
1943 * Called top-to-bottom from cl_req_completion() to notify layers that
1944 * transfer completed. Has to free all state allocated by
1945 * cl_device_operations::cdo_req_init().
1946 */
1947 void (*cro_completion)(const struct lu_env *env,
1948 const struct cl_req_slice *slice, int ioret);
1949};
1950
1951/**
1952 * A per-object state that (potentially multi-object) transfer request keeps.
1953 */
1954struct cl_req_obj {
1955 /** object itself */
1956 struct cl_object *ro_obj;
1957 /** reference to cl_req_obj::ro_obj. For debugging. */
1958 struct lu_ref_link ro_obj_ref;
1959 /* something else? Number of pages for a given object? */
1960};
1961
1962/**
1963 * Transfer request.
1964 *
1965 * Transfer requests are not reference counted, because IO sub-system owns
1966 * them exclusively and knows when to free them.
1967 *
1968 * Life cycle.
1969 *
1970 * cl_req is created by cl_req_alloc() that calls
1971 * cl_device_operations::cdo_req_init() device methods to allocate per-req
1972 * state in every layer.
1973 *
1974 * Then pages are added (cl_req_page_add()), req keeps track of all objects it
1975 * contains pages for.
1976 *
1977 * Once all pages were collected, cl_page_operations::cpo_prep() method is
1978 * called top-to-bottom. At that point layers can modify req, let it pass, or
1979 * deny it completely. This is to support things like SNS that have transfer
1980 * ordering requirements invisible to the individual req-formation engine.
1981 *
1982 * On transfer completion (or transfer timeout, or failure to initiate the
1983 * transfer of an allocated req), cl_req_operations::cro_completion() method
1984 * is called, after execution of cl_page_operations::cpo_completion() of all
1985 * req's pages.
1986 */
1987struct cl_req {
1988 enum cl_req_type crq_type;
1989 /** A list of pages being transferred */
1990 struct list_head crq_pages;
1991 /** Number of pages in cl_req::crq_pages */
1992 unsigned crq_nrpages;
1993 /** An array of objects which pages are in ->crq_pages */
1994 struct cl_req_obj *crq_o;
1995 /** Number of elements in cl_req::crq_objs[] */
1996 unsigned crq_nrobjs;
1997 struct list_head crq_layers;
1998};
1999
2000/**
2001 * Per-layer state for request.
2002 */
2003struct cl_req_slice {
2004 struct cl_req *crs_req;
2005 struct cl_device *crs_dev;
2006 struct list_head crs_linkage;
2007 const struct cl_req_operations *crs_ops;
2008};
2009
2010/* @} cl_req */
2011
2012enum cache_stats_item { 1863enum cache_stats_item {
2013 /** how many cache lookups were performed */ 1864 /** how many cache lookups were performed */
2014 CS_lookup = 0, 1865 CS_lookup = 0,
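With cl_req gone, the slimmed-down struct cl_req_attr above is filled per page and handed to the object-based cl_req_attr_set() declared later in this header. A hypothetical sketch of an RPC-forming caller (my_set_rpc_attrs is an illustrative name; the object, obdo and flag value are assumed to come from the surrounding transfer code):

static void my_set_rpc_attrs(const struct lu_env *env, struct cl_object *obj,
			     struct cl_page *page, struct obdo *oa, u64 flags)
{
	struct cl_req_attr attr = {
		.cra_type	= CRT_WRITE,
		.cra_flags	= flags,
		.cra_page	= page,
		.cra_oa		= oa,
	};

	/* each layer of obj fills in the attributes it owns */
	cl_req_attr_set(env, obj, &attr);
}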
@@ -2153,9 +2004,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2153 const struct cl_lock_operations *ops); 2004 const struct cl_lock_operations *ops);
2154void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, 2005void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2155 struct cl_object *obj, const struct cl_io_operations *ops); 2006 struct cl_object *obj, const struct cl_io_operations *ops);
2156void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
2157 struct cl_device *dev,
2158 const struct cl_req_operations *ops);
2159/** @} helpers */ 2007/** @} helpers */
2160 2008
2161/** \defgroup cl_object cl_object 2009/** \defgroup cl_object cl_object
@@ -2183,6 +2031,12 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
2183void cl_object_kill(const struct lu_env *env, struct cl_object *obj); 2031void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
2184int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, 2032int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2185 struct lov_user_md __user *lum); 2033 struct lov_user_md __user *lum);
2034int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
2035 struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
2036 size_t *buflen);
2037int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
2038 struct cl_layout *cl);
2039loff_t cl_object_maxbytes(struct cl_object *obj);
2186 2040
2187/** 2041/**
2188 * Returns true, iff \a o0 and \a o1 are slices of the same object. 2042 * Returns true, iff \a o0 and \a o1 are slices of the same object.
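The new cl_object_layout_get() pairs with struct cl_layout introduced earlier in this header. A minimal sketch of querying just the layout generation, under the assumption that an empty lu_buf is acceptable when the caller does not want the lov_mds_md blob; my_layout_gen is a hypothetical name.

static int my_layout_gen(const struct lu_env *env, struct cl_object *obj,
			 u32 *gen)
{
	struct cl_layout cl = {
		.cl_buf = { .lb_buf = NULL, .lb_len = 0 },
	};
	int rc;

	rc = cl_object_layout_get(env, obj, &cl);
	if (rc == 0)
		*gen = cl.cl_layout_gen;
	return rc;
}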
@@ -2302,8 +2156,6 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2302void cl_page_delete(const struct lu_env *env, struct cl_page *pg); 2156void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
2303int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); 2157int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
2304void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); 2158void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
2305int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
2306 struct cl_page *page, pgoff_t *max_index);
2307loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); 2159loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
2308pgoff_t cl_index(const struct cl_object *obj, loff_t offset); 2160pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
2309size_t cl_page_size(const struct cl_object *obj); 2161size_t cl_page_size(const struct cl_object *obj);
@@ -2414,8 +2266,6 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
2414 struct cl_io_lock_link *link); 2266 struct cl_io_lock_link *link);
2415int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, 2267int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
2416 struct cl_lock_descr *descr); 2268 struct cl_lock_descr *descr);
2417int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
2418 struct cl_page *page);
2419int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, 2269int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
2420 enum cl_req_type iot, struct cl_2queue *queue); 2270 enum cl_req_type iot, struct cl_2queue *queue);
2421int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, 2271int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
@@ -2424,6 +2274,8 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
2424int cl_io_commit_async(const struct lu_env *env, struct cl_io *io, 2274int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
2425 struct cl_page_list *queue, int from, int to, 2275 struct cl_page_list *queue, int from, int to,
2426 cl_commit_cbt cb); 2276 cl_commit_cbt cb);
2277int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
2278 pgoff_t start, struct cl_read_ahead *ra);
2427int cl_io_is_going(const struct lu_env *env); 2279int cl_io_is_going(const struct lu_env *env);
2428 2280
2429/** 2281/**
@@ -2520,19 +2372,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
2520 2372
2521/** @} cl_page_list */ 2373/** @} cl_page_list */
2522 2374
2523/** \defgroup cl_req cl_req 2375void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
2524 * @{ 2376 struct cl_req_attr *attr);
2525 */
2526struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
2527 enum cl_req_type crt, int nr_objects);
2528
2529void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
2530 struct cl_page *page);
2531void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
2532int cl_req_prep(const struct lu_env *env, struct cl_req *req);
2533void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
2534 struct cl_req_attr *attr, u64 flags);
2535void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
2536 2377
2537/** \defgroup cl_sync_io cl_sync_io 2378/** \defgroup cl_sync_io cl_sync_io
2538 * @{ 2379 * @{
@@ -2568,8 +2409,6 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
2568 2409
2569/** @} cl_sync_io */ 2410/** @} cl_sync_io */
2570 2411
2571/** @} cl_req */
2572
2573/** \defgroup cl_env cl_env 2412/** \defgroup cl_env cl_env
2574 * 2413 *
2575 * lu_env handling for a client. 2414 * lu_env handling for a client.
@@ -2593,35 +2432,13 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
2593 * - allocation and destruction of environment is amortized by caching no 2432 * - allocation and destruction of environment is amortized by caching no
2594 * longer used environments instead of destroying them; 2433 * longer used environments instead of destroying them;
2595 * 2434 *
2596 * - there is a notion of "current" environment, attached to the kernel
2597 * data structure representing current thread Top-level lustre code
2598 * allocates an environment and makes it current, then calls into
2599 * non-lustre code, that in turn calls lustre back. Low-level lustre
2600 * code thus called can fetch environment created by the top-level code
2601 * and reuse it, avoiding additional environment allocation.
2602 * Right now, three interfaces can attach the cl_env to running thread:
2603 * - cl_env_get
2604 * - cl_env_implant
2605 * - cl_env_reexit(cl_env_reenter had to be called priorly)
2606 *
2607 * \see lu_env, lu_context, lu_context_key 2435 * \see lu_env, lu_context, lu_context_key
2608 * @{ 2436 * @{
2609 */ 2437 */
2610 2438
2611struct cl_env_nest {
2612 int cen_refcheck;
2613 void *cen_cookie;
2614};
2615
2616struct lu_env *cl_env_get(int *refcheck); 2439struct lu_env *cl_env_get(int *refcheck);
2617struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); 2440struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
2618struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
2619void cl_env_put(struct lu_env *env, int *refcheck); 2441void cl_env_put(struct lu_env *env, int *refcheck);
2620void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
2621void *cl_env_reenter(void);
2622void cl_env_reexit(void *cookie);
2623void cl_env_implant(struct lu_env *env, int *refcheck);
2624void cl_env_unplant(struct lu_env *env, int *refcheck);
2625unsigned int cl_env_cache_purge(unsigned int nr); 2442unsigned int cl_env_cache_purge(unsigned int nr);
2626struct lu_env *cl_env_percpu_get(void); 2443struct lu_env *cl_env_percpu_get(void);
2627void cl_env_percpu_put(struct lu_env *env); 2444void cl_env_percpu_put(struct lu_env *env);
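With the nesting/implant interfaces removed above, the remaining cl_env usage pattern is simply get, use, put. A minimal sketch (my_with_env is a hypothetical caller):

static int my_with_env(void)
{
	struct lu_env *env;
	int refcheck;
	int rc = 0;

	env = cl_env_get(&refcheck);	/* reuses a cached environment if any */
	if (IS_ERR(env))
		return PTR_ERR(env);

	/* ... use env with the cl_* interfaces declared above ... */

	cl_env_put(env, &refcheck);	/* return it to the cache */
	return rc;
}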
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
new file mode 100644
index 000000000000..fd7ffb154ad1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -0,0 +1,65 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2014, Intel Corporation.
27 *
28 * Copyright 2015 Cray Inc, all rights reserved.
29 * Author: Ben Evans.
30 *
31 * We assume all nodes are either little-endian or big-endian, and we
32 * always send messages in the sender's native format. The receiver
33 * detects the message format by checking the 'magic' field of the message
34 * (see lustre_msg_swabbed() below).
35 *
 36 * Each type has a corresponding 'lustre_swab_xxxtypexxx()' routine; these
 37 * are implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the
38 * type from "other" endian, in-place in the message buffer.
39 *
40 * A swabber takes a single pointer argument. The caller must already have
41 * verified that the length of the message buffer >= sizeof (type).
42 *
43 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
44 * may be defined that swabs just the variable part, after the caller has
45 * verified that the message buffer is large enough.
46 */
47
48#ifndef _LLOG_SWAB_H_
49#define _LLOG_SWAB_H_
50
51#include "lustre/lustre_idl.h"
52struct lustre_cfg;
53
54void lustre_swab_lu_fid(struct lu_fid *fid);
55void lustre_swab_ost_id(struct ost_id *oid);
56void lustre_swab_llogd_body(struct llogd_body *d);
57void lustre_swab_llog_hdr(struct llog_log_hdr *h);
58void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
59void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
60void lustre_swab_lu_seq_range(struct lu_seq_range *range);
61void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
62void lustre_swab_cfg_marker(struct cfg_marker *marker,
63 int swab, int size);
64
65#endif
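As an illustration of the swabber convention the header comment describes, a fixed-size type is converted field by field, in place, with the kernel's __swab*s helpers from <linux/swab.h>. This is a sketch only; example_swab_lu_fid is a hypothetical name, and the real lustre_swab_lu_fid() declared above lives in ptlrpc/pack_generic.c.

static void example_swab_lu_fid(struct lu_fid *fid)
{
	__swab64s(&fid->f_seq);		/* 64-bit sequence */
	__swab32s(&fid->f_oid);		/* 32-bit object id */
	__swab32s(&fid->f_ver);		/* 32-bit version */
}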
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index cc0713ef8ae5..62753dae0bfa 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -43,6 +43,8 @@
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/types.h> 44#include <linux/types.h>
45 45
46#include "../../include/linux/libcfs/libcfs.h"
47#include "lustre_cfg.h"
46#include "lustre/lustre_idl.h" 48#include "lustre/lustre_idl.h"
47 49
48struct lprocfs_vars { 50struct lprocfs_vars {
@@ -540,7 +542,8 @@ lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
540void lprocfs_clear_stats(struct lprocfs_stats *stats); 542void lprocfs_clear_stats(struct lprocfs_stats *stats);
541void lprocfs_free_stats(struct lprocfs_stats **stats); 543void lprocfs_free_stats(struct lprocfs_stats **stats);
542void lprocfs_counter_init(struct lprocfs_stats *stats, int index, 544void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
543 unsigned conf, const char *name, const char *units); 545 unsigned int conf, const char *name,
546 const char *units);
544struct obd_export; 547struct obd_export;
545int lprocfs_exp_cleanup(struct obd_export *exp); 548int lprocfs_exp_cleanup(struct obd_export *exp);
546struct dentry *ldebugfs_add_simple(struct dentry *root, 549struct dentry *ldebugfs_add_simple(struct dentry *root,
@@ -701,9 +704,9 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
701extern const struct sysfs_ops lustre_sysfs_ops; 704extern const struct sysfs_ops lustre_sysfs_ops;
702 705
703struct root_squash_info; 706struct root_squash_info;
704int lprocfs_wr_root_squash(const char *buffer, unsigned long count, 707int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
705 struct root_squash_info *squash, char *name); 708 struct root_squash_info *squash, char *name);
706int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count, 709int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
707 struct root_squash_info *squash, char *name); 710 struct root_squash_info *squash, char *name);
708 711
709/* all quota proc functions */ 712/* all quota proc functions */
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index c2340d643e84..b8ad5559a3b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -41,79 +41,24 @@
41#ifndef _LUSTRE_FIEMAP_H 41#ifndef _LUSTRE_FIEMAP_H
42#define _LUSTRE_FIEMAP_H 42#define _LUSTRE_FIEMAP_H
43 43
44struct ll_fiemap_extent { 44#ifndef __KERNEL__
45 __u64 fe_logical; /* logical offset in bytes for the start of 45#include <stddef.h>
46 * the extent from the beginning of the file 46#include <fiemap.h>
47 */ 47#endif
48 __u64 fe_physical; /* physical offset in bytes for the start
49 * of the extent from the beginning of the disk
50 */
51 __u64 fe_length; /* length in bytes for this extent */
52 __u64 fe_reserved64[2];
53 __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
54 __u32 fe_device; /* device number for this extent */
55 __u32 fe_reserved[2];
56};
57
58struct ll_user_fiemap {
59 __u64 fm_start; /* logical offset (inclusive) at
60 * which to start mapping (in)
61 */
62 __u64 fm_length; /* logical length of mapping which
63 * userspace wants (in)
64 */
65 __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
66 __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
67 __u32 fm_extent_count; /* size of fm_extents array (in) */
68 __u32 fm_reserved;
69 struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
70};
71
72#define FIEMAP_MAX_OFFSET (~0ULL)
73 48
74#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before 49/* XXX: We use fiemap_extent::fe_reserved[0] */
75 * map 50#define fe_device fe_reserved[0]
76 */
77#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute
78 * tree
79 */
80#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
81#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
82#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
83 * Sets EXTENT_UNKNOWN.
84 */
85#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
86 * while fs is unmounted
87 */
88#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
89 * Sets EXTENT_NO_DIRECT.
90 */
91#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
92 * block aligned.
93 */
94#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
95 * Sets EXTENT_NOT_ALIGNED.*/
96#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
97 * Sets EXTENT_NOT_ALIGNED.
98 */
99#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
100 * no data (i.e. zero).
101 */
102#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
103 * support extents. Result
104 * merged for efficiency.
105 */
106 51
107static inline size_t fiemap_count_to_size(size_t extent_count) 52static inline size_t fiemap_count_to_size(size_t extent_count)
108{ 53{
109 return (sizeof(struct ll_user_fiemap) + extent_count * 54 return sizeof(struct fiemap) + extent_count *
110 sizeof(struct ll_fiemap_extent)); 55 sizeof(struct fiemap_extent);
111} 56}
112 57
113static inline unsigned fiemap_size_to_count(size_t array_size) 58static inline unsigned fiemap_size_to_count(size_t array_size)
114{ 59{
115 return ((array_size - sizeof(struct ll_user_fiemap)) / 60 return (array_size - sizeof(struct fiemap)) /
116 sizeof(struct ll_fiemap_extent)); 61 sizeof(struct fiemap_extent);
117} 62}
118 63
119#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */ 64#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
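The two helpers above convert between an extent count and the byte size of a struct fiemap followed by its fiemap_extent array. A hypothetical allocation sketch (my_alloc_fiemap is an illustrative name; a real caller may prefer a different allocator or GFP mask):

static struct fiemap *my_alloc_fiemap(unsigned int extent_count)
{
	struct fiemap *fm;

	fm = kzalloc(fiemap_count_to_size(extent_count), GFP_KERNEL);
	if (!fm)
		return NULL;
	fm->fm_extent_count = extent_count;	/* size of fm_extents[] (in) */
	return fm;
}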
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 72eaee95c6b8..65ce503ad595 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -48,8 +48,7 @@
48 * that the Lustre wire protocol is not influenced by external dependencies. 48 * that the Lustre wire protocol is not influenced by external dependencies.
49 * 49 *
50 * The only other acceptable items in this file are VERY SIMPLE accessor 50 * The only other acceptable items in this file are VERY SIMPLE accessor
51 * functions to avoid callers grubbing inside the structures, and the 51 * functions to avoid callers grubbing inside the structures. Nothing that
52 * prototypes of the swabber functions for each struct. Nothing that
53 * depends on external functions or definitions should be in here. 52 * depends on external functions or definitions should be in here.
54 * 53 *
55 * Structs must be properly aligned to put 64-bit values on an 8-byte 54 * Structs must be properly aligned to put 64-bit values on an 8-byte
@@ -64,23 +63,6 @@
64 * in the code to ensure that new/old clients that see this larger struct 63 * in the code to ensure that new/old clients that see this larger struct
65 * do not fail, otherwise you need to implement protocol compatibility). 64 * do not fail, otherwise you need to implement protocol compatibility).
66 * 65 *
67 * We assume all nodes are either little-endian or big-endian, and we
68 * always send messages in the sender's native format. The receiver
69 * detects the message format by checking the 'magic' field of the message
70 * (see lustre_msg_swabbed() below).
71 *
72 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
73 * implemented either here, inline (trivial implementations) or in
74 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
75 * endian, in-place in the message buffer.
76 *
77 * A swabber takes a single pointer argument. The caller must already have
78 * verified that the length of the message buffer >= sizeof (type).
79 *
80 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
81 * may be defined that swabs just the variable part, after the caller has
82 * verified that the message buffer is large enough.
83 *
84 * @{ 66 * @{
85 */ 67 */
86 68
@@ -192,113 +174,6 @@ struct lu_seq_range_array {
192 174
193#define LU_SEQ_RANGE_MASK 0x3 175#define LU_SEQ_RANGE_MASK 0x3
194 176
195static inline unsigned fld_range_type(const struct lu_seq_range *range)
196{
197 return range->lsr_flags & LU_SEQ_RANGE_MASK;
198}
199
200static inline bool fld_range_is_ost(const struct lu_seq_range *range)
201{
202 return fld_range_type(range) == LU_SEQ_RANGE_OST;
203}
204
205static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
206{
207 return fld_range_type(range) == LU_SEQ_RANGE_MDT;
208}
209
210/**
211 * This all range is only being used when fld client sends fld query request,
212 * but it does not know whether the seq is MDT or OST, so it will send req
213 * with ALL type, which means either seq type gotten from lookup can be
214 * expected.
215 */
216static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
217{
218 return fld_range_type(range) == LU_SEQ_RANGE_ANY;
219}
220
221static inline void fld_range_set_type(struct lu_seq_range *range,
222 unsigned flags)
223{
224 range->lsr_flags |= flags;
225}
226
227static inline void fld_range_set_mdt(struct lu_seq_range *range)
228{
229 fld_range_set_type(range, LU_SEQ_RANGE_MDT);
230}
231
232static inline void fld_range_set_ost(struct lu_seq_range *range)
233{
234 fld_range_set_type(range, LU_SEQ_RANGE_OST);
235}
236
237static inline void fld_range_set_any(struct lu_seq_range *range)
238{
239 fld_range_set_type(range, LU_SEQ_RANGE_ANY);
240}
241
242/**
243 * returns width of given range \a r
244 */
245
246static inline __u64 range_space(const struct lu_seq_range *range)
247{
248 return range->lsr_end - range->lsr_start;
249}
250
251/**
252 * initialize range to zero
253 */
254
255static inline void range_init(struct lu_seq_range *range)
256{
257 memset(range, 0, sizeof(*range));
258}
259
260/**
261 * check if given seq id \a s is within given range \a r
262 */
263
264static inline bool range_within(const struct lu_seq_range *range,
265 __u64 s)
266{
267 return s >= range->lsr_start && s < range->lsr_end;
268}
269
270static inline bool range_is_sane(const struct lu_seq_range *range)
271{
272 return (range->lsr_end >= range->lsr_start);
273}
274
275static inline bool range_is_zero(const struct lu_seq_range *range)
276{
277 return (range->lsr_start == 0 && range->lsr_end == 0);
278}
279
280static inline bool range_is_exhausted(const struct lu_seq_range *range)
281
282{
283 return range_space(range) == 0;
284}
285
286/* return 0 if two range have the same location */
287static inline int range_compare_loc(const struct lu_seq_range *r1,
288 const struct lu_seq_range *r2)
289{
290 return r1->lsr_index != r2->lsr_index ||
291 r1->lsr_flags != r2->lsr_flags;
292}
293
294#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
295
296#define PRANGE(range) \
297 (range)->lsr_start, \
298 (range)->lsr_end, \
299 (range)->lsr_index, \
300 fld_range_is_mdt(range) ? "mdt" : "ost"
301
302/** \defgroup lu_fid lu_fid 177/** \defgroup lu_fid lu_fid
303 * @{ 178 * @{
304 */ 179 */
@@ -310,7 +185,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
310 */ 185 */
311enum lma_compat { 186enum lma_compat {
312 LMAC_HSM = 0x00000001, 187 LMAC_HSM = 0x00000001,
313 LMAC_SOM = 0x00000002, 188/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
314 LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ 189 LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
315 LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is 190 LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
316 * under /O/<seq>/d<x>. 191 * under /O/<seq>/d<x>.
@@ -644,13 +519,14 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
644{ 519{
645 if (fid_seq_is_mdt0(oi->oi.oi_seq)) { 520 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
646 if (oid >= IDIF_MAX_OID) { 521 if (oid >= IDIF_MAX_OID) {
647 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); 522 CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
523 oid, POSTID(oi));
648 return; 524 return;
649 } 525 }
650 oi->oi.oi_id = oid; 526 oi->oi.oi_id = oid;
651 } else if (fid_is_idif(&oi->oi_fid)) { 527 } else if (fid_is_idif(&oi->oi_fid)) {
652 if (oid >= IDIF_MAX_OID) { 528 if (oid >= IDIF_MAX_OID) {
653 CERROR("Bad %llu to set "DOSTID"\n", 529 CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
654 oid, POSTID(oi)); 530 oid, POSTID(oi));
655 return; 531 return;
656 } 532 }
@@ -676,7 +552,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
676 552
677 if (fid_is_idif(fid)) { 553 if (fid_is_idif(fid)) {
678 if (oid >= IDIF_MAX_OID) { 554 if (oid >= IDIF_MAX_OID) {
679 CERROR("Too large OID %#llx to set IDIF "DFID"\n", 555 CERROR("Too large OID %#llx to set IDIF " DFID "\n",
680 (unsigned long long)oid, PFID(fid)); 556 (unsigned long long)oid, PFID(fid));
681 return -EBADF; 557 return -EBADF;
682 } 558 }
@@ -685,7 +561,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
685 fid->f_ver = oid >> 48; 561 fid->f_ver = oid >> 48;
686 } else { 562 } else {
687 if (oid >= OBIF_MAX_OID) { 563 if (oid >= OBIF_MAX_OID) {
688 CERROR("Too large OID %#llx to set REG "DFID"\n", 564 CERROR("Too large OID %#llx to set REG " DFID "\n",
689 (unsigned long long)oid, PFID(fid)); 565 (unsigned long long)oid, PFID(fid));
690 return -EBADF; 566 return -EBADF;
691 } 567 }
@@ -785,8 +661,6 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
785 return fid_seq(fid); 661 return fid_seq(fid);
786} 662}
787 663
788void lustre_swab_ost_id(struct ost_id *oid);
789
790/** 664/**
791 * Get inode generation from a igif. 665 * Get inode generation from a igif.
792 * \param fid a igif to get inode generation from. 666 * \param fid a igif to get inode generation from.
@@ -847,9 +721,6 @@ static inline bool fid_is_sane(const struct lu_fid *fid)
847 fid_seq_is_rsvd(fid_seq(fid))); 721 fid_seq_is_rsvd(fid_seq(fid)));
848} 722}
849 723
850void lustre_swab_lu_fid(struct lu_fid *fid);
851void lustre_swab_lu_seq_range(struct lu_seq_range *range);
852
853static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) 724static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
854{ 725{
855 return memcmp(f0, f1, sizeof(*f0)) == 0; 726 return memcmp(f0, f1, sizeof(*f0)) == 0;
@@ -1099,8 +970,10 @@ struct ptlrpc_body_v3 {
1099 __u32 pb_version; 970 __u32 pb_version;
1100 __u32 pb_opc; 971 __u32 pb_opc;
1101 __u32 pb_status; 972 __u32 pb_status;
1102 __u64 pb_last_xid; 973 __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
1103 __u64 pb_last_seen; 974 __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
975 __u16 pb_padding0;
976 __u32 pb_padding1;
1104 __u64 pb_last_committed; 977 __u64 pb_last_committed;
1105 __u64 pb_transno; 978 __u64 pb_transno;
1106 __u32 pb_flags; 979 __u32 pb_flags;
@@ -1112,8 +985,11 @@ struct ptlrpc_body_v3 {
1112 __u64 pb_slv; 985 __u64 pb_slv;
1113 /* VBR: pre-versions */ 986 /* VBR: pre-versions */
1114 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; 987 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
988 __u64 pb_mbits; /**< match bits for bulk request */
1115 /* padding for future needs */ 989 /* padding for future needs */
1116 __u64 pb_padding[4]; 990 __u64 pb_padding64_0;
991 __u64 pb_padding64_1;
992 __u64 pb_padding64_2;
1117 char pb_jobid[LUSTRE_JOBID_SIZE]; 993 char pb_jobid[LUSTRE_JOBID_SIZE];
1118}; 994};
1119 995
@@ -1125,8 +1001,10 @@ struct ptlrpc_body_v2 {
1125 __u32 pb_version; 1001 __u32 pb_version;
1126 __u32 pb_opc; 1002 __u32 pb_opc;
1127 __u32 pb_status; 1003 __u32 pb_status;
1128 __u64 pb_last_xid; 1004 __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
1129 __u64 pb_last_seen; 1005 __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
1006 __u16 pb_padding0;
1007 __u32 pb_padding1;
1130 __u64 pb_last_committed; 1008 __u64 pb_last_committed;
1131 __u64 pb_transno; 1009 __u64 pb_transno;
1132 __u32 pb_flags; 1010 __u32 pb_flags;
@@ -1140,12 +1018,13 @@ struct ptlrpc_body_v2 {
1140 __u64 pb_slv; 1018 __u64 pb_slv;
1141 /* VBR: pre-versions */ 1019 /* VBR: pre-versions */
1142 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; 1020 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1021 __u64 pb_mbits; /**< unused in V2 */
1143 /* padding for future needs */ 1022 /* padding for future needs */
1144 __u64 pb_padding[4]; 1023 __u64 pb_padding64_0;
1024 __u64 pb_padding64_1;
1025 __u64 pb_padding64_2;
1145}; 1026};
1146 1027
1147void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1148
1149/* message body offset for lustre_msg_v2 */ 1028/* message body offset for lustre_msg_v2 */
1150/* ptlrpc body offset in all request/reply messages */ 1029/* ptlrpc body offset in all request/reply messages */
1151#define MSG_PTLRPC_BODY_OFF 0 1030#define MSG_PTLRPC_BODY_OFF 0
@@ -1282,7 +1161,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1282 */ 1161 */
1283#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ 1162#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
1284#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ 1163#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
1164#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
1165 * RPCs in parallel
1166 */
1285#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */ 1167#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
1168#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
1169#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */
1170/** bulk matchbits is sent within ptlrpc_body */
1171#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
1172#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */
1173#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */
1286 1174
1287/* XXX README XXX: 1175/* XXX README XXX:
1288 * Please DO NOT add flag values here before first ensuring that this same 1176 * Please DO NOT add flag values here before first ensuring that this same
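Like the existing OBD_CONNECT_* bits, the new flags above are tested against obd_connect_data::ocd_connect_flags once the handshake has completed. A trivial sketch (my_peer_supports_bulk_mbits is a hypothetical helper):

static bool my_peer_supports_bulk_mbits(const struct obd_connect_data *ocd)
{
	return !!(ocd->ocd_connect_flags & OBD_CONNECT_BULK_MBITS);
}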
@@ -1313,25 +1201,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1313 * If we eventually have separate connect data for different types, which we 1201 * If we eventually have separate connect data for different types, which we
1314 * almost certainly will, then perhaps we stick a union in here. 1202 * almost certainly will, then perhaps we stick a union in here.
1315 */ 1203 */
1316struct obd_connect_data_v1 {
1317 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1318 __u32 ocd_version; /* lustre release version number */
1319 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1320 __u32 ocd_index; /* LOV index to connect to */
1321 __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
1322 __u64 ocd_ibits_known; /* inode bits this client understands */
1323 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1324 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1325 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1326 __u32 ocd_unused; /* also fix lustre_swab_connect */
1327 __u64 ocd_transno; /* first transno from client to be replayed */
1328 __u32 ocd_group; /* MDS group on OST */
1329 __u32 ocd_cksum_types; /* supported checksum algorithms */
1330 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1331 __u32 ocd_instance; /* also fix lustre_swab_connect */
1332 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1333};
1334
1335struct obd_connect_data { 1204struct obd_connect_data {
1336 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ 1205 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1337 __u32 ocd_version; /* lustre release version number */ 1206 __u32 ocd_version; /* lustre release version number */
@@ -1354,8 +1223,10 @@ struct obd_connect_data {
1354 * any field after ocd_maxbytes on the receiver without a valid flag 1223 * any field after ocd_maxbytes on the receiver without a valid flag
1355 * may result in out-of-bound memory access and kernel oops. 1224 * may result in out-of-bound memory access and kernel oops.
1356 */ 1225 */
1357 __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ 1226 __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
1358 __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ 1227 __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */
1228 __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */
1229 __u64 ocd_connect_flags2;
1359 __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ 1230 __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
1360 __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */ 1231 __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
1361 __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */ 1232 __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
@@ -1380,8 +1251,6 @@ struct obd_connect_data {
1380 * reserve the flag for future use. 1251 * reserve the flag for future use.
1381 */ 1252 */
1382 1253
1383void lustre_swab_connect(struct obd_connect_data *ocd);
1384
1385/* 1254/*
1386 * Supported checksum algorithms. Up to 32 checksum types are supported. 1255 * Supported checksum algorithms. Up to 32 checksum types are supported.
1387 * (32-bit mask stored in obd_connect_data::ocd_cksum_types) 1256 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
@@ -1416,7 +1285,7 @@ enum ost_cmd {
1416 OST_STATFS = 13, 1285 OST_STATFS = 13,
1417 OST_SYNC = 16, 1286 OST_SYNC = 16,
1418 OST_SET_INFO = 17, 1287 OST_SET_INFO = 17,
1419 OST_QUOTACHECK = 18, 1288 OST_QUOTACHECK = 18, /* not used since 2.4 */
1420 OST_QUOTACTL = 19, 1289 OST_QUOTACTL = 19,
1421 OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ 1290 OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
1422 OST_LAST_OPC 1291 OST_LAST_OPC
@@ -1580,8 +1449,6 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1580 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); 1449 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1581} 1450}
1582 1451
1583/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1584
1585#define MAX_MD_SIZE \ 1452#define MAX_MD_SIZE \
1586 (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) 1453 (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1587#define MIN_MD_SIZE \ 1454#define MIN_MD_SIZE \
@@ -1674,7 +1541,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1674#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ 1541#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1675#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ 1542#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1676/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ 1543/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1677#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */ 1544/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
1678#define OBD_MD_FLGROUP (0x01000000ULL) /* group */ 1545#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1679#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ 1546#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1680#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ 1547#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
@@ -1713,7 +1580,9 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1713/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */ 1580/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
1714 1581
1715#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ 1582#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1716#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ 1583#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
1584 * executed
1585 */
1717 1586
1718#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ 1587#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
1719 1588
@@ -1742,11 +1611,6 @@ struct hsm_state_set {
1742 __u64 hss_clearmask; 1611 __u64 hss_clearmask;
1743}; 1612};
1744 1613
1745void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1746void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1747
1748void lustre_swab_obd_statfs(struct obd_statfs *os);
1749
1750/* ost_body.data values for OST_BRW */ 1614/* ost_body.data values for OST_BRW */
1751 1615
1752#define OBD_BRW_READ 0x01 1616#define OBD_BRW_READ 0x01
@@ -1786,14 +1650,16 @@ struct obd_ioobj {
1786 __u32 ioo_bufcnt; /* number of niobufs for this object */ 1650 __u32 ioo_bufcnt; /* number of niobufs for this object */
1787}; 1651};
1788 1652
1653/*
1654 * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
1655 * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
1656 * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
1657 */
1789#define IOOBJ_MAX_BRW_BITS 16 1658#define IOOBJ_MAX_BRW_BITS 16
1790#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1791#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1) 1659#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1792#define ioobj_max_brw_set(ioo, num) \ 1660#define ioobj_max_brw_set(ioo, num) \
1793do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) 1661do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
1794 1662
1795void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1796
1797/* multiple of 8 bytes => can array */ 1663/* multiple of 8 bytes => can array */
1798struct niobuf_remote { 1664struct niobuf_remote {
1799 __u64 rnb_offset; 1665 __u64 rnb_offset;
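A worked example of the encoding spelled out in the IOOBJ_MAX_BRW_BITS note above: storing a max-brw count of 8 writes (8 - 1) << 16 = 0x70000 into ioo_max_brw, and reading it back yields (0x70000 >> 16) + 1 = 8. The helper name below is illustrative.

static void my_ioobj_brw_example(struct obd_ioobj *ioo)
{
	ioobj_max_brw_set(ioo, 8);		/* ioo_max_brw = 7 << 16 = 0x70000 */
	WARN_ON(ioobj_max_brw_get(ioo) != 8);	/* (0x70000 >> 16) + 1 == 8 */
}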
@@ -1801,8 +1667,6 @@ struct niobuf_remote {
1801 __u32 rnb_flags; 1667 __u32 rnb_flags;
1802}; 1668};
1803 1669
1804void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1805
1806/* lock value block communicated between the filter and llite */ 1670/* lock value block communicated between the filter and llite */
1807 1671
1808/* OST_LVB_ERR_INIT is needed because the return code in rc is 1672/* OST_LVB_ERR_INIT is needed because the return code in rc is
@@ -1824,8 +1688,6 @@ struct ost_lvb_v1 {
1824 __u64 lvb_blocks; 1688 __u64 lvb_blocks;
1825}; 1689};
1826 1690
1827void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1828
1829struct ost_lvb { 1691struct ost_lvb {
1830 __u64 lvb_size; 1692 __u64 lvb_size;
1831 __s64 lvb_mtime; 1693 __s64 lvb_mtime;
@@ -1838,8 +1700,6 @@ struct ost_lvb {
1838 __u32 lvb_padding; 1700 __u32 lvb_padding;
1839}; 1701};
1840 1702
1841void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1842
1843/* 1703/*
1844 * lquota data structures 1704 * lquota data structures
1845 */ 1705 */
@@ -1866,8 +1726,6 @@ struct obd_quotactl {
1866 struct obd_dqblk qc_dqblk; 1726 struct obd_dqblk qc_dqblk;
1867}; 1727};
1868 1728
1869void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1870
1871#define Q_COPY(out, in, member) (out)->member = (in)->member 1729#define Q_COPY(out, in, member) (out)->member = (in)->member
1872 1730
1873#define QCTL_COPY(out, in) \ 1731#define QCTL_COPY(out, in) \
@@ -1905,8 +1763,6 @@ struct lquota_lvb {
1905 __u64 lvb_pad1; 1763 __u64 lvb_pad1;
1906}; 1764};
1907 1765
1908void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1909
1910/* op codes */ 1766/* op codes */
1911enum quota_cmd { 1767enum quota_cmd {
1912 QUOTA_DQACQ = 601, 1768 QUOTA_DQACQ = 601,
@@ -1933,9 +1789,9 @@ enum mds_cmd {
1933 MDS_PIN = 42, /* obsolete, never used in a release */ 1789 MDS_PIN = 42, /* obsolete, never used in a release */
1934 MDS_UNPIN = 43, /* obsolete, never used in a release */ 1790 MDS_UNPIN = 43, /* obsolete, never used in a release */
1935 MDS_SYNC = 44, 1791 MDS_SYNC = 44,
1936 MDS_DONE_WRITING = 45, 1792 MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
1937 MDS_SET_INFO = 46, 1793 MDS_SET_INFO = 46,
1938 MDS_QUOTACHECK = 47, 1794 MDS_QUOTACHECK = 47, /* not used since 2.4 */
1939 MDS_QUOTACTL = 48, 1795 MDS_QUOTACTL = 48,
1940 MDS_GETXATTR = 49, 1796 MDS_GETXATTR = 49,
1941 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ 1797 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
@@ -1972,8 +1828,6 @@ enum mdt_reint_cmd {
1972 REINT_MAX 1828 REINT_MAX
1973}; 1829};
1974 1830
1975void lustre_swab_generic_32s(__u32 *val);
1976
1977/* the disposition of the intent outlines what was executed */ 1831/* the disposition of the intent outlines what was executed */
1978#define DISP_IT_EXECD 0x00000001 1832#define DISP_IT_EXECD 0x00000001
1979#define DISP_LOOKUP_EXECD 0x00000002 1833#define DISP_LOOKUP_EXECD 0x00000002
@@ -2031,36 +1885,19 @@ enum {
2031#define MDS_STATUS_CONN 1 1885#define MDS_STATUS_CONN 1
2032#define MDS_STATUS_LOV 2 1886#define MDS_STATUS_LOV 2
2033 1887
2034/* mdt_thread_info.mti_flags. */
2035enum md_op_flags {
2036 /* The flag indicates Size-on-MDS attributes are changed. */
2037 MF_SOM_CHANGE = (1 << 0),
2038 /* Flags indicates an epoch opens or closes. */
2039 MF_EPOCH_OPEN = (1 << 1),
2040 MF_EPOCH_CLOSE = (1 << 2),
2041 MF_MDC_CANCEL_FID1 = (1 << 3),
2042 MF_MDC_CANCEL_FID2 = (1 << 4),
2043 MF_MDC_CANCEL_FID3 = (1 << 5),
2044 MF_MDC_CANCEL_FID4 = (1 << 6),
2045 /* There is a pending attribute update. */
2046 MF_SOM_AU = (1 << 7),
2047 /* Cancel OST locks while getattr OST attributes. */
2048 MF_GETATTR_LOCK = (1 << 8),
2049 MF_GET_MDT_IDX = (1 << 9),
2050};
2051
2052#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2053
2054#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2055
2056/* these should be identical to their EXT4_*_FL counterparts, they are 1888/* these should be identical to their EXT4_*_FL counterparts, they are
2057 * redefined here only to avoid dragging in fs/ext4/ext4.h 1889 * redefined here only to avoid dragging in fs/ext4/ext4.h
2058 */ 1890 */
2059#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ 1891#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2060#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ 1892#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2061#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ 1893#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
1894#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */
2062#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */ 1895#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
1896#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */
2063#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */ 1897#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
1898#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
1899#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */
1900#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
2064 1901
2065/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values 1902/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2066 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire 1903 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
@@ -2113,7 +1950,7 @@ struct mdt_body {
2113 __u32 mbo_mode; 1950 __u32 mbo_mode;
2114 __u32 mbo_uid; 1951 __u32 mbo_uid;
2115 __u32 mbo_gid; 1952 __u32 mbo_gid;
2116 __u32 mbo_flags; 1953 __u32 mbo_flags; /* LUSTRE_*_FL file attributes */
2117 __u32 mbo_rdev; 1954 __u32 mbo_rdev;
2118 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ 1955 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
2119 __u32 mbo_unused2; /* was "generation" until 2.4.0 */ 1956 __u32 mbo_unused2; /* was "generation" until 2.4.0 */
@@ -2121,7 +1958,7 @@ struct mdt_body {
2121 __u32 mbo_eadatasize; 1958 __u32 mbo_eadatasize;
2122 __u32 mbo_aclsize; 1959 __u32 mbo_aclsize;
2123 __u32 mbo_max_mdsize; 1960 __u32 mbo_max_mdsize;
2124 __u32 mbo_max_cookiesize; 1961 __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
2125 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ 1962 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
2126 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ 1963 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
2127 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ 1964 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
@@ -2132,17 +1969,13 @@ struct mdt_body {
2132 __u64 mbo_padding_10; 1969 __u64 mbo_padding_10;
2133}; /* 216 */ 1970}; /* 216 */
2134 1971
2135void lustre_swab_mdt_body(struct mdt_body *b);
2136
2137struct mdt_ioepoch { 1972struct mdt_ioepoch {
2138 struct lustre_handle handle; 1973 struct lustre_handle mio_handle;
2139 __u64 ioepoch; 1974 __u64 mio_unused1; /* was ioepoch */
2140 __u32 flags; 1975 __u32 mio_unused2; /* was flags */
2141 __u32 padding; 1976 __u32 mio_padding;
2142}; 1977};
2143 1978
2144void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2145
2146/* permissions for md_perm.mp_perm */ 1979/* permissions for md_perm.mp_perm */
2147enum { 1980enum {
2148 CFS_SETUID_PERM = 0x01, 1981 CFS_SETUID_PERM = 0x01,
@@ -2178,8 +2011,6 @@ struct mdt_rec_setattr {
2178 __u32 sa_padding_5; 2011 __u32 sa_padding_5;
2179}; 2012};
2180 2013
2181void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2182
2183/* 2014/*
2184 * Attribute flags used in mdt_rec_setattr::sa_valid. 2015 * Attribute flags used in mdt_rec_setattr::sa_valid.
2185 * The kernel's #defines for ATTR_* should not be used over the network 2016 * The kernel's #defines for ATTR_* should not be used over the network
@@ -2207,12 +2038,9 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2207 2038
2208#define MDS_FMODE_CLOSED 00000000 2039#define MDS_FMODE_CLOSED 00000000
2209#define MDS_FMODE_EXEC 00000004 2040#define MDS_FMODE_EXEC 00000004
2210/* IO Epoch is opened on a closed file. */ 2041/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
2211#define MDS_FMODE_EPOCH 01000000 2042/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
2212/* IO Epoch is opened on a file truncate. */ 2043/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
2213#define MDS_FMODE_TRUNC 02000000
2214/* Size-on-MDS Attribute Update is pending. */
2215#define MDS_FMODE_SOM 04000000
2216 2044
2217#define MDS_OPEN_CREATED 00000010 2045#define MDS_OPEN_CREATED 00000010
2218#define MDS_OPEN_CROSS 00000020 2046#define MDS_OPEN_CROSS 00000020
@@ -2258,7 +2086,7 @@ enum mds_op_bias {
2258 MDS_CROSS_REF = 1 << 1, 2086 MDS_CROSS_REF = 1 << 1,
2259 MDS_VTX_BYPASS = 1 << 2, 2087 MDS_VTX_BYPASS = 1 << 2,
2260 MDS_PERM_BYPASS = 1 << 3, 2088 MDS_PERM_BYPASS = 1 << 3,
2261 MDS_SOM = 1 << 4, 2089/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
2262 MDS_QUOTA_IGNORE = 1 << 5, 2090 MDS_QUOTA_IGNORE = 1 << 5,
2263 MDS_CLOSE_CLEANUP = 1 << 6, 2091 MDS_CLOSE_CLEANUP = 1 << 6,
2264 MDS_KEEP_ORPHAN = 1 << 7, 2092 MDS_KEEP_ORPHAN = 1 << 7,
@@ -2268,6 +2096,7 @@ enum mds_op_bias {
2268 MDS_OWNEROVERRIDE = 1 << 11, 2096 MDS_OWNEROVERRIDE = 1 << 11,
2269 MDS_HSM_RELEASE = 1 << 12, 2097 MDS_HSM_RELEASE = 1 << 12,
2270 MDS_RENAME_MIGRATE = BIT(13), 2098 MDS_RENAME_MIGRATE = BIT(13),
2099 MDS_CLOSE_LAYOUT_SWAP = BIT(14),
2271}; 2100};
2272 2101
2273/* instance of mdt_reint_rec */ 2102/* instance of mdt_reint_rec */
@@ -2456,8 +2285,6 @@ struct mdt_rec_reint {
2456 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ 2285 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2457}; 2286};
2458 2287
2459void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2460
2461/* lmv structures */ 2288/* lmv structures */
2462struct lmv_desc { 2289struct lmv_desc {
2463 __u32 ld_tgt_count; /* how many MDS's */ 2290 __u32 ld_tgt_count; /* how many MDS's */
@@ -2547,8 +2374,6 @@ union lmv_mds_md {
2547 struct lmv_user_md lmv_user_md; 2374 struct lmv_user_md lmv_user_md;
2548}; 2375};
2549 2376
2550void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
2551
2552static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) 2377static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2553{ 2378{
2554 ssize_t len = -EINVAL; 2379 ssize_t len = -EINVAL;
@@ -2652,8 +2477,6 @@ struct lov_desc {
2652 2477
2653#define ld_magic ld_active_tgt_count /* for swabbing from llogs */ 2478#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2654 2479
2655void lustre_swab_lov_desc(struct lov_desc *ld);
2656
2657/* 2480/*
2658 * LDLM requests: 2481 * LDLM requests:
2659 */ 2482 */
@@ -2749,24 +2572,38 @@ struct ldlm_flock_wire {
2749 * on the resource type. 2572 * on the resource type.
2750 */ 2573 */
2751 2574
2752typedef union { 2575union ldlm_wire_policy_data {
2753 struct ldlm_extent l_extent; 2576 struct ldlm_extent l_extent;
2754 struct ldlm_flock_wire l_flock; 2577 struct ldlm_flock_wire l_flock;
2755 struct ldlm_inodebits l_inodebits; 2578 struct ldlm_inodebits l_inodebits;
2756} ldlm_wire_policy_data_t; 2579};
2757 2580
2758union ldlm_gl_desc { 2581union ldlm_gl_desc {
2759 struct ldlm_gl_lquota_desc lquota_desc; 2582 struct ldlm_gl_lquota_desc lquota_desc;
2760}; 2583};
2761 2584
2762void lustre_swab_gl_desc(union ldlm_gl_desc *); 2585enum ldlm_intent_flags {
2586 IT_OPEN = BIT(0),
2587 IT_CREAT = BIT(1),
2588 IT_OPEN_CREAT = BIT(1) | BIT(0),
2589 IT_READDIR = BIT(2),
2590 IT_GETATTR = BIT(3),
2591 IT_LOOKUP = BIT(4),
2592 IT_UNLINK = BIT(5),
2593 IT_TRUNC = BIT(6),
2594 IT_GETXATTR = BIT(7),
2595 IT_EXEC = BIT(8),
2596 IT_PIN = BIT(9),
2597 IT_LAYOUT = BIT(10),
2598 IT_QUOTA_DQACQ = BIT(11),
2599 IT_QUOTA_CONN = BIT(12),
2600 IT_SETXATTR = BIT(13),
2601};
2763 2602
2764struct ldlm_intent { 2603struct ldlm_intent {
2765 __u64 opc; 2604 __u64 opc;
2766}; 2605};
2767 2606
2768void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2769
2770struct ldlm_resource_desc { 2607struct ldlm_resource_desc {
2771 enum ldlm_type lr_type; 2608 enum ldlm_type lr_type;
2772 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ 2609 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
@@ -2777,7 +2614,7 @@ struct ldlm_lock_desc {
2777 struct ldlm_resource_desc l_resource; 2614 struct ldlm_resource_desc l_resource;
2778 enum ldlm_mode l_req_mode; 2615 enum ldlm_mode l_req_mode;
2779 enum ldlm_mode l_granted_mode; 2616 enum ldlm_mode l_granted_mode;
2780 ldlm_wire_policy_data_t l_policy_data; 2617 union ldlm_wire_policy_data l_policy_data;
2781}; 2618};
2782 2619
2783#define LDLM_LOCKREQ_HANDLES 2 2620#define LDLM_LOCKREQ_HANDLES 2
@@ -2790,8 +2627,6 @@ struct ldlm_request {
2790 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES]; 2627 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2791}; 2628};
2792 2629
2793void lustre_swab_ldlm_request(struct ldlm_request *rq);
2794
2795/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. 2630/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2796 * Otherwise, 2 are available. 2631 * Otherwise, 2 are available.
2797 */ 2632 */
@@ -2813,8 +2648,6 @@ struct ldlm_reply {
2813 __u64 lock_policy_res2; 2648 __u64 lock_policy_res2;
2814}; 2649};
2815 2650
2816void lustre_swab_ldlm_reply(struct ldlm_reply *r);
2817
2818#define ldlm_flags_to_wire(flags) ((__u32)(flags)) 2651#define ldlm_flags_to_wire(flags) ((__u32)(flags))
2819#define ldlm_flags_from_wire(flags) ((__u64)(flags)) 2652#define ldlm_flags_from_wire(flags) ((__u64)(flags))
2820 2653
@@ -2858,8 +2691,6 @@ struct mgs_target_info {
2858 char mti_params[MTI_PARAM_MAXLEN]; 2691 char mti_params[MTI_PARAM_MAXLEN];
2859}; 2692};
2860 2693
2861void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2862
2863struct mgs_nidtbl_entry { 2694struct mgs_nidtbl_entry {
2864 __u64 mne_version; /* table version of this entry */ 2695 __u64 mne_version; /* table version of this entry */
2865 __u32 mne_instance; /* target instance # */ 2696 __u32 mne_instance; /* target instance # */
@@ -2874,8 +2705,6 @@ struct mgs_nidtbl_entry {
2874 } u; 2705 } u;
2875}; 2706};
2876 2707
2877void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2878
2879struct mgs_config_body { 2708struct mgs_config_body {
2880 char mcb_name[MTI_NAME_MAXLEN]; /* logname */ 2709 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2881 __u64 mcb_offset; /* next index of config log to request */ 2710 __u64 mcb_offset; /* next index of config log to request */
@@ -2885,15 +2714,11 @@ struct mgs_config_body {
2885 __u32 mcb_units; /* # of units for bulk transfer */ 2714 __u32 mcb_units; /* # of units for bulk transfer */
2886}; 2715};
2887 2716
2888void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2889
2890struct mgs_config_res { 2717struct mgs_config_res {
2891 __u64 mcr_offset; /* index of last config log */ 2718 __u64 mcr_offset; /* index of last config log */
2892 __u64 mcr_size; /* size of the log */ 2719 __u64 mcr_size; /* size of the log */
2893}; 2720};
2894 2721
2895void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2896
2897/* Config marker flags (in config log) */ 2722/* Config marker flags (in config log) */
2898#define CM_START 0x01 2723#define CM_START 0x01
2899#define CM_END 0x02 2724#define CM_END 0x02
@@ -2913,8 +2738,6 @@ struct cfg_marker {
2913 char cm_comment[MTI_NAME_MAXLEN]; 2738 char cm_comment[MTI_NAME_MAXLEN];
2914}; 2739};
2915 2740
2916void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2917
2918/* 2741/*
2919 * Opcodes for multiple servers. 2742 * Opcodes for multiple servers.
2920 */ 2743 */
@@ -2922,7 +2745,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2922enum obd_cmd { 2745enum obd_cmd {
2923 OBD_PING = 400, 2746 OBD_PING = 400,
2924 OBD_LOG_CANCEL, 2747 OBD_LOG_CANCEL,
2925 OBD_QC_CALLBACK, 2748 OBD_QC_CALLBACK, /* not used since 2.4 */
2926 OBD_IDX_READ, 2749 OBD_IDX_READ,
2927 OBD_LAST_OPC 2750 OBD_LAST_OPC
2928}; 2751};
@@ -3155,23 +2978,32 @@ struct llog_gen_rec {
3155 struct llog_rec_tail lgr_tail; 2978 struct llog_rec_tail lgr_tail;
3156}; 2979};
3157 2980
3158/* On-disk header structure of each log object, stored in little endian order */
3159#define LLOG_CHUNK_SIZE 8192
3160#define LLOG_HEADER_SIZE (96)
3161#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3162
3163#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3164
3165/* flags for the logs */ 2981/* flags for the logs */
3166enum llog_flag { 2982enum llog_flag {
3167 LLOG_F_ZAP_WHEN_EMPTY = 0x1, 2983 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3168 LLOG_F_IS_CAT = 0x2, 2984 LLOG_F_IS_CAT = 0x2,
3169 LLOG_F_IS_PLAIN = 0x4, 2985 LLOG_F_IS_PLAIN = 0x4,
3170 LLOG_F_EXT_JOBID = BIT(3), 2986 LLOG_F_EXT_JOBID = BIT(3),
2987 LLOG_F_IS_FIXSIZE = BIT(4),
3171 2988
2989 /*
2990 * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
2991 * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
2992 * because the catlog record is usually fixed size, but its plain
2993 * log record can be variable
2994 */
3172 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, 2995 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
3173}; 2996};
3174 2997
2998/* On-disk header structure of each log object, stored in little endian order */
2999#define LLOG_MIN_CHUNK_SIZE 8192
3000#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) +
3001 * sizeof(llh_tail) - sizeof(llh_bitmap)
3002 */
3003#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
3004#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3005
3006/* flags for the logs */
3175struct llog_log_hdr { 3007struct llog_log_hdr {
3176 struct llog_rec_hdr llh_hdr; 3008 struct llog_rec_hdr llh_hdr;
3177 __s64 llh_timestamp; 3009 __s64 llh_timestamp;
@@ -3183,13 +3015,30 @@ struct llog_log_hdr {
3183 /* for a catalog the first plain slot is next to it */ 3015 /* for a catalog the first plain slot is next to it */
3184 struct obd_uuid llh_tgtuuid; 3016 struct obd_uuid llh_tgtuuid;
3185 __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23]; 3017 __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
3018 /* These fields must always be at the end of the llog_log_hdr.
3019 * Note: llh_bitmap size is variable because llog chunk size could be
3020 * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
3021 * bytes, and the real size is stored in llh_hdr.lrh_len, which means
3022 * llh_tail should only be referred by LLOG_HDR_TAIL().
 3023 * But this structure is also used by the client/server llog
 3024 * interface (see llog_client.c), so it is kept in its original
 3025 * form to avoid compatibility issues.
3026 */
3186 __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)]; 3027 __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
3187 struct llog_rec_tail llh_tail; 3028 struct llog_rec_tail llh_tail;
3188} __packed; 3029} __packed;
3189 3030
3190#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ 3031#undef LLOG_HEADER_SIZE
3191 llh->llh_bitmap_offset - \ 3032#undef LLOG_BITMAP_BYTES
3192 sizeof(llh->llh_tail)) * 8) 3033
3034#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3035 llh->llh_bitmap_offset - \
3036 sizeof(llh->llh_tail)) * 8)
3037#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
3038 (llh)->llh_bitmap_offset)
3039#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
3040 llh->llh_hdr.lrh_len - \
3041 sizeof(llh->llh_tail)))
3193 3042
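
To make the arithmetic in the two blocks above concrete: for the minimum 8 KiB chunk the fixed 96-byte header overhead leaves 8096 bytes of bitmap, i.e. 64768 record-index bits, while the LLOG_HDR_* macros recompute the same quantities at runtime from llh_hdr.lrh_len for larger chunks. A tiny stand-alone check of the compile-time case, reusing only the #defines shown in the hunk:

#include <stdio.h>

#define LLOG_MIN_CHUNK_SIZE	8192
#define LLOG_HEADER_SIZE	(96)
#define LLOG_BITMAP_BYTES	(LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)

int main(void)
{
	/* 8192 - 96 = 8096 bytes of bitmap in the smallest chunk ... */
	printf("bitmap bytes: %d\n", LLOG_BITMAP_BYTES);
	/* ... which indexes 8096 * 8 = 64768 llog records. */
	printf("bitmap bits : %d\n", LLOG_BITMAP_BYTES * 8);
	return 0;
}
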
3194/** log cookies are used to reference a specific log file and a record 3043/** log cookies are used to reference a specific log file and a record
3195 * therein 3044 * therein
@@ -3259,7 +3108,8 @@ struct obdo {
3259 __u32 o_parent_ver; 3108 __u32 o_parent_ver;
3260 struct lustre_handle o_handle; /* brw: lock handle to prolong locks 3109 struct lustre_handle o_handle; /* brw: lock handle to prolong locks
3261 */ 3110 */
3262 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS 3111 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS,
3112 * obsolete in 2.8, reused in OSP
3263 */ 3113 */
3264 __u32 o_uid_h; 3114 __u32 o_uid_h;
3265 __u32 o_gid_h; 3115 __u32 o_gid_h;
@@ -3333,30 +3183,11 @@ struct ost_body {
3333 3183
3334/* Key for FIEMAP to be used in get_info calls */ 3184/* Key for FIEMAP to be used in get_info calls */
3335struct ll_fiemap_info_key { 3185struct ll_fiemap_info_key {
3336 char name[8]; 3186 char lfik_name[8];
3337 struct obdo oa; 3187 struct obdo lfik_oa;
3338 struct ll_user_fiemap fiemap; 3188 struct fiemap lfik_fiemap;
3339}; 3189};
3340 3190
3341void lustre_swab_ost_body(struct ost_body *b);
3342void lustre_swab_ost_last_id(__u64 *id);
3343void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3344
3345void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3346void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3347void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3348 int stripe_count);
3349void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3350
3351/* llog_swab.c */
3352void lustre_swab_llogd_body(struct llogd_body *d);
3353void lustre_swab_llog_hdr(struct llog_log_hdr *h);
3354void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
3355void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3356
3357struct lustre_cfg;
3358void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3359
3360/* Functions for dumping PTLRPC fields */ 3191/* Functions for dumping PTLRPC fields */
3361void dump_rniobuf(struct niobuf_remote *rnb); 3192void dump_rniobuf(struct niobuf_remote *rnb);
3362void dump_ioo(struct obd_ioobj *nb); 3193void dump_ioo(struct obd_ioobj *nb);
@@ -3394,8 +3225,6 @@ struct lustre_capa {
3394 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ 3225 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3395} __packed; 3226} __packed;
3396 3227
3397void lustre_swab_lustre_capa(struct lustre_capa *c);
3398
3399/** lustre_capa::lc_opc */ 3228/** lustre_capa::lc_opc */
3400enum { 3229enum {
3401 CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */ 3230 CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
@@ -3458,8 +3287,6 @@ struct getinfo_fid2path {
3458 char gf_path[0]; 3287 char gf_path[0];
3459} __packed; 3288} __packed;
3460 3289
3461void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3462
3463/** path2parent request/reply structures */ 3290/** path2parent request/reply structures */
3464struct getparent { 3291struct getparent {
3465 struct lu_fid gp_fid; /**< parent FID */ 3292 struct lu_fid gp_fid; /**< parent FID */
@@ -3486,8 +3313,6 @@ struct layout_intent {
3486 __u64 li_end; 3313 __u64 li_end;
3487}; 3314};
3488 3315
3489void lustre_swab_layout_intent(struct layout_intent *li);
3490
3491/** 3316/**
3492 * On the wire version of hsm_progress structure. 3317 * On the wire version of hsm_progress structure.
3493 * 3318 *
@@ -3506,13 +3331,6 @@ struct hsm_progress_kernel {
3506 __u64 hpk_padding2; 3331 __u64 hpk_padding2;
3507} __packed; 3332} __packed;
3508 3333
3509void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3510void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3511void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3512void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3513void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3514void lustre_swab_hsm_request(struct hsm_request *hr);
3515
3516/** layout swap request structure 3334/** layout swap request structure
3517 * fid1 and fid2 are in mdt_body 3335 * fid1 and fid2 are in mdt_body
3518 */ 3336 */
@@ -3520,8 +3338,6 @@ struct mdc_swap_layouts {
3520 __u64 msl_flags; 3338 __u64 msl_flags;
3521} __packed; 3339} __packed;
3522 3340
3523void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3524
3525struct close_data { 3341struct close_data {
3526 struct lustre_handle cd_handle; 3342 struct lustre_handle cd_handle;
3527 struct lu_fid cd_fid; 3343 struct lu_fid cd_fid;
@@ -3529,7 +3345,5 @@ struct close_data {
3529 __u64 cd_reserved[8]; 3345 __u64 cd_reserved[8];
3530}; 3346};
3531 3347
3532void lustre_swab_close_data(struct close_data *data);
3533
3534#endif 3348#endif
3535/** @} lustreidl */ 3349/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
index f3d7c94c3b50..eb08df33b2db 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
@@ -363,8 +363,8 @@ obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len)
363/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */ 363/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */
364/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */ 364/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */
365/* lustre/lustre_user.h 157-159 */ 365/* lustre/lustre_user.h 157-159 */
366#define OBD_IOC_QUOTACHECK _IOW('f', 160, int) 366/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */
367#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) 367/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */
368#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl) 368#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
369/* lustre/lustre_user.h 163-176 */ 369/* lustre/lustre_user.h 163-176 */
370#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data) 370#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc985571cba..3301ad652db1 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -63,9 +63,13 @@
63#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64) 63#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
64typedef struct stat64 lstat_t; 64typedef struct stat64 lstat_t;
65#define lstat_f lstat64 65#define lstat_f lstat64
66#define fstat_f fstat64
67#define fstatat_f fstatat64
66#else 68#else
67typedef struct stat lstat_t; 69typedef struct stat lstat_t;
68#define lstat_f lstat 70#define lstat_f lstat
71#define fstat_f fstat
72#define fstatat_f fstatat
69#endif 73#endif
70 74
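
For context, the two new aliases simply extend the existing pattern: user-space code can call fstat_f()/fstatat_f() and have them resolve to either the stat64 or the plain variants depending on which branch above is taken. A minimal sketch of the 64-bit branch (assuming an ordinary glibc build where struct stat is already the wide form):

#include <stdio.h>
#include <sys/stat.h>

/* Mirrors the #else branch above on a 64-bit build. */
typedef struct stat lstat_t;
#define lstat_f	lstat
#define fstat_f	fstat

int main(void)
{
	lstat_t st;

	if (lstat_f("/etc/hostname", &st) == 0)	/* expands to lstat() */
		printf("size: %lld bytes\n", (long long)st.st_size);
	return 0;
}
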
71#define HAVE_LOV_USER_MDS_DATA 75#define HAVE_LOV_USER_MDS_DATA
@@ -82,7 +86,6 @@ typedef struct stat lstat_t;
82#define FSFILT_IOC_SETVERSION _IOW('f', 4, long) 86#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
83#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long) 87#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
84#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long) 88#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
85#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap)
86#endif 89#endif
87 90
88/* FIEMAP flags supported by Lustre */ 91/* FIEMAP flags supported by Lustre */
@@ -235,7 +238,7 @@ struct ost_id {
235/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */ 238/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */
236/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */ 239/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */
237#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *) 240#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
238#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *) 241/* IOC_LOV_GETINFO 165 obsolete */
239#define LL_IOC_FLUSHCTX _IOW('f', 166, long) 242#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
240/* LL_IOC_RMTACL 167 obsolete */ 243/* LL_IOC_RMTACL 167 obsolete */
241#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long) 244#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
@@ -343,6 +346,9 @@ enum ll_lease_type {
343#define LOV_ALL_STRIPES 0xffff /* only valid for directories */ 346#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
344#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ 347#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
345 348
349#define XATTR_LUSTRE_PREFIX "lustre."
350#define XATTR_LUSTRE_LOV "lustre.lov"
351
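
The two defines above give the user-visible extended-attribute name under which the LOV layout travels. A hypothetical user-space probe is sketched below; the path and buffer size are arbitrary, and reading the EA this way assumes a mounted Lustre client:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

#define XATTR_LUSTRE_LOV "lustre.lov"

int main(void)
{
	char buf[4096];
	ssize_t len = getxattr("/mnt/lustre/somefile", XATTR_LUSTRE_LOV,
			       buf, sizeof(buf));

	if (len < 0)
		perror("getxattr");
	else
		printf("%s is %zd bytes\n", XATTR_LUSTRE_LOV, len);
	return 0;
}
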
346#define lov_user_ost_data lov_user_ost_data_v1 352#define lov_user_ost_data lov_user_ost_data_v1
347struct lov_user_ost_data_v1 { /* per-stripe data structure */ 353struct lov_user_ost_data_v1 { /* per-stripe data structure */
348 struct ost_id l_ost_oi; /* OST object ID */ 354 struct ost_id l_ost_oi; /* OST object ID */
@@ -451,8 +457,6 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
451 stripes * sizeof(struct lmv_user_mds_data); 457 stripes * sizeof(struct lmv_user_mds_data);
452} 458}
453 459
454void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
455
456struct ll_recreate_obj { 460struct ll_recreate_obj {
457 __u64 lrc_id; 461 __u64 lrc_id;
458 __u32 lrc_ost_idx; 462 __u32 lrc_ost_idx;
@@ -522,25 +526,20 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
522} 526}
523 527
524/* printf display format 528/* printf display format
525 * e.g. printf("file FID is "DFID"\n", PFID(fid)); 529 * * usage: printf("file FID is "DFID"\n", PFID(fid));
526 */ 530 */
527#define FID_NOBRACE_LEN 40 531#define FID_NOBRACE_LEN 40
528#define FID_LEN (FID_NOBRACE_LEN + 2) 532#define FID_LEN (FID_NOBRACE_LEN + 2)
529#define DFID_NOBRACE "%#llx:0x%x:0x%x" 533#define DFID_NOBRACE "%#llx:0x%x:0x%x"
530#define DFID "["DFID_NOBRACE"]" 534#define DFID "["DFID_NOBRACE"]"
531#define PFID(fid) \ 535#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
532 (fid)->f_seq, \
533 (fid)->f_oid, \
534 (fid)->f_ver
535 536
536/* scanf input parse format -- strip '[' first. 537/* scanf input parse format for fids in DFID_NOBRACE format
537 * e.g. sscanf(fidstr, SFID, RFID(&fid)); 538 * Need to strip '[' from DFID format first or use "["SFID"]" at caller.
539 * usage: sscanf(fidstr, SFID, RFID(&fid));
538 */ 540 */
539#define SFID "0x%llx:0x%x:0x%x" 541#define SFID "0x%llx:0x%x:0x%x"
540#define RFID(fid) \ 542#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)
541 &((fid)->f_seq), \
542 &((fid)->f_oid), \
543 &((fid)->f_ver)
544 543
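
Putting the display and parse macros above together: DFID/PFID format a FID for printing, SFID/RFID parse one back after the leading '[' is skipped. A self-contained user-space sketch (the struct lu_fid here is a stand-in; its field widths are an assumption for illustration):

#include <stdio.h>

/* Minimal stand-in for struct lu_fid. */
struct lu_fid {
	unsigned long long	f_seq;
	unsigned int		f_oid;
	unsigned int		f_ver;
};

#define FID_NOBRACE_LEN	40
#define FID_LEN		(FID_NOBRACE_LEN + 2)
#define DFID_NOBRACE	"%#llx:0x%x:0x%x"
#define DFID		"["DFID_NOBRACE"]"
#define PFID(fid)	(unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
#define SFID		"0x%llx:0x%x:0x%x"
#define RFID(fid)	&((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)

int main(void)
{
	struct lu_fid fid = { 0x200000401ULL, 0x7, 0x0 }, parsed = { 0, 0, 0 };
	char buf[FID_LEN + 1];

	snprintf(buf, sizeof(buf), DFID, PFID(&fid));	/* "[0x200000401:0x7:0x0]" */
	printf("file FID is %s\n", buf);

	/* Parse it back; the '[' is skipped explicitly, as the comment advises. */
	sscanf(buf + 1, SFID, RFID(&parsed));
	printf("round trip: "DFID"\n", PFID(&parsed));
	return 0;
}
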
545/********* Quotas **********/ 544/********* Quotas **********/
546 545
@@ -551,23 +550,18 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
551#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ 550#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
552 551
553/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */ 552/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
554#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */ 553#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */
555#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */ 554#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */
556#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */ 555#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
557#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */ 556#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
558#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */ 557#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
559#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */ 558#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
560/* lustre-specific control commands */ 559/* lustre-specific control commands */
561#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */ 560#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */
562#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */ 561#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */
563 562
564#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */ 563#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
565 564
566struct if_quotacheck {
567 char obd_type[16];
568 struct obd_uuid obd_uuid;
569};
570
571#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629 565#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
572 566
573/* permission */ 567/* permission */
@@ -649,6 +643,7 @@ struct if_quotactl {
649#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1) 643#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
650#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2) 644#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
651#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3) 645#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
646#define SWAP_LAYOUTS_CLOSE BIT(4)
652 647
653/* Swap XATTR_NAME_HSM as well, only on the MDT so far */ 648/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
654#define SWAP_LAYOUTS_MDS_HSM (1 << 31) 649#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
@@ -999,6 +994,7 @@ struct ioc_data_version {
999 * See HSM_FLAGS below. 994 * See HSM_FLAGS below.
1000 */ 995 */
1001enum hsm_states { 996enum hsm_states {
997 HS_NONE = 0x00000000,
1002 HS_EXISTS = 0x00000001, 998 HS_EXISTS = 0x00000001,
1003 HS_DIRTY = 0x00000002, 999 HS_DIRTY = 0x00000002,
1004 HS_RELEASED = 0x00000004, 1000 HS_RELEASED = 0x00000004,
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 567c438e93cb..300e96fb032a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -74,4 +74,6 @@
74# define ext2_find_next_zero_bit find_next_zero_bit_le 74# define ext2_find_next_zero_bit find_next_zero_bit_le
75#endif 75#endif
76 76
77#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
78
77#endif /* _LUSTRE_COMPAT_H */ 79#endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index d03534432624..b7e61d082e55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -59,7 +59,7 @@ struct obd_device;
59#define OBD_LDLM_DEVICENAME "ldlm" 59#define OBD_LDLM_DEVICENAME "ldlm"
60 60
61#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus()) 61#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
62#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000)) 62#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
63#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024 63#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
64 64
65/** 65/**
@@ -86,10 +86,10 @@ enum ldlm_error {
86 * decisions about lack of conflicts or do any autonomous lock granting without 86 * decisions about lack of conflicts or do any autonomous lock granting without
87 * first speaking to a server. 87 * first speaking to a server.
88 */ 88 */
89typedef enum { 89enum ldlm_side {
90 LDLM_NAMESPACE_SERVER = 1 << 0, 90 LDLM_NAMESPACE_SERVER = 1 << 0,
91 LDLM_NAMESPACE_CLIENT = 1 << 1 91 LDLM_NAMESPACE_CLIENT = 1 << 1
92} ldlm_side_t; 92};
93 93
94/** 94/**
95 * The blocking callback is overloaded to perform two functions. These flags 95 * The blocking callback is overloaded to perform two functions. These flags
@@ -359,7 +359,7 @@ struct ldlm_namespace {
359 struct obd_device *ns_obd; 359 struct obd_device *ns_obd;
360 360
361 /** Flag indicating if namespace is on client instead of server */ 361 /** Flag indicating if namespace is on client instead of server */
362 ldlm_side_t ns_client; 362 enum ldlm_side ns_client;
363 363
364 /** Resource hash table for namespace. */ 364 /** Resource hash table for namespace. */
365 struct cfs_hash *ns_rs_hash; 365 struct cfs_hash *ns_rs_hash;
@@ -550,20 +550,18 @@ struct ldlm_flock {
550 __u64 owner; 550 __u64 owner;
551 __u64 blocking_owner; 551 __u64 blocking_owner;
552 struct obd_export *blocking_export; 552 struct obd_export *blocking_export;
553 /* Protected by the hash lock */
554 __u32 blocking_refs;
555 __u32 pid; 553 __u32 pid;
556}; 554};
557 555
558typedef union { 556union ldlm_policy_data {
559 struct ldlm_extent l_extent; 557 struct ldlm_extent l_extent;
560 struct ldlm_flock l_flock; 558 struct ldlm_flock l_flock;
561 struct ldlm_inodebits l_inodebits; 559 struct ldlm_inodebits l_inodebits;
562} ldlm_policy_data_t; 560};
563 561
564void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, 562void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
565 const ldlm_wire_policy_data_t *wpolicy, 563 const union ldlm_wire_policy_data *wpolicy,
566 ldlm_policy_data_t *lpolicy); 564 union ldlm_policy_data *lpolicy);
567 565
568enum lvb_type { 566enum lvb_type {
569 LVB_T_NONE = 0, 567 LVB_T_NONE = 0,
@@ -692,7 +690,7 @@ struct ldlm_lock {
692 * Representation of private data specific for a lock type. 690 * Representation of private data specific for a lock type.
693 * Examples are: extent range for extent lock or bitmask for ibits locks 691 * Examples are: extent range for extent lock or bitmask for ibits locks
694 */ 692 */
695 ldlm_policy_data_t l_policy_data; 693 union ldlm_policy_data l_policy_data;
696 694
697 /** 695 /**
698 * Lock state flags. Protected by lr_lock. 696 * Lock state flags. Protected by lr_lock.
@@ -967,8 +965,8 @@ struct ldlm_ast_work {
967 * Common ldlm_enqueue parameters 965 * Common ldlm_enqueue parameters
968 */ 966 */
969struct ldlm_enqueue_info { 967struct ldlm_enqueue_info {
970 __u32 ei_type; /** Type of the lock being enqueued. */ 968 enum ldlm_type ei_type; /** Type of the lock being enqueued. */
971 __u32 ei_mode; /** Mode of the lock being enqueued. */ 969 enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */
972 void *ei_cb_bl; /** blocking lock callback */ 970 void *ei_cb_bl; /** blocking lock callback */
973 void *ei_cb_cp; /** lock completion callback */ 971 void *ei_cb_cp; /** lock completion callback */
974 void *ei_cb_gl; /** lock glimpse callback */ 972 void *ei_cb_gl; /** lock glimpse callback */
@@ -979,7 +977,7 @@ struct ldlm_enqueue_info {
979extern struct obd_ops ldlm_obd_ops; 977extern struct obd_ops ldlm_obd_ops;
980 978
981extern char *ldlm_lockname[]; 979extern char *ldlm_lockname[];
982char *ldlm_it2str(int it); 980const char *ldlm_it2str(enum ldlm_intent_flags it);
983 981
984/** 982/**
985 * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG. 983 * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
@@ -1168,16 +1166,18 @@ do { \
1168struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock); 1166struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
1169void ldlm_lock_put(struct ldlm_lock *lock); 1167void ldlm_lock_put(struct ldlm_lock *lock);
1170void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc); 1168void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
1171void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode); 1169void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
1172int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode); 1170int ldlm_lock_addref_try(const struct lustre_handle *lockh,
1173void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode); 1171 enum ldlm_mode mode);
1174void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode); 1172void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
1173void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
1174 enum ldlm_mode mode);
1175void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); 1175void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
1176void ldlm_lock_allow_match(struct ldlm_lock *lock); 1176void ldlm_lock_allow_match(struct ldlm_lock *lock);
1177void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); 1177void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
1178enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, 1178enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1179 const struct ldlm_res_id *, 1179 const struct ldlm_res_id *,
1180 enum ldlm_type type, ldlm_policy_data_t *, 1180 enum ldlm_type type, union ldlm_policy_data *,
1181 enum ldlm_mode mode, struct lustre_handle *, 1181 enum ldlm_mode mode, struct lustre_handle *,
1182 int unref); 1182 int unref);
1183enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh, 1183enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
@@ -1189,7 +1189,7 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
1189/* resource.c */ 1189/* resource.c */
1190struct ldlm_namespace * 1190struct ldlm_namespace *
1191ldlm_namespace_new(struct obd_device *obd, char *name, 1191ldlm_namespace_new(struct obd_device *obd, char *name,
1192 ldlm_side_t client, enum ldlm_appetite apt, 1192 enum ldlm_side client, enum ldlm_appetite apt,
1193 enum ldlm_ns_type ns_type); 1193 enum ldlm_ns_type ns_type);
1194int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); 1194int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
1195void ldlm_namespace_get(struct ldlm_namespace *ns); 1195void ldlm_namespace_get(struct ldlm_namespace *ns);
@@ -1208,7 +1208,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res,
1208 struct ldlm_lock *lock); 1208 struct ldlm_lock *lock);
1209void ldlm_resource_unlink_lock(struct ldlm_lock *lock); 1209void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
1210void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc); 1210void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
1211void ldlm_dump_all_namespaces(ldlm_side_t client, int level); 1211void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
1212void ldlm_namespace_dump(int level, struct ldlm_namespace *); 1212void ldlm_namespace_dump(int level, struct ldlm_namespace *);
1213void ldlm_resource_dump(int level, struct ldlm_resource *); 1213void ldlm_resource_dump(int level, struct ldlm_resource *);
1214int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, 1214int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
@@ -1241,7 +1241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
1241int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, 1241int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
1242 struct ldlm_enqueue_info *einfo, 1242 struct ldlm_enqueue_info *einfo,
1243 const struct ldlm_res_id *res_id, 1243 const struct ldlm_res_id *res_id,
1244 ldlm_policy_data_t const *policy, __u64 *flags, 1244 union ldlm_policy_data const *policy, __u64 *flags,
1245 void *lvb, __u32 lvb_len, enum lvb_type lvb_type, 1245 void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
1246 struct lustre_handle *lockh, int async); 1246 struct lustre_handle *lockh, int async);
1247int ldlm_prep_enqueue_req(struct obd_export *exp, 1247int ldlm_prep_enqueue_req(struct obd_export *exp,
@@ -1265,13 +1265,13 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
1265 enum ldlm_cancel_flags flags, void *opaque); 1265 enum ldlm_cancel_flags flags, void *opaque);
1266int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, 1266int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1267 const struct ldlm_res_id *res_id, 1267 const struct ldlm_res_id *res_id,
1268 ldlm_policy_data_t *policy, 1268 union ldlm_policy_data *policy,
1269 enum ldlm_mode mode, 1269 enum ldlm_mode mode,
1270 enum ldlm_cancel_flags flags, 1270 enum ldlm_cancel_flags flags,
1271 void *opaque); 1271 void *opaque);
1272int ldlm_cancel_resource_local(struct ldlm_resource *res, 1272int ldlm_cancel_resource_local(struct ldlm_resource *res,
1273 struct list_head *cancels, 1273 struct list_head *cancels,
1274 ldlm_policy_data_t *policy, 1274 union ldlm_policy_data *policy,
1275 enum ldlm_mode mode, __u64 lock_flags, 1275 enum ldlm_mode mode, __u64 lock_flags,
1276 enum ldlm_cancel_flags cancel_flags, 1276 enum ldlm_cancel_flags cancel_flags,
1277 void *opaque); 1277 void *opaque);
@@ -1333,7 +1333,7 @@ int ldlm_pools_init(void);
1333void ldlm_pools_fini(void); 1333void ldlm_pools_fini(void);
1334 1334
1335int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, 1335int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
1336 int idx, ldlm_side_t client); 1336 int idx, enum ldlm_side client);
1337void ldlm_pool_fini(struct ldlm_pool *pl); 1337void ldlm_pool_fini(struct ldlm_pool *pl);
1338void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock); 1338void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
1339void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock); 1339void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 316780693193..b5a1aadbcb93 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -150,6 +150,7 @@
150 150
151#include "../../include/linux/libcfs/libcfs.h" 151#include "../../include/linux/libcfs/libcfs.h"
152#include "lustre/lustre_idl.h" 152#include "lustre/lustre_idl.h"
153#include "seq_range.h"
153 154
154struct lu_env; 155struct lu_env;
155struct lu_site; 156struct lu_site;
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 932410d3e3cc..6ef1b03cb986 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -103,8 +103,6 @@ struct lu_client_fld {
103 103
104 /** Client fld debugfs entry name. */ 104 /** Client fld debugfs entry name. */
105 char lcf_name[LUSTRE_MDT_MAXNAMELEN]; 105 char lcf_name[LUSTRE_MDT_MAXNAMELEN];
106
107 int lcf_flags;
108}; 106};
109 107
110/* Client methods */ 108/* Client methods */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index cde7ed702c86..dec1e99d594d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -53,6 +53,7 @@ void ptlrpc_activate_import(struct obd_import *imp);
53void ptlrpc_deactivate_import(struct obd_import *imp); 53void ptlrpc_deactivate_import(struct obd_import *imp);
54void ptlrpc_invalidate_import(struct obd_import *imp); 54void ptlrpc_invalidate_import(struct obd_import *imp);
55void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt); 55void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
56void ptlrpc_pinger_force(struct obd_import *imp);
56 57
57/** @} ha */ 58/** @} ha */
58 59
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 5461ba33d90c..f0c931ce1a67 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -185,6 +185,11 @@ struct obd_import {
185 struct list_head *imp_replay_cursor; 185 struct list_head *imp_replay_cursor;
186 /** @} */ 186 /** @} */
187 187
188 /** List of not replied requests */
189 struct list_head imp_unreplied_list;
190 /** Known maximal replied XID */
191 __u64 imp_known_replied_xid;
192
188 /** obd device for this import */ 193 /** obd device for this import */
189 struct obd_device *imp_obd; 194 struct obd_device *imp_obd;
190 195
@@ -294,7 +299,9 @@ struct obd_import {
294 */ 299 */
295 imp_force_reconnect:1, 300 imp_force_reconnect:1,
296 /* import has tried to connect with server */ 301 /* import has tried to connect with server */
297 imp_connect_tried:1; 302 imp_connect_tried:1,
303 /* connected but not FULL yet */
304 imp_connected:1;
298 __u32 imp_connect_op; 305 __u32 imp_connect_op;
299 struct obd_connect_data imp_connect_data; 306 struct obd_connect_data imp_connect_data;
300 __u64 imp_connect_flags_orig; 307 __u64 imp_connect_flags_orig;
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 6b231913ba2e..27f3148c4344 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -350,8 +350,6 @@ do { \
350 l_wait_event_exclusive_head(wq, condition, &lwi); \ 350 l_wait_event_exclusive_head(wq, condition, &lwi); \
351}) 351})
352 352
353#define LIBLUSTRE_CLIENT (0)
354
355/** @} lib */ 353/** @} lib */
356 354
357#endif /* _LUSTRE_LIB_H */ 355#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index d7f7afa8dfa7..5aa3645e64dc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -76,18 +76,7 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
76 76
77union lmv_mds_md; 77union lmv_mds_md;
78 78
79int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, 79void lmv_free_memmd(struct lmv_stripe_md *lsm);
80 const union lmv_mds_md *lmm, int stripe_count);
81
82static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count)
83{
84 return lmv_unpack_md(NULL, lsmp, NULL, stripe_count);
85}
86
87static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
88{
89 lmv_unpack_md(NULL, &lsm, NULL, 0);
90}
91 80
92static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst, 81static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
93 const struct lmv_mds_md_v1 *lmv_src) 82 const struct lmv_mds_md_v1 *lmv_src)
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 995b266932e3..35e37eb1bc2c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -214,6 +214,7 @@ struct llog_handle {
214 spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */ 214 spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
215 struct llog_logid lgh_id; /* id of this log */ 215 struct llog_logid lgh_id; /* id of this log */
216 struct llog_log_hdr *lgh_hdr; 216 struct llog_log_hdr *lgh_hdr;
217 size_t lgh_hdr_size;
217 int lgh_last_idx; 218 int lgh_last_idx;
218 int lgh_cur_idx; /* used during llog_process */ 219 int lgh_cur_idx; /* used during llog_process */
219 __u64 lgh_cur_offset; /* used during llog_process */ 220 __u64 lgh_cur_offset; /* used during llog_process */
@@ -244,6 +245,11 @@ struct llog_ctxt {
244 struct mutex loc_mutex; /* protect loc_imp */ 245 struct mutex loc_mutex; /* protect loc_imp */
245 atomic_t loc_refcount; 246 atomic_t loc_refcount;
246 long loc_flags; /* flags, see above defines */ 247 long loc_flags; /* flags, see above defines */
248 /*
249 * llog chunk size, and llog record size can not be bigger than
250 * loc_chunk_size
251 */
252 __u32 loc_chunk_size;
247}; 253};
248 254
249#define LLOG_PROC_BREAK 0x0001 255#define LLOG_PROC_BREAK 0x0001
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 8fc2d3f2dfd6..198ceb0c66f9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -156,16 +156,39 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
156 mutex_unlock(&lck->rpcl_mutex); 156 mutex_unlock(&lck->rpcl_mutex);
157} 157}
158 158
159static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req,
160 struct lookup_intent *it)
161{
162 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
163 u32 opc;
164 u16 tag;
165
166 opc = lustre_msg_get_opc(req->rq_reqmsg);
167 tag = obd_get_mod_rpc_slot(cli, opc, it);
168 lustre_msg_set_tag(req->rq_reqmsg, tag);
169}
170
171static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req,
172 struct lookup_intent *it)
173{
174 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
175 u32 opc;
176 u16 tag;
177
178 opc = lustre_msg_get_opc(req->rq_reqmsg);
179 tag = lustre_msg_get_tag(req->rq_reqmsg);
180 obd_put_mod_rpc_slot(cli, opc, it, tag);
181}
182
159/** 183/**
160 * Update the maximum possible easize and cookiesize. 184 * Update the maximum possible easize.
161 * 185 *
162 * The values are learned from ptlrpc replies sent by the MDT. The 186 * This value is learned from ptlrpc replies sent by the MDT. The
163 * default easize and cookiesize is initialized to the minimum value but 187 * default easize is initialized to the minimum value but allowed
164 * allowed to grow up to a single page in size if required to handle the 188 * to grow up to a single page in size if required to handle the
165 * common case. 189 * common case.
166 * 190 *
167 * \see client_obd::cl_default_mds_easize and 191 * \see client_obd::cl_default_mds_easize
168 * client_obd::cl_default_mds_cookiesize
169 * 192 *
170 * \param[in] exp export for MDC device 193 * \param[in] exp export for MDC device
171 * \param[in] body body of ptlrpc reply from MDT 194 * \param[in] body body of ptlrpc reply from MDT
@@ -176,7 +199,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
176{ 199{
177 if (body->mbo_valid & OBD_MD_FLMODEASIZE) { 200 if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
178 struct client_obd *cli = &exp->exp_obd->u.cli; 201 struct client_obd *cli = &exp->exp_obd->u.cli;
179 u32 def_cookiesize, def_easize; 202 u32 def_easize;
180 203
181 if (cli->cl_max_mds_easize < body->mbo_max_mdsize) 204 if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
182 cli->cl_max_mds_easize = body->mbo_max_mdsize; 205 cli->cl_max_mds_easize = body->mbo_max_mdsize;
@@ -184,13 +207,6 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
184 def_easize = min_t(__u32, body->mbo_max_mdsize, 207 def_easize = min_t(__u32, body->mbo_max_mdsize,
185 OBD_MAX_DEFAULT_EA_SIZE); 208 OBD_MAX_DEFAULT_EA_SIZE);
186 cli->cl_default_mds_easize = def_easize; 209 cli->cl_default_mds_easize = def_easize;
187
188 if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize)
189 cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize;
190
191 def_cookiesize = min_t(__u32, body->mbo_max_cookiesize,
192 OBD_MAX_DEFAULT_COOKIE_SIZE);
193 cli->cl_default_mds_cookiesize = def_cookiesize;
194 } 210 }
195} 211}
196 212
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index e9aba99ee52a..411eb0dc7f38 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -50,6 +50,7 @@
50 * @{ 50 * @{
51 */ 51 */
52 52
53#include <linux/uio.h>
53#include "../../include/linux/libcfs/libcfs.h" 54#include "../../include/linux/libcfs/libcfs.h"
54#include "../../include/linux/lnet/nidstr.h" 55#include "../../include/linux/lnet/nidstr.h"
55#include "../../include/linux/lnet/api.h" 56#include "../../include/linux/lnet/api.h"
@@ -68,13 +69,17 @@
68#define PTLRPC_MD_OPTIONS 0 69#define PTLRPC_MD_OPTIONS 0
69 70
70/** 71/**
71 * Max # of bulk operations in one request. 72 * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ...
72 * In order for the client and server to properly negotiate the maximum 73 * In order for the client and server to properly negotiate the maximum
73 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two 74 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
74 * value. The client is free to limit the actual RPC size for any bulk 75 * value. The client is free to limit the actual RPC size for any bulk
75 * transfer via cl_max_pages_per_rpc to some non-power-of-two value. 76 * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
77 * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS.
76 */ 78 */
77#define PTLRPC_BULK_OPS_BITS 2 79#define PTLRPC_BULK_OPS_BITS 4
80#if PTLRPC_BULK_OPS_BITS > 16
81#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
82#endif
78#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) 83#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
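
A quick back-of-the-envelope check of the comment above: assuming the usual 1 MiB carried per bulk descriptor (an assumption for illustration, not something this hunk states), the log2 count works out as 2 -> 4 MiB, the new value 4 -> 16 MiB, and 5 -> 32 MiB per RPC, with IOOBJ_MAX_BRW_BITS capping the exponent at 16:

#include <stdio.h>

#define PTLRPC_BULK_OPS_BITS	4			/* value set in this hunk */
#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)

int main(void)
{
	/* Assumption for illustration only: 1 MiB per bulk operation. */
	unsigned int mib_per_bulk_op = 1;

	printf("%u bulk ops/RPC -> %u MiB/RPC\n",
	       PTLRPC_BULK_OPS_COUNT, PTLRPC_BULK_OPS_COUNT * mib_per_bulk_op);
	return 0;
}
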
79/** 84/**
80 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and 85 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
@@ -437,6 +442,10 @@ struct ptlrpc_reply_state {
437 unsigned long rs_committed:1;/* the transaction was committed 442 unsigned long rs_committed:1;/* the transaction was committed
438 * and the rs was dispatched 443 * and the rs was dispatched
439 */ 444 */
445 atomic_t rs_refcount; /* number of users */
446 /** Number of locks awaiting client ACK */
447 int rs_nlocks;
448
440 /** Size of the state */ 449 /** Size of the state */
441 int rs_size; 450 int rs_size;
442 /** opcode */ 451 /** opcode */
@@ -449,7 +458,6 @@ struct ptlrpc_reply_state {
449 struct ptlrpc_service_part *rs_svcpt; 458 struct ptlrpc_service_part *rs_svcpt;
450 /** Lnet metadata handle for the reply */ 459 /** Lnet metadata handle for the reply */
451 lnet_handle_md_t rs_md_h; 460 lnet_handle_md_t rs_md_h;
452 atomic_t rs_refcount;
453 461
454 /** Context for the service thread */ 462 /** Context for the service thread */
455 struct ptlrpc_svc_ctx *rs_svc_ctx; 463 struct ptlrpc_svc_ctx *rs_svc_ctx;
@@ -466,8 +474,6 @@ struct ptlrpc_reply_state {
466 */ 474 */
467 struct lustre_msg *rs_msg; /* reply message */ 475 struct lustre_msg *rs_msg; /* reply message */
468 476
469 /** Number of locks awaiting client ACK */
470 int rs_nlocks;
471 /** Handles of locks awaiting client reply ACK */ 477 /** Handles of locks awaiting client reply ACK */
472 struct lustre_handle rs_locks[RS_MAX_LOCKS]; 478 struct lustre_handle rs_locks[RS_MAX_LOCKS];
473 /** Lock modes of locks in \a rs_locks */ 479 /** Lock modes of locks in \a rs_locks */
@@ -515,717 +521,7 @@ struct lu_env;
515 521
516struct ldlm_lock; 522struct ldlm_lock;
517 523
518/** 524#include "lustre_nrs.h"
519 * \defgroup nrs Network Request Scheduler
520 * @{
521 */
522struct ptlrpc_nrs_policy;
523struct ptlrpc_nrs_resource;
524struct ptlrpc_nrs_request;
525
526/**
527 * NRS control operations.
528 *
529 * These are common for all policies.
530 */
531enum ptlrpc_nrs_ctl {
532 /**
533 * Not a valid opcode.
534 */
535 PTLRPC_NRS_CTL_INVALID,
536 /**
537 * Activate the policy.
538 */
539 PTLRPC_NRS_CTL_START,
540 /**
541 * Reserved for multiple primary policies, which may be a possibility
542 * in the future.
543 */
544 PTLRPC_NRS_CTL_STOP,
545 /**
546 * Policies can start using opcodes from this value and onwards for
547 * their own purposes; the assigned value itself is arbitrary.
548 */
549 PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
550};
551
552/**
553 * ORR policy operations
554 */
555enum nrs_ctl_orr {
556 NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
557 NRS_CTL_ORR_WR_QUANTUM,
558 NRS_CTL_ORR_RD_OFF_TYPE,
559 NRS_CTL_ORR_WR_OFF_TYPE,
560 NRS_CTL_ORR_RD_SUPP_REQ,
561 NRS_CTL_ORR_WR_SUPP_REQ,
562};
563
564/**
565 * NRS policy operations.
566 *
567 * These determine the behaviour of a policy, and are called in response to
568 * NRS core events.
569 */
570struct ptlrpc_nrs_pol_ops {
571 /**
572 * Called during policy registration; this operation is optional.
573 *
574 * \param[in,out] policy The policy being initialized
575 */
576 int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
577 /**
578 * Called during policy unregistration; this operation is optional.
579 *
580 * \param[in,out] policy The policy being unregistered/finalized
581 */
582 void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
583 /**
584 * Called when activating a policy via lprocfs; policies allocate and
585 * initialize their resources here; this operation is optional.
586 *
587 * \param[in,out] policy The policy being started
588 *
589 * \see nrs_policy_start_locked()
590 */
591 int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
592 /**
593 * Called when deactivating a policy via lprocfs; policies deallocate
594 * their resources here; this operation is optional
595 *
596 * \param[in,out] policy The policy being stopped
597 *
598 * \see nrs_policy_stop0()
599 */
600 void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
601 /**
602 * Used for policy-specific operations; i.e. not generic ones like
603 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
604 * to an ioctl; this operation is optional.
605 *
606 * \param[in,out] policy The policy carrying out operation \a opc
607 * \param[in] opc The command operation being carried out
608 * \param[in,out] arg An generic buffer for communication between the
609 * user and the control operation
610 *
611 * \retval -ve error
612 * \retval 0 success
613 *
614 * \see ptlrpc_nrs_policy_control()
615 */
616 int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
617 enum ptlrpc_nrs_ctl opc, void *arg);
618
619 /**
620 * Called when obtaining references to the resources of the resource
621 * hierarchy for a request that has arrived for handling at the PTLRPC
622 * service. Policies should return -ve for requests they do not wish
623 * to handle. This operation is mandatory.
624 *
625 * \param[in,out] policy The policy we're getting resources for.
626 * \param[in,out] nrq The request we are getting resources for.
627 * \param[in] parent The parent resource of the resource being
628 * requested; set to NULL if none.
629 * \param[out] resp The resource is to be returned here; the
630 * fallback policy in an NRS head should
631 * \e always return a non-NULL pointer value.
632 * \param[in] moving_req When set, signifies that this is an attempt
633 * to obtain resources for a request being moved
634 * to the high-priority NRS head by
635 * ldlm_lock_reorder_req().
636 * This implies two things:
637 * 1. We are under obd_export::exp_rpc_lock and
638 * so should not sleep.
639 * 2. We should not perform non-idempotent or can
640 * skip performing idempotent operations that
641 * were carried out when resources were first
642 * taken for the request when it was initialized
643 * in ptlrpc_nrs_req_initialize().
644 *
645 * \retval 0, +ve The level of the returned resource in the resource
646 * hierarchy; currently only 0 (for a non-leaf resource)
647 * and 1 (for a leaf resource) are supported by the
648 * framework.
649 * \retval -ve error
650 *
651 * \see ptlrpc_nrs_req_initialize()
652 * \see ptlrpc_nrs_hpreq_add_nolock()
653 */
654 int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
655 struct ptlrpc_nrs_request *nrq,
656 const struct ptlrpc_nrs_resource *parent,
657 struct ptlrpc_nrs_resource **resp,
658 bool moving_req);
659 /**
660 * Called when releasing references taken for resources in the resource
661 * hierarchy for the request; this operation is optional.
662 *
663 * \param[in,out] policy The policy the resource belongs to
664 * \param[in] res The resource to be freed
665 *
666 * \see ptlrpc_nrs_req_finalize()
667 * \see ptlrpc_nrs_hpreq_add_nolock()
668 */
669 void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
670 const struct ptlrpc_nrs_resource *res);
671
672 /**
673 * Obtains a request for handling from the policy, and optionally
674 * removes the request from the policy; this operation is mandatory.
675 *
676 * \param[in,out] policy The policy to poll
677 * \param[in] peek When set, signifies that we just want to
678 * examine the request, and not handle it, so the
679 * request is not removed from the policy.
680 * \param[in] force When set, it will force a policy to return a
681 * request if it has one queued.
682 *
683 * \retval NULL No request available for handling
684 * \retval valid-pointer The request polled for handling
685 *
686 * \see ptlrpc_nrs_req_get_nolock()
687 */
688 struct ptlrpc_nrs_request *
689 (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
690 bool force);
691 /**
692 * Called when attempting to add a request to a policy for later
693 * handling; this operation is mandatory.
694 *
695 * \param[in,out] policy The policy on which to enqueue \a nrq
696 * \param[in,out] nrq The request to enqueue
697 *
698 * \retval 0 success
699 * \retval != 0 error
700 *
701 * \see ptlrpc_nrs_req_add_nolock()
702 */
703 int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
704 struct ptlrpc_nrs_request *nrq);
705 /**
706 * Removes a request from the policy's set of pending requests. Normally
707 * called after a request has been polled successfully from the policy
708 * for handling; this operation is mandatory.
709 *
710 * \param[in,out] policy The policy the request \a nrq belongs to
711 * \param[in,out] nrq The request to dequeue
712 */
713 void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
714 struct ptlrpc_nrs_request *nrq);
715 /**
716 * Called after the request being carried out. Could be used for
717 * job/resource control; this operation is optional.
718 *
719 * \param[in,out] policy The policy which is stopping to handle request
720 * \a nrq
721 * \param[in,out] nrq The request
722 *
723 * \pre assert_spin_locked(&svcpt->scp_req_lock)
724 *
725 * \see ptlrpc_nrs_req_stop_nolock()
726 */
727 void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
728 struct ptlrpc_nrs_request *nrq);
729 /**
730 * Registers the policy's lprocfs interface with a PTLRPC service.
731 *
732 * \param[in] svc The service
733 *
734 * \retval 0 success
735 * \retval != 0 error
736 */
737 int (*op_lprocfs_init)(struct ptlrpc_service *svc);
738 /**
739 * Unegisters the policy's lprocfs interface with a PTLRPC service.
740 *
741 * In cases of failed policy registration in
742 * \e ptlrpc_nrs_policy_register(), this function may be called for a
743 * service which has not registered the policy successfully, so
744 * implementations of this method should make sure their operations are
745 * safe in such cases.
746 *
747 * \param[in] svc The service
748 */
749 void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
750};
751
752/**
753 * Policy flags
754 */
755enum nrs_policy_flags {
756 /**
757 * Fallback policy, use this flag only on a single supported policy per
758 * service. The flag cannot be used on policies that use
759 * \e PTLRPC_NRS_FL_REG_EXTERN
760 */
761 PTLRPC_NRS_FL_FALLBACK = (1 << 0),
762 /**
763 * Start policy immediately after registering.
764 */
765 PTLRPC_NRS_FL_REG_START = (1 << 1),
766 /**
767 * This is a policy registering from a module different to the one NRS
768 * core ships in (currently ptlrpc).
769 */
770 PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
771};
772
773/**
774 * NRS queue type.
775 *
776 * Denotes whether an NRS instance is for handling normal or high-priority
777 * RPCs, or whether an operation pertains to one or both of the NRS instances
778 * in a service.
779 */
780enum ptlrpc_nrs_queue_type {
781 PTLRPC_NRS_QUEUE_REG = (1 << 0),
782 PTLRPC_NRS_QUEUE_HP = (1 << 1),
783 PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
784};
785
786/**
787 * NRS head
788 *
789 * A PTLRPC service has at least one NRS head instance for handling normal
790 * priority RPCs, and may optionally have a second NRS head instance for
791 * handling high-priority RPCs. Each NRS head maintains a list of available
792 * policies, of which one and only one policy is acting as the fallback policy,
793 * and optionally a different policy may be acting as the primary policy. For
794 * all RPCs handled by this NRS head instance, NRS core will first attempt to
795 * enqueue the RPC using the primary policy (if any). The fallback policy is
796 * used in the following cases:
797 * - when there was no primary policy in the
798 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
799 * was initialized.
800 * - when the primary policy that was at the
801 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
802 * RPC was initialized, denoted it did not wish, or for some other reason was
803 * not able to handle the request, by returning a non-valid NRS resource
804 * reference.
805 * - when the primary policy that was at the
806 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
807 * RPC was initialized, fails later during the request enqueueing stage.
808 *
809 * \see nrs_resource_get_safe()
810 * \see nrs_request_enqueue()
811 */
812struct ptlrpc_nrs {
813 spinlock_t nrs_lock;
814 /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
815 /**
816 * List of registered policies
817 */
818 struct list_head nrs_policy_list;
819 /**
820 * List of policies with queued requests. Policies that have any
821 * outstanding requests are queued here, and this list is queried
822 * in a round-robin manner from NRS core when obtaining a request
823 * for handling. This ensures that requests from policies that at some
824 * point transition away from the
825 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
826 */
827 struct list_head nrs_policy_queued;
828 /**
829 * Service partition for this NRS head
830 */
831 struct ptlrpc_service_part *nrs_svcpt;
832 /**
833 * Primary policy, which is the preferred policy for handling RPCs
834 */
835 struct ptlrpc_nrs_policy *nrs_policy_primary;
836 /**
837 * Fallback policy, which is the backup policy for handling RPCs
838 */
839 struct ptlrpc_nrs_policy *nrs_policy_fallback;
840 /**
841 * This NRS head handles either HP or regular requests
842 */
843 enum ptlrpc_nrs_queue_type nrs_queue_type;
844 /**
845 * # queued requests from all policies in this NRS head
846 */
847 unsigned long nrs_req_queued;
848 /**
849 * # scheduled requests from all policies in this NRS head
850 */
851 unsigned long nrs_req_started;
852 /**
853 * # policies on this NRS
854 */
855 unsigned nrs_num_pols;
856 /**
857 * This NRS head is in progress of starting a policy
858 */
859 unsigned nrs_policy_starting:1;
860 /**
861 * In progress of shutting down the whole NRS head; used during
862 * unregistration
863 */
864 unsigned nrs_stopping:1;
865};
866
867#define NRS_POL_NAME_MAX 16
868
869struct ptlrpc_nrs_pol_desc;
870
871/**
872 * Service compatibility predicate; this determines whether a policy is adequate
873 * for handling RPCs of a particular PTLRPC service.
874 *
875 * XXX:This should give the same result during policy registration and
876 * unregistration, and for all partitions of a service; so the result should not
877 * depend on temporal service or other properties, that may influence the
878 * result.
879 */
880typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
881 const struct ptlrpc_nrs_pol_desc *desc);
882
883struct ptlrpc_nrs_pol_conf {
884 /**
885 * Human-readable policy name
886 */
887 char nc_name[NRS_POL_NAME_MAX];
888 /**
889 * NRS operations for this policy
890 */
891 const struct ptlrpc_nrs_pol_ops *nc_ops;
892 /**
893 * Service compatibility predicate
894 */
895 nrs_pol_desc_compat_t nc_compat;
896 /**
897 * Set for policies that support a single ptlrpc service, i.e. ones that
898 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
899 * depicts the name of the single service that such policies are
900 * compatible with.
901 */
902 const char *nc_compat_svc_name;
903 /**
904 * Owner module for this policy descriptor; policies registering from a
905 * different module to the one the NRS framework is held within
906 * (currently ptlrpc), should set this field to THIS_MODULE.
907 */
908 struct module *nc_owner;
909 /**
910 * Policy registration flags; a bitmask of \e nrs_policy_flags
911 */
912 unsigned nc_flags;
913};
914
915/**
916 * NRS policy registering descriptor
917 *
918 * Is used to hold a description of a policy that can be passed to NRS core in
919 * order to register the policy with NRS heads in different PTLRPC services.
920 */
921struct ptlrpc_nrs_pol_desc {
922 /**
923 * Human-readable policy name
924 */
925 char pd_name[NRS_POL_NAME_MAX];
926 /**
927 * Link into nrs_core::nrs_policies
928 */
929 struct list_head pd_list;
930 /**
931 * NRS operations for this policy
932 */
933 const struct ptlrpc_nrs_pol_ops *pd_ops;
934 /**
935 * Service compatibility predicate
936 */
937 nrs_pol_desc_compat_t pd_compat;
938 /**
939 * Set for policies that are compatible with only one PTLRPC service.
940 *
941 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
942 */
943 const char *pd_compat_svc_name;
944 /**
945 * Owner module for this policy descriptor.
946 *
947 * We need to hold a reference to the module whenever we might make use
948 * of any of the module's contents, i.e.
949 * - If one or more instances of the policy are at a state where they
950 * might be handling a request, i.e.
951 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
952 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
953 * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
954 * is taken on the module when
955 * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
956 * becomes 0, so that we hold only one reference to the module maximum
957 * at any time.
958 *
959 * We do not need to hold a reference to the module, even though we
960 * might use code and data from the module, in the following cases:
961 * - During external policy registration, because this should happen in
962 * the module's init() function, in which case the module is safe from
963 * removal because a reference is being held on the module by the
964 * kernel, and iirc kmod (and I guess module-init-tools also) will
965 * serialize any racing processes properly anyway.
966 * - During external policy unregistration, because this should happen
967 * in a module's exit() function, and any attempts to start a policy
968 * instance would need to take a reference on the module, and this is
969 * not possible once we have reached the point where the exit()
970 * handler is called.
971 * - During service registration and unregistration, as service setup
972 * and cleanup, and policy registration, unregistration and policy
973 * instance starting, are serialized by \e nrs_core::nrs_mutex, so
974 * as long as users adhere to the convention of registering policies
975 * in init() and unregistering them in module exit() functions, there
976 * should not be a race between these operations.
977 * - During any policy-specific lprocfs operations, because a reference
978 * is held by the kernel on a proc entry that has been entered by a
979 * syscall, so as long as proc entries are removed during unregistration time,
980 * then unregistration and lprocfs operations will be properly
981 * serialized.
982 */
983 struct module *pd_owner;
984 /**
985 * Bitmask of \e nrs_policy_flags
986 */
987 unsigned pd_flags;
988 /**
989 * # of references on this descriptor
990 */
991 atomic_t pd_refs;
992};
993
994/**
995 * NRS policy state
996 *
997 * Policies transition from one state to the other during their lifetime
998 */
999enum ptlrpc_nrs_pol_state {
1000 /**
1001 * Not a valid policy state.
1002 */
1003 NRS_POL_STATE_INVALID,
1004 /**
1005 * Policies are at this state either at the start of their life, or
1006 * transition here when the user selects a different policy to act
1007 * as the primary one.
1008 */
1009 NRS_POL_STATE_STOPPED,
1010 /**
1011 * Policy is progress of stopping
1012 */
1013 NRS_POL_STATE_STOPPING,
1014 /**
1015 * Policy is in progress of starting
1016 */
1017 NRS_POL_STATE_STARTING,
1018 /**
1019 * A policy is in this state in two cases:
1020 * - it is the fallback policy, which is always in this state.
1021 * - it has been activated by the user; i.e. it is the primary policy,
1022 */
1023 NRS_POL_STATE_STARTED,
1024};
1025
1026/**
1027 * NRS policy information
1028 *
1029 * Used for obtaining information for the status of a policy via lprocfs
1030 */
1031struct ptlrpc_nrs_pol_info {
1032 /**
1033 * Policy name
1034 */
1035 char pi_name[NRS_POL_NAME_MAX];
1036 /**
1037 * Current policy state
1038 */
1039 enum ptlrpc_nrs_pol_state pi_state;
1040 /**
1041 * # RPCs enqueued for later dispatching by the policy
1042 */
1043 long pi_req_queued;
1044 /**
1045 * # RPCs started for dispatch by the policy
1046 */
1047 long pi_req_started;
1048 /**
1049 * Is this a fallback policy?
1050 */
1051 unsigned pi_fallback:1;
1052};
1053
1054/**
1055 * NRS policy
1056 *
1057 * There is one instance of this for each policy in each NRS head of each
1058 * PTLRPC service partition.
1059 */
1060struct ptlrpc_nrs_policy {
1061 /**
1062 * Linkage into the NRS head's list of policies,
1063 * ptlrpc_nrs:nrs_policy_list
1064 */
1065 struct list_head pol_list;
1066 /**
1067 * Linkage into the NRS head's list of policies with enqueued
1068 * requests ptlrpc_nrs:nrs_policy_queued
1069 */
1070 struct list_head pol_list_queued;
1071 /**
1072 * Current state of this policy
1073 */
1074 enum ptlrpc_nrs_pol_state pol_state;
1075 /**
1076 * Bitmask of nrs_policy_flags
1077 */
1078 unsigned pol_flags;
1079 /**
1080 * # RPCs enqueued for later dispatching by the policy
1081 */
1082 long pol_req_queued;
1083 /**
1084 * # RPCs started for dispatch by the policy
1085 */
1086 long pol_req_started;
1087 /**
1088 * Usage Reference count taken on the policy instance
1089 */
1090 long pol_ref;
1091 /**
1092 * The NRS head this policy has been created at
1093 */
1094 struct ptlrpc_nrs *pol_nrs;
1095 /**
1096 * Private policy data; varies by policy type
1097 */
1098 void *pol_private;
1099 /**
1100 * Policy descriptor for this policy instance.
1101 */
1102 struct ptlrpc_nrs_pol_desc *pol_desc;
1103};
1104
1105/**
1106 * NRS resource
1107 *
1108 * Resources are embedded into two types of NRS entities:
1109 * - Inside NRS policies, in the policy's private data in
1110 * ptlrpc_nrs_policy::pol_private
1111 * - In objects that act as prime-level scheduling entities in different NRS
1112 * policies; e.g. on a policy that performs round robin or similar order
1113 * scheduling across client NIDs, there would be one NRS resource per unique
1114 * client NID. On a policy which performs round robin scheduling across
1115 * backend filesystem objects, there would be one resource associated with
1116 * each of the backend filesystem objects partaking in the scheduling
1117 * performed by the policy.
1118 *
1119 * NRS resources share a parent-child relationship, in which resources embedded
1120 * in policy instances are the parent entities, with all scheduling entities
1121 * a policy schedules across being the children, thus forming a simple resource
1122 * hierarchy. This hierarchy may be extended with one or more levels in the
1123 * future if the ability to have more than one primary policy is added.
1124 *
1125 * Upon request initialization, references to the then active NRS policies are
1126 * taken and used to later handle the dispatching of the request with one of
1127 * these policies.
1128 *
1129 * \see nrs_resource_get_safe()
1130 * \see ptlrpc_nrs_req_add()
1131 */
1132struct ptlrpc_nrs_resource {
1133 /**
1134 * This NRS resource's parent; is NULL for resources embedded in NRS
1135 * policy instances; i.e. those are top-level ones.
1136 */
1137 struct ptlrpc_nrs_resource *res_parent;
1138 /**
1139 * The policy associated with this resource.
1140 */
1141 struct ptlrpc_nrs_policy *res_policy;
1142};
1143
1144enum {
1145 NRS_RES_FALLBACK,
1146 NRS_RES_PRIMARY,
1147 NRS_RES_MAX
1148};
1149
1150/* \name fifo
1151 *
1152 * FIFO policy
1153 *
1154 * This policy is a logical wrapper around previous, non-NRS functionality.
1155 * It dispatches RPCs in the same order as they arrive from the network. This
1156 * policy is currently used as the fallback policy, and the only enabled policy
1157 * on all NRS heads of all PTLRPC service partitions.
1158 * @{
1159 */
1160
1161/**
1162 * Private data structure for the FIFO policy
1163 */
1164struct nrs_fifo_head {
1165 /**
1166 * Resource object for policy instance.
1167 */
1168 struct ptlrpc_nrs_resource fh_res;
1169 /**
1170 * List of queued requests.
1171 */
1172 struct list_head fh_list;
1173 /**
1174 * For debugging purposes.
1175 */
1176 __u64 fh_sequence;
1177};
1178
1179struct nrs_fifo_req {
1180 struct list_head fr_list;
1181 __u64 fr_sequence;
1182};
1183
1184/** @} fifo */
1185
1186/**
1187 * NRS request
1188 *
1189 * Instances of this object exist embedded within ptlrpc_request; the main
1190 * purpose of this object is to hold references to the request's resources
1191 * for the lifetime of the request, and to hold properties that policies use
1192 * use for determining the request's scheduling priority.
1193 */
1194struct ptlrpc_nrs_request {
1195 /**
1196 * The request's resource hierarchy.
1197 */
1198 struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
1199 /**
1200 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
1201 * policy that was used to enqueue the request.
1202 *
1203 * \see nrs_request_enqueue()
1204 */
1205 unsigned nr_res_idx;
1206 unsigned nr_initialized:1;
1207 unsigned nr_enqueued:1;
1208 unsigned nr_started:1;
1209 unsigned nr_finalized:1;
1210
1211 /**
1212 * Policy-specific fields, used for determining a request's scheduling
1213 * priority, and other supporting functionality.
1214 */
1215 union {
1216 /**
1217 * Fields for the FIFO policy
1218 */
1219 struct nrs_fifo_req fifo;
1220 } nr_u;
1221 /**
1222 * Externally-registering policies may want to use this to allocate
1223 * their own request properties.
1224 */
1225 void *ext;
1226};
1227
1228/** @} nrs */
1229 525
1230/** 526/**
1231 * Basic request prioritization operations structure. 527 * Basic request prioritization operations structure.
@@ -1304,6 +600,8 @@ struct ptlrpc_cli_req {
1304 union ptlrpc_async_args cr_async_args; 600 union ptlrpc_async_args cr_async_args;
1305 /** Opaq data for replay and commit callbacks. */ 601 /** Opaq data for replay and commit callbacks. */
1306 void *cr_cb_data; 602 void *cr_cb_data;
603 /** Link to the imp->imp_unreplied_list */
604 struct list_head cr_unreplied_list;
1307 /** 605 /**
1308 * Commit callback, called when request is committed and about to be 606 * Commit callback, called when request is committed and about to be
1309 * freed. 607 * freed.
@@ -1343,6 +641,7 @@ struct ptlrpc_cli_req {
1343#define rq_interpret_reply rq_cli.cr_reply_interp 641#define rq_interpret_reply rq_cli.cr_reply_interp
1344#define rq_async_args rq_cli.cr_async_args 642#define rq_async_args rq_cli.cr_async_args
1345#define rq_cb_data rq_cli.cr_cb_data 643#define rq_cb_data rq_cli.cr_cb_data
644#define rq_unreplied_list rq_cli.cr_unreplied_list
1346#define rq_commit_cb rq_cli.cr_commit_cb 645#define rq_commit_cb rq_cli.cr_commit_cb
1347#define rq_replay_cb rq_cli.cr_replay_cb 646#define rq_replay_cb rq_cli.cr_replay_cb
1348 647
@@ -1505,6 +804,8 @@ struct ptlrpc_request {
1505 __u64 rq_transno; 804 __u64 rq_transno;
1506 /** xid */ 805 /** xid */
1507 __u64 rq_xid; 806 __u64 rq_xid;
807 /** bulk match bits */
808 u64 rq_mbits;
1508 /** 809 /**
1509 * List item to for replay list. Not yet committed requests get linked 810 * List item to for replay list. Not yet committed requests get linked
1510 * there. 811 * there.
@@ -1793,10 +1094,93 @@ struct ptlrpc_bulk_page {
1793 struct page *bp_page; 1094 struct page *bp_page;
1794}; 1095};
1795 1096
1796#define BULK_GET_SOURCE 0 1097enum ptlrpc_bulk_op_type {
1797#define BULK_PUT_SINK 1 1098 PTLRPC_BULK_OP_ACTIVE = 0x00000001,
1798#define BULK_GET_SINK 2 1099 PTLRPC_BULK_OP_PASSIVE = 0x00000002,
1799#define BULK_PUT_SOURCE 3 1100 PTLRPC_BULK_OP_PUT = 0x00000004,
1101 PTLRPC_BULK_OP_GET = 0x00000008,
1102 PTLRPC_BULK_BUF_KVEC = 0x00000010,
1103 PTLRPC_BULK_BUF_KIOV = 0x00000020,
1104 PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
1105 PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
1106 PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
1107 PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
1108};
1109
1110static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type)
1111{
1112 return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
1113}
1114
1115static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type)
1116{
1117 return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE;
1118}
1119
1120static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type)
1121{
1122 return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK;
1123}
1124
1125static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type)
1126{
1127 return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK;
1128}
1129
1130static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
1131{
1132 return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
1133}
1134
1135static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
1136{
1137 return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
1138 == PTLRPC_BULK_BUF_KVEC;
1139}
1140
1141static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
1142{
1143 return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
1144 == PTLRPC_BULK_BUF_KIOV;
1145}
1146
1147static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
1148{
1149 return ((type & PTLRPC_BULK_OP_ACTIVE) |
1150 (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE;
1151}
1152
1153static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
1154{
1155 return ((type & PTLRPC_BULK_OP_ACTIVE) |
1156 (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
1157}
1158
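A minimal sketch of how the composite values and the predicates above combine; the helper name is hypothetical, and the GET_SOURCE/KIOV pairing is the one a client-side bulk write would typically carry:

static bool bulk_desc_type_demo(void)
{
	/* the peer GETs from our source pages, carried as a kiov array */
	enum ptlrpc_bulk_op_type type = PTLRPC_BULK_GET_SOURCE |
					PTLRPC_BULK_BUF_KIOV;

	/* passive, GET, GET_SOURCE and kiov all hold for this combination */
	return ptlrpc_is_bulk_op_passive(type) &&
	       ptlrpc_is_bulk_op_get(type) &&
	       ptlrpc_is_bulk_get_source(type) &&
	       ptlrpc_is_bulk_desc_kiov(type);
}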
1159struct ptlrpc_bulk_frag_ops {
1160 /**
 1161 * Add a page \a page to the bulk descriptor \a desc.
 1162 * Data to transfer in the page starts at offset \a pageoffset and
 1163 * the amount of data to transfer from the page is \a len.
1164 */
1165 void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc,
1166 struct page *page, int pageoffset, int len);
1167
1168 /*
 1169 * Add a fragment to the bulk descriptor \a desc.
 1170 * Data to transfer in the fragment is pointed to by \a frag.
 1171 * The size of the fragment is \a len.
1172 */
1173 int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len);
1174
1175 /**
1176 * Uninitialize and free bulk descriptor \a desc.
1177 * Works on bulk descriptors both from server and client side.
1178 */
1179 void (*release_frags)(struct ptlrpc_bulk_desc *desc);
1180};
1181
1182extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
1183extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
1800 1184
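A hedged sketch of how callers are expected to go through the ops table instead of touching the buffer union directly; the wrapper name is made up, and the real client path (__ptlrpc_prep_bulk_page(), declared later in this header) does the equivalent internally:

static void bulk_add_one_page(struct ptlrpc_bulk_desc *desc, struct page *page)
{
	/* kiov descriptors take pages; kvec descriptors would use add_iov_frag */
	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
		desc->bd_frag_ops->add_kiov_frag(desc, page, 0, PAGE_SIZE);
}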
1801/** 1185/**
1802 * Definition of bulk descriptor. 1186 * Definition of bulk descriptor.
@@ -1811,14 +1195,14 @@ struct ptlrpc_bulk_page {
1811struct ptlrpc_bulk_desc { 1195struct ptlrpc_bulk_desc {
1812 /** completed with failure */ 1196 /** completed with failure */
1813 unsigned long bd_failure:1; 1197 unsigned long bd_failure:1;
1814 /** {put,get}{source,sink} */
1815 unsigned long bd_type:2;
1816 /** client side */ 1198 /** client side */
1817 unsigned long bd_registered:1; 1199 unsigned long bd_registered:1;
1818 /** For serialization with callback */ 1200 /** For serialization with callback */
1819 spinlock_t bd_lock; 1201 spinlock_t bd_lock;
1820 /** Import generation when request for this bulk was sent */ 1202 /** Import generation when request for this bulk was sent */
1821 int bd_import_generation; 1203 int bd_import_generation;
1204 /** {put,get}{source,sink}{kvec,kiov} */
1205 enum ptlrpc_bulk_op_type bd_type;
1822 /** LNet portal for this bulk */ 1206 /** LNet portal for this bulk */
1823 __u32 bd_portal; 1207 __u32 bd_portal;
1824 /** Server side - export this bulk created for */ 1208 /** Server side - export this bulk created for */
@@ -1827,13 +1211,14 @@ struct ptlrpc_bulk_desc {
1827 struct obd_import *bd_import; 1211 struct obd_import *bd_import;
1828 /** Back pointer to the request */ 1212 /** Back pointer to the request */
1829 struct ptlrpc_request *bd_req; 1213 struct ptlrpc_request *bd_req;
1214 struct ptlrpc_bulk_frag_ops *bd_frag_ops;
1830 wait_queue_head_t bd_waitq; /* server side only WQ */ 1215 wait_queue_head_t bd_waitq; /* server side only WQ */
1831 int bd_iov_count; /* # entries in bd_iov */ 1216 int bd_iov_count; /* # entries in bd_iov */
1832 int bd_max_iov; /* allocated size of bd_iov */ 1217 int bd_max_iov; /* allocated size of bd_iov */
1833 int bd_nob; /* # bytes covered */ 1218 int bd_nob; /* # bytes covered */
1834 int bd_nob_transferred; /* # bytes GOT/PUT */ 1219 int bd_nob_transferred; /* # bytes GOT/PUT */
1835 1220
1836 __u64 bd_last_xid; 1221 u64 bd_last_mbits;
1837 1222
1838 struct ptlrpc_cb_id bd_cbid; /* network callback info */ 1223 struct ptlrpc_cb_id bd_cbid; /* network callback info */
1839 lnet_nid_t bd_sender; /* stash event::sender */ 1224 lnet_nid_t bd_sender; /* stash event::sender */
@@ -1842,14 +1227,31 @@ struct ptlrpc_bulk_desc {
1842 /** array of associated MDs */ 1227 /** array of associated MDs */
1843 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT]; 1228 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
1844 1229
1845 /* 1230 union {
1846 * encrypt iov, size is either 0 or bd_iov_count. 1231 struct {
1847 */ 1232 /*
1848 lnet_kiov_t *bd_enc_iov; 1233 * encrypt iov, size is either 0 or bd_iov_count.
1849 1234 */
1850 lnet_kiov_t bd_iov[0]; 1235 struct bio_vec *bd_enc_vec;
1236 struct bio_vec *bd_vec; /* Array of bio_vecs */
1237 } bd_kiov;
1238
1239 struct {
1240 struct kvec *bd_enc_kvec;
1241 struct kvec *bd_kvec; /* Array of kvecs */
1242 } bd_kvec;
1243 } bd_u;
1851}; 1244};
1852 1245
1246#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec)
1247#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i])
1248#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec)
1249#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])
1250#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec)
1251#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i])
1252#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec)
1253#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
1254
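The accessor macros hide which arm of bd_u is live for a given descriptor; a small sketch (hypothetical helper) of walking a kiov descriptor through them:

static int bulk_desc_kiov_bytes(const struct ptlrpc_bulk_desc *desc)
{
	int i, nob = 0;

	/* only meaningful when bd_type selects the kiov arm of bd_u */
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
	for (i = 0; i < desc->bd_iov_count; i++)
		nob += BD_GET_KIOV(desc, i).bv_len;
	return nob;
}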
1853enum { 1255enum {
1854 SVC_STOPPED = 1 << 0, 1256 SVC_STOPPED = 1 << 0,
1855 SVC_STOPPING = 1 << 1, 1257 SVC_STOPPING = 1 << 1,
@@ -2464,21 +1866,17 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2464void ptlrpc_req_finished(struct ptlrpc_request *request); 1866void ptlrpc_req_finished(struct ptlrpc_request *request);
2465struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); 1867struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
2466struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, 1868struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2467 unsigned npages, unsigned max_brw, 1869 unsigned int nfrags,
2468 unsigned type, unsigned portal); 1870 unsigned int max_brw,
2469void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin); 1871 unsigned int type,
2470static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk) 1872 unsigned int portal,
2471{ 1873 const struct ptlrpc_bulk_frag_ops *ops);
2472 __ptlrpc_free_bulk(bulk, 1); 1874
2473} 1875int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
2474 1876 void *frag, int len);
2475static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2476{
2477 __ptlrpc_free_bulk(bulk, 0);
2478}
2479
2480void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, 1877void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2481 struct page *page, int pageoffset, int len, int); 1878 struct page *page, int pageoffset, int len,
1879 int pin);
2482static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, 1880static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2483 struct page *page, int pageoffset, 1881 struct page *page, int pageoffset,
2484 int len) 1882 int len)
@@ -2493,6 +1891,16 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2493 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); 1891 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
2494} 1892}
2495 1893
1894void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
1895
1896static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
1897{
1898 int i;
1899
1900 for (i = 0; i < desc->bd_iov_count ; i++)
1901 put_page(BD_GET_KIOV(desc, i).bv_page);
1902}
1903
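For context, a hedged sketch of a client read setup against the reworked ptlrpc_prep_bulk_imp() prototype above; the portal choice and helper name are illustrative and error handling is trimmed:

static struct ptlrpc_bulk_desc *
prep_read_bulk(struct ptlrpc_request *req, struct page **pages,
	       unsigned int npages)
{
	struct ptlrpc_bulk_desc *desc;
	unsigned int i;

	/* passive sink on the client: the server PUTs data into our pages */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return NULL;

	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);

	return desc;
}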
2496void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, 1904void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2497 struct obd_import *imp); 1905 struct obd_import *imp);
2498__u64 ptlrpc_next_xid(void); 1906__u64 ptlrpc_next_xid(void);
@@ -2652,6 +2060,7 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2652__u32 lustre_msg_get_type(struct lustre_msg *msg); 2060__u32 lustre_msg_get_type(struct lustre_msg *msg);
2653void lustre_msg_add_version(struct lustre_msg *msg, u32 version); 2061void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
2654__u32 lustre_msg_get_opc(struct lustre_msg *msg); 2062__u32 lustre_msg_get_opc(struct lustre_msg *msg);
2063__u16 lustre_msg_get_tag(struct lustre_msg *msg);
2655__u64 lustre_msg_get_last_committed(struct lustre_msg *msg); 2064__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2656__u64 *lustre_msg_get_versions(struct lustre_msg *msg); 2065__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2657__u64 lustre_msg_get_transno(struct lustre_msg *msg); 2066__u64 lustre_msg_get_transno(struct lustre_msg *msg);
@@ -2670,6 +2079,8 @@ void lustre_msg_set_handle(struct lustre_msg *msg,
2670 struct lustre_handle *handle); 2079 struct lustre_handle *handle);
2671void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); 2080void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
2672void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc); 2081void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
2082void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid);
2083void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
2673void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions); 2084void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2674void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); 2085void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2675void lustre_msg_set_status(struct lustre_msg *msg, __u32 status); 2086void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
@@ -2679,6 +2090,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2679void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); 2090void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2680void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid); 2091void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2681void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); 2092void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2093void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits);
2682 2094
2683static inline void 2095static inline void
2684lustre_shrink_reply(struct ptlrpc_request *req, int segment, 2096lustre_shrink_reply(struct ptlrpc_request *req, int segment,
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
new file mode 100644
index 000000000000..a5028aaa19cd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -0,0 +1,717 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * version 2 along with this program; If not, see
17 * http://www.gnu.org/licenses/gpl-2.0.html
18 *
19 * GPL HEADER END
20 */
21/*
22 * Copyright (c) 2014, Intel Corporation.
23 *
24 * Copyright 2012 Xyratex Technology Limited
25 */
26/*
27 *
28 * Network Request Scheduler (NRS)
29 *
30 */
31
32#ifndef _LUSTRE_NRS_H
33#define _LUSTRE_NRS_H
34
35/**
36 * \defgroup nrs Network Request Scheduler
37 * @{
38 */
39struct ptlrpc_nrs_policy;
40struct ptlrpc_nrs_resource;
41struct ptlrpc_nrs_request;
42
43/**
44 * NRS control operations.
45 *
46 * These are common for all policies.
47 */
48enum ptlrpc_nrs_ctl {
49 /**
50 * Not a valid opcode.
51 */
52 PTLRPC_NRS_CTL_INVALID,
53 /**
54 * Activate the policy.
55 */
56 PTLRPC_NRS_CTL_START,
57 /**
58 * Reserved for multiple primary policies, which may be a possibility
59 * in the future.
60 */
61 PTLRPC_NRS_CTL_STOP,
62 /**
63 * Policies can start using opcodes from this value and onwards for
64 * their own purposes; the assigned value itself is arbitrary.
65 */
66 PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
67};
68
69/**
70 * NRS policy operations.
71 *
72 * These determine the behaviour of a policy, and are called in response to
73 * NRS core events.
74 */
75struct ptlrpc_nrs_pol_ops {
76 /**
77 * Called during policy registration; this operation is optional.
78 *
79 * \param[in,out] policy The policy being initialized
80 */
81 int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
82 /**
83 * Called during policy unregistration; this operation is optional.
84 *
85 * \param[in,out] policy The policy being unregistered/finalized
86 */
87 void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
88 /**
89 * Called when activating a policy via lprocfs; policies allocate and
90 * initialize their resources here; this operation is optional.
91 *
92 * \param[in,out] policy The policy being started
93 *
94 * \see nrs_policy_start_locked()
95 */
96 int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
97 /**
98 * Called when deactivating a policy via lprocfs; policies deallocate
99 * their resources here; this operation is optional
100 *
101 * \param[in,out] policy The policy being stopped
102 *
103 * \see nrs_policy_stop0()
104 */
105 void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
106 /**
107 * Used for policy-specific operations; i.e. not generic ones like
108 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
109 * to an ioctl; this operation is optional.
110 *
111 * \param[in,out] policy The policy carrying out operation \a opc
112 * \param[in] opc The command operation being carried out
 113 * \param[in,out] arg A generic buffer for communication between the
114 * user and the control operation
115 *
116 * \retval -ve error
117 * \retval 0 success
118 *
119 * \see ptlrpc_nrs_policy_control()
120 */
121 int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
122 enum ptlrpc_nrs_ctl opc, void *arg);
123
124 /**
125 * Called when obtaining references to the resources of the resource
126 * hierarchy for a request that has arrived for handling at the PTLRPC
127 * service. Policies should return -ve for requests they do not wish
128 * to handle. This operation is mandatory.
129 *
130 * \param[in,out] policy The policy we're getting resources for.
131 * \param[in,out] nrq The request we are getting resources for.
132 * \param[in] parent The parent resource of the resource being
133 * requested; set to NULL if none.
134 * \param[out] resp The resource is to be returned here; the
135 * fallback policy in an NRS head should
136 * \e always return a non-NULL pointer value.
137 * \param[in] moving_req When set, signifies that this is an attempt
138 * to obtain resources for a request being moved
139 * to the high-priority NRS head by
140 * ldlm_lock_reorder_req().
141 * This implies two things:
142 * 1. We are under obd_export::exp_rpc_lock and
143 * so should not sleep.
 144 * 2. We should not perform non-idempotent
 145 * operations, and may skip idempotent operations
 146 * that were already carried out when resources
 147 * were first taken for the request, when it was
 148 * initialized in ptlrpc_nrs_req_initialize().
149 *
150 * \retval 0, +ve The level of the returned resource in the resource
151 * hierarchy; currently only 0 (for a non-leaf resource)
152 * and 1 (for a leaf resource) are supported by the
153 * framework.
154 * \retval -ve error
155 *
156 * \see ptlrpc_nrs_req_initialize()
157 * \see ptlrpc_nrs_hpreq_add_nolock()
158 * \see ptlrpc_nrs_req_hp_move()
159 */
160 int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
161 struct ptlrpc_nrs_request *nrq,
162 const struct ptlrpc_nrs_resource *parent,
163 struct ptlrpc_nrs_resource **resp,
164 bool moving_req);
165 /**
166 * Called when releasing references taken for resources in the resource
167 * hierarchy for the request; this operation is optional.
168 *
169 * \param[in,out] policy The policy the resource belongs to
170 * \param[in] res The resource to be freed
171 *
172 * \see ptlrpc_nrs_req_finalize()
173 * \see ptlrpc_nrs_hpreq_add_nolock()
174 * \see ptlrpc_nrs_req_hp_move()
175 */
176 void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
177 const struct ptlrpc_nrs_resource *res);
178
179 /**
180 * Obtains a request for handling from the policy, and optionally
181 * removes the request from the policy; this operation is mandatory.
182 *
183 * \param[in,out] policy The policy to poll
184 * \param[in] peek When set, signifies that we just want to
185 * examine the request, and not handle it, so the
186 * request is not removed from the policy.
187 * \param[in] force When set, it will force a policy to return a
188 * request if it has one queued.
189 *
190 * \retval NULL No request available for handling
191 * \retval valid-pointer The request polled for handling
192 *
193 * \see ptlrpc_nrs_req_get_nolock()
194 */
195 struct ptlrpc_nrs_request *
196 (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
197 bool force);
198 /**
199 * Called when attempting to add a request to a policy for later
200 * handling; this operation is mandatory.
201 *
202 * \param[in,out] policy The policy on which to enqueue \a nrq
203 * \param[in,out] nrq The request to enqueue
204 *
205 * \retval 0 success
206 * \retval != 0 error
207 *
208 * \see ptlrpc_nrs_req_add_nolock()
209 */
210 int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
211 struct ptlrpc_nrs_request *nrq);
212 /**
213 * Removes a request from the policy's set of pending requests. Normally
214 * called after a request has been polled successfully from the policy
215 * for handling; this operation is mandatory.
216 *
217 * \param[in,out] policy The policy the request \a nrq belongs to
218 * \param[in,out] nrq The request to dequeue
219 *
220 * \see ptlrpc_nrs_req_del_nolock()
221 */
222 void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
223 struct ptlrpc_nrs_request *nrq);
224 /**
 225 * Called after the request has been carried out. Could be used for
226 * job/resource control; this operation is optional.
227 *
228 * \param[in,out] policy The policy which is stopping to handle request
229 * \a nrq
230 * \param[in,out] nrq The request
231 *
232 * \pre assert_spin_locked(&svcpt->scp_req_lock)
233 *
234 * \see ptlrpc_nrs_req_stop_nolock()
235 */
236 void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
237 struct ptlrpc_nrs_request *nrq);
238 /**
239 * Registers the policy's lprocfs interface with a PTLRPC service.
240 *
241 * \param[in] svc The service
242 *
243 * \retval 0 success
244 * \retval != 0 error
245 */
246 int (*op_lprocfs_init)(struct ptlrpc_service *svc);
247 /**
 248 * Unregisters the policy's lprocfs interface with a PTLRPC service.
249 *
250 * In cases of failed policy registration in
251 * \e ptlrpc_nrs_policy_register(), this function may be called for a
252 * service which has not registered the policy successfully, so
253 * implementations of this method should make sure their operations are
254 * safe in such cases.
255 *
256 * \param[in] svc The service
257 */
258 void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
259};
260
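As a rough sketch only: a policy built on these hooks must provide at least the four operations marked mandatory in the comments above; the rest may stay NULL. All demo_* handler names here are hypothetical:

static const struct ptlrpc_nrs_pol_ops nrs_demo_ops = {
	.op_res_get	= demo_res_get,		/* mandatory */
	.op_res_put	= demo_res_put,		/* optional  */
	.op_req_get	= demo_req_get,		/* mandatory */
	.op_req_enqueue	= demo_req_enqueue,	/* mandatory */
	.op_req_dequeue	= demo_req_dequeue,	/* mandatory */
	.op_req_stop	= demo_req_stop,	/* optional  */
};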
261/**
262 * Policy flags
263 */
264enum nrs_policy_flags {
265 /**
266 * Fallback policy, use this flag only on a single supported policy per
267 * service. The flag cannot be used on policies that use
268 * \e PTLRPC_NRS_FL_REG_EXTERN
269 */
270 PTLRPC_NRS_FL_FALLBACK = BIT(0),
271 /**
272 * Start policy immediately after registering.
273 */
274 PTLRPC_NRS_FL_REG_START = BIT(1),
275 /**
276 * This is a policy registering from a module different to the one NRS
277 * core ships in (currently ptlrpc).
278 */
279 PTLRPC_NRS_FL_REG_EXTERN = BIT(2),
280};
281
282/**
283 * NRS queue type.
284 *
285 * Denotes whether an NRS instance is for handling normal or high-priority
286 * RPCs, or whether an operation pertains to one or both of the NRS instances
287 * in a service.
288 */
289enum ptlrpc_nrs_queue_type {
290 PTLRPC_NRS_QUEUE_REG = BIT(0),
291 PTLRPC_NRS_QUEUE_HP = BIT(1),
292 PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
293};
294
295/**
296 * NRS head
297 *
298 * A PTLRPC service has at least one NRS head instance for handling normal
299 * priority RPCs, and may optionally have a second NRS head instance for
300 * handling high-priority RPCs. Each NRS head maintains a list of available
301 * policies, of which one and only one policy is acting as the fallback policy,
302 * and optionally a different policy may be acting as the primary policy. For
303 * all RPCs handled by this NRS head instance, NRS core will first attempt to
304 * enqueue the RPC using the primary policy (if any). The fallback policy is
305 * used in the following cases:
306 * - when there was no primary policy in the
307 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
308 * was initialized.
309 * - when the primary policy that was at the
310 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
 311 * RPC was initialized indicated that it did not wish, or was otherwise
 312 * unable, to handle the request, by returning a non-valid NRS resource
313 * reference.
314 * - when the primary policy that was at the
315 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
316 * RPC was initialized, fails later during the request enqueueing stage.
317 *
318 * \see nrs_resource_get_safe()
319 * \see nrs_request_enqueue()
320 */
321struct ptlrpc_nrs {
322 spinlock_t nrs_lock;
323 /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
324 /**
325 * List of registered policies
326 */
327 struct list_head nrs_policy_list;
328 /**
329 * List of policies with queued requests. Policies that have any
330 * outstanding requests are queued here, and this list is queried
331 * in a round-robin manner from NRS core when obtaining a request
332 * for handling. This ensures that requests from policies that at some
333 * point transition away from the
334 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
335 */
336 struct list_head nrs_policy_queued;
337 /**
338 * Service partition for this NRS head
339 */
340 struct ptlrpc_service_part *nrs_svcpt;
341 /**
342 * Primary policy, which is the preferred policy for handling RPCs
343 */
344 struct ptlrpc_nrs_policy *nrs_policy_primary;
345 /**
346 * Fallback policy, which is the backup policy for handling RPCs
347 */
348 struct ptlrpc_nrs_policy *nrs_policy_fallback;
349 /**
350 * This NRS head handles either HP or regular requests
351 */
352 enum ptlrpc_nrs_queue_type nrs_queue_type;
353 /**
354 * # queued requests from all policies in this NRS head
355 */
356 unsigned long nrs_req_queued;
357 /**
358 * # scheduled requests from all policies in this NRS head
359 */
360 unsigned long nrs_req_started;
361 /**
362 * # policies on this NRS
363 */
364 unsigned int nrs_num_pols;
365 /**
 366 * This NRS head is in the process of starting a policy
367 */
368 unsigned int nrs_policy_starting:1;
369 /**
370 * In progress of shutting down the whole NRS head; used during
371 * unregistration
372 */
373 unsigned int nrs_stopping:1;
374 /**
 375 * An NRS policy is throttling requests
376 */
377 unsigned int nrs_throttling:1;
378};
379
380#define NRS_POL_NAME_MAX 16
381#define NRS_POL_ARG_MAX 16
382
383struct ptlrpc_nrs_pol_desc;
384
385/**
386 * Service compatibility predicate; this determines whether a policy is adequate
387 * for handling RPCs of a particular PTLRPC service.
388 *
 389 * XXX: This should give the same result during policy registration and
 390 * unregistration, and for all partitions of a service; so the result should
 391 * not depend on temporal service state or other properties that may
 392 * influence the result.
393 */
394typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
395 const struct ptlrpc_nrs_pol_desc *desc);
396
397struct ptlrpc_nrs_pol_conf {
398 /**
399 * Human-readable policy name
400 */
401 char nc_name[NRS_POL_NAME_MAX];
402 /**
403 * NRS operations for this policy
404 */
405 const struct ptlrpc_nrs_pol_ops *nc_ops;
406 /**
407 * Service compatibility predicate
408 */
409 nrs_pol_desc_compat_t nc_compat;
410 /**
411 * Set for policies that support a single ptlrpc service, i.e. ones that
412 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
413 * depicts the name of the single service that such policies are
414 * compatible with.
415 */
416 const char *nc_compat_svc_name;
417 /**
418 * Owner module for this policy descriptor; policies registering from a
419 * different module to the one the NRS framework is held within
420 * (currently ptlrpc), should set this field to THIS_MODULE.
421 */
422 struct module *nc_owner;
423 /**
424 * Policy registration flags; a bitmask of \e nrs_policy_flags
425 */
426 unsigned int nc_flags;
427};
428
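A hedged sketch of how an external module might fill in this configuration and register it; nrs_demo_ops is the hypothetical table sketched above, and the use of nrs_policy_compat_all() and ptlrpc_nrs_policy_register() mirrors what the in-tree FIFO policy registration goes through, so treat the exact entry points as assumptions:

static struct ptlrpc_nrs_pol_conf nrs_demo_conf = {
	.nc_name	= "demo",
	.nc_ops		= &nrs_demo_ops,
	.nc_compat	= nrs_policy_compat_all,
	.nc_owner	= THIS_MODULE,		/* registering from outside ptlrpc */
	.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
};

static int __init nrs_demo_init(void)
{
	/* makes the policy known to the NRS heads of all compatible services */
	return ptlrpc_nrs_policy_register(&nrs_demo_conf);
}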
429/**
430 * NRS policy registering descriptor
431 *
432 * Is used to hold a description of a policy that can be passed to NRS core in
433 * order to register the policy with NRS heads in different PTLRPC services.
434 */
435struct ptlrpc_nrs_pol_desc {
436 /**
437 * Human-readable policy name
438 */
439 char pd_name[NRS_POL_NAME_MAX];
440 /**
441 * Link into nrs_core::nrs_policies
442 */
443 struct list_head pd_list;
444 /**
445 * NRS operations for this policy
446 */
447 const struct ptlrpc_nrs_pol_ops *pd_ops;
448 /**
449 * Service compatibility predicate
450 */
451 nrs_pol_desc_compat_t pd_compat;
452 /**
453 * Set for policies that are compatible with only one PTLRPC service.
454 *
455 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
456 */
457 const char *pd_compat_svc_name;
458 /**
459 * Owner module for this policy descriptor.
460 *
461 * We need to hold a reference to the module whenever we might make use
462 * of any of the module's contents, i.e.
463 * - If one or more instances of the policy are at a state where they
464 * might be handling a request, i.e.
465 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
466 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
467 * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
468 * is taken on the module when
469 * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
470 * becomes 0, so that we hold only one reference to the module maximum
471 * at any time.
472 *
473 * We do not need to hold a reference to the module, even though we
474 * might use code and data from the module, in the following cases:
475 * - During external policy registration, because this should happen in
476 * the module's init() function, in which case the module is safe from
477 * removal because a reference is being held on the module by the
478 * kernel, and iirc kmod (and I guess module-init-tools also) will
479 * serialize any racing processes properly anyway.
480 * - During external policy unregistration, because this should happen
481 * in a module's exit() function, and any attempts to start a policy
482 * instance would need to take a reference on the module, and this is
483 * not possible once we have reached the point where the exit()
484 * handler is called.
485 * - During service registration and unregistration, as service setup
486 * and cleanup, and policy registration, unregistration and policy
487 * instance starting, are serialized by \e nrs_core::nrs_mutex, so
488 * as long as users adhere to the convention of registering policies
489 * in init() and unregistering them in module exit() functions, there
490 * should not be a race between these operations.
491 * - During any policy-specific lprocfs operations, because a reference
492 * is held by the kernel on a proc entry that has been entered by a
493 * syscall, so as long as proc entries are removed during
494 * unregistration time, then unregistration and lprocfs operations
495 * will be properly serialized.
496 */
497 struct module *pd_owner;
498 /**
499 * Bitmask of \e nrs_policy_flags
500 */
501 unsigned int pd_flags;
502 /**
503 * # of references on this descriptor
504 */
505 atomic_t pd_refs;
506};
507
508/**
509 * NRS policy state
510 *
511 * Policies transition from one state to the other during their lifetime
512 */
513enum ptlrpc_nrs_pol_state {
514 /**
515 * Not a valid policy state.
516 */
517 NRS_POL_STATE_INVALID,
518 /**
519 * Policies are at this state either at the start of their life, or
520 * transition here when the user selects a different policy to act
521 * as the primary one.
522 */
523 NRS_POL_STATE_STOPPED,
524 /**
 525 * Policy is in the process of stopping
526 */
527 NRS_POL_STATE_STOPPING,
528 /**
 529 * Policy is in the process of starting
530 */
531 NRS_POL_STATE_STARTING,
532 /**
533 * A policy is in this state in two cases:
534 * - it is the fallback policy, which is always in this state.
 535 * - it has been activated by the user; i.e. it is the primary policy.
536 */
537 NRS_POL_STATE_STARTED,
538};
539
540/**
541 * NRS policy information
542 *
543 * Used for obtaining information for the status of a policy via lprocfs
544 */
545struct ptlrpc_nrs_pol_info {
546 /**
547 * Policy name
548 */
549 char pi_name[NRS_POL_NAME_MAX];
550 /**
551 * Policy argument
552 */
553 char pi_arg[NRS_POL_ARG_MAX];
554 /**
555 * Current policy state
556 */
557 enum ptlrpc_nrs_pol_state pi_state;
558 /**
559 * # RPCs enqueued for later dispatching by the policy
560 */
561 long pi_req_queued;
562 /**
563 * # RPCs started for dispatch by the policy
564 */
565 long pi_req_started;
566 /**
567 * Is this a fallback policy?
568 */
569 unsigned pi_fallback:1;
570};
571
572/**
573 * NRS policy
574 *
575 * There is one instance of this for each policy in each NRS head of each
576 * PTLRPC service partition.
577 */
578struct ptlrpc_nrs_policy {
579 /**
580 * Linkage into the NRS head's list of policies,
581 * ptlrpc_nrs:nrs_policy_list
582 */
583 struct list_head pol_list;
584 /**
585 * Linkage into the NRS head's list of policies with enqueued
586 * requests ptlrpc_nrs:nrs_policy_queued
587 */
588 struct list_head pol_list_queued;
589 /**
590 * Current state of this policy
591 */
592 enum ptlrpc_nrs_pol_state pol_state;
593 /**
594 * Bitmask of nrs_policy_flags
595 */
596 unsigned int pol_flags;
597 /**
598 * # RPCs enqueued for later dispatching by the policy
599 */
600 long pol_req_queued;
601 /**
602 * # RPCs started for dispatch by the policy
603 */
604 long pol_req_started;
605 /**
606	 * Usage reference count taken on the policy instance
607 */
608 long pol_ref;
609 /**
610 * Human-readable policy argument
611 */
612 char pol_arg[NRS_POL_ARG_MAX];
613 /**
614 * The NRS head this policy has been created at
615 */
616 struct ptlrpc_nrs *pol_nrs;
617 /**
618 * Private policy data; varies by policy type
619 */
620 void *pol_private;
621 /**
622 * Policy descriptor for this policy instance.
623 */
624 struct ptlrpc_nrs_pol_desc *pol_desc;
625};
626
627/**
628 * NRS resource
629 *
630 * Resources are embedded into two types of NRS entities:
631 * - Inside NRS policies, in the policy's private data in
632 * ptlrpc_nrs_policy::pol_private
633 * - In objects that act as prime-level scheduling entities in different NRS
634 * policies; e.g. on a policy that performs round robin or similar order
635 * scheduling across client NIDs, there would be one NRS resource per unique
636 * client NID. On a policy which performs round robin scheduling across
637 * backend filesystem objects, there would be one resource associated with
638 * each of the backend filesystem objects partaking in the scheduling
639 * performed by the policy.
640 *
641 * NRS resources share a parent-child relationship, in which resources embedded
642 * in policy instances are the parent entities, with all scheduling entities
643 * a policy schedules across being the children, thus forming a simple resource
644 * hierarchy. This hierarchy may be extended with one or more levels in the
645 * future if the ability to have more than one primary policy is added.
646 *
647 * Upon request initialization, references to the then active NRS policies are
648 * taken and used to later handle the dispatching of the request with one of
649 * these policies.
650 *
651 * \see nrs_resource_get_safe()
652 * \see ptlrpc_nrs_req_add()
653 */
654struct ptlrpc_nrs_resource {
655 /**
656 * This NRS resource's parent; is NULL for resources embedded in NRS
657 * policy instances; i.e. those are top-level ones.
658 */
659 struct ptlrpc_nrs_resource *res_parent;
660 /**
661 * The policy associated with this resource.
662 */
663 struct ptlrpc_nrs_policy *res_policy;
664};
665
666enum {
667 NRS_RES_FALLBACK,
668 NRS_RES_PRIMARY,
669 NRS_RES_MAX
670};
671
672#include "lustre_nrs_fifo.h"
673
674/**
675 * NRS request
676 *
677 * Instances of this object exist embedded within ptlrpc_request; the main
678 * purpose of this object is to hold references to the request's resources
679	 * for the lifetime of the request, and to hold properties that policies
680	 * use for determining the request's scheduling priority.
681 **/
682struct ptlrpc_nrs_request {
683 /**
684 * The request's resource hierarchy.
685 */
686 struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
687 /**
688 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
689 * policy that was used to enqueue the request.
690 *
691 * \see nrs_request_enqueue()
692 */
693 unsigned int nr_res_idx;
694 unsigned int nr_initialized:1;
695 unsigned int nr_enqueued:1;
696 unsigned int nr_started:1;
697 unsigned int nr_finalized:1;
698
699 /**
700 * Policy-specific fields, used for determining a request's scheduling
701 * priority, and other supporting functionality.
702 */
703 union {
704 /**
705 * Fields for the FIFO policy
706 */
707 struct nrs_fifo_req fifo;
708 } nr_u;
709 /**
710 * Externally-registering policies may want to use this to allocate
711 * their own request properties.
712 */
713 void *ext;
714};
715
716/** @} nrs */
717#endif
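Taken together, the resource and request definitions above imply a simple lookup pattern: a request records, in nr_res_ptrs[nr_res_idx], the resource of the policy that enqueued it, and every resource chain ends in a top-level resource embedded in a policy's private data. A minimal sketch of that walk follows; the helper name is hypothetical and only illustrates the documented res_parent/res_policy conventions.

/*
 * Sketch only: recover the enqueuing policy from an NRS request by
 * walking its recorded resource up to the top-level resource, which
 * (per the comments above) has res_parent == NULL and points back to
 * its owning policy via res_policy.  Not the actual ptlrpc helper.
 */
static struct ptlrpc_nrs_policy *
nrs_request_policy_sketch(struct ptlrpc_nrs_request *nrq)
{
	struct ptlrpc_nrs_resource *res = nrq->nr_res_ptrs[nrq->nr_res_idx];

	while (res->res_parent)
		res = res->res_parent;

	return res->res_policy;
}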
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
new file mode 100644
index 000000000000..3b5418eac6c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -0,0 +1,70 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * version 2 along with this program; If not, see
17 * http://www.gnu.org/licenses/gpl-2.0.html
18 *
19 * GPL HEADER END
20 */
21/*
22 * Copyright (c) 2014, Intel Corporation.
23 *
24 * Copyright 2012 Xyratex Technology Limited
25 */
26/*
27 *
28 * Network Request Scheduler (NRS) First-in First-out (FIFO) policy
29 *
30 */
31
32#ifndef _LUSTRE_NRS_FIFO_H
33#define _LUSTRE_NRS_FIFO_H
34
35/* \name fifo
36 *
37 * FIFO policy
38 *
39 * This policy is a logical wrapper around previous, non-NRS functionality.
40 * It dispatches RPCs in the same order as they arrive from the network. This
41 * policy is currently used as the fallback policy, and the only enabled policy
42 * on all NRS heads of all PTLRPC service partitions.
43 * @{
44 */
45
46/**
47 * Private data structure for the FIFO policy
48 */
49struct nrs_fifo_head {
50 /**
51 * Resource object for policy instance.
52 */
53 struct ptlrpc_nrs_resource fh_res;
54 /**
55 * List of queued requests.
56 */
57 struct list_head fh_list;
58 /**
59 * For debugging purposes.
60 */
61 __u64 fh_sequence;
62};
63
64struct nrs_fifo_req {
65 struct list_head fr_list;
66 __u64 fr_sequence;
67};
68
69/** @} fifo */
70#endif
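The two FIFO structures above are all the policy needs for ordering: each request's nrs_fifo_req is linked onto the head's fh_list in arrival order, with fh_sequence providing a debugging tag. Here is a hedged sketch of the enqueue/dequeue pattern, with hypothetical helper names and assuming <linux/list.h>; the real handlers live in the FIFO policy implementation and differ in detail.

static void fifo_req_enqueue_sketch(struct nrs_fifo_head *head,
				    struct nrs_fifo_req *req)
{
	/* Tag with a monotonically increasing sequence, for debugging. */
	req->fr_sequence = ++head->fh_sequence;
	list_add_tail(&req->fr_list, &head->fh_list);
}

static struct nrs_fifo_req *fifo_req_dequeue_sketch(struct nrs_fifo_head *head)
{
	struct nrs_fifo_req *req;

	if (list_empty(&head->fh_list))
		return NULL;

	/* Oldest request first: FIFO order is simply list order. */
	req = list_first_entry(&head->fh_list, struct nrs_fifo_req, fr_list);
	list_del_init(&req->fr_list);
	return req;
}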
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index a13558e53274..fbcd39572cd0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -148,13 +148,12 @@ extern struct req_format RQF_MDS_GETATTR;
148 */ 148 */
149extern struct req_format RQF_MDS_GETATTR_NAME; 149extern struct req_format RQF_MDS_GETATTR_NAME;
150extern struct req_format RQF_MDS_CLOSE; 150extern struct req_format RQF_MDS_CLOSE;
151extern struct req_format RQF_MDS_RELEASE_CLOSE; 151extern struct req_format RQF_MDS_INTENT_CLOSE;
152extern struct req_format RQF_MDS_CONNECT; 152extern struct req_format RQF_MDS_CONNECT;
153extern struct req_format RQF_MDS_DISCONNECT; 153extern struct req_format RQF_MDS_DISCONNECT;
154extern struct req_format RQF_MDS_GET_INFO; 154extern struct req_format RQF_MDS_GET_INFO;
155extern struct req_format RQF_MDS_READPAGE; 155extern struct req_format RQF_MDS_READPAGE;
156extern struct req_format RQF_MDS_WRITEPAGE; 156extern struct req_format RQF_MDS_WRITEPAGE;
157extern struct req_format RQF_MDS_DONE_WRITING;
158extern struct req_format RQF_MDS_REINT; 157extern struct req_format RQF_MDS_REINT;
159extern struct req_format RQF_MDS_REINT_CREATE; 158extern struct req_format RQF_MDS_REINT_CREATE;
160extern struct req_format RQF_MDS_REINT_CREATE_ACL; 159extern struct req_format RQF_MDS_REINT_CREATE_ACL;
@@ -166,10 +165,9 @@ extern struct req_format RQF_MDS_REINT_LINK;
166extern struct req_format RQF_MDS_REINT_RENAME; 165extern struct req_format RQF_MDS_REINT_RENAME;
167extern struct req_format RQF_MDS_REINT_SETATTR; 166extern struct req_format RQF_MDS_REINT_SETATTR;
168extern struct req_format RQF_MDS_REINT_SETXATTR; 167extern struct req_format RQF_MDS_REINT_SETXATTR;
169extern struct req_format RQF_MDS_QUOTACHECK;
170extern struct req_format RQF_MDS_QUOTACTL; 168extern struct req_format RQF_MDS_QUOTACTL;
171extern struct req_format RQF_QC_CALLBACK;
172extern struct req_format RQF_MDS_SWAP_LAYOUTS; 169extern struct req_format RQF_MDS_SWAP_LAYOUTS;
170extern struct req_format RQF_MDS_REINT_MIGRATE;
173/* MDS hsm formats */ 171/* MDS hsm formats */
174extern struct req_format RQF_MDS_HSM_STATE_GET; 172extern struct req_format RQF_MDS_HSM_STATE_GET;
175extern struct req_format RQF_MDS_HSM_STATE_SET; 173extern struct req_format RQF_MDS_HSM_STATE_SET;
@@ -181,7 +179,6 @@ extern struct req_format RQF_MDS_HSM_REQUEST;
181/* OST req_format */ 179/* OST req_format */
182extern struct req_format RQF_OST_CONNECT; 180extern struct req_format RQF_OST_CONNECT;
183extern struct req_format RQF_OST_DISCONNECT; 181extern struct req_format RQF_OST_DISCONNECT;
184extern struct req_format RQF_OST_QUOTACHECK;
185extern struct req_format RQF_OST_QUOTACTL; 182extern struct req_format RQF_OST_QUOTACTL;
186extern struct req_format RQF_OST_GETATTR; 183extern struct req_format RQF_OST_GETATTR;
187extern struct req_format RQF_OST_SETATTR; 184extern struct req_format RQF_OST_SETATTR;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 90c183424802..03a970bcac55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -50,6 +50,7 @@ struct brw_page;
50/* Linux specific */ 50/* Linux specific */
51struct key; 51struct key;
52struct seq_file; 52struct seq_file;
53struct lustre_cfg;
53 54
54/* 55/*
55 * forward declaration 56 * forward declaration
@@ -1029,6 +1030,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
1029 1030
1030/* bulk security api */ 1031/* bulk security api */
1031void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc); 1032void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
1033int get_free_pages_in_pool(void);
1034int pool_is_at_full_capacity(void);
1032 1035
1033int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, 1036int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1034 struct ptlrpc_bulk_desc *desc); 1037 struct ptlrpc_bulk_desc *desc);
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
new file mode 100644
index 000000000000..26d01c2d6633
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -0,0 +1,102 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2014, Intel Corporation.
27 *
28 * Copyright 2015 Cray Inc, all rights reserved.
29 * Author: Ben Evans.
30 *
31 * We assume all nodes are either little-endian or big-endian, and we
32 * always send messages in the sender's native format. The receiver
33 * detects the message format by checking the 'magic' field of the message
34 * (see lustre_msg_swabbed() below).
35 *
36 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
37 * implemented in ptlrpc/lustre_swab.c.  These 'swabbers' convert the
38 * type from "other" endian, in-place in the message buffer.
39 *
40 * A swabber takes a single pointer argument. The caller must already have
41 * verified that the length of the message buffer >= sizeof (type).
42 *
43 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
44 * may be defined that swabs just the variable part, after the caller has
45 * verified that the message buffer is large enough.
46 */
47
48#ifndef _LUSTRE_SWAB_H_
49#define _LUSTRE_SWAB_H_
50
51#include "lustre/lustre_idl.h"
52
53void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
54void lustre_swab_connect(struct obd_connect_data *ocd);
55void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
56void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
57void lustre_swab_obd_statfs(struct obd_statfs *os);
58void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
59void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
60void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
61void lustre_swab_ost_lvb(struct ost_lvb *lvb);
62void lustre_swab_obd_quotactl(struct obd_quotactl *q);
63void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
64void lustre_swab_generic_32s(__u32 *val);
65void lustre_swab_mdt_body(struct mdt_body *b);
66void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
67void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
68void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
69void lustre_swab_lmv_desc(struct lmv_desc *ld);
70void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
71void lustre_swab_lov_desc(struct lov_desc *ld);
72void lustre_swab_gl_desc(union ldlm_gl_desc *desc);
73void lustre_swab_ldlm_intent(struct ldlm_intent *i);
74void lustre_swab_ldlm_request(struct ldlm_request *rq);
75void lustre_swab_ldlm_reply(struct ldlm_reply *r);
76void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
77void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
78void lustre_swab_mgs_config_body(struct mgs_config_body *body);
79void lustre_swab_mgs_config_res(struct mgs_config_res *body);
80void lustre_swab_ost_body(struct ost_body *b);
81void lustre_swab_ost_last_id(__u64 *id);
82void lustre_swab_fiemap(struct fiemap *fiemap);
83void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
84void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
85void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
86 int stripe_count);
87void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
88void lustre_swab_lustre_capa(struct lustre_capa *c);
89void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
90void lustre_swab_fid2path(struct getinfo_fid2path *gf);
91void lustre_swab_layout_intent(struct layout_intent *li);
92void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
93void lustre_swab_hsm_current_action(struct hsm_current_action *action);
94void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
95void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
96void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
97void lustre_swab_hsm_request(struct hsm_request *hr);
98void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
99void lustre_swab_close_data(struct close_data *data);
100void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
101
102#endif
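To make the swabber contract described in the header comment concrete, here is a sketch of the in-place conversion pattern for a hypothetical two-field wire record; it is not one of the lustre_swab_*() routines above, and only illustrates the rule that the caller validates the buffer length and endianness before the swabber flips each field in place, using the kernel's __swab32s/__swab64s helpers.

/* Hypothetical wire type, for illustration only. */
struct demo_wire_rec {
	__u32 dw_magic;
	__u64 dw_count;
};

static void lustre_swab_demo_wire_rec_sketch(struct demo_wire_rec *rec)
{
	/* Caller has already checked that the buffer holds sizeof(*rec)
	 * and that the message really is in the "other" endianness.
	 */
	__swab32s(&rec->dw_magic);
	__swab64s(&rec->dw_count);
}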
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd05bd6..0f48e9c3d9e3 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -73,70 +73,17 @@ static inline void loi_init(struct lov_oinfo *loi)
73{ 73{
74} 74}
75 75
76/* 76struct lov_stripe_md;
77 * If we are unable to get the maximum object size from the OST in
78 * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
79 * the old maximum object size from ext3.
80 */
81#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
82
83struct lov_stripe_md {
84 atomic_t lsm_refc;
85 spinlock_t lsm_lock;
86 pid_t lsm_lock_owner; /* debugging */
87
88 /* maximum possible file size, might change as OSTs status changes,
89 * e.g. disconnected, deactivated
90 */
91 __u64 lsm_maxbytes;
92 struct ost_id lsm_oi;
93 __u32 lsm_magic;
94 __u32 lsm_stripe_size;
95 __u32 lsm_pattern; /* striping pattern (RAID0, RAID1) */
96 __u16 lsm_stripe_count;
97 __u16 lsm_layout_gen;
98 char lsm_pool_name[LOV_MAXPOOLNAME + 1];
99 struct lov_oinfo *lsm_oinfo[0];
100};
101
102static inline bool lsm_is_released(struct lov_stripe_md *lsm)
103{
104 return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
105}
106
107static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
108{
109 if (!lsm)
110 return false;
111 if (lsm_is_released(lsm))
112 return false;
113 return true;
114}
115
116static inline int lov_stripe_md_size(unsigned int stripe_count)
117{
118 struct lov_stripe_md lsm;
119
120 return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]);
121}
122
123struct obd_info; 77struct obd_info;
124 78
125typedef int (*obd_enqueue_update_f)(void *cookie, int rc); 79typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
126 80
127/* obd info for a particular level (lov, osc). */ 81/* obd info for a particular level (lov, osc). */
128struct obd_info { 82struct obd_info {
129 /* Flags used for set request specific flags: 83 /* OBD_STATFS_* flags */
130 - while lock handling, the flags obtained on the enqueue
131 request are set here.
132 - while stats, the flags used for control delay/resend.
133 - while setattr, the flags used for distinguish punch operation
134 */
135 __u64 oi_flags; 84 __u64 oi_flags;
136 /* lsm data specific for every OSC. */ 85 /* lsm data specific for every OSC. */
137 struct lov_stripe_md *oi_md; 86 struct lov_stripe_md *oi_md;
138 /* obdo data specific for every OSC, if needed at all. */
139 struct obdo *oi_oa;
140 /* statfs data specific for every OSC, if needed at all. */ 87 /* statfs data specific for every OSC, if needed at all. */
141 struct obd_statfs *oi_osfs; 88 struct obd_statfs *oi_osfs;
142 /* An update callback which is called to update some data on upper 89 /* An update callback which is called to update some data on upper
@@ -204,7 +151,6 @@ enum obd_cl_sem_lock_class {
204 * on the MDS. 151 * on the MDS.
205 */ 152 */
206#define OBD_MAX_DEFAULT_EA_SIZE 4096 153#define OBD_MAX_DEFAULT_EA_SIZE 4096
207#define OBD_MAX_DEFAULT_COOKIE_SIZE 4096
208 154
209struct mdc_rpc_lock; 155struct mdc_rpc_lock;
210struct obd_import; 156struct obd_import;
@@ -214,7 +160,7 @@ struct client_obd {
214 struct obd_import *cl_import; /* ptlrpc connection state */ 160 struct obd_import *cl_import; /* ptlrpc connection state */
215 size_t cl_conn_count; 161 size_t cl_conn_count;
216 /* 162 /*
217 * Cache maximum and default values for easize and cookiesize. This is 163 * Cache maximum and default values for easize. This is
218 * strictly a performance optimization to minimize calls to 164 * strictly a performance optimization to minimize calls to
219 * obd_size_diskmd(). The default values are used to calculate the 165 * obd_size_diskmd(). The default values are used to calculate the
220 * initial size of a request buffer. The ptlrpc layer will resize the 166 * initial size of a request buffer. The ptlrpc layer will resize the
@@ -235,18 +181,6 @@ struct client_obd {
235 * run-time if a larger observed size is advertised by the MDT. 181 * run-time if a larger observed size is advertised by the MDT.
236 */ 182 */
237 u32 cl_max_mds_easize; 183 u32 cl_max_mds_easize;
238 /* Default cookie size for llog cookies (see struct llog_cookie). It is
239 * initialized to zero at mount-time, then it tracks the largest
240 * observed cookie size advertised by the MDT, up to a maximum value of
241 * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not
242 * used by clients communicating with MDS versions 2.4.0 and later.
243 */
244 u32 cl_default_mds_cookiesize;
245 /* Maximum possible cookie size computed at mount-time based on
246 * the number of OSTs in the filesystem. May be increased at
247 * run-time if a larger observed size is advertised by the MDT.
248 */
249 u32 cl_max_mds_cookiesize;
250 184
251 enum lustre_sec_part cl_sp_me; 185 enum lustre_sec_part cl_sp_me;
252 enum lustre_sec_part cl_sp_to; 186 enum lustre_sec_part cl_sp_to;
@@ -313,15 +247,42 @@ struct client_obd {
313 struct obd_histogram cl_read_offset_hist; 247 struct obd_histogram cl_read_offset_hist;
314 struct obd_histogram cl_write_offset_hist; 248 struct obd_histogram cl_write_offset_hist;
315 249
316 /* lru for osc caching pages */ 250 /* LRU for osc caching pages */
317 struct cl_client_cache *cl_cache; 251 struct cl_client_cache *cl_cache;
318 struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */ 252 /** member of cl_cache->ccc_lru */
253 struct list_head cl_lru_osc;
254 /** # of available LRU slots left in the per-OSC cache.
255 * Available LRU slots are shared by all OSCs of the same file system,
256 * therefore this is a pointer to cl_client_cache::ccc_lru_left.
257 */
319 atomic_long_t *cl_lru_left; 258 atomic_long_t *cl_lru_left;
259 /** # of busy LRU pages. A page is considered busy if it's in writeback
260 * queue, or in transfer. Busy pages can't be discarded so they are not
261 * in LRU cache.
262 */
320 atomic_long_t cl_lru_busy; 263 atomic_long_t cl_lru_busy;
264 /** # of LRU pages in the cache for this client_obd */
321 atomic_long_t cl_lru_in_list; 265 atomic_long_t cl_lru_in_list;
266	 /** # of threads currently shrinking the LRU cache. To avoid contention,
267	  * multiple threads are not allowed to shrink the LRU cache at once.
268 */
322 atomic_t cl_lru_shrinkers; 269 atomic_t cl_lru_shrinkers;
323 struct list_head cl_lru_list; /* lru page list */ 270 /** The time when this LRU cache was last used. */
324 spinlock_t cl_lru_list_lock; /* page list protector */ 271 time64_t cl_lru_last_used;
272 /** stats: how many reclaims have happened for this client_obd.
273	  * Reclaim versus shrink: shrink is async, voluntary rebalancing;
274	  * reclaim is sync, initiated by an IO thread when LRU slots are
275	  * in short supply.
276 */
277 u64 cl_lru_reclaim;
278 /** List of LRU pages for this client_obd */
279 struct list_head cl_lru_list;
280 /** Lock for LRU page list */
281 spinlock_t cl_lru_list_lock;
282 /** # of unstable pages in this client_obd.
283	  * An unstable page is one whose WRITE RPC has finished but whose
284	  * transaction has NOT yet committed.
285 */
325 atomic_long_t cl_unstable_count; 286 atomic_long_t cl_unstable_count;
326 287
327 /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ 288 /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
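The comments added above describe a shared LRU budget: cl_lru_left points at the per-filesystem slot counter, while cl_lru_in_list and cl_lru_busy track where this client's pages currently sit. A rough sketch of how a caller might claim and release slots against that budget follows; the helpers are hypothetical and elide the reclaim path the real osc code would take when the budget is exhausted.

/* Hypothetical helpers illustrating the shared LRU slot accounting. */
static bool lru_slots_claim_sketch(struct client_obd *cli, long npages)
{
	if (atomic_long_sub_return(npages, cli->cl_lru_left) < 0) {
		/* Over budget: undo and let the caller reclaim or retry. */
		atomic_long_add(npages, cli->cl_lru_left);
		return false;
	}
	return true;
}

static void lru_slots_release_sketch(struct client_obd *cli, long npages)
{
	atomic_long_add(npages, cli->cl_lru_left);
}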
@@ -329,7 +290,17 @@ struct client_obd {
329 wait_queue_head_t cl_destroy_waitq; 290 wait_queue_head_t cl_destroy_waitq;
330 291
331 struct mdc_rpc_lock *cl_rpc_lock; 292 struct mdc_rpc_lock *cl_rpc_lock;
332 struct mdc_rpc_lock *cl_close_lock; 293
294 /* modify rpcs in flight
295 * currently used for metadata only
296 */
297 spinlock_t cl_mod_rpcs_lock;
298 u16 cl_max_mod_rpcs_in_flight;
299 u16 cl_mod_rpcs_in_flight;
300 u16 cl_close_rpcs_in_flight;
301 wait_queue_head_t cl_mod_rpcs_waitq;
302 unsigned long *cl_mod_tag_bitmap;
303 struct obd_histogram cl_mod_rpcs_hist;
333 304
334 /* mgc datastruct */ 305 /* mgc datastruct */
335 atomic_t cl_mgc_refcount; 306 atomic_t cl_mgc_refcount;
@@ -345,13 +316,6 @@ struct client_obd {
345 /* also protected by the poorly named _loi_list_lock lock above */ 316 /* also protected by the poorly named _loi_list_lock lock above */
346 struct osc_async_rc cl_ar; 317 struct osc_async_rc cl_ar;
347 318
348 /* used by quotacheck when the servers are older than 2.4 */
349 int cl_qchk_stat; /* quotacheck stat of the peer */
350#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
351#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE
352#warning "please consider removing quotacheck compatibility code"
353#endif
354
355 /* sequence manager */ 319 /* sequence manager */
356 struct lu_client_seq *cl_seq; 320 struct lu_client_seq *cl_seq;
357 321
@@ -454,8 +418,6 @@ struct lmv_obd {
454 int connected; 418 int connected;
455 int max_easize; 419 int max_easize;
456 int max_def_easize; 420 int max_def_easize;
457 int max_cookiesize;
458 int max_def_cookiesize;
459 421
460 u32 tgts_size; /* size of tgts array */ 422 u32 tgts_size; /* size of tgts array */
461 struct lmv_tgt_desc **tgts; 423 struct lmv_tgt_desc **tgts;
@@ -469,9 +431,9 @@ struct niobuf_local {
469 __u32 lnb_page_offset; 431 __u32 lnb_page_offset;
470 __u32 lnb_len; 432 __u32 lnb_len;
471 __u32 lnb_flags; 433 __u32 lnb_flags;
434 int lnb_rc;
472 struct page *lnb_page; 435 struct page *lnb_page;
473 void *lnb_data; 436 void *lnb_data;
474 int lnb_rc;
475}; 437};
476 438
477#define LUSTRE_FLD_NAME "fld" 439#define LUSTRE_FLD_NAME "fld"
@@ -512,21 +474,6 @@ struct niobuf_local {
512/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */ 474/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
513#define N_LOCAL_TEMP_PAGE 0x10000000 475#define N_LOCAL_TEMP_PAGE 0x10000000
514 476
515struct obd_trans_info {
516 __u64 oti_xid;
517 /* Only used on the server side for tracking acks. */
518 struct oti_req_ack_lock {
519 struct lustre_handle lock;
520 __u32 mode;
521 } oti_ack_locks[4];
522 void *oti_handle;
523 struct llog_cookie oti_onecookie;
524 struct llog_cookie *oti_logcookies;
525
526 /** VBR: versions */
527 __u64 oti_pre_version;
528};
529
530/* 477/*
531 * Events signalled through obd_notify() upcall-chain. 478 * Events signalled through obd_notify() upcall-chain.
532 */ 479 */
@@ -587,15 +534,14 @@ struct lvfs_run_ctxt {
587 534
588struct obd_device { 535struct obd_device {
589 struct obd_type *obd_type; 536 struct obd_type *obd_type;
590 __u32 obd_magic; 537 u32 obd_magic; /* OBD_DEVICE_MAGIC */
538 int obd_minor; /* device number: lctl dl */
539 struct lu_device *obd_lu_dev;
591 540
592 /* common and UUID name of this device */ 541 /* common and UUID name of this device */
593 char obd_name[MAX_OBD_NAME]; 542 struct obd_uuid obd_uuid;
594 struct obd_uuid obd_uuid; 543 char obd_name[MAX_OBD_NAME];
595
596 struct lu_device *obd_lu_dev;
597 544
598 int obd_minor;
599 /* bitfield modification is protected by obd_dev_lock */ 545 /* bitfield modification is protected by obd_dev_lock */
600 unsigned long obd_attached:1, /* finished attach */ 546 unsigned long obd_attached:1, /* finished attach */
601 obd_set_up:1, /* finished setup */ 547 obd_set_up:1, /* finished setup */
@@ -619,22 +565,22 @@ struct obd_device {
619 unsigned long obd_recovery_expired:1; 565 unsigned long obd_recovery_expired:1;
620 /* uuid-export hash body */ 566 /* uuid-export hash body */
621 struct cfs_hash *obd_uuid_hash; 567 struct cfs_hash *obd_uuid_hash;
622 atomic_t obd_refcount;
623 wait_queue_head_t obd_refcount_waitq; 568 wait_queue_head_t obd_refcount_waitq;
624 struct list_head obd_exports; 569 struct list_head obd_exports;
625 struct list_head obd_unlinked_exports; 570 struct list_head obd_unlinked_exports;
626 struct list_head obd_delayed_exports; 571 struct list_head obd_delayed_exports;
572 atomic_t obd_refcount;
627 int obd_num_exports; 573 int obd_num_exports;
628 spinlock_t obd_nid_lock; 574 spinlock_t obd_nid_lock;
629 struct ldlm_namespace *obd_namespace; 575 struct ldlm_namespace *obd_namespace;
630 struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */ 576 struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
631 /* a spinlock is OK for what we do now, may need a semaphore later */ 577 /* a spinlock is OK for what we do now, may need a semaphore later */
632 spinlock_t obd_dev_lock; /* protect OBD bitfield above */ 578 spinlock_t obd_dev_lock; /* protect OBD bitfield above */
633 struct mutex obd_dev_mutex;
634 __u64 obd_last_committed;
635 spinlock_t obd_osfs_lock; 579 spinlock_t obd_osfs_lock;
636 struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */ 580 struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
637 __u64 obd_osfs_age; 581 __u64 obd_osfs_age;
582 u64 obd_last_committed;
583 struct mutex obd_dev_mutex;
638 struct lvfs_run_ctxt obd_lvfs_ctxt; 584 struct lvfs_run_ctxt obd_lvfs_ctxt;
639 struct obd_llog_group obd_olg; /* default llog group */ 585 struct obd_llog_group obd_olg; /* default llog group */
640 struct obd_device *obd_observer; 586 struct obd_device *obd_observer;
@@ -648,12 +594,13 @@ struct obd_device {
648 struct lov_obd lov; 594 struct lov_obd lov;
649 struct lmv_obd lmv; 595 struct lmv_obd lmv;
650 } u; 596 } u;
597
651 /* Fields used by LProcFS */ 598 /* Fields used by LProcFS */
652 unsigned int obd_cntr_base; 599 struct lprocfs_stats *obd_stats;
653 struct lprocfs_stats *obd_stats; 600 unsigned int obd_cntr_base;
654 601
655 unsigned int md_cntr_base; 602 struct lprocfs_stats *md_stats;
656 struct lprocfs_stats *md_stats; 603 unsigned int md_cntr_base;
657 604
658 struct dentry *obd_debugfs_entry; 605 struct dentry *obd_debugfs_entry;
659 struct dentry *obd_svc_debugfs_entry; 606 struct dentry *obd_svc_debugfs_entry;
@@ -665,9 +612,11 @@ struct obd_device {
665 /** 612 /**
666 * Ldlm pool part. Save last calculated SLV and Limit. 613 * Ldlm pool part. Save last calculated SLV and Limit.
667 */ 614 */
668 rwlock_t obd_pool_lock; 615 rwlock_t obd_pool_lock;
669 int obd_pool_limit; 616 u64 obd_pool_slv;
670 __u64 obd_pool_slv; 617 int obd_pool_limit;
618
619 int obd_conn_inprogress;
671 620
672 /** 621 /**
673 * A list of outstanding class_incref()'s against this obd. For 622 * A list of outstanding class_incref()'s against this obd. For
@@ -675,19 +624,10 @@ struct obd_device {
675 */ 624 */
676 struct lu_ref obd_reference; 625 struct lu_ref obd_reference;
677 626
678 int obd_conn_inprogress;
679
680 struct kobject obd_kobj; /* sysfs object */ 627 struct kobject obd_kobj; /* sysfs object */
681 struct completion obd_kobj_unregister; 628 struct completion obd_kobj_unregister;
682}; 629};
683 630
684enum obd_cleanup_stage {
685/* Special case hack for MDS LOVs */
686 OBD_CLEANUP_EARLY,
687/* can be directly mapped to .ldto_device_fini() */
688 OBD_CLEANUP_EXPORTS,
689};
690
691/* get/set_info keys */ 631/* get/set_info keys */
692#define KEY_ASYNC "async" 632#define KEY_ASYNC "async"
693#define KEY_CHANGELOG_CLEAR "changelog_clear" 633#define KEY_CHANGELOG_CLEAR "changelog_clear"
@@ -704,7 +644,6 @@ enum obd_cleanup_stage {
704#define KEY_INTERMDS "inter_mds" 644#define KEY_INTERMDS "inter_mds"
705#define KEY_LAST_ID "last_id" 645#define KEY_LAST_ID "last_id"
706#define KEY_LAST_FID "last_fid" 646#define KEY_LAST_FID "last_fid"
707#define KEY_LOVDESC "lovdesc"
708#define KEY_MAX_EASIZE "max_easize" 647#define KEY_MAX_EASIZE "max_easize"
709#define KEY_DEFAULT_EASIZE "default_easize" 648#define KEY_DEFAULT_EASIZE "default_easize"
710#define KEY_MGSSEC "mgssec" 649#define KEY_MGSSEC "mgssec"
@@ -720,22 +659,6 @@ enum obd_cleanup_stage {
720 659
721struct lu_context; 660struct lu_context;
722 661
723/* /!\ must be coherent with include/linux/namei.h on patched kernel */
724#define IT_OPEN (1 << 0)
725#define IT_CREAT (1 << 1)
726#define IT_READDIR (1 << 2)
727#define IT_GETATTR (1 << 3)
728#define IT_LOOKUP (1 << 4)
729#define IT_UNLINK (1 << 5)
730#define IT_TRUNC (1 << 6)
731#define IT_GETXATTR (1 << 7)
732#define IT_EXEC (1 << 8)
733#define IT_PIN (1 << 9)
734#define IT_LAYOUT (1 << 10)
735#define IT_QUOTA_DQACQ (1 << 11)
736#define IT_QUOTA_CONN (1 << 12)
737#define IT_SETXATTR (1 << 13)
738
739static inline int it_to_lock_mode(struct lookup_intent *it) 662static inline int it_to_lock_mode(struct lookup_intent *it)
740{ 663{
741 /* CREAT needs to be tested before open (both could be set) */ 664 /* CREAT needs to be tested before open (both could be set) */
@@ -755,6 +678,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
755 return -EINVAL; 678 return -EINVAL;
756} 679}
757 680
681enum md_op_flags {
682 MF_MDC_CANCEL_FID1 = BIT(0),
683 MF_MDC_CANCEL_FID2 = BIT(1),
684 MF_MDC_CANCEL_FID3 = BIT(2),
685 MF_MDC_CANCEL_FID4 = BIT(3),
686 MF_GET_MDT_IDX = BIT(4),
687};
688
758enum md_cli_flags { 689enum md_cli_flags {
759 CLI_SET_MEA = BIT(0), 690 CLI_SET_MEA = BIT(0),
760 CLI_RM_ENTRY = BIT(1), 691 CLI_RM_ENTRY = BIT(1),
@@ -789,8 +720,6 @@ struct md_op_data {
789 __u64 op_valid; 720 __u64 op_valid;
790 loff_t op_attr_blocks; 721 loff_t op_attr_blocks;
791 722
792 /* Size-on-MDS epoch and flags. */
793 __u64 op_ioepoch;
794 __u32 op_flags; 723 __u32 op_flags;
795 724
796 /* Various operation flags. */ 725 /* Various operation flags. */
@@ -839,15 +768,13 @@ struct obd_ops {
839 int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, 768 int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
840 void *karg, void __user *uarg); 769 void *karg, void __user *uarg);
841 int (*get_info)(const struct lu_env *env, struct obd_export *, 770 int (*get_info)(const struct lu_env *env, struct obd_export *,
842 __u32 keylen, void *key, __u32 *vallen, void *val, 771 __u32 keylen, void *key, __u32 *vallen, void *val);
843 struct lov_stripe_md *lsm);
844 int (*set_info_async)(const struct lu_env *, struct obd_export *, 772 int (*set_info_async)(const struct lu_env *, struct obd_export *,
845 __u32 keylen, void *key, 773 __u32 keylen, void *key,
846 __u32 vallen, void *val, 774 __u32 vallen, void *val,
847 struct ptlrpc_request_set *set); 775 struct ptlrpc_request_set *set);
848 int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg); 776 int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
849 int (*precleanup)(struct obd_device *dev, 777 int (*precleanup)(struct obd_device *dev);
850 enum obd_cleanup_stage cleanup_stage);
851 int (*cleanup)(struct obd_device *dev); 778 int (*cleanup)(struct obd_device *dev);
852 int (*process_config)(struct obd_device *dev, u32 len, void *data); 779 int (*process_config)(struct obd_device *dev, u32 len, void *data);
853 int (*postrecov)(struct obd_device *dev); 780 int (*postrecov)(struct obd_device *dev);
@@ -887,35 +814,23 @@ struct obd_ops {
887 struct obd_statfs *osfs, __u64 max_age, __u32 flags); 814 struct obd_statfs *osfs, __u64 max_age, __u32 flags);
888 int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo, 815 int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
889 __u64 max_age, struct ptlrpc_request_set *set); 816 __u64 max_age, struct ptlrpc_request_set *set);
890 int (*packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
891 struct lov_stripe_md *mem_src);
892 int (*unpackmd)(struct obd_export *exp,
893 struct lov_stripe_md **mem_tgt,
894 struct lov_mds_md *disk_src, int disk_len);
895 int (*create)(const struct lu_env *env, struct obd_export *exp, 817 int (*create)(const struct lu_env *env, struct obd_export *exp,
896 struct obdo *oa, struct obd_trans_info *oti); 818 struct obdo *oa);
897 int (*destroy)(const struct lu_env *env, struct obd_export *exp, 819 int (*destroy)(const struct lu_env *env, struct obd_export *exp,
898 struct obdo *oa, struct obd_trans_info *oti); 820 struct obdo *oa);
899 int (*setattr)(const struct lu_env *, struct obd_export *exp, 821 int (*setattr)(const struct lu_env *, struct obd_export *exp,
900 struct obd_info *oinfo, struct obd_trans_info *oti); 822 struct obdo *oa);
901 int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
902 struct obd_trans_info *oti,
903 struct ptlrpc_request_set *rqset);
904 int (*getattr)(const struct lu_env *env, struct obd_export *exp, 823 int (*getattr)(const struct lu_env *env, struct obd_export *exp,
905 struct obd_info *oinfo); 824 struct obdo *oa);
906 int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
907 struct ptlrpc_request_set *set);
908 int (*preprw)(const struct lu_env *env, int cmd, 825 int (*preprw)(const struct lu_env *env, int cmd,
909 struct obd_export *exp, struct obdo *oa, int objcount, 826 struct obd_export *exp, struct obdo *oa, int objcount,
910 struct obd_ioobj *obj, struct niobuf_remote *remote, 827 struct obd_ioobj *obj, struct niobuf_remote *remote,
911 int *nr_pages, struct niobuf_local *local, 828 int *nr_pages, struct niobuf_local *local);
912 struct obd_trans_info *oti);
913 int (*commitrw)(const struct lu_env *env, int cmd, 829 int (*commitrw)(const struct lu_env *env, int cmd,
914 struct obd_export *exp, struct obdo *oa, 830 struct obd_export *exp, struct obdo *oa,
915 int objcount, struct obd_ioobj *obj, 831 int objcount, struct obd_ioobj *obj,
916 struct niobuf_remote *remote, int pages, 832 struct niobuf_remote *remote, int pages,
917 struct niobuf_local *local, 833 struct niobuf_local *local, int rc);
918 struct obd_trans_info *oti, int rc);
919 int (*init_export)(struct obd_export *exp); 834 int (*init_export)(struct obd_export *exp);
920 int (*destroy_export)(struct obd_export *exp); 835 int (*destroy_export)(struct obd_export *exp);
921 836
@@ -930,8 +845,6 @@ struct obd_ops {
930 struct obd_uuid *(*get_uuid)(struct obd_export *exp); 845 struct obd_uuid *(*get_uuid)(struct obd_export *exp);
931 846
932 /* quota methods */ 847 /* quota methods */
933 int (*quotacheck)(struct obd_device *, struct obd_export *,
934 struct obd_quotactl *);
935 int (*quotactl)(struct obd_device *, struct obd_export *, 848 int (*quotactl)(struct obd_device *, struct obd_export *,
936 struct obd_quotactl *); 849 struct obd_quotactl *);
937 850
@@ -954,7 +867,7 @@ struct obd_ops {
954/* lmv structures */ 867/* lmv structures */
955struct lustre_md { 868struct lustre_md {
956 struct mdt_body *body; 869 struct mdt_body *body;
957 struct lov_stripe_md *lsm; 870 struct lu_buf layout;
958 struct lmv_stripe_md *lmv; 871 struct lmv_stripe_md *lmv;
959#ifdef CONFIG_FS_POSIX_ACL 872#ifdef CONFIG_FS_POSIX_ACL
960 struct posix_acl *posix_acl; 873 struct posix_acl *posix_acl;
@@ -992,10 +905,8 @@ struct md_ops {
992 int (*create)(struct obd_export *, struct md_op_data *, 905 int (*create)(struct obd_export *, struct md_op_data *,
993 const void *, size_t, umode_t, uid_t, gid_t, 906 const void *, size_t, umode_t, uid_t, gid_t,
994 cfs_cap_t, __u64, struct ptlrpc_request **); 907 cfs_cap_t, __u64, struct ptlrpc_request **);
995 int (*done_writing)(struct obd_export *, struct md_op_data *,
996 struct md_open_data *);
997 int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *, 908 int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
998 const ldlm_policy_data_t *, 909 const union ldlm_policy_data *,
999 struct lookup_intent *, struct md_op_data *, 910 struct lookup_intent *, struct md_op_data *,
1000 struct lustre_handle *, __u64); 911 struct lustre_handle *, __u64);
1001 int (*getattr)(struct obd_export *, struct md_op_data *, 912 int (*getattr)(struct obd_export *, struct md_op_data *,
@@ -1012,8 +923,7 @@ struct md_ops {
1012 const char *, size_t, const char *, size_t, 923 const char *, size_t, const char *, size_t,
1013 struct ptlrpc_request **); 924 struct ptlrpc_request **);
1014 int (*setattr)(struct obd_export *, struct md_op_data *, void *, 925 int (*setattr)(struct obd_export *, struct md_op_data *, void *,
1015 size_t, void *, size_t, struct ptlrpc_request **, 926 size_t, struct ptlrpc_request **);
1016 struct md_open_data **mod);
1017 int (*sync)(struct obd_export *, const struct lu_fid *, 927 int (*sync)(struct obd_export *, const struct lu_fid *,
1018 struct ptlrpc_request **); 928 struct ptlrpc_request **);
1019 int (*read_page)(struct obd_export *, struct md_op_data *, 929 int (*read_page)(struct obd_export *, struct md_op_data *,
@@ -1030,7 +940,7 @@ struct md_ops {
1030 u64, const char *, const char *, int, int, int, 940 u64, const char *, const char *, int, int, int,
1031 struct ptlrpc_request **); 941 struct ptlrpc_request **);
1032 942
1033 int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32); 943 int (*init_ea_size)(struct obd_export *, u32, u32);
1034 944
1035 int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *, 945 int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
1036 struct obd_export *, struct obd_export *, 946 struct obd_export *, struct obd_export *,
@@ -1052,11 +962,11 @@ struct md_ops {
1052 962
1053 enum ldlm_mode (*lock_match)(struct obd_export *, __u64, 963 enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
1054 const struct lu_fid *, enum ldlm_type, 964 const struct lu_fid *, enum ldlm_type,
1055 ldlm_policy_data_t *, enum ldlm_mode, 965 union ldlm_policy_data *, enum ldlm_mode,
1056 struct lustre_handle *); 966 struct lustre_handle *);
1057 967
1058 int (*cancel_unused)(struct obd_export *, const struct lu_fid *, 968 int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
1059 ldlm_policy_data_t *, enum ldlm_mode, 969 union ldlm_policy_data *, enum ldlm_mode,
1060 enum ldlm_cancel_flags flags, void *opaque); 970 enum ldlm_cancel_flags flags, void *opaque);
1061 971
1062 int (*get_fid_from_lsm)(struct obd_export *, 972 int (*get_fid_from_lsm)(struct obd_export *,
@@ -1071,6 +981,8 @@ struct md_ops {
1071 int (*revalidate_lock)(struct obd_export *, struct lookup_intent *, 981 int (*revalidate_lock)(struct obd_export *, struct lookup_intent *,
1072 struct lu_fid *, __u64 *bits); 982 struct lu_fid *, __u64 *bits);
1073 983
984 int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm,
985 const union lmv_mds_md *lmv, size_t lmv_size);
1074 /* 986 /*
1075 * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to 987 * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
1076 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a 988 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
@@ -1078,33 +990,6 @@ struct md_ops {
1078 */ 990 */
1079}; 991};
1080 992
1081struct lsm_operations {
1082 void (*lsm_free)(struct lov_stripe_md *);
1083 void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
1084 u64 *);
1085 void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
1086 u64 *);
1087 int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
1088 __u16 *stripe_count);
1089 int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
1090 struct lov_mds_md *lmm);
1091};
1092
1093extern const struct lsm_operations lsm_v1_ops;
1094extern const struct lsm_operations lsm_v3_ops;
1095static inline const struct lsm_operations *lsm_op_find(int magic)
1096{
1097 switch (magic) {
1098 case LOV_MAGIC_V1:
1099 return &lsm_v1_ops;
1100 case LOV_MAGIC_V3:
1101 return &lsm_v3_ops;
1102 default:
1103 CERROR("Cannot recognize lsm_magic %08x\n", magic);
1104 return NULL;
1105 }
1106}
1107
1108static inline struct md_open_data *obd_mod_alloc(void) 993static inline struct md_open_data *obd_mod_alloc(void)
1109{ 994{
1110 struct md_open_data *mod; 995 struct md_open_data *mod;
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 16094dbec08b..7ec25202cd22 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -100,6 +100,13 @@ int obd_get_request_slot(struct client_obd *cli);
100void obd_put_request_slot(struct client_obd *cli); 100void obd_put_request_slot(struct client_obd *cli);
101__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli); 101__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
102int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max); 102int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
103int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max);
104int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq);
105
106u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc,
107 struct lookup_intent *it);
108void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
109 struct lookup_intent *it, u16 tag);
103 110
104struct llog_handle; 111struct llog_handle;
105struct llog_rec_hdr; 112struct llog_rec_hdr;
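The new obd_get_mod_rpc_slot()/obd_put_mod_rpc_slot() pair declared in this hunk brackets metadata-modifying RPCs so that at most cl_max_mod_rpcs_in_flight of them are outstanding per client. A hedged usage sketch follows; the surrounding function and its issue_rpc callback are hypothetical, not part of this patch.

static int send_modify_rpc_sketch(struct client_obd *cli, u32 opc,
				  struct lookup_intent *it,
				  int (*issue_rpc)(struct client_obd *cli))
{
	u16 tag;
	int rc;

	/* May wait on cl_mod_rpcs_waitq until a slot is free. */
	tag = obd_get_mod_rpc_slot(cli, opc, it);
	rc = issue_rpc(cli);
	obd_put_mod_rpc_slot(cli, opc, it, tag);

	return rc;
}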
@@ -175,10 +182,13 @@ struct lustre_profile {
175 char *lp_profile; 182 char *lp_profile;
176 char *lp_dt; 183 char *lp_dt;
177 char *lp_md; 184 char *lp_md;
185 int lp_refs;
186 bool lp_list_deleted;
178}; 187};
179 188
180struct lustre_profile *class_get_profile(const char *prof); 189struct lustre_profile *class_get_profile(const char *prof);
181void class_del_profile(const char *prof); 190void class_del_profile(const char *prof);
191void class_put_profile(struct lustre_profile *lprof);
182void class_del_profiles(void); 192void class_del_profiles(void);
183 193
184#if LUSTRE_TRACKS_LOCK_EXP_REFS 194#if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -269,10 +279,8 @@ static inline int lprocfs_climp_check(struct obd_device *obd)
269struct inode; 279struct inode;
270struct lu_attr; 280struct lu_attr;
271struct obdo; 281struct obdo;
272void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid);
273 282
274void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj); 283void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
275void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid);
276 284
277#define OBT(dev) (dev)->obd_type 285#define OBT(dev) (dev)->obd_type
278#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op 286#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op
@@ -417,16 +425,14 @@ static inline int class_devno_max(void)
417 425
418static inline int obd_get_info(const struct lu_env *env, 426static inline int obd_get_info(const struct lu_env *env,
419 struct obd_export *exp, __u32 keylen, 427 struct obd_export *exp, __u32 keylen,
420 void *key, __u32 *vallen, void *val, 428 void *key, __u32 *vallen, void *val)
421 struct lov_stripe_md *lsm)
422{ 429{
423 int rc; 430 int rc;
424 431
425 EXP_CHECK_DT_OP(exp, get_info); 432 EXP_CHECK_DT_OP(exp, get_info);
426 EXP_COUNTER_INCREMENT(exp, get_info); 433 EXP_COUNTER_INCREMENT(exp, get_info);
427 434
428 rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val, 435 rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val);
429 lsm);
430 return rc; 436 return rc;
431} 437}
432 438
@@ -505,8 +511,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
505 return rc; 511 return rc;
506} 512}
507 513
508static inline int obd_precleanup(struct obd_device *obd, 514static inline int obd_precleanup(struct obd_device *obd)
509 enum obd_cleanup_stage cleanup_stage)
510{ 515{
511 int rc; 516 int rc;
512 DECLARE_LU_VARS(ldt, d); 517 DECLARE_LU_VARS(ldt, d);
@@ -517,20 +522,18 @@ static inline int obd_precleanup(struct obd_device *obd,
517 ldt = obd->obd_type->typ_lu; 522 ldt = obd->obd_type->typ_lu;
518 d = obd->obd_lu_dev; 523 d = obd->obd_lu_dev;
519 if (ldt && d) { 524 if (ldt && d) {
520 if (cleanup_stage == OBD_CLEANUP_EXPORTS) { 525 struct lu_env env;
521 struct lu_env env;
522 526
523 rc = lu_env_init(&env, ldt->ldt_ctx_tags); 527 rc = lu_env_init(&env, ldt->ldt_ctx_tags);
524 if (rc == 0) { 528 if (!rc) {
525 ldt->ldt_ops->ldto_device_fini(&env, d); 529 ldt->ldt_ops->ldto_device_fini(&env, d);
526 lu_env_fini(&env); 530 lu_env_fini(&env);
527 }
528 } 531 }
529 } 532 }
530 OBD_CHECK_DT_OP(obd, precleanup, 0); 533 OBD_CHECK_DT_OP(obd, precleanup, 0);
531 OBD_COUNTER_INCREMENT(obd, precleanup); 534 OBD_COUNTER_INCREMENT(obd, precleanup);
532 535
533 rc = OBP(obd, precleanup)(obd, cleanup_stage); 536 rc = OBP(obd, precleanup)(obd);
534 return rc; 537 return rc;
535} 538}
536 539
@@ -612,181 +615,51 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
612 return rc; 615 return rc;
613} 616}
614 617
615/* Pack an in-memory MD struct for storage on disk.
616 * Returns +ve size of packed MD (0 for free), or -ve error.
617 *
618 * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
619 * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
620 * If @*disk_tgt == NULL, it will be allocated
621 */
622static inline int obd_packmd(struct obd_export *exp,
623 struct lov_mds_md **disk_tgt,
624 struct lov_stripe_md *mem_src)
625{
626 int rc;
627
628 EXP_CHECK_DT_OP(exp, packmd);
629 EXP_COUNTER_INCREMENT(exp, packmd);
630
631 rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
632 return rc;
633}
634
635static inline int obd_size_diskmd(struct obd_export *exp,
636 struct lov_stripe_md *mem_src)
637{
638 return obd_packmd(exp, NULL, mem_src);
639}
640
641static inline int obd_free_diskmd(struct obd_export *exp,
642 struct lov_mds_md **disk_tgt)
643{
644 LASSERT(disk_tgt);
645 LASSERT(*disk_tgt);
646 /*
647 * LU-2590, for caller's convenience, *disk_tgt could be host
648 * endianness, it needs swab to LE if necessary, while just
649 * lov_mds_md header needs it for figuring out how much memory
650 * needs to be freed.
651 */
652 if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
653 (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
654 ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
655 lustre_swab_lov_mds_md(*disk_tgt);
656 return obd_packmd(exp, disk_tgt, NULL);
657}
658
659/* Unpack an MD struct from disk to in-memory format.
660 * Returns +ve size of unpacked MD (0 for free), or -ve error.
661 *
662 * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
663 * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
664 * If @*mem_tgt == NULL, it will be allocated
665 */
666static inline int obd_unpackmd(struct obd_export *exp,
667 struct lov_stripe_md **mem_tgt,
668 struct lov_mds_md *disk_src,
669 int disk_len)
670{
671 int rc;
672
673 EXP_CHECK_DT_OP(exp, unpackmd);
674 EXP_COUNTER_INCREMENT(exp, unpackmd);
675
676 rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
677 return rc;
678}
679
680static inline int obd_free_memmd(struct obd_export *exp,
681 struct lov_stripe_md **mem_tgt)
682{
683 int rc;
684
685 LASSERT(mem_tgt);
686 LASSERT(*mem_tgt);
687 rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
688 *mem_tgt = NULL;
689 return rc;
690}
691
692static inline int obd_create(const struct lu_env *env, struct obd_export *exp, 618static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
693 struct obdo *obdo, struct obd_trans_info *oti) 619 struct obdo *obdo)
694{ 620{
695 int rc; 621 int rc;
696 622
697 EXP_CHECK_DT_OP(exp, create); 623 EXP_CHECK_DT_OP(exp, create);
698 EXP_COUNTER_INCREMENT(exp, create); 624 EXP_COUNTER_INCREMENT(exp, create);
699 625
700 rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti); 626 rc = OBP(exp->exp_obd, create)(env, exp, obdo);
701 return rc; 627 return rc;
702} 628}
703 629
704static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp, 630static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
705 struct obdo *obdo, struct obd_trans_info *oti) 631 struct obdo *obdo)
706{ 632{
707 int rc; 633 int rc;
708 634
709 EXP_CHECK_DT_OP(exp, destroy); 635 EXP_CHECK_DT_OP(exp, destroy);
710 EXP_COUNTER_INCREMENT(exp, destroy); 636 EXP_COUNTER_INCREMENT(exp, destroy);
711 637
712 rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti); 638 rc = OBP(exp->exp_obd, destroy)(env, exp, obdo);
713 return rc; 639 return rc;
714} 640}
715 641
716static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp, 642static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
717 struct obd_info *oinfo) 643 struct obdo *oa)
718{ 644{
719 int rc; 645 int rc;
720 646
721 EXP_CHECK_DT_OP(exp, getattr); 647 EXP_CHECK_DT_OP(exp, getattr);
722 EXP_COUNTER_INCREMENT(exp, getattr); 648 EXP_COUNTER_INCREMENT(exp, getattr);
723 649
724 rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo); 650 rc = OBP(exp->exp_obd, getattr)(env, exp, oa);
725 return rc;
726}
727
728static inline int obd_getattr_async(struct obd_export *exp,
729 struct obd_info *oinfo,
730 struct ptlrpc_request_set *set)
731{
732 int rc;
733
734 EXP_CHECK_DT_OP(exp, getattr_async);
735 EXP_COUNTER_INCREMENT(exp, getattr_async);
736
737 rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
738 return rc; 651 return rc;
739} 652}
740 653
741static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp, 654static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
742 struct obd_info *oinfo, 655 struct obdo *oa)
743 struct obd_trans_info *oti)
744{ 656{
745 int rc; 657 int rc;
746 658
747 EXP_CHECK_DT_OP(exp, setattr); 659 EXP_CHECK_DT_OP(exp, setattr);
748 EXP_COUNTER_INCREMENT(exp, setattr); 660 EXP_COUNTER_INCREMENT(exp, setattr);
749 661
750 rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti); 662 rc = OBP(exp->exp_obd, setattr)(env, exp, oa);
751 return rc;
752}
753
754/* This performs all the requests set init/wait/destroy actions. */
755static inline int obd_setattr_rqset(struct obd_export *exp,
756 struct obd_info *oinfo,
757 struct obd_trans_info *oti)
758{
759 struct ptlrpc_request_set *set = NULL;
760 int rc;
761
762 EXP_CHECK_DT_OP(exp, setattr_async);
763 EXP_COUNTER_INCREMENT(exp, setattr_async);
764
765 set = ptlrpc_prep_set();
766 if (!set)
767 return -ENOMEM;
768
769 rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
770 if (rc == 0)
771 rc = ptlrpc_set_wait(set);
772 ptlrpc_set_destroy(set);
773 return rc;
774}
775
776/* This adds all the requests into @set if @set != NULL, otherwise
777 * all requests are sent asynchronously without waiting for response.
778 */
779static inline int obd_setattr_async(struct obd_export *exp,
780 struct obd_info *oinfo,
781 struct obd_trans_info *oti,
782 struct ptlrpc_request_set *set)
783{
784 int rc;
785
786 EXP_CHECK_DT_OP(exp, setattr_async);
787 EXP_COUNTER_INCREMENT(exp, setattr_async);
788
789 rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
790 return rc; 663 return rc;
791} 664}
792 665
@@ -1053,15 +926,16 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
1053 __u32 flags) 926 __u32 flags)
1054{ 927{
1055 struct ptlrpc_request_set *set = NULL; 928 struct ptlrpc_request_set *set = NULL;
1056 struct obd_info oinfo = { }; 929 struct obd_info oinfo = {
930 .oi_osfs = osfs,
931 .oi_flags = flags,
932 };
1057 int rc = 0; 933 int rc = 0;
1058 934
1059 set = ptlrpc_prep_set(); 935 set = ptlrpc_prep_set();
1060 if (!set) 936 if (!set)
1061 return -ENOMEM; 937 return -ENOMEM;
1062 938
1063 oinfo.oi_osfs = osfs;
1064 oinfo.oi_flags = flags;
1065 rc = obd_statfs_async(exp, &oinfo, max_age, set); 939 rc = obd_statfs_async(exp, &oinfo, max_age, set);
1066 if (rc == 0) 940 if (rc == 0)
1067 rc = ptlrpc_set_wait(set); 941 rc = ptlrpc_set_wait(set);
@@ -1112,8 +986,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
1112 struct obd_export *exp, struct obdo *oa, 986 struct obd_export *exp, struct obdo *oa,
1113 int objcount, struct obd_ioobj *obj, 987 int objcount, struct obd_ioobj *obj,
1114 struct niobuf_remote *remote, int *pages, 988 struct niobuf_remote *remote, int *pages,
1115 struct niobuf_local *local, 989 struct niobuf_local *local)
1116 struct obd_trans_info *oti)
1117{ 990{
1118 int rc; 991 int rc;
1119 992
@@ -1121,7 +994,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
1121 EXP_COUNTER_INCREMENT(exp, preprw); 994 EXP_COUNTER_INCREMENT(exp, preprw);
1122 995
1123 rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote, 996 rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
1124 pages, local, oti); 997 pages, local);
1125 return rc; 998 return rc;
1126} 999}
1127 1000
@@ -1129,14 +1002,13 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
1129 struct obd_export *exp, struct obdo *oa, 1002 struct obd_export *exp, struct obdo *oa,
1130 int objcount, struct obd_ioobj *obj, 1003 int objcount, struct obd_ioobj *obj,
1131 struct niobuf_remote *rnb, int pages, 1004 struct niobuf_remote *rnb, int pages,
1132 struct niobuf_local *local, 1005 struct niobuf_local *local, int rc)
1133 struct obd_trans_info *oti, int rc)
1134{ 1006{
1135 EXP_CHECK_DT_OP(exp, commitrw); 1007 EXP_CHECK_DT_OP(exp, commitrw);
1136 EXP_COUNTER_INCREMENT(exp, commitrw); 1008 EXP_COUNTER_INCREMENT(exp, commitrw);
1137 1009
1138 rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj, 1010 rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
1139 rnb, pages, local, oti, rc); 1011 rnb, pages, local, rc);
1140 return rc; 1012 return rc;
1141} 1013}
1142 1014
@@ -1219,18 +1091,6 @@ static inline int obd_notify_observer(struct obd_device *observer,
1219 return rc1 ? rc1 : rc2; 1091 return rc1 ? rc1 : rc2;
1220} 1092}
1221 1093
1222static inline int obd_quotacheck(struct obd_export *exp,
1223 struct obd_quotactl *oqctl)
1224{
1225 int rc;
1226
1227 EXP_CHECK_DT_OP(exp, quotacheck);
1228 EXP_COUNTER_INCREMENT(exp, quotacheck);
1229
1230 rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
1231 return rc;
1232}
1233
1234static inline int obd_quotactl(struct obd_export *exp, 1094static inline int obd_quotactl(struct obd_export *exp,
1235 struct obd_quotactl *oqctl) 1095 struct obd_quotactl *oqctl)
1236{ 1096{
@@ -1346,21 +1206,9 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
1346 return rc; 1206 return rc;
1347} 1207}
1348 1208
1349static inline int md_done_writing(struct obd_export *exp,
1350 struct md_op_data *op_data,
1351 struct md_open_data *mod)
1352{
1353 int rc;
1354
1355 EXP_CHECK_MD_OP(exp, done_writing);
1356 EXP_MD_COUNTER_INCREMENT(exp, done_writing);
1357 rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
1358 return rc;
1359}
1360
1361static inline int md_enqueue(struct obd_export *exp, 1209static inline int md_enqueue(struct obd_export *exp,
1362 struct ldlm_enqueue_info *einfo, 1210 struct ldlm_enqueue_info *einfo,
1363 const ldlm_policy_data_t *policy, 1211 const union ldlm_policy_data *policy,
1364 struct lookup_intent *it, 1212 struct lookup_intent *it,
1365 struct md_op_data *op_data, 1213 struct md_op_data *op_data,
1366 struct lustre_handle *lockh, 1214 struct lustre_handle *lockh,
@@ -1428,16 +1276,14 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
1428} 1276}
1429 1277
1430static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data, 1278static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
1431 void *ea, size_t ealen, void *ea2, size_t ea2len, 1279 void *ea, size_t ealen,
1432 struct ptlrpc_request **request, 1280 struct ptlrpc_request **request)
1433 struct md_open_data **mod)
1434{ 1281{
1435 int rc; 1282 int rc;
1436 1283
1437 EXP_CHECK_MD_OP(exp, setattr); 1284 EXP_CHECK_MD_OP(exp, setattr);
1438 EXP_MD_COUNTER_INCREMENT(exp, setattr); 1285 EXP_MD_COUNTER_INCREMENT(exp, setattr);
1439 rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, 1286 rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request);
1440 ea2, ea2len, request, mod);
1441 return rc; 1287 return rc;
1442} 1288}
1443 1289
@@ -1561,7 +1407,7 @@ static inline int md_set_lock_data(struct obd_export *exp,
1561 1407
1562static inline int md_cancel_unused(struct obd_export *exp, 1408static inline int md_cancel_unused(struct obd_export *exp,
1563 const struct lu_fid *fid, 1409 const struct lu_fid *fid,
1564 ldlm_policy_data_t *policy, 1410 union ldlm_policy_data *policy,
1565 enum ldlm_mode mode, 1411 enum ldlm_mode mode,
1566 enum ldlm_cancel_flags flags, 1412 enum ldlm_cancel_flags flags,
1567 void *opaque) 1413 void *opaque)
@@ -1579,7 +1425,7 @@ static inline int md_cancel_unused(struct obd_export *exp,
1579static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, 1425static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
1580 const struct lu_fid *fid, 1426 const struct lu_fid *fid,
1581 enum ldlm_type type, 1427 enum ldlm_type type,
1582 ldlm_policy_data_t *policy, 1428 union ldlm_policy_data *policy,
1583 enum ldlm_mode mode, 1429 enum ldlm_mode mode,
1584 struct lustre_handle *lockh) 1430 struct lustre_handle *lockh)
1585{ 1431{
@@ -1589,14 +1435,12 @@ static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
1589 policy, mode, lockh); 1435 policy, mode, lockh);
1590} 1436}
1591 1437
1592static inline int md_init_ea_size(struct obd_export *exp, int easize, 1438static inline int md_init_ea_size(struct obd_export *exp, u32 easize,
1593 int def_asize, int cookiesize, 1439 u32 def_asize)
1594 int def_cookiesize)
1595{ 1440{
1596 EXP_CHECK_MD_OP(exp, init_ea_size); 1441 EXP_CHECK_MD_OP(exp, init_ea_size);
1597 EXP_MD_COUNTER_INCREMENT(exp, init_ea_size); 1442 EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
1598 return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize, 1443 return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize);
1599 cookiesize, def_cookiesize);
1600} 1444}
1601 1445
1602static inline int md_intent_getattr_async(struct obd_export *exp, 1446static inline int md_intent_getattr_async(struct obd_export *exp,
@@ -1636,6 +1480,24 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp,
1636 return rc; 1480 return rc;
1637} 1481}
1638 1482
1483/* Unpack an MD struct from disk to in-memory format.
1484 * Returns +ve size of unpacked MD (0 when the MD is being freed), or -ve error.
1485 *
1486 * If *plsm != NULL and lmm == NULL then *plsm will be freed.
1487 * If *plsm == NULL then it will be allocated.
1488 */
1489static inline int md_unpackmd(struct obd_export *exp,
1490 struct lmv_stripe_md **plsm,
1491 const union lmv_mds_md *lmm, size_t lmm_size)
1492{
1493 int rc;
1494
1495 EXP_CHECK_MD_OP(exp, unpackmd);
1496 EXP_MD_COUNTER_INCREMENT(exp, unpackmd);
1497 rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size);
1498 return rc;
1499}
1500
1639/* OBD Metadata Support */ 1501/* OBD Metadata Support */
1640 1502
1641int obd_init_caches(void); 1503int obd_init_caches(void);
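The allocate-or-free contract documented for md_unpackmd() above can be sketched in standalone C. This is an illustration of the calling convention only; the type and function names below are stand-ins, not the Lustre lmv_stripe_md/lmv_mds_md structures, and the real operation is dispatched through the MD operations table as the wrapper shows.

#include <stdlib.h>
#include <string.h>

struct demo_md {
	size_t size;
	unsigned char data[];		/* unpacked, in-memory form */
};

/* Returns the unpacked size, 0 when *pmd was freed, or -1 on allocation failure. */
static int demo_unpackmd(struct demo_md **pmd, const void *buf, size_t len)
{
	if (!buf) {			/* buf == NULL: release the in-memory copy */
		free(*pmd);
		*pmd = NULL;
		return 0;
	}

	free(*pmd);			/* simplest policy: always rebuild the copy */
	*pmd = malloc(sizeof(**pmd) + len);
	if (!*pmd)
		return -1;		/* the real wrapper would see -ENOMEM here */

	(*pmd)->size = len;
	memcpy((*pmd)->data, buf, len);
	return (int)len;
}

A caller builds the copy with demo_unpackmd(&md, wire_buf, wire_len) and drops it again with demo_unpackmd(&md, NULL, 0), mirroring the two cases in the comment above.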
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index b346a7f10aa4..aaedec7d793c 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -172,14 +172,14 @@ extern char obd_jobid_var[];
172#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123 172#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
173#define OBD_FAIL_MDS_SYNC_NET 0x124 173#define OBD_FAIL_MDS_SYNC_NET 0x124
174#define OBD_FAIL_MDS_SYNC_PACK 0x125 174#define OBD_FAIL_MDS_SYNC_PACK 0x125
175#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126 175/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */
176#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 176/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */
177#define OBD_FAIL_MDS_ALLOC_OBDO 0x128 177#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
178#define OBD_FAIL_MDS_PAUSE_OPEN 0x129 178#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
179#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a 179#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
180#define OBD_FAIL_MDS_OPEN_CREATE 0x12b 180#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
181#define OBD_FAIL_MDS_OST_SETATTR 0x12c 181#define OBD_FAIL_MDS_OST_SETATTR 0x12c
182#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d 182/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */
183#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e 183#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
184#define OBD_FAIL_MDS_CLIENT_ADD 0x12f 184#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
185#define OBD_FAIL_MDS_GETXATTR_NET 0x130 185#define OBD_FAIL_MDS_GETXATTR_NET 0x130
@@ -264,7 +264,7 @@ extern char obd_jobid_var[];
264#define OBD_FAIL_OST_ENOSPC 0x215 264#define OBD_FAIL_OST_ENOSPC 0x215
265#define OBD_FAIL_OST_EROFS 0x216 265#define OBD_FAIL_OST_EROFS 0x216
266#define OBD_FAIL_OST_ENOENT 0x217 266#define OBD_FAIL_OST_ENOENT 0x217
267#define OBD_FAIL_OST_QUOTACHECK_NET 0x218 267/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */
268#define OBD_FAIL_OST_QUOTACTL_NET 0x219 268#define OBD_FAIL_OST_QUOTACTL_NET 0x219
269#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a 269#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
270#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b 270#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
@@ -321,6 +321,8 @@ extern char obd_jobid_var[];
321#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322 321#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
322#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323 322#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
323 323
324#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
325
324/* LOCKLESS IO */ 326/* LOCKLESS IO */
325#define OBD_FAIL_LDLM_SET_CONTENTION 0x385 327#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
326 328
@@ -343,6 +345,7 @@ extern char obd_jobid_var[];
343#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410 345#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
344#define OBD_FAIL_OSC_NO_GRANT 0x411 346#define OBD_FAIL_OSC_NO_GRANT 0x411
345#define OBD_FAIL_OSC_DELAY_SETTIME 0x412 347#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
348#define OBD_FAIL_OSC_DELAY_IO 0x414
346 349
347#define OBD_FAIL_PTLRPC 0x500 350#define OBD_FAIL_PTLRPC 0x500
348#define OBD_FAIL_PTLRPC_ACK 0x501 351#define OBD_FAIL_PTLRPC_ACK 0x501
@@ -373,7 +376,7 @@ extern char obd_jobid_var[];
373#define OBD_FAIL_OBD_PING_NET 0x600 376#define OBD_FAIL_OBD_PING_NET 0x600
374#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601 377#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
375#define OBD_FAIL_OBD_LOGD_NET 0x602 378#define OBD_FAIL_OBD_LOGD_NET 0x602
376#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 379/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */
377#define OBD_FAIL_OBD_DQACQ 0x604 380#define OBD_FAIL_OBD_DQACQ 0x604
378#define OBD_FAIL_OBD_LLOG_SETUP 0x605 381#define OBD_FAIL_OBD_LLOG_SETUP 0x605
379#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606 382#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
@@ -458,6 +461,8 @@ extern char obd_jobid_var[];
458#define OBD_FAIL_LOV_INIT 0x1403 461#define OBD_FAIL_LOV_INIT 0x1403
459#define OBD_FAIL_GLIMPSE_DELAY 0x1404 462#define OBD_FAIL_GLIMPSE_DELAY 0x1404
460#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405 463#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
464#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406
465#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407
461#define OBD_FAIL_GETATTR_DELAY 0x1409 466#define OBD_FAIL_GETATTR_DELAY 0x1409
462 467
463#define OBD_FAIL_FID_INDIR 0x1501 468#define OBD_FAIL_FID_INDIR 0x1501
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
new file mode 100644
index 000000000000..30c4dd66d5c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -0,0 +1,199 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2014, Intel Corporation.
27 *
28 * Copyright 2015 Cray Inc, all rights reserved.
29 * Author: Ben Evans.
30 *
31 * Define lu_seq_range associated functions
32 */
33
34#ifndef _SEQ_RANGE_H_
35#define _SEQ_RANGE_H_
36
37#include "lustre/lustre_idl.h"
38
39/**
40 * computes the sequence range type \a range
41 */
42
43static inline unsigned int fld_range_type(const struct lu_seq_range *range)
44{
45 return range->lsr_flags & LU_SEQ_RANGE_MASK;
46}
47
48/**
49 * Is this sequence range an OST? \a range
50 */
51
52static inline bool fld_range_is_ost(const struct lu_seq_range *range)
53{
54 return fld_range_type(range) == LU_SEQ_RANGE_OST;
55}
56
57/**
58 * Is this sequence range an MDT? \a range
59 */
60
61static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
62{
63 return fld_range_type(range) == LU_SEQ_RANGE_MDT;
64}
65
66/**
67 * ANY range is only used when the fld client sends a fld query request,
68 * but it does not know whether the seq is an MDT or OST, so it will send the
69 * request with ANY type, which means any seq type from the lookup can be
 70 * expected. \a range
71 */
72static inline unsigned int fld_range_is_any(const struct lu_seq_range *range)
73{
74 return fld_range_type(range) == LU_SEQ_RANGE_ANY;
75}
76
77/**
78 * Apply flags to range \a range \a flags
79 */
80
81static inline void fld_range_set_type(struct lu_seq_range *range,
82 unsigned int flags)
83{
84 range->lsr_flags |= flags;
85}
86
87/**
88 * Add MDT to range type \a range
89 */
90
91static inline void fld_range_set_mdt(struct lu_seq_range *range)
92{
93 fld_range_set_type(range, LU_SEQ_RANGE_MDT);
94}
95
96/**
97 * Add OST to range type \a range
98 */
99
100static inline void fld_range_set_ost(struct lu_seq_range *range)
101{
102 fld_range_set_type(range, LU_SEQ_RANGE_OST);
103}
104
105/**
106 * Add ANY to range type \a range
107 */
108
109static inline void fld_range_set_any(struct lu_seq_range *range)
110{
111 fld_range_set_type(range, LU_SEQ_RANGE_ANY);
112}
113
114/**
115 * computes width of given sequence range \a range
116 */
117
118static inline u64 lu_seq_range_space(const struct lu_seq_range *range)
119{
120 return range->lsr_end - range->lsr_start;
121}
122
123/**
124 * initialize range to zero \a range
125 */
126
127static inline void lu_seq_range_init(struct lu_seq_range *range)
128{
129 memset(range, 0, sizeof(*range));
130}
131
132/**
133 * check if given seq id \a seq is within given range \a range
134 */
135
136static inline bool lu_seq_range_within(const struct lu_seq_range *range,
137 u64 seq)
138{
139 return seq >= range->lsr_start && seq < range->lsr_end;
140}
141
142/**
143 * Is the range sane? Is the end after the beginning? \a range
144 */
145
146static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range)
147{
148 return range->lsr_end >= range->lsr_start;
149}
150
151/**
152 * Is the range 0? \a range
153 */
154
155static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range)
156{
157 return range->lsr_start == 0 && range->lsr_end == 0;
158}
159
160/**
161 * Is the range out of space? \a range
162 */
163
164static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range)
165{
166 return lu_seq_range_space(range) == 0;
167}
168
169/**
170 * return 0 if two ranges have the same location, nonzero if they are
171 * different \a r1 \a r2
172 */
173
174static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1,
175 const struct lu_seq_range *r2)
176{
177 return r1->lsr_index != r2->lsr_index ||
178 r1->lsr_flags != r2->lsr_flags;
179}
180
181#if !defined(__REQ_LAYOUT_USER__)
182/**
183 * byte swap range structure \a range
184 */
185
186void lustre_swab_lu_seq_range(struct lu_seq_range *range);
187#endif
188/**
189 * printf string and argument list for sequence range
190 */
191#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s"
192
193#define PRANGE(range) \
194 (range)->lsr_start, \
195 (range)->lsr_end, \
196 (range)->lsr_index, \
197 fld_range_is_mdt(range) ? "mdt" : "ost"
198
199#endif
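The header above is small enough that its semantics can be mirrored in ordinary user-space C. The sketch below is illustrative only (the struct and function names are stand-ins for struct lu_seq_range and its helpers); its main point is that the interval is half-open: lsr_end is one past the last usable sequence, which is why lu_seq_range_is_sane() only requires end >= start.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_seq_range {
	uint64_t start;			/* first sequence in the range */
	uint64_t end;			/* one past the last sequence */
};

/* width of the range, as in lu_seq_range_space() */
static uint64_t demo_range_space(const struct demo_seq_range *r)
{
	return r->end - r->start;
}

/* half-open containment test, as in lu_seq_range_within() */
static bool demo_range_within(const struct demo_seq_range *r, uint64_t seq)
{
	return seq >= r->start && seq < r->end;
}

/* no space left means the range is exhausted, as in lu_seq_range_is_exhausted() */
static bool demo_range_is_exhausted(const struct demo_seq_range *r)
{
	return demo_range_space(r) == 0;
}

int main(void)
{
	struct demo_seq_range r = { .start = 0x100, .end = 0x200 };

	printf("space=%llu within(0x180)=%d exhausted=%d\n",
	       (unsigned long long)demo_range_space(&r),
	       demo_range_within(&r, 0x180),
	       demo_range_is_exhausted(&r));
	return 0;
}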
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index ecf472e4813d..32b73ee62639 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -193,6 +193,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
193 * add the locks into grant list, for debug purpose, .. 193 * add the locks into grant list, for debug purpose, ..
194 */ 194 */
195 ldlm_resource_add_lock(res, &res->lr_granted, lock); 195 ldlm_resource_add_lock(res, &res->lr_granted, lock);
196
197 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
198 struct ldlm_lock *lck;
199
200 list_for_each_entry_reverse(lck, &res->lr_granted,
201 l_res_link) {
202 if (lck == lock)
203 continue;
204 if (lockmode_compat(lck->l_granted_mode,
205 lock->l_granted_mode))
206 continue;
207 if (ldlm_extent_overlap(&lck->l_req_extent,
208 &lock->l_req_extent)) {
209 CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
210 lck, lock);
211 ldlm_resource_dump(D_ERROR, res);
212 LBUG();
213 }
214 }
215 }
196} 216}
197 217
198/** Remove cancelled lock from resource interval tree. */ 218/** Remove cancelled lock from resource interval tree. */
@@ -220,8 +240,8 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
220 } 240 }
221} 241}
222 242
223void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 243void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
224 ldlm_policy_data_t *lpolicy) 244 union ldlm_policy_data *lpolicy)
225{ 245{
226 memset(lpolicy, 0, sizeof(*lpolicy)); 246 memset(lpolicy, 0, sizeof(*lpolicy));
227 lpolicy->l_extent.start = wpolicy->l_extent.start; 247 lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -229,8 +249,8 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
229 lpolicy->l_extent.gid = wpolicy->l_extent.gid; 249 lpolicy->l_extent.gid = wpolicy->l_extent.gid;
230} 250}
231 251
232void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 252void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
233 ldlm_wire_policy_data_t *wpolicy) 253 union ldlm_wire_policy_data *wpolicy)
234{ 254{
235 memset(wpolicy, 0, sizeof(*wpolicy)); 255 memset(wpolicy, 0, sizeof(*wpolicy));
236 wpolicy->l_extent.start = lpolicy->l_extent.start; 256 wpolicy->l_extent.start = lpolicy->l_extent.start;
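The OBD_FAIL_LDLM_GRANT_CHECK path added above asserts that a newly granted extent lock never overlaps an already granted lock held in an incompatible mode. The standalone sketch below illustrates that invariant; it is not the patch's code — the real test uses lockmode_compat() and ldlm_extent_overlap(), and the two-mode reader/writer rule here is a deliberate simplification.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_extent {
	uint64_t start;
	uint64_t end;			/* inclusive, like struct ldlm_extent */
};

enum demo_mode { DEMO_READ, DEMO_WRITE };	/* simplified lock modes */

/* two inclusive extents overlap iff neither lies entirely past the other */
static bool demo_extent_overlap(const struct demo_extent *a,
				const struct demo_extent *b)
{
	return a->start <= b->end && b->start <= a->end;
}

/* simplified compatibility rule: only two readers may share an overlap */
static bool demo_mode_compat(enum demo_mode a, enum demo_mode b)
{
	return a == DEMO_READ && b == DEMO_READ;
}

/* true if granting "fresh" alongside "held" would violate the invariant */
static bool demo_grant_conflicts(const struct demo_extent *held, enum demo_mode held_mode,
				 const struct demo_extent *fresh, enum demo_mode fresh_mode)
{
	return !demo_mode_compat(held_mode, fresh_mode) &&
	       demo_extent_overlap(held, fresh);
}

int main(void)
{
	struct demo_extent held = { 0, 4095 }, fresh = { 4096, 8191 };

	/* adjacent but non-overlapping writes: no conflict, so no LBUG */
	printf("conflict=%d\n",
	       demo_grant_conflicts(&held, DEMO_WRITE, &fresh, DEMO_WRITE));
	return 0;
}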
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 861f36f039b5..722160784f83 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -612,22 +612,8 @@ granted:
612} 612}
613EXPORT_SYMBOL(ldlm_flock_completion_ast); 613EXPORT_SYMBOL(ldlm_flock_completion_ast);
614 614
615void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, 615void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
616 ldlm_policy_data_t *lpolicy) 616 union ldlm_policy_data *lpolicy)
617{
618 memset(lpolicy, 0, sizeof(*lpolicy));
619 lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
620 lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
621 lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
622 /* Compat code, old clients had no idea about owner field and
623 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
624 * April 2011
625 */
626 lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
627}
628
629void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
630 ldlm_policy_data_t *lpolicy)
631{ 617{
632 memset(lpolicy, 0, sizeof(*lpolicy)); 618 memset(lpolicy, 0, sizeof(*lpolicy));
633 lpolicy->l_flock.start = wpolicy->l_flock.lfw_start; 619 lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
@@ -636,8 +622,8 @@ void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
636 lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner; 622 lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
637} 623}
638 624
639void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 625void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
640 ldlm_wire_policy_data_t *wpolicy) 626 union ldlm_wire_policy_data *wpolicy)
641{ 627{
642 memset(wpolicy, 0, sizeof(*wpolicy)); 628 memset(wpolicy, 0, sizeof(*wpolicy));
643 wpolicy->l_flock.lfw_start = lpolicy->l_flock.start; 629 wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index 79f4e6fa193e..8e1709dc073c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -54,15 +54,15 @@
54#include "../include/lustre_lib.h" 54#include "../include/lustre_lib.h"
55#include "ldlm_internal.h" 55#include "ldlm_internal.h"
56 56
57void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 57void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
58 ldlm_policy_data_t *lpolicy) 58 union ldlm_policy_data *lpolicy)
59{ 59{
60 memset(lpolicy, 0, sizeof(*lpolicy)); 60 memset(lpolicy, 0, sizeof(*lpolicy));
61 lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits; 61 lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
62} 62}
63 63
64void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 64void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
65 ldlm_wire_policy_data_t *wpolicy) 65 union ldlm_wire_policy_data *wpolicy)
66{ 66{
67 memset(wpolicy, 0, sizeof(*wpolicy)); 67 memset(wpolicy, 0, sizeof(*wpolicy));
68 wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits; 68 wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5e82cfc245b2..5c02501d0560 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -39,13 +39,13 @@ extern struct list_head ldlm_srv_namespace_list;
39extern struct mutex ldlm_cli_namespace_lock; 39extern struct mutex ldlm_cli_namespace_lock;
40extern struct list_head ldlm_cli_active_namespace_list; 40extern struct list_head ldlm_cli_active_namespace_list;
41 41
42static inline int ldlm_namespace_nr_read(ldlm_side_t client) 42static inline int ldlm_namespace_nr_read(enum ldlm_side client)
43{ 43{
44 return client == LDLM_NAMESPACE_SERVER ? 44 return client == LDLM_NAMESPACE_SERVER ?
45 ldlm_srv_namespace_nr : ldlm_cli_namespace_nr; 45 ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
46} 46}
47 47
48static inline void ldlm_namespace_nr_inc(ldlm_side_t client) 48static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
49{ 49{
50 if (client == LDLM_NAMESPACE_SERVER) 50 if (client == LDLM_NAMESPACE_SERVER)
51 ldlm_srv_namespace_nr++; 51 ldlm_srv_namespace_nr++;
@@ -53,7 +53,7 @@ static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
53 ldlm_cli_namespace_nr++; 53 ldlm_cli_namespace_nr++;
54} 54}
55 55
56static inline void ldlm_namespace_nr_dec(ldlm_side_t client) 56static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
57{ 57{
58 if (client == LDLM_NAMESPACE_SERVER) 58 if (client == LDLM_NAMESPACE_SERVER)
59 ldlm_srv_namespace_nr--; 59 ldlm_srv_namespace_nr--;
@@ -61,13 +61,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
61 ldlm_cli_namespace_nr--; 61 ldlm_cli_namespace_nr--;
62} 62}
63 63
64static inline struct list_head *ldlm_namespace_list(ldlm_side_t client) 64static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
65{ 65{
66 return client == LDLM_NAMESPACE_SERVER ? 66 return client == LDLM_NAMESPACE_SERVER ?
67 &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list; 67 &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
68} 68}
69 69
70static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client) 70static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
71{ 71{
72 return client == LDLM_NAMESPACE_SERVER ? 72 return client == LDLM_NAMESPACE_SERVER ?
73 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; 73 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -79,22 +79,23 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
79 return atomic_read(&ns->ns_bref) == 0; 79 return atomic_read(&ns->ns_bref) == 0;
80} 80}
81 81
82void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t); 82void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
83 enum ldlm_side);
83void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, 84void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
84 ldlm_side_t); 85 enum ldlm_side);
85struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t); 86struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
86 87
87/* ldlm_request.c */ 88/* ldlm_request.c */
88/* Cancel lru flag, it indicates we cancel aged locks. */ 89/* Cancel lru flag, it indicates we cancel aged locks. */
89enum { 90enum {
90 LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */ 91 LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
91 LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */ 92 LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
92 LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ 93 LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
93 LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ 94 LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
94 LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither 95 LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
95 * sending nor waiting for any rpcs) 96 * sending nor waiting for any rpcs)
96 */ 97 */
97 LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */ 98 LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
98}; 99};
99 100
100int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, 101int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -137,10 +138,10 @@ ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
137 void *data, __u32 lvb_len, enum lvb_type lvb_type); 138 void *data, __u32 lvb_len, enum lvb_type lvb_type);
138enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, 139enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
139 void *cookie, __u64 *flags); 140 void *cookie, __u64 *flags);
140void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); 141void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
141void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode); 142void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
142void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); 143void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
143void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode); 144void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
144int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, 145int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
145 enum ldlm_desc_ast_t ast_type); 146 enum ldlm_desc_ast_t ast_type);
146int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use); 147int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
@@ -311,28 +312,25 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
311 return ret; 312 return ret;
312} 313}
313 314
314typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *, 315typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
315 ldlm_policy_data_t *); 316 union ldlm_policy_data *);
316 317
317typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *, 318typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
318 ldlm_wire_policy_data_t *); 319 union ldlm_wire_policy_data *);
319 320
320void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 321void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
321 ldlm_policy_data_t *lpolicy); 322 union ldlm_policy_data *lpolicy);
322void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 323void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
323 ldlm_wire_policy_data_t *wpolicy); 324 union ldlm_wire_policy_data *wpolicy);
324void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 325void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
325 ldlm_policy_data_t *lpolicy); 326 union ldlm_policy_data *lpolicy);
326void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 327void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
327 ldlm_wire_policy_data_t *wpolicy); 328 union ldlm_wire_policy_data *wpolicy);
328void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 329void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
329 ldlm_policy_data_t *lpolicy); 330 union ldlm_policy_data *lpolicy);
330void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 331void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
331 ldlm_wire_policy_data_t *wpolicy); 332 union ldlm_wire_policy_data *wpolicy);
332void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, 333void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
333 ldlm_policy_data_t *lpolicy); 334 union ldlm_policy_data *lpolicy);
334void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy, 335void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
335 ldlm_policy_data_t *lpolicy); 336 union ldlm_wire_policy_data *wpolicy);
336
337void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
338 ldlm_wire_policy_data_t *wpolicy);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 153e990c494e..9be01426c955 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -170,6 +170,9 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
170 ptlrpc_connection_put(dlmexp->exp_connection); 170 ptlrpc_connection_put(dlmexp->exp_connection);
171 dlmexp->exp_connection = NULL; 171 dlmexp->exp_connection = NULL;
172 } 172 }
173
174 if (dlmexp)
175 class_export_put(dlmexp);
173 } 176 }
174 177
175 list_del(&imp_conn->oic_item); 178 list_del(&imp_conn->oic_item);
@@ -372,6 +375,25 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
372 } else { 375 } else {
373 cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT; 376 cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
374 } 377 }
378
379 spin_lock_init(&cli->cl_mod_rpcs_lock);
380 spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
381 cli->cl_max_mod_rpcs_in_flight = 0;
382 cli->cl_mod_rpcs_in_flight = 0;
383 cli->cl_close_rpcs_in_flight = 0;
384 init_waitqueue_head(&cli->cl_mod_rpcs_waitq);
385 cli->cl_mod_tag_bitmap = NULL;
386
387 if (connect_op == MDS_CONNECT) {
388 cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1;
389 cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX),
390 sizeof(long), GFP_NOFS);
391 if (!cli->cl_mod_tag_bitmap) {
392 rc = -ENOMEM;
393 goto err;
394 }
395 }
396
375 rc = ldlm_get_ref(); 397 rc = ldlm_get_ref();
376 if (rc) { 398 if (rc) {
377 CERROR("ldlm_get_ref failed: %d\n", rc); 399 CERROR("ldlm_get_ref failed: %d\n", rc);
@@ -399,9 +421,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
399 } 421 }
400 422
401 cli->cl_import = imp; 423 cli->cl_import = imp;
402 /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */ 424 /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */
403 cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3); 425 cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
404 cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
405 426
406 if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) { 427 if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
407 if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) { 428 if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
@@ -425,8 +446,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
425 goto err_import; 446 goto err_import;
426 } 447 }
427 448
428 cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
429
430 return rc; 449 return rc;
431 450
432err_import: 451err_import:
@@ -434,12 +453,16 @@ err_import:
434err_ldlm: 453err_ldlm:
435 ldlm_put_ref(); 454 ldlm_put_ref();
436err: 455err:
456 kfree(cli->cl_mod_tag_bitmap);
457 cli->cl_mod_tag_bitmap = NULL;
437 return rc; 458 return rc;
438} 459}
439EXPORT_SYMBOL(client_obd_setup); 460EXPORT_SYMBOL(client_obd_setup);
440 461
441int client_obd_cleanup(struct obd_device *obddev) 462int client_obd_cleanup(struct obd_device *obddev)
442{ 463{
464 struct client_obd *cli = &obddev->u.cli;
465
443 ldlm_namespace_free_post(obddev->obd_namespace); 466 ldlm_namespace_free_post(obddev->obd_namespace);
444 obddev->obd_namespace = NULL; 467 obddev->obd_namespace = NULL;
445 468
@@ -447,6 +470,10 @@ int client_obd_cleanup(struct obd_device *obddev)
447 LASSERT(!obddev->u.cli.cl_import); 470 LASSERT(!obddev->u.cli.cl_import);
448 471
449 ldlm_put_ref(); 472 ldlm_put_ref();
473
474 kfree(cli->cl_mod_tag_bitmap);
475 cli->cl_mod_tag_bitmap = NULL;
476
450 return 0; 477 return 0;
451} 478}
452EXPORT_SYMBOL(client_obd_cleanup); 479EXPORT_SYMBOL(client_obd_cleanup);
@@ -461,6 +488,7 @@ int client_connect_import(const struct lu_env *env,
461 struct obd_import *imp = cli->cl_import; 488 struct obd_import *imp = cli->cl_import;
462 struct obd_connect_data *ocd; 489 struct obd_connect_data *ocd;
463 struct lustre_handle conn = { 0 }; 490 struct lustre_handle conn = { 0 };
491 bool is_mdc = false;
464 int rc; 492 int rc;
465 493
466 *exp = NULL; 494 *exp = NULL;
@@ -487,6 +515,10 @@ int client_connect_import(const struct lu_env *env,
487 ocd = &imp->imp_connect_data; 515 ocd = &imp->imp_connect_data;
488 if (data) { 516 if (data) {
489 *ocd = *data; 517 *ocd = *data;
518 is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name,
519 LUSTRE_MDC_NAME, 3);
520 if (is_mdc)
521 data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
490 imp->imp_connect_flags_orig = data->ocd_connect_flags; 522 imp->imp_connect_flags_orig = data->ocd_connect_flags;
491 } 523 }
492 524
@@ -502,6 +534,11 @@ int client_connect_import(const struct lu_env *env,
502 ocd->ocd_connect_flags, "old %#llx, new %#llx\n", 534 ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
503 data->ocd_connect_flags, ocd->ocd_connect_flags); 535 data->ocd_connect_flags, ocd->ocd_connect_flags);
504 data->ocd_connect_flags = ocd->ocd_connect_flags; 536 data->ocd_connect_flags = ocd->ocd_connect_flags;
537 /* clear the flag as it was not set and is not known
538 * by upper layers
539 */
540 if (is_mdc)
541 data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
505 } 542 }
506 543
507 ptlrpc_pinger_add_import(imp); 544 ptlrpc_pinger_add_import(imp);
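client_obd_setup() now sizes cl_mod_tag_bitmap with kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX), sizeof(long)), i.e. one bit per possible modify-RPC tag rounded up to whole longs. OBD_MAX_RIF_MAX is defined outside this hunk, so the sketch below treats it as a parameter; it only illustrates the sizing arithmetic and is not Lustre code.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define DEMO_BITS_TO_LONGS(n)	(((n) + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

int main(void)
{
	size_t max_rif = 512;		/* stand-in value, not OBD_MAX_RIF_MAX itself */
	size_t nlongs = DEMO_BITS_TO_LONGS(max_rif);
	unsigned long *bitmap = calloc(nlongs, sizeof(long));	/* ~ kcalloc() */

	if (!bitmap)
		return 1;		/* the patch maps this case to -ENOMEM */

	printf("%zu tag bits -> %zu longs (%zu bytes)\n",
	       max_rif, nlongs, nlongs * sizeof(long));
	free(bitmap);
	return 0;
}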
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4fb96f1..a4a291acb659 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -39,6 +39,7 @@
39 39
40#include "../../include/linux/libcfs/libcfs.h" 40#include "../../include/linux/libcfs/libcfs.h"
41#include "../include/lustre_intent.h" 41#include "../include/lustre_intent.h"
42#include "../include/lustre_swab.h"
42#include "../include/obd_class.h" 43#include "../include/obd_class.h"
43#include "ldlm_internal.h" 44#include "ldlm_internal.h"
44 45
@@ -63,17 +64,10 @@ static char *ldlm_typename[] = {
63 [LDLM_IBITS] = "IBT", 64 [LDLM_IBITS] = "IBT",
64}; 65};
65 66
66static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = { 67static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
67 [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local, 68 [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
68 [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local, 69 [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
69 [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local, 70 [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
70 [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
71};
72
73static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
74 [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
75 [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
76 [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
77 [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local, 71 [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
78}; 72};
79 73
@@ -88,8 +82,8 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
88 * Converts lock policy from local format to on the wire lock_desc format 82 * Converts lock policy from local format to on the wire lock_desc format
89 */ 83 */
90static void ldlm_convert_policy_to_wire(enum ldlm_type type, 84static void ldlm_convert_policy_to_wire(enum ldlm_type type,
91 const ldlm_policy_data_t *lpolicy, 85 const union ldlm_policy_data *lpolicy,
92 ldlm_wire_policy_data_t *wpolicy) 86 union ldlm_wire_policy_data *wpolicy)
93{ 87{
94 ldlm_policy_local_to_wire_t convert; 88 ldlm_policy_local_to_wire_t convert;
95 89
@@ -102,23 +96,17 @@ static void ldlm_convert_policy_to_wire(enum ldlm_type type,
102 * Converts lock policy from on the wire lock_desc format to local format 96 * Converts lock policy from on the wire lock_desc format to local format
103 */ 97 */
104void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, 98void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
105 const ldlm_wire_policy_data_t *wpolicy, 99 const union ldlm_wire_policy_data *wpolicy,
106 ldlm_policy_data_t *lpolicy) 100 union ldlm_policy_data *lpolicy)
107{ 101{
108 ldlm_policy_wire_to_local_t convert; 102 ldlm_policy_wire_to_local_t convert;
109 int new_client;
110 103
111 /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */ 104 convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
112 new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
113 if (new_client)
114 convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
115 else
116 convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
117 105
118 convert(wpolicy, lpolicy); 106 convert(wpolicy, lpolicy);
119} 107}
120 108
121char *ldlm_it2str(int it) 109const char *ldlm_it2str(enum ldlm_intent_flags it)
122{ 110{
123 switch (it) { 111 switch (it) {
124 case IT_OPEN: 112 case IT_OPEN:
@@ -140,7 +128,7 @@ char *ldlm_it2str(int it)
140 case IT_LAYOUT: 128 case IT_LAYOUT:
141 return "layout"; 129 return "layout";
142 default: 130 default:
143 CERROR("Unknown intent %d\n", it); 131 CERROR("Unknown intent 0x%08x\n", it);
144 return "UNKNOWN"; 132 return "UNKNOWN";
145 } 133 }
146} 134}
@@ -512,7 +500,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
512 500
513 return 0; 501 return 0;
514} 502}
515EXPORT_SYMBOL(ldlm_lock_change_resource);
516 503
517/** \defgroup ldlm_handles LDLM HANDLES 504/** \defgroup ldlm_handles LDLM HANDLES
518 * Ways to get hold of locks without any addresses. 505 * Ways to get hold of locks without any addresses.
@@ -595,7 +582,6 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
595 &lock->l_policy_data, 582 &lock->l_policy_data,
596 &desc->l_policy_data); 583 &desc->l_policy_data);
597} 584}
598EXPORT_SYMBOL(ldlm_lock2desc);
599 585
600/** 586/**
601 * Add a lock to list of conflicting locks to send AST to. 587 * Add a lock to list of conflicting locks to send AST to.
@@ -658,7 +644,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
658 * r/w reference type is determined by \a mode 644 * r/w reference type is determined by \a mode
659 * Calls ldlm_lock_addref_internal. 645 * Calls ldlm_lock_addref_internal.
660 */ 646 */
661void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode) 647void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
662{ 648{
663 struct ldlm_lock *lock; 649 struct ldlm_lock *lock;
664 650
@@ -676,7 +662,8 @@ EXPORT_SYMBOL(ldlm_lock_addref);
676 * Removes lock from LRU if it is there. 662 * Removes lock from LRU if it is there.
677 * Assumes the LDLM lock is already locked. 663 * Assumes the LDLM lock is already locked.
678 */ 664 */
679void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) 665void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
666 enum ldlm_mode mode)
680{ 667{
681 ldlm_lock_remove_from_lru(lock); 668 ldlm_lock_remove_from_lru(lock);
682 if (mode & (LCK_NL | LCK_CR | LCK_PR)) { 669 if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -700,7 +687,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
700 * 687 *
701 * \retval -EAGAIN lock is being canceled. 688 * \retval -EAGAIN lock is being canceled.
702 */ 689 */
703int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode) 690int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
704{ 691{
705 struct ldlm_lock *lock; 692 struct ldlm_lock *lock;
706 int result; 693 int result;
@@ -726,7 +713,7 @@ EXPORT_SYMBOL(ldlm_lock_addref_try);
726 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work. 713 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
727 * Only called for local locks. 714 * Only called for local locks.
728 */ 715 */
729void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) 716void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
730{ 717{
731 lock_res_and_lock(lock); 718 lock_res_and_lock(lock);
732 ldlm_lock_addref_internal_nolock(lock, mode); 719 ldlm_lock_addref_internal_nolock(lock, mode);
@@ -740,7 +727,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
740 * Does NOT add lock to LRU if no r/w references left to accommodate flock locks 727 * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
741 * that cannot be placed in LRU. 728 * that cannot be placed in LRU.
742 */ 729 */
743void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) 730void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
731 enum ldlm_mode mode)
744{ 732{
745 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); 733 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
746 if (mode & (LCK_NL | LCK_CR | LCK_PR)) { 734 if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -766,7 +754,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
766 * on the namespace. 754 * on the namespace.
767 * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called. 755 * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
768 */ 756 */
769void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) 757void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
770{ 758{
771 struct ldlm_namespace *ns; 759 struct ldlm_namespace *ns;
772 760
@@ -786,11 +774,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
786 } 774 }
787 775
788 if (!lock->l_readers && !lock->l_writers && 776 if (!lock->l_readers && !lock->l_writers &&
789 ldlm_is_cbpending(lock)) { 777 (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
790 /* If we received a blocked AST and this was the last reference, 778 /* If we received a blocked AST and this was the last reference,
791 * run the callback. 779 * run the callback.
780 * Group locks are special:
781 * They must not go in LRU, but they are not called back
782 * like non-group locks, instead they are manually released.
783 * They have an l_writers reference which they keep until
784 * they are manually released, so we remove them when they have
785 * no more reader or writer references. - LU-6368
792 */ 786 */
793
794 LDLM_DEBUG(lock, "final decref done on cbpending lock"); 787 LDLM_DEBUG(lock, "final decref done on cbpending lock");
795 788
796 LDLM_LOCK_GET(lock); /* dropped by bl thread */ 789 LDLM_LOCK_GET(lock); /* dropped by bl thread */
@@ -832,7 +825,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
832/** 825/**
833 * Decrease reader/writer refcount for LDLM lock with handle \a lockh 826 * Decrease reader/writer refcount for LDLM lock with handle \a lockh
834 */ 827 */
835void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode) 828void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
836{ 829{
837 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); 830 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
838 831
@@ -846,10 +839,9 @@ EXPORT_SYMBOL(ldlm_lock_decref);
846 * Decrease reader/writer refcount for LDLM lock with handle 839 * Decrease reader/writer refcount for LDLM lock with handle
847 * \a lockh and mark it for subsequent cancellation once r/w refcount 840 * \a lockh and mark it for subsequent cancellation once r/w refcount
848 * drops to zero instead of putting into LRU. 841 * drops to zero instead of putting into LRU.
849 *
850 * Typical usage is for GROUP locks which we cannot allow to be cached.
851 */ 842 */
852void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode) 843void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
844 enum ldlm_mode mode)
853{ 845{
854 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); 846 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
855 847
@@ -1055,88 +1047,173 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
1055} 1047}
1056 1048
1057/** 1049/**
1058 * Search for a lock with given properties in a queue. 1050 * Describe the overlap between two locks. itree_overlap_cb data.
1051 */
1052struct lock_match_data {
1053 struct ldlm_lock *lmd_old;
1054 struct ldlm_lock *lmd_lock;
1055 enum ldlm_mode *lmd_mode;
1056 union ldlm_policy_data *lmd_policy;
1057 __u64 lmd_flags;
1058 int lmd_unref;
1059};
1060
1061/**
1062 * Check if the given @lock meets the criteria for a match.
1063 * A reference on the lock is taken if matched.
1059 * 1064 *
1060 * \retval a referenced lock or NULL. See the flag descriptions below, in the 1065 * \param lock test-against this lock
1061 * comment above ldlm_lock_match 1066 * \param data parameters
1062 */ 1067 */
1063static struct ldlm_lock *search_queue(struct list_head *queue, 1068static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
1064 enum ldlm_mode *mode,
1065 ldlm_policy_data_t *policy,
1066 struct ldlm_lock *old_lock,
1067 __u64 flags, int unref)
1068{ 1069{
1069 struct ldlm_lock *lock; 1070 union ldlm_policy_data *lpol = &lock->l_policy_data;
1070 struct list_head *tmp; 1071 enum ldlm_mode match;
1071 1072
1072 list_for_each(tmp, queue) { 1073 if (lock == data->lmd_old)
1073 enum ldlm_mode match; 1074 return INTERVAL_ITER_STOP;
1074 1075
1075 lock = list_entry(tmp, struct ldlm_lock, l_res_link); 1076 /*
1076 1077 * Check if this lock can be matched.
1077 if (lock == old_lock) 1078 * Used by LU-2919(exclusive open) for open lease lock
1078 break; 1079 */
1080 if (ldlm_is_excl(lock))
1081 return INTERVAL_ITER_CONT;
1079 1082
1080 /* Check if this lock can be matched. 1083 /*
1081 * Used by LU-2919(exclusive open) for open lease lock 1084 * llite sometimes wants to match locks that will be
1082 */ 1085 * canceled when their users drop, but we allow it to match
1083 if (ldlm_is_excl(lock)) 1086 * if it passes in CBPENDING and the lock still has users.
1084 continue; 1087 * this is generally only going to be used by children
1088 * whose parents already hold a lock so forward progress
1089 * can still happen.
1090 */
1091 if (ldlm_is_cbpending(lock) &&
1092 !(data->lmd_flags & LDLM_FL_CBPENDING))
1093 return INTERVAL_ITER_CONT;
1085 1094
1086 /* llite sometimes wants to match locks that will be 1095 if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
1087 * canceled when their users drop, but we allow it to match 1096 !lock->l_readers && !lock->l_writers)
1088 * if it passes in CBPENDING and the lock still has users. 1097 return INTERVAL_ITER_CONT;
1089 * this is generally only going to be used by children
1090 * whose parents already hold a lock so forward progress
1091 * can still happen.
1092 */
1093 if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
1094 continue;
1095 if (!unref && ldlm_is_cbpending(lock) &&
1096 lock->l_readers == 0 && lock->l_writers == 0)
1097 continue;
1098 1098
1099 if (!(lock->l_req_mode & *mode)) 1099 if (!(lock->l_req_mode & *data->lmd_mode))
1100 continue; 1100 return INTERVAL_ITER_CONT;
1101 match = lock->l_req_mode; 1101 match = lock->l_req_mode;
1102 1102
1103 if (lock->l_resource->lr_type == LDLM_EXTENT && 1103 switch (lock->l_resource->lr_type) {
1104 (lock->l_policy_data.l_extent.start > 1104 case LDLM_EXTENT:
1105 policy->l_extent.start || 1105 if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
1106 lock->l_policy_data.l_extent.end < policy->l_extent.end)) 1106 lpol->l_extent.end < data->lmd_policy->l_extent.end)
1107 continue; 1107 return INTERVAL_ITER_CONT;
1108 1108
1109 if (unlikely(match == LCK_GROUP) && 1109 if (unlikely(match == LCK_GROUP) &&
1110 lock->l_resource->lr_type == LDLM_EXTENT && 1110 data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
1111 policy->l_extent.gid != LDLM_GID_ANY && 1111 lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
1112 lock->l_policy_data.l_extent.gid != policy->l_extent.gid) 1112 return INTERVAL_ITER_CONT;
1113 continue; 1113 break;
1114 1114 case LDLM_IBITS:
1115 /* We match if we have existing lock with same or wider set 1115 /*
1116 * We match if we have existing lock with same or wider set
1116 * of bits. 1117 * of bits.
1117 */ 1118 */
1118 if (lock->l_resource->lr_type == LDLM_IBITS && 1119 if ((lpol->l_inodebits.bits &
1119 ((lock->l_policy_data.l_inodebits.bits & 1120 data->lmd_policy->l_inodebits.bits) !=
1120 policy->l_inodebits.bits) != 1121 data->lmd_policy->l_inodebits.bits)
1121 policy->l_inodebits.bits)) 1122 return INTERVAL_ITER_CONT;
1122 continue; 1123 break;
1124 default:
1125 break;
1126 }
1127 /*
1128 * We match if we have existing lock with same or wider set
1129 * of bits.
1130 */
1131 if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
1132 return INTERVAL_ITER_CONT;
1133
1134 if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
1135 !ldlm_is_local(lock))
1136 return INTERVAL_ITER_CONT;
1137
1138 if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
1139 LDLM_LOCK_GET(lock);
1140 ldlm_lock_touch_in_lru(lock);
1141 } else {
1142 ldlm_lock_addref_internal_nolock(lock, match);
1143 }
1144
1145 *data->lmd_mode = match;
1146 data->lmd_lock = lock;
1147
1148 return INTERVAL_ITER_STOP;
1149}
1150
1151static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
1152{
1153 struct ldlm_interval *node = to_ldlm_interval(in);
1154 struct lock_match_data *data = args;
1155 struct ldlm_lock *lock;
1156 int rc;
1157
1158 list_for_each_entry(lock, &node->li_group, l_sl_policy) {
1159 rc = lock_matches(lock, data);
1160 if (rc == INTERVAL_ITER_STOP)
1161 return INTERVAL_ITER_STOP;
1162 }
1163 return INTERVAL_ITER_CONT;
1164}
1165
1166/**
1167 * Search for a lock with given parameters in interval trees.
1168 *
1169 * \param res search for a lock in this resource
1170 * \param data parameters
1171 *
1172 * \retval a referenced lock or NULL.
1173 */
1174static struct ldlm_lock *search_itree(struct ldlm_resource *res,
1175 struct lock_match_data *data)
1176{
1177 struct interval_node_extent ext = {
1178 .start = data->lmd_policy->l_extent.start,
1179 .end = data->lmd_policy->l_extent.end
1180 };
1181 int idx;
1182
1183 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1184 struct ldlm_interval_tree *tree = &res->lr_itree[idx];
1123 1185
1124 if (!unref && LDLM_HAVE_MASK(lock, GONE)) 1186 if (!tree->lit_root)
1125 continue; 1187 continue;
1126 1188
1127 if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock)) 1189 if (!(tree->lit_mode & *data->lmd_mode))
1128 continue; 1190 continue;
1129 1191
1130 if (flags & LDLM_FL_TEST_LOCK) { 1192 interval_search(tree->lit_root, &ext,
1131 LDLM_LOCK_GET(lock); 1193 itree_overlap_cb, data);
1132 ldlm_lock_touch_in_lru(lock);
1133 } else {
1134 ldlm_lock_addref_internal_nolock(lock, match);
1135 }
1136 *mode = match;
1137 return lock;
1138 } 1194 }
1195 return data->lmd_lock;
1196}
1139 1197
1198/**
1199 * Search for a lock with given properties in a queue.
1200 *
1201 * \param queue search for a lock in this queue
1202 * \param data parameters
1203 *
1204 * \retval a referenced lock or NULL.
1205 */
1206static struct ldlm_lock *search_queue(struct list_head *queue,
1207 struct lock_match_data *data)
1208{
1209 struct ldlm_lock *lock;
1210 int rc;
1211
1212 list_for_each_entry(lock, queue, l_res_link) {
1213 rc = lock_matches(lock, data);
1214 if (rc == INTERVAL_ITER_STOP)
1215 return data->lmd_lock;
1216 }
1140 return NULL; 1217 return NULL;
1141} 1218}
1142 1219
@@ -1147,7 +1224,6 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
1147 wake_up_all(&lock->l_waitq); 1224 wake_up_all(&lock->l_waitq);
1148 } 1225 }
1149} 1226}
1150EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
1151 1227
1152/** 1228/**
1153 * Mark lock as "matchable" by OST. 1229 * Mark lock as "matchable" by OST.
@@ -1208,35 +1284,45 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
1208enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, 1284enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1209 const struct ldlm_res_id *res_id, 1285 const struct ldlm_res_id *res_id,
1210 enum ldlm_type type, 1286 enum ldlm_type type,
1211 ldlm_policy_data_t *policy, 1287 union ldlm_policy_data *policy,
1212 enum ldlm_mode mode, 1288 enum ldlm_mode mode,
1213 struct lustre_handle *lockh, int unref) 1289 struct lustre_handle *lockh, int unref)
1214{ 1290{
1291 struct lock_match_data data = {
1292 .lmd_old = NULL,
1293 .lmd_lock = NULL,
1294 .lmd_mode = &mode,
1295 .lmd_policy = policy,
1296 .lmd_flags = flags,
1297 .lmd_unref = unref,
1298 };
1215 struct ldlm_resource *res; 1299 struct ldlm_resource *res;
1216 struct ldlm_lock *lock, *old_lock = NULL; 1300 struct ldlm_lock *lock;
1217 int rc = 0; 1301 int rc = 0;
1218 1302
1219 if (!ns) { 1303 if (!ns) {
1220 old_lock = ldlm_handle2lock(lockh); 1304 data.lmd_old = ldlm_handle2lock(lockh);
1221 LASSERT(old_lock); 1305 LASSERT(data.lmd_old);
1222 1306
1223 ns = ldlm_lock_to_ns(old_lock); 1307 ns = ldlm_lock_to_ns(data.lmd_old);
1224 res_id = &old_lock->l_resource->lr_name; 1308 res_id = &data.lmd_old->l_resource->lr_name;
1225 type = old_lock->l_resource->lr_type; 1309 type = data.lmd_old->l_resource->lr_type;
1226 mode = old_lock->l_req_mode; 1310 *data.lmd_mode = data.lmd_old->l_req_mode;
1227 } 1311 }
1228 1312
1229 res = ldlm_resource_get(ns, NULL, res_id, type, 0); 1313 res = ldlm_resource_get(ns, NULL, res_id, type, 0);
1230 if (IS_ERR(res)) { 1314 if (IS_ERR(res)) {
1231 LASSERT(!old_lock); 1315 LASSERT(!data.lmd_old);
1232 return 0; 1316 return 0;
1233 } 1317 }
1234 1318
1235 LDLM_RESOURCE_ADDREF(res); 1319 LDLM_RESOURCE_ADDREF(res);
1236 lock_res(res); 1320 lock_res(res);
1237 1321
1238 lock = search_queue(&res->lr_granted, &mode, policy, old_lock, 1322 if (res->lr_type == LDLM_EXTENT)
1239 flags, unref); 1323 lock = search_itree(res, &data);
1324 else
1325 lock = search_queue(&res->lr_granted, &data);
1240 if (lock) { 1326 if (lock) {
1241 rc = 1; 1327 rc = 1;
1242 goto out; 1328 goto out;
@@ -1245,14 +1331,12 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1245 rc = 0; 1331 rc = 0;
1246 goto out; 1332 goto out;
1247 } 1333 }
1248 lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, 1334 lock = search_queue(&res->lr_waiting, &data);
1249 flags, unref);
1250 if (lock) { 1335 if (lock) {
1251 rc = 1; 1336 rc = 1;
1252 goto out; 1337 goto out;
1253 } 1338 }
1254 1339out:
1255 out:
1256 unlock_res(res); 1340 unlock_res(res);
1257 LDLM_RESOURCE_DELREF(res); 1341 LDLM_RESOURCE_DELREF(res);
1258 ldlm_resource_putref(res); 1342 ldlm_resource_putref(res);
@@ -1324,8 +1408,8 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1324 (type == LDLM_PLAIN || type == LDLM_IBITS) ? 1408 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1325 res_id->name[3] : policy->l_extent.end); 1409 res_id->name[3] : policy->l_extent.end);
1326 } 1410 }
1327 if (old_lock) 1411 if (data.lmd_old)
1328 LDLM_LOCK_PUT(old_lock); 1412 LDLM_LOCK_PUT(data.lmd_old);
1329 1413
1330 return rc ? mode : 0; 1414 return rc ? mode : 0;
1331} 1415}
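For LDLM_EXTENT resources, the refactored lock_matches() above only accepts an already granted lock whose extent completely covers the requested one, with an extra GID test for group locks. As a standalone illustration (names are stand-ins, and the real function also filters on mode, CBPENDING and the other LDLM_FL_* flags), the extent part of that test reduces to:

#include <stdbool.h>
#include <stdint.h>

struct demo_extent {
	uint64_t start;
	uint64_t end;			/* inclusive */
	uint64_t gid;			/* meaningful for group locks only */
};

#define DEMO_GID_ANY	((uint64_t)-1)	/* stand-in for LDLM_GID_ANY */

/* true if the held lock's extent can satisfy the requested extent */
static bool demo_extent_matches(const struct demo_extent *held,
				const struct demo_extent *want,
				bool held_is_group)
{
	if (held->start > want->start || held->end < want->end)
		return false;		/* held lock does not cover the request */
	if (held_is_group && want->gid != DEMO_GID_ANY &&
	    held->gid != want->gid)
		return false;		/* group locks must also agree on the GID */
	return true;
}

search_itree() then only walks the per-mode interval trees whose lit_mode intersects the requested mode, so this per-lock test never even sees candidates in obviously wrong modes.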
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde697ebaadc..12647af5a336 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -511,23 +511,6 @@ static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
511 CWARN("Send reply failed, maybe cause bug 21636.\n"); 511 CWARN("Send reply failed, maybe cause bug 21636.\n");
512} 512}
513 513
514static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
515{
516 struct obd_quotactl *oqctl;
517 struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
518
519 oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
520 if (!oqctl) {
521 CERROR("Can't unpack obd_quotactl\n");
522 return -EPROTO;
523 }
524
525 oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
526
527 cli->cl_qchk_stat = oqctl->qc_stat;
528 return 0;
529}
530
531/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */ 514/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
532static int ldlm_callback_handler(struct ptlrpc_request *req) 515static int ldlm_callback_handler(struct ptlrpc_request *req)
533{ 516{
@@ -577,13 +560,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
577 rc = ldlm_handle_setinfo(req); 560 rc = ldlm_handle_setinfo(req);
578 ldlm_callback_reply(req, rc); 561 ldlm_callback_reply(req, rc);
579 return 0; 562 return 0;
580 case OBD_QC_CALLBACK:
581 req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
582 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
583 return 0;
584 rc = ldlm_handle_qc_callback(req);
585 ldlm_callback_reply(req, rc);
586 return 0;
587 default: 563 default:
588 CERROR("unknown opcode %u\n", 564 CERROR("unknown opcode %u\n",
589 lustre_msg_get_opc(req->rq_reqmsg)); 565 lustre_msg_get_opc(req->rq_reqmsg));
@@ -858,7 +834,6 @@ int ldlm_get_ref(void)
858 834
859 return rc; 835 return rc;
860} 836}
861EXPORT_SYMBOL(ldlm_get_ref);
862 837
863void ldlm_put_ref(void) 838void ldlm_put_ref(void)
864{ 839{
@@ -875,7 +850,6 @@ void ldlm_put_ref(void)
875 } 850 }
876 mutex_unlock(&ldlm_ref_mutex); 851 mutex_unlock(&ldlm_ref_mutex);
877} 852}
878EXPORT_SYMBOL(ldlm_put_ref);
879 853
880static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj, 854static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
881 struct attribute *attr, 855 struct attribute *attr,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0aed39c46154..862ea0a1dc97 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -54,14 +54,14 @@
54 54
55#include "ldlm_internal.h" 55#include "ldlm_internal.h"
56 56
57void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, 57void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
58 ldlm_policy_data_t *lpolicy) 58 union ldlm_policy_data *lpolicy)
59{ 59{
60 /* No policy for plain locks */ 60 /* No policy for plain locks */
61} 61}
62 62
63void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, 63void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
64 ldlm_wire_policy_data_t *wpolicy) 64 union ldlm_wire_policy_data *wpolicy)
65{ 65{
66 /* No policy for plain locks */ 66 /* No policy for plain locks */
67} 67}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 9a1136e32dfc..8dfb3c8e6b7a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -293,7 +293,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
293 * take into account pl->pl_recalc_time here. 293 * take into account pl->pl_recalc_time here.
294 */ 294 */
295 ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool), 295 ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
296 0, LCF_ASYNC, LDLM_CANCEL_LRUR); 296 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);
297 297
298out: 298out:
299 spin_lock(&pl->pl_lock); 299 spin_lock(&pl->pl_lock);
@@ -339,7 +339,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
339 if (nr == 0) 339 if (nr == 0)
340 return (unused / 100) * sysctl_vfs_cache_pressure; 340 return (unused / 100) * sysctl_vfs_cache_pressure;
341 else 341 else
342 return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK); 342 return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
343} 343}
344 344
345static const struct ldlm_pool_ops ldlm_cli_pool_ops = { 345static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
@@ -356,10 +356,10 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
356 u32 recalc_interval_sec; 356 u32 recalc_interval_sec;
357 int count; 357 int count;
358 358
359 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time; 359 recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
360 if (recalc_interval_sec > 0) { 360 if (recalc_interval_sec > 0) {
361 spin_lock(&pl->pl_lock); 361 spin_lock(&pl->pl_lock);
362 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time; 362 recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
363 363
364 if (recalc_interval_sec > 0) { 364 if (recalc_interval_sec > 0) {
365 /* 365 /*
@@ -382,7 +382,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
382 count); 382 count);
383 } 383 }
384 384
385 recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() + 385 recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
386 pl->pl_recalc_period; 386 pl->pl_recalc_period;
387 if (recalc_interval_sec <= 0) { 387 if (recalc_interval_sec <= 0) {
388 /* DEBUG: should be re-removed after LU-4536 is fixed */ 388 /* DEBUG: should be re-removed after LU-4536 is fixed */
@@ -651,13 +651,13 @@ static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
651} 651}
652 652
653int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, 653int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
654 int idx, ldlm_side_t client) 654 int idx, enum ldlm_side client)
655{ 655{
656 int rc; 656 int rc;
657 657
658 spin_lock_init(&pl->pl_lock); 658 spin_lock_init(&pl->pl_lock);
659 atomic_set(&pl->pl_granted, 0); 659 atomic_set(&pl->pl_granted, 0);
660 pl->pl_recalc_time = ktime_get_seconds(); 660 pl->pl_recalc_time = ktime_get_real_seconds();
661 atomic_set(&pl->pl_lock_volume_factor, 1); 661 atomic_set(&pl->pl_lock_volume_factor, 1);
662 662
663 atomic_set(&pl->pl_grant_rate, 0); 663 atomic_set(&pl->pl_grant_rate, 0);
@@ -684,7 +684,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
684 684
685 return rc; 685 return rc;
686} 686}
687EXPORT_SYMBOL(ldlm_pool_init);
688 687
689void ldlm_pool_fini(struct ldlm_pool *pl) 688void ldlm_pool_fini(struct ldlm_pool *pl)
690{ 689{
@@ -698,7 +697,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
698 */ 697 */
699 POISON(pl, 0x5a, sizeof(*pl)); 698 POISON(pl, 0x5a, sizeof(*pl));
700} 699}
701EXPORT_SYMBOL(ldlm_pool_fini);
702 700
703/** 701/**
704 * Add new taken ldlm lock \a lock into pool \a pl accounting. 702 * Add new taken ldlm lock \a lock into pool \a pl accounting.
@@ -724,7 +722,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
724 * with too long call paths. 722 * with too long call paths.
725 */ 723 */
726} 724}
727EXPORT_SYMBOL(ldlm_pool_add);
728 725
729/** 726/**
730 * Remove ldlm lock \a lock from pool \a pl accounting. 727 * Remove ldlm lock \a lock from pool \a pl accounting.
@@ -743,7 +740,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
743 740
744 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT); 741 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
745} 742}
746EXPORT_SYMBOL(ldlm_pool_del);
747 743
748/** 744/**
749 * Returns current \a pl SLV. 745 * Returns current \a pl SLV.
@@ -792,13 +788,12 @@ static struct completion ldlm_pools_comp;
792 * count locks from all namespaces (if possible). Returns number of 788 * count locks from all namespaces (if possible). Returns number of
793 * cached locks. 789 * cached locks.
794 */ 790 */
795static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) 791static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
796{ 792{
797 unsigned long total = 0; 793 unsigned long total = 0;
798 int nr_ns; 794 int nr_ns;
799 struct ldlm_namespace *ns; 795 struct ldlm_namespace *ns;
800 struct ldlm_namespace *ns_old = NULL; /* loop detection */ 796 struct ldlm_namespace *ns_old = NULL; /* loop detection */
801 void *cookie;
802 797
803 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) 798 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
804 return 0; 799 return 0;
@@ -806,8 +801,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
806 CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n", 801 CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
807 client == LDLM_NAMESPACE_CLIENT ? "client" : "server"); 802 client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
808 803
809 cookie = cl_env_reenter();
810
811 /* 804 /*
812 * Find out how many resources we may release. 805 * Find out how many resources we may release.
813 */ 806 */
@@ -816,7 +809,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
816 mutex_lock(ldlm_namespace_lock(client)); 809 mutex_lock(ldlm_namespace_lock(client));
817 if (list_empty(ldlm_namespace_list(client))) { 810 if (list_empty(ldlm_namespace_list(client))) {
818 mutex_unlock(ldlm_namespace_lock(client)); 811 mutex_unlock(ldlm_namespace_lock(client));
819 cl_env_reexit(cookie);
820 return 0; 812 return 0;
821 } 813 }
822 ns = ldlm_namespace_first_locked(client); 814 ns = ldlm_namespace_first_locked(client);
@@ -842,22 +834,19 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
842 ldlm_namespace_put(ns); 834 ldlm_namespace_put(ns);
843 } 835 }
844 836
845 cl_env_reexit(cookie);
846 return total; 837 return total;
847} 838}
848 839
849static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) 840static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
841 gfp_t gfp_mask)
850{ 842{
851 unsigned long freed = 0; 843 unsigned long freed = 0;
852 int tmp, nr_ns; 844 int tmp, nr_ns;
853 struct ldlm_namespace *ns; 845 struct ldlm_namespace *ns;
854 void *cookie;
855 846
856 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) 847 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
857 return -1; 848 return -1;
858 849
859 cookie = cl_env_reenter();
860
861 /* 850 /*
862 * Shrink at least ldlm_namespace_nr_read(client) namespaces. 851 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
863 */ 852 */
@@ -887,7 +876,6 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
887 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); 876 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
888 ldlm_namespace_put(ns); 877 ldlm_namespace_put(ns);
889 } 878 }
890 cl_env_reexit(cookie);
891 /* 879 /*
892 * we only decrease the SLV in server pools shrinker, return 880 * we only decrease the SLV in server pools shrinker, return
893 * SHRINK_STOP to kernel to avoid needless loop. LU-1128 881 * SHRINK_STOP to kernel to avoid needless loop. LU-1128
@@ -908,7 +896,7 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
908 sc->gfp_mask); 896 sc->gfp_mask);
909} 897}
910 898
911static int ldlm_pools_recalc(ldlm_side_t client) 899static int ldlm_pools_recalc(enum ldlm_side client)
912{ 900{
913 struct ldlm_namespace *ns; 901 struct ldlm_namespace *ns;
914 struct ldlm_namespace *ns_old = NULL; 902 struct ldlm_namespace *ns_old = NULL;
@@ -1095,7 +1083,6 @@ int ldlm_pools_init(void)
1095 1083
1096 return rc; 1084 return rc;
1097} 1085}
1098EXPORT_SYMBOL(ldlm_pools_init);
1099 1086
1100void ldlm_pools_fini(void) 1087void ldlm_pools_fini(void)
1101{ 1088{
@@ -1104,4 +1091,3 @@ void ldlm_pools_fini(void)
1104 1091
1105 ldlm_pools_thread_stop(); 1092 ldlm_pools_thread_stop();
1106} 1093}
1107EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 35ba6f14d95f..c1f8693f94a5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -93,11 +93,7 @@ static int ldlm_expired_completion_wait(void *data)
93 if (!lock->l_conn_export) { 93 if (!lock->l_conn_export) {
94 static unsigned long next_dump, last_dump; 94 static unsigned long next_dump, last_dump;
95 95
96 LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n", 96 LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
97 (s64)lock->l_last_activity,
98 (s64)(ktime_get_real_seconds() -
99 lock->l_last_activity));
100 LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
101 (s64)lock->l_last_activity, 97 (s64)lock->l_last_activity,
102 (s64)(ktime_get_real_seconds() - 98 (s64)(ktime_get_real_seconds() -
103 lock->l_last_activity)); 99 lock->l_last_activity));
@@ -475,12 +471,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
475 "client-side enqueue, new policy data"); 471 "client-side enqueue, new policy data");
476 } 472 }
477 473
478 if ((*flags) & LDLM_FL_AST_SENT || 474 if ((*flags) & LDLM_FL_AST_SENT) {
479 /* Cancel extent locks as soon as possible on a liblustre client,
480 * because it cannot handle asynchronous ASTs robustly (see
481 * bug 7311).
482 */
483 (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
484 lock_res_and_lock(lock); 475 lock_res_and_lock(lock);
485 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; 476 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
486 unlock_res_and_lock(lock); 477 unlock_res_and_lock(lock);
@@ -602,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
602 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff); 593 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
603 594
604 flags = ns_connect_lru_resize(ns) ? 595 flags = ns_connect_lru_resize(ns) ?
605 LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED; 596 LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
606 to_free = !ns_connect_lru_resize(ns) && 597 to_free = !ns_connect_lru_resize(ns) &&
607 opc == LDLM_ENQUEUE ? 1 : 0; 598 opc == LDLM_ENQUEUE ? 1 : 0;
608 599
@@ -657,6 +648,27 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
657} 648}
658EXPORT_SYMBOL(ldlm_prep_enqueue_req); 649EXPORT_SYMBOL(ldlm_prep_enqueue_req);
659 650
651static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
652 int lvb_len)
653{
654 struct ptlrpc_request *req;
655 int rc;
656
657 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
658 if (!req)
659 return ERR_PTR(-ENOMEM);
660
661 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
662 if (rc) {
663 ptlrpc_request_free(req);
664 return ERR_PTR(rc);
665 }
666
667 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
668 ptlrpc_request_set_replen(req);
669 return req;
670}
671
660/** 672/**
661 * Client-side lock enqueue. 673 * Client-side lock enqueue.
662 * 674 *
@@ -670,7 +682,7 @@ EXPORT_SYMBOL(ldlm_prep_enqueue_req);
670int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, 682int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
671 struct ldlm_enqueue_info *einfo, 683 struct ldlm_enqueue_info *einfo,
672 const struct ldlm_res_id *res_id, 684 const struct ldlm_res_id *res_id,
673 ldlm_policy_data_t const *policy, __u64 *flags, 685 union ldlm_policy_data const *policy, __u64 *flags,
674 void *lvb, __u32 lvb_len, enum lvb_type lvb_type, 686 void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
675 struct lustre_handle *lockh, int async) 687 struct lustre_handle *lockh, int async)
676{ 688{
@@ -727,17 +739,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
727 lock->l_last_activity = ktime_get_real_seconds(); 739 lock->l_last_activity = ktime_get_real_seconds();
728 740
729 /* lock not sent to server yet */ 741 /* lock not sent to server yet */
730
731 if (!reqp || !*reqp) { 742 if (!reqp || !*reqp) {
732 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), 743 req = ldlm_enqueue_pack(exp, lvb_len);
733 &RQF_LDLM_ENQUEUE, 744 if (IS_ERR(req)) {
734 LUSTRE_DLM_VERSION,
735 LDLM_ENQUEUE);
736 if (!req) {
737 failed_lock_cleanup(ns, lock, einfo->ei_mode); 745 failed_lock_cleanup(ns, lock, einfo->ei_mode);
738 LDLM_LOCK_RELEASE(lock); 746 LDLM_LOCK_RELEASE(lock);
739 return -ENOMEM; 747 return PTR_ERR(req);
740 } 748 }
749
741 req_passed_in = 0; 750 req_passed_in = 0;
742 if (reqp) 751 if (reqp)
743 *reqp = req; 752 *reqp = req;
@@ -757,24 +766,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
757 body->lock_flags = ldlm_flags_to_wire(*flags); 766 body->lock_flags = ldlm_flags_to_wire(*flags);
758 body->lock_handle[0] = *lockh; 767 body->lock_handle[0] = *lockh;
759 768
760 /* Continue as normal. */
761 if (!req_passed_in) {
762 if (lvb_len > 0)
763 req_capsule_extend(&req->rq_pill,
764 &RQF_LDLM_ENQUEUE_LVB);
765 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
766 lvb_len);
767 ptlrpc_request_set_replen(req);
768 }
769
770 /*
771 * Liblustre client doesn't get extent locks, except for O_APPEND case
772 * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
773 * [i_size, OBD_OBJECT_EOF] lock is taken.
774 */
775 LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
776 policy->l_extent.end == OBD_OBJECT_EOF));
777
778 if (async) { 769 if (async) {
779 LASSERT(reqp); 770 LASSERT(reqp);
780 return 0; 771 return 0;
@@ -1022,7 +1013,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
1022 1013
1023 return 0; 1014 return 0;
1024} 1015}
1025EXPORT_SYMBOL(ldlm_cli_update_pool);
1026 1016
1027/** 1017/**
1028 * Client side lock cancel. 1018 * Client side lock cancel.
@@ -1067,7 +1057,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
1067 1057
1068 ns = ldlm_lock_to_ns(lock); 1058 ns = ldlm_lock_to_ns(lock);
1069 flags = ns_connect_lru_resize(ns) ? 1059 flags = ns_connect_lru_resize(ns) ?
1070 LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED; 1060 LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
1071 count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1, 1061 count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
1072 LCF_BL_AST, flags); 1062 LCF_BL_AST, flags);
1073 } 1063 }
@@ -1125,7 +1115,6 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
1125 1115
1126 return count; 1116 return count;
1127} 1117}
1128EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
1129 1118
1130/** 1119/**
1131 * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back 1120 * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
@@ -1184,6 +1173,14 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1184 if (count && added >= count) 1173 if (count && added >= count)
1185 return LDLM_POLICY_KEEP_LOCK; 1174 return LDLM_POLICY_KEEP_LOCK;
1186 1175
1176 /*
1177 * Despite of the LV, It doesn't make sense to keep the lock which
1178 * is unused for ns_max_age time.
1179 */
1180 if (cfs_time_after(cfs_time_current(),
1181 cfs_time_add(lock->l_last_used, ns->ns_max_age)))
1182 return LDLM_POLICY_CANCEL_LOCK;
1183
1187 slv = ldlm_pool_get_slv(pl); 1184 slv = ldlm_pool_get_slv(pl);
1188 lvf = ldlm_pool_get_lvf(pl); 1185 lvf = ldlm_pool_get_lvf(pl);
1189 la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used)); 1186 la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
@@ -1287,21 +1284,21 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
1287static ldlm_cancel_lru_policy_t 1284static ldlm_cancel_lru_policy_t
1288ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags) 1285ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
1289{ 1286{
1290 if (flags & LDLM_CANCEL_NO_WAIT) 1287 if (flags & LDLM_LRU_FLAG_NO_WAIT)
1291 return ldlm_cancel_no_wait_policy; 1288 return ldlm_cancel_no_wait_policy;
1292 1289
1293 if (ns_connect_lru_resize(ns)) { 1290 if (ns_connect_lru_resize(ns)) {
1294 if (flags & LDLM_CANCEL_SHRINK) 1291 if (flags & LDLM_LRU_FLAG_SHRINK)
1295 /* We kill passed number of old locks. */ 1292 /* We kill passed number of old locks. */
1296 return ldlm_cancel_passed_policy; 1293 return ldlm_cancel_passed_policy;
1297 else if (flags & LDLM_CANCEL_LRUR) 1294 else if (flags & LDLM_LRU_FLAG_LRUR)
1298 return ldlm_cancel_lrur_policy; 1295 return ldlm_cancel_lrur_policy;
1299 else if (flags & LDLM_CANCEL_PASSED) 1296 else if (flags & LDLM_LRU_FLAG_PASSED)
1300 return ldlm_cancel_passed_policy; 1297 return ldlm_cancel_passed_policy;
1301 else if (flags & LDLM_CANCEL_LRUR_NO_WAIT) 1298 else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
1302 return ldlm_cancel_lrur_no_wait_policy; 1299 return ldlm_cancel_lrur_no_wait_policy;
1303 } else { 1300 } else {
1304 if (flags & LDLM_CANCEL_AGED) 1301 if (flags & LDLM_LRU_FLAG_AGED)
1305 return ldlm_cancel_aged_policy; 1302 return ldlm_cancel_aged_policy;
1306 } 1303 }
1307 1304
@@ -1325,21 +1322,21 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
1325 * 1322 *
1326 * Calling policies for enabled LRU resize: 1323 * Calling policies for enabled LRU resize:
1327 * ---------------------------------------- 1324 * ----------------------------------------
1328 * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to 1325 * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
1329 * cancel not more than \a count locks; 1326 * cancel not more than \a count locks;
1330 * 1327 *
1331 * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at 1328 * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
1332 * the beginning of LRU list); 1329 * the beginning of LRU list);
1333 * 1330 *
1334 * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to 1331 * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
1335 * memory pressure policy function; 1332 * memory pressure policy function;
1336 * 1333 *
1337 * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy". 1334 * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy".
1338 * 1335 *
1339 * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible 1336 * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
1340 * (typically before replaying locks) w/o 1337 * (typically before replaying locks) w/o
1341 * sending any RPCs or waiting for any 1338 * sending any RPCs or waiting for any
1342 * outstanding RPC to complete. 1339 * outstanding RPC to complete.
1343 */ 1340 */
1344static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, 1341static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1345 struct list_head *cancels, int count, int max, 1342 struct list_head *cancels, int count, int max,
@@ -1348,7 +1345,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1348 ldlm_cancel_lru_policy_t pf; 1345 ldlm_cancel_lru_policy_t pf;
1349 struct ldlm_lock *lock, *next; 1346 struct ldlm_lock *lock, *next;
1350 int added = 0, unused, remained; 1347 int added = 0, unused, remained;
1351 int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT); 1348 int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
1352 1349
1353 spin_lock(&ns->ns_lock); 1350 spin_lock(&ns->ns_lock);
1354 unused = ns->ns_nr_unused; 1351 unused = ns->ns_nr_unused;
@@ -1531,7 +1528,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
1531 */ 1528 */
1532int ldlm_cancel_resource_local(struct ldlm_resource *res, 1529int ldlm_cancel_resource_local(struct ldlm_resource *res,
1533 struct list_head *cancels, 1530 struct list_head *cancels,
1534 ldlm_policy_data_t *policy, 1531 union ldlm_policy_data *policy,
1535 enum ldlm_mode mode, __u64 lock_flags, 1532 enum ldlm_mode mode, __u64 lock_flags,
1536 enum ldlm_cancel_flags cancel_flags, 1533 enum ldlm_cancel_flags cancel_flags,
1537 void *opaque) 1534 void *opaque)
@@ -1648,7 +1645,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list);
1648 */ 1645 */
1649int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, 1646int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1650 const struct ldlm_res_id *res_id, 1647 const struct ldlm_res_id *res_id,
1651 ldlm_policy_data_t *policy, 1648 union ldlm_policy_data *policy,
1652 enum ldlm_mode mode, 1649 enum ldlm_mode mode,
1653 enum ldlm_cancel_flags flags, 1650 enum ldlm_cancel_flags flags,
1654 void *opaque) 1651 void *opaque)
@@ -1723,7 +1720,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
1723 opaque); 1720 opaque);
1724 } else { 1721 } else {
1725 cfs_hash_for_each_nolock(ns->ns_rs_hash, 1722 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1726 ldlm_cli_hash_cancel_unused, &arg); 1723 ldlm_cli_hash_cancel_unused, &arg, 0);
1727 return ELDLM_OK; 1724 return ELDLM_OK;
1728 } 1725 }
1729} 1726}
@@ -1796,7 +1793,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
1796 }; 1793 };
1797 1794
1798 cfs_hash_for_each_nolock(ns->ns_rs_hash, 1795 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1799 ldlm_res_iter_helper, &helper); 1796 ldlm_res_iter_helper, &helper, 0);
1800} 1797}
1801 1798
1802/* non-blocking function to manipulate a lock whose cb_data is being put away. 1799/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1840,7 +1837,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
1840 * bug 17614: locks being actively cancelled. Get a reference 1837 * bug 17614: locks being actively cancelled. Get a reference
1841 * on a lock so that it does not disappear under us (e.g. due to cancel) 1838 * on a lock so that it does not disappear under us (e.g. due to cancel)
1842 */ 1839 */
1843 if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) { 1840 if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
1844 list_add(&lock->l_pending_chain, list); 1841 list_add(&lock->l_pending_chain, list);
1845 LDLM_LOCK_GET(lock); 1842 LDLM_LOCK_GET(lock);
1846 } 1843 }
@@ -1909,7 +1906,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1909 int flags; 1906 int flags;
1910 1907
1911 /* Bug 11974: Do not replay a lock which is actively being canceled */ 1908 /* Bug 11974: Do not replay a lock which is actively being canceled */
1912 if (ldlm_is_canceling(lock)) { 1909 if (ldlm_is_bl_done(lock)) {
1913 LDLM_DEBUG(lock, "Not replaying canceled lock:"); 1910 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1914 return 0; 1911 return 0;
1915 } 1912 }
@@ -2003,11 +2000,11 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
2003 ldlm_ns_name(ns), ns->ns_nr_unused); 2000 ldlm_ns_name(ns), ns->ns_nr_unused);
2004 2001
2005 /* We don't need to care whether or not LRU resize is enabled 2002 /* We don't need to care whether or not LRU resize is enabled
2006 * because the LDLM_CANCEL_NO_WAIT policy doesn't use the 2003 * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
2007 * count parameter 2004 * count parameter
2008 */ 2005 */
2009 canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, 2006 canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
2010 LCF_LOCAL, LDLM_CANCEL_NO_WAIT); 2007 LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
2011 2008
2012 CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n", 2009 CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
2013 canceled, ldlm_ns_name(ns)); 2010 canceled, ldlm_ns_name(ns));
@@ -2048,4 +2045,3 @@ int ldlm_replay_locks(struct obd_import *imp)
2048 2045
2049 return rc; 2046 return rc;
2050} 2047}
2051EXPORT_SYMBOL(ldlm_replay_locks);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a09c25aea698..b22f5bae7201 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -226,7 +226,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
226 226
227 /* Try to cancel all @ns_nr_unused locks. */ 227 /* Try to cancel all @ns_nr_unused locks. */
228 canceled = ldlm_cancel_lru(ns, unused, 0, 228 canceled = ldlm_cancel_lru(ns, unused, 0,
229 LDLM_CANCEL_PASSED); 229 LDLM_LRU_FLAG_PASSED);
230 if (canceled < unused) { 230 if (canceled < unused) {
231 CDEBUG(D_DLMTRACE, 231 CDEBUG(D_DLMTRACE,
232 "not all requested locks are canceled, requested: %d, canceled: %d\n", 232 "not all requested locks are canceled, requested: %d, canceled: %d\n",
@@ -237,7 +237,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
237 } else { 237 } else {
238 tmp = ns->ns_max_unused; 238 tmp = ns->ns_max_unused;
239 ns->ns_max_unused = 0; 239 ns->ns_max_unused = 0;
240 ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED); 240 ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
241 ns->ns_max_unused = tmp; 241 ns->ns_max_unused = tmp;
242 } 242 }
243 return count; 243 return count;
@@ -262,7 +262,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
262 "changing namespace %s unused locks from %u to %u\n", 262 "changing namespace %s unused locks from %u to %u\n",
263 ldlm_ns_name(ns), ns->ns_nr_unused, 263 ldlm_ns_name(ns), ns->ns_nr_unused,
264 (unsigned int)tmp); 264 (unsigned int)tmp);
265 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED); 265 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
266 266
267 if (!lru_resize) { 267 if (!lru_resize) {
268 CDEBUG(D_DLMTRACE, 268 CDEBUG(D_DLMTRACE,
@@ -276,7 +276,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
276 ldlm_ns_name(ns), ns->ns_max_unused, 276 ldlm_ns_name(ns), ns->ns_max_unused,
277 (unsigned int)tmp); 277 (unsigned int)tmp);
278 ns->ns_max_unused = (unsigned int)tmp; 278 ns->ns_max_unused = (unsigned int)tmp;
279 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); 279 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
280 280
281 /* Make sure that LRU resize was originally supported before 281 /* Make sure that LRU resize was originally supported before
282 * turning it on here. 282 * turning it on here.
@@ -445,8 +445,8 @@ static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
445 return res; 445 return res;
446} 446}
447 447
448static unsigned ldlm_res_hop_hash(struct cfs_hash *hs, 448static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
449 const void *key, unsigned mask) 449 const void *key, unsigned int mask)
450{ 450{
451 const struct ldlm_res_id *id = key; 451 const struct ldlm_res_id *id = key;
452 unsigned int val = 0; 452 unsigned int val = 0;
@@ -457,8 +457,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
457 return val & mask; 457 return val & mask;
458} 458}
459 459
460static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs, 460static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
461 const void *key, unsigned mask) 461 const void *key, unsigned int mask)
462{ 462{
463 const struct ldlm_res_id *id = key; 463 const struct ldlm_res_id *id = key;
464 struct lu_fid fid; 464 struct lu_fid fid;
@@ -612,7 +612,7 @@ static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
612 612
613/** Register \a ns in the list of namespaces */ 613/** Register \a ns in the list of namespaces */
614static void ldlm_namespace_register(struct ldlm_namespace *ns, 614static void ldlm_namespace_register(struct ldlm_namespace *ns,
615 ldlm_side_t client) 615 enum ldlm_side client)
616{ 616{
617 mutex_lock(ldlm_namespace_lock(client)); 617 mutex_lock(ldlm_namespace_lock(client));
618 LASSERT(list_empty(&ns->ns_list_chain)); 618 LASSERT(list_empty(&ns->ns_list_chain));
@@ -625,7 +625,7 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns,
625 * Create and initialize new empty namespace. 625 * Create and initialize new empty namespace.
626 */ 626 */
627struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, 627struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
628 ldlm_side_t client, 628 enum ldlm_side client,
629 enum ldlm_appetite apt, 629 enum ldlm_appetite apt,
630 enum ldlm_ns_type ns_type) 630 enum ldlm_ns_type ns_type)
631{ 631{
@@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
855 return ELDLM_OK; 855 return ELDLM_OK;
856 } 856 }
857 857
858 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags); 858 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
859 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL); 859 &flags, 0);
860 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
861 NULL, 0);
860 return ELDLM_OK; 862 return ELDLM_OK;
861} 863}
862EXPORT_SYMBOL(ldlm_namespace_cleanup); 864EXPORT_SYMBOL(ldlm_namespace_cleanup);
@@ -952,7 +954,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
952 954
953/** Unregister \a ns from the list of namespaces. */ 955/** Unregister \a ns from the list of namespaces. */
954static void ldlm_namespace_unregister(struct ldlm_namespace *ns, 956static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
955 ldlm_side_t client) 957 enum ldlm_side client)
956{ 958{
957 mutex_lock(ldlm_namespace_lock(client)); 959 mutex_lock(ldlm_namespace_lock(client));
958 LASSERT(!list_empty(&ns->ns_list_chain)); 960 LASSERT(!list_empty(&ns->ns_list_chain));
@@ -999,7 +1001,6 @@ void ldlm_namespace_get(struct ldlm_namespace *ns)
999{ 1001{
1000 atomic_inc(&ns->ns_bref); 1002 atomic_inc(&ns->ns_bref);
1001} 1003}
1002EXPORT_SYMBOL(ldlm_namespace_get);
1003 1004
1004/* This is only for callers that care about refcount */ 1005/* This is only for callers that care about refcount */
1005static int ldlm_namespace_get_return(struct ldlm_namespace *ns) 1006static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
@@ -1014,11 +1015,10 @@ void ldlm_namespace_put(struct ldlm_namespace *ns)
1014 spin_unlock(&ns->ns_lock); 1015 spin_unlock(&ns->ns_lock);
1015 } 1016 }
1016} 1017}
1017EXPORT_SYMBOL(ldlm_namespace_put);
1018 1018
1019/** Should be called with ldlm_namespace_lock(client) taken. */ 1019/** Should be called with ldlm_namespace_lock(client) taken. */
1020void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns, 1020void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1021 ldlm_side_t client) 1021 enum ldlm_side client)
1022{ 1022{
1023 LASSERT(!list_empty(&ns->ns_list_chain)); 1023 LASSERT(!list_empty(&ns->ns_list_chain));
1024 LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); 1024 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1027,7 +1027,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1027 1027
1028/** Should be called with ldlm_namespace_lock(client) taken. */ 1028/** Should be called with ldlm_namespace_lock(client) taken. */
1029void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns, 1029void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1030 ldlm_side_t client) 1030 enum ldlm_side client)
1031{ 1031{
1032 LASSERT(!list_empty(&ns->ns_list_chain)); 1032 LASSERT(!list_empty(&ns->ns_list_chain));
1033 LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); 1033 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1035,7 +1035,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1035} 1035}
1036 1036
1037/** Should be called with ldlm_namespace_lock(client) taken. */ 1037/** Should be called with ldlm_namespace_lock(client) taken. */
1038struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client) 1038struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1039{ 1039{
1040 LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); 1040 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1041 LASSERT(!list_empty(ldlm_namespace_list(client))); 1041 LASSERT(!list_empty(ldlm_namespace_list(client)));
@@ -1305,7 +1305,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1305 * Print information about all locks in all namespaces on this node to debug 1305 * Print information about all locks in all namespaces on this node to debug
1306 * log. 1306 * log.
1307 */ 1307 */
1308void ldlm_dump_all_namespaces(ldlm_side_t client, int level) 1308void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1309{ 1309{
1310 struct list_head *tmp; 1310 struct list_head *tmp;
1311 1311
@@ -1323,7 +1323,6 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1323 1323
1324 mutex_unlock(ldlm_namespace_lock(client)); 1324 mutex_unlock(ldlm_namespace_lock(client));
1325} 1325}
1326EXPORT_SYMBOL(ldlm_dump_all_namespaces);
1327 1326
1328static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd, 1327static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1329 struct hlist_node *hnode, void *arg) 1328 struct hlist_node *hnode, void *arg)
@@ -1355,12 +1354,11 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1355 1354
1356 cfs_hash_for_each_nolock(ns->ns_rs_hash, 1355 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1357 ldlm_res_hash_dump, 1356 ldlm_res_hash_dump,
1358 (void *)(unsigned long)level); 1357 (void *)(unsigned long)level, 0);
1359 spin_lock(&ns->ns_lock); 1358 spin_lock(&ns->ns_lock);
1360 ns->ns_next_dump = cfs_time_shift(10); 1359 ns->ns_next_dump = cfs_time_shift(10);
1361 spin_unlock(&ns->ns_lock); 1360 spin_unlock(&ns->ns_lock);
1362} 1361}
1363EXPORT_SYMBOL(ldlm_namespace_dump);
1364 1362
1365/** 1363/**
1366 * Print information about all locks in this resource to debug log. 1364 * Print information about all locks in this resource to debug log.
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 1ac0940bd8df..322d4fa63f5d 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_LUSTRE_FS) += lustre.o 1obj-$(CONFIG_LUSTRE_FS) += lustre.o
2lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \ 2lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \
3 rw.o namei.o symlink.o llite_mmap.o range_lock.o \ 3 rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \
4 xattr.o xattr_cache.o rw26.o super25.o statahead.o \ 4 xattr.o xattr_cache.o xattr_security.o \
5 glimpse.o lcommon_cl.o lcommon_misc.o \ 5 super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \
6 vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \ 6 vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \
7 lproc_llite.o 7 lproc_llite.o
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7f32a539d260..ea5d247a3f70 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -51,6 +51,8 @@
51#include "../include/lustre_dlm.h" 51#include "../include/lustre_dlm.h"
52#include "../include/lustre_fid.h" 52#include "../include/lustre_fid.h"
53#include "../include/lustre_kernelcomm.h" 53#include "../include/lustre_kernelcomm.h"
54#include "../include/lustre_swab.h"
55
54#include "llite_internal.h" 56#include "llite_internal.h"
55 57
56/* 58/*
@@ -410,6 +412,8 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
410 struct ptlrpc_request *request = NULL; 412 struct ptlrpc_request *request = NULL;
411 struct md_op_data *op_data; 413 struct md_op_data *op_data;
412 struct ll_sb_info *sbi = ll_i2sbi(parent); 414 struct ll_sb_info *sbi = ll_i2sbi(parent);
415 struct inode *inode = NULL;
416 struct dentry dentry;
413 int err; 417 int err;
414 418
415 if (unlikely(lump->lum_magic != LMV_USER_MAGIC)) 419 if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
@@ -419,6 +423,10 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
419 PFID(ll_inode2fid(parent)), parent, dirname, 423 PFID(ll_inode2fid(parent)), parent, dirname,
420 (int)lump->lum_stripe_offset, lump->lum_stripe_count); 424 (int)lump->lum_stripe_offset, lump->lum_stripe_count);
421 425
426 if (lump->lum_stripe_count > 1 &&
427 !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
428 return -EINVAL;
429
422 if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC)) 430 if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
423 lustre_swab_lmv_user_md(lump); 431 lustre_swab_lmv_user_md(lump);
424 432
@@ -439,8 +447,17 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
439 from_kgid(&init_user_ns, current_fsgid()), 447 from_kgid(&init_user_ns, current_fsgid()),
440 cfs_curproc_cap_pack(), 0, &request); 448 cfs_curproc_cap_pack(), 0, &request);
441 ll_finish_md_op_data(op_data); 449 ll_finish_md_op_data(op_data);
450
451 err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
442 if (err) 452 if (err)
443 goto err_exit; 453 goto err_exit;
454
455 memset(&dentry, 0, sizeof(dentry));
456 dentry.d_inode = inode;
457
458 err = ll_init_security(&dentry, inode, parent);
459 iput(inode);
460
444err_exit: 461err_exit:
445 ptlrpc_req_finished(request); 462 ptlrpc_req_finished(request);
446 return err; 463 return err;
@@ -501,8 +518,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
501 return PTR_ERR(op_data); 518 return PTR_ERR(op_data);
502 519
503 /* swabbing is done in lov_setstripe() on server side */ 520 /* swabbing is done in lov_setstripe() on server side */
504 rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, 521 rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
505 NULL, 0, &req, NULL);
506 ll_finish_md_op_data(op_data); 522 ll_finish_md_op_data(op_data);
507 ptlrpc_req_finished(req); 523 ptlrpc_req_finished(req);
508 if (rc) { 524 if (rc) {
@@ -682,7 +698,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
682{ 698{
683 struct ll_sb_info *sbi = ll_s2sbi(sb); 699 struct ll_sb_info *sbi = ll_s2sbi(sb);
684 struct hsm_progress_kernel hpk; 700 struct hsm_progress_kernel hpk;
685 int rc; 701 int rc2, rc = 0;
686 702
687 /* Forge a hsm_progress based on data from copy. */ 703 /* Forge a hsm_progress based on data from copy. */
688 hpk.hpk_fid = copy->hc_hai.hai_fid; 704 hpk.hpk_fid = copy->hc_hai.hai_fid;
@@ -732,10 +748,10 @@ progress:
732 /* On error, the request should be considered as completed */ 748 /* On error, the request should be considered as completed */
733 if (hpk.hpk_errval > 0) 749 if (hpk.hpk_errval > 0)
734 hpk.hpk_flags |= HP_FLAG_COMPLETED; 750 hpk.hpk_flags |= HP_FLAG_COMPLETED;
735 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), 751 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
736 &hpk, NULL); 752 &hpk, NULL);
737 753
738 return rc; 754 return rc ? rc : rc2;
739} 755}
740 756
741/** 757/**
@@ -757,7 +773,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
757{ 773{
758 struct ll_sb_info *sbi = ll_s2sbi(sb); 774 struct ll_sb_info *sbi = ll_s2sbi(sb);
759 struct hsm_progress_kernel hpk; 775 struct hsm_progress_kernel hpk;
760 int rc; 776 int rc2, rc = 0;
761 777
762 /* If you modify the logic here, also check llapi_hsm_copy_end(). */ 778 /* If you modify the logic here, also check llapi_hsm_copy_end(). */
763 /* Take care: copy->hc_hai.hai_action, len, gid and data are not 779 /* Take care: copy->hc_hai.hai_action, len, gid and data are not
@@ -823,18 +839,18 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
823 * when the file will not be modified for some tunable 839 * when the file will not be modified for some tunable
824 * time 840 * time
825 */ 841 */
826 /* we do not notify caller */
827 hpk.hpk_flags &= ~HP_FLAG_RETRY; 842 hpk.hpk_flags &= ~HP_FLAG_RETRY;
843 rc = -EBUSY;
828 /* hpk_errval must be >= 0 */ 844 /* hpk_errval must be >= 0 */
829 hpk.hpk_errval = EBUSY; 845 hpk.hpk_errval = -rc;
830 } 846 }
831 } 847 }
832 848
833progress: 849progress:
834 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), 850 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
835 &hpk, NULL); 851 &hpk, NULL);
836 852
837 return rc; 853 return rc ? rc : rc2;
838} 854}
839 855
840static int copy_and_ioctl(int cmd, struct obd_export *exp, 856static int copy_and_ioctl(int cmd, struct obd_export *exp,
@@ -862,10 +878,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
862 int rc = 0; 878 int rc = 0;
863 879
864 switch (cmd) { 880 switch (cmd) {
865 case LUSTRE_Q_INVALIDATE:
866 case LUSTRE_Q_FINVALIDATE:
867 case Q_QUOTAON:
868 case Q_QUOTAOFF:
869 case Q_SETQUOTA: 881 case Q_SETQUOTA:
870 case Q_SETINFO: 882 case Q_SETINFO:
871 if (!capable(CFS_CAP_SYS_ADMIN)) 883 if (!capable(CFS_CAP_SYS_ADMIN))
@@ -930,10 +942,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
930 QCTL_COPY(oqctl, qctl); 942 QCTL_COPY(oqctl, qctl);
931 rc = obd_quotactl(sbi->ll_md_exp, oqctl); 943 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
932 if (rc) { 944 if (rc) {
933 if (rc != -EALREADY && cmd == Q_QUOTAON) {
934 oqctl->qc_cmd = Q_QUOTAOFF;
935 obd_quotactl(sbi->ll_md_exp, oqctl);
936 }
937 kfree(oqctl); 945 kfree(oqctl);
938 return rc; 946 return rc;
939 } 947 }
@@ -1077,7 +1085,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1077 goto out_free; 1085 goto out_free;
1078 } 1086 }
1079 1087
1080 rc = ll_get_fid_by_name(inode, filename, namelen, NULL); 1088 rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
1081 if (rc < 0) { 1089 if (rc < 0) {
1082 CERROR("%s: lookup %.*s failed: rc = %d\n", 1090 CERROR("%s: lookup %.*s failed: rc = %d\n",
1083 ll_get_fsname(inode->i_sb, NULL, 0), namelen, 1091 ll_get_fsname(inode->i_sb, NULL, 0), namelen,
@@ -1189,6 +1197,7 @@ lmv_out_free:
1189 struct lmv_user_md *tmp = NULL; 1197 struct lmv_user_md *tmp = NULL;
1190 union lmv_mds_md *lmm = NULL; 1198 union lmv_mds_md *lmm = NULL;
1191 u64 valid = 0; 1199 u64 valid = 0;
1200 int max_stripe_count;
1192 int stripe_count; 1201 int stripe_count;
1193 int mdt_index; 1202 int mdt_index;
1194 int lum_size; 1203 int lum_size;
@@ -1200,6 +1209,7 @@ lmv_out_free:
1200 if (copy_from_user(&lum, ulmv, sizeof(*ulmv))) 1209 if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
1201 return -EFAULT; 1210 return -EFAULT;
1202 1211
1212 max_stripe_count = lum.lum_stripe_count;
1203 /* 1213 /*
1204 * lum_magic will indicate which stripe the ioctl will like 1214 * lum_magic will indicate which stripe the ioctl will like
1205 * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC 1215 * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
@@ -1219,9 +1229,6 @@ lmv_out_free:
1219 1229
1220 /* Get default LMV EA */ 1230 /* Get default LMV EA */
1221 if (lum.lum_magic == LMV_USER_MAGIC) { 1231 if (lum.lum_magic == LMV_USER_MAGIC) {
1222 if (rc)
1223 goto finish_req;
1224
1225 if (lmmsize > sizeof(*ulmv)) { 1232 if (lmmsize > sizeof(*ulmv)) {
1226 rc = -EINVAL; 1233 rc = -EINVAL;
1227 goto finish_req; 1234 goto finish_req;
@@ -1234,6 +1241,16 @@ lmv_out_free:
1234 } 1241 }
1235 1242
1236 stripe_count = lmv_mds_md_stripe_count_get(lmm); 1243 stripe_count = lmv_mds_md_stripe_count_get(lmm);
1244 if (max_stripe_count < stripe_count) {
1245 lum.lum_stripe_count = stripe_count;
1246 if (copy_to_user(ulmv, &lum, sizeof(lum))) {
1247 rc = -EFAULT;
1248 goto finish_req;
1249 }
1250 rc = -E2BIG;
1251 goto finish_req;
1252 }
1253
1237 lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1); 1254 lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
1238 tmp = kzalloc(lum_size, GFP_NOFS); 1255 tmp = kzalloc(lum_size, GFP_NOFS);
1239 if (!tmp) { 1256 if (!tmp) {
@@ -1370,134 +1387,6 @@ out_req:
1370 ll_putname(filename); 1387 ll_putname(filename);
1371 return rc; 1388 return rc;
1372 } 1389 }
1373 case IOC_LOV_GETINFO: {
1374 struct lov_user_mds_data __user *lumd;
1375 struct lov_stripe_md *lsm;
1376 struct lov_user_md __user *lum;
1377 struct lov_mds_md *lmm;
1378 int lmmsize;
1379 lstat_t st;
1380
1381 lumd = (struct lov_user_mds_data __user *)arg;
1382 lum = &lumd->lmd_lmm;
1383
1384 rc = ll_get_max_mdsize(sbi, &lmmsize);
1385 if (rc)
1386 return rc;
1387
1388 lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
1389 if (!lmm)
1390 return -ENOMEM;
1391 if (copy_from_user(lmm, lum, lmmsize)) {
1392 rc = -EFAULT;
1393 goto free_lmm;
1394 }
1395
1396 switch (lmm->lmm_magic) {
1397 case LOV_USER_MAGIC_V1:
1398 if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
1399 break;
1400 /* swab objects first so that stripes num will be sane */
1401 lustre_swab_lov_user_md_objects(
1402 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
1403 ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
1404 lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
1405 break;
1406 case LOV_USER_MAGIC_V3:
1407 if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
1408 break;
1409 /* swab objects first so that stripes num will be sane */
1410 lustre_swab_lov_user_md_objects(
1411 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
1412 ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
1413 lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
1414 break;
1415 default:
1416 rc = -EINVAL;
1417 goto free_lmm;
1418 }
1419
1420 rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
1421 if (rc < 0) {
1422 rc = -ENOMEM;
1423 goto free_lmm;
1424 }
1425
1426 /* Perform glimpse_size operation. */
1427 memset(&st, 0, sizeof(st));
1428
1429 rc = ll_glimpse_ioctl(sbi, lsm, &st);
1430 if (rc)
1431 goto free_lsm;
1432
1433 if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
1434 rc = -EFAULT;
1435 goto free_lsm;
1436 }
1437
1438free_lsm:
1439 obd_free_memmd(sbi->ll_dt_exp, &lsm);
1440free_lmm:
1441 kvfree(lmm);
1442 return rc;
1443 }
1444 case OBD_IOC_QUOTACHECK: {
1445 struct obd_quotactl *oqctl;
1446 int error = 0;
1447
1448 if (!capable(CFS_CAP_SYS_ADMIN))
1449 return -EPERM;
1450
1451 oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
1452 if (!oqctl)
1453 return -ENOMEM;
1454 oqctl->qc_type = arg;
1455 rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
1456 if (rc < 0) {
1457 CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
1458 error = rc;
1459 }
1460
1461 rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
1462 if (rc < 0)
1463 CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
1464
1465 kfree(oqctl);
1466 return error ?: rc;
1467 }
1468 case OBD_IOC_POLL_QUOTACHECK: {
1469 struct if_quotacheck *check;
1470
1471 if (!capable(CFS_CAP_SYS_ADMIN))
1472 return -EPERM;
1473
1474 check = kzalloc(sizeof(*check), GFP_NOFS);
1475 if (!check)
1476 return -ENOMEM;
1477
1478 rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
1479 NULL);
1480 if (rc) {
1481 CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
1482 if (copy_to_user((void __user *)arg, check,
1483 sizeof(*check)))
1484 CDEBUG(D_QUOTA, "copy_to_user failed\n");
1485 goto out_poll;
1486 }
1487
1488 rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
1489 NULL);
1490 if (rc) {
1491 CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
1492 if (copy_to_user((void __user *)arg, check,
1493 sizeof(*check)))
1494 CDEBUG(D_QUOTA, "copy_to_user failed\n");
1495 goto out_poll;
1496 }
1497out_poll:
1498 kfree(check);
1499 return rc;
1500 }
1501 case OBD_IOC_QUOTACTL: { 1390 case OBD_IOC_QUOTACTL: {
1502 struct if_quotactl *qctl; 1391 struct if_quotactl *qctl;
1503 1392
@@ -1536,7 +1425,7 @@ out_quotactl:
1536 exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp; 1425 exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
1537 vallen = sizeof(count); 1426 vallen = sizeof(count);
1538 rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT), 1427 rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
1539 KEY_TGT_COUNT, &vallen, &count, NULL); 1428 KEY_TGT_COUNT, &vallen, &count);
1540 if (rc) { 1429 if (rc) {
1541 CERROR("get target count failed: %d\n", rc); 1430 CERROR("get target count failed: %d\n", rc);
1542 return rc; 1431 return rc;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index e1d784bae064..f634c11216e6 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -44,6 +44,7 @@
44#include <linux/mount.h> 44#include <linux/mount.h>
45#include "../include/lustre/ll_fiemap.h" 45#include "../include/lustre/ll_fiemap.h"
46#include "../include/lustre/lustre_ioctl.h" 46#include "../include/lustre/lustre_ioctl.h"
47#include "../include/lustre_swab.h"
47 48
48#include "../include/cl_object.h" 49#include "../include/cl_object.h"
49#include "llite_internal.h" 50#include "llite_internal.h"
@@ -75,60 +76,56 @@ static void ll_file_data_put(struct ll_file_data *fd)
75 kmem_cache_free(ll_file_data_slab, fd); 76 kmem_cache_free(ll_file_data_slab, fd);
76} 77}
77 78
78void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data, 79/**
79 struct lustre_handle *fh) 80 * Packs all the attributes into @op_data for the CLOSE rpc.
81 */
82static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
83 struct obd_client_handle *och)
80{ 84{
81 op_data->op_fid1 = ll_i2info(inode)->lli_fid; 85 struct ll_inode_info *lli = ll_i2info(inode);
86
87 ll_prep_md_op_data(op_data, inode, NULL, NULL,
88 0, 0, LUSTRE_OPC_ANY, NULL);
89
82 op_data->op_attr.ia_mode = inode->i_mode; 90 op_data->op_attr.ia_mode = inode->i_mode;
83 op_data->op_attr.ia_atime = inode->i_atime; 91 op_data->op_attr.ia_atime = inode->i_atime;
84 op_data->op_attr.ia_mtime = inode->i_mtime; 92 op_data->op_attr.ia_mtime = inode->i_mtime;
85 op_data->op_attr.ia_ctime = inode->i_ctime; 93 op_data->op_attr.ia_ctime = inode->i_ctime;
86 op_data->op_attr.ia_size = i_size_read(inode); 94 op_data->op_attr.ia_size = i_size_read(inode);
95 op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
96 ATTR_MTIME | ATTR_MTIME_SET |
97 ATTR_CTIME | ATTR_CTIME_SET;
87 op_data->op_attr_blocks = inode->i_blocks; 98 op_data->op_attr_blocks = inode->i_blocks;
88 op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags); 99 op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
89 op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch; 100 op_data->op_handle = och->och_fh;
90 if (fh)
91 op_data->op_handle = *fh;
92 101
93 if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED) 102 /*
103 * For HSM: if inode data has been modified, pack it so that
104 * MDT can set data dirty flag in the archive.
105 */
106 if (och->och_flags & FMODE_WRITE &&
107 test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags))
94 op_data->op_bias |= MDS_DATA_MODIFIED; 108 op_data->op_bias |= MDS_DATA_MODIFIED;
95} 109}
96 110
97/** 111/**
98 * Closes the IO epoch and packs all the attributes into @op_data for 112 * Perform a close, possibly with a bias.
99 * the CLOSE rpc. 113 * The meaning of "data" depends on the value of "bias".
114 *
115 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
116 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
117 * swap layouts with.
100 */ 118 */
101static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
102 struct obd_client_handle *och)
103{
104 op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
105 ATTR_MTIME | ATTR_MTIME_SET |
106 ATTR_CTIME | ATTR_CTIME_SET;
107
108 if (!(och->och_flags & FMODE_WRITE))
109 goto out;
110
111 if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
112 op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
113 else
114 ll_ioepoch_close(inode, op_data, &och, 0);
115
116out:
117 ll_pack_inode2opdata(inode, op_data, &och->och_fh);
118 ll_prep_md_op_data(op_data, inode, NULL, NULL,
119 0, 0, LUSTRE_OPC_ANY, NULL);
120}
121
122static int ll_close_inode_openhandle(struct obd_export *md_exp, 119static int ll_close_inode_openhandle(struct obd_export *md_exp,
123 struct inode *inode,
124 struct obd_client_handle *och, 120 struct obd_client_handle *och,
125 const __u64 *data_version) 121 struct inode *inode,
122 enum mds_op_bias bias,
123 void *data)
126{ 124{
127 struct obd_export *exp = ll_i2mdexp(inode); 125 struct obd_export *exp = ll_i2mdexp(inode);
128 struct md_op_data *op_data; 126 struct md_op_data *op_data;
129 struct ptlrpc_request *req = NULL; 127 struct ptlrpc_request *req = NULL;
130 struct obd_device *obd = class_exp2obd(exp); 128 struct obd_device *obd = class_exp2obd(exp);
131 int epoch_close = 1;
132 int rc; 129 int rc;
133 130
134 if (!obd) { 131 if (!obd) {
@@ -150,65 +147,51 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
150 } 147 }
151 148
152 ll_prepare_close(inode, op_data, och); 149 ll_prepare_close(inode, op_data, och);
153 if (data_version) { 150 switch (bias) {
154 /* Pass in data_version implies release. */ 151 case MDS_CLOSE_LAYOUT_SWAP:
152 LASSERT(data);
153 op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
154 op_data->op_data_version = 0;
155 op_data->op_lease_handle = och->och_lease_handle;
156 op_data->op_fid2 = *ll_inode2fid(data);
157 break;
158
159 case MDS_HSM_RELEASE:
160 LASSERT(data);
155 op_data->op_bias |= MDS_HSM_RELEASE; 161 op_data->op_bias |= MDS_HSM_RELEASE;
156 op_data->op_data_version = *data_version; 162 op_data->op_data_version = *(__u64 *)data;
157 op_data->op_lease_handle = och->och_lease_handle; 163 op_data->op_lease_handle = och->och_lease_handle;
158 op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS; 164 op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
165 break;
166
167 default:
168 LASSERT(!data);
169 break;
159 } 170 }
160 epoch_close = op_data->op_flags & MF_EPOCH_CLOSE; 171
161 rc = md_close(md_exp, op_data, och->och_mod, &req); 172 rc = md_close(md_exp, op_data, och->och_mod, &req);
162 if (rc == -EAGAIN) { 173 if (rc) {
163 /* This close must have the epoch closed. */
164 LASSERT(epoch_close);
165 /* MDS has instructed us to obtain Size-on-MDS attribute from
166 * OSTs and send setattr to back to MDS.
167 */
168 rc = ll_som_update(inode, op_data);
169 if (rc) {
170 CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
171 ll_i2mdexp(inode)->exp_obd->obd_name,
172 PFID(ll_inode2fid(inode)), rc);
173 rc = 0;
174 }
175 } else if (rc) {
176 CERROR("%s: inode "DFID" mdc close failed: rc = %d\n", 174 CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
177 ll_i2mdexp(inode)->exp_obd->obd_name, 175 ll_i2mdexp(inode)->exp_obd->obd_name,
178 PFID(ll_inode2fid(inode)), rc); 176 PFID(ll_inode2fid(inode)), rc);
179 } 177 }
180 178
181 /* DATA_MODIFIED flag was successfully sent on close, cancel data 179 if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) &&
182 * modification flag. 180 !rc) {
183 */
184 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
185 struct ll_inode_info *lli = ll_i2info(inode);
186
187 spin_lock(&lli->lli_lock);
188 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
189 spin_unlock(&lli->lli_lock);
190 }
191
192 if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
193 struct mdt_body *body; 181 struct mdt_body *body;
194 182
195 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); 183 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
196 if (!(body->mbo_valid & OBD_MD_FLRELEASED)) 184 if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
197 rc = -EBUSY; 185 rc = -EBUSY;
198 } 186 }
199 187
200 ll_finish_md_op_data(op_data); 188 ll_finish_md_op_data(op_data);
201 189
202out: 190out:
203 if (exp_connect_som(exp) && !epoch_close && 191 md_clear_open_replay_data(md_exp, och);
204 S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) { 192 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
205 ll_queue_done_writing(inode, LLIF_DONE_WRITING); 193 kfree(och);
206 } else { 194
207 md_clear_open_replay_data(md_exp, och);
208 /* Free @och if it is not waiting for DONE_WRITING. */
209 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
210 kfree(och);
211 }
212 if (req) /* This is close request */ 195 if (req) /* This is close request */
213 ptlrpc_req_finished(req); 196 ptlrpc_req_finished(req);
214 return rc; 197 return rc;
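A minimal sketch of the three call shapes the reworked ll_close_inode_openhandle() now takes, mirroring the callers updated later in this patch (error handling elided, variable names illustrative):

	/* plain close: no close intent, no extra payload */
	rc = ll_close_inode_openhandle(md_exp, och, inode, 0, NULL);

	/* HSM release: "data" points to the file's data version */
	rc = ll_close_inode_openhandle(md_exp, och, inode,
				       MDS_HSM_RELEASE, &data_version);

	/* layout swap on close: "data" is the inode to swap layouts with */
	rc = ll_close_inode_openhandle(md_exp, och, inode,
				       MDS_CLOSE_LAYOUT_SWAP, inode2);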
@@ -252,7 +235,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
252 * be closed. 235 * be closed.
253 */ 236 */
254 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, 237 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
255 inode, och, NULL); 238 och, inode, 0, NULL);
256 } 239 }
257 240
258 return rc; 241 return rc;
@@ -266,7 +249,9 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
266 int lockmode; 249 int lockmode;
267 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK; 250 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
268 struct lustre_handle lockh; 251 struct lustre_handle lockh;
269 ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} }; 252 union ldlm_policy_data policy = {
253 .l_inodebits = { MDS_INODELOCK_OPEN }
254 };
270 int rc = 0; 255 int rc = 0;
271 256
272 /* clear group lock, if present */ 257 /* clear group lock, if present */
@@ -288,7 +273,8 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
288 } 273 }
289 274
290 if (fd->fd_och) { 275 if (fd->fd_och) {
291 rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL); 276 rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0,
277 NULL);
292 fd->fd_och = NULL; 278 fd->fd_och = NULL;
293 goto out; 279 goto out;
294 } 280 }
@@ -437,20 +423,6 @@ out:
437 return rc; 423 return rc;
438} 424}
439 425
440/**
441 * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
442 * not believe attributes if a few ioepoch holders exist. Attributes for
443 * previous ioepoch if new one is opened are also skipped by MDS.
444 */
445void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
446{
447 if (ioepoch && lli->lli_ioepoch != ioepoch) {
448 lli->lli_ioepoch = ioepoch;
449 CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n",
450 ioepoch, PFID(&lli->lli_fid));
451 }
452}
453
454static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it, 426static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
455 struct obd_client_handle *och) 427 struct obd_client_handle *och)
456{ 428{
@@ -470,23 +442,17 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
470 struct ll_file_data *fd, struct obd_client_handle *och) 442 struct ll_file_data *fd, struct obd_client_handle *och)
471{ 443{
472 struct inode *inode = file_inode(file); 444 struct inode *inode = file_inode(file);
473 struct ll_inode_info *lli = ll_i2info(inode);
474 445
475 LASSERT(!LUSTRE_FPRIVATE(file)); 446 LASSERT(!LUSTRE_FPRIVATE(file));
476 447
477 LASSERT(fd); 448 LASSERT(fd);
478 449
479 if (och) { 450 if (och) {
480 struct mdt_body *body;
481 int rc; 451 int rc;
482 452
483 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); 453 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
484 if (rc != 0) 454 if (rc != 0)
485 return rc; 455 return rc;
486
487 body = req_capsule_server_get(&it->it_request->rq_pill,
488 &RMF_MDT_BODY);
489 ll_ioepoch_open(lli, body->mbo_ioepoch);
490 } 456 }
491 457
492 LUSTRE_FPRIVATE(file) = fd; 458 LUSTRE_FPRIVATE(file) = fd;
@@ -677,12 +643,6 @@ restart:
677 if (!S_ISREG(inode->i_mode)) 643 if (!S_ISREG(inode->i_mode))
678 goto out_och_free; 644 goto out_och_free;
679 645
680 if (!lli->lli_has_smd &&
681 (cl_is_lov_delay_create(file->f_flags) ||
682 (file->f_mode & FMODE_WRITE) == 0)) {
683 CDEBUG(D_INODE, "object creation was delayed\n");
684 goto out_och_free;
685 }
686 cl_lov_delay_create_clear(&file->f_flags); 646 cl_lov_delay_create_clear(&file->f_flags);
687 goto out_och_free; 647 goto out_och_free;
688 648
@@ -867,7 +827,7 @@ out_close:
867 it.it_lock_mode = 0; 827 it.it_lock_mode = 0;
868 och->och_lease_handle.cookie = 0ULL; 828 och->och_lease_handle.cookie = 0ULL;
869 } 829 }
870 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL); 830 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL);
871 if (rc2 < 0) 831 if (rc2 < 0)
872 CERROR("%s: error closing file "DFID": %d\n", 832 CERROR("%s: error closing file "DFID": %d\n",
873 ll_get_fsname(inode->i_sb, NULL, 0), 833 ll_get_fsname(inode->i_sb, NULL, 0),
@@ -881,6 +841,69 @@ out:
881} 841}
882 842
883/** 843/**
844 * Check whether a layout swap can be done between two inodes.
845 *
846 * \param[in] inode1 First inode to check
847 * \param[in] inode2 Second inode to check
848 *
849 * \retval 0 on success, layout swap can be performed between both inodes
850 * \retval negative error code if requirements are not met
851 */
852static int ll_check_swap_layouts_validity(struct inode *inode1,
853 struct inode *inode2)
854{
855 if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
856 return -EINVAL;
857
858 if (inode_permission(inode1, MAY_WRITE) ||
859 inode_permission(inode2, MAY_WRITE))
860 return -EPERM;
861
862 if (inode1->i_sb != inode2->i_sb)
863 return -EXDEV;
864
865 return 0;
866}
867
868static int ll_swap_layouts_close(struct obd_client_handle *och,
869 struct inode *inode, struct inode *inode2)
870{
871 const struct lu_fid *fid1 = ll_inode2fid(inode);
872 const struct lu_fid *fid2;
873 int rc;
874
875 CDEBUG(D_INODE, "%s: biased close of file " DFID "\n",
876 ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
877
878 rc = ll_check_swap_layouts_validity(inode, inode2);
879 if (rc < 0)
880 goto out_free_och;
881
882 /* We now know that inode2 is a lustre inode */
883 fid2 = ll_inode2fid(inode2);
884
885 rc = lu_fid_cmp(fid1, fid2);
886 if (!rc) {
887 rc = -EINVAL;
888 goto out_free_och;
889 }
890
891 /*
892 * Close the file and swap layouts between inode & inode2.
893 * NB: lease lock handle is released in mdc_close_layout_swap_pack()
894 * because we still need it to pack l_remote_handle to MDT.
895 */
896 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
897 MDS_CLOSE_LAYOUT_SWAP, inode2);
898
899 och = NULL; /* freed in ll_close_inode_openhandle() */
900
901out_free_och:
902 kfree(och);
903 return rc;
904}
905
906/**
884 * Release lease and close the file. 907 * Release lease and close the file.
885 * It will check if the lease has ever been broken. 908 * It will check if the lease has ever been broken.
886 */ 909 */
@@ -907,84 +930,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
907 *lease_broken = cancelled; 930 *lease_broken = cancelled;
908 931
909 return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, 932 return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
910 inode, och, NULL); 933 och, inode, 0, NULL);
911}
912
913/* Fills the obdo with the attributes for the lsm */
914static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
915 struct obdo *obdo, __u64 ioepoch, int dv_flags)
916{
917 struct ptlrpc_request_set *set;
918 struct obd_info oinfo = { };
919 int rc;
920
921 LASSERT(lsm);
922
923 oinfo.oi_md = lsm;
924 oinfo.oi_oa = obdo;
925 oinfo.oi_oa->o_oi = lsm->lsm_oi;
926 oinfo.oi_oa->o_mode = S_IFREG;
927 oinfo.oi_oa->o_ioepoch = ioepoch;
928 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
929 OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
930 OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
931 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
932 OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
933 OBD_MD_FLDATAVERSION;
934 if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
935 oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
936 oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
937 if (dv_flags & LL_DV_WR_FLUSH)
938 oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
939 }
940
941 set = ptlrpc_prep_set();
942 if (!set) {
943 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
944 rc = -ENOMEM;
945 } else {
946 rc = obd_getattr_async(exp, &oinfo, set);
947 if (rc == 0)
948 rc = ptlrpc_set_wait(set);
949 ptlrpc_set_destroy(set);
950 }
951 if (rc == 0) {
952 oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
953 OBD_MD_FLATIME | OBD_MD_FLMTIME |
954 OBD_MD_FLCTIME | OBD_MD_FLSIZE |
955 OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
956 if (dv_flags & LL_DV_WR_FLUSH &&
957 !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
958 oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
959 return -ENOTSUPP;
960 }
961 return rc;
962}
963
964/**
965 * Performs the getattr on the inode and updates its fields.
966 * If @sync != 0, perform the getattr under the server-side lock.
967 */
968int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
969 __u64 ioepoch, int sync)
970{
971 struct lov_stripe_md *lsm;
972 int rc;
973
974 lsm = ccc_inode_lsm_get(inode);
975 rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
976 obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
977 if (rc == 0) {
978 struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
979
980 obdo_refresh_inode(inode, obdo, obdo->o_valid);
981 CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
982 POSTID(oi), i_size_read(inode),
983 (unsigned long long)inode->i_blocks,
984 1UL << inode->i_blkbits);
985 }
986 ccc_inode_lsm_put(inode, lsm);
987 return rc;
988} 934}
989 935
990int ll_merge_attr(const struct lu_env *env, struct inode *inode) 936int ll_merge_attr(const struct lu_env *env, struct inode *inode)
@@ -1043,23 +989,6 @@ out_size_unlock:
1043 return rc; 989 return rc;
1044} 990}
1045 991
1046int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
1047 lstat_t *st)
1048{
1049 struct obdo obdo = { 0 };
1050 int rc;
1051
1052 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0);
1053 if (rc == 0) {
1054 st->st_size = obdo.o_size;
1055 st->st_blocks = obdo.o_blocks;
1056 st->st_mtime = obdo.o_mtime;
1057 st->st_atime = obdo.o_atime;
1058 st->st_ctime = obdo.o_ctime;
1059 }
1060 return rc;
1061}
1062
1063static bool file_is_noatime(const struct file *file) 992static bool file_is_noatime(const struct file *file)
1064{ 993{
1065 const struct vfsmount *mnt = file->f_path.mnt; 994 const struct vfsmount *mnt = file->f_path.mnt;
@@ -1117,9 +1046,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
1117{ 1046{
1118 struct ll_inode_info *lli = ll_i2info(file_inode(file)); 1047 struct ll_inode_info *lli = ll_i2info(file_inode(file));
1119 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1048 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1049 struct vvp_io *vio = vvp_env_io(env);
1120 struct range_lock range; 1050 struct range_lock range;
1121 struct cl_io *io; 1051 struct cl_io *io;
1122 ssize_t result; 1052 ssize_t result = 0;
1053 int rc = 0;
1123 1054
1124 CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n", 1055 CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
1125 file, iot, *ppos, count); 1056 file, iot, *ppos, count);
@@ -1151,18 +1082,15 @@ restart:
1151 CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n", 1082 CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
1152 range.rl_node.in_extent.start, 1083 range.rl_node.in_extent.start,
1153 range.rl_node.in_extent.end); 1084 range.rl_node.in_extent.end);
1154 result = range_lock(&lli->lli_write_tree, 1085 rc = range_lock(&lli->lli_write_tree, &range);
1155 &range); 1086 if (rc < 0)
1156 if (result < 0)
1157 goto out; 1087 goto out;
1158 1088
1159 range_locked = true; 1089 range_locked = true;
1160 } 1090 }
1161 down_read(&lli->lli_trunc_sem);
1162 ll_cl_add(file, env, io); 1091 ll_cl_add(file, env, io);
1163 result = cl_io_loop(env, io); 1092 rc = cl_io_loop(env, io);
1164 ll_cl_remove(file, env); 1093 ll_cl_remove(file, env);
1165 up_read(&lli->lli_trunc_sem);
1166 if (range_locked) { 1094 if (range_locked) {
1167 CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n", 1095 CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
1168 range.rl_node.in_extent.start, 1096 range.rl_node.in_extent.start,
@@ -1171,24 +1099,26 @@ restart:
1171 } 1099 }
1172 } else { 1100 } else {
1173 /* cl_io_rw_init() handled IO */ 1101 /* cl_io_rw_init() handled IO */
1174 result = io->ci_result; 1102 rc = io->ci_result;
1175 } 1103 }
1176 1104
1177 if (io->ci_nob > 0) { 1105 if (io->ci_nob > 0) {
1178 result = io->ci_nob; 1106 result = io->ci_nob;
1107 count -= io->ci_nob;
1179 *ppos = io->u.ci_wr.wr.crw_pos; 1108 *ppos = io->u.ci_wr.wr.crw_pos;
1109
1110 /* prepare IO restart */
1111 if (count > 0)
1112 args->u.normal.via_iter = vio->vui_iter;
1180 } 1113 }
1181 goto out;
1182out: 1114out:
1183 cl_io_fini(env, io); 1115 cl_io_fini(env, io);
1184 /* If any bit been read/written (result != 0), we just return 1116
1185 * short read/write instead of restart io. 1117 if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
1186 */ 1118 CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
1187 if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { 1119 file_dentry(file)->d_name.name,
1188 CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n",
1189 iot == CIT_READ ? "read" : "write", 1120 iot == CIT_READ ? "read" : "write",
1190 file, *ppos, count); 1121 *ppos, count, result);
1191 LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
1192 goto restart; 1122 goto restart;
1193 } 1123 }
1194 1124
@@ -1201,13 +1131,19 @@ out:
1201 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), 1131 ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
1202 LPROC_LL_WRITE_BYTES, result); 1132 LPROC_LL_WRITE_BYTES, result);
1203 fd->fd_write_failed = false; 1133 fd->fd_write_failed = false;
1204 } else if (result != -ERESTARTSYS) { 1134 } else if (!result && !rc) {
1135 rc = io->ci_result;
1136 if (rc < 0)
1137 fd->fd_write_failed = true;
1138 else
1139 fd->fd_write_failed = false;
1140 } else if (rc != -ERESTARTSYS) {
1205 fd->fd_write_failed = true; 1141 fd->fd_write_failed = true;
1206 } 1142 }
1207 } 1143 }
1208 CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result); 1144 CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
1209 1145
1210 return result; 1146 return result > 0 ? result : rc;
1211} 1147}
1212 1148
1213static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 1149static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -1259,37 +1195,22 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1259 __u64 flags, struct lov_user_md *lum, 1195 __u64 flags, struct lov_user_md *lum,
1260 int lum_size) 1196 int lum_size)
1261{ 1197{
1262 struct lov_stripe_md *lsm = NULL;
1263 struct lookup_intent oit = { 1198 struct lookup_intent oit = {
1264 .it_op = IT_OPEN, 1199 .it_op = IT_OPEN,
1265 .it_flags = flags | MDS_OPEN_BY_FID, 1200 .it_flags = flags | MDS_OPEN_BY_FID,
1266 }; 1201 };
1267 int rc = 0; 1202 int rc = 0;
1268 1203
1269 lsm = ccc_inode_lsm_get(inode);
1270 if (lsm) {
1271 ccc_inode_lsm_put(inode, lsm);
1272 CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
1273 PFID(ll_inode2fid(inode)));
1274 rc = -EEXIST;
1275 goto out;
1276 }
1277
1278 ll_inode_size_lock(inode); 1204 ll_inode_size_lock(inode);
1279 rc = ll_intent_file_open(dentry, lum, lum_size, &oit); 1205 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
1280 if (rc < 0) 1206 if (rc < 0)
1281 goto out_unlock; 1207 goto out_unlock;
1282 rc = oit.it_status;
1283 if (rc < 0)
1284 goto out_unlock;
1285 1208
1286 ll_release_openhandle(inode, &oit); 1209 ll_release_openhandle(inode, &oit);
1287 1210
1288out_unlock: 1211out_unlock:
1289 ll_inode_size_unlock(inode); 1212 ll_inode_size_unlock(inode);
1290 ll_intent_release(&oit); 1213 ll_intent_release(&oit);
1291 ccc_inode_lsm_put(inode, lsm);
1292out:
1293 return rc; 1214 return rc;
1294} 1215}
1295 1216
@@ -1566,7 +1487,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
1566 ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); 1487 ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
1567 1488
1568 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, 1489 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
1569 inode, och, NULL); 1490 och, inode, 0, NULL);
1570out: 1491out:
1571 /* this one is in place of ll_file_open */ 1492 /* this one is in place of ll_file_open */
1572 if (it_disposition(it, DISP_ENQ_OPEN_REF)) { 1493 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
@@ -1579,15 +1500,17 @@ out:
1579/** 1500/**
1580 * Get size for inode for which FIEMAP mapping is requested. 1501 * Get size for inode for which FIEMAP mapping is requested.
1581 * Make the FIEMAP get_info call and return the result. 1502 * Make the FIEMAP get_info call and return the result.
1503 *
1504 * \param fiemap kernel buffer to hold extents
1505 * \param num_bytes kernel buffer size
1582 */ 1506 */
1583static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, 1507static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
1584 size_t num_bytes) 1508 size_t num_bytes)
1585{ 1509{
1586 struct obd_export *exp = ll_i2dtexp(inode); 1510 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
1587 struct lov_stripe_md *lsm = NULL; 1511 struct lu_env *env;
1588 struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, }; 1512 int refcheck;
1589 __u32 vallen = num_bytes; 1513 int rc = 0;
1590 int rc;
1591 1514
1592 /* Checks for fiemap flags */ 1515 /* Checks for fiemap flags */
1593 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) { 1516 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
@@ -1602,21 +1525,9 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
1602 return rc; 1525 return rc;
1603 } 1526 }
1604 1527
1605 lsm = ccc_inode_lsm_get(inode); 1528 env = cl_env_get(&refcheck);
1606 if (!lsm) 1529 if (IS_ERR(env))
1607 return -ENOENT; 1530 return PTR_ERR(env);
1608
1609 /* If the stripe_count > 1 and the application does not understand
1610 * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
1611 */
1612 if (lsm->lsm_stripe_count > 1 &&
1613 !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1614 rc = -EOPNOTSUPP;
1615 goto out;
1616 }
1617
1618 fm_key.oa.o_oi = lsm->lsm_oi;
1619 fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1620 1531
1621 if (i_size_read(inode) == 0) { 1532 if (i_size_read(inode) == 0) {
1622 rc = ll_glimpse_size(inode); 1533 rc = ll_glimpse_size(inode);
@@ -1624,24 +1535,23 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
1624 goto out; 1535 goto out;
1625 } 1536 }
1626 1537
1627 obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE); 1538 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1628 obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid); 1539 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
1540 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
1541
1629 /* If filesize is 0, then there would be no objects for mapping */ 1542 /* If filesize is 0, then there would be no objects for mapping */
1630 if (fm_key.oa.o_size == 0) { 1543 if (fmkey.lfik_oa.o_size == 0) {
1631 fiemap->fm_mapped_extents = 0; 1544 fiemap->fm_mapped_extents = 0;
1632 rc = 0; 1545 rc = 0;
1633 goto out; 1546 goto out;
1634 } 1547 }
1635 1548
1636 memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap)); 1549 memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap));
1637
1638 rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
1639 fiemap, lsm);
1640 if (rc)
1641 CERROR("obd_get_info failed: rc = %d\n", rc);
1642 1550
1551 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
1552 &fmkey, fiemap, &num_bytes);
1643out: 1553out:
1644 ccc_inode_lsm_put(inode, lsm); 1554 cl_env_put(env, &refcheck);
1645 return rc; 1555 return rc;
1646} 1556}
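The caller is expected to size the kernel buffer for the requested number of extents, as the reworked ->fiemap handler further down does; a minimal sketch (illustrative only, error handling elided):

	num_bytes = sizeof(*fiemap) +
		    fiemap->fm_extent_count * sizeof(struct fiemap_extent);
	rc = ll_do_fiemap(inode, fiemap, num_bytes);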
1647 1557
@@ -1689,113 +1599,56 @@ gf_free:
1689 return rc; 1599 return rc;
1690} 1600}
1691 1601
1692static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
1693{
1694 struct ll_user_fiemap *fiemap_s;
1695 size_t num_bytes, ret_bytes;
1696 unsigned int extent_count;
1697 int rc = 0;
1698
1699 /* Get the extent count so we can calculate the size of
1700 * required fiemap buffer
1701 */
1702 if (get_user(extent_count,
1703 &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
1704 return -EFAULT;
1705
1706 if (extent_count >=
1707 (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
1708 return -EINVAL;
1709 num_bytes = sizeof(*fiemap_s) + (extent_count *
1710 sizeof(struct ll_fiemap_extent));
1711
1712 fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
1713 if (!fiemap_s)
1714 return -ENOMEM;
1715
1716 /* get the fiemap value */
1717 if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
1718 sizeof(*fiemap_s))) {
1719 rc = -EFAULT;
1720 goto error;
1721 }
1722
1723 /* If fm_extent_count is non-zero, read the first extent since
1724 * it is used to calculate end_offset and device from previous
1725 * fiemap call.
1726 */
1727 if (extent_count) {
1728 if (copy_from_user(&fiemap_s->fm_extents[0],
1729 (char __user *)arg + sizeof(*fiemap_s),
1730 sizeof(struct ll_fiemap_extent))) {
1731 rc = -EFAULT;
1732 goto error;
1733 }
1734 }
1735
1736 rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
1737 if (rc)
1738 goto error;
1739
1740 ret_bytes = sizeof(struct ll_user_fiemap);
1741
1742 if (extent_count != 0)
1743 ret_bytes += (fiemap_s->fm_mapped_extents *
1744 sizeof(struct ll_fiemap_extent));
1745
1746 if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
1747 rc = -EFAULT;
1748
1749error:
1750 kvfree(fiemap_s);
1751 return rc;
1752}
1753
1754/* 1602/*
1755 * Read the data_version for inode. 1603 * Read the data_version for inode.
1756 * 1604 *
1757 * This value is computed using stripe object version on OST. 1605 * This value is computed using stripe object version on OST.
1758 * Version is computed using server side locking. 1606 * Version is computed using server side locking.
1759 * 1607 *
1760 * @param sync whether to sync on the OST side; 1608 * @param flags whether to sync on the OST side;
1761 * 0: no sync 1609 * 0: no sync
1762 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs 1610 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
1763 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs 1611 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
1764 */ 1612 */
1765int ll_data_version(struct inode *inode, __u64 *data_version, int flags) 1613int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
1766{ 1614{
1767 struct lov_stripe_md *lsm = NULL; 1615 struct cl_object *obj = ll_i2info(inode)->lli_clob;
1768 struct ll_sb_info *sbi = ll_i2sbi(inode); 1616 struct lu_env *env;
1769 struct obdo *obdo = NULL; 1617 struct cl_io *io;
1770 int rc; 1618 int refcheck;
1619 int result;
1771 1620
1772 /* If no stripe, we consider version is 0. */ 1621 /* If no file object initialized, we consider its version is 0. */
1773 lsm = ccc_inode_lsm_get(inode); 1622 if (!obj) {
1774 if (!lsm_has_objects(lsm)) {
1775 *data_version = 0; 1623 *data_version = 0;
1776 CDEBUG(D_INODE, "No object for inode\n"); 1624 return 0;
1777 rc = 0;
1778 goto out;
1779 } 1625 }
1780 1626
1781 obdo = kzalloc(sizeof(*obdo), GFP_NOFS); 1627 env = cl_env_get(&refcheck);
1782 if (!obdo) { 1628 if (IS_ERR(env))
1783 rc = -ENOMEM; 1629 return PTR_ERR(env);
1784 goto out;
1785 }
1786 1630
1787 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags); 1631 io = vvp_env_thread_io(env);
1788 if (rc == 0) { 1632 io->ci_obj = obj;
1789 if (!(obdo->o_valid & OBD_MD_FLDATAVERSION)) 1633 io->u.ci_data_version.dv_data_version = 0;
1790 rc = -EOPNOTSUPP; 1634 io->u.ci_data_version.dv_flags = flags;
1791 else
1792 *data_version = obdo->o_data_version;
1793 }
1794 1635
1795 kfree(obdo); 1636restart:
1796out: 1637 if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
1797 ccc_inode_lsm_put(inode, lsm); 1638 result = cl_io_loop(env, io);
1798 return rc; 1639 else
1640 result = io->ci_result;
1641
1642 *data_version = io->u.ci_data_version.dv_data_version;
1643
1644 cl_io_fini(env, io);
1645
1646 if (unlikely(io->ci_need_restart))
1647 goto restart;
1648
1649 cl_env_put(env, &refcheck);
1650
1651 return result;
1799} 1652}
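A minimal sketch of how a caller samples the data version while flushing, as the migrate path added later in this patch does (error handling elided, the inode assumed to be a regular file):

	__u64 data_version = 0;

	/* LL_DV_WR_FLUSH drops all cached pages under LCK_PW on the OSTs;
	 * LL_DV_RD_FLUSH would only flush dirty pages under LCK_PR.
	 */
	rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);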
1800 1653
1801/* 1654/*
@@ -1803,11 +1656,11 @@ out:
1803 */ 1656 */
1804int ll_hsm_release(struct inode *inode) 1657int ll_hsm_release(struct inode *inode)
1805{ 1658{
1806 struct cl_env_nest nest;
1807 struct lu_env *env; 1659 struct lu_env *env;
1808 struct obd_client_handle *och = NULL; 1660 struct obd_client_handle *och = NULL;
1809 __u64 data_version = 0; 1661 __u64 data_version = 0;
1810 int rc; 1662 int rc;
1663 int refcheck;
1811 1664
1812 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n", 1665 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
1813 ll_get_fsname(inode->i_sb, NULL, 0), 1666 ll_get_fsname(inode->i_sb, NULL, 0),
@@ -1824,21 +1677,21 @@ int ll_hsm_release(struct inode *inode)
1824 if (rc != 0) 1677 if (rc != 0)
1825 goto out; 1678 goto out;
1826 1679
1827 env = cl_env_nested_get(&nest); 1680 env = cl_env_get(&refcheck);
1828 if (IS_ERR(env)) { 1681 if (IS_ERR(env)) {
1829 rc = PTR_ERR(env); 1682 rc = PTR_ERR(env);
1830 goto out; 1683 goto out;
1831 } 1684 }
1832 1685
1833 ll_merge_attr(env, inode); 1686 ll_merge_attr(env, inode);
1834 cl_env_nested_put(&nest, env); 1687 cl_env_put(env, &refcheck);
1835 1688
1836 /* Release the file. 1689 /* Release the file.
1837 * NB: lease lock handle is released in mdc_hsm_release_pack() because 1690 * NB: lease lock handle is released in mdc_hsm_release_pack() because
1838 * we still need it to pack l_remote_handle to MDT. 1691 * we still need it to pack l_remote_handle to MDT.
1839 */ 1692 */
1840 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, 1693 rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
1841 &data_version); 1694 MDS_HSM_RELEASE, &data_version);
1842 och = NULL; 1695 och = NULL;
1843 1696
1844out: 1697out:
@@ -1849,10 +1702,12 @@ out:
1849} 1702}
1850 1703
1851struct ll_swap_stack { 1704struct ll_swap_stack {
1852 struct iattr ia1, ia2; 1705 u64 dv1;
1853 __u64 dv1, dv2; 1706 u64 dv2;
1854 struct inode *inode1, *inode2; 1707 struct inode *inode1;
1855 bool check_dv1, check_dv2; 1708 struct inode *inode2;
1709 bool check_dv1;
1710 bool check_dv2;
1856}; 1711};
1857 1712
1858static int ll_swap_layouts(struct file *file1, struct file *file2, 1713static int ll_swap_layouts(struct file *file1, struct file *file2,
@@ -1872,21 +1727,9 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
1872 llss->inode1 = file_inode(file1); 1727 llss->inode1 = file_inode(file1);
1873 llss->inode2 = file_inode(file2); 1728 llss->inode2 = file_inode(file2);
1874 1729
1875 if (!S_ISREG(llss->inode2->i_mode)) { 1730 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
1876 rc = -EINVAL; 1731 if (rc < 0)
1877 goto free;
1878 }
1879
1880 if (inode_permission(llss->inode1, MAY_WRITE) ||
1881 inode_permission(llss->inode2, MAY_WRITE)) {
1882 rc = -EPERM;
1883 goto free; 1732 goto free;
1884 }
1885
1886 if (llss->inode2->i_sb != llss->inode1->i_sb) {
1887 rc = -EXDEV;
1888 goto free;
1889 }
1890 1733
1891 /* we use 2 bool because it is easier to swap than 2 bits */ 1734 /* we use 2 bool because it is easier to swap than 2 bits */
1892 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1) 1735 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
@@ -1900,10 +1743,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
1900 llss->dv2 = lsl->sl_dv2; 1743 llss->dv2 = lsl->sl_dv2;
1901 1744
1902 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2)); 1745 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
1903 if (rc == 0) /* same file, done! */ { 1746 if (!rc) /* same file, done! */
1904 rc = 0;
1905 goto free; 1747 goto free;
1906 }
1907 1748
1908 if (rc < 0) { /* sequentialize it */ 1749 if (rc < 0) { /* sequentialize it */
1909 swap(llss->inode1, llss->inode2); 1750 swap(llss->inode1, llss->inode2);
@@ -1925,19 +1766,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
1925 } 1766 }
1926 } 1767 }
1927 1768
1928 /* to be able to restore mtime and atime after swap
1929 * we need to first save them
1930 */
1931 if (lsl->sl_flags &
1932 (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
1933 llss->ia1.ia_mtime = llss->inode1->i_mtime;
1934 llss->ia1.ia_atime = llss->inode1->i_atime;
1935 llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
1936 llss->ia2.ia_mtime = llss->inode2->i_mtime;
1937 llss->ia2.ia_atime = llss->inode2->i_atime;
1938 llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
1939 }
1940
1941 /* ultimate check, before swapping the layouts we check if 1769 /* ultimate check, before swapping the layouts we check if
1942 * dataversion has changed (if requested) 1770 * dataversion has changed (if requested)
1943 */ 1771 */
@@ -1987,39 +1815,6 @@ putgl:
1987 ll_put_grouplock(llss->inode1, file1, gid); 1815 ll_put_grouplock(llss->inode1, file1, gid);
1988 } 1816 }
1989 1817
1990 /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
1991 if (rc != 0)
1992 goto free;
1993
1994 /* clear useless flags */
1995 if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
1996 llss->ia1.ia_valid &= ~ATTR_MTIME;
1997 llss->ia2.ia_valid &= ~ATTR_MTIME;
1998 }
1999
2000 if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
2001 llss->ia1.ia_valid &= ~ATTR_ATIME;
2002 llss->ia2.ia_valid &= ~ATTR_ATIME;
2003 }
2004
2005 /* update time if requested */
2006 rc = 0;
2007 if (llss->ia2.ia_valid != 0) {
2008 inode_lock(llss->inode1);
2009 rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
2010 inode_unlock(llss->inode1);
2011 }
2012
2013 if (llss->ia1.ia_valid != 0) {
2014 int rc1;
2015
2016 inode_lock(llss->inode2);
2017 rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
2018 inode_unlock(llss->inode2);
2019 if (rc == 0)
2020 rc = rc1;
2021 }
2022
2023free: 1818free:
2024 kfree(llss); 1819 kfree(llss);
2025 1820
@@ -2176,24 +1971,52 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2176 sizeof(struct lustre_swap_layouts))) 1971 sizeof(struct lustre_swap_layouts)))
2177 return -EFAULT; 1972 return -EFAULT;
2178 1973
2179 if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */ 1974 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
2180 return -EPERM; 1975 return -EPERM;
2181 1976
2182 file2 = fget(lsl.sl_fd); 1977 file2 = fget(lsl.sl_fd);
2183 if (!file2) 1978 if (!file2)
2184 return -EBADF; 1979 return -EBADF;
2185 1980
2186 rc = -EPERM; 1981 /* O_WRONLY or O_RDWR */
2187 if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */ 1982 if ((file2->f_flags & O_ACCMODE) == O_RDONLY) {
1983 rc = -EPERM;
1984 goto out;
1985 }
1986
1987 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
1988 struct obd_client_handle *och = NULL;
1989 struct ll_inode_info *lli;
1990 struct inode *inode2;
1991
1992 if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) {
1993 rc = -EINVAL;
1994 goto out;
1995 }
1996
1997 lli = ll_i2info(inode);
1998 mutex_lock(&lli->lli_och_mutex);
1999 if (fd->fd_lease_och) {
2000 och = fd->fd_lease_och;
2001 fd->fd_lease_och = NULL;
2002 }
2003 mutex_unlock(&lli->lli_och_mutex);
2004 if (!och) {
2005 rc = -ENOLCK;
2006 goto out;
2007 }
2008 inode2 = file_inode(file2);
2009 rc = ll_swap_layouts_close(och, inode, inode2);
2010 } else {
2188 rc = ll_swap_layouts(file, file2, &lsl); 2011 rc = ll_swap_layouts(file, file2, &lsl);
2012 }
2013out:
2189 fput(file2); 2014 fput(file2);
2190 return rc; 2015 return rc;
2191 } 2016 }
2192 case LL_IOC_LOV_GETSTRIPE: 2017 case LL_IOC_LOV_GETSTRIPE:
2193 return ll_file_getstripe(inode, 2018 return ll_file_getstripe(inode,
2194 (struct lov_user_md __user *)arg); 2019 (struct lov_user_md __user *)arg);
2195 case FSFILT_IOC_FIEMAP:
2196 return ll_ioctl_fiemap(inode, arg);
2197 case FSFILT_IOC_GETFLAGS: 2020 case FSFILT_IOC_GETFLAGS:
2198 case FSFILT_IOC_SETFLAGS: 2021 case FSFILT_IOC_SETFLAGS:
2199 return ll_iocontrol(inode, file, cmd, arg); 2022 return ll_iocontrol(inode, file, cmd, arg);
@@ -2489,17 +2312,17 @@ static int ll_flush(struct file *file, fl_owner_t id)
2489int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, 2312int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
2490 enum cl_fsync_mode mode, int ignore_layout) 2313 enum cl_fsync_mode mode, int ignore_layout)
2491{ 2314{
2492 struct cl_env_nest nest;
2493 struct lu_env *env; 2315 struct lu_env *env;
2494 struct cl_io *io; 2316 struct cl_io *io;
2495 struct cl_fsync_io *fio; 2317 struct cl_fsync_io *fio;
2496 int result; 2318 int result;
2319 int refcheck;
2497 2320
2498 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL && 2321 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
2499 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL) 2322 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
2500 return -EINVAL; 2323 return -EINVAL;
2501 2324
2502 env = cl_env_nested_get(&nest); 2325 env = cl_env_get(&refcheck);
2503 if (IS_ERR(env)) 2326 if (IS_ERR(env))
2504 return PTR_ERR(env); 2327 return PTR_ERR(env);
2505 2328
@@ -2522,7 +2345,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
2522 if (result == 0) 2345 if (result == 0)
2523 result = fio->fi_nr_written; 2346 result = fio->fi_nr_written;
2524 cl_io_fini(env, io); 2347 cl_io_fini(env, io);
2525 cl_env_nested_put(&nest, env); 2348 cl_env_put(env, &refcheck);
2526 2349
2527 return result; 2350 return result;
2528} 2351}
@@ -2549,9 +2372,11 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2549 lli->lli_async_rc = 0; 2372 lli->lli_async_rc = 0;
2550 if (rc == 0) 2373 if (rc == 0)
2551 rc = err; 2374 rc = err;
2552 err = lov_read_and_clear_async_rc(lli->lli_clob); 2375 if (lli->lli_clob) {
2553 if (rc == 0) 2376 err = lov_read_and_clear_async_rc(lli->lli_clob);
2554 rc = err; 2377 if (rc == 0)
2378 rc = err;
2379 }
2555 } 2380 }
2556 2381
2557 err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req); 2382 err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
@@ -2588,7 +2413,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
2588 }; 2413 };
2589 struct md_op_data *op_data; 2414 struct md_op_data *op_data;
2590 struct lustre_handle lockh = {0}; 2415 struct lustre_handle lockh = {0};
2591 ldlm_policy_data_t flock = { {0} }; 2416 union ldlm_policy_data flock = { { 0 } };
2592 int fl_type = file_lock->fl_type; 2417 int fl_type = file_lock->fl_type;
2593 __u64 flags = 0; 2418 __u64 flags = 0;
2594 int rc; 2419 int rc;
@@ -2707,7 +2532,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
2707} 2532}
2708 2533
2709int ll_get_fid_by_name(struct inode *parent, const char *name, 2534int ll_get_fid_by_name(struct inode *parent, const char *name,
2710 int namelen, struct lu_fid *fid) 2535 int namelen, struct lu_fid *fid,
2536 struct inode **inode)
2711{ 2537{
2712 struct md_op_data *op_data = NULL; 2538 struct md_op_data *op_data = NULL;
2713 struct ptlrpc_request *req; 2539 struct ptlrpc_request *req;
@@ -2719,7 +2545,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
2719 if (IS_ERR(op_data)) 2545 if (IS_ERR(op_data))
2720 return PTR_ERR(op_data); 2546 return PTR_ERR(op_data);
2721 2547
2722 op_data->op_valid = OBD_MD_FLID; 2548 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
2723 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req); 2549 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
2724 ll_finish_md_op_data(op_data); 2550 ll_finish_md_op_data(op_data);
2725 if (rc < 0) 2551 if (rc < 0)
@@ -2732,6 +2558,9 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
2732 } 2558 }
2733 if (fid) 2559 if (fid)
2734 *fid = body->mbo_fid1; 2560 *fid = body->mbo_fid1;
2561
2562 if (inode)
2563 rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
2735out_req: 2564out_req:
2736 ptlrpc_req_finished(req); 2565 ptlrpc_req_finished(req);
2737 return rc; 2566 return rc;
@@ -2741,9 +2570,12 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
2741 const char *name, int namelen) 2570 const char *name, int namelen)
2742{ 2571{
2743 struct ptlrpc_request *request = NULL; 2572 struct ptlrpc_request *request = NULL;
2573 struct obd_client_handle *och = NULL;
2744 struct inode *child_inode = NULL; 2574 struct inode *child_inode = NULL;
2745 struct dentry *dchild = NULL; 2575 struct dentry *dchild = NULL;
2746 struct md_op_data *op_data; 2576 struct md_op_data *op_data;
2577 struct mdt_body *body;
2578 u64 data_version = 0;
2747 struct qstr qstr; 2579 struct qstr qstr;
2748 int rc; 2580 int rc;
2749 2581
@@ -2762,22 +2594,25 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
2762 dchild = d_lookup(file_dentry(file), &qstr); 2594 dchild = d_lookup(file_dentry(file), &qstr);
2763 if (dchild) { 2595 if (dchild) {
2764 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode); 2596 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
2765 if (dchild->d_inode) { 2597 if (dchild->d_inode)
2766 child_inode = igrab(dchild->d_inode); 2598 child_inode = igrab(dchild->d_inode);
2767 if (child_inode) {
2768 inode_lock(child_inode);
2769 op_data->op_fid3 = *ll_inode2fid(child_inode);
2770 ll_invalidate_aliases(child_inode);
2771 }
2772 }
2773 dput(dchild); 2599 dput(dchild);
2774 } else { 2600 }
2601
2602 if (!child_inode) {
2775 rc = ll_get_fid_by_name(parent, name, namelen, 2603 rc = ll_get_fid_by_name(parent, name, namelen,
2776 &op_data->op_fid3); 2604 &op_data->op_fid3, &child_inode);
2777 if (rc) 2605 if (rc)
2778 goto out_free; 2606 goto out_free;
2779 } 2607 }
2780 2608
2609 if (!child_inode) {
2610 rc = -EINVAL;
2611 goto out_free;
2612 }
2613
2614 inode_lock(child_inode);
2615 op_data->op_fid3 = *ll_inode2fid(child_inode);
2781 if (!fid_is_sane(&op_data->op_fid3)) { 2616 if (!fid_is_sane(&op_data->op_fid3)) {
2782 CERROR("%s: migrate %s, but fid "DFID" is insane\n", 2617 CERROR("%s: migrate %s, but fid "DFID" is insane\n",
2783 ll_get_fsname(parent->i_sb, NULL, 0), name, 2618 ll_get_fsname(parent->i_sb, NULL, 0), name,
@@ -2796,6 +2631,26 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
2796 rc = 0; 2631 rc = 0;
2797 goto out_free; 2632 goto out_free;
2798 } 2633 }
2634again:
2635 if (S_ISREG(child_inode->i_mode)) {
2636 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
2637 if (IS_ERR(och)) {
2638 rc = PTR_ERR(och);
2639 och = NULL;
2640 goto out_free;
2641 }
2642
2643 rc = ll_data_version(child_inode, &data_version,
2644 LL_DV_WR_FLUSH);
2645 if (rc)
2646 goto out_free;
2647
2648 op_data->op_handle = och->och_fh;
2649 op_data->op_data = och->och_mod;
2650 op_data->op_data_version = data_version;
2651 op_data->op_lease_handle = och->och_lease_handle;
2652 op_data->op_bias |= MDS_RENAME_MIGRATE;
2653 }
2799 2654
2800 op_data->op_mds = mdtidx; 2655 op_data->op_mds = mdtidx;
2801 op_data->op_cli_flags = CLI_MIGRATE; 2656 op_data->op_cli_flags = CLI_MIGRATE;
@@ -2804,10 +2659,32 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
2804 if (!rc) 2659 if (!rc)
2805 ll_update_times(request, parent); 2660 ll_update_times(request, parent);
2806 2661
2807 ptlrpc_req_finished(request); 2662 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
2663 if (!body) {
2664 rc = -EPROTO;
2665 goto out_free;
2666 }
2667
2668 /*
2669 * If the server does release the layout lock, then we clean up
2670 * the client och here, otherwise release it in out_free:
2671 */
2672 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
2673 obd_mod_put(och->och_mod);
2674 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, och);
2675 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
2676 kfree(och);
2677 och = NULL;
2678 }
2808 2679
2680 ptlrpc_req_finished(request);
2681 /* Try again if the file layout has changed. */
2682 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
2683 goto again;
2809out_free: 2684out_free:
2810 if (child_inode) { 2685 if (child_inode) {
2686 if (och) /* close the file */
2687 ll_lease_close(och, child_inode, NULL);
2811 clear_nlink(child_inode); 2688 clear_nlink(child_inode);
2812 inode_unlock(child_inode); 2689 inode_unlock(child_inode);
2813 iput(child_inode); 2690 iput(child_inode);
@@ -2837,7 +2714,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
2837 enum ldlm_mode l_req_mode) 2714 enum ldlm_mode l_req_mode)
2838{ 2715{
2839 struct lustre_handle lockh; 2716 struct lustre_handle lockh;
2840 ldlm_policy_data_t policy; 2717 union ldlm_policy_data policy;
2841 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? 2718 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
2842 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode; 2719 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
2843 struct lu_fid *fid; 2720 struct lu_fid *fid;
@@ -2878,7 +2755,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
2878 struct lustre_handle *lockh, __u64 flags, 2755 struct lustre_handle *lockh, __u64 flags,
2879 enum ldlm_mode mode) 2756 enum ldlm_mode mode)
2880{ 2757{
2881 ldlm_policy_data_t policy = { .l_inodebits = {bits} }; 2758 union ldlm_policy_data policy = { .l_inodebits = { bits } };
2882 struct lu_fid *fid; 2759 struct lu_fid *fid;
2883 2760
2884 fid = &ll_i2info(inode)->lli_fid; 2761 fid = &ll_i2info(inode)->lli_fid;
@@ -2893,6 +2770,13 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
2893 /* Already unlinked. Just update nlink and return success */ 2770 /* Already unlinked. Just update nlink and return success */
2894 if (rc == -ENOENT) { 2771 if (rc == -ENOENT) {
2895 clear_nlink(inode); 2772 clear_nlink(inode);
2773 /* If it is a striped directory and there is a bad stripe,
2774 * let's revalidate the dentry again instead of returning an
2775 * error
2776 */
2777 if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md)
2778 return 0;
2779
2896 /* This path cannot be hit for regular files unless in 2780 /* This path cannot be hit for regular files unless in
2897 * case of obscure races, so no need to validate size. 2781 * case of obscure races, so no need to validate size.
2898 */ 2782 */
@@ -3040,6 +2924,8 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
3040 LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime; 2924 LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
3041 LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime; 2925 LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
3042 } else { 2926 } else {
2927 struct ll_inode_info *lli = ll_i2info(inode);
2928
3043 /* In case of restore, the MDT has the right size and has 2929 /* In case of restore, the MDT has the right size and has
3044 * already send it back without granting the layout lock, 2930 * already send it back without granting the layout lock,
3045 * inode is up-to-date so glimpse is useless. 2931 * inode is up-to-date so glimpse is useless.
@@ -3047,7 +2933,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
3047 * restore the MDT holds the layout lock so the glimpse will 2933 * restore the MDT holds the layout lock so the glimpse will
3048 * block up to the end of restore (getattr will block) 2934 * block up to the end of restore (getattr will block)
3049 */ 2935 */
3050 if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING)) 2936 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
3051 rc = ll_glimpse_size(inode); 2937 rc = ll_glimpse_size(inode);
3052 } 2938 }
3053 return rc; 2939 return rc;
@@ -3095,13 +2981,12 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3095{ 2981{
3096 int rc; 2982 int rc;
3097 size_t num_bytes; 2983 size_t num_bytes;
3098 struct ll_user_fiemap *fiemap; 2984 struct fiemap *fiemap;
3099 unsigned int extent_count = fieinfo->fi_extents_max; 2985 unsigned int extent_count = fieinfo->fi_extents_max;
3100 2986
3101 num_bytes = sizeof(*fiemap) + (extent_count * 2987 num_bytes = sizeof(*fiemap) + (extent_count *
3102 sizeof(struct ll_fiemap_extent)); 2988 sizeof(struct fiemap_extent));
3103 fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS); 2989 fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
3104
3105 if (!fiemap) 2990 if (!fiemap)
3106 return -ENOMEM; 2991 return -ENOMEM;
3107 2992
@@ -3109,9 +2994,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3109 fiemap->fm_extent_count = fieinfo->fi_extents_max; 2994 fiemap->fm_extent_count = fieinfo->fi_extents_max;
3110 fiemap->fm_start = start; 2995 fiemap->fm_start = start;
3111 fiemap->fm_length = len; 2996 fiemap->fm_length = len;
2997
3112 if (extent_count > 0 && 2998 if (extent_count > 0 &&
3113 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, 2999 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
3114 sizeof(struct ll_fiemap_extent)) != 0) { 3000 sizeof(struct fiemap_extent))) {
3115 rc = -EFAULT; 3001 rc = -EFAULT;
3116 goto out; 3002 goto out;
3117 } 3003 }
@@ -3123,11 +3009,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3123 if (extent_count > 0 && 3009 if (extent_count > 0 &&
3124 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], 3010 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
3125 fiemap->fm_mapped_extents * 3011 fiemap->fm_mapped_extents *
3126 sizeof(struct ll_fiemap_extent)) != 0) { 3012 sizeof(struct fiemap_extent))) {
3127 rc = -EFAULT; 3013 rc = -EFAULT;
3128 goto out; 3014 goto out;
3129 } 3015 }
3130
3131out: 3016out:
3132 kvfree(fiemap); 3017 kvfree(fiemap);
3133 return rc; 3018 return rc;
@@ -3370,35 +3255,50 @@ ll_iocontrol_call(struct inode *inode, struct file *file,
3370int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) 3255int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
3371{ 3256{
3372 struct ll_inode_info *lli = ll_i2info(inode); 3257 struct ll_inode_info *lli = ll_i2info(inode);
3373 struct cl_env_nest nest; 3258 struct cl_object *obj = lli->lli_clob;
3374 struct lu_env *env; 3259 struct lu_env *env;
3375 int result; 3260 int rc;
3261 int refcheck;
3376 3262
3377 if (!lli->lli_clob) 3263 if (!obj)
3378 return 0; 3264 return 0;
3379 3265
3380 env = cl_env_nested_get(&nest); 3266 env = cl_env_get(&refcheck);
3381 if (IS_ERR(env)) 3267 if (IS_ERR(env))
3382 return PTR_ERR(env); 3268 return PTR_ERR(env);
3383 3269
3384 result = cl_conf_set(env, lli->lli_clob, conf); 3270 rc = cl_conf_set(env, obj, conf);
3385 cl_env_nested_put(&nest, env); 3271 if (rc < 0)
3272 goto out;
3386 3273
3387 if (conf->coc_opc == OBJECT_CONF_SET) { 3274 if (conf->coc_opc == OBJECT_CONF_SET) {
3388 struct ldlm_lock *lock = conf->coc_lock; 3275 struct ldlm_lock *lock = conf->coc_lock;
3276 struct cl_layout cl = {
3277 .cl_layout_gen = 0,
3278 };
3389 3279
3390 LASSERT(lock); 3280 LASSERT(lock);
3391 LASSERT(ldlm_has_layout(lock)); 3281 LASSERT(ldlm_has_layout(lock));
3392 if (result == 0) { 3282
3393 /* it can only be allowed to match after layout is 3283 /* it can only be allowed to match after layout is
3394 * applied to inode otherwise false layout would be 3284 * applied to inode otherwise false layout would be
3395 * seen. Applying layout should happen before dropping 3285 * seen. Applying layout should happen before dropping
3396 * the intent lock. 3286 * the intent lock.
3397 */ 3287 */
3398 ldlm_lock_allow_match(lock); 3288 ldlm_lock_allow_match(lock);
3399 } 3289
3290 rc = cl_object_layout_get(env, obj, &cl);
3291 if (rc < 0)
3292 goto out;
3293
3294 CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
3295 PFID(&lli->lli_fid), ll_layout_version_get(lli),
3296 cl.cl_layout_gen);
3297 ll_layout_version_set(lli, cl.cl_layout_gen);
3400 } 3298 }
3401 return result; 3299out:
3300 cl_env_put(env, &refcheck);
3301 return rc;
3402} 3302}
3403 3303
3404/* Fetch layout from MDT with getxattr request, if it's not ready yet */ 3304/* Fetch layout from MDT with getxattr request, if it's not ready yet */
@@ -3477,12 +3377,11 @@ out:
3477 * in this function. 3377 * in this function.
3478 */ 3378 */
3479static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, 3379static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
3480 struct inode *inode, __u32 *gen, bool reconf) 3380 struct inode *inode)
3481{ 3381{
3482 struct ll_inode_info *lli = ll_i2info(inode); 3382 struct ll_inode_info *lli = ll_i2info(inode);
3483 struct ll_sb_info *sbi = ll_i2sbi(inode); 3383 struct ll_sb_info *sbi = ll_i2sbi(inode);
3484 struct ldlm_lock *lock; 3384 struct ldlm_lock *lock;
3485 struct lustre_md md = { NULL };
3486 struct cl_object_conf conf; 3385 struct cl_object_conf conf;
3487 int rc = 0; 3386 int rc = 0;
3488 bool lvb_ready; 3387 bool lvb_ready;
@@ -3494,8 +3393,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
3494 LASSERT(lock); 3393 LASSERT(lock);
3495 LASSERT(ldlm_has_layout(lock)); 3394 LASSERT(ldlm_has_layout(lock));
3496 3395
3497 LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d", 3396 LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured",
3498 PFID(&lli->lli_fid), inode, reconf); 3397 PFID(&lli->lli_fid), inode);
3499 3398
3500 /* in case this is a caching lock and reinstate with new inode */ 3399 /* in case this is a caching lock and reinstate with new inode */
3501 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL); 3400 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
@@ -3506,15 +3405,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
3506 /* checking lvb_ready is racy but this is okay. The worst case is 3405 /* checking lvb_ready is racy but this is okay. The worst case is
3507 * that multi processes may configure the file on the same time. 3406 * that multi processes may configure the file on the same time.
3508 */ 3407 */
3509 if (lvb_ready || !reconf) { 3408 if (lvb_ready) {
3510 rc = -ENODATA; 3409 rc = 0;
3511 if (lvb_ready) {
3512 /* layout_gen must be valid if layout lock is not
3513 * cancelled and stripe has already set
3514 */
3515 *gen = ll_layout_version_get(lli);
3516 rc = 0;
3517 }
3518 goto out; 3410 goto out;
3519 } 3411 }
3520 3412
@@ -3524,39 +3416,19 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
3524 3416
3525 /* for layout lock, lmm is returned in lock's lvb. 3417 /* for layout lock, lmm is returned in lock's lvb.
3526 * lvb_data is immutable if the lock is held so it's safe to access it 3418 * lvb_data is immutable if the lock is held so it's safe to access it
3527 * without res lock. See the description in ldlm_lock_decref_internal() 3419 * without res lock.
3528 * for the condition to free lvb_data of layout lock 3420 *
3529 */ 3421 * set layout to file. Unlikely this will fail as old layout was
3530 if (lock->l_lvb_data) {
3531 rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
3532 lock->l_lvb_data, lock->l_lvb_len);
3533 if (rc >= 0) {
3534 *gen = LL_LAYOUT_GEN_EMPTY;
3535 if (md.lsm)
3536 *gen = md.lsm->lsm_layout_gen;
3537 rc = 0;
3538 } else {
3539 CERROR("%s: file " DFID " unpackmd error: %d\n",
3540 ll_get_fsname(inode->i_sb, NULL, 0),
3541 PFID(&lli->lli_fid), rc);
3542 }
3543 }
3544 if (rc < 0)
3545 goto out;
3546
3547 /* set layout to file. Unlikely this will fail as old layout was
3548 * surely eliminated 3422 * surely eliminated
3549 */ 3423 */
3550 memset(&conf, 0, sizeof(conf)); 3424 memset(&conf, 0, sizeof(conf));
3551 conf.coc_opc = OBJECT_CONF_SET; 3425 conf.coc_opc = OBJECT_CONF_SET;
3552 conf.coc_inode = inode; 3426 conf.coc_inode = inode;
3553 conf.coc_lock = lock; 3427 conf.coc_lock = lock;
3554 conf.u.coc_md = &md; 3428 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
3429 conf.u.coc_layout.lb_len = lock->l_lvb_len;
3555 rc = ll_layout_conf(inode, &conf); 3430 rc = ll_layout_conf(inode, &conf);
3556 3431
3557 if (md.lsm)
3558 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
3559
3560 /* refresh layout failed, need to wait */ 3432 /* refresh layout failed, need to wait */
3561 wait_layout = rc == -EBUSY; 3433 wait_layout = rc == -EBUSY;
3562 3434
@@ -3584,20 +3456,7 @@ out:
3584 return rc; 3456 return rc;
3585} 3457}
3586 3458
3587/** 3459static int ll_layout_refresh_locked(struct inode *inode)
3588 * This function checks if there exists a LAYOUT lock on the client side,
3589 * or enqueues it if it doesn't have one in cache.
3590 *
3591 * This function will not hold layout lock so it may be revoked any time after
3592 * this function returns. Any operations depend on layout should be redone
3593 * in that case.
3594 *
3595 * This function should be called before lov_io_init() to get an uptodate
3596 * layout version, the caller should save the version number and after IO
3597 * is finished, this function should be called again to verify that layout
3598 * is not changed during IO time.
3599 */
3600int ll_layout_refresh(struct inode *inode, __u32 *gen)
3601{ 3460{
3602 struct ll_inode_info *lli = ll_i2info(inode); 3461 struct ll_inode_info *lli = ll_i2info(inode);
3603 struct ll_sb_info *sbi = ll_i2sbi(inode); 3462 struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -3613,17 +3472,6 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
3613 }; 3472 };
3614 int rc; 3473 int rc;
3615 3474
3616 *gen = ll_layout_version_get(lli);
3617 if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE)
3618 return 0;
3619
3620 /* sanity checks */
3621 LASSERT(fid_is_sane(ll_inode2fid(inode)));
3622 LASSERT(S_ISREG(inode->i_mode));
3623
3624 /* take layout lock mutex to enqueue layout lock exclusively. */
3625 mutex_lock(&lli->lli_layout_mutex);
3626
3627again: 3475again:
3628 /* mostly layout lock is caching on the local side, so try to match 3476 /* mostly layout lock is caching on the local side, so try to match
3629 * it before grabbing layout lock mutex. 3477 * it before grabbing layout lock mutex.
@@ -3631,20 +3479,16 @@ again:
3631 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0, 3479 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
3632 LCK_CR | LCK_CW | LCK_PR | LCK_PW); 3480 LCK_CR | LCK_CW | LCK_PR | LCK_PW);
3633 if (mode != 0) { /* hit cached lock */ 3481 if (mode != 0) { /* hit cached lock */
3634 rc = ll_layout_lock_set(&lockh, mode, inode, gen, true); 3482 rc = ll_layout_lock_set(&lockh, mode, inode);
3635 if (rc == -EAGAIN) 3483 if (rc == -EAGAIN)
3636 goto again; 3484 goto again;
3637
3638 mutex_unlock(&lli->lli_layout_mutex);
3639 return rc; 3485 return rc;
3640 } 3486 }
3641 3487
3642 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 3488 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
3643 0, 0, LUSTRE_OPC_ANY, NULL); 3489 0, 0, LUSTRE_OPC_ANY, NULL);
3644 if (IS_ERR(op_data)) { 3490 if (IS_ERR(op_data))
3645 mutex_unlock(&lli->lli_layout_mutex);
3646 return PTR_ERR(op_data); 3491 return PTR_ERR(op_data);
3647 }
3648 3492
3649 /* have to enqueue one */ 3493 /* have to enqueue one */
3650 memset(&it, 0, sizeof(it)); 3494 memset(&it, 0, sizeof(it));
@@ -3668,10 +3512,50 @@ again:
3668 if (rc == 0) { 3512 if (rc == 0) {
3669 /* set lock data in case this is a new lock */ 3513 /* set lock data in case this is a new lock */
3670 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL); 3514 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
3671 rc = ll_layout_lock_set(&lockh, mode, inode, gen, true); 3515 rc = ll_layout_lock_set(&lockh, mode, inode);
3672 if (rc == -EAGAIN) 3516 if (rc == -EAGAIN)
3673 goto again; 3517 goto again;
3674 } 3518 }
3519
3520 return rc;
3521}
3522
3523/**
3524 * This function checks if there exists a LAYOUT lock on the client side,
3525 * or enqueues it if it doesn't have one in cache.
3526 *
3527 * This function will not hold layout lock so it may be revoked any time after
3528 * this function returns. Any operations depend on layout should be redone
3529 * in that case.
3530 *
3531 * This function should be called before lov_io_init() to get an uptodate
3532 * layout version, the caller should save the version number and after IO
3533 * is finished, this function should be called again to verify that layout
3534 * is not changed during IO time.
3535 */
3536int ll_layout_refresh(struct inode *inode, __u32 *gen)
3537{
3538 struct ll_inode_info *lli = ll_i2info(inode);
3539 struct ll_sb_info *sbi = ll_i2sbi(inode);
3540 int rc;
3541
3542 *gen = ll_layout_version_get(lli);
3543 if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
3544 return 0;
3545
3546 /* sanity checks */
3547 LASSERT(fid_is_sane(ll_inode2fid(inode)));
3548 LASSERT(S_ISREG(inode->i_mode));
3549
3550 /* take layout lock mutex to enqueue layout lock exclusively. */
3551 mutex_lock(&lli->lli_layout_mutex);
3552
3553 rc = ll_layout_refresh_locked(inode);
3554 if (rc < 0)
3555 goto out;
3556
3557 *gen = ll_layout_version_get(lli);
3558out:
3675 mutex_unlock(&lli->lli_layout_mutex); 3559 mutex_unlock(&lli->lli_layout_mutex);
3676 3560
3677 return rc; 3561 return rc;
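
The kernel-doc above describes a before/after calling contract for ll_layout_refresh(): sample the layout generation before lov_io_init(), run the IO, then refresh again and compare the generations. A minimal sketch of a caller following that contract (illustration only, not part of the patch; the IO step and the -ESTALE choice are placeholders):

    static int layout_guarded_io_example(struct inode *inode)
    {
    	__u32 gen_before, gen_after;
    	int rc;

    	/* get an up-to-date layout generation before starting the IO */
    	rc = ll_layout_refresh(inode, &gen_before);
    	if (rc)
    		return rc;

    	/* ... lov_io_init() and the layout-dependent IO would run here ... */

    	/* verify the layout did not change while the IO was in flight */
    	rc = ll_layout_refresh(inode, &gen_after);
    	if (!rc && gen_after != gen_before)
    		rc = -ESTALE;	/* placeholder: redo the layout-dependent work */

    	return rc;
    }
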
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 22507b9c6d69..504498de536e 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -80,69 +80,60 @@ blkcnt_t dirty_cnt(struct inode *inode)
80int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, 80int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
81 struct inode *inode, struct cl_object *clob, int agl) 81 struct inode *inode, struct cl_object *clob, int agl)
82{ 82{
83 struct ll_inode_info *lli = ll_i2info(inode);
84 const struct lu_fid *fid = lu_object_fid(&clob->co_lu); 83 const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
85 int result; 84 struct cl_lock *lock = vvp_env_lock(env);
85 struct cl_lock_descr *descr = &lock->cll_descr;
86 int result = 0;
87
88 CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
89
90 /* NOTE: this looks like DLM lock request, but it may
91 * not be one. Due to CEF_ASYNC flag (translated
92 * to LDLM_FL_HAS_INTENT by osc), this is
93 * glimpse request, that won't revoke any
94 * conflicting DLM locks held. Instead,
95 * ll_glimpse_callback() will be called on each
96 * client holding a DLM lock against this file,
97 * and resulting size will be returned for each
98 * stripe. DLM lock on [0, EOF] is acquired only
99 * if there were no conflicting locks. If there
100 * were conflicting locks, enqueuing or waiting
101 * fails with -ENAVAIL, but valid inode
102 * attributes are returned anyway.
103 */
104 *descr = whole_file;
105 descr->cld_obj = clob;
106 descr->cld_mode = CLM_READ;
107 descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
108 if (agl)
109 descr->cld_enq_flags |= CEF_AGL;
110 /*
111 * CEF_ASYNC is used because glimpse sub-locks cannot
112 * deadlock (because they never conflict with other
113 * locks) and, hence, can be enqueued out-of-order.
114 *
115 * CEF_MUST protects glimpse lock from conversion into
116 * a lockless mode.
117 */
118 result = cl_lock_request(env, io, lock);
119 if (result < 0)
120 return result;
86 121
87 result = 0; 122 if (!agl) {
88 if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) { 123 ll_merge_attr(env, inode);
89 CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid)); 124 if (i_size_read(inode) > 0 && !inode->i_blocks) {
90 if (lli->lli_has_smd) {
91 struct cl_lock *lock = vvp_env_lock(env);
92 struct cl_lock_descr *descr = &lock->cll_descr;
93
94 /* NOTE: this looks like DLM lock request, but it may
95 * not be one. Due to CEF_ASYNC flag (translated
96 * to LDLM_FL_HAS_INTENT by osc), this is
97 * glimpse request, that won't revoke any
98 * conflicting DLM locks held. Instead,
99 * ll_glimpse_callback() will be called on each
100 * client holding a DLM lock against this file,
101 * and resulting size will be returned for each
102 * stripe. DLM lock on [0, EOF] is acquired only
103 * if there were no conflicting locks. If there
104 * were conflicting locks, enqueuing or waiting
105 * fails with -ENAVAIL, but valid inode
106 * attributes are returned anyway.
107 */
108 *descr = whole_file;
109 descr->cld_obj = clob;
110 descr->cld_mode = CLM_READ;
111 descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
112 if (agl)
113 descr->cld_enq_flags |= CEF_AGL;
114 /* 125 /*
115 * CEF_ASYNC is used because glimpse sub-locks cannot 126 * LU-417: Add dirty pages block count
116 * deadlock (because they never conflict with other 127 * lest i_blocks reports 0, some "cp" or
117 * locks) and, hence, can be enqueued out-of-order. 128 * "tar" may think it's a completely
118 * 129 * sparse file and skip it.
119 * CEF_MUST protects glimpse lock from conversion into
120 * a lockless mode.
121 */ 130 */
122 result = cl_lock_request(env, io, lock); 131 inode->i_blocks = dirty_cnt(inode);
123 if (result < 0)
124 return result;
125
126 if (!agl) {
127 ll_merge_attr(env, inode);
128 if (i_size_read(inode) > 0 &&
129 inode->i_blocks == 0) {
130 /*
131 * LU-417: Add dirty pages block count
132 * lest i_blocks reports 0, some "cp" or
133 * "tar" may think it's a completely
134 * sparse file and skip it.
135 */
136 inode->i_blocks = dirty_cnt(inode);
137 }
138 }
139 cl_lock_release(env, lock);
140 } else {
141 CDEBUG(D_DLMTRACE, "No objects for inode\n");
142 ll_merge_attr(env, inode);
143 } 132 }
144 } 133 }
145 134
135 cl_lock_release(env, lock);
136
146 return result; 137 return result;
147} 138}
148 139
@@ -212,39 +203,3 @@ again:
212 } 203 }
213 return result; 204 return result;
214} 205}
215
216int cl_local_size(struct inode *inode)
217{
218 struct lu_env *env = NULL;
219 struct cl_io *io = NULL;
220 struct cl_object *clob;
221 int result;
222 int refcheck;
223
224 if (!ll_i2info(inode)->lli_has_smd)
225 return 0;
226
227 result = cl_io_get(inode, &env, &io, &refcheck);
228 if (result <= 0)
229 return result;
230
231 clob = io->ci_obj;
232 result = cl_io_init(env, io, CIT_MISC, clob);
233 if (result > 0) {
234 result = io->ci_result;
235 } else if (result == 0) {
236 struct cl_lock *lock = vvp_env_lock(env);
237
238 lock->cll_descr = whole_file;
239 lock->cll_descr.cld_enq_flags = CEF_PEEK;
240 lock->cll_descr.cld_obj = clob;
241 result = cl_lock_request(env, io, lock);
242 if (result == 0) {
243 ll_merge_attr(env, inode);
244 cl_lock_release(env, lock);
245 }
246 }
247 cl_io_fini(env, io);
248 cl_env_put(env, &refcheck);
249 return result;
250}
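
The LU-417 comment in the hunk above explains the i_blocks fix-up: a freshly written file may still hold all of its data in dirty page cache, so it reports a non-zero size but i_blocks == 0, and tools such as cp or tar treat that as a fully sparse file and skip the data. A compact restatement of that check (illustration only, mirroring the new cl_glimpse_lock() body):

    if (!agl) {
    	ll_merge_attr(env, inode);
    	/* LU-417: data may still be dirty in the page cache, so i_blocks
    	 * can read 0 even though the size is non-zero; report the dirty
    	 * page count instead so sparse-aware tools do not skip the file.
    	 */
    	if (i_size_read(inode) > 0 && !inode->i_blocks)
    		inode->i_blocks = dirty_cnt(inode);
    }
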
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 084330d08f7a..dd1cfd8f5213 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -80,7 +80,8 @@ int cl_inode_fini_refcheck;
80 */ 80 */
81static DEFINE_MUTEX(cl_inode_fini_guard); 81static DEFINE_MUTEX(cl_inode_fini_guard);
82 82
83int cl_setattr_ost(struct inode *inode, const struct iattr *attr) 83int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
84 unsigned int attr_flags)
84{ 85{
85 struct lu_env *env; 86 struct lu_env *env;
86 struct cl_io *io; 87 struct cl_io *io;
@@ -92,14 +93,15 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
92 return PTR_ERR(env); 93 return PTR_ERR(env);
93 94
94 io = vvp_env_thread_io(env); 95 io = vvp_env_thread_io(env);
95 io->ci_obj = ll_i2info(inode)->lli_clob; 96 io->ci_obj = obj;
96 97
97 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime); 98 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
98 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime); 99 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
99 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime); 100 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
100 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size; 101 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
102 io->u.ci_setattr.sa_attr_flags = attr_flags;
101 io->u.ci_setattr.sa_valid = attr->ia_valid; 103 io->u.ci_setattr.sa_valid = attr->ia_valid;
102 io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode); 104 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
103 105
104again: 106again:
105 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { 107 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
@@ -148,7 +150,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
148 struct cl_object_conf conf = { 150 struct cl_object_conf conf = {
149 .coc_inode = inode, 151 .coc_inode = inode,
150 .u = { 152 .u = {
151 .coc_md = md 153 .coc_layout = md->layout,
152 } 154 }
153 }; 155 };
154 int result = 0; 156 int result = 0;
@@ -182,7 +184,6 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
182 * locked by I_NEW bit. 184 * locked by I_NEW bit.
183 */ 185 */
184 lli->lli_clob = clob; 186 lli->lli_clob = clob;
185 lli->lli_has_smd = lsm_has_objects(md->lsm);
186 lu_object_ref_add(&clob->co_lu, "inode", inode); 187 lu_object_ref_add(&clob->co_lu, "inode", inode);
187 } else { 188 } else {
188 result = PTR_ERR(clob); 189 result = PTR_ERR(clob);
@@ -245,15 +246,11 @@ void cl_inode_fini(struct inode *inode)
245 int emergency; 246 int emergency;
246 247
247 if (clob) { 248 if (clob) {
248 void *cookie;
249
250 cookie = cl_env_reenter();
251 env = cl_env_get(&refcheck); 249 env = cl_env_get(&refcheck);
252 emergency = IS_ERR(env); 250 emergency = IS_ERR(env);
253 if (emergency) { 251 if (emergency) {
254 mutex_lock(&cl_inode_fini_guard); 252 mutex_lock(&cl_inode_fini_guard);
255 LASSERT(cl_inode_fini_env); 253 LASSERT(cl_inode_fini_env);
256 cl_env_implant(cl_inode_fini_env, &refcheck);
257 env = cl_inode_fini_env; 254 env = cl_inode_fini_env;
258 } 255 }
259 /* 256 /*
@@ -265,13 +262,10 @@ void cl_inode_fini(struct inode *inode)
265 lu_object_ref_del(&clob->co_lu, "inode", inode); 262 lu_object_ref_del(&clob->co_lu, "inode", inode);
266 cl_object_put_last(env, clob); 263 cl_object_put_last(env, clob);
267 lli->lli_clob = NULL; 264 lli->lli_clob = NULL;
268 if (emergency) { 265 if (emergency)
269 cl_env_unplant(cl_inode_fini_env, &refcheck);
270 mutex_unlock(&cl_inode_fini_guard); 266 mutex_unlock(&cl_inode_fini_guard);
271 } else { 267 else
272 cl_env_put(env, &refcheck); 268 cl_env_put(env, &refcheck);
273 }
274 cl_env_reexit(cookie);
275 } 269 }
276} 270}
277 271
@@ -302,22 +296,3 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
302 gen = fid_flatten(fid) >> 32; 296 gen = fid_flatten(fid) >> 32;
303 return gen; 297 return gen;
304} 298}
305
306/* lsm is unreliable after hsm implementation as layout can be changed at
307 * any time. This is only to support old, non-clio-ized interfaces. It will
308 * cause deadlock if clio operations are called with this extra layout refcount
309 * because in case the layout changed during the IO, ll_layout_refresh() will
310 * have to wait for the refcount to become zero to destroy the older layout.
311 *
312 * Notice that the lsm returned by this function may not be valid unless called
313 * inside layout lock - MDS_INODELOCK_LAYOUT.
314 */
315struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
316{
317 return lov_lsm_get(ll_i2info(inode)->lli_clob);
318}
319
320inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
321{
322 lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
323}
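
With the new prototype above, cl_setattr_ost() takes the cl_object and an explicit attr_flags word instead of deriving both from the inode, so call sites that used to pass an inode now pass lli_clob themselves. A hedged sketch of an adjusted call site (the wrapper is hypothetical; only the cl_setattr_ost() call reflects the new signature):

    static int example_setattr_on_ost(struct inode *inode, struct iattr *attr,
    				  unsigned int attr_flags)
    {
    	struct cl_object *obj = ll_i2info(inode)->lli_clob;

    	/* old: cl_setattr_ost(inode, attr);
    	 * new: object and attribute flags are passed explicitly
    	 */
    	return cl_setattr_ost(obj, attr, attr_flags);
    }
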
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index fb346c12dad2..f48660ed350f 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -47,36 +47,29 @@
47 */ 47 */
48int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) 48int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
49{ 49{
50 struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 }; 50 u32 val_size, max_easize, def_easize;
51 __u32 valsize = sizeof(struct lov_desc); 51 int rc;
52 int rc, easize, def_easize, cookiesize; 52
53 struct lov_desc desc; 53 val_size = sizeof(max_easize);
54 __u16 stripes, def_stripes; 54 rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
55 55 &val_size, &max_easize);
56 rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
57 &valsize, &desc, NULL);
58 if (rc) 56 if (rc)
59 return rc; 57 return rc;
60 58
61 stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT); 59 val_size = sizeof(def_easize);
62 lsm.lsm_stripe_count = stripes; 60 rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE),
63 easize = obd_size_diskmd(dt_exp, &lsm); 61 KEY_DEFAULT_EASIZE, &val_size, &def_easize);
64 62 if (rc)
65 def_stripes = min_t(__u32, desc.ld_default_stripe_count, 63 return rc;
66 LOV_MAX_STRIPE_COUNT);
67 lsm.lsm_stripe_count = def_stripes;
68 def_easize = obd_size_diskmd(dt_exp, &lsm);
69
70 cookiesize = stripes * sizeof(struct llog_cookie);
71 64
72 /* default cookiesize is 0 because from 2.4 server doesn't send 65 /*
66 * default cookiesize is 0 because from 2.4 server doesn't send
73 * llog cookies to client. 67 * llog cookies to client.
74 */ 68 */
75 CDEBUG(D_HA, 69 CDEBUG(D_HA, "updating def/max_easize: %d/%d\n",
76 "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n", 70 def_easize, max_easize);
77 def_easize, easize, cookiesize);
78 71
79 rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0); 72 rc = md_init_ea_size(md_exp, max_easize, def_easize);
80 return rc; 73 return rc;
81} 74}
82 75
@@ -169,13 +162,11 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
169 return rc; 162 return rc;
170 } 163 }
171 164
172 cg->lg_env = cl_env_get(&refcheck); 165 cg->lg_env = env;
173 cg->lg_io = io; 166 cg->lg_io = io;
174 cg->lg_lock = lock; 167 cg->lg_lock = lock;
175 cg->lg_gid = gid; 168 cg->lg_gid = gid;
176 LASSERT(cg->lg_env == env);
177 169
178 cl_env_unplant(env, &refcheck);
179 return 0; 170 return 0;
180} 171}
181 172
@@ -184,14 +175,10 @@ void cl_put_grouplock(struct ll_grouplock *cg)
184 struct lu_env *env = cg->lg_env; 175 struct lu_env *env = cg->lg_env;
185 struct cl_io *io = cg->lg_io; 176 struct cl_io *io = cg->lg_io;
186 struct cl_lock *lock = cg->lg_lock; 177 struct cl_lock *lock = cg->lg_lock;
187 int refcheck;
188 178
189 LASSERT(cg->lg_env); 179 LASSERT(cg->lg_env);
190 LASSERT(cg->lg_gid); 180 LASSERT(cg->lg_gid);
191 181
192 cl_env_implant(env, &refcheck);
193 cl_env_put(env, &refcheck);
194
195 cl_lock_release(env, lock); 182 cl_lock_release(env, lock);
196 cl_io_fini(env, io); 183 cl_io_fini(env, io);
197 cl_env_put(env, NULL); 184 cl_env_put(env, NULL);
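
cl_init_ea_size() above (and ll_get_max_mdsize() further down in llite_lib.c) now obtains the EA sizes with plain obd_get_info() key queries instead of sizing a dummy lov_stripe_md. The shared query pattern, pulled out as a sketch (illustration only; error handling as in the hunks):

    u32 val_size, max_easize;
    int rc;

    /* pass the key length, the key, and an in/out value size */
    val_size = sizeof(max_easize);
    rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
    		  &val_size, &max_easize);
    if (rc)
    	return rc;
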
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
deleted file mode 100644
index 8644631bf2ba..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ /dev/null
@@ -1,395 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2012, Intel Corporation.
27 */
28/*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lustre/llite/llite_close.c
33 *
34 * Lustre Lite routines to issue a secondary close after writeback
35 */
36
37#include <linux/module.h>
38
39#define DEBUG_SUBSYSTEM S_LLITE
40
41#include "llite_internal.h"
42
43/** records that a write is in flight */
44void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
45{
46 struct ll_inode_info *lli = ll_i2info(club->vob_inode);
47
48 spin_lock(&lli->lli_lock);
49 lli->lli_flags |= LLIF_SOM_DIRTY;
50 if (page && list_empty(&page->vpg_pending_linkage))
51 list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
52 spin_unlock(&lli->lli_lock);
53}
54
55/** records that a write has completed */
56void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
57{
58 struct ll_inode_info *lli = ll_i2info(club->vob_inode);
59 int rc = 0;
60
61 spin_lock(&lli->lli_lock);
62 if (page && !list_empty(&page->vpg_pending_linkage)) {
63 list_del_init(&page->vpg_pending_linkage);
64 rc = 1;
65 }
66 spin_unlock(&lli->lli_lock);
67 if (rc)
68 ll_queue_done_writing(club->vob_inode, 0);
69}
70
71/** Queues DONE_WRITING if
72 * - done writing is allowed;
73 * - inode has no no dirty pages;
74 */
75void ll_queue_done_writing(struct inode *inode, unsigned long flags)
76{
77 struct ll_inode_info *lli = ll_i2info(inode);
78 struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
79
80 spin_lock(&lli->lli_lock);
81 lli->lli_flags |= flags;
82
83 if ((lli->lli_flags & LLIF_DONE_WRITING) &&
84 list_empty(&club->vob_pending_list)) {
85 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
86
87 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
88 CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
89 ll_get_fsname(inode->i_sb, NULL, 0),
90 PFID(ll_inode2fid(inode)), lli->lli_flags);
91 /* DONE_WRITING is allowed and inode has no dirty page. */
92 spin_lock(&lcq->lcq_lock);
93
94 LASSERT(list_empty(&lli->lli_close_list));
95 CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
96 PFID(ll_inode2fid(inode)));
97 list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
98
99 /* Avoid a concurrent insertion into the close thread queue:
100 * an inode is already in the close thread, open(), write(),
101 * close() happen, epoch is closed as the inode is marked as
102 * LLIF_EPOCH_PENDING. When pages are written inode should not
103 * be inserted into the queue again, clear this flag to avoid
104 * it.
105 */
106 lli->lli_flags &= ~LLIF_DONE_WRITING;
107
108 wake_up(&lcq->lcq_waitq);
109 spin_unlock(&lcq->lcq_lock);
110 }
111 spin_unlock(&lli->lli_lock);
112}
113
114/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
115void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
116{
117 struct ll_inode_info *lli = ll_i2info(inode);
118
119 op_data->op_flags |= MF_SOM_CHANGE;
120 /* Check if Size-on-MDS attributes are valid. */
121 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
122 CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
123 ll_get_fsname(inode->i_sb, NULL, 0),
124 PFID(ll_inode2fid(inode)), lli->lli_flags);
125
126 if (!cl_local_size(inode)) {
127 /* Send Size-on-MDS Attributes if valid. */
128 op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
129 ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
130 }
131}
132
133/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
134void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
135 struct obd_client_handle **och, unsigned long flags)
136{
137 struct ll_inode_info *lli = ll_i2info(inode);
138 struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
139
140 spin_lock(&lli->lli_lock);
141 if (!(list_empty(&club->vob_pending_list))) {
142 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
143 LASSERT(*och);
144 LASSERT(!lli->lli_pending_och);
145 /* Inode is dirty and there is no pending write done
146 * request yet, DONE_WRITE is to be sent later.
147 */
148 lli->lli_flags |= LLIF_EPOCH_PENDING;
149 lli->lli_pending_och = *och;
150 spin_unlock(&lli->lli_lock);
151
152 inode = igrab(inode);
153 LASSERT(inode);
154 goto out;
155 }
156 if (flags & LLIF_DONE_WRITING) {
157 /* Some pages are still dirty, it is early to send
158 * DONE_WRITE. Wait until all pages will be flushed
159 * and try DONE_WRITE again later.
160 */
161 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
162 lli->lli_flags |= LLIF_DONE_WRITING;
163 spin_unlock(&lli->lli_lock);
164
165 inode = igrab(inode);
166 LASSERT(inode);
167 goto out;
168 }
169 }
170 CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
171 ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
172 op_data->op_flags |= MF_EPOCH_CLOSE;
173
174 if (flags & LLIF_DONE_WRITING) {
175 LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
176 LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
177 *och = lli->lli_pending_och;
178 lli->lli_pending_och = NULL;
179 lli->lli_flags &= ~LLIF_EPOCH_PENDING;
180 } else {
181 /* Pack Size-on-MDS inode attributes only if they has changed */
182 if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
183 spin_unlock(&lli->lli_lock);
184 goto out;
185 }
186
187 /* There is a pending DONE_WRITE -- close epoch with no
188 * attribute change.
189 */
190 if (lli->lli_flags & LLIF_EPOCH_PENDING) {
191 spin_unlock(&lli->lli_lock);
192 goto out;
193 }
194 }
195
196 LASSERT(list_empty(&club->vob_pending_list));
197 lli->lli_flags &= ~LLIF_SOM_DIRTY;
198 spin_unlock(&lli->lli_lock);
199 ll_done_writing_attr(inode, op_data);
200
201out:
202 return;
203}
204
205/**
206 * Cliens updates SOM attributes on MDS (including llog cookies):
207 * obd_getattr with no lock and md_setattr.
208 */
209int ll_som_update(struct inode *inode, struct md_op_data *op_data)
210{
211 struct ll_inode_info *lli = ll_i2info(inode);
212 struct ptlrpc_request *request = NULL;
213 __u32 old_flags;
214 struct obdo *oa;
215 int rc;
216
217 LASSERT(op_data);
218 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
219 CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
220 ll_get_fsname(inode->i_sb, NULL, 0),
221 PFID(ll_inode2fid(inode)), lli->lli_flags);
222
223 oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
224 if (!oa) {
225 CERROR("can't allocate memory for Size-on-MDS update.\n");
226 return -ENOMEM;
227 }
228
229 old_flags = op_data->op_flags;
230 op_data->op_flags = MF_SOM_CHANGE;
231
232 /* If inode is already in another epoch, skip getattr from OSTs. */
233 if (lli->lli_ioepoch == op_data->op_ioepoch) {
234 rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
235 old_flags & MF_GETATTR_LOCK);
236 if (rc) {
237 oa->o_valid = 0;
238 if (rc != -ENOENT)
239 CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
240 ll_get_fsname(inode->i_sb, NULL, 0),
241 PFID(ll_inode2fid(inode)), rc);
242 } else {
243 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
244 PFID(&lli->lli_fid));
245 }
246 /* Install attributes into op_data. */
247 md_from_obdo(op_data, oa, oa->o_valid);
248 }
249
250 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
251 NULL, 0, NULL, 0, &request, NULL);
252 ptlrpc_req_finished(request);
253
254 kmem_cache_free(obdo_cachep, oa);
255 return rc;
256}
257
258/**
259 * Closes the ioepoch and packs all the attributes into @op_data for
260 * DONE_WRITING rpc.
261 */
262static void ll_prepare_done_writing(struct inode *inode,
263 struct md_op_data *op_data,
264 struct obd_client_handle **och)
265{
266 ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
267 /* If there is no @och, we do not do D_W yet. */
268 if (!*och)
269 return;
270
271 ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
272 ll_prep_md_op_data(op_data, inode, NULL, NULL,
273 0, 0, LUSTRE_OPC_ANY, NULL);
274}
275
276/** Send a DONE_WRITING rpc. */
277static void ll_done_writing(struct inode *inode)
278{
279 struct obd_client_handle *och = NULL;
280 struct md_op_data *op_data;
281 int rc;
282
283 LASSERT(exp_connect_som(ll_i2mdexp(inode)));
284
285 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
286 if (!op_data)
287 return;
288
289 ll_prepare_done_writing(inode, op_data, &och);
290 /* If there is no @och, we do not do D_W yet. */
291 if (!och)
292 goto out;
293
294 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
295 if (rc == -EAGAIN)
296 /* MDS has instructed us to obtain Size-on-MDS attribute from
297 * OSTs and send setattr to back to MDS.
298 */
299 rc = ll_som_update(inode, op_data);
300 else if (rc) {
301 CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
302 ll_get_fsname(inode->i_sb, NULL, 0),
303 PFID(ll_inode2fid(inode)), rc);
304 }
305out:
306 ll_finish_md_op_data(op_data);
307 if (och) {
308 md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
309 kfree(och);
310 }
311}
312
313static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
314{
315 struct ll_inode_info *lli = NULL;
316
317 spin_lock(&lcq->lcq_lock);
318
319 if (!list_empty(&lcq->lcq_head)) {
320 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
321 lli_close_list);
322 list_del_init(&lli->lli_close_list);
323 } else if (atomic_read(&lcq->lcq_stop)) {
324 lli = ERR_PTR(-EALREADY);
325 }
326
327 spin_unlock(&lcq->lcq_lock);
328 return lli;
329}
330
331static int ll_close_thread(void *arg)
332{
333 struct ll_close_queue *lcq = arg;
334
335 complete(&lcq->lcq_comp);
336
337 while (1) {
338 struct l_wait_info lwi = { 0 };
339 struct ll_inode_info *lli;
340 struct inode *inode;
341
342 l_wait_event_exclusive(lcq->lcq_waitq,
343 (lli = ll_close_next_lli(lcq)) != NULL,
344 &lwi);
345 if (IS_ERR(lli))
346 break;
347
348 inode = ll_info2i(lli);
349 CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
350 PFID(ll_inode2fid(inode)));
351 ll_done_writing(inode);
352 iput(inode);
353 }
354
355 CDEBUG(D_INFO, "ll_close exiting\n");
356 complete(&lcq->lcq_comp);
357 return 0;
358}
359
360int ll_close_thread_start(struct ll_close_queue **lcq_ret)
361{
362 struct ll_close_queue *lcq;
363 struct task_struct *task;
364
365 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
366 return -EINTR;
367
368 lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
369 if (!lcq)
370 return -ENOMEM;
371
372 spin_lock_init(&lcq->lcq_lock);
373 INIT_LIST_HEAD(&lcq->lcq_head);
374 init_waitqueue_head(&lcq->lcq_waitq);
375 init_completion(&lcq->lcq_comp);
376
377 task = kthread_run(ll_close_thread, lcq, "ll_close");
378 if (IS_ERR(task)) {
379 kfree(lcq);
380 return PTR_ERR(task);
381 }
382
383 wait_for_completion(&lcq->lcq_comp);
384 *lcq_ret = lcq;
385 return 0;
386}
387
388void ll_close_thread_shutdown(struct ll_close_queue *lcq)
389{
390 init_completion(&lcq->lcq_comp);
391 atomic_inc(&lcq->lcq_stop);
392 wake_up(&lcq->lcq_waitq);
393 wait_for_completion(&lcq->lcq_comp);
394 kfree(lcq);
395}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4bc551279aa4..2f46d475cd7d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -97,31 +97,20 @@ struct ll_grouplock {
97 unsigned long lg_gid; 97 unsigned long lg_gid;
98}; 98};
99 99
100enum lli_flags { 100enum ll_file_flags {
101 /* MDS has an authority for the Size-on-MDS attributes. */
102 LLIF_MDS_SIZE_LOCK = (1 << 0),
103 /* Epoch close is postponed. */
104 LLIF_EPOCH_PENDING = (1 << 1),
105 /* DONE WRITING is allowed. */
106 LLIF_DONE_WRITING = (1 << 2),
107 /* Sizeon-on-MDS attributes are changed. An attribute update needs to
108 * be sent to MDS.
109 */
110 LLIF_SOM_DIRTY = (1 << 3),
111 /* File data is modified. */ 101 /* File data is modified. */
112 LLIF_DATA_MODIFIED = (1 << 4), 102 LLIF_DATA_MODIFIED = 0,
113 /* File is being restored */ 103 /* File is being restored */
114 LLIF_FILE_RESTORING = (1 << 5), 104 LLIF_FILE_RESTORING = 1,
115 /* Xattr cache is attached to the file */ 105 /* Xattr cache is attached to the file */
116 LLIF_XATTR_CACHE = (1 << 6), 106 LLIF_XATTR_CACHE = 2,
117}; 107};
118 108
119struct ll_inode_info { 109struct ll_inode_info {
120 __u32 lli_inode_magic; 110 __u32 lli_inode_magic;
121 __u32 lli_flags;
122 __u64 lli_ioepoch;
123 111
124 spinlock_t lli_lock; 112 spinlock_t lli_lock;
113 unsigned long lli_flags;
125 struct posix_acl *lli_posix_acl; 114 struct posix_acl *lli_posix_acl;
126 115
127 /* identifying fields for both metadata and data stacks. */ 116 /* identifying fields for both metadata and data stacks. */
@@ -129,14 +118,6 @@ struct ll_inode_info {
129 /* master inode fid for stripe directory */ 118 /* master inode fid for stripe directory */
130 struct lu_fid lli_pfid; 119 struct lu_fid lli_pfid;
131 120
132 struct list_head lli_close_list;
133
134 /* handle is to be sent to MDS later on done_writing and setattr.
135 * Open handle data are needed for the recovery to reconstruct
136 * the inode state on the MDS. XXX: recovery is not ready yet.
137 */
138 struct obd_client_handle *lli_pending_och;
139
140 /* We need all three because every inode may be opened in different 121 /* We need all three because every inode may be opened in different
141 * modes 122 * modes
142 */ 123 */
@@ -204,7 +185,6 @@ struct ll_inode_info {
204 struct { 185 struct {
205 struct mutex lli_size_mutex; 186 struct mutex lli_size_mutex;
206 char *lli_symlink_name; 187 char *lli_symlink_name;
207 __u64 lli_maxbytes;
208 /* 188 /*
209 * struct rw_semaphore { 189 * struct rw_semaphore {
210 * signed long count; // align d.d_def_acl 190 * signed long count; // align d.d_def_acl
@@ -245,7 +225,6 @@ struct ll_inode_info {
245 * In the future, if more members are added only for directory, 225 * In the future, if more members are added only for directory,
246 * some of the following members can be moved into u.f. 226 * some of the following members can be moved into u.f.
247 */ 227 */
248 bool lli_has_smd;
249 struct cl_object *lli_clob; 228 struct cl_object *lli_clob;
250 229
251 /* mutex to request for layout lock exclusively. */ 230 /* mutex to request for layout lock exclusively. */
@@ -282,6 +261,9 @@ int ll_xattr_cache_destroy(struct inode *inode);
282int ll_xattr_cache_get(struct inode *inode, const char *name, 261int ll_xattr_cache_get(struct inode *inode, const char *name,
283 char *buffer, size_t size, __u64 valid); 262 char *buffer, size_t size, __u64 valid);
284 263
264int ll_init_security(struct dentry *dentry, struct inode *inode,
265 struct inode *dir);
266
285/* 267/*
286 * Locking to guarantee consistency of non-atomic updates to long long i_size, 268 * Locking to guarantee consistency of non-atomic updates to long long i_size,
287 * consistency between file size and KMS. 269 * consistency between file size and KMS.
@@ -400,7 +382,7 @@ enum stats_track_type {
400#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */ 382#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
401#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */ 383#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
402#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */ 384#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
403#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */ 385/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */
404#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */ 386#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
405#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */ 387#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
406#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */ 388#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
@@ -409,6 +391,8 @@ enum stats_track_type {
409#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */ 391#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
410#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */ 392#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
411#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */ 393#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
394#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
395 * suppress_pings */
412 396
413#define LL_SBI_FLAGS { \ 397#define LL_SBI_FLAGS { \
414 "nolck", \ 398 "nolck", \
@@ -432,6 +416,7 @@ enum stats_track_type {
432 "user_fid2path",\ 416 "user_fid2path",\
433 "xattr_cache", \ 417 "xattr_cache", \
434 "norootsquash", \ 418 "norootsquash", \
419 "always_ping", \
435} 420}
436 421
437/* 422/*
@@ -466,10 +451,10 @@ struct ll_sb_info {
466 451
467 int ll_flags; 452 int ll_flags;
468 unsigned int ll_umounting:1, 453 unsigned int ll_umounting:1,
469 ll_xattr_cache_enabled:1; 454 ll_xattr_cache_enabled:1,
470 struct lustre_client_ocd ll_lco; 455 ll_client_common_fill_super_succeeded:1;
471 456
472 struct ll_close_queue *ll_lcq; 457 struct lustre_client_ocd ll_lco;
473 458
474 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ 459 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
475 460
@@ -630,8 +615,6 @@ struct ll_file_data {
630 struct list_head fd_lccs; /* list of ll_cl_context */ 615 struct list_head fd_lccs; /* list of ll_cl_context */
631}; 616};
632 617
633struct lov_stripe_md;
634
635extern struct dentry *llite_root; 618extern struct dentry *llite_root;
636extern struct kset *llite_kset; 619extern struct kset *llite_kset;
637 620
@@ -682,8 +665,6 @@ enum {
682 LPROC_LL_WRITE_BYTES, 665 LPROC_LL_WRITE_BYTES,
683 LPROC_LL_BRW_READ, 666 LPROC_LL_BRW_READ,
684 LPROC_LL_BRW_WRITE, 667 LPROC_LL_BRW_WRITE,
685 LPROC_LL_OSC_READ,
686 LPROC_LL_OSC_WRITE,
687 LPROC_LL_IOCTL, 668 LPROC_LL_IOCTL,
688 LPROC_LL_OPEN, 669 LPROC_LL_OPEN,
689 LPROC_LL_RELEASE, 670 LPROC_LL_RELEASE,
@@ -741,9 +722,7 @@ int ll_writepage(struct page *page, struct writeback_control *wbc);
741int ll_writepages(struct address_space *, struct writeback_control *wbc); 722int ll_writepages(struct address_space *, struct writeback_control *wbc);
742int ll_readpage(struct file *file, struct page *page); 723int ll_readpage(struct file *file, struct page *page);
743void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras); 724void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
744int ll_readahead(const struct lu_env *env, struct cl_io *io, 725int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
745 struct cl_page_list *queue, struct ll_readahead_state *ras,
746 bool hit);
747struct ll_cl_context *ll_cl_find(struct file *file); 726struct ll_cl_context *ll_cl_find(struct file *file);
748void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io); 727void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
749void ll_cl_remove(struct file *file, const struct lu_env *env); 728void ll_cl_remove(struct file *file, const struct lu_env *env);
@@ -762,25 +741,14 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
762 enum ldlm_mode mode); 741 enum ldlm_mode mode);
763int ll_file_open(struct inode *inode, struct file *file); 742int ll_file_open(struct inode *inode, struct file *file);
764int ll_file_release(struct inode *inode, struct file *file); 743int ll_file_release(struct inode *inode, struct file *file);
765int ll_glimpse_ioctl(struct ll_sb_info *sbi,
766 struct lov_stripe_md *lsm, lstat_t *st);
767void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
768int ll_release_openhandle(struct inode *, struct lookup_intent *); 744int ll_release_openhandle(struct inode *, struct lookup_intent *);
769int ll_md_real_close(struct inode *inode, fmode_t fmode); 745int ll_md_real_close(struct inode *inode, fmode_t fmode);
770void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
771 struct obd_client_handle **och, unsigned long flags);
772void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
773int ll_som_update(struct inode *inode, struct md_op_data *op_data);
774int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
775 __u64 ioepoch, int sync);
776void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
777 struct lustre_handle *fh);
778int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); 746int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
779struct posix_acl *ll_get_acl(struct inode *inode, int type); 747struct posix_acl *ll_get_acl(struct inode *inode, int type);
780int ll_migrate(struct inode *parent, struct file *file, int mdtidx, 748int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
781 const char *name, int namelen); 749 const char *name, int namelen);
782int ll_get_fid_by_name(struct inode *parent, const char *name, 750int ll_get_fid_by_name(struct inode *parent, const char *name,
783 int namelen, struct lu_fid *fid); 751 int namelen, struct lu_fid *fid, struct inode **inode);
784int ll_inode_permission(struct inode *inode, int mask); 752int ll_inode_permission(struct inode *inode, int mask);
785 753
786int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, 754int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
@@ -818,6 +786,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
818void ll_put_super(struct super_block *sb); 786void ll_put_super(struct super_block *sb);
819void ll_kill_super(struct super_block *sb); 787void ll_kill_super(struct super_block *sb);
820struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock); 788struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
789void ll_dir_clear_lsm_md(struct inode *inode);
821void ll_clear_inode(struct inode *inode); 790void ll_clear_inode(struct inode *inode);
822int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import); 791int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
823int ll_setattr(struct dentry *de, struct iattr *attr); 792int ll_setattr(struct dentry *de, struct iattr *attr);
@@ -891,18 +860,6 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
891/* llite/symlink.c */ 860/* llite/symlink.c */
892extern const struct inode_operations ll_fast_symlink_inode_operations; 861extern const struct inode_operations ll_fast_symlink_inode_operations;
893 862
894/* llite/llite_close.c */
895struct ll_close_queue {
896 spinlock_t lcq_lock;
897 struct list_head lcq_head;
898 wait_queue_head_t lcq_waitq;
899 struct completion lcq_comp;
900 atomic_t lcq_stop;
901};
902
903void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
904void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
905
906/** 863/**
907 * IO arguments for various VFS I/O interfaces. 864 * IO arguments for various VFS I/O interfaces.
908 */ 865 */
@@ -945,15 +902,11 @@ static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
945 return &ll_env_info(env)->lti_args; 902 return &ll_env_info(env)->lti_args;
946} 903}
947 904
948void ll_queue_done_writing(struct inode *inode, unsigned long flags);
949void ll_close_thread_shutdown(struct ll_close_queue *lcq);
950int ll_close_thread_start(struct ll_close_queue **lcq_ret);
951
952/* llite/llite_mmap.c */ 905/* llite/llite_mmap.c */
953 906
954int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); 907int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
955int ll_file_mmap(struct file *file, struct vm_area_struct *vma); 908int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
956void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, 909void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
957 unsigned long addr, size_t count); 910 unsigned long addr, size_t count);
958struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, 911struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
959 size_t count); 912 size_t count);
@@ -1024,9 +977,14 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode)
1024 return fid; 977 return fid;
1025} 978}
1026 979
1027static inline __u64 ll_file_maxbytes(struct inode *inode) 980static inline loff_t ll_file_maxbytes(struct inode *inode)
1028{ 981{
1029 return ll_i2info(inode)->lli_maxbytes; 982 struct cl_object *obj = ll_i2info(inode)->lli_clob;
983
984 if (!obj)
985 return MAX_LFS_FILESIZE;
986
987 return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
1030} 988}
1031 989
1032/* llite/xattr.c */ 990/* llite/xattr.c */
@@ -1043,17 +1001,18 @@ extern const struct xattr_handler *ll_xattr_handlers[];
1043ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); 1001ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
1044int ll_xattr_list(struct inode *inode, const char *name, int type, 1002int ll_xattr_list(struct inode *inode, const char *name, int type,
1045 void *buffer, size_t size, __u64 valid); 1003 void *buffer, size_t size, __u64 valid);
1004const struct xattr_handler *get_xattr_type(const char *name);
1046 1005
1047/** 1006/**
1048 * Common IO arguments for various VFS I/O interfaces. 1007 * Common IO arguments for various VFS I/O interfaces.
1049 */ 1008 */
1050int cl_sb_init(struct super_block *sb); 1009int cl_sb_init(struct super_block *sb);
1051int cl_sb_fini(struct super_block *sb); 1010int cl_sb_fini(struct super_block *sb);
1052void ll_io_init(struct cl_io *io, const struct file *file, int write);
1053 1011
1054void ras_update(struct ll_sb_info *sbi, struct inode *inode, 1012enum ras_update_flags {
1055 struct ll_readahead_state *ras, unsigned long index, 1013 LL_RAS_HIT = 0x1,
1056 unsigned hit); 1014 LL_RAS_MMAP = 0x2
1015};
1057void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len); 1016void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
1058void ll_ra_stats_inc(struct inode *inode, enum ra_stat which); 1017void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
1059 1018
@@ -1258,15 +1217,6 @@ struct ll_dio_pages {
1258 int ldp_nr; 1217 int ldp_nr;
1259}; 1218};
1260 1219
1261static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
1262 int rc)
1263{
1264 int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
1265 LPROC_LL_OSC_WRITE;
1266
1267 ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
1268}
1269
1270ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, 1220ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
1271 int rw, struct inode *inode, 1221 int rw, struct inode *inode,
1272 struct ll_dio_pages *pv); 1222 struct ll_dio_pages *pv);
@@ -1365,11 +1315,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
1365 spin_unlock(&dentry->d_lock); 1315 spin_unlock(&dentry->d_lock);
1366} 1316}
1367 1317
1368enum {
1369 LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
1370 LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
1371};
1372
1373int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf); 1318int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
1374int ll_layout_refresh(struct inode *inode, __u32 *gen); 1319int ll_layout_refresh(struct inode *inode, __u32 *gen);
1375int ll_layout_restore(struct inode *inode, loff_t start, __u64 length); 1320int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
@@ -1383,14 +1328,14 @@ int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
1383int ll_getparent(struct file *file, struct getparent __user *arg); 1328int ll_getparent(struct file *file, struct getparent __user *arg);
1384 1329
1385/* lcommon_cl.c */ 1330/* lcommon_cl.c */
1386int cl_setattr_ost(struct inode *inode, const struct iattr *attr); 1331int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
1332 unsigned int attr_flags);
1387 1333
1388extern struct lu_env *cl_inode_fini_env; 1334extern struct lu_env *cl_inode_fini_env;
1389extern int cl_inode_fini_refcheck; 1335extern int cl_inode_fini_refcheck;
1390 1336
1391int cl_file_inode_init(struct inode *inode, struct lustre_md *md); 1337int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
1392void cl_inode_fini(struct inode *inode); 1338void cl_inode_fini(struct inode *inode);
1393int cl_local_size(struct inode *inode);
1394 1339
1395__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32); 1340__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
1396__u32 cl_fid_build_gen(const struct lu_fid *fid); 1341__u32 cl_fid_build_gen(const struct lu_fid *fid);
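
In the llite_internal.h hunk above, the lli_flags values change from bit masks (1 << n) to plain bit numbers and lli_flags itself becomes an unsigned long, which points at the kernel's atomic bitops rather than open-coded mask arithmetic. A sketch under that assumption (the accessor names are illustrative, not taken from this patch):

    /* assumption: flags are now manipulated with set_bit()/test_bit() */
    static inline void example_mark_data_modified(struct ll_inode_info *lli)
    {
    	set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
    }

    static inline bool example_file_is_restoring(struct ll_inode_info *lli)
    {
    	return test_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
    }
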
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index e5c62f4ce3d8..25f5aed97f63 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -191,10 +191,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
191 OBD_CONNECT_FLOCK_DEAD | 191 OBD_CONNECT_FLOCK_DEAD |
192 OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | 192 OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
193 OBD_CONNECT_OPEN_BY_FID | 193 OBD_CONNECT_OPEN_BY_FID |
194 OBD_CONNECT_DIR_STRIPE; 194 OBD_CONNECT_DIR_STRIPE |
195 195 OBD_CONNECT_BULK_MBITS;
196 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
197 data->ocd_connect_flags |= OBD_CONNECT_SOM;
198 196
199 if (sbi->ll_flags & LL_SBI_LRU_RESIZE) 197 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
200 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; 198 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
@@ -226,6 +224,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
226 /* real client */ 224 /* real client */
227 data->ocd_connect_flags |= OBD_CONNECT_REAL; 225 data->ocd_connect_flags |= OBD_CONNECT_REAL;
228 226
227 /* always ping even if server suppress_pings */
228 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
229 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
230
229 data->ocd_brw_size = MD_MAX_BRW_SIZE; 231 data->ocd_brw_size = MD_MAX_BRW_SIZE;
230 232
231 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, 233 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
@@ -288,7 +290,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
288 290
289 size = sizeof(*data); 291 size = sizeof(*data);
290 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA), 292 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
291 KEY_CONN_DATA, &size, data, NULL); 293 KEY_CONN_DATA, &size, data);
292 if (err) { 294 if (err) {
293 CERROR("%s: Get connect data failed: rc = %d\n", 295 CERROR("%s: Get connect data failed: rc = %d\n",
294 sbi->ll_md_exp->exp_obd->obd_name, err); 296 sbi->ll_md_exp->exp_obd->obd_name, err);
@@ -355,10 +357,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
355 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | 357 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
356 OBD_CONNECT_EINPROGRESS | 358 OBD_CONNECT_EINPROGRESS |
357 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE | 359 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
358 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS; 360 OBD_CONNECT_LAYOUTLOCK |
359 361 OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
360 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW) 362 OBD_CONNECT_BULK_MBITS;
361 data->ocd_connect_flags |= OBD_CONNECT_SOM;
362 363
363 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) { 364 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
364 /* OBD_CONNECT_CKSUM should always be set, even if checksums are 365 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
@@ -376,6 +377,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
376 377
377 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; 378 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
378 379
380 /* always ping even if server suppress_pings */
381 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
382 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
383
379 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n", 384 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
380 data->ocd_connect_flags, 385 data->ocd_connect_flags,
381 data->ocd_version, data->ocd_grant); 386 data->ocd_version, data->ocd_grant);
@@ -475,8 +480,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
475 ptlrpc_req_finished(request); 480 ptlrpc_req_finished(request);
476 481
477 if (IS_ERR(root)) { 482 if (IS_ERR(root)) {
478 if (lmd.lsm)
479 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
480#ifdef CONFIG_FS_POSIX_ACL 483#ifdef CONFIG_FS_POSIX_ACL
481 if (lmd.posix_acl) { 484 if (lmd.posix_acl) {
482 posix_acl_release(lmd.posix_acl); 485 posix_acl_release(lmd.posix_acl);
@@ -488,12 +491,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
488 goto out_root; 491 goto out_root;
489 } 492 }
490 493
491 err = ll_close_thread_start(&sbi->ll_lcq);
492 if (err) {
493 CERROR("cannot start close thread: rc %d\n", err);
494 goto out_root;
495 }
496
497 checksum = sbi->ll_flags & LL_SBI_CHECKSUM; 494 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
498 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM), 495 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
499 KEY_CHECKSUM, sizeof(checksum), &checksum, 496 KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -572,10 +569,18 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
572{ 569{
573 int size, rc; 570 int size, rc;
574 571
575 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL); 572 size = sizeof(*lmmsize);
573 rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
574 KEY_MAX_EASIZE, &size, lmmsize);
575 if (rc) {
576 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
577 sbi->ll_dt_exp->exp_obd->obd_name, rc);
578 return rc;
579 }
580
576 size = sizeof(int); 581 size = sizeof(int);
577 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE), 582 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
578 KEY_MAX_EASIZE, &size, lmmsize, NULL); 583 KEY_MAX_EASIZE, &size, lmmsize);
579 if (rc) 584 if (rc)
580 CERROR("Get max mdsize error rc %d\n", rc); 585 CERROR("Get max mdsize error rc %d\n", rc);
581 586
@@ -599,7 +604,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
599 604
600 size = sizeof(int); 605 size = sizeof(int);
601 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), 606 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
602 KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); 607 KEY_DEFAULT_EASIZE, &size, lmmsize);
603 if (rc) 608 if (rc)
604 CERROR("Get default mdsize error rc %d\n", rc); 609 CERROR("Get default mdsize error rc %d\n", rc);
605 610
@@ -633,8 +638,6 @@ static void client_common_put_super(struct super_block *sb)
633{ 638{
634 struct ll_sb_info *sbi = ll_s2sbi(sb); 639 struct ll_sb_info *sbi = ll_s2sbi(sb);
635 640
636 ll_close_thread_shutdown(sbi->ll_lcq);
637
638 cl_sb_fini(sb); 641 cl_sb_fini(sb);
639 642
640 obd_fid_fini(sbi->ll_dt_exp->exp_obd); 643 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
@@ -725,6 +728,18 @@ static int ll_options(char *options, int *flags)
725 *flags &= ~tmp; 728 *flags &= ~tmp;
726 goto next; 729 goto next;
727 } 730 }
731 tmp = ll_set_opt("context", s1, 1);
732 if (tmp)
733 goto next;
734 tmp = ll_set_opt("fscontext", s1, 1);
735 if (tmp)
736 goto next;
737 tmp = ll_set_opt("defcontext", s1, 1);
738 if (tmp)
739 goto next;
740 tmp = ll_set_opt("rootcontext", s1, 1);
741 if (tmp)
742 goto next;
728 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH); 743 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
729 if (tmp) { 744 if (tmp) {
730 *flags |= tmp; 745 *flags |= tmp;
@@ -766,11 +781,6 @@ static int ll_options(char *options, int *flags)
766 *flags &= ~tmp; 781 *flags &= ~tmp;
767 goto next; 782 goto next;
768 } 783 }
769 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
770 if (tmp) {
771 *flags |= tmp;
772 goto next;
773 }
774 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API); 784 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
775 if (tmp) { 785 if (tmp) {
776 *flags |= tmp; 786 *flags |= tmp;
@@ -786,6 +796,11 @@ static int ll_options(char *options, int *flags)
786 *flags &= ~tmp; 796 *flags &= ~tmp;
787 goto next; 797 goto next;
788 } 798 }
799 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
800 if (tmp) {
801 *flags |= tmp;
802 goto next;
803 }
789 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n", 804 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
790 s1); 805 s1);
791 return -EINVAL; 806 return -EINVAL;
@@ -804,14 +819,10 @@ void ll_lli_init(struct ll_inode_info *lli)
804{ 819{
805 lli->lli_inode_magic = LLI_INODE_MAGIC; 820 lli->lli_inode_magic = LLI_INODE_MAGIC;
806 lli->lli_flags = 0; 821 lli->lli_flags = 0;
807 lli->lli_ioepoch = 0;
808 lli->lli_maxbytes = MAX_LFS_FILESIZE;
809 spin_lock_init(&lli->lli_lock); 822 spin_lock_init(&lli->lli_lock);
810 lli->lli_posix_acl = NULL; 823 lli->lli_posix_acl = NULL;
811 /* Do not set lli_fid, it has been initialized already. */ 824 /* Do not set lli_fid, it has been initialized already. */
812 fid_zero(&lli->lli_pfid); 825 fid_zero(&lli->lli_pfid);
813 INIT_LIST_HEAD(&lli->lli_close_list);
814 lli->lli_pending_och = NULL;
815 lli->lli_mds_read_och = NULL; 826 lli->lli_mds_read_och = NULL;
816 lli->lli_mds_write_och = NULL; 827 lli->lli_mds_write_och = NULL;
817 lli->lli_mds_exec_och = NULL; 828 lli->lli_mds_exec_och = NULL;
@@ -820,9 +831,8 @@ void ll_lli_init(struct ll_inode_info *lli)
820 lli->lli_open_fd_exec_count = 0; 831 lli->lli_open_fd_exec_count = 0;
821 mutex_init(&lli->lli_och_mutex); 832 mutex_init(&lli->lli_och_mutex);
822 spin_lock_init(&lli->lli_agl_lock); 833 spin_lock_init(&lli->lli_agl_lock);
823 lli->lli_has_smd = false;
824 spin_lock_init(&lli->lli_layout_lock); 834 spin_lock_init(&lli->lli_layout_lock);
825 ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE); 835 ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
826 lli->lli_clob = NULL; 836 lli->lli_clob = NULL;
827 837
828 init_rwsem(&lli->lli_xattrs_list_rwsem); 838 init_rwsem(&lli->lli_xattrs_list_rwsem);
@@ -941,10 +951,14 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
941 951
942 /* connections, registrations, sb setup */ 952 /* connections, registrations, sb setup */
943 err = client_common_fill_super(sb, md, dt, mnt); 953 err = client_common_fill_super(sb, md, dt, mnt);
954 if (!err)
955 sbi->ll_client_common_fill_super_succeeded = 1;
944 956
945out_free: 957out_free:
946 kfree(md); 958 kfree(md);
947 kfree(dt); 959 kfree(dt);
960 if (lprof)
961 class_put_profile(lprof);
948 if (err) 962 if (err)
949 ll_put_super(sb); 963 ll_put_super(sb);
950 else if (sbi->ll_flags & LL_SBI_VERBOSE) 964 else if (sbi->ll_flags & LL_SBI_VERBOSE)
@@ -1002,7 +1016,7 @@ void ll_put_super(struct super_block *sb)
1002 } 1016 }
1003 } 1017 }
1004 1018
1005 if (sbi->ll_lcq) { 1019 if (sbi->ll_client_common_fill_super_succeeded) {
1006 /* Only if client_common_fill_super succeeded */ 1020 /* Only if client_common_fill_super succeeded */
1007 client_common_put_super(sb); 1021 client_common_put_super(sb);
1008 } 1022 }
@@ -1057,7 +1071,7 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1057 return inode; 1071 return inode;
1058} 1072}
1059 1073
1060static void ll_dir_clear_lsm_md(struct inode *inode) 1074void ll_dir_clear_lsm_md(struct inode *inode)
1061{ 1075{
1062 struct ll_inode_info *lli = ll_i2info(inode); 1076 struct ll_inode_info *lli = ll_i2info(inode);
1063 1077
@@ -1205,16 +1219,44 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1205 1219
1206 /* set the directory layout */ 1220 /* set the directory layout */
1207 if (!lli->lli_lsm_md) { 1221 if (!lli->lli_lsm_md) {
1222 struct cl_attr *attr;
1223
1208 rc = ll_init_lsm_md(inode, md); 1224 rc = ll_init_lsm_md(inode, md);
1209 if (rc) 1225 if (rc)
1210 return rc; 1226 return rc;
1211 1227
1212 lli->lli_lsm_md = lsm;
1213 /* 1228 /*
1214 * set lsm_md to NULL, so the following free lustre_md 1229 * set lsm_md to NULL, so the following free lustre_md
1215 * will not free this lsm 1230 * will not free this lsm
1216 */ 1231 */
1217 md->lmv = NULL; 1232 md->lmv = NULL;
1233 lli->lli_lsm_md = lsm;
1234
1235 attr = kzalloc(sizeof(*attr), GFP_NOFS);
1236 if (!attr)
1237 return -ENOMEM;
1238
1239 /* validate the lsm */
1240 rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
1241 ll_md_blocking_ast);
1242 if (rc) {
1243 kfree(attr);
1244 return rc;
1245 }
1246
1247 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1248 md->body->mbo_nlink = attr->cat_nlink;
1249 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1250 md->body->mbo_size = attr->cat_size;
1251 if (md->body->mbo_valid & OBD_MD_FLATIME)
1252 md->body->mbo_atime = attr->cat_atime;
1253 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1254 md->body->mbo_ctime = attr->cat_ctime;
1255 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1256 md->body->mbo_mtime = attr->cat_mtime;
1257
1258 kfree(attr);
1259
1218 CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm, 1260 CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
1219 lsm->lsm_md_magic, PFID(ll_inode2fid(inode))); 1261 lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
1220 return 0; 1262 return 0;
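The new branch above validates a freshly initialized striped-directory layout: md_merge_attr() aggregates attributes across the stripes, and the merged values overwrite the MDS reply only for fields the reply's valid mask actually covers. A minimal sketch of that guarded fold, using only names that appear in the hunk (the helper itself is hypothetical, extracted purely to illustrate the pattern):

	/* Sketch: fold merged stripe attributes back into the MDS reply body,
	 * touching a field only when its OBD_MD_* valid bit is set.
	 */
	static void lmv_fold_merged_attr(struct mdt_body *body,
					 const struct cl_attr *attr)
	{
		if (body->mbo_valid & OBD_MD_FLNLINK)
			body->mbo_nlink = attr->cat_nlink;
		if (body->mbo_valid & OBD_MD_FLSIZE)
			body->mbo_size = attr->cat_size;
		if (body->mbo_valid & OBD_MD_FLATIME)
			body->mbo_atime = attr->cat_atime;
		if (body->mbo_valid & OBD_MD_FLCTIME)
			body->mbo_ctime = attr->cat_ctime;
		if (body->mbo_valid & OBD_MD_FLMTIME)
			body->mbo_mtime = attr->cat_mtime;
	}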
@@ -1272,9 +1314,6 @@ void ll_clear_inode(struct inode *inode)
1272 LASSERT(lli->lli_opendir_pid == 0); 1314 LASSERT(lli->lli_opendir_pid == 0);
1273 } 1315 }
1274 1316
1275 spin_lock(&lli->lli_lock);
1276 ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1277 spin_unlock(&lli->lli_lock);
1278 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode)); 1317 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1279 1318
1280 LASSERT(!lli->lli_open_fd_write_count); 1319 LASSERT(!lli->lli_open_fd_write_count);
@@ -1313,13 +1352,11 @@ void ll_clear_inode(struct inode *inode)
1313 * cl_object still uses inode lsm. 1352 * cl_object still uses inode lsm.
1314 */ 1353 */
1315 cl_inode_fini(inode); 1354 cl_inode_fini(inode);
1316 lli->lli_has_smd = false;
1317} 1355}
1318 1356
1319#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) 1357#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
1320 1358
1321static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, 1359static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1322 struct md_open_data **mod)
1323{ 1360{
1324 struct lustre_md md; 1361 struct lustre_md md;
1325 struct inode *inode = d_inode(dentry); 1362 struct inode *inode = d_inode(dentry);
@@ -1332,8 +1369,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1332 if (IS_ERR(op_data)) 1369 if (IS_ERR(op_data))
1333 return PTR_ERR(op_data); 1370 return PTR_ERR(op_data);
1334 1371
1335 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 1372 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1336 &request, mod);
1337 if (rc) { 1373 if (rc) {
1338 ptlrpc_req_finished(request); 1374 ptlrpc_req_finished(request);
1339 if (rc == -ENOENT) { 1375 if (rc == -ENOENT) {
@@ -1369,48 +1405,12 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1369 rc = simple_setattr(dentry, &op_data->op_attr); 1405 rc = simple_setattr(dentry, &op_data->op_attr);
1370 op_data->op_attr.ia_valid = ia_valid; 1406 op_data->op_attr.ia_valid = ia_valid;
1371 1407
1372 /* Extract epoch data if obtained. */
1373 op_data->op_handle = md.body->mbo_handle;
1374 op_data->op_ioepoch = md.body->mbo_ioepoch;
1375
1376 rc = ll_update_inode(inode, &md); 1408 rc = ll_update_inode(inode, &md);
1377 ptlrpc_req_finished(request); 1409 ptlrpc_req_finished(request);
1378 1410
1379 return rc; 1411 return rc;
1380} 1412}
1381 1413
1382/* Close IO epoch and send Size-on-MDS attribute update. */
1383static int ll_setattr_done_writing(struct inode *inode,
1384 struct md_op_data *op_data,
1385 struct md_open_data *mod)
1386{
1387 struct ll_inode_info *lli = ll_i2info(inode);
1388 int rc = 0;
1389
1390 if (!S_ISREG(inode->i_mode))
1391 return 0;
1392
1393 CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1394 op_data->op_ioepoch, PFID(&lli->lli_fid));
1395
1396 op_data->op_flags = MF_EPOCH_CLOSE;
1397 ll_done_writing_attr(inode, op_data);
1398 ll_pack_inode2opdata(inode, op_data, NULL);
1399
1400 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1401 if (rc == -EAGAIN)
1402 /* MDS has instructed us to obtain Size-on-MDS attribute
1403 * from OSTs and send setattr to back to MDS.
1404 */
1405 rc = ll_som_update(inode, op_data);
1406 else if (rc) {
1407 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
1408 ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
1409 PFID(ll_inode2fid(inode)), rc);
1410 }
1411 return rc;
1412}
1413
1414/* If this inode has objects allocated to it (lsm != NULL), then the OST 1414/* If this inode has objects allocated to it (lsm != NULL), then the OST
1415 * object(s) determine the file size and mtime. Otherwise, the MDS will 1415 * object(s) determine the file size and mtime. Otherwise, the MDS will
1416 * keep these values until such a time that objects are allocated for it. 1416 * keep these values until such a time that objects are allocated for it.
@@ -1431,9 +1431,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1431 struct inode *inode = d_inode(dentry); 1431 struct inode *inode = d_inode(dentry);
1432 struct ll_inode_info *lli = ll_i2info(inode); 1432 struct ll_inode_info *lli = ll_i2info(inode);
1433 struct md_op_data *op_data = NULL; 1433 struct md_op_data *op_data = NULL;
1434 struct md_open_data *mod = NULL;
1435 bool file_is_released = false; 1434 bool file_is_released = false;
1436 int rc = 0, rc1 = 0; 1435 int rc = 0;
1437 1436
1438 CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n", 1437 CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
1439 ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode, 1438 ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
@@ -1503,14 +1502,33 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1503 * but other attributes must be set 1502 * but other attributes must be set
1504 */ 1503 */
1505 if (S_ISREG(inode->i_mode)) { 1504 if (S_ISREG(inode->i_mode)) {
1506 struct lov_stripe_md *lsm; 1505 struct cl_layout cl = {
1506 .cl_is_released = false,
1507 };
1508 struct lu_env *env;
1509 int refcheck;
1507 __u32 gen; 1510 __u32 gen;
1508 1511
1509 ll_layout_refresh(inode, &gen); 1512 rc = ll_layout_refresh(inode, &gen);
1510 lsm = ccc_inode_lsm_get(inode); 1513 if (rc < 0)
1511 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED) 1514 goto out;
1512 file_is_released = true; 1515
1513 ccc_inode_lsm_put(inode, lsm); 1516 /*
1517 * XXX: the only place we need to know the layout type,
1518 * this will be removed by a later patch. -Jinshan
1519 */
1520 env = cl_env_get(&refcheck);
1521 if (IS_ERR(env)) {
1522 rc = PTR_ERR(env);
1523 goto out;
1524 }
1525
1526 rc = cl_object_layout_get(env, lli->lli_clob, &cl);
1527 cl_env_put(env, &refcheck);
1528 if (rc < 0)
1529 goto out;
1530
1531 file_is_released = cl.cl_is_released;
1514 1532
1515 if (!hsm_import && attr->ia_valid & ATTR_SIZE) { 1533 if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1516 if (file_is_released) { 1534 if (file_is_released) {
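For regular files the rewritten branch above no longer pulls a lov_stripe_md with ccc_inode_lsm_get() to test LOV_PATTERN_F_RELEASED; it asks the cl_object layer whether the layout is HSM-released. The query pattern the hunk introduces, shown here as a linear sketch (the real code jumps to the function's out label on error):

	struct cl_layout cl = { .cl_is_released = false };
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	/* fills cl.cl_is_released from the object's current layout */
	rc = cl_object_layout_get(env, lli->lli_clob, &cl);
	cl_env_put(env, &refcheck);
	if (rc < 0)
		return rc;

	file_is_released = cl.cl_is_released;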
@@ -1527,32 +1545,16 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1527 * modified, flag it. 1545 * modified, flag it.
1528 */ 1546 */
1529 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE; 1547 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1530 spin_lock(&lli->lli_lock);
1531 lli->lli_flags |= LLIF_DATA_MODIFIED;
1532 spin_unlock(&lli->lli_lock);
1533 op_data->op_bias |= MDS_DATA_MODIFIED; 1548 op_data->op_bias |= MDS_DATA_MODIFIED;
1534 } 1549 }
1535 } 1550 }
1536 1551
1537 memcpy(&op_data->op_attr, attr, sizeof(*attr)); 1552 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1538 1553
1539 /* Open epoch for truncate. */ 1554 rc = ll_md_setattr(dentry, op_data);
1540 if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
1541 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1542 op_data->op_flags = MF_EPOCH_OPEN;
1543
1544 rc = ll_md_setattr(dentry, op_data, &mod);
1545 if (rc) 1555 if (rc)
1546 goto out; 1556 goto out;
1547 1557
1548 /* RPC to MDT is sent, cancel data modification flag */
1549 if (op_data->op_bias & MDS_DATA_MODIFIED) {
1550 spin_lock(&lli->lli_lock);
1551 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1552 spin_unlock(&lli->lli_lock);
1553 }
1554
1555 ll_ioepoch_open(lli, op_data->op_ioepoch);
1556 if (!S_ISREG(inode->i_mode) || file_is_released) { 1558 if (!S_ISREG(inode->i_mode) || file_is_released) {
1557 rc = 0; 1559 rc = 0;
1558 goto out; 1560 goto out;
@@ -1568,19 +1570,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1568 * setting times to past, but it is necessary due to possible 1570 * setting times to past, but it is necessary due to possible
1569 * time de-synchronization between MDT inode and OST objects 1571 * time de-synchronization between MDT inode and OST objects
1570 */ 1572 */
1571 if (attr->ia_valid & ATTR_SIZE) 1573 rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
1572 down_write(&lli->lli_trunc_sem);
1573 rc = cl_setattr_ost(inode, attr);
1574 if (attr->ia_valid & ATTR_SIZE)
1575 up_write(&lli->lli_trunc_sem);
1576 } 1574 }
1577out: 1575out:
1578 if (op_data->op_ioepoch) { 1576 if (op_data)
1579 rc1 = ll_setattr_done_writing(inode, op_data, mod); 1577 ll_finish_md_op_data(op_data);
1580 if (!rc)
1581 rc = rc1;
1582 }
1583 ll_finish_md_op_data(op_data);
1584 1578
1585 if (!S_ISDIR(inode->i_mode)) { 1579 if (!S_ISDIR(inode->i_mode)) {
1586 inode_lock(inode); 1580 inode_lock(inode);
@@ -1736,19 +1730,10 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
1736{ 1730{
1737 struct ll_inode_info *lli = ll_i2info(inode); 1731 struct ll_inode_info *lli = ll_i2info(inode);
1738 struct mdt_body *body = md->body; 1732 struct mdt_body *body = md->body;
1739 struct lov_stripe_md *lsm = md->lsm;
1740 struct ll_sb_info *sbi = ll_i2sbi(inode); 1733 struct ll_sb_info *sbi = ll_i2sbi(inode);
1741 1734
1742 LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0)); 1735 if (body->mbo_valid & OBD_MD_FLEASIZE)
1743 if (lsm) { 1736 cl_file_inode_init(inode, md);
1744 if (!lli->lli_has_smd &&
1745 !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1746 cl_file_inode_init(inode, md);
1747
1748 lli->lli_maxbytes = lsm->lsm_maxbytes;
1749 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1750 lli->lli_maxbytes = MAX_LFS_FILESIZE;
1751 }
1752 1737
1753 if (S_ISDIR(inode->i_mode)) { 1738 if (S_ISDIR(inode->i_mode)) {
1754 int rc; 1739 int rc;
@@ -1828,48 +1813,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
1828 LASSERT(fid_seq(&lli->lli_fid) != 0); 1813 LASSERT(fid_seq(&lli->lli_fid) != 0);
1829 1814
1830 if (body->mbo_valid & OBD_MD_FLSIZE) { 1815 if (body->mbo_valid & OBD_MD_FLSIZE) {
1831 if (exp_connect_som(ll_i2mdexp(inode)) && 1816 i_size_write(inode, body->mbo_size);
1832 S_ISREG(inode->i_mode)) {
1833 struct lustre_handle lockh;
1834 enum ldlm_mode mode;
1835
1836 /* As it is possible a blocking ast has been processed
1837 * by this time, we need to check there is an UPDATE
1838 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
1839 * it.
1840 */
1841 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1842 &lockh, LDLM_FL_CBPENDING,
1843 LCK_CR | LCK_CW |
1844 LCK_PR | LCK_PW);
1845 if (mode) {
1846 if (lli->lli_flags & (LLIF_DONE_WRITING |
1847 LLIF_EPOCH_PENDING |
1848 LLIF_SOM_DIRTY)) {
1849 CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
1850 sbi->ll_md_exp->exp_obd->obd_name,
1851 PFID(ll_inode2fid(inode)),
1852 lli->lli_flags);
1853 } else {
1854 /* Use old size assignment to avoid
1855 * deadlock bz14138 & bz14326
1856 */
1857 i_size_write(inode, body->mbo_size);
1858 spin_lock(&lli->lli_lock);
1859 lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1860 spin_unlock(&lli->lli_lock);
1861 }
1862 ldlm_lock_decref(&lockh, mode);
1863 }
1864 } else {
1865 /* Use old size assignment to avoid
1866 * deadlock bz14138 & bz14326
1867 */
1868 i_size_write(inode, body->mbo_size);
1869 1817
1870 CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n", 1818 CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
1871 inode->i_ino, (unsigned long long)body->mbo_size); 1819 PFID(ll_inode2fid(inode)),
1872 } 1820 (unsigned long long)body->mbo_size);
1873 1821
1874 if (body->mbo_valid & OBD_MD_FLBLOCKS) 1822 if (body->mbo_valid & OBD_MD_FLBLOCKS)
1875 inode->i_blocks = body->mbo_blocks; 1823 inode->i_blocks = body->mbo_blocks;
@@ -1877,7 +1825,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
1877 1825
1878 if (body->mbo_valid & OBD_MD_TSTATE) { 1826 if (body->mbo_valid & OBD_MD_TSTATE) {
1879 if (body->mbo_t_state & MS_RESTORE) 1827 if (body->mbo_t_state & MS_RESTORE)
1880 lli->lli_flags |= LLIF_FILE_RESTORING; 1828 set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
1881 } 1829 }
1882 1830
1883 return 0; 1831 return 0;
@@ -1892,8 +1840,6 @@ int ll_read_inode2(struct inode *inode, void *opaque)
1892 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n", 1840 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1893 PFID(&lli->lli_fid), inode); 1841 PFID(&lli->lli_fid), inode);
1894 1842
1895 LASSERT(!lli->lli_has_smd);
1896
1897 /* Core attributes from the MDS first. This is a new inode, and 1843 /* Core attributes from the MDS first. This is a new inode, and
1898 * the VFS doesn't zero times in the core inode so we have to do 1844 * the VFS doesn't zero times in the core inode so we have to do
1899 * it ourselves. They will be overwritten by either MDS or OST 1845 * it ourselves. They will be overwritten by either MDS or OST
@@ -1988,9 +1934,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
1988 return put_user(flags, (int __user *)arg); 1934 return put_user(flags, (int __user *)arg);
1989 } 1935 }
1990 case FSFILT_IOC_SETFLAGS: { 1936 case FSFILT_IOC_SETFLAGS: {
1991 struct lov_stripe_md *lsm;
1992 struct obd_info oinfo = { };
1993 struct md_op_data *op_data; 1937 struct md_op_data *op_data;
1938 struct cl_object *obj;
1939 struct iattr *attr;
1994 1940
1995 if (get_user(flags, (int __user *)arg)) 1941 if (get_user(flags, (int __user *)arg))
1996 return -EFAULT; 1942 return -EFAULT;
@@ -2002,8 +1948,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
2002 1948
2003 op_data->op_attr_flags = flags; 1949 op_data->op_attr_flags = flags;
2004 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; 1950 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
2005 rc = md_setattr(sbi->ll_md_exp, op_data, 1951 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2006 NULL, 0, NULL, 0, &req, NULL);
2007 ll_finish_md_op_data(op_data); 1952 ll_finish_md_op_data(op_data);
2008 ptlrpc_req_finished(req); 1953 ptlrpc_req_finished(req);
2009 if (rc) 1954 if (rc)
@@ -2011,30 +1956,17 @@ int ll_iocontrol(struct inode *inode, struct file *file,
2011 1956
2012 inode->i_flags = ll_ext_to_inode_flags(flags); 1957 inode->i_flags = ll_ext_to_inode_flags(flags);
2013 1958
2014 lsm = ccc_inode_lsm_get(inode); 1959 obj = ll_i2info(inode)->lli_clob;
2015 if (!lsm_has_objects(lsm)) { 1960 if (!obj)
2016 ccc_inode_lsm_put(inode, lsm);
2017 return 0; 1961 return 0;
2018 }
2019 1962
2020 oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); 1963 attr = kzalloc(sizeof(*attr), GFP_NOFS);
2021 if (!oinfo.oi_oa) { 1964 if (!attr)
2022 ccc_inode_lsm_put(inode, lsm);
2023 return -ENOMEM; 1965 return -ENOMEM;
2024 }
2025 oinfo.oi_md = lsm;
2026 oinfo.oi_oa->o_oi = lsm->lsm_oi;
2027 oinfo.oi_oa->o_flags = flags;
2028 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
2029 OBD_MD_FLGROUP;
2030 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
2031 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
2032 kmem_cache_free(obdo_cachep, oinfo.oi_oa);
2033 ccc_inode_lsm_put(inode, lsm);
2034
2035 if (rc && rc != -EPERM && rc != -EACCES)
2036 CERROR("osc_setattr_async fails: rc = %d\n", rc);
2037 1966
1967 attr->ia_valid = ATTR_ATTR_FLAG;
1968 rc = cl_setattr_ost(obj, attr, flags);
1969 kfree(attr);
2038 return rc; 1970 return rc;
2039 } 1971 }
2040 default: 1972 default:
@@ -2164,7 +2096,6 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2164 return; 2096 return;
2165 2097
2166 op_data->op_fid1 = body->mbo_fid1; 2098 op_data->op_fid1 = body->mbo_fid1;
2167 op_data->op_ioepoch = body->mbo_ioepoch;
2168 op_data->op_handle = body->mbo_handle; 2099 op_data->op_handle = body->mbo_handle;
2169 op_data->op_mod_time = get_seconds(); 2100 op_data->op_mod_time = get_seconds();
2170 md_close(exp, op_data, NULL, &close_req); 2101 md_close(exp, op_data, NULL, &close_req);
@@ -2244,17 +2175,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2244 conf.coc_opc = OBJECT_CONF_SET; 2175 conf.coc_opc = OBJECT_CONF_SET;
2245 conf.coc_inode = *inode; 2176 conf.coc_inode = *inode;
2246 conf.coc_lock = lock; 2177 conf.coc_lock = lock;
2247 conf.u.coc_md = &md; 2178 conf.u.coc_layout = md.layout;
2248 (void)ll_layout_conf(*inode, &conf); 2179 (void)ll_layout_conf(*inode, &conf);
2249 } 2180 }
2250 LDLM_LOCK_PUT(lock); 2181 LDLM_LOCK_PUT(lock);
2251 } 2182 }
2252 2183
2253out: 2184out:
2254 if (md.lsm)
2255 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2256 md_free_lustre_md(sbi->ll_md_exp, &md); 2185 md_free_lustre_md(sbi->ll_md_exp, &md);
2257
2258cleanup: 2186cleanup:
2259 if (rc != 0 && it && it->it_op & IT_OPEN) 2187 if (rc != 0 && it && it->it_op & IT_OPEN)
2260 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req); 2188 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
@@ -2380,8 +2308,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2380 op_data->op_default_stripe_offset = -1; 2308 op_data->op_default_stripe_offset = -1;
2381 if (S_ISDIR(i1->i_mode)) { 2309 if (S_ISDIR(i1->i_mode)) {
2382 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md; 2310 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2383 op_data->op_default_stripe_offset = 2311 if (opc == LUSTRE_OPC_MKDIR)
2384 ll_i2info(i1)->lli_def_stripe_offset; 2312 op_data->op_default_stripe_offset =
2313 ll_i2info(i1)->lli_def_stripe_offset;
2385 } 2314 }
2386 2315
2387 if (i2) { 2316 if (i2) {
@@ -2405,8 +2334,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2405 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); 2334 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2406 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); 2335 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2407 op_data->op_cap = cfs_curproc_cap_pack(); 2336 op_data->op_cap = cfs_curproc_cap_pack();
2408 op_data->op_bias = 0;
2409 op_data->op_cli_flags = 0;
2410 if ((opc == LUSTRE_OPC_CREATE) && name && 2337 if ((opc == LUSTRE_OPC_CREATE) && name &&
2411 filename_is_volatile(name, namelen, &op_data->op_mds)) 2338 filename_is_volatile(name, namelen, &op_data->op_mds))
2412 op_data->op_bias |= MDS_CREATE_VOLATILE; 2339 op_data->op_bias |= MDS_CREATE_VOLATILE;
@@ -2414,10 +2341,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2414 op_data->op_mds = 0; 2341 op_data->op_mds = 0;
2415 op_data->op_data = data; 2342 op_data->op_data = data;
2416 2343
2417 /* When called by ll_setattr_raw, file is i1. */
2418 if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
2419 op_data->op_bias |= MDS_DATA_MODIFIED;
2420
2421 return op_data; 2344 return op_data;
2422} 2345}
2423 2346
@@ -2451,6 +2374,9 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2451 if (sbi->ll_flags & LL_SBI_USER_FID2PATH) 2374 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2452 seq_puts(seq, ",user_fid2path"); 2375 seq_puts(seq, ",user_fid2path");
2453 2376
2377 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
2378 seq_puts(seq, ",always_ping");
2379
2454 return 0; 2380 return 0;
2455} 2381}
2456 2382
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 436691814a5e..ee01f20d8b11 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -47,7 +47,7 @@
47 47
48static const struct vm_operations_struct ll_file_vm_ops; 48static const struct vm_operations_struct ll_file_vm_ops;
49 49
50void policy_from_vma(ldlm_policy_data_t *policy, 50void policy_from_vma(union ldlm_policy_data *policy,
51 struct vm_area_struct *vma, unsigned long addr, 51 struct vm_area_struct *vma, unsigned long addr,
52 size_t count) 52 size_t count)
53{ 53{
@@ -80,43 +80,24 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
80 * API independent part for page fault initialization. 80 * API independent part for page fault initialization.
81 * \param vma - virtual memory area addressed to page fault 81 * \param vma - virtual memory area addressed to page fault
82 * \param env - corespondent lu_env to processing 82 * \param env - corespondent lu_env to processing
83 * \param nest - nested level
84 * \param index - page index corespondent to fault. 83 * \param index - page index corespondent to fault.
85 * \parm ra_flags - vma readahead flags. 84 * \parm ra_flags - vma readahead flags.
86 * 85 *
87 * \return allocated and initialized env for fault operation. 86 * \return error codes from cl_io_init.
88 * \retval EINVAL if env can't allocated
89 * \return other error codes from cl_io_init.
90 */ 87 */
91static struct cl_io * 88static struct cl_io *
92ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, 89ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
93 struct cl_env_nest *nest, pgoff_t index, 90 pgoff_t index, unsigned long *ra_flags)
94 unsigned long *ra_flags)
95{ 91{
96 struct file *file = vma->vm_file; 92 struct file *file = vma->vm_file;
97 struct inode *inode = file_inode(file); 93 struct inode *inode = file_inode(file);
98 struct cl_io *io; 94 struct cl_io *io;
99 struct cl_fault_io *fio; 95 struct cl_fault_io *fio;
100 struct lu_env *env;
101 int rc; 96 int rc;
102 97
103 *env_ret = NULL;
104 if (ll_file_nolock(file)) 98 if (ll_file_nolock(file))
105 return ERR_PTR(-EOPNOTSUPP); 99 return ERR_PTR(-EOPNOTSUPP);
106 100
107 /*
108 * page fault can be called when lustre IO is
109 * already active for the current thread, e.g., when doing read/write
110 * against user level buffer mapped from Lustre buffer. To avoid
111 * stomping on existing context, optionally force an allocation of a new
112 * one.
113 */
114 env = cl_env_nested_get(nest);
115 if (IS_ERR(env))
116 return ERR_PTR(-EINVAL);
117
118 *env_ret = env;
119
120restart: 101restart:
121 io = vvp_env_thread_io(env); 102 io = vvp_env_thread_io(env);
122 io->ci_obj = ll_i2info(inode)->lli_clob; 103 io->ci_obj = ll_i2info(inode)->lli_clob;
@@ -155,7 +136,6 @@ restart:
155 if (io->ci_need_restart) 136 if (io->ci_need_restart)
156 goto restart; 137 goto restart;
157 138
158 cl_env_nested_put(nest, env);
159 io = ERR_PTR(rc); 139 io = ERR_PTR(rc);
160 } 140 }
161 141
@@ -169,13 +149,17 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
169 struct lu_env *env; 149 struct lu_env *env;
170 struct cl_io *io; 150 struct cl_io *io;
171 struct vvp_io *vio; 151 struct vvp_io *vio;
172 struct cl_env_nest nest;
173 int result; 152 int result;
153 int refcheck;
174 sigset_t set; 154 sigset_t set;
175 struct inode *inode; 155 struct inode *inode;
176 struct ll_inode_info *lli; 156 struct ll_inode_info *lli;
177 157
178 io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); 158 env = cl_env_get(&refcheck);
159 if (IS_ERR(env))
160 return PTR_ERR(env);
161
162 io = ll_fault_io_init(env, vma, vmpage->index, NULL);
179 if (IS_ERR(io)) { 163 if (IS_ERR(io)) {
180 result = PTR_ERR(io); 164 result = PTR_ERR(io);
181 goto out; 165 goto out;
@@ -231,17 +215,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
231 result = -EAGAIN; 215 result = -EAGAIN;
232 } 216 }
233 217
234 if (result == 0) { 218 if (!result)
235 spin_lock(&lli->lli_lock); 219 set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
236 lli->lli_flags |= LLIF_DATA_MODIFIED;
237 spin_unlock(&lli->lli_lock);
238 }
239 } 220 }
240 221
241out_io: 222out_io:
242 cl_io_fini(env, io); 223 cl_io_fini(env, io);
243 cl_env_nested_put(&nest, env);
244out: 224out:
225 cl_env_put(env, &refcheck);
245 CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result); 226 CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
246 LASSERT(ergo(result == 0, PageLocked(vmpage))); 227 LASSERT(ergo(result == 0, PageLocked(vmpage)));
247 228
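This hunk, like the rw.c and rw26.c hunks further down, retires the nested-environment helpers: rather than cl_env_nested_get()/cl_env_nested_put() with a struct cl_env_nest on the stack, callers now take a reference-counted per-thread environment. A bare sketch of the resulting call shape, assuming only the two calls visible in the hunk:

	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);	/* refcheck pairs this get with the put */
	if (IS_ERR(env))
		return PTR_ERR(env);

	/* ... build and run the cl_io against this environment ... */

	cl_env_put(env, &refcheck);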
@@ -285,13 +266,19 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
285 struct vvp_io *vio = NULL; 266 struct vvp_io *vio = NULL;
286 struct page *vmpage; 267 struct page *vmpage;
287 unsigned long ra_flags; 268 unsigned long ra_flags;
288 struct cl_env_nest nest; 269 int result = 0;
289 int result;
290 int fault_ret = 0; 270 int fault_ret = 0;
271 int refcheck;
272
273 env = cl_env_get(&refcheck);
274 if (IS_ERR(env))
275 return PTR_ERR(env);
291 276
292 io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); 277 io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
293 if (IS_ERR(io)) 278 if (IS_ERR(io)) {
294 return to_fault_error(PTR_ERR(io)); 279 result = to_fault_error(PTR_ERR(io));
280 goto out;
281 }
295 282
296 result = io->ci_result; 283 result = io->ci_result;
297 if (result == 0) { 284 if (result == 0) {
@@ -322,14 +309,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
322 } 309 }
323 } 310 }
324 cl_io_fini(env, io); 311 cl_io_fini(env, io);
325 cl_env_nested_put(&nest, env);
326 312
327 vma->vm_flags |= ra_flags; 313 vma->vm_flags |= ra_flags;
314
315out:
316 cl_env_put(env, &refcheck);
328 if (result != 0 && !(fault_ret & VM_FAULT_RETRY)) 317 if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
329 fault_ret |= to_fault_error(result); 318 fault_ret |= to_fault_error(result);
330 319
331 CDEBUG(D_MMAP, "%s fault %d/%d\n", 320 CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
332 current->comm, fault_ret, result);
333 return fault_ret; 321 return fault_ret;
334} 322}
335 323
@@ -381,6 +369,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
381 bool retry; 369 bool retry;
382 int result; 370 int result;
383 371
372 file_update_time(vma->vm_file);
384 do { 373 do {
385 retry = false; 374 retry = false;
386 result = ll_page_mkwrite0(vma, vmf->page, &retry); 375 result = ll_page_mkwrite0(vma, vmf->page, &retry);
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 709230571b4b..c63236580b0f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -226,7 +226,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
226 226
227static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, 227static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
228 int namelen, loff_t hash, u64 ino, 228 int namelen, loff_t hash, u64 ino,
229 unsigned type) 229 unsigned int type)
230{ 230{
231 /* It is hack to access lde_fid for comparison with lgd_fid. 231 /* It is hack to access lde_fid for comparison with lgd_fid.
232 * So the input 'name' must be part of the 'lu_dirent'. 232 * So the input 'name' must be part of the 'lu_dirent'.
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 23fda9d98bff..03682c10fc9e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1060,10 +1060,6 @@ static const struct llite_file_opcode {
1060 "brw_read" }, 1060 "brw_read" },
1061 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES, 1061 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
1062 "brw_write" }, 1062 "brw_write" },
1063 { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1064 "osc_read" },
1065 { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1066 "osc_write" },
1067 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" }, 1063 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
1068 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" }, 1064 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
1069 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" }, 1065 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 180f35e3afd9..9426759aedc9 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -113,13 +113,18 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
113 if (inode->i_state & I_NEW) { 113 if (inode->i_state & I_NEW) {
114 rc = ll_read_inode2(inode, md); 114 rc = ll_read_inode2(inode, md);
115 if (!rc && S_ISREG(inode->i_mode) && 115 if (!rc && S_ISREG(inode->i_mode) &&
116 !ll_i2info(inode)->lli_clob) { 116 !ll_i2info(inode)->lli_clob)
117 CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n",
118 ll_get_fsname(sb, NULL, 0), md->lsm,
119 PFID(ll_inode2fid(inode)));
120 rc = cl_file_inode_init(inode, md); 117 rc = cl_file_inode_init(inode, md);
121 } 118
122 if (rc) { 119 if (rc) {
120 /*
121 * Let's clear directory lsm here, otherwise
122 * make_bad_inode() will reset the inode mode
123 * to regular, then ll_clear_inode will not
124 * be able to clear lsm_md
125 */
126 if (S_ISDIR(inode->i_mode))
127 ll_dir_clear_lsm_md(inode);
123 make_bad_inode(inode); 128 make_bad_inode(inode);
124 unlock_new_inode(inode); 129 unlock_new_inode(inode);
125 iput(inode); 130 iput(inode);
@@ -132,6 +137,8 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
132 CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n", 137 CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
133 PFID(&md->body->mbo_fid1), inode, rc); 138 PFID(&md->body->mbo_fid1), inode, rc);
134 if (rc) { 139 if (rc) {
140 if (S_ISDIR(inode->i_mode))
141 ll_dir_clear_lsm_md(inode);
135 iput(inode); 142 iput(inode);
136 inode = ERR_PTR(rc); 143 inode = ERR_PTR(rc);
137 } 144 }
@@ -258,7 +265,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
258 struct ll_inode_info *lli = ll_i2info(inode); 265 struct ll_inode_info *lli = ll_i2info(inode);
259 266
260 spin_lock(&lli->lli_lock); 267 spin_lock(&lli->lli_lock);
261 lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK; 268 LTIME_S(inode->i_mtime) = 0;
269 LTIME_S(inode->i_atime) = 0;
270 LTIME_S(inode->i_ctime) = 0;
262 spin_unlock(&lli->lli_lock); 271 spin_unlock(&lli->lli_lock);
263 } 272 }
264 273
@@ -287,11 +296,39 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
287 296
288 hash = cl_fid_build_ino(&lli->lli_pfid, 297 hash = cl_fid_build_ino(&lli->lli_pfid,
289 ll_need_32bit_api(ll_i2sbi(inode))); 298 ll_need_32bit_api(ll_i2sbi(inode)));
290 299 /*
291 master_inode = ilookup5(inode->i_sb, hash, 300 * Do not lookup the inode with ilookup5,
292 ll_test_inode_by_fid, 301 * otherwise it will cause dead lock,
293 (void *)&lli->lli_pfid); 302 *
294 if (master_inode && !IS_ERR(master_inode)) { 303 * 1. Client1 send chmod req to the MDT0, then
304 * on MDT0, it enqueues master and all of its
305 * slaves lock, (mdt_attr_set() ->
306 * mdt_lock_slaves()), after gets master and
307 * stripe0 lock, it will send the enqueue req
308 * (for stripe1) to MDT1, then MDT1 finds the
309 * lock has been granted to client2. Then MDT1
310 * sends blocking ast to client2.
311 *
312 * 2. At the same time, client2 tries to unlink
313 * the striped dir (rm -rf striped_dir), and
314 * during lookup, it will hold the master inode
315 * of the striped directory, whose inode state
316 * is NEW, then tries to revalidate all of its
317 * slaves, (ll_prep_inode()->ll_iget()->
318 * ll_read_inode2()-> ll_update_inode().). And
319 * it will be blocked on the server side because
320 * of 1.
321 *
322 * 3. Then the client get the blocking_ast req,
323 * cancel the lock, but being blocked if using
324 * ->ilookup5()), because master inode state is
325 * NEW.
326 */
327 master_inode = ilookup5_nowait(inode->i_sb,
328 hash,
329 ll_test_inode_by_fid,
330 (void *)&lli->lli_pfid);
331 if (master_inode) {
295 ll_invalidate_negative_children(master_inode); 332 ll_invalidate_negative_children(master_inode);
296 iput(master_inode); 333 iput(master_inode);
297 } 334 }
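The long comment above amounts to this: another thread's unlink path may hold the striped directory's master inode while it is still I_NEW and blocked server-side, and that server-side progress in turn waits for this blocking AST, so the AST handler must not sleep in ilookup5() waiting for I_NEW to clear. A condensed sketch of the non-blocking lookup the hunk switches to (same calls as in the hunk, comments added):

	/* ilookup5_nowait() never sleeps waiting for a new inode to finish
	 * initialization, which is exactly the wait that closes the deadlock
	 * loop described above; it returns the inode or NULL.
	 */
	master_inode = ilookup5_nowait(inode->i_sb, hash,
				       ll_test_inode_by_fid,
				       (void *)&lli->lli_pfid);
	if (master_inode) {
		ll_invalidate_negative_children(master_inode);
		iput(master_inode);
	}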
@@ -535,6 +572,10 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
535 } 572 }
536 } 573 }
537 574
575 if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
576 dentry->d_sb->s_flags & MS_RDONLY)
577 return ERR_PTR(-EROFS);
578
538 if (it->it_op & IT_CREAT) 579 if (it->it_op & IT_CREAT)
539 opc = LUSTRE_OPC_CREATE; 580 opc = LUSTRE_OPC_CREATE;
540 else 581 else
@@ -801,7 +842,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry,
801 return PTR_ERR(inode); 842 return PTR_ERR(inode);
802 843
803 d_instantiate(dentry, inode); 844 d_instantiate(dentry, inode);
804 return 0; 845
846 return ll_init_security(dentry, inode, dir);
805} 847}
806 848
807void ll_update_times(struct ptlrpc_request *request, struct inode *inode) 849void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
@@ -896,6 +938,8 @@ again:
896 goto err_exit; 938 goto err_exit;
897 939
898 d_instantiate(dentry, inode); 940 d_instantiate(dentry, inode);
941
942 err = ll_init_security(dentry, inode, dir);
899err_exit: 943err_exit:
900 if (request) 944 if (request)
901 ptlrpc_req_finished(request); 945 ptlrpc_req_finished(request);
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 76a6836cdf70..f10e092979fe 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -181,90 +181,73 @@ void ll_ras_enter(struct file *f)
181 spin_unlock(&ras->ras_lock); 181 spin_unlock(&ras->ras_lock);
182} 182}
183 183
184static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io, 184/**
185 struct cl_page_list *queue, struct cl_page *page, 185 * Initiates read-ahead of a page with given index.
186 struct cl_object *clob, pgoff_t *max_index) 186 *
187 * \retval +ve: page was already uptodate so it will be skipped
188 * from being added;
189 * \retval -ve: page wasn't added to \a queue for error;
190 * \retval 0: page was added into \a queue for read ahead.
191 */
192static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
193 struct cl_page_list *queue, pgoff_t index)
187{ 194{
188 struct page *vmpage = page->cp_vmpage; 195 enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
196 struct cl_object *clob = io->ci_obj;
197 struct inode *inode = vvp_object_inode(clob);
198 const char *msg = NULL;
199 struct cl_page *page;
189 struct vvp_page *vpg; 200 struct vvp_page *vpg;
190 int rc; 201 struct page *vmpage;
202 int rc = 0;
203
204 vmpage = grab_cache_page_nowait(inode->i_mapping, index);
205 if (!vmpage) {
206 which = RA_STAT_FAILED_GRAB_PAGE;
207 msg = "g_c_p_n failed";
208 rc = -EBUSY;
209 goto out;
210 }
211
212 /* Check if vmpage was truncated or reclaimed */
213 if (vmpage->mapping != inode->i_mapping) {
214 which = RA_STAT_WRONG_GRAB_PAGE;
215 msg = "g_c_p_n returned invalid page";
216 rc = -EBUSY;
217 goto out;
218 }
219
220 page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
221 if (IS_ERR(page)) {
222 which = RA_STAT_FAILED_GRAB_PAGE;
223 msg = "cl_page_find failed";
224 rc = PTR_ERR(page);
225 goto out;
226 }
191 227
192 rc = 0;
193 cl_page_assume(env, io, page);
194 lu_ref_add(&page->cp_reference, "ra", current); 228 lu_ref_add(&page->cp_reference, "ra", current);
229 cl_page_assume(env, io, page);
195 vpg = cl2vvp_page(cl_object_page_slice(clob, page)); 230 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
196 if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) { 231 if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
197 CDEBUG(D_READA, "page index %lu, max_index: %lu\n", 232 vpg->vpg_defer_uptodate = 1;
198 vvp_index(vpg), *max_index); 233 vpg->vpg_ra_used = 0;
199 if (*max_index == 0 || vvp_index(vpg) > *max_index) 234 cl_page_list_add(queue, page);
200 rc = cl_page_is_under_lock(env, io, page, max_index);
201 if (rc == 0) {
202 vpg->vpg_defer_uptodate = 1;
203 vpg->vpg_ra_used = 0;
204 cl_page_list_add(queue, page);
205 rc = 1;
206 } else {
207 cl_page_discard(env, io, page);
208 rc = -ENOLCK;
209 }
210 } else { 235 } else {
211 /* skip completed pages */ 236 /* skip completed pages */
212 cl_page_unassume(env, io, page); 237 cl_page_unassume(env, io, page);
238 /* This page is already uptodate, returning a positive number
239 * to tell the callers about this
240 */
241 rc = 1;
213 } 242 }
243
214 lu_ref_del(&page->cp_reference, "ra", current); 244 lu_ref_del(&page->cp_reference, "ra", current);
215 cl_page_put(env, page); 245 cl_page_put(env, page);
216 return rc; 246out:
217}
218
219/**
220 * Initiates read-ahead of a page with given index.
221 *
222 * \retval +ve: page was added to \a queue.
223 *
224 * \retval -ENOLCK: there is no extent lock for this part of a file, stop
225 * read-ahead.
226 *
227 * \retval -ve, 0: page wasn't added to \a queue for other reason.
228 */
229static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
230 struct cl_page_list *queue,
231 pgoff_t index, pgoff_t *max_index)
232{
233 struct cl_object *clob = io->ci_obj;
234 struct inode *inode = vvp_object_inode(clob);
235 struct page *vmpage;
236 struct cl_page *page;
237 enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
238 int rc = 0;
239 const char *msg = NULL;
240
241 vmpage = grab_cache_page_nowait(inode->i_mapping, index);
242 if (vmpage) { 247 if (vmpage) {
243 /* Check if vmpage was truncated or reclaimed */ 248 if (rc)
244 if (vmpage->mapping == inode->i_mapping) {
245 page = cl_page_find(env, clob, vmpage->index,
246 vmpage, CPT_CACHEABLE);
247 if (!IS_ERR(page)) {
248 rc = cl_read_ahead_page(env, io, queue,
249 page, clob, max_index);
250 if (rc == -ENOLCK) {
251 which = RA_STAT_FAILED_MATCH;
252 msg = "lock match failed";
253 }
254 } else {
255 which = RA_STAT_FAILED_GRAB_PAGE;
256 msg = "cl_page_find failed";
257 }
258 } else {
259 which = RA_STAT_WRONG_GRAB_PAGE;
260 msg = "g_c_p_n returned invalid page";
261 }
262 if (rc != 1)
263 unlock_page(vmpage); 249 unlock_page(vmpage);
264 put_page(vmpage); 250 put_page(vmpage);
265 } else {
266 which = RA_STAT_FAILED_GRAB_PAGE;
267 msg = "g_c_p_n failed";
268 } 251 }
269 if (msg) { 252 if (msg) {
270 ll_ra_stats_inc(inode, which); 253 ll_ra_stats_inc(inode, which);
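The reworked ll_read_ahead_page() above collapses the old two-step helper into a single function with a three-way return: 0 means the page was queued for read-ahead, a positive value means it was already uptodate and skipped, and a negative value is an error (the failure statistics are bumped inside the helper). A caller therefore only spends its page-reservation budget on a return of 0, roughly as the next hunk does:

	rc = ll_read_ahead_page(env, io, queue, page_idx);
	if (rc == 0) {
		/* queued: one reserved page consumed */
		(*reserved_pages)--;
		count++;
	}
	/* rc > 0: page already uptodate, silently skipped;
	 * rc < 0: grab/find failed, nothing queued for this index
	 */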
@@ -379,12 +362,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
379 struct cl_io *io, struct cl_page_list *queue, 362 struct cl_io *io, struct cl_page_list *queue,
380 struct ra_io_arg *ria, 363 struct ra_io_arg *ria,
381 unsigned long *reserved_pages, 364 unsigned long *reserved_pages,
382 unsigned long *ra_end) 365 pgoff_t *ra_end)
383{ 366{
367 struct cl_read_ahead ra = { 0 };
384 int rc, count = 0; 368 int rc, count = 0;
385 bool stride_ria; 369 bool stride_ria;
386 pgoff_t page_idx; 370 pgoff_t page_idx;
387 pgoff_t max_index = 0;
388 371
389 LASSERT(ria); 372 LASSERT(ria);
390 RIA_DEBUG(ria); 373 RIA_DEBUG(ria);
@@ -393,14 +376,23 @@ static int ll_read_ahead_pages(const struct lu_env *env,
393 for (page_idx = ria->ria_start; 376 for (page_idx = ria->ria_start;
394 page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { 377 page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
395 if (ras_inside_ra_window(page_idx, ria)) { 378 if (ras_inside_ra_window(page_idx, ria)) {
379 if (!ra.cra_end || ra.cra_end < page_idx) {
380 cl_read_ahead_release(env, &ra);
381
382 rc = cl_io_read_ahead(env, io, page_idx, &ra);
383 if (rc < 0)
384 break;
385
386 LASSERTF(ra.cra_end >= page_idx,
387 "object: %p, indcies %lu / %lu\n",
388 io->ci_obj, ra.cra_end, page_idx);
389 }
390
396 /* If the page is inside the read-ahead window*/ 391 /* If the page is inside the read-ahead window*/
397 rc = ll_read_ahead_page(env, io, queue, 392 rc = ll_read_ahead_page(env, io, queue, page_idx);
398 page_idx, &max_index); 393 if (!rc) {
399 if (rc == 1) {
400 (*reserved_pages)--; 394 (*reserved_pages)--;
401 count++; 395 count++;
402 } else if (rc == -ENOLCK) {
403 break;
404 } 396 }
405 } else if (stride_ria) { 397 } else if (stride_ria) {
406 /* If it is not in the read-ahead window, and it is 398 /* If it is not in the read-ahead window, and it is
@@ -426,19 +418,21 @@ static int ll_read_ahead_pages(const struct lu_env *env,
426 } 418 }
427 } 419 }
428 } 420 }
421 cl_read_ahead_release(env, &ra);
422
429 *ra_end = page_idx; 423 *ra_end = page_idx;
430 return count; 424 return count;
431} 425}
432 426
433int ll_readahead(const struct lu_env *env, struct cl_io *io, 427static int ll_readahead(const struct lu_env *env, struct cl_io *io,
434 struct cl_page_list *queue, struct ll_readahead_state *ras, 428 struct cl_page_list *queue,
435 bool hit) 429 struct ll_readahead_state *ras, bool hit)
436{ 430{
437 struct vvp_io *vio = vvp_env_io(env); 431 struct vvp_io *vio = vvp_env_io(env);
438 struct ll_thread_info *lti = ll_env_info(env); 432 struct ll_thread_info *lti = ll_env_info(env);
439 struct cl_attr *attr = vvp_env_thread_attr(env); 433 struct cl_attr *attr = vvp_env_thread_attr(env);
440 unsigned long start = 0, end = 0, reserved; 434 unsigned long len, mlen = 0, reserved;
441 unsigned long ra_end, len, mlen = 0; 435 pgoff_t ra_end, start = 0, end = 0;
442 struct inode *inode; 436 struct inode *inode;
443 struct ra_io_arg *ria = &lti->lti_ria; 437 struct ra_io_arg *ria = &lti->lti_ria;
444 struct cl_object *clob; 438 struct cl_object *clob;
@@ -464,30 +458,25 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
464 458
465 spin_lock(&ras->ras_lock); 459 spin_lock(&ras->ras_lock);
466 460
467 /* Enlarge the RA window to encompass the full read */ 461 /**
468 if (vio->vui_ra_valid && 462 * Note: other thread might rollback the ras_next_readahead,
469 ras->ras_window_start + ras->ras_window_len < 463 * if it can not get the full size of prepared pages, see the
470 vio->vui_ra_start + vio->vui_ra_count) { 464 * end of this function. For stride read ahead, it needs to
471 ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count - 465 * make sure the offset is no less than ras_stride_offset,
472 ras->ras_window_start; 466 * so that stride read ahead can work correctly.
473 } 467 */
468 if (stride_io_mode(ras))
469 start = max(ras->ras_next_readahead, ras->ras_stride_offset);
470 else
471 start = ras->ras_next_readahead;
474 472
475 /* Reserve a part of the read-ahead window that we'll be issuing */ 473 if (ras->ras_window_len > 0)
476 if (ras->ras_window_len > 0) {
477 /*
478 * Note: other thread might rollback the ras_next_readahead,
479 * if it can not get the full size of prepared pages, see the
480 * end of this function. For stride read ahead, it needs to
481 * make sure the offset is no less than ras_stride_offset,
482 * so that stride read ahead can work correctly.
483 */
484 if (stride_io_mode(ras))
485 start = max(ras->ras_next_readahead,
486 ras->ras_stride_offset);
487 else
488 start = ras->ras_next_readahead;
489 end = ras->ras_window_start + ras->ras_window_len - 1; 474 end = ras->ras_window_start + ras->ras_window_len - 1;
490 } 475
476 /* Enlarge the RA window to encompass the full read */
477 if (vio->vui_ra_valid &&
478 end < vio->vui_ra_start + vio->vui_ra_count - 1)
479 end = vio->vui_ra_start + vio->vui_ra_count - 1;
491 480
492 if (end != 0) { 481 if (end != 0) {
493 unsigned long rpc_boundary; 482 unsigned long rpc_boundary;
@@ -576,8 +565,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
576 * if the region we failed to issue read-ahead on is still ahead 565 * if the region we failed to issue read-ahead on is still ahead
577 * of the app and behind the next index to start read-ahead from 566 * of the app and behind the next index to start read-ahead from
578 */ 567 */
579 CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n", 568 CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
580 ra_end, end, ria->ria_end); 569 ra_end, end, ria->ria_end, ret);
581 570
582 if (ra_end != end + 1) { 571 if (ra_end != end + 1) {
583 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END); 572 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
@@ -609,7 +598,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
609 ras->ras_consecutive_pages = 0; 598 ras->ras_consecutive_pages = 0;
610 ras->ras_window_len = 0; 599 ras->ras_window_len = 0;
611 ras_set_start(inode, ras, index); 600 ras_set_start(inode, ras, index);
612 ras->ras_next_readahead = max(ras->ras_window_start, index); 601 ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
613 602
614 RAS_CDEBUG(ras); 603 RAS_CDEBUG(ras);
615} 604}
@@ -738,12 +727,13 @@ static void ras_increase_window(struct inode *inode,
738 ra->ra_max_pages_per_file); 727 ra->ra_max_pages_per_file);
739} 728}
740 729
741void ras_update(struct ll_sb_info *sbi, struct inode *inode, 730static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
742 struct ll_readahead_state *ras, unsigned long index, 731 struct ll_readahead_state *ras, unsigned long index,
743 unsigned hit) 732 enum ras_update_flags flags)
744{ 733{
745 struct ll_ra_info *ra = &sbi->ll_ra_info; 734 struct ll_ra_info *ra = &sbi->ll_ra_info;
746 int zero = 0, stride_detect = 0, ra_miss = 0; 735 int zero = 0, stride_detect = 0, ra_miss = 0;
736 bool hit = flags & LL_RAS_HIT;
747 737
748 spin_lock(&ras->ras_lock); 738 spin_lock(&ras->ras_lock);
749 739
@@ -773,7 +763,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
773 * to for subsequent IO. The mmap case does not increment 763 * to for subsequent IO. The mmap case does not increment
774 * ras_requests and thus can never trigger this behavior. 764 * ras_requests and thus can never trigger this behavior.
775 */ 765 */
776 if (ras->ras_requests == 2 && !ras->ras_request_index) { 766 if (ras->ras_requests >= 2 && !ras->ras_request_index) {
777 __u64 kms_pages; 767 __u64 kms_pages;
778 768
779 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> 769 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
@@ -785,8 +775,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
785 if (kms_pages && 775 if (kms_pages &&
786 kms_pages <= ra->ra_max_read_ahead_whole_pages) { 776 kms_pages <= ra->ra_max_read_ahead_whole_pages) {
787 ras->ras_window_start = 0; 777 ras->ras_window_start = 0;
788 ras->ras_last_readpage = 0; 778 ras->ras_next_readahead = index + 1;
789 ras->ras_next_readahead = 0;
790 ras->ras_window_len = min(ra->ra_max_pages_per_file, 779 ras->ras_window_len = min(ra->ra_max_pages_per_file,
791 ra->ra_max_read_ahead_whole_pages); 780 ra->ra_max_read_ahead_whole_pages);
792 goto out_unlock; 781 goto out_unlock;
@@ -816,13 +805,20 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
816 if (ra_miss) { 805 if (ra_miss) {
817 if (index_in_stride_window(ras, index) && 806 if (index_in_stride_window(ras, index) &&
818 stride_io_mode(ras)) { 807 stride_io_mode(ras)) {
819 /*If stride-RA hit cache miss, the stride dector
820 *will not be reset to avoid the overhead of
821 *redetecting read-ahead mode
822 */
823 if (index != ras->ras_last_readpage + 1) 808 if (index != ras->ras_last_readpage + 1)
824 ras->ras_consecutive_pages = 0; 809 ras->ras_consecutive_pages = 0;
825 ras_reset(inode, ras, index); 810 ras_reset(inode, ras, index);
811
812 /* If stride-RA hit cache miss, the stride
813 * detector will not be reset to avoid the
814 * overhead of redetecting read-ahead mode,
815 * but on the condition that the stride window
816 * is still intersect with normal sequential
817 * read-ahead window.
818 */
819 if (ras->ras_window_start <
820 ras->ras_stride_offset)
821 ras_stride_reset(ras);
826 RAS_CDEBUG(ras); 822 RAS_CDEBUG(ras);
827 } else { 823 } else {
828 /* Reset both stride window and normal RA 824 /* Reset both stride window and normal RA
@@ -867,8 +863,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
867 /* Trigger RA in the mmap case where ras_consecutive_requests 863 /* Trigger RA in the mmap case where ras_consecutive_requests
868 * is not incremented and thus can't be used to trigger RA 864 * is not incremented and thus can't be used to trigger RA
869 */ 865 */
870 if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { 866 if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
871 ras->ras_window_len = RAS_INCREASE_STEP(inode); 867 ras_increase_window(inode, ras, ra);
868 /*
869 * reset consecutive pages so that the readahead window can
870 * grow gradually.
871 */
872 ras->ras_consecutive_pages = 0;
872 goto out_unlock; 873 goto out_unlock;
873 } 874 }
874 875
@@ -903,17 +904,17 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
903 struct cl_io *io; 904 struct cl_io *io;
904 struct cl_page *page; 905 struct cl_page *page;
905 struct cl_object *clob; 906 struct cl_object *clob;
906 struct cl_env_nest nest;
907 bool redirtied = false; 907 bool redirtied = false;
908 bool unlocked = false; 908 bool unlocked = false;
909 int result; 909 int result;
910 int refcheck;
910 911
911 LASSERT(PageLocked(vmpage)); 912 LASSERT(PageLocked(vmpage));
912 LASSERT(!PageWriteback(vmpage)); 913 LASSERT(!PageWriteback(vmpage));
913 914
914 LASSERT(ll_i2dtexp(inode)); 915 LASSERT(ll_i2dtexp(inode));
915 916
916 env = cl_env_nested_get(&nest); 917 env = cl_env_get(&refcheck);
917 if (IS_ERR(env)) { 918 if (IS_ERR(env)) {
918 result = PTR_ERR(env); 919 result = PTR_ERR(env);
919 goto out; 920 goto out;
@@ -978,7 +979,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
978 } 979 }
979 } 980 }
980 981
981 cl_env_nested_put(&nest, env); 982 cl_env_put(env, &refcheck);
982 goto out; 983 goto out;
983 984
984out: 985out:
@@ -1088,6 +1089,63 @@ void ll_cl_remove(struct file *file, const struct lu_env *env)
1088 write_unlock(&fd->fd_lock); 1089 write_unlock(&fd->fd_lock);
1089} 1090}
1090 1091
1092static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
1093 struct cl_page *page)
1094{
1095 struct inode *inode = vvp_object_inode(page->cp_obj);
1096 struct ll_file_data *fd = vvp_env_io(env)->vui_fd;
1097 struct ll_readahead_state *ras = &fd->fd_ras;
1098 struct cl_2queue *queue = &io->ci_queue;
1099 struct ll_sb_info *sbi = ll_i2sbi(inode);
1100 struct vvp_page *vpg;
1101 int rc = 0;
1102
1103 vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
1104 if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
1105 sbi->ll_ra_info.ra_max_pages > 0) {
1106 struct vvp_io *vio = vvp_env_io(env);
1107 enum ras_update_flags flags = 0;
1108
1109 if (vpg->vpg_defer_uptodate)
1110 flags |= LL_RAS_HIT;
1111 if (!vio->vui_ra_valid)
1112 flags |= LL_RAS_MMAP;
1113 ras_update(sbi, inode, ras, vvp_index(vpg), flags);
1114 }
1115
1116 if (vpg->vpg_defer_uptodate) {
1117 vpg->vpg_ra_used = 1;
1118 cl_page_export(env, page, 1);
1119 }
1120
1121 cl_2queue_init(queue);
1122 /*
1123 * Add page into the queue even when it is marked uptodate above.
1124 * this will unlock it automatically as part of cl_page_list_disown().
1125 */
1126 cl_page_list_add(&queue->c2_qin, page);
1127 if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
1128 sbi->ll_ra_info.ra_max_pages > 0) {
1129 int rc2;
1130
1131 rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
1132 vpg->vpg_defer_uptodate);
1133 CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
1134 PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
1135 }
1136
1137 if (queue->c2_qin.pl_nr > 0)
1138 rc = cl_io_submit_rw(env, io, CRT_READ, queue);
1139
1140 /*
1141 * Unlock unsent pages in case of error.
1142 */
1143 cl_page_list_disown(env, io, &queue->c2_qin);
1144 cl_2queue_fini(env, queue);
1145
1146 return rc;
1147}
1148
1091int ll_readpage(struct file *file, struct page *vmpage) 1149int ll_readpage(struct file *file, struct page *vmpage)
1092{ 1150{
1093 struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob; 1151 struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
@@ -1111,7 +1169,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
1111 LASSERT(page->cp_type == CPT_CACHEABLE); 1169 LASSERT(page->cp_type == CPT_CACHEABLE);
1112 if (likely(!PageUptodate(vmpage))) { 1170 if (likely(!PageUptodate(vmpage))) {
1113 cl_page_assume(env, io, page); 1171 cl_page_assume(env, io, page);
1114 result = cl_io_read_page(env, io, page); 1172 result = ll_io_read_page(env, io, page);
1115 } else { 1173 } else {
1116 /* Page from a non-object file. */ 1174 /* Page from a non-object file. */
1117 unlock_page(vmpage); 1175 unlock_page(vmpage);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37873a7..21e06e5b514e 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -71,8 +71,6 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
71 struct cl_page *page; 71 struct cl_page *page;
72 struct cl_object *obj; 72 struct cl_object *obj;
73 73
74 int refcheck;
75
76 LASSERT(PageLocked(vmpage)); 74 LASSERT(PageLocked(vmpage));
77 LASSERT(!PageWriteback(vmpage)); 75 LASSERT(!PageWriteback(vmpage));
78 76
@@ -82,28 +80,27 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
82 * happening with locked page too 80 * happening with locked page too
83 */ 81 */
84 if (offset == 0 && length == PAGE_SIZE) { 82 if (offset == 0 && length == PAGE_SIZE) {
85 env = cl_env_get(&refcheck); 83 /* See the comment in ll_releasepage() */
86 if (!IS_ERR(env)) { 84 env = cl_env_percpu_get();
87 inode = vmpage->mapping->host; 85 LASSERT(!IS_ERR(env));
88 obj = ll_i2info(inode)->lli_clob; 86 inode = vmpage->mapping->host;
89 if (obj) { 87 obj = ll_i2info(inode)->lli_clob;
90 page = cl_vmpage_page(vmpage, obj); 88 if (obj) {
91 if (page) { 89 page = cl_vmpage_page(vmpage, obj);
92 cl_page_delete(env, page); 90 if (page) {
93 cl_page_put(env, page); 91 cl_page_delete(env, page);
94 } 92 cl_page_put(env, page);
95 } else {
96 LASSERT(vmpage->private == 0);
97 } 93 }
98 cl_env_put(env, &refcheck); 94 } else {
95 LASSERT(vmpage->private == 0);
99 } 96 }
97 cl_env_percpu_put(env);
100 } 98 }
101} 99}
102 100
103static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) 101static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
104{ 102{
105 struct lu_env *env; 103 struct lu_env *env;
106 void *cookie;
107 struct cl_object *obj; 104 struct cl_object *obj;
108 struct cl_page *page; 105 struct cl_page *page;
109 struct address_space *mapping; 106 struct address_space *mapping;
@@ -129,7 +126,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
129 if (!page) 126 if (!page)
130 return 1; 127 return 1;
131 128
132 cookie = cl_env_reenter();
133 env = cl_env_percpu_get(); 129 env = cl_env_percpu_get();
134 LASSERT(!IS_ERR(env)); 130 LASSERT(!IS_ERR(env));
135 131
@@ -155,7 +151,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
155 cl_page_put(env, page); 151 cl_page_put(env, page);
156 152
157 cl_env_percpu_put(env); 153 cl_env_percpu_put(env);
158 cl_env_reexit(cookie);
159 return result; 154 return result;
160} 155}
161 156
@@ -340,19 +335,15 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
340 PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) 335 PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
341static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter) 336static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
342{ 337{
343 struct lu_env *env; 338 struct ll_cl_context *lcc;
339 const struct lu_env *env;
344 struct cl_io *io; 340 struct cl_io *io;
345 struct file *file = iocb->ki_filp; 341 struct file *file = iocb->ki_filp;
346 struct inode *inode = file->f_mapping->host; 342 struct inode *inode = file->f_mapping->host;
347 loff_t file_offset = iocb->ki_pos; 343 loff_t file_offset = iocb->ki_pos;
348 ssize_t count = iov_iter_count(iter); 344 ssize_t count = iov_iter_count(iter);
349 ssize_t tot_bytes = 0, result = 0; 345 ssize_t tot_bytes = 0, result = 0;
350 struct ll_inode_info *lli = ll_i2info(inode);
351 long size = MAX_DIO_SIZE; 346 long size = MAX_DIO_SIZE;
352 int refcheck;
353
354 if (!lli->lli_has_smd)
355 return -EBADF;
356 347
357 /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ 348 /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
358 if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK)) 349 if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
@@ -367,9 +358,13 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
367 if (iov_iter_alignment(iter) & ~PAGE_MASK) 358 if (iov_iter_alignment(iter) & ~PAGE_MASK)
368 return -EINVAL; 359 return -EINVAL;
369 360
370 env = cl_env_get(&refcheck); 361 lcc = ll_cl_find(file);
362 if (!lcc)
363 return -EIO;
364
365 env = lcc->lcc_env;
371 LASSERT(!IS_ERR(env)); 366 LASSERT(!IS_ERR(env));
372 io = vvp_env_io(env)->vui_cl.cis_io; 367 io = lcc->lcc_io;
373 LASSERT(io); 368 LASSERT(io);
374 369
375 while (iov_iter_count(iter)) { 370 while (iov_iter_count(iter)) {
@@ -426,7 +421,6 @@ out:
426 vio->u.write.vui_written += tot_bytes; 421 vio->u.write.vui_written += tot_bytes;
427 } 422 }
428 423
429 cl_env_put(env, &refcheck);
430 return tot_bytes ? tot_bytes : result; 424 return tot_bytes ? tot_bytes : result;
431} 425}
432 426
@@ -466,13 +460,13 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
466} 460}
467 461
468static int ll_write_begin(struct file *file, struct address_space *mapping, 462static int ll_write_begin(struct file *file, struct address_space *mapping,
469 loff_t pos, unsigned len, unsigned flags, 463 loff_t pos, unsigned int len, unsigned int flags,
470 struct page **pagep, void **fsdata) 464 struct page **pagep, void **fsdata)
471{ 465{
472 struct ll_cl_context *lcc; 466 struct ll_cl_context *lcc;
473 const struct lu_env *env; 467 const struct lu_env *env = NULL;
474 struct cl_io *io; 468 struct cl_io *io;
475 struct cl_page *page; 469 struct cl_page *page = NULL;
476 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; 470 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
477 pgoff_t index = pos >> PAGE_SHIFT; 471 pgoff_t index = pos >> PAGE_SHIFT;
478 struct page *vmpage = NULL; 472 struct page *vmpage = NULL;
@@ -484,6 +478,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
484 478
485 lcc = ll_cl_find(file); 479 lcc = ll_cl_find(file);
486 if (!lcc) { 480 if (!lcc) {
481 io = NULL;
487 result = -EIO; 482 result = -EIO;
488 goto out; 483 goto out;
489 } 484 }
@@ -560,6 +555,12 @@ out:
560 unlock_page(vmpage); 555 unlock_page(vmpage);
561 put_page(vmpage); 556 put_page(vmpage);
562 } 557 }
558 if (!IS_ERR_OR_NULL(page)) {
559 lu_ref_del(&page->cp_reference, "cl_io", io);
560 cl_page_put(env, page);
561 }
562 if (io)
563 io->ci_result = result;
563 } else { 564 } else {
564 *pagep = vmpage; 565 *pagep = vmpage;
565 *fsdata = lcc; 566 *fsdata = lcc;
@@ -576,7 +577,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
576 struct cl_io *io; 577 struct cl_io *io;
577 struct vvp_io *vio; 578 struct vvp_io *vio;
578 struct cl_page *page; 579 struct cl_page *page;
579 unsigned from = pos & (PAGE_SIZE - 1); 580 unsigned int from = pos & (PAGE_SIZE - 1);
580 bool unplug = false; 581 bool unplug = false;
581 int result = 0; 582 int result = 0;
582 583
@@ -629,6 +630,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
629 file->f_flags & O_SYNC || IS_SYNC(file_inode(file))) 630 file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
630 result = vvp_io_write_commit(env, io); 631 result = vvp_io_write_commit(env, io);
631 632
633 if (result < 0)
634 io->ci_result = result;
632 return result >= 0 ? copied : result; 635 return result >= 0 ? copied : result;
633} 636}
634 637
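The rw26.c hunks above replace per-call environment allocation with reuse of an existing context: ll_invalidatepage() now takes the per-CPU environment (cl_env_percpu_get()/cl_env_percpu_put()), ll_releasepage() drops the cl_env_reenter()/cl_env_reexit() cookie, and ll_direct_IO_26() borrows lcc->lcc_env from ll_cl_find() instead of calling cl_env_get(). A loose userspace sketch of the same idea — borrow a per-thread context rather than allocate and refcount one on every call — follows; it is only an analogue, and every name in it is invented for the example.

    /*
     * Userspace analogue: a per-thread context that is created once and
     * then borrowed by every call, instead of being allocated per call.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct io_env {
        char scratch[64];       /* per-context scratch space */
        unsigned long uses;     /* how many times it was borrowed */
    };

    static pthread_key_t env_key;
    static pthread_once_t env_once = PTHREAD_ONCE_INIT;

    static void env_key_init(void)
    {
        pthread_key_create(&env_key, free);
    }

    /* Borrow the calling thread's context, creating it on first use. */
    static struct io_env *env_get(void)
    {
        struct io_env *env;

        pthread_once(&env_once, env_key_init);
        env = pthread_getspecific(env_key);
        if (!env) {
            env = calloc(1, sizeof(*env));
            if (!env)
                return NULL;
            pthread_setspecific(env_key, env);
        }
        env->uses++;
        return env;
    }

    int main(void)
    {
        struct io_env *env = env_get();

        if (!env)
            return 1;
        snprintf(env->scratch, sizeof(env->scratch),
                 "context borrowed %lu time(s)", env->uses);
        puts(env->scratch);
        return 0;
    }

The benefit is the same in both settings: once the context exists, the hot path does no allocation and no extra reference counting.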
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 0677513476ec..4769a2230ae1 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -659,8 +659,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
659 struct ll_inode_info *lli = ll_i2info(dir); 659 struct ll_inode_info *lli = ll_i2info(dir);
660 struct ll_statahead_info *sai = lli->lli_sai; 660 struct ll_statahead_info *sai = lli->lli_sai;
661 struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata; 661 struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
662 wait_queue_head_t *waitq = NULL;
662 __u64 handle = 0; 663 __u64 handle = 0;
663 bool wakeup;
664 664
665 if (it_disposition(it, DISP_LOOKUP_NEG)) 665 if (it_disposition(it, DISP_LOOKUP_NEG))
666 rc = -ENOENT; 666 rc = -ENOENT;
@@ -693,7 +693,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
693 693
694 spin_lock(&lli->lli_sa_lock); 694 spin_lock(&lli->lli_sa_lock);
695 if (rc) { 695 if (rc) {
696 wakeup = __sa_make_ready(sai, entry, rc); 696 if (__sa_make_ready(sai, entry, rc))
697 waitq = &sai->sai_waitq;
697 } else { 698 } else {
698 entry->se_minfo = minfo; 699 entry->se_minfo = minfo;
699 entry->se_req = ptlrpc_request_addref(req); 700 entry->se_req = ptlrpc_request_addref(req);
@@ -704,13 +705,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
704 * with parent's lock held, for example: unlink. 705 * with parent's lock held, for example: unlink.
705 */ 706 */
706 entry->se_handle = handle; 707 entry->se_handle = handle;
707 wakeup = !sa_has_callback(sai); 708 if (!sa_has_callback(sai))
709 waitq = &sai->sai_thread.t_ctl_waitq;
710
708 list_add_tail(&entry->se_list, &sai->sai_interim_entries); 711 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
709 } 712 }
710 sai->sai_replied++; 713 sai->sai_replied++;
711 714
712 if (wakeup) 715 if (waitq)
713 wake_up(&sai->sai_thread.t_ctl_waitq); 716 wake_up(waitq);
714 spin_unlock(&lli->lli_sa_lock); 717 spin_unlock(&lli->lli_sa_lock);
715 718
716 return rc; 719 return rc;
@@ -1397,10 +1400,10 @@ static int revalidate_statahead_dentry(struct inode *dir,
1397 struct dentry **dentryp, 1400 struct dentry **dentryp,
1398 bool unplug) 1401 bool unplug)
1399{ 1402{
1403 struct ll_inode_info *lli = ll_i2info(dir);
1400 struct sa_entry *entry = NULL; 1404 struct sa_entry *entry = NULL;
1401 struct l_wait_info lwi = { 0 }; 1405 struct l_wait_info lwi = { 0 };
1402 struct ll_dentry_data *ldd; 1406 struct ll_dentry_data *ldd;
1403 struct ll_inode_info *lli;
1404 int rc = 0; 1407 int rc = 0;
1405 1408
1406 if ((*dentryp)->d_name.name[0] == '.') { 1409 if ((*dentryp)->d_name.name[0] == '.') {
@@ -1446,7 +1449,9 @@ static int revalidate_statahead_dentry(struct inode *dir,
1446 sa_handle_callback(sai); 1449 sa_handle_callback(sai);
1447 1450
1448 if (!sa_ready(entry)) { 1451 if (!sa_ready(entry)) {
1452 spin_lock(&lli->lli_sa_lock);
1449 sai->sai_index_wait = entry->se_index; 1453 sai->sai_index_wait = entry->se_index;
1454 spin_unlock(&lli->lli_sa_lock);
1450 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL, 1455 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1451 LWI_ON_SIGNAL_NOOP, NULL); 1456 LWI_ON_SIGNAL_NOOP, NULL);
1452 rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi); 1457 rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
@@ -1475,6 +1480,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
1475 1480
1476 alias = ll_splice_alias(inode, *dentryp); 1481 alias = ll_splice_alias(inode, *dentryp);
1477 if (IS_ERR(alias)) { 1482 if (IS_ERR(alias)) {
1483 ll_intent_release(&it);
1478 rc = PTR_ERR(alias); 1484 rc = PTR_ERR(alias);
1479 goto out_unplug; 1485 goto out_unplug;
1480 } 1486 }
@@ -1493,6 +1499,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
1493 *dentryp, 1499 *dentryp,
1494 PFID(ll_inode2fid((*dentryp)->d_inode)), 1500 PFID(ll_inode2fid((*dentryp)->d_inode)),
1495 PFID(ll_inode2fid(inode))); 1501 PFID(ll_inode2fid(inode)));
1502 ll_intent_release(&it);
1496 rc = -ESTALE; 1503 rc = -ESTALE;
1497 goto out_unplug; 1504 goto out_unplug;
1498 } 1505 }
@@ -1512,7 +1519,6 @@ out_unplug:
1512 * dentry_may_statahead(). 1519 * dentry_may_statahead().
1513 */ 1520 */
1514 ldd = ll_d2d(*dentryp); 1521 ldd = ll_d2d(*dentryp);
1515 lli = ll_i2info(dir);
1516 /* ldd can be NULL if llite lookup failed. */ 1522 /* ldd can be NULL if llite lookup failed. */
1517 if (ldd) 1523 if (ldd)
1518 ldd->lld_sa_generation = lli->lli_sa_generation; 1524 ldd->lld_sa_generation = lli->lli_sa_generation;
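The statahead.c change keeps the decision of whom to wake inside the critical section: instead of a wakeup boolean that always targeted sai_thread.t_ctl_waitq, the interpret callback now records a wait-queue pointer (sai->sai_waitq for a failed entry, or the thread control queue when a callback needs to run) and wakes only that queue. A rough userspace analogue with condition variables is sketched below; the names and the two conditions are invented for illustration.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t entry_ready = PTHREAD_COND_INITIALIZER;  /* like sai->sai_waitq */
    static pthread_cond_t worker_kick = PTHREAD_COND_INITIALIZER;  /* like the thread ctl waitq */

    static bool entry_done;
    static bool callback_pending;

    /* Called when a statahead-style reply arrives. */
    static void reply_arrived(bool error)
    {
        pthread_cond_t *waitq = NULL;

        pthread_mutex_lock(&lock);
        if (error) {
            entry_done = true;          /* the waiter can consume the error */
            waitq = &entry_ready;
        } else if (!callback_pending) {
            callback_pending = true;    /* the worker must run the callback */
            waitq = &worker_kick;
        }
        if (waitq)                      /* wake only the queue chosen under the lock */
            pthread_cond_signal(waitq);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        reply_arrived(false);
        reply_arrived(true);
        printf("entry_done=%d callback_pending=%d\n", entry_done, callback_pending);
        return 0;
    }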
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8aa8ecc09a48..12c129f7e4ad 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -55,7 +55,6 @@
55static struct kmem_cache *ll_thread_kmem; 55static struct kmem_cache *ll_thread_kmem;
56struct kmem_cache *vvp_lock_kmem; 56struct kmem_cache *vvp_lock_kmem;
57struct kmem_cache *vvp_object_kmem; 57struct kmem_cache *vvp_object_kmem;
58struct kmem_cache *vvp_req_kmem;
59static struct kmem_cache *vvp_session_kmem; 58static struct kmem_cache *vvp_session_kmem;
60static struct kmem_cache *vvp_thread_kmem; 59static struct kmem_cache *vvp_thread_kmem;
61 60
@@ -76,11 +75,6 @@ static struct lu_kmem_descr vvp_caches[] = {
76 .ckd_size = sizeof(struct vvp_object), 75 .ckd_size = sizeof(struct vvp_object),
77 }, 76 },
78 { 77 {
79 .ckd_cache = &vvp_req_kmem,
80 .ckd_name = "vvp_req_kmem",
81 .ckd_size = sizeof(struct vvp_req),
82 },
83 {
84 .ckd_cache = &vvp_session_kmem, 78 .ckd_cache = &vvp_session_kmem,
85 .ckd_name = "vvp_session_kmem", 79 .ckd_name = "vvp_session_kmem",
86 .ckd_size = sizeof(struct vvp_session) 80 .ckd_size = sizeof(struct vvp_session)
@@ -177,10 +171,6 @@ static const struct lu_device_operations vvp_lu_ops = {
177 .ldo_object_alloc = vvp_object_alloc 171 .ldo_object_alloc = vvp_object_alloc
178}; 172};
179 173
180static const struct cl_device_operations vvp_cl_ops = {
181 .cdo_req_init = vvp_req_init
182};
183
184static struct lu_device *vvp_device_free(const struct lu_env *env, 174static struct lu_device *vvp_device_free(const struct lu_env *env,
185 struct lu_device *d) 175 struct lu_device *d)
186{ 176{
@@ -213,7 +203,6 @@ static struct lu_device *vvp_device_alloc(const struct lu_env *env,
213 lud = &vdv->vdv_cl.cd_lu_dev; 203 lud = &vdv->vdv_cl.cd_lu_dev;
214 cl_device_init(&vdv->vdv_cl, t); 204 cl_device_init(&vdv->vdv_cl, t);
215 vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops; 205 vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
216 vdv->vdv_cl.cd_ops = &vvp_cl_ops;
217 206
218 site = kzalloc(sizeof(*site), GFP_NOFS); 207 site = kzalloc(sizeof(*site), GFP_NOFS);
219 if (site) { 208 if (site) {
@@ -332,7 +321,6 @@ int cl_sb_init(struct super_block *sb)
332 cl = cl_type_setup(env, NULL, &vvp_device_type, 321 cl = cl_type_setup(env, NULL, &vvp_device_type,
333 sbi->ll_dt_exp->exp_obd->obd_lu_dev); 322 sbi->ll_dt_exp->exp_obd->obd_lu_dev);
334 if (!IS_ERR(cl)) { 323 if (!IS_ERR(cl)) {
335 cl2vvp_dev(cl)->vdv_sb = sb;
336 sbi->ll_cl = cl; 324 sbi->ll_cl = cl;
337 sbi->ll_site = cl2lu_dev(cl)->ld_site; 325 sbi->ll_site = cl2lu_dev(cl)->ld_site;
338 } 326 }
@@ -521,11 +509,10 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
521 509
522 vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); 510 vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
523 vmpage = vpg->vpg_page; 511 vmpage = vpg->vpg_page;
524 seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [", 512 seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [",
525 0 /* gen */, 513 0 /* gen */,
526 vpg, page, 514 vpg, page,
527 "none", 515 "none",
528 vpg->vpg_write_queued ? "wq" : "- ",
529 vpg->vpg_defer_uptodate ? "du" : "- ", 516 vpg->vpg_defer_uptodate ? "du" : "- ",
530 PageWriteback(vmpage) ? "wb" : "-", 517 PageWriteback(vmpage) ? "wb" : "-",
531 vmpage, PFID(ll_inode2fid(vmpage->mapping->host)), 518 vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 4464ad258387..c60d0414ac25 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -42,9 +42,7 @@
42 42
43enum obd_notify_event; 43enum obd_notify_event;
44struct inode; 44struct inode;
45struct lov_stripe_md;
46struct lustre_md; 45struct lustre_md;
47struct obd_capa;
48struct obd_device; 46struct obd_device;
49struct obd_export; 47struct obd_export;
50struct page; 48struct page;
@@ -122,7 +120,6 @@ extern struct lu_context_key vvp_thread_key;
122 120
123extern struct kmem_cache *vvp_lock_kmem; 121extern struct kmem_cache *vvp_lock_kmem;
124extern struct kmem_cache *vvp_object_kmem; 122extern struct kmem_cache *vvp_object_kmem;
125extern struct kmem_cache *vvp_req_kmem;
126 123
127struct vvp_thread_info { 124struct vvp_thread_info {
128 struct cl_lock vti_lock; 125 struct cl_lock vti_lock;
@@ -195,14 +192,6 @@ struct vvp_object {
195 struct inode *vob_inode; 192 struct inode *vob_inode;
196 193
197 /** 194 /**
198 * A list of dirty pages pending IO in the cache. Used by
199 * SOM. Protected by ll_inode_info::lli_lock.
200 *
201 * \see vvp_page::vpg_pending_linkage
202 */
203 struct list_head vob_pending_list;
204
205 /**
206 * Number of transient pages. This is no longer protected by i_sem, 195 * Number of transient pages. This is no longer protected by i_sem,
207 * and needs to be atomic. This is not actually used for anything, 196 * and needs to be atomic. This is not actually used for anything,
208 * and can probably be removed. 197 * and can probably be removed.
@@ -235,15 +224,7 @@ struct vvp_object {
235struct vvp_page { 224struct vvp_page {
236 struct cl_page_slice vpg_cl; 225 struct cl_page_slice vpg_cl;
237 unsigned int vpg_defer_uptodate:1, 226 unsigned int vpg_defer_uptodate:1,
238 vpg_ra_used:1, 227 vpg_ra_used:1;
239 vpg_write_queued:1;
240 /**
241 * Non-empty iff this page is already counted in
242 * vvp_object::vob_pending_list. This list is only used as a flag,
243 * that is, never iterated through, only checked for list_empty(), but
244 * having a list is useful for debugging.
245 */
246 struct list_head vpg_pending_linkage;
247 /** VM page */ 228 /** VM page */
248 struct page *vpg_page; 229 struct page *vpg_page;
249}; 230};
@@ -260,7 +241,6 @@ static inline pgoff_t vvp_index(struct vvp_page *vvp)
260 241
261struct vvp_device { 242struct vvp_device {
262 struct cl_device vdv_cl; 243 struct cl_device vdv_cl;
263 struct super_block *vdv_sb;
264 struct cl_device *vdv_next; 244 struct cl_device *vdv_next;
265}; 245};
266 246
@@ -268,10 +248,6 @@ struct vvp_lock {
268 struct cl_lock_slice vlk_cl; 248 struct cl_lock_slice vlk_cl;
269}; 249};
270 250
271struct vvp_req {
272 struct cl_req_slice vrq_cl;
273};
274
275void *ccc_key_init(const struct lu_context *ctx, 251void *ccc_key_init(const struct lu_context *ctx,
276 struct lu_context_key *key); 252 struct lu_context_key *key);
277void ccc_key_fini(const struct lu_context *ctx, 253void ccc_key_fini(const struct lu_context *ctx,
@@ -325,21 +301,8 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
325# define CLOBINVRNT(env, clob, expr) \ 301# define CLOBINVRNT(env, clob, expr) \
326 ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr))) 302 ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
327 303
328/**
329 * New interfaces to get and put lov_stripe_md from lov layer. This violates
330 * layering because lov_stripe_md is supposed to be a private data in lov.
331 *
332 * NB: If you find you have to use these interfaces for your new code, please
333 * think about it again. These interfaces may be removed in the future for
334 * better layering.
335 */
336struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
337void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
338int lov_read_and_clear_async_rc(struct cl_object *clob); 304int lov_read_and_clear_async_rc(struct cl_object *clob);
339 305
340struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
341void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
342
343int vvp_io_init(const struct lu_env *env, struct cl_object *obj, 306int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
344 struct cl_io *io); 307 struct cl_io *io);
345int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); 308int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -347,8 +310,6 @@ int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
347 struct cl_lock *lock, const struct cl_io *io); 310 struct cl_lock *lock, const struct cl_io *io);
348int vvp_page_init(const struct lu_env *env, struct cl_object *obj, 311int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
349 struct cl_page *page, pgoff_t index); 312 struct cl_page *page, pgoff_t index);
350int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
351 struct cl_req *req);
352struct lu_object *vvp_object_alloc(const struct lu_env *env, 313struct lu_object *vvp_object_alloc(const struct lu_env *env,
353 const struct lu_object_header *hdr, 314 const struct lu_object_header *hdr,
354 struct lu_device *dev); 315 struct lu_device *dev);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2b7f182a15e2..0b6d388d8aa4 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -72,9 +72,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
72 /* don't need lock here to check lli_layout_gen as we have held 72 /* don't need lock here to check lli_layout_gen as we have held
73 * extent lock and GROUP lock has to hold to swap layout 73 * extent lock and GROUP lock has to hold to swap layout
74 */ 74 */
75 if (ll_layout_version_get(lli) != vio->vui_layout_gen) { 75 if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
76 OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
76 io->ci_need_restart = 1; 77 io->ci_need_restart = 1;
77 /* this will return application a short read/write */ 78 /* this will cause a short read/write */
78 io->ci_continue = 0; 79 io->ci_continue = 0;
79 rc = false; 80 rc = false;
80 } 81 }
@@ -328,8 +329,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
328 vio->vui_layout_gen, gen); 329 vio->vui_layout_gen, gen);
329 /* today successful restore is the only possible case */ 330 /* today successful restore is the only possible case */
330 /* restore was done, clear restoring state */ 331 /* restore was done, clear restoring state */
331 ll_i2info(vvp_object_inode(obj))->lli_flags &= 332 clear_bit(LLIF_FILE_RESTORING,
332 ~LLIF_FILE_RESTORING; 333 &ll_i2info(inode)->lli_flags);
333 } 334 }
334 } 335 }
335} 336}
@@ -369,7 +370,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
369 struct mm_struct *mm = current->mm; 370 struct mm_struct *mm = current->mm;
370 struct vm_area_struct *vma; 371 struct vm_area_struct *vma;
371 struct cl_lock_descr *descr = &cti->vti_descr; 372 struct cl_lock_descr *descr = &cti->vti_descr;
372 ldlm_policy_data_t policy; 373 union ldlm_policy_data policy;
373 unsigned long addr; 374 unsigned long addr;
374 ssize_t count; 375 ssize_t count;
375 int result = 0; 376 int result = 0;
@@ -450,7 +451,8 @@ static void vvp_io_advance(const struct lu_env *env,
450 struct vvp_io *vio = cl2vvp_io(env, ios); 451 struct vvp_io *vio = cl2vvp_io(env, ios);
451 CLOBINVRNT(env, obj, vvp_object_invariant(obj)); 452 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
452 453
453 iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob); 454 vio->vui_tot_count -= nob;
455 iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
454} 456}
455 457
456static void vvp_io_update_iov(const struct lu_env *env, 458static void vvp_io_update_iov(const struct lu_env *env,
@@ -551,9 +553,16 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
551 if (new_size == 0) 553 if (new_size == 0)
552 enqflags = CEF_DISCARD_DATA; 554 enqflags = CEF_DISCARD_DATA;
553 } else { 555 } else {
554 if ((io->u.ci_setattr.sa_attr.lvb_mtime >= 556 unsigned int valid = io->u.ci_setattr.sa_valid;
555 io->u.ci_setattr.sa_attr.lvb_ctime) || 557
556 (io->u.ci_setattr.sa_attr.lvb_atime >= 558 if (!(valid & TIMES_SET_FLAGS))
559 return 0;
560
561 if ((!(valid & ATTR_MTIME) ||
562 io->u.ci_setattr.sa_attr.lvb_mtime >=
563 io->u.ci_setattr.sa_attr.lvb_ctime) &&
564 (!(valid & ATTR_ATIME) ||
565 io->u.ci_setattr.sa_attr.lvb_atime >=
557 io->u.ci_setattr.sa_attr.lvb_ctime)) 566 io->u.ci_setattr.sa_attr.lvb_ctime))
558 return 0; 567 return 0;
559 new_size = 0; 568 new_size = 0;
@@ -580,14 +589,6 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
580 return result; 589 return result;
581} 590}
582 591
583static int vvp_io_setattr_trunc(const struct lu_env *env,
584 const struct cl_io_slice *ios,
585 struct inode *inode, loff_t size)
586{
587 inode_dio_wait(inode);
588 return 0;
589}
590
591static int vvp_io_setattr_time(const struct lu_env *env, 592static int vvp_io_setattr_time(const struct lu_env *env,
592 const struct cl_io_slice *ios) 593 const struct cl_io_slice *ios)
593{ 594{
@@ -618,15 +619,20 @@ static int vvp_io_setattr_start(const struct lu_env *env,
618{ 619{
619 struct cl_io *io = ios->cis_io; 620 struct cl_io *io = ios->cis_io;
620 struct inode *inode = vvp_object_inode(io->ci_obj); 621 struct inode *inode = vvp_object_inode(io->ci_obj);
621 int result = 0; 622 struct ll_inode_info *lli = ll_i2info(inode);
622 623
623 inode_lock(inode); 624 if (cl_io_is_trunc(io)) {
624 if (cl_io_is_trunc(io)) 625 down_write(&lli->lli_trunc_sem);
625 result = vvp_io_setattr_trunc(env, ios, inode, 626 inode_lock(inode);
626 io->u.ci_setattr.sa_attr.lvb_size); 627 inode_dio_wait(inode);
627 if (result == 0) 628 } else {
628 result = vvp_io_setattr_time(env, ios); 629 inode_lock(inode);
629 return result; 630 }
631
632 if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
633 return vvp_io_setattr_time(env, ios);
634
635 return 0;
630} 636}
631 637
632static void vvp_io_setattr_end(const struct lu_env *env, 638static void vvp_io_setattr_end(const struct lu_env *env,
@@ -634,14 +640,18 @@ static void vvp_io_setattr_end(const struct lu_env *env,
634{ 640{
635 struct cl_io *io = ios->cis_io; 641 struct cl_io *io = ios->cis_io;
636 struct inode *inode = vvp_object_inode(io->ci_obj); 642 struct inode *inode = vvp_object_inode(io->ci_obj);
643 struct ll_inode_info *lli = ll_i2info(inode);
637 644
638 if (cl_io_is_trunc(io)) 645 if (cl_io_is_trunc(io)) {
639 /* Truncate in memory pages - they must be clean pages 646 /* Truncate in memory pages - they must be clean pages
640 * because osc has already notified to destroy osc_extents. 647 * because osc has already notified to destroy osc_extents.
641 */ 648 */
642 vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); 649 vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
643 650 inode_unlock(inode);
644 inode_unlock(inode); 651 up_write(&lli->lli_trunc_sem);
652 } else {
653 inode_unlock(inode);
654 }
645} 655}
646 656
647static void vvp_io_setattr_fini(const struct lu_env *env, 657static void vvp_io_setattr_fini(const struct lu_env *env,
@@ -657,6 +667,7 @@ static int vvp_io_read_start(const struct lu_env *env,
657 struct cl_io *io = ios->cis_io; 667 struct cl_io *io = ios->cis_io;
658 struct cl_object *obj = io->ci_obj; 668 struct cl_object *obj = io->ci_obj;
659 struct inode *inode = vvp_object_inode(obj); 669 struct inode *inode = vvp_object_inode(obj);
670 struct ll_inode_info *lli = ll_i2info(inode);
660 struct file *file = vio->vui_fd->fd_file; 671 struct file *file = vio->vui_fd->fd_file;
661 672
662 int result; 673 int result;
@@ -669,6 +680,8 @@ static int vvp_io_read_start(const struct lu_env *env,
669 680
670 CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); 681 CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
671 682
683 down_read(&lli->lli_trunc_sem);
684
672 if (!can_populate_pages(env, io, inode)) 685 if (!can_populate_pages(env, io, inode))
673 return 0; 686 return 0;
674 687
@@ -770,16 +783,11 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
770static void write_commit_callback(const struct lu_env *env, struct cl_io *io, 783static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
771 struct cl_page *page) 784 struct cl_page *page)
772{ 785{
773 struct vvp_page *vpg;
774 struct page *vmpage = page->cp_vmpage; 786 struct page *vmpage = page->cp_vmpage;
775 struct cl_object *clob = cl_io_top(io)->ci_obj;
776 787
777 SetPageUptodate(vmpage); 788 SetPageUptodate(vmpage);
778 set_page_dirty(vmpage); 789 set_page_dirty(vmpage);
779 790
780 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
781 vvp_write_pending(cl2vvp(clob), vpg);
782
783 cl_page_disown(env, io, page); 791 cl_page_disown(env, io, page);
784 792
785 /* held in ll_cl_init() */ 793 /* held in ll_cl_init() */
@@ -899,10 +907,13 @@ static int vvp_io_write_start(const struct lu_env *env,
899 struct cl_io *io = ios->cis_io; 907 struct cl_io *io = ios->cis_io;
900 struct cl_object *obj = io->ci_obj; 908 struct cl_object *obj = io->ci_obj;
901 struct inode *inode = vvp_object_inode(obj); 909 struct inode *inode = vvp_object_inode(obj);
910 struct ll_inode_info *lli = ll_i2info(inode);
902 ssize_t result = 0; 911 ssize_t result = 0;
903 loff_t pos = io->u.ci_wr.wr.crw_pos; 912 loff_t pos = io->u.ci_wr.wr.crw_pos;
904 size_t cnt = io->u.ci_wr.wr.crw_count; 913 size_t cnt = io->u.ci_wr.wr.crw_count;
905 914
915 down_read(&lli->lli_trunc_sem);
916
906 if (!can_populate_pages(env, io, inode)) 917 if (!can_populate_pages(env, io, inode))
907 return 0; 918 return 0;
908 919
@@ -921,6 +932,20 @@ static int vvp_io_write_start(const struct lu_env *env,
921 932
922 CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); 933 CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
923 934
935 /*
936 * The maximum Lustre file size is variable, based on the OST maximum
937 * object size and number of stripes. This needs another check in
938 * addition to the VFS checks earlier.
939 */
940 if (pos + cnt > ll_file_maxbytes(inode)) {
941 CDEBUG(D_INODE,
942 "%s: file " DFID " offset %llu > maxbytes %llu\n",
943 ll_get_fsname(inode->i_sb, NULL, 0),
944 PFID(ll_inode2fid(inode)), pos + cnt,
945 ll_file_maxbytes(inode));
946 return -EFBIG;
947 }
948
924 if (!vio->vui_iter) { 949 if (!vio->vui_iter) {
925 /* from a temp io in ll_cl_init(). */ 950 /* from a temp io in ll_cl_init(). */
926 result = 0; 951 result = 0;
@@ -957,11 +982,7 @@ static int vvp_io_write_start(const struct lu_env *env,
957 } 982 }
958 } 983 }
959 if (result > 0) { 984 if (result > 0) {
960 struct ll_inode_info *lli = ll_i2info(inode); 985 set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
961
962 spin_lock(&lli->lli_lock);
963 lli->lli_flags |= LLIF_DATA_MODIFIED;
964 spin_unlock(&lli->lli_lock);
965 986
966 if (result < cnt) 987 if (result < cnt)
967 io->ci_continue = 0; 988 io->ci_continue = 0;
@@ -972,6 +993,15 @@ static int vvp_io_write_start(const struct lu_env *env,
972 return result; 993 return result;
973} 994}
974 995
996static void vvp_io_rw_end(const struct lu_env *env,
997 const struct cl_io_slice *ios)
998{
999 struct inode *inode = vvp_object_inode(ios->cis_obj);
1000 struct ll_inode_info *lli = ll_i2info(inode);
1001
1002 up_read(&lli->lli_trunc_sem);
1003}
1004
975static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) 1005static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
976{ 1006{
977 struct vm_fault *vmf = cfio->ft_vmf; 1007 struct vm_fault *vmf = cfio->ft_vmf;
@@ -1014,13 +1044,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
1014static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io, 1044static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
1015 struct cl_page *page) 1045 struct cl_page *page)
1016{ 1046{
1017 struct vvp_page *vpg;
1018 struct cl_object *clob = cl_io_top(io)->ci_obj;
1019
1020 set_page_dirty(page->cp_vmpage); 1047 set_page_dirty(page->cp_vmpage);
1021
1022 vpg = cl2vvp_page(cl_object_page_slice(clob, page));
1023 vvp_write_pending(cl2vvp(clob), vpg);
1024} 1048}
1025 1049
1026static int vvp_io_fault_start(const struct lu_env *env, 1050static int vvp_io_fault_start(const struct lu_env *env,
@@ -1030,6 +1054,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
1030 struct cl_io *io = ios->cis_io; 1054 struct cl_io *io = ios->cis_io;
1031 struct cl_object *obj = io->ci_obj; 1055 struct cl_object *obj = io->ci_obj;
1032 struct inode *inode = vvp_object_inode(obj); 1056 struct inode *inode = vvp_object_inode(obj);
1057 struct ll_inode_info *lli = ll_i2info(inode);
1033 struct cl_fault_io *fio = &io->u.ci_fault; 1058 struct cl_fault_io *fio = &io->u.ci_fault;
1034 struct vvp_fault_io *cfio = &vio->u.fault; 1059 struct vvp_fault_io *cfio = &vio->u.fault;
1035 loff_t offset; 1060 loff_t offset;
@@ -1039,11 +1064,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
1039 loff_t size; 1064 loff_t size;
1040 pgoff_t last_index; 1065 pgoff_t last_index;
1041 1066
1042 if (fio->ft_executable && 1067 down_read(&lli->lli_trunc_sem);
1043 inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
1044 CWARN("binary "DFID
1045 " changed while waiting for the page fault lock\n",
1046 PFID(lu_object_fid(&obj->co_lu)));
1047 1068
1048 /* offset of the last byte on the page */ 1069 /* offset of the last byte on the page */
1049 offset = cl_offset(obj, fio->ft_index + 1) - 1; 1070 offset = cl_offset(obj, fio->ft_index + 1) - 1;
@@ -1192,6 +1213,17 @@ out:
1192 return result; 1213 return result;
1193} 1214}
1194 1215
1216static void vvp_io_fault_end(const struct lu_env *env,
1217 const struct cl_io_slice *ios)
1218{
1219 struct inode *inode = vvp_object_inode(ios->cis_obj);
1220 struct ll_inode_info *lli = ll_i2info(inode);
1221
1222 CLOBINVRNT(env, ios->cis_io->ci_obj,
1223 vvp_object_invariant(ios->cis_io->ci_obj));
1224 up_read(&lli->lli_trunc_sem);
1225}
1226
1195static int vvp_io_fsync_start(const struct lu_env *env, 1227static int vvp_io_fsync_start(const struct lu_env *env,
1196 const struct cl_io_slice *ios) 1228 const struct cl_io_slice *ios)
1197{ 1229{
@@ -1202,46 +1234,23 @@ static int vvp_io_fsync_start(const struct lu_env *env,
1202 return 0; 1234 return 0;
1203} 1235}
1204 1236
1205static int vvp_io_read_page(const struct lu_env *env, 1237static int vvp_io_read_ahead(const struct lu_env *env,
1206 const struct cl_io_slice *ios, 1238 const struct cl_io_slice *ios,
1207 const struct cl_page_slice *slice) 1239 pgoff_t start, struct cl_read_ahead *ra)
1208{ 1240{
1209 struct cl_io *io = ios->cis_io; 1241 int result = 0;
1210 struct vvp_page *vpg = cl2vvp_page(slice);
1211 struct cl_page *page = slice->cpl_page;
1212 struct inode *inode = vvp_object_inode(slice->cpl_obj);
1213 struct ll_sb_info *sbi = ll_i2sbi(inode);
1214 struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
1215 struct ll_readahead_state *ras = &fd->fd_ras;
1216 struct cl_2queue *queue = &io->ci_queue;
1217
1218 if (sbi->ll_ra_info.ra_max_pages_per_file &&
1219 sbi->ll_ra_info.ra_max_pages)
1220 ras_update(sbi, inode, ras, vvp_index(vpg),
1221 vpg->vpg_defer_uptodate);
1222
1223 if (vpg->vpg_defer_uptodate) {
1224 vpg->vpg_ra_used = 1;
1225 cl_page_export(env, page, 1);
1226 }
1227 /*
1228 * Add page into the queue even when it is marked uptodate above.
1229 * this will unlock it automatically as part of cl_page_list_disown().
1230 */
1231 1242
1232 cl_page_list_add(&queue->c2_qin, page); 1243 if (ios->cis_io->ci_type == CIT_READ ||
1233 if (sbi->ll_ra_info.ra_max_pages_per_file && 1244 ios->cis_io->ci_type == CIT_FAULT) {
1234 sbi->ll_ra_info.ra_max_pages) 1245 struct vvp_io *vio = cl2vvp_io(env, ios);
1235 ll_readahead(env, io, &queue->c2_qin, ras,
1236 vpg->vpg_defer_uptodate);
1237 1246
1238 return 0; 1247 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1239} 1248 ra->cra_end = CL_PAGE_EOF;
1249 result = 1; /* no need to call down */
1250 }
1251 }
1240 1252
1241static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios) 1253 return result;
1242{
1243 CLOBINVRNT(env, ios->cis_io->ci_obj,
1244 vvp_object_invariant(ios->cis_io->ci_obj));
1245} 1254}
1246 1255
1247static const struct cl_io_operations vvp_io_ops = { 1256static const struct cl_io_operations vvp_io_ops = {
@@ -1250,6 +1259,7 @@ static const struct cl_io_operations vvp_io_ops = {
1250 .cio_fini = vvp_io_fini, 1259 .cio_fini = vvp_io_fini,
1251 .cio_lock = vvp_io_read_lock, 1260 .cio_lock = vvp_io_read_lock,
1252 .cio_start = vvp_io_read_start, 1261 .cio_start = vvp_io_read_start,
1262 .cio_end = vvp_io_rw_end,
1253 .cio_advance = vvp_io_advance, 1263 .cio_advance = vvp_io_advance,
1254 }, 1264 },
1255 [CIT_WRITE] = { 1265 [CIT_WRITE] = {
@@ -1258,6 +1268,7 @@ static const struct cl_io_operations vvp_io_ops = {
1258 .cio_iter_fini = vvp_io_write_iter_fini, 1268 .cio_iter_fini = vvp_io_write_iter_fini,
1259 .cio_lock = vvp_io_write_lock, 1269 .cio_lock = vvp_io_write_lock,
1260 .cio_start = vvp_io_write_start, 1270 .cio_start = vvp_io_write_start,
1271 .cio_end = vvp_io_rw_end,
1261 .cio_advance = vvp_io_advance, 1272 .cio_advance = vvp_io_advance,
1262 }, 1273 },
1263 [CIT_SETATTR] = { 1274 [CIT_SETATTR] = {
@@ -1272,7 +1283,7 @@ static const struct cl_io_operations vvp_io_ops = {
1272 .cio_iter_init = vvp_io_fault_iter_init, 1283 .cio_iter_init = vvp_io_fault_iter_init,
1273 .cio_lock = vvp_io_fault_lock, 1284 .cio_lock = vvp_io_fault_lock,
1274 .cio_start = vvp_io_fault_start, 1285 .cio_start = vvp_io_fault_start,
1275 .cio_end = vvp_io_end, 1286 .cio_end = vvp_io_fault_end,
1276 }, 1287 },
1277 [CIT_FSYNC] = { 1288 [CIT_FSYNC] = {
1278 .cio_start = vvp_io_fsync_start, 1289 .cio_start = vvp_io_fsync_start,
@@ -1282,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
1282 .cio_fini = vvp_io_fini 1293 .cio_fini = vvp_io_fini
1283 } 1294 }
1284 }, 1295 },
1285 .cio_read_page = vvp_io_read_page, 1296 .cio_read_ahead = vvp_io_read_ahead,
1286}; 1297};
1287 1298
1288int vvp_io_init(const struct lu_env *env, struct cl_object *obj, 1299int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
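A recurring element in the vvp_io.c hunks is lli_trunc_sem: vvp_io_read_start(), vvp_io_write_start() and vvp_io_fault_start() now take it shared (down_read()), the new cio_end hooks (vvp_io_rw_end(), vvp_io_fault_end()) release it, and truncation takes it exclusively (down_write()) in vvp_io_setattr_start() so that no read, write or fault can race with the size change; the write path additionally rejects writes past ll_file_maxbytes() with -EFBIG. The reader/writer shape can be sketched with a plain pthread rwlock; the sketch below is a userspace analogue only, with invented names.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t trunc_sem = PTHREAD_RWLOCK_INITIALIZER;
    static long file_size = 4096;

    /* Read/write/fault paths: shared access, the size cannot change underneath. */
    static long io_path(void)
    {
        long size;

        pthread_rwlock_rdlock(&trunc_sem);   /* like down_read(&lli->lli_trunc_sem) */
        size = file_size;
        pthread_rwlock_unlock(&trunc_sem);   /* like up_read() in the cio_end hook */
        return size;
    }

    /* Truncate path: exclusive access, waits for in-flight I/O to drain. */
    static void truncate_to(long new_size)
    {
        pthread_rwlock_wrlock(&trunc_sem);   /* like down_write() in setattr start */
        file_size = new_size;
        pthread_rwlock_unlock(&trunc_sem);   /* like up_write() in setattr end */
    }

    int main(void)
    {
        printf("before truncate: %ld\n", io_path());
        truncate_to(0);
        printf("after truncate:  %ld\n", io_path());
        return 0;
    }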
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index b57195d15674..8e18cf86cefc 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -65,8 +65,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
65 struct inode *inode = obj->vob_inode; 65 struct inode *inode = obj->vob_inode;
66 struct ll_inode_info *lli; 66 struct ll_inode_info *lli;
67 67
68 (*p)(env, cookie, "(%s %d %d) inode: %p ", 68 (*p)(env, cookie, "(%d %d) inode: %p ",
69 list_empty(&obj->vob_pending_list) ? "-" : "+",
70 atomic_read(&obj->vob_transient_pages), 69 atomic_read(&obj->vob_transient_pages),
71 atomic_read(&obj->vob_mmap_cnt), inode); 70 atomic_read(&obj->vob_mmap_cnt), inode);
72 if (inode) { 71 if (inode) {
@@ -133,7 +132,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
133 CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n", 132 CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
134 PFID(&lli->lli_fid)); 133 PFID(&lli->lli_fid));
135 134
136 ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE); 135 ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
137 136
138 /* Clean up page mmap for this inode. 137 /* Clean up page mmap for this inode.
139 * The reason for us to do this is that if the page has 138 * The reason for us to do this is that if the page has
@@ -146,27 +145,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
146 */ 145 */
147 unmap_mapping_range(conf->coc_inode->i_mapping, 146 unmap_mapping_range(conf->coc_inode->i_mapping,
148 0, OBD_OBJECT_EOF, 0); 147 0, OBD_OBJECT_EOF, 0);
149
150 return 0;
151 } 148 }
152 149
153 if (conf->coc_opc != OBJECT_CONF_SET)
154 return 0;
155
156 if (conf->u.coc_md && conf->u.coc_md->lsm) {
157 CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
158 PFID(&lli->lli_fid), lli->lli_layout_gen,
159 conf->u.coc_md->lsm->lsm_layout_gen);
160
161 lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
162 ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
163 } else {
164 CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
165 PFID(&lli->lli_fid), lli->lli_layout_gen);
166
167 lli->lli_has_smd = false;
168 ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
169 }
170 return 0; 150 return 0;
171} 151}
172 152
@@ -204,6 +184,26 @@ static int vvp_object_glimpse(const struct lu_env *env,
204 return 0; 184 return 0;
205} 185}
206 186
187static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
188 struct cl_req_attr *attr)
189{
190 u64 valid_flags = OBD_MD_FLTYPE;
191 struct inode *inode;
192 struct obdo *oa;
193
194 oa = attr->cra_oa;
195 inode = vvp_object_inode(obj);
196
197 if (attr->cra_type == CRT_WRITE)
198 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
199 OBD_MD_FLUID | OBD_MD_FLGID;
200 obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
201 obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
202 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
203 oa->o_parent_oid++;
204 memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
205}
206
207static const struct cl_object_operations vvp_ops = { 207static const struct cl_object_operations vvp_ops = {
208 .coo_page_init = vvp_page_init, 208 .coo_page_init = vvp_page_init,
209 .coo_lock_init = vvp_lock_init, 209 .coo_lock_init = vvp_lock_init,
@@ -212,7 +212,8 @@ static const struct cl_object_operations vvp_ops = {
212 .coo_attr_update = vvp_attr_update, 212 .coo_attr_update = vvp_attr_update,
213 .coo_conf_set = vvp_conf_set, 213 .coo_conf_set = vvp_conf_set,
214 .coo_prune = vvp_prune, 214 .coo_prune = vvp_prune,
215 .coo_glimpse = vvp_object_glimpse 215 .coo_glimpse = vvp_object_glimpse,
216 .coo_req_attr_set = vvp_req_attr_set
216}; 217};
217 218
218static int vvp_object_init0(const struct lu_env *env, 219static int vvp_object_init0(const struct lu_env *env,
@@ -240,7 +241,6 @@ static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
240 const struct cl_object_conf *cconf; 241 const struct cl_object_conf *cconf;
241 242
242 cconf = lu2cl_conf(conf); 243 cconf = lu2cl_conf(conf);
243 INIT_LIST_HEAD(&vob->vob_pending_list);
244 lu_object_add(obj, below); 244 lu_object_add(obj, below);
245 result = vvp_object_init0(env, vob, cconf); 245 result = vvp_object_init0(env, vob, cconf);
246 } else { 246 } else {
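With vvp_req gone, the per-request attribute hook becomes the object-level vvp_req_attr_set(): it starts from OBD_MD_FLTYPE, widens the mask for writes (mtime/ctime/uid/gid), and copies only the fields selected by both that mask and the caller's cra_flags. The masked-copy shape is sketched below in plain C; the attribute bits and struct fields are invented for the example and are not the real obdo layout.

    #include <stdint.h>
    #include <stdio.h>

    #define ATTR_TYPE   (1u << 0)   /* invented stand-ins for OBD_MD_* bits */
    #define ATTR_MTIME  (1u << 1)
    #define ATTR_UID    (1u << 2)

    struct inode_attrs { uint32_t type, mtime, uid; };
    struct req_attrs   { uint32_t type, mtime, uid; };

    static void req_attr_set(struct req_attrs *dst, const struct inode_attrs *src,
                             int is_write, uint32_t caller_mask)
    {
        uint32_t valid = ATTR_TYPE;             /* this layer always supplies the type */

        if (is_write)
            valid |= ATTR_MTIME | ATTR_UID;     /* writes also stamp time and ownership */
        valid &= caller_mask;                   /* honour what the caller asked for */

        if (valid & ATTR_TYPE)
            dst->type = src->type;
        if (valid & ATTR_MTIME)
            dst->mtime = src->mtime;
        if (valid & ATTR_UID)
            dst->uid = src->uid;
    }

    int main(void)
    {
        struct inode_attrs src = { .type = 1, .mtime = 1234, .uid = 42 };
        struct req_attrs dst = { 0 };

        req_attr_set(&dst, &src, 1, ATTR_TYPE | ATTR_MTIME);
        printf("type=%u mtime=%u uid=%u\n", dst.type, dst.mtime, dst.uid);
        return 0;
    }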
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 046e84d7a158..23d66308ff20 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -162,13 +162,10 @@ static void vvp_page_delete(const struct lu_env *env,
162 LASSERT((struct cl_page *)vmpage->private == page); 162 LASSERT((struct cl_page *)vmpage->private == page);
163 LASSERT(inode == vvp_object_inode(obj)); 163 LASSERT(inode == vvp_object_inode(obj));
164 164
165 vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
166
167 /* Drop the reference count held in vvp_page_init */ 165 /* Drop the reference count held in vvp_page_init */
168 refc = atomic_dec_return(&page->cp_ref); 166 refc = atomic_dec_return(&page->cp_ref);
169 LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc); 167 LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
170 168
171 ClearPageUptodate(vmpage);
172 ClearPagePrivate(vmpage); 169 ClearPagePrivate(vmpage);
173 vmpage->private = 0; 170 vmpage->private = 0;
174 /* 171 /*
@@ -221,8 +218,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
221 if (!pg->cp_sync_io) 218 if (!pg->cp_sync_io)
222 set_page_writeback(vmpage); 219 set_page_writeback(vmpage);
223 220
224 vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
225
226 return 0; 221 return 0;
227} 222}
228 223
@@ -287,19 +282,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
287 282
288 CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret); 283 CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
289 284
290 /*
291 * TODO: Actually it makes sense to add the page into oap pending
292 * list again and so that we don't need to take the page out from
293 * SoM write pending list, if we just meet a recoverable error,
294 * -ENOMEM, etc.
295 * To implement this, we just need to return a non zero value in
296 * ->cpo_completion method. The underlying transfer should be notified
297 * and then re-add the page into pending transfer queue. -jay
298 */
299
300 vpg->vpg_write_queued = 0;
301 vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
302
303 if (pg->cp_sync_io) { 285 if (pg->cp_sync_io) {
304 LASSERT(PageLocked(vmpage)); 286 LASSERT(PageLocked(vmpage));
305 LASSERT(!PageWriteback(vmpage)); 287 LASSERT(!PageWriteback(vmpage));
@@ -341,7 +323,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
341 LASSERT(pg->cp_state == CPS_CACHED); 323 LASSERT(pg->cp_state == CPS_CACHED);
342 /* This actually clears the dirty bit in the radix tree. */ 324 /* This actually clears the dirty bit in the radix tree. */
343 set_page_writeback(vmpage); 325 set_page_writeback(vmpage);
344 vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
345 CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); 326 CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
346 } else if (pg->cp_state == CPS_PAGEOUT) { 327 } else if (pg->cp_state == CPS_PAGEOUT) {
347 /* is it possible for osc_flush_async_page() to already 328 /* is it possible for osc_flush_async_page() to already
@@ -357,20 +338,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
357 return result; 338 return result;
358} 339}
359 340
360static int vvp_page_is_under_lock(const struct lu_env *env,
361 const struct cl_page_slice *slice,
362 struct cl_io *io, pgoff_t *max_index)
363{
364 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
365 io->ci_type == CIT_FAULT) {
366 struct vvp_io *vio = vvp_env_io(env);
367
368 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
369 *max_index = CL_PAGE_EOF;
370 }
371 return 0;
372}
373
374static int vvp_page_print(const struct lu_env *env, 341static int vvp_page_print(const struct lu_env *env,
375 const struct cl_page_slice *slice, 342 const struct cl_page_slice *slice,
376 void *cookie, lu_printer_t printer) 343 void *cookie, lu_printer_t printer)
@@ -378,9 +345,8 @@ static int vvp_page_print(const struct lu_env *env,
378 struct vvp_page *vpg = cl2vvp_page(slice); 345 struct vvp_page *vpg = cl2vvp_page(slice);
379 struct page *vmpage = vpg->vpg_page; 346 struct page *vmpage = vpg->vpg_page;
380 347
381 (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", 348 (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ",
382 vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, 349 vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
383 vpg->vpg_write_queued, vmpage);
384 if (vmpage) { 350 if (vmpage) {
385 (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", 351 (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
386 (long)vmpage->flags, page_count(vmpage), 352 (long)vmpage->flags, page_count(vmpage),
@@ -416,7 +382,6 @@ static const struct cl_page_operations vvp_page_ops = {
416 .cpo_is_vmlocked = vvp_page_is_vmlocked, 382 .cpo_is_vmlocked = vvp_page_is_vmlocked,
417 .cpo_fini = vvp_page_fini, 383 .cpo_fini = vvp_page_fini,
418 .cpo_print = vvp_page_print, 384 .cpo_print = vvp_page_print,
419 .cpo_is_under_lock = vvp_page_is_under_lock,
420 .io = { 385 .io = {
421 [CRT_READ] = { 386 [CRT_READ] = {
422 .cpo_prep = vvp_page_prep_read, 387 .cpo_prep = vvp_page_prep_read,
@@ -515,7 +480,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
515 .cpo_fini = vvp_transient_page_fini, 480 .cpo_fini = vvp_transient_page_fini,
516 .cpo_is_vmlocked = vvp_transient_page_is_vmlocked, 481 .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
517 .cpo_print = vvp_page_print, 482 .cpo_print = vvp_page_print,
518 .cpo_is_under_lock = vvp_page_is_under_lock,
519 .io = { 483 .io = {
520 [CRT_READ] = { 484 [CRT_READ] = {
521 .cpo_prep = vvp_transient_page_prep, 485 .cpo_prep = vvp_transient_page_prep,
@@ -539,7 +503,6 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
539 vpg->vpg_page = vmpage; 503 vpg->vpg_page = vmpage;
540 get_page(vmpage); 504 get_page(vmpage);
541 505
542 INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
543 if (page->cp_type == CPT_CACHEABLE) { 506 if (page->cp_type == CPT_CACHEABLE) {
544 /* in cache, decref in vvp_page_delete */ 507 /* in cache, decref in vvp_page_delete */
545 atomic_inc(&page->cp_ref); 508 atomic_inc(&page->cp_ref);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
deleted file mode 100644
index e3f4c790d646..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2014, Intel Corporation.
27 */
28
29#define DEBUG_SUBSYSTEM S_LLITE
30
31#include "../include/lustre/lustre_idl.h"
32#include "../include/cl_object.h"
33#include "../include/obd.h"
34#include "../include/obd_support.h"
35#include "llite_internal.h"
36#include "vvp_internal.h"
37
38static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
39{
40 return container_of0(slice, struct vvp_req, vrq_cl);
41}
42
43/**
44 * Implementation of struct cl_req_operations::cro_attr_set() for VVP
45 * layer. VVP is responsible for
46 *
47 * - o_[mac]time
48 *
49 * - o_mode
50 *
51 * - o_parent_seq
52 *
53 * - o_[ug]id
54 *
55 * - o_parent_oid
56 *
57 * - o_parent_ver
58 *
59 * - o_ioepoch,
60 *
61 */
62static void vvp_req_attr_set(const struct lu_env *env,
63 const struct cl_req_slice *slice,
64 const struct cl_object *obj,
65 struct cl_req_attr *attr, u64 flags)
66{
67 struct inode *inode;
68 struct obdo *oa;
69 u32 valid_flags;
70
71 oa = attr->cra_oa;
72 inode = vvp_object_inode(obj);
73 valid_flags = OBD_MD_FLTYPE;
74
75 if (slice->crs_req->crq_type == CRT_WRITE) {
76 if (flags & OBD_MD_FLEPOCH) {
77 oa->o_valid |= OBD_MD_FLEPOCH;
78 oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
79 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
80 OBD_MD_FLUID | OBD_MD_FLGID;
81 }
82 }
83 obdo_from_inode(oa, inode, valid_flags & flags);
84 obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
85 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
86 oa->o_parent_oid++;
87 memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
88 LUSTRE_JOBID_SIZE);
89}
90
91static void vvp_req_completion(const struct lu_env *env,
92 const struct cl_req_slice *slice, int ioret)
93{
94 struct vvp_req *vrq;
95
96 if (ioret > 0)
97 cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
98
99 vrq = cl2vvp_req(slice);
100 kmem_cache_free(vvp_req_kmem, vrq);
101}
102
103static const struct cl_req_operations vvp_req_ops = {
104 .cro_attr_set = vvp_req_attr_set,
105 .cro_completion = vvp_req_completion
106};
107
108int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
109 struct cl_req *req)
110{
111 struct vvp_req *vrq;
112 int result;
113
114 vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
115 if (vrq) {
116 cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
117 result = 0;
118 } else {
119 result = -ENOMEM;
120 }
121 return result;
122}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index e070adb7a3cc..7a848ebc57c1 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -44,48 +44,39 @@
44 44
45#include "llite_internal.h" 45#include "llite_internal.h"
46 46
47static 47const struct xattr_handler *get_xattr_type(const char *name)
48int get_xattr_type(const char *name)
49{ 48{
50 if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)) 49 int i = 0;
51 return XATTR_ACL_ACCESS_T;
52 50
53 if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT)) 51 while (ll_xattr_handlers[i]) {
54 return XATTR_ACL_DEFAULT_T; 52 size_t len = strlen(ll_xattr_handlers[i]->prefix);
55 53
56 if (!strncmp(name, XATTR_USER_PREFIX, 54 if (!strncmp(ll_xattr_handlers[i]->prefix, name, len))
57 sizeof(XATTR_USER_PREFIX) - 1)) 55 return ll_xattr_handlers[i];
58 return XATTR_USER_T; 56 i++;
59 57 }
60 if (!strncmp(name, XATTR_TRUSTED_PREFIX, 58 return NULL;
61 sizeof(XATTR_TRUSTED_PREFIX) - 1))
62 return XATTR_TRUSTED_T;
63
64 if (!strncmp(name, XATTR_SECURITY_PREFIX,
65 sizeof(XATTR_SECURITY_PREFIX) - 1))
66 return XATTR_SECURITY_T;
67
68 if (!strncmp(name, XATTR_LUSTRE_PREFIX,
69 sizeof(XATTR_LUSTRE_PREFIX) - 1))
70 return XATTR_LUSTRE_T;
71
72 return XATTR_OTHER_T;
73} 59}
74 60
75static 61static int xattr_type_filter(struct ll_sb_info *sbi,
76int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type) 62 const struct xattr_handler *handler)
77{ 63{
78 if ((xattr_type == XATTR_ACL_ACCESS_T || 64 /* No handler means XATTR_OTHER_T */
79 xattr_type == XATTR_ACL_DEFAULT_T) && 65 if (!handler)
66 return -EOPNOTSUPP;
67
68 if ((handler->flags == XATTR_ACL_ACCESS_T ||
69 handler->flags == XATTR_ACL_DEFAULT_T) &&
80 !(sbi->ll_flags & LL_SBI_ACL)) 70 !(sbi->ll_flags & LL_SBI_ACL))
81 return -EOPNOTSUPP; 71 return -EOPNOTSUPP;
82 72
83 if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR)) 73 if (handler->flags == XATTR_USER_T &&
74 !(sbi->ll_flags & LL_SBI_USER_XATTR))
84 return -EOPNOTSUPP; 75 return -EOPNOTSUPP;
85 if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN)) 76
77 if (handler->flags == XATTR_TRUSTED_T &&
78 !capable(CFS_CAP_SYS_ADMIN))
86 return -EPERM; 79 return -EPERM;
87 if (xattr_type == XATTR_OTHER_T)
88 return -EOPNOTSUPP;
89 80
90 return 0; 81 return 0;
91} 82}
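The rewritten get_xattr_type() no longer hand-matches every known prefix; it walks the NULL-terminated ll_xattr_handlers[] table and returns the first handler whose prefix matches, with a NULL return standing in for the old XATTR_OTHER_T case (which xattr_type_filter() now maps to -EOPNOTSUPP). A self-contained sketch of that prefix-table lookup follows; the table contents are invented for the example.

    #include <stdio.h>
    #include <string.h>

    struct xattr_handler {
        const char *prefix;
        int flags;
    };

    /* Invented handler table; the real one is ll_xattr_handlers[]. */
    static const struct xattr_handler user_handler    = { "user.", 1 };
    static const struct xattr_handler trusted_handler = { "trusted.", 2 };

    static const struct xattr_handler *handlers[] = {
        &user_handler,
        &trusted_handler,
        NULL,                       /* table is NULL-terminated */
    };

    static const struct xattr_handler *get_handler(const char *name)
    {
        int i;

        for (i = 0; handlers[i]; i++) {
            size_t len = strlen(handlers[i]->prefix);

            if (!strncmp(handlers[i]->prefix, name, len))
                return handlers[i];
        }
        return NULL;                /* unknown prefix: caller treats as unsupported */
    }

    int main(void)
    {
        const struct xattr_handler *h = get_handler("user.lov");

        printf("flags=%d\n", h ? h->flags : -1);
        return 0;
    }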
@@ -111,7 +102,7 @@ ll_xattr_set_common(const struct xattr_handler *handler,
111 valid = OBD_MD_FLXATTR; 102 valid = OBD_MD_FLXATTR;
112 } 103 }
113 104
114 rc = xattr_type_filter(sbi, handler->flags); 105 rc = xattr_type_filter(sbi, handler);
115 if (rc) 106 if (rc)
116 return rc; 107 return rc;
117 108
@@ -121,8 +112,9 @@ ll_xattr_set_common(const struct xattr_handler *handler,
121 return -EPERM; 112 return -EPERM;
122 113
123 /* b10667: ignore lustre special xattr for now */ 114 /* b10667: ignore lustre special xattr for now */
124 if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) || 115 if (!strcmp(name, "hsm") ||
125 (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))) 116 ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
117 (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))))
126 return 0; 118 return 0;
127 119
128 /* b15587: ignore security.capability xattr for now */ 120 /* b15587: ignore security.capability xattr for now */
@@ -135,6 +127,11 @@ ll_xattr_set_common(const struct xattr_handler *handler,
135 strcmp(name, "selinux") == 0) 127 strcmp(name, "selinux") == 0)
136 return -EOPNOTSUPP; 128 return -EOPNOTSUPP;
137 129
130 /*FIXME: enable IMA when the conditions are ready */
131 if (handler->flags == XATTR_SECURITY_T &&
132 (!strcmp(name, "ima") || !strcmp(name, "evm")))
133 return -EOPNOTSUPP;
134
138 sprintf(fullname, "%s%s\n", handler->prefix, name); 135 sprintf(fullname, "%s%s\n", handler->prefix, name);
139 rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), 136 rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
140 valid, fullname, pv, size, 0, flags, 137 valid, fullname, pv, size, 0, flags,
@@ -151,6 +148,37 @@ ll_xattr_set_common(const struct xattr_handler *handler,
151 return 0; 148 return 0;
152} 149}
153 150
151static int get_hsm_state(struct inode *inode, u32 *hus_states)
152{
153 struct md_op_data *op_data;
154 struct hsm_user_state *hus;
155 int rc;
156
157 hus = kzalloc(sizeof(*hus), GFP_NOFS);
158 if (!hus)
159 return -ENOMEM;
160
161 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
162 LUSTRE_OPC_ANY, hus);
163 if (!IS_ERR(op_data)) {
164 rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode),
165 sizeof(*op_data), op_data, NULL);
166 if (!rc)
167 *hus_states = hus->hus_states;
168 else
169 CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n",
170 rc);
171
172 ll_finish_md_op_data(op_data);
173 } else {
174 rc = PTR_ERR(op_data);
175 CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n",
176 rc);
177 }
178 kfree(hus);
179 return rc;
180}
181
154static int ll_xattr_set(const struct xattr_handler *handler, 182static int ll_xattr_set(const struct xattr_handler *handler,
155 struct dentry *dentry, struct inode *inode, 183 struct dentry *dentry, struct inode *inode,
156 const char *name, const void *value, size_t size, 184 const char *name, const void *value, size_t size,
@@ -187,6 +215,31 @@ static int ll_xattr_set(const struct xattr_handler *handler,
187 if (lump && lump->lmm_stripe_offset == 0) 215 if (lump && lump->lmm_stripe_offset == 0)
188 lump->lmm_stripe_offset = -1; 216 lump->lmm_stripe_offset = -1;
189 217
218 /* Avoid anyone directly setting the RELEASED flag. */
219 if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
220 /* Only if we have a released flag check if the file
221 * was indeed archived.
222 */
223 u32 state = HS_NONE;
224
225 rc = get_hsm_state(inode, &state);
226 if (rc)
227 return rc;
228
229 if (!(state & HS_ARCHIVED)) {
230 CDEBUG(D_VFSTRACE,
231 "hus_states state = %x, pattern = %x\n",
232 state, lump->lmm_pattern);
233 /*
234 * Here the state is: real file is not
235 * archived but user is requesting to set
236 * the RELEASED flag so we mask off the
237 * released flag from the request
238 */
239 lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED;
240 }
241 }
242
190 if (lump && S_ISREG(inode->i_mode)) { 243 if (lump && S_ISREG(inode->i_mode)) {
191 __u64 it_flags = FMODE_WRITE; 244 __u64 it_flags = FMODE_WRITE;
192 int lum_size; 245 int lum_size;
@@ -225,7 +278,8 @@ ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
225 void *xdata; 278 void *xdata;
226 int rc; 279 int rc;
227 280
228 if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) { 281 if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T &&
282 (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) {
229 rc = ll_xattr_cache_get(inode, name, buffer, size, valid); 283 rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
230 if (rc == -EAGAIN) 284 if (rc == -EAGAIN)
231 goto getxattr_nocache; 285 goto getxattr_nocache;
@@ -313,7 +367,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
313 367
314 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); 368 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
315 369
316 rc = xattr_type_filter(sbi, handler->flags); 370 rc = xattr_type_filter(sbi, handler);
317 if (rc) 371 if (rc)
318 return rc; 372 return rc;
319 373
@@ -353,80 +407,99 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
353 OBD_MD_FLXATTR); 407 OBD_MD_FLXATTR);
354} 408}
355 409
356static int ll_xattr_get(const struct xattr_handler *handler, 410static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
357 struct dentry *dentry, struct inode *inode,
358 const char *name, void *buffer, size_t size)
359{ 411{
360 LASSERT(inode); 412 ssize_t rc;
361 LASSERT(name);
362 413
363 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", 414 if (S_ISREG(inode->i_mode)) {
364 PFID(ll_inode2fid(inode)), inode, name); 415 struct cl_object *obj = ll_i2info(inode)->lli_clob;
416 struct cl_layout cl = {
417 .cl_buf.lb_buf = buf,
418 .cl_buf.lb_len = buf_size,
419 };
420 struct lu_env *env;
421 int refcheck;
422
423 if (!obj)
424 return -ENODATA;
365 425
366 if (!strcmp(name, "lov")) { 426 env = cl_env_get(&refcheck);
367 struct lov_stripe_md *lsm; 427 if (IS_ERR(env))
368 struct lov_user_md *lump; 428 return PTR_ERR(env);
369 struct lov_mds_md *lmm = NULL;
370 struct ptlrpc_request *request = NULL;
371 int rc = 0, lmmsize = 0;
372 429
373 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); 430 rc = cl_object_layout_get(env, obj, &cl);
374 431 if (rc < 0)
375 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) 432 goto out_env;
376 return -ENODATA;
377 433
378 lsm = ccc_inode_lsm_get(inode); 434 if (!cl.cl_size) {
379 if (!lsm) { 435 rc = -ENODATA;
380 if (S_ISDIR(inode->i_mode)) { 436 goto out_env;
381 rc = ll_dir_getstripe(inode, (void **)&lmm,
382 &lmmsize, &request, 0);
383 } else {
384 rc = -ENODATA;
385 }
386 } else {
387 /* LSM is present already after lookup/getattr call.
388 * we need to grab layout lock once it is implemented
389 */
390 rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
391 lmmsize = rc;
392 } 437 }
393 ccc_inode_lsm_put(inode, lsm);
394 438
439 rc = cl.cl_size;
440
441 if (!buf_size)
442 goto out_env;
443
444 LASSERT(buf && rc <= buf_size);
445
446 /*
447 * Do not return layout gen for getxattr() since
448 * otherwise it would confuse tar --xattr by
449 * recognizing layout gen as stripe offset when the
450 * file is restored. See LU-2809.
451 */
452 ((struct lov_mds_md *)buf)->lmm_layout_gen = 0;
453out_env:
454 cl_env_put(env, &refcheck);
455
456 return rc;
457 } else if (S_ISDIR(inode->i_mode)) {
458 struct ptlrpc_request *req = NULL;
459 struct lov_mds_md *lmm = NULL;
460 int lmm_size = 0;
461
462 rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size,
463 &req, 0);
395 if (rc < 0) 464 if (rc < 0)
396 goto out; 465 goto out_req;
397 466
398 if (size == 0) { 467 if (!buf_size) {
399 /* used to call ll_get_max_mdsize() forward to get 468 rc = lmm_size;
400 * the maximum buffer size, while some apps (such as 469 goto out_req;
401 * rsync 3.0.x) care much about the exact xattr value
402 * size
403 */
404 rc = lmmsize;
405 goto out;
406 } 470 }
407 471
408 if (size < lmmsize) { 472 if (buf_size < lmm_size) {
409 CERROR("server bug: replied size %d > %d for %pd (%s)\n",
410 lmmsize, (int)size, dentry, name);
411 rc = -ERANGE; 473 rc = -ERANGE;
412 goto out; 474 goto out_req;
413 } 475 }
414 476
415 lump = buffer; 477 memcpy(buf, lmm, lmm_size);
416 memcpy(lump, lmm, lmmsize); 478 rc = lmm_size;
417 /* do not return layout gen for getxattr otherwise it would 479out_req:
418 * confuse tar --xattr by recognizing layout gen as stripe 480 if (req)
419 * offset when the file is restored. See LU-2809. 481 ptlrpc_req_finished(req);
420 */
421 lump->lmm_layout_gen = 0;
422 482
423 rc = lmmsize;
424out:
425 if (request)
426 ptlrpc_req_finished(request);
427 else if (lmm)
428 obd_free_diskmd(ll_i2dtexp(inode), &lmm);
429 return rc; 483 return rc;
484 } else {
485 return -ENODATA;
486 }
487}
488
489static int ll_xattr_get(const struct xattr_handler *handler,
490 struct dentry *dentry, struct inode *inode,
491 const char *name, void *buffer, size_t size)
492{
493 LASSERT(inode);
494 LASSERT(name);
495
496 CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
497 PFID(ll_inode2fid(inode)), inode, name);
498
499 if (!strcmp(name, "lov")) {
500 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
501
502 return ll_getxattr_lov(inode, buffer, size);
430 } 503 }
431 504
432 return ll_xattr_get_common(handler, dentry, inode, name, buffer, size); 505 return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
@@ -435,10 +508,10 @@ out:
435ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) 508ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
436{ 509{
437 struct inode *inode = d_inode(dentry); 510 struct inode *inode = d_inode(dentry);
438 int rc = 0, rc2 = 0; 511 struct ll_sb_info *sbi = ll_i2sbi(inode);
439 struct lov_mds_md *lmm = NULL; 512 char *xattr_name;
440 struct ptlrpc_request *request = NULL; 513 ssize_t rc, rc2;
441 int lmmsize; 514 size_t len, rem;
442 515
443 LASSERT(inode); 516 LASSERT(inode);
444 517
@@ -450,65 +523,48 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
450 rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size, 523 rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
451 OBD_MD_FLXATTRLS); 524 OBD_MD_FLXATTRLS);
452 if (rc < 0) 525 if (rc < 0)
453 goto out; 526 return rc;
454 527 /*
455 if (buffer) { 528 * If we're being called to get the size of the xattr list
 456	struct ll_sb_info *sbi = ll_i2sbi(inode);	 529	 * (size == 0), then just assume that a lustre.lov xattr
457 char *xattr_name = buffer; 530 * exists.
458 int xlen, rem = rc; 531 */
459 532 if (!size)
460 while (rem > 0) { 533 return rc + sizeof(XATTR_LUSTRE_LOV);
461 xlen = strnlen(xattr_name, rem - 1) + 1; 534
462 rem -= xlen; 535 xattr_name = buffer;
463 if (xattr_type_filter(sbi, 536 rem = rc;
464 get_xattr_type(xattr_name)) == 0) { 537
465 /* skip OK xattr type 538 while (rem > 0) {
466 * leave it in buffer 539 len = strnlen(xattr_name, rem - 1) + 1;
467 */ 540 rem -= len;
468 xattr_name += xlen; 541 if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) {
 469	continue;	 542	/* Skip OK xattr type, leave it in buffer */
470 } 543 xattr_name += len;
471 /* move up remaining xattrs in buffer 544 continue;
472 * removing the xattr that is not OK
473 */
474 memmove(xattr_name, xattr_name + xlen, rem);
475 rc -= xlen;
476 } 545 }
477 } 546
478 if (S_ISREG(inode->i_mode)) { 547 /*
479 if (!ll_i2info(inode)->lli_has_smd) 548 * Move up remaining xattrs in buffer
480 rc2 = -1; 549 * removing the xattr that is not OK
481 } else if (S_ISDIR(inode->i_mode)) { 550 */
482 rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, 551 memmove(xattr_name, xattr_name + len, rem);
483 &request, 0); 552 rc -= len;
484 } 553 }
485 554
486 if (rc2 < 0) { 555 rc2 = ll_getxattr_lov(inode, NULL, 0);
487 rc2 = 0; 556 if (rc2 == -ENODATA)
488 goto out; 557 return rc;
489 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
490 const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
491 const size_t name_len = sizeof("lov") - 1;
492 const size_t total_len = prefix_len + name_len + 1;
493
494 if (((rc + total_len) > size) && buffer) {
495 ptlrpc_req_finished(request);
496 return -ERANGE;
497 }
498 558
499 if (buffer) { 559 if (rc2 < 0)
500 buffer += rc; 560 return rc2;
501 memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
502 memcpy(buffer + prefix_len, "lov", name_len);
503 buffer[prefix_len + name_len] = '\0';
504 }
505 rc2 = total_len;
506 }
507out:
508 ptlrpc_req_finished(request);
509 rc = rc + rc2;
510 561
511 return rc; 562 if (size < rc + sizeof(XATTR_LUSTRE_LOV))
563 return -ERANGE;
564
565 memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV));
566
567 return rc + sizeof(XATTR_LUSTRE_LOV);
512} 568}
513 569
514static const struct xattr_handler ll_user_xattr_handler = { 570static const struct xattr_handler ll_user_xattr_handler = {
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 50a19a40bd4e..38f75f6aa887 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -26,8 +26,8 @@ struct ll_xattr_entry {
26 */ 26 */
27 char *xe_name; /* xattr name, \0-terminated */ 27 char *xe_name; /* xattr name, \0-terminated */
28 char *xe_value; /* xattr value */ 28 char *xe_value; /* xattr value */
29 unsigned xe_namelen; /* strlen(xe_name) + 1 */ 29 unsigned int xe_namelen; /* strlen(xe_name) + 1 */
30 unsigned xe_vallen; /* xattr value length */ 30 unsigned int xe_vallen; /* xattr value length */
31}; 31};
32 32
33static struct kmem_cache *xattr_kmem; 33static struct kmem_cache *xattr_kmem;
@@ -60,7 +60,7 @@ void ll_xattr_fini(void)
60static void ll_xattr_cache_init(struct ll_inode_info *lli) 60static void ll_xattr_cache_init(struct ll_inode_info *lli)
61{ 61{
62 INIT_LIST_HEAD(&lli->lli_xattrs); 62 INIT_LIST_HEAD(&lli->lli_xattrs);
63 lli->lli_flags |= LLIF_XATTR_CACHE; 63 set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
64} 64}
65 65
66/** 66/**
@@ -104,7 +104,7 @@ static int ll_xattr_cache_find(struct list_head *cache,
104static int ll_xattr_cache_add(struct list_head *cache, 104static int ll_xattr_cache_add(struct list_head *cache,
105 const char *xattr_name, 105 const char *xattr_name,
106 const char *xattr_val, 106 const char *xattr_val,
107 unsigned xattr_val_len) 107 unsigned int xattr_val_len)
108{ 108{
109 struct ll_xattr_entry *xattr; 109 struct ll_xattr_entry *xattr;
110 110
@@ -216,7 +216,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
216 */ 216 */
217static int ll_xattr_cache_valid(struct ll_inode_info *lli) 217static int ll_xattr_cache_valid(struct ll_inode_info *lli)
218{ 218{
219 return !!(lli->lli_flags & LLIF_XATTR_CACHE); 219 return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
220} 220}
221 221
222/** 222/**
@@ -233,7 +233,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
233 233
234 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0) 234 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
235 ; /* empty loop */ 235 ; /* empty loop */
236 lli->lli_flags &= ~LLIF_XATTR_CACHE; 236
237 clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
237 238
238 return 0; 239 return 0;
239} 240}
@@ -415,6 +416,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
415 CDEBUG(D_CACHE, "not caching %s\n", 416 CDEBUG(D_CACHE, "not caching %s\n",
416 XATTR_NAME_ACL_ACCESS); 417 XATTR_NAME_ACL_ACCESS);
417 rc = 0; 418 rc = 0;
419 } else if (!strcmp(xdata, "security.selinux")) {
420 /* Filter out security.selinux, it is cached in slab */
421 CDEBUG(D_CACHE, "not caching security.selinux\n");
422 rc = 0;
418 } else { 423 } else {
419 rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval, 424 rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
420 *xsizes); 425 *xsizes);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
new file mode 100644
index 000000000000..d61d8018001a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -0,0 +1,88 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see http://www.gnu.org/licenses
18 *
19 * GPL HEADER END
20 */
21
22/*
23 * Copyright (c) 2014 Bull SAS
24 * Author: Sebastien Buisson sebastien.buisson@bull.net
25 */
26
27/*
28 * lustre/llite/xattr_security.c
29 * Handler for storing security labels as extended attributes.
30 */
31#include <linux/security.h>
32#include <linux/xattr.h>
33#include "llite_internal.h"
34
35/**
36 * A helper function for ll_security_inode_init_security()
37 * that takes care of setting xattrs
38 *
39 * Get security context of @inode from @xattr_array,
40 * and put it in 'security.xxx' xattr of dentry
41 * stored in @fs_info.
42 *
43 * \retval 0 success
44 * \retval -ENOMEM if no memory could be allocated for xattr name
45 * \retval < 0 failure to set xattr
46 */
47static int
48ll_initxattrs(struct inode *inode, const struct xattr *xattr_array,
49 void *fs_info)
50{
51 const struct xattr_handler *handler;
52 struct dentry *dentry = fs_info;
53 const struct xattr *xattr;
54 int err = 0;
55
56 handler = get_xattr_type(XATTR_SECURITY_PREFIX);
57 if (!handler)
58 return -ENXIO;
59
60 for (xattr = xattr_array; xattr->name; xattr++) {
61 err = handler->set(handler, dentry, inode, xattr->name,
62 xattr->value, xattr->value_len,
63 XATTR_CREATE);
64 if (err < 0)
65 break;
66 }
67 return err;
68}
69
70/**
71 * Initializes security context
72 *
73 * Get security context of @inode in @dir,
74 * and put it in 'security.xxx' xattr of @dentry.
75 *
76 * \retval 0 success, or SELinux is disabled
77 * \retval -ENOMEM if no memory could be allocated for xattr name
78 * \retval < 0 failure to get security context or set xattr
79 */
80int
81ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir)
82{
83 if (!selinux_is_enabled())
84 return 0;
85
86 return security_inode_init_security(inode, dir, NULL,
87 &ll_initxattrs, dentry);
88}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 9f4e826bb0af..b1071cf5a70c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -223,7 +223,14 @@ int lmv_revalidate_slaves(struct obd_export *exp,
223 LASSERT(body); 223 LASSERT(body);
224 224
225 if (unlikely(body->mbo_nlink < 2)) { 225 if (unlikely(body->mbo_nlink < 2)) {
226 CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n", 226 /*
 227	 * If this is a bad stripe, most likely due
 228	 * to a race between close(unlink) and
 229	 * getattr, return -ENOENT so that llite
 230	 * will revalidate the dentry; see
 231	 * ll_inode_revalidate_fini().
232 */
233 CDEBUG(D_INODE, "%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
227 obd->obd_name, body->mbo_nlink, i, 234 obd->obd_name, body->mbo_nlink, i,
228 PFID(&lsm->lsm_md_oinfo[i].lmo_fid), 235 PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
229 PFID(&lsm->lsm_md_oinfo[0].lmo_fid)); 236 PFID(&lsm->lsm_md_oinfo[0].lmo_fid));
@@ -233,7 +240,7 @@ int lmv_revalidate_slaves(struct obd_export *exp,
233 it.it_lock_mode = 0; 240 it.it_lock_mode = 0;
234 } 241 }
235 242
236 rc = -EIO; 243 rc = -ENOENT;
237 goto cleanup; 244 goto cleanup;
238 } 245 }
239 246
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 52b03745ac19..12731a17e263 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -54,9 +54,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
54int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp, 54int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
55 struct lu_fid *fid, struct md_op_data *op_data); 55 struct lu_fid *fid, struct md_op_data *op_data);
56 56
57int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
58 const union lmv_mds_md *lmm, int stripe_count);
59
60int lmv_revalidate_slaves(struct obd_export *exp, 57int lmv_revalidate_slaves(struct obd_export *exp,
61 const struct lmv_stripe_md *lsm, 58 const struct lmv_stripe_md *lsm,
62 ldlm_blocking_callback cb_blocking, 59 ldlm_blocking_callback cb_blocking,
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b946acf..f124f6c05ea4 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -62,6 +62,7 @@ static void lmv_activate_target(struct lmv_obd *lmv,
62 62
63 tgt->ltd_active = activate; 63 tgt->ltd_active = activate;
64 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1); 64 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
65 tgt->ltd_exp->exp_obd->obd_inactive = !activate;
65} 66}
66 67
67/** 68/**
@@ -245,8 +246,7 @@ static int lmv_connect(const struct lu_env *env,
245 return rc; 246 return rc;
246} 247}
247 248
248static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, 249static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
249 u32 cookiesize, u32 def_cookiesize)
250{ 250{
251 struct obd_device *obd = exp->exp_obd; 251 struct obd_device *obd = exp->exp_obd;
252 struct lmv_obd *lmv = &obd->u.lmv; 252 struct lmv_obd *lmv = &obd->u.lmv;
@@ -262,14 +262,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
262 lmv->max_def_easize = def_easize; 262 lmv->max_def_easize = def_easize;
263 change = 1; 263 change = 1;
264 } 264 }
265 if (lmv->max_cookiesize < cookiesize) { 265
266 lmv->max_cookiesize = cookiesize;
267 change = 1;
268 }
269 if (lmv->max_def_cookiesize < def_cookiesize) {
270 lmv->max_def_cookiesize = def_cookiesize;
271 change = 1;
272 }
273 if (change == 0) 266 if (change == 0)
274 return 0; 267 return 0;
275 268
@@ -284,8 +277,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
284 continue; 277 continue;
285 } 278 }
286 279
287 rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize, 280 rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
288 cookiesize, def_cookiesize);
289 if (rc) { 281 if (rc) {
290 CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", 282 CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
291 obd->obd_name, i, rc); 283 obd->obd_name, i, rc);
@@ -368,8 +360,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
368 tgt->ltd_exp = mdc_exp; 360 tgt->ltd_exp = mdc_exp;
369 lmv->desc.ld_active_tgt_count++; 361 lmv->desc.ld_active_tgt_count++;
370 362
371 md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize, 363 md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
372 lmv->max_cookiesize, lmv->max_def_cookiesize);
373 364
374 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", 365 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
375 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, 366 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
@@ -396,27 +387,23 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
396 __u32 index, int gen) 387 __u32 index, int gen)
397{ 388{
398 struct lmv_obd *lmv = &obd->u.lmv; 389 struct lmv_obd *lmv = &obd->u.lmv;
390 struct obd_device *mdc_obd;
399 struct lmv_tgt_desc *tgt; 391 struct lmv_tgt_desc *tgt;
400 int orig_tgt_count = 0; 392 int orig_tgt_count = 0;
401 int rc = 0; 393 int rc = 0;
402 394
403 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index); 395 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
404 396
405 mutex_lock(&lmv->lmv_init_mutex); 397 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
406 398 &obd->obd_uuid);
407 if (lmv->desc.ld_tgt_count == 0) { 399 if (!mdc_obd) {
408 struct obd_device *mdc_obd; 400 CERROR("%s: Target %s not attached: rc = %d\n",
409 401 obd->obd_name, uuidp->uuid, -EINVAL);
410 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, 402 return -EINVAL;
411 &obd->obd_uuid);
412 if (!mdc_obd) {
413 mutex_unlock(&lmv->lmv_init_mutex);
414 CERROR("%s: Target %s not attached: rc = %d\n",
415 obd->obd_name, uuidp->uuid, -EINVAL);
416 return -EINVAL;
417 }
418 } 403 }
419 404
405 mutex_lock(&lmv->lmv_init_mutex);
406
420 if ((index < lmv->tgts_size) && lmv->tgts[index]) { 407 if ((index < lmv->tgts_size) && lmv->tgts[index]) {
421 tgt = lmv->tgts[index]; 408 tgt = lmv->tgts[index];
422 CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", 409 CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
@@ -472,22 +459,27 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
472 lmv->desc.ld_tgt_count = index + 1; 459 lmv->desc.ld_tgt_count = index + 1;
473 } 460 }
474 461
475 if (lmv->connected) { 462 if (!lmv->connected) {
476 rc = lmv_connect_mdc(obd, tgt); 463 /* lmv_check_connect() will connect this target. */
477 if (rc) { 464 mutex_unlock(&lmv->lmv_init_mutex);
478 spin_lock(&lmv->lmv_lock); 465 return rc;
479 if (lmv->desc.ld_tgt_count == index + 1)
480 lmv->desc.ld_tgt_count = orig_tgt_count;
481 memset(tgt, 0, sizeof(*tgt));
482 spin_unlock(&lmv->lmv_lock);
483 } else {
484 int easize = sizeof(struct lmv_stripe_md) +
485 lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
486 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
487 }
488 } 466 }
489 467
468 /* Otherwise let's connect it ourselves */
490 mutex_unlock(&lmv->lmv_init_mutex); 469 mutex_unlock(&lmv->lmv_init_mutex);
470 rc = lmv_connect_mdc(obd, tgt);
471 if (rc) {
472 spin_lock(&lmv->lmv_lock);
473 if (lmv->desc.ld_tgt_count == index + 1)
474 lmv->desc.ld_tgt_count = orig_tgt_count;
475 memset(tgt, 0, sizeof(*tgt));
476 spin_unlock(&lmv->lmv_lock);
477 } else {
478 int easize = sizeof(struct lmv_stripe_md) +
479 lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
480 lmv_init_ea_size(obd->obd_self_export, easize, 0);
481 }
482
491 return rc; 483 return rc;
492} 484}
493 485
@@ -538,7 +530,7 @@ int lmv_check_connect(struct obd_device *obd)
538 class_export_put(lmv->exp); 530 class_export_put(lmv->exp);
539 lmv->connected = 1; 531 lmv->connected = 1;
540 easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC); 532 easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
541 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); 533 lmv_init_ea_size(obd->obd_self_export, easize, 0);
542 mutex_unlock(&lmv->lmv_init_mutex); 534 mutex_unlock(&lmv->lmv_init_mutex);
543 return 0; 535 return 0;
544 536
@@ -1128,9 +1120,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1128 mdc_obd = class_exp2obd(tgt->ltd_exp); 1120 mdc_obd = class_exp2obd(tgt->ltd_exp);
1129 mdc_obd->obd_force = obddev->obd_force; 1121 mdc_obd->obd_force = obddev->obd_force;
1130 err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); 1122 err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1131 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { 1123 if (err) {
1132 return err;
1133 } else if (err) {
1134 if (tgt->ltd_active) { 1124 if (tgt->ltd_active) {
1135 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", 1125 CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
1136 tgt->ltd_uuid.uuid, i, cmd, err); 1126 tgt->ltd_uuid.uuid, i, cmd, err);
@@ -1284,7 +1274,6 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1284 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid); 1274 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1285 lmv->desc.ld_tgt_count = 0; 1275 lmv->desc.ld_tgt_count = 0;
1286 lmv->desc.ld_active_tgt_count = 0; 1276 lmv->desc.ld_active_tgt_count = 0;
1287 lmv->max_cookiesize = 0;
1288 lmv->max_def_easize = 0; 1277 lmv->max_def_easize = 0;
1289 lmv->max_easize = 0; 1278 lmv->max_easize = 0;
1290 lmv->lmv_placement = PLACEMENT_CHAR_POLICY; 1279 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
@@ -1630,27 +1619,28 @@ lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
1630 * ct_restore(). 1619 * ct_restore().
1631 */ 1620 */
1632 if (op_data->op_bias & MDS_CREATE_VOLATILE && 1621 if (op_data->op_bias & MDS_CREATE_VOLATILE &&
1633 (int)op_data->op_mds != -1 && lsm) { 1622 (int)op_data->op_mds != -1) {
1634 int i; 1623 int i;
1635 1624
1636 tgt = lmv_get_target(lmv, op_data->op_mds, NULL); 1625 tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
1637 if (IS_ERR(tgt)) 1626 if (IS_ERR(tgt))
1638 return tgt; 1627 return tgt;
1639 1628
1640 /* refill the right parent fid */ 1629 if (lsm) {
1641 for (i = 0; i < lsm->lsm_md_stripe_count; i++) { 1630 /* refill the right parent fid */
1642 struct lmv_oinfo *oinfo; 1631 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1632 struct lmv_oinfo *oinfo;
1643 1633
1644 oinfo = &lsm->lsm_md_oinfo[i]; 1634 oinfo = &lsm->lsm_md_oinfo[i];
1645 if (oinfo->lmo_mds == op_data->op_mds) { 1635 if (oinfo->lmo_mds == op_data->op_mds) {
1646 *fid = oinfo->lmo_fid; 1636 *fid = oinfo->lmo_fid;
1647 break; 1637 break;
1638 }
1648 } 1639 }
1649 }
1650 1640
1651 /* Hmm, can not find the stripe by mdt_index(op_mds) */ 1641 if (i == lsm->lsm_md_stripe_count)
1652 if (i == lsm->lsm_md_stripe_count) 1642 *fid = lsm->lsm_md_oinfo[0].lmo_fid;
1653 tgt = ERR_PTR(-EINVAL); 1643 }
1654 1644
1655 return tgt; 1645 return tgt;
1656 } 1646 }
@@ -1728,30 +1718,9 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1728 return rc; 1718 return rc;
1729} 1719}
1730 1720
1731static int lmv_done_writing(struct obd_export *exp,
1732 struct md_op_data *op_data,
1733 struct md_open_data *mod)
1734{
1735 struct obd_device *obd = exp->exp_obd;
1736 struct lmv_obd *lmv = &obd->u.lmv;
1737 struct lmv_tgt_desc *tgt;
1738 int rc;
1739
1740 rc = lmv_check_connect(obd);
1741 if (rc)
1742 return rc;
1743
1744 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1745 if (IS_ERR(tgt))
1746 return PTR_ERR(tgt);
1747
1748 rc = md_done_writing(tgt->ltd_exp, op_data, mod);
1749 return rc;
1750}
1751
1752static int 1721static int
1753lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, 1722lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1754 const ldlm_policy_data_t *policy, 1723 const union ldlm_policy_data *policy,
1755 struct lookup_intent *it, struct md_op_data *op_data, 1724 struct lookup_intent *it, struct md_op_data *op_data,
1756 struct lustre_handle *lockh, __u64 extra_lock_flags) 1725 struct lustre_handle *lockh, __u64 extra_lock_flags)
1757{ 1726{
@@ -1847,7 +1816,7 @@ static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
1847 struct lu_fid *fid = md_op_data_fid(op_data, flag); 1816 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1848 struct obd_device *obd = exp->exp_obd; 1817 struct obd_device *obd = exp->exp_obd;
1849 struct lmv_obd *lmv = &obd->u.lmv; 1818 struct lmv_obd *lmv = &obd->u.lmv;
1850 ldlm_policy_data_t policy = { {0} }; 1819 union ldlm_policy_data policy = { { 0 } };
1851 int rc = 0; 1820 int rc = 0;
1852 1821
1853 if (!fid_is_sane(fid)) 1822 if (!fid_is_sane(fid))
@@ -1937,7 +1906,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
1937{ 1906{
1938 struct obd_device *obd = exp->exp_obd; 1907 struct obd_device *obd = exp->exp_obd;
1939 struct lmv_obd *lmv = &obd->u.lmv; 1908 struct lmv_obd *lmv = &obd->u.lmv;
1909 struct obd_export *target_exp;
1940 struct lmv_tgt_desc *src_tgt; 1910 struct lmv_tgt_desc *src_tgt;
1911 struct lmv_tgt_desc *tgt_tgt;
1912 struct mdt_body *body;
1941 int rc; 1913 int rc;
1942 1914
1943 LASSERT(oldlen != 0); 1915 LASSERT(oldlen != 0);
@@ -1977,6 +1949,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
1977 if (rc) 1949 if (rc)
1978 return rc; 1950 return rc;
1979 src_tgt = lmv_find_target(lmv, &op_data->op_fid3); 1951 src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
1952 if (IS_ERR(src_tgt))
1953 return PTR_ERR(src_tgt);
1954
1955 target_exp = src_tgt->ltd_exp;
1980 } else { 1956 } else {
1981 if (op_data->op_mea1) { 1957 if (op_data->op_mea1) {
1982 struct lmv_stripe_md *lsm = op_data->op_mea1; 1958 struct lmv_stripe_md *lsm = op_data->op_mea1;
@@ -1985,29 +1961,27 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
1985 oldlen, 1961 oldlen,
1986 &op_data->op_fid1, 1962 &op_data->op_fid1,
1987 &op_data->op_mds); 1963 &op_data->op_mds);
1988 if (IS_ERR(src_tgt))
1989 return PTR_ERR(src_tgt);
1990 } else { 1964 } else {
1991 src_tgt = lmv_find_target(lmv, &op_data->op_fid1); 1965 src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
1992 if (IS_ERR(src_tgt))
1993 return PTR_ERR(src_tgt);
1994
1995 op_data->op_mds = src_tgt->ltd_idx;
1996 } 1966 }
1967 if (IS_ERR(src_tgt))
1968 return PTR_ERR(src_tgt);
1997 1969
1998 if (op_data->op_mea2) { 1970 if (op_data->op_mea2) {
1999 struct lmv_stripe_md *lsm = op_data->op_mea2; 1971 struct lmv_stripe_md *lsm = op_data->op_mea2;
2000 const struct lmv_oinfo *oinfo;
2001 1972
2002 oinfo = lsm_name_to_stripe_info(lsm, new, newlen); 1973 tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
2003 if (IS_ERR(oinfo)) 1974 newlen,
2004 return PTR_ERR(oinfo); 1975 &op_data->op_fid2,
2005 1976 &op_data->op_mds);
2006 op_data->op_fid2 = oinfo->lmo_fid; 1977 } else {
1978 tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
2007 } 1979 }
1980 if (IS_ERR(tgt_tgt))
1981 return PTR_ERR(tgt_tgt);
1982
1983 target_exp = tgt_tgt->ltd_exp;
2008 } 1984 }
2009 if (IS_ERR(src_tgt))
2010 return PTR_ERR(src_tgt);
2011 1985
2012 /* 1986 /*
2013 * LOOKUP lock on src child (fid3) should also be cancelled for 1987 * LOOKUP lock on src child (fid3) should also be cancelled for
@@ -2048,26 +2022,56 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2048 return rc; 2022 return rc;
2049 } 2023 }
2050 2024
2025retry_rename:
2051 /* 2026 /*
2052 * Cancel all the locks on tgt child (fid4). 2027 * Cancel all the locks on tgt child (fid4).
2053 */ 2028 */
2054 if (fid_is_sane(&op_data->op_fid4)) 2029 if (fid_is_sane(&op_data->op_fid4)) {
2030 struct lmv_tgt_desc *tgt;
2031
2055 rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, 2032 rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
2056 LCK_EX, MDS_INODELOCK_FULL, 2033 LCK_EX, MDS_INODELOCK_FULL,
2057 MF_MDC_CANCEL_FID4); 2034 MF_MDC_CANCEL_FID4);
2035 if (rc)
2036 return rc;
2037
2038 tgt = lmv_find_target(lmv, &op_data->op_fid4);
2039 if (IS_ERR(tgt))
2040 return PTR_ERR(tgt);
2058 2041
2059 CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1), 2042 /*
2060 op_data->op_mds, PFID(&op_data->op_fid2)); 2043 * Since the target child might be destroyed, and it might
 2044	 * become an orphan, and we can only check for orphans on the
 2045	 * local MDT right now, we send the rename request to the MDT
 2046	 * where the target child is located. If the target child does
 2047	 * not exist, the request will be sent to the target parent.
2048 */
2049 target_exp = tgt->ltd_exp;
2050 }
2061 2051
2062 rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen, 2052 rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request);
2063 new, newlen, request); 2053 if (rc && rc != -EREMOTE)
2064 return rc; 2054 return rc;
2055
2056 body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
2057 if (!body)
2058 return -EPROTO;
2059
2060 /* Not cross-ref case, just get out of here. */
2061 if (likely(!(body->mbo_valid & OBD_MD_MDS)))
2062 return rc;
2063
2064 CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n",
2065 exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
2066
2067 op_data->op_fid4 = body->mbo_fid1;
2068 ptlrpc_req_finished(*request);
2069 *request = NULL;
2070 goto retry_rename;
2065} 2071}
2066 2072
2067static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, 2073static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2068 void *ea, size_t ealen, void *ea2, size_t ea2len, 2074 void *ea, size_t ealen, struct ptlrpc_request **request)
2069 struct ptlrpc_request **request,
2070 struct md_open_data **mod)
2071{ 2075{
2072 struct obd_device *obd = exp->exp_obd; 2076 struct obd_device *obd = exp->exp_obd;
2073 struct lmv_obd *lmv = &obd->u.lmv; 2077 struct lmv_obd *lmv = &obd->u.lmv;
@@ -2086,10 +2090,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2086 if (IS_ERR(tgt)) 2090 if (IS_ERR(tgt))
2087 return PTR_ERR(tgt); 2091 return PTR_ERR(tgt);
2088 2092
2089 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2, 2093 return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
2090 ea2len, request, mod);
2091
2092 return rc;
2093} 2094}
2094 2095
2095static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, 2096static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -2623,23 +2624,10 @@ try_next_stripe:
2623 goto retry_unlink; 2624 goto retry_unlink;
2624} 2625}
2625 2626
2626static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) 2627static int lmv_precleanup(struct obd_device *obd)
2627{ 2628{
2628 struct lmv_obd *lmv = &obd->u.lmv; 2629 fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
2629 2630 lprocfs_obd_cleanup(obd);
2630 switch (stage) {
2631 case OBD_CLEANUP_EARLY:
2632 /* XXX: here should be calling obd_precleanup() down to
2633 * stack.
2634 */
2635 break;
2636 case OBD_CLEANUP_EXPORTS:
2637 fld_client_debugfs_fini(&lmv->lmv_fld);
2638 lprocfs_obd_cleanup(obd);
2639 break;
2640 default:
2641 break;
2642 }
2643 return 0; 2631 return 0;
2644} 2632}
2645 2633
@@ -2654,14 +2642,12 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2654 * \param[in] key identifier of key to get value for 2642 * \param[in] key identifier of key to get value for
2655 * \param[in] vallen size of \a val 2643 * \param[in] vallen size of \a val
2656 * \param[out] val pointer to storage location for value 2644 * \param[out] val pointer to storage location for value
2657 * \param[in] lsm optional striping metadata of object
2658 * 2645 *
2659 * \retval 0 on success 2646 * \retval 0 on success
2660 * \retval negative negated errno on failure 2647 * \retval negative negated errno on failure
2661 */ 2648 */
2662static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, 2649static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2663 __u32 keylen, void *key, __u32 *vallen, void *val, 2650 __u32 keylen, void *key, __u32 *vallen, void *val)
2664 struct lov_stripe_md *lsm)
2665{ 2651{
2666 struct obd_device *obd; 2652 struct obd_device *obd;
2667 struct lmv_obd *lmv; 2653 struct lmv_obd *lmv;
@@ -2693,7 +2679,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2693 continue; 2679 continue;
2694 2680
2695 if (!obd_get_info(env, tgt->ltd_exp, keylen, key, 2681 if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
2696 vallen, val, NULL)) 2682 vallen, val))
2697 return 0; 2683 return 0;
2698 } 2684 }
2699 return -EINVAL; 2685 return -EINVAL;
@@ -2709,7 +2695,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2709 * desc. 2695 * desc.
2710 */ 2696 */
2711 rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key, 2697 rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
2712 vallen, val, NULL); 2698 vallen, val);
2713 if (!rc && KEY_IS(KEY_CONN_DATA)) 2699 if (!rc && KEY_IS(KEY_CONN_DATA))
2714 exp->exp_connect_data = *(struct obd_connect_data *)val; 2700 exp->exp_connect_data = *(struct obd_connect_data *)val;
2715 return rc; 2701 return rc;
@@ -2777,90 +2763,6 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
2777 return -EINVAL; 2763 return -EINVAL;
2778} 2764}
2779 2765
2780static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm,
2781 struct lmv_mds_md_v1 *lmm1)
2782{
2783 int cplen;
2784 int i;
2785
2786 lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic);
2787 lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count);
2788 lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index);
2789 lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type);
2790 cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name,
2791 sizeof(lmm1->lmv_pool_name));
2792 if (cplen >= sizeof(lmm1->lmv_pool_name))
2793 return -E2BIG;
2794
2795 for (i = 0; i < lsm->lsm_md_stripe_count; i++)
2796 fid_cpu_to_le(&lmm1->lmv_stripe_fids[i],
2797 &lsm->lsm_md_oinfo[i].lmo_fid);
2798 return 0;
2799}
2800
2801static int
2802lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm,
2803 int stripe_count)
2804{
2805 int lmm_size = 0, rc = 0;
2806 bool allocated = false;
2807
2808 LASSERT(lmmp);
2809
2810 /* Free lmm */
2811 if (*lmmp && !lsm) {
2812 int stripe_cnt;
2813
2814 stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp);
2815 lmm_size = lmv_mds_md_size(stripe_cnt,
2816 le32_to_cpu((*lmmp)->lmv_magic));
2817 if (!lmm_size)
2818 return -EINVAL;
2819 kvfree(*lmmp);
2820 *lmmp = NULL;
2821 return 0;
2822 }
2823
2824 /* Alloc lmm */
2825 if (!*lmmp && !lsm) {
2826 lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC);
2827 LASSERT(lmm_size > 0);
2828 *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
2829 if (!*lmmp)
2830 return -ENOMEM;
2831 lmv_mds_md_stripe_count_set(*lmmp, stripe_count);
2832 (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC);
2833 return lmm_size;
2834 }
2835
2836 /* pack lmm */
2837 LASSERT(lsm);
2838 lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count,
2839 lsm->lsm_md_magic);
2840 if (!*lmmp) {
2841 *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
2842 if (!*lmmp)
2843 return -ENOMEM;
2844 allocated = true;
2845 }
2846
2847 switch (lsm->lsm_md_magic) {
2848 case LMV_MAGIC_V1:
2849 rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1);
2850 break;
2851 default:
2852 rc = -EINVAL;
2853 break;
2854 }
2855
2856 if (rc && allocated) {
2857 kvfree(*lmmp);
2858 *lmmp = NULL;
2859 }
2860
2861 return lmm_size;
2862}
2863
2864static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm, 2766static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
2865 const struct lmv_mds_md_v1 *lmm1) 2767 const struct lmv_mds_md_v1 *lmm1)
2866{ 2768{
@@ -2903,8 +2805,8 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
2903 return rc; 2805 return rc;
2904} 2806}
2905 2807
2906int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, 2808static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
2907 const union lmv_mds_md *lmm, int stripe_count) 2809 const union lmv_mds_md *lmm, size_t lmm_size)
2908{ 2810{
2909 struct lmv_stripe_md *lsm; 2811 struct lmv_stripe_md *lsm;
2910 bool allocated = false; 2812 bool allocated = false;
@@ -2933,17 +2835,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
2933 return 0; 2835 return 0;
2934 } 2836 }
2935 2837
2936 /* Alloc memmd */
2937 if (!lsm && !lmm) {
2938 lsm_size = lmv_stripe_md_size(stripe_count);
2939 lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
2940 if (!lsm)
2941 return -ENOMEM;
2942 lsm->lsm_md_stripe_count = stripe_count;
2943 *lsmp = lsm;
2944 return 0;
2945 }
2946
2947 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) 2838 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
2948 return -EPERM; 2839 return -EPERM;
2949 2840
@@ -2991,38 +2882,17 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
2991 } 2882 }
2992 return lsm_size; 2883 return lsm_size;
2993} 2884}
2994EXPORT_SYMBOL(lmv_unpack_md);
2995 2885
2996static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, 2886void lmv_free_memmd(struct lmv_stripe_md *lsm)
2997 struct lov_mds_md *lmm, int disk_len)
2998{ 2887{
2999 return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp, 2888 lmv_unpackmd(NULL, &lsm, NULL, 0);
3000 (union lmv_mds_md *)lmm, disk_len);
3001}
3002
3003static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
3004 struct lov_stripe_md *lsm)
3005{
3006 const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm;
3007 struct obd_device *obd = exp->exp_obd;
3008 struct lmv_obd *lmv_obd = &obd->u.lmv;
3009 int stripe_count;
3010
3011 if (!lmmp) {
3012 if (lsm)
3013 stripe_count = lmv->lsm_md_stripe_count;
3014 else
3015 stripe_count = lmv_obd->desc.ld_tgt_count;
3016
3017 return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1);
3018 }
3019
3020 return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0);
3021} 2889}
2890EXPORT_SYMBOL(lmv_free_memmd);
3022 2891
3023static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, 2892static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
3024 ldlm_policy_data_t *policy, enum ldlm_mode mode, 2893 union ldlm_policy_data *policy,
3025 enum ldlm_cancel_flags flags, void *opaque) 2894 enum ldlm_mode mode, enum ldlm_cancel_flags flags,
2895 void *opaque)
3026{ 2896{
3027 struct obd_device *obd = exp->exp_obd; 2897 struct obd_device *obd = exp->exp_obd;
3028 struct lmv_obd *lmv = &obd->u.lmv; 2898 struct lmv_obd *lmv = &obd->u.lmv;
@@ -3064,7 +2934,7 @@ static int lmv_set_lock_data(struct obd_export *exp,
3064static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, 2934static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
3065 const struct lu_fid *fid, 2935 const struct lu_fid *fid,
3066 enum ldlm_type type, 2936 enum ldlm_type type,
3067 ldlm_policy_data_t *policy, 2937 union ldlm_policy_data *policy,
3068 enum ldlm_mode mode, 2938 enum ldlm_mode mode,
3069 struct lustre_handle *lockh) 2939 struct lustre_handle *lockh)
3070{ 2940{
@@ -3271,32 +3141,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
3271 return rc; 3141 return rc;
3272} 3142}
3273 3143
3274static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
3275 struct obd_quotactl *oqctl)
3276{
3277 struct obd_device *obd = class_exp2obd(exp);
3278 struct lmv_obd *lmv = &obd->u.lmv;
3279 struct lmv_tgt_desc *tgt;
3280 int rc = 0;
3281 u32 i;
3282
3283 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3284 int err;
3285
3286 tgt = lmv->tgts[i];
3287 if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
3288 CERROR("lmv idx %d inactive\n", i);
3289 return -EIO;
3290 }
3291
3292 err = obd_quotacheck(tgt->ltd_exp, oqctl);
3293 if (err && !rc)
3294 rc = err;
3295 }
3296
3297 return rc;
3298}
3299
3300static int lmv_merge_attr(struct obd_export *exp, 3144static int lmv_merge_attr(struct obd_export *exp,
3301 const struct lmv_stripe_md *lsm, 3145 const struct lmv_stripe_md *lsm,
3302 struct cl_attr *attr, 3146 struct cl_attr *attr,
@@ -3349,12 +3193,9 @@ static struct obd_ops lmv_obd_ops = {
3349 .statfs = lmv_statfs, 3193 .statfs = lmv_statfs,
3350 .get_info = lmv_get_info, 3194 .get_info = lmv_get_info,
3351 .set_info_async = lmv_set_info_async, 3195 .set_info_async = lmv_set_info_async,
3352 .packmd = lmv_packmd,
3353 .unpackmd = lmv_unpackmd,
3354 .notify = lmv_notify, 3196 .notify = lmv_notify,
3355 .get_uuid = lmv_get_uuid, 3197 .get_uuid = lmv_get_uuid,
3356 .iocontrol = lmv_iocontrol, 3198 .iocontrol = lmv_iocontrol,
3357 .quotacheck = lmv_quotacheck,
3358 .quotactl = lmv_quotactl 3199 .quotactl = lmv_quotactl
3359}; 3200};
3360 3201
@@ -3363,7 +3204,6 @@ static struct md_ops lmv_md_ops = {
3363 .null_inode = lmv_null_inode, 3204 .null_inode = lmv_null_inode,
3364 .close = lmv_close, 3205 .close = lmv_close,
3365 .create = lmv_create, 3206 .create = lmv_create,
3366 .done_writing = lmv_done_writing,
3367 .enqueue = lmv_enqueue, 3207 .enqueue = lmv_enqueue,
3368 .getattr = lmv_getattr, 3208 .getattr = lmv_getattr,
3369 .getxattr = lmv_getxattr, 3209 .getxattr = lmv_getxattr,
@@ -3388,6 +3228,7 @@ static struct md_ops lmv_md_ops = {
3388 .intent_getattr_async = lmv_intent_getattr_async, 3228 .intent_getattr_async = lmv_intent_getattr_async,
3389 .revalidate_lock = lmv_revalidate_lock, 3229 .revalidate_lock = lmv_revalidate_lock,
3390 .get_fid_from_lsm = lmv_get_fid_from_lsm, 3230 .get_fid_from_lsm = lmv_get_fid_from_lsm,
3231 .unpackmd = lmv_unpackmd,
3391}; 3232};
3392 3233
3393static int __init lmv_init(void) 3234static int __init lmv_init(void)
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4d2b7d303fea..c49a34bf10e5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -217,7 +217,7 @@ struct lov_object {
217 217
218 union lov_layout_state { 218 union lov_layout_state {
219 struct lov_layout_raid0 { 219 struct lov_layout_raid0 {
220 unsigned lo_nr; 220 unsigned int lo_nr;
221 /** 221 /**
222 * When this is true, lov_object::lo_attr contains 222 * When this is true, lov_object::lo_attr contains
223 * valid up to date attributes for a top-level 223 * valid up to date attributes for a top-level
@@ -412,7 +412,6 @@ struct lov_io_sub {
412 int sub_refcheck; 412 int sub_refcheck;
413 int sub_refcheck2; 413 int sub_refcheck2;
414 int sub_reenter; 414 int sub_reenter;
415 void *sub_cookie;
416}; 415};
417 416
418/** 417/**
@@ -473,20 +472,6 @@ struct lov_session {
473 struct lov_sublock_env ls_subenv; 472 struct lov_sublock_env ls_subenv;
474}; 473};
475 474
476/**
477 * State of transfer for lov.
478 */
479struct lov_req {
480 struct cl_req_slice lr_cl;
481};
482
483/**
484 * State of transfer for lovsub.
485 */
486struct lovsub_req {
487 struct cl_req_slice lsrq_cl;
488};
489
490extern struct lu_device_type lov_device_type; 475extern struct lu_device_type lov_device_type;
491extern struct lu_device_type lovsub_device_type; 476extern struct lu_device_type lovsub_device_type;
492 477
@@ -497,11 +482,9 @@ extern struct kmem_cache *lov_lock_kmem;
497extern struct kmem_cache *lov_object_kmem; 482extern struct kmem_cache *lov_object_kmem;
498extern struct kmem_cache *lov_thread_kmem; 483extern struct kmem_cache *lov_thread_kmem;
499extern struct kmem_cache *lov_session_kmem; 484extern struct kmem_cache *lov_session_kmem;
500extern struct kmem_cache *lov_req_kmem;
501 485
502extern struct kmem_cache *lovsub_lock_kmem; 486extern struct kmem_cache *lovsub_lock_kmem;
503extern struct kmem_cache *lovsub_object_kmem; 487extern struct kmem_cache *lovsub_object_kmem;
504extern struct kmem_cache *lovsub_req_kmem;
505 488
506extern struct kmem_cache *lov_lock_link_kmem; 489extern struct kmem_cache *lov_lock_link_kmem;
507 490
@@ -700,11 +683,6 @@ static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
700 return container_of0(slice, struct lov_page, lps_cl); 683 return container_of0(slice, struct lov_page, lps_cl);
701} 684}
702 685
703static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
704{
705 return container_of0(slice, struct lov_req, lr_cl);
706}
707
708static inline struct lovsub_page * 686static inline struct lovsub_page *
709cl2lovsub_page(const struct cl_page_slice *slice) 687cl2lovsub_page(const struct cl_page_slice *slice)
710{ 688{
@@ -712,11 +690,6 @@ cl2lovsub_page(const struct cl_page_slice *slice)
712 return container_of0(slice, struct lovsub_page, lsb_cl); 690 return container_of0(slice, struct lovsub_page, lsb_cl);
713} 691}
714 692
715static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
716{
717 return container_of0(slice, struct lovsub_req, lsrq_cl);
718}
719
720static inline struct lov_io *cl2lov_io(const struct lu_env *env, 693static inline struct lov_io *cl2lov_io(const struct lu_env *env,
721 const struct cl_io_slice *ios) 694 const struct cl_io_slice *ios)
722{ 695{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 056ae2ed88e8..7301f6e579a1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -46,11 +46,9 @@ struct kmem_cache *lov_lock_kmem;
46struct kmem_cache *lov_object_kmem; 46struct kmem_cache *lov_object_kmem;
47struct kmem_cache *lov_thread_kmem; 47struct kmem_cache *lov_thread_kmem;
48struct kmem_cache *lov_session_kmem; 48struct kmem_cache *lov_session_kmem;
49struct kmem_cache *lov_req_kmem;
50 49
51struct kmem_cache *lovsub_lock_kmem; 50struct kmem_cache *lovsub_lock_kmem;
52struct kmem_cache *lovsub_object_kmem; 51struct kmem_cache *lovsub_object_kmem;
53struct kmem_cache *lovsub_req_kmem;
54 52
55struct kmem_cache *lov_lock_link_kmem; 53struct kmem_cache *lov_lock_link_kmem;
56 54
@@ -79,11 +77,6 @@ struct lu_kmem_descr lov_caches[] = {
79 .ckd_size = sizeof(struct lov_session) 77 .ckd_size = sizeof(struct lov_session)
80 }, 78 },
81 { 79 {
82 .ckd_cache = &lov_req_kmem,
83 .ckd_name = "lov_req_kmem",
84 .ckd_size = sizeof(struct lov_req)
85 },
86 {
87 .ckd_cache = &lovsub_lock_kmem, 80 .ckd_cache = &lovsub_lock_kmem,
88 .ckd_name = "lovsub_lock_kmem", 81 .ckd_name = "lovsub_lock_kmem",
89 .ckd_size = sizeof(struct lovsub_lock) 82 .ckd_size = sizeof(struct lovsub_lock)
@@ -94,11 +87,6 @@ struct lu_kmem_descr lov_caches[] = {
94 .ckd_size = sizeof(struct lovsub_object) 87 .ckd_size = sizeof(struct lovsub_object)
95 }, 88 },
96 { 89 {
97 .ckd_cache = &lovsub_req_kmem,
98 .ckd_name = "lovsub_req_kmem",
99 .ckd_size = sizeof(struct lovsub_req)
100 },
101 {
102 .ckd_cache = &lov_lock_link_kmem, 90 .ckd_cache = &lov_lock_link_kmem,
103 .ckd_name = "lov_lock_link_kmem", 91 .ckd_name = "lov_lock_link_kmem",
104 .ckd_size = sizeof(struct lov_lock_link) 92 .ckd_size = sizeof(struct lov_lock_link)
@@ -110,25 +98,6 @@ struct lu_kmem_descr lov_caches[] = {
110 98
111/***************************************************************************** 99/*****************************************************************************
112 * 100 *
113 * Lov transfer operations.
114 *
115 */
116
117static void lov_req_completion(const struct lu_env *env,
118 const struct cl_req_slice *slice, int ioret)
119{
120 struct lov_req *lr;
121
122 lr = cl2lov_req(slice);
123 kmem_cache_free(lov_req_kmem, lr);
124}
125
126static const struct cl_req_operations lov_req_ops = {
127 .cro_completion = lov_req_completion
128};
129
130/*****************************************************************************
131 *
132 * Lov device and device type functions. 101 * Lov device and device type functions.
133 * 102 *
134 */ 103 */
@@ -248,26 +217,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
248 return rc; 217 return rc;
249} 218}
250 219
251static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
252 struct cl_req *req)
253{
254 struct lov_req *lr;
255 int result;
256
257 lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
258 if (lr) {
259 cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
260 result = 0;
261 } else {
262 result = -ENOMEM;
263 }
264 return result;
265}
266
267static const struct cl_device_operations lov_cl_ops = {
268 .cdo_req_init = lov_req_init
269};
270
271static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) 220static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
272{ 221{
273 int i; 222 int i;
@@ -478,7 +427,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
478 cl_device_init(&ld->ld_cl, t); 427 cl_device_init(&ld->ld_cl, t);
479 d = lov2lu_dev(ld); 428 d = lov2lu_dev(ld);
480 d->ld_ops = &lov_lu_ops; 429 d->ld_ops = &lov_lu_ops;
481 ld->ld_cl.cd_ops = &lov_cl_ops;
482 430
483 mutex_init(&ld->ld_mutex); 431 mutex_init(&ld->ld_mutex);
484 lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class); 432 lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 214c561767e0..ac0bf64c08c1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -76,18 +76,19 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
76 return 0; 76 return 0;
77} 77}
78 78
79struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) 79struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count)
80{ 80{
81 size_t oinfo_ptrs_size, lsm_size;
81 struct lov_stripe_md *lsm; 82 struct lov_stripe_md *lsm;
82 struct lov_oinfo *loi; 83 struct lov_oinfo *loi;
83 int i, oinfo_ptrs_size; 84 int i;
84 85
85 LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT); 86 LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
86 87
87 oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count; 88 oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
88 *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size; 89 lsm_size = sizeof(*lsm) + oinfo_ptrs_size;
89 90
90 lsm = libcfs_kvzalloc(*size, GFP_NOFS); 91 lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
91 if (!lsm) 92 if (!lsm)
92 return NULL; 93 return NULL;
93 94
@@ -117,9 +118,43 @@ void lsm_free_plain(struct lov_stripe_md *lsm)
117 kvfree(lsm); 118 kvfree(lsm);
118} 119}
119 120
120static void lsm_unpackmd_common(struct lov_stripe_md *lsm, 121/*
121 struct lov_mds_md *lmm) 122 * Find minimum stripe maxbytes value. For inactive or
123 * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
124 */
125static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
126{
127 loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
128 struct obd_import *imp;
129
130 if (!tgt->ltd_active)
131 return maxbytes;
132
133 imp = tgt->ltd_obd->u.cli.cl_import;
134 if (!imp)
135 return maxbytes;
136
137 spin_lock(&imp->imp_lock);
138 if (imp->imp_state == LUSTRE_IMP_FULL &&
139 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
140 imp->imp_connect_data.ocd_maxbytes > 0)
141 maxbytes = imp->imp_connect_data.ocd_maxbytes;
142
143 spin_unlock(&imp->imp_lock);
144
145 return maxbytes;
146}
147
148static int lsm_unpackmd_common(struct lov_obd *lov,
149 struct lov_stripe_md *lsm,
150 struct lov_mds_md *lmm,
151 struct lov_ost_data_v1 *objects)
122{ 152{
153 loff_t stripe_maxbytes = LLONG_MAX;
154 unsigned int stripe_count;
155 struct lov_oinfo *loi;
156 unsigned int i;
157
123 /* 158 /*
124 * This supposes lov_mds_md_v1/v3 first fields are 159 * This supposes lov_mds_md_v1/v3 first fields are
125 * are the same 160 * are the same
@@ -129,11 +164,54 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
129 lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern); 164 lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
130 lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen); 165 lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
131 lsm->lsm_pool_name[0] = '\0'; 166 lsm->lsm_pool_name[0] = '\0';
167
168 stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
169
170 for (i = 0; i < stripe_count; i++) {
171 loff_t tgt_bytes;
172
173 loi = lsm->lsm_oinfo[i];
174 ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
175 loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
176 loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
177 if (lov_oinfo_is_dummy(loi))
178 continue;
179
180 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
181 !lov2obd(lov)->obd_process_conf) {
182 CERROR("%s: OST index %d more than OST count %d\n",
183 (char *)lov->desc.ld_uuid.uuid,
184 loi->loi_ost_idx, lov->desc.ld_tgt_count);
185 lov_dump_lmm_v1(D_WARNING, lmm);
186 return -EINVAL;
187 }
188
189 if (!lov->lov_tgts[loi->loi_ost_idx]) {
190 CERROR("%s: OST index %d missing\n",
191 (char *)lov->desc.ld_uuid.uuid,
192 loi->loi_ost_idx);
193 lov_dump_lmm_v1(D_WARNING, lmm);
194 continue;
195 }
196
197 tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]);
198 stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes);
199 }
200
201 if (stripe_maxbytes == LLONG_MAX)
202 stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
203
204 if (!lsm->lsm_stripe_count)
205 lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
206 else
207 lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
208
209 return 0;
132} 210}
133 211
134static void 212static void
135lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, 213lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
136 u64 *lov_off, u64 *swidth) 214 loff_t *lov_off, loff_t *swidth)
137{ 215{
138 if (swidth) 216 if (swidth)
139 *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; 217 *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
@@ -141,36 +219,12 @@ lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
141 219
142static void 220static void
143lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno, 221lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
144 u64 *lov_off, u64 *swidth) 222 loff_t *lov_off, loff_t *swidth)
145{ 223{
146 if (swidth) 224 if (swidth)
147 *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; 225 *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
148} 226}
149 227
150/* Find minimum stripe maxbytes value. For inactive or
151 * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
152 */
153static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
154{
155 struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
156
157 if (!imp || !tgt->ltd_active) {
158 *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
159 return;
160 }
161
162 spin_lock(&imp->imp_lock);
163 if (imp->imp_state == LUSTRE_IMP_FULL &&
164 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
165 imp->imp_connect_data.ocd_maxbytes > 0) {
166 if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
167 *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
168 } else {
169 *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
170 }
171 spin_unlock(&imp->imp_lock);
172}
173
174static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes, 228static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
175 __u16 *stripe_count) 229 __u16 *stripe_count)
176{ 230{
@@ -197,45 +251,7 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
197static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm, 251static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
198 struct lov_mds_md_v1 *lmm) 252 struct lov_mds_md_v1 *lmm)
199{ 253{
200 struct lov_oinfo *loi; 254 return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects);
201 int i;
202 int stripe_count;
203 __u64 stripe_maxbytes = OBD_OBJECT_EOF;
204
205 lsm_unpackmd_common(lsm, lmm);
206
207 stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
208
209 for (i = 0; i < stripe_count; i++) {
210 /* XXX LOV STACKING call down to osc_unpackmd() */
211 loi = lsm->lsm_oinfo[i];
212 ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
213 loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
214 loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
215 if (lov_oinfo_is_dummy(loi))
216 continue;
217
218 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
219 CERROR("OST index %d more than OST count %d\n",
220 loi->loi_ost_idx, lov->desc.ld_tgt_count);
221 lov_dump_lmm_v1(D_WARNING, lmm);
222 return -EINVAL;
223 }
224 if (!lov->lov_tgts[loi->loi_ost_idx]) {
225 CERROR("OST index %d missing\n", loi->loi_ost_idx);
226 lov_dump_lmm_v1(D_WARNING, lmm);
227 return -EINVAL;
228 }
229 /* calculate the minimum stripe max bytes */
230 lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
231 &stripe_maxbytes);
232 }
233
234 lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
235 if (lsm->lsm_stripe_count == 0)
236 lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
237
238 return 0;
239} 255}
240 256
241const struct lsm_operations lsm_v1_ops = { 257const struct lsm_operations lsm_v1_ops = {
@@ -275,55 +291,21 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
275} 291}
276 292
277static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm, 293static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
278 struct lov_mds_md *lmmv1) 294 struct lov_mds_md *lmm)
279{ 295{
280 struct lov_mds_md_v3 *lmm; 296 struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm;
281 struct lov_oinfo *loi; 297 size_t cplen = 0;
282 int i; 298 int rc;
283 int stripe_count;
284 __u64 stripe_maxbytes = OBD_OBJECT_EOF;
285 int cplen = 0;
286 299
287 lmm = (struct lov_mds_md_v3 *)lmmv1; 300 rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects);
301 if (rc)
302 return rc;
288 303
289 lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm); 304 cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name,
290
291 stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
292
293 cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
294 sizeof(lsm->lsm_pool_name)); 305 sizeof(lsm->lsm_pool_name));
295 if (cplen >= sizeof(lsm->lsm_pool_name)) 306 if (cplen >= sizeof(lsm->lsm_pool_name))
296 return -E2BIG; 307 return -E2BIG;
297 308
298 for (i = 0; i < stripe_count; i++) {
299 /* XXX LOV STACKING call down to osc_unpackmd() */
300 loi = lsm->lsm_oinfo[i];
301 ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
302 loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
303 loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
304 if (lov_oinfo_is_dummy(loi))
305 continue;
306
307 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
308 CERROR("OST index %d more than OST count %d\n",
309 loi->loi_ost_idx, lov->desc.ld_tgt_count);
310 lov_dump_lmm_v3(D_WARNING, lmm);
311 return -EINVAL;
312 }
313 if (!lov->lov_tgts[loi->loi_ost_idx]) {
314 CERROR("OST index %d missing\n", loi->loi_ost_idx);
315 lov_dump_lmm_v3(D_WARNING, lmm);
316 return -EINVAL;
317 }
318 /* calculate the minimum stripe max bytes */
319 lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
320 &stripe_maxbytes);
321 }
322
323 lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
324 if (lsm->lsm_stripe_count == 0)
325 lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
326
327 return 0; 309 return 0;
328} 310}
329 311
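The consolidated lsm_unpackmd_common() used above folds the per-stripe maxbytes handling into one place: it takes the smallest limit reported by any OST backing the file, falls back to LUSTRE_EXT3_STRIPE_MAXBYTES when nothing was learned, and scales by the stripe count (or, for zero-stripe files, by the target count). A minimal userspace sketch of that arithmetic, using hypothetical helper and variable names rather than the kernel types:

#include <stdint.h>
#include <stdio.h>

#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL /* ext3-era fallback */
#define STRIPE_MAXBYTES_UNSET       INT64_MAX

/* Model of the maxbytes logic in lsm_unpackmd_common(): take the minimum
 * of the limits reported by the targets backing each stripe, fall back to
 * the ext3 limit if no target reported one, then scale by the stripe
 * count (or by the target count for files with zero stripes). */
static int64_t file_maxbytes(const int64_t *tgt_limit, int nstripes,
			     int tgt_count)
{
	int64_t stripe_maxbytes = STRIPE_MAXBYTES_UNSET;
	int i;

	for (i = 0; i < nstripes; i++)
		if (tgt_limit[i] < stripe_maxbytes)
			stripe_maxbytes = tgt_limit[i];

	if (stripe_maxbytes == STRIPE_MAXBYTES_UNSET)
		stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;

	return stripe_maxbytes * (nstripes ? nstripes : tgt_count);
}

int main(void)
{
	int64_t limits[] = { 1LL << 44, 1LL << 42, 1LL << 43 };

	/* Three stripes; the 2^42-byte target bounds the whole file. */
	printf("maxbytes = %lld\n",
	       (long long)file_maxbytes(limits, 3, 8));
	return 0;
}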
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 07e5ede3e952..774499c74daa 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -36,6 +36,77 @@
36#include "../include/obd_class.h" 36#include "../include/obd_class.h"
37#include "../include/lustre/lustre_user.h" 37#include "../include/lustre/lustre_user.h"
38 38
39/*
40 * If we are unable to get the maximum object size from the OST in
41 * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
42 * the old maximum object size from ext3.
43 */
44#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
45
46struct lov_stripe_md {
47 atomic_t lsm_refc;
48 spinlock_t lsm_lock;
49 pid_t lsm_lock_owner; /* debugging */
50
51 /*
 52	 * maximum possible file size; it may change as OST status changes,
53 * e.g. disconnected, deactivated
54 */
55 loff_t lsm_maxbytes;
56 struct ost_id lsm_oi;
57 u32 lsm_magic;
58 u32 lsm_stripe_size;
59 u32 lsm_pattern; /* RAID0, RAID1, released, ... */
60 u16 lsm_stripe_count;
61 u16 lsm_layout_gen;
62 char lsm_pool_name[LOV_MAXPOOLNAME + 1];
63 struct lov_oinfo *lsm_oinfo[0];
64};
65
66static inline bool lsm_is_released(struct lov_stripe_md *lsm)
67{
68 return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
69}
70
71static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
72{
73 if (!lsm)
74 return false;
75
76 if (lsm_is_released(lsm))
77 return false;
78
79 return true;
80}
81
82struct lsm_operations {
83 void (*lsm_free)(struct lov_stripe_md *);
84 void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *,
85 loff_t *);
86 void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *,
87 loff_t *);
88 int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
89 u16 *stripe_count);
90 int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
91 struct lov_mds_md *lmm);
92};
93
94extern const struct lsm_operations lsm_v1_ops;
95extern const struct lsm_operations lsm_v3_ops;
96
97static inline const struct lsm_operations *lsm_op_find(int magic)
98{
99 switch (magic) {
100 case LOV_MAGIC_V1:
101 return &lsm_v1_ops;
102 case LOV_MAGIC_V3:
103 return &lsm_v3_ops;
104 default:
105 CERROR("unrecognized lsm_magic %08x\n", magic);
106 return NULL;
107 }
108}
109
39/* lov_do_div64(a, b) returns a % b, and a = a / b. 110/* lov_do_div64(a, b) returns a % b, and a = a / b.
40 * The 32-bit code is LOV-specific due to knowing about stripe limits in 111 * The 32-bit code is LOV-specific due to knowing about stripe limits in
41 * order to reduce the divisor to a 32-bit number. If the divisor is 112 * order to reduce the divisor to a 32-bit number. If the divisor is
@@ -110,8 +181,6 @@ struct lov_request_set {
110 atomic_t set_completes; 181 atomic_t set_completes;
111 atomic_t set_success; 182 atomic_t set_success;
112 atomic_t set_finish_checked; 183 atomic_t set_finish_checked;
113 struct llog_cookie *set_cookies;
114 int set_cookie_sent;
115 struct list_head set_list; 184 struct list_head set_list;
116 wait_queue_head_t set_waitq; 185 wait_queue_head_t set_waitq;
117}; 186};
@@ -132,8 +201,6 @@ static inline void lov_put_reqset(struct lov_request_set *set)
132 (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid) 201 (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
133 202
134/* lov_merge.c */ 203/* lov_merge.c */
135void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
136 struct lov_stripe_md *lsm, int stripeno, int *set);
137int lov_merge_lvb_kms(struct lov_stripe_md *lsm, 204int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
138 struct ost_lvb *lvb, __u64 *kms_place); 205 struct ost_lvb *lvb, __u64 *kms_place);
139 206
@@ -150,17 +217,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
150 int stripe); 217 int stripe);
151 218
152/* lov_request.c */ 219/* lov_request.c */
153int lov_update_common_set(struct lov_request_set *set,
154 struct lov_request *req, int rc);
155int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, 220int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
156 struct lov_request_set **reqset); 221 struct lov_request_set **reqset);
157int lov_fini_getattr_set(struct lov_request_set *set); 222int lov_fini_getattr_set(struct lov_request_set *set);
158int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
159 struct obd_trans_info *oti,
160 struct lov_request_set **reqset);
161int lov_update_setattr_set(struct lov_request_set *set,
162 struct lov_request *req, int rc);
163int lov_fini_setattr_set(struct lov_request_set *set);
164int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, 223int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
165 struct lov_request_set **reqset); 224 struct lov_request_set **reqset);
166int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, 225int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
@@ -186,12 +245,10 @@ int lov_del_target(struct obd_device *obd, __u32 index,
186 struct obd_uuid *uuidp, int gen); 245 struct obd_uuid *uuidp, int gen);
187 246
188/* lov_pack.c */ 247/* lov_pack.c */
189int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm, 248ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
190 struct lov_stripe_md *lsm); 249 size_t buf_size);
191int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, 250struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
192 struct lov_mds_md *lmm, int lmm_bytes); 251 size_t lmm_size);
193int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
194 int pattern, int magic);
195int lov_free_memmd(struct lov_stripe_md **lsmp); 252int lov_free_memmd(struct lov_stripe_md **lsmp);
196 253
197void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm); 254void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
@@ -199,7 +256,7 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
199void lov_dump_lmm_common(int level, void *lmmp); 256void lov_dump_lmm_common(int level, void *lmmp);
200 257
201/* lov_ea.c */ 258/* lov_ea.c */
202struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size); 259struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count);
203void lsm_free_plain(struct lov_stripe_md *lsm); 260void lsm_free_plain(struct lov_stripe_md *lsm);
204void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm); 261void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
205 262
@@ -244,4 +301,9 @@ static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
244 return false; 301 return false;
245} 302}
246 303
304static inline struct obd_device *lov2obd(const struct lov_obd *lov)
305{
306 return container_of0(lov, struct obd_device, u.lov);
307}
308
247#endif 309#endif
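The new lsm_op_find() gives lov_unpackmd() a single dispatch point between the v1 and v3 layout formats. A small standalone sketch of the same magic-to-ops-table pattern, with illustrative magic values and a pared-down ops structure (assumptions, not the kernel definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LOV_MAGIC_V1 0x0BD10BD0u /* illustrative values only */
#define LOV_MAGIC_V3 0x0BD30BD0u

struct lsm_ops {
	const char *name;
	int (*unpackmd)(const void *wire, size_t len);
};

static int unpack_v1(const void *wire, size_t len) { (void)wire; (void)len; return 0; }
static int unpack_v3(const void *wire, size_t len) { (void)wire; (void)len; return 0; }

static const struct lsm_ops lsm_v1 = { "v1", unpack_v1 };
static const struct lsm_ops lsm_v3 = { "v3", unpack_v3 };

/* Same shape as lsm_op_find(): map the layout magic to an ops table and
 * return NULL for anything unrecognized so the caller can fail cleanly. */
static const struct lsm_ops *op_find(uint32_t magic)
{
	switch (magic) {
	case LOV_MAGIC_V1:
		return &lsm_v1;
	case LOV_MAGIC_V3:
		return &lsm_v3;
	default:
		return NULL;
	}
}

int main(void)
{
	const struct lsm_ops *ops = op_find(LOV_MAGIC_V3);

	printf("dispatched to %s\n", ops ? ops->name : "(unknown magic)");
	return 0;
}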
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index d10157985ed9..002326c282a7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -86,6 +86,8 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
86 switch (io->ci_type) { 86 switch (io->ci_type) {
87 case CIT_SETATTR: { 87 case CIT_SETATTR: {
88 io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr; 88 io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
89 io->u.ci_setattr.sa_attr_flags =
90 parent->u.ci_setattr.sa_attr_flags;
89 io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid; 91 io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
90 io->u.ci_setattr.sa_stripe_index = stripe; 92 io->u.ci_setattr.sa_stripe_index = stripe;
91 io->u.ci_setattr.sa_parent_fid = 93 io->u.ci_setattr.sa_parent_fid =
@@ -98,6 +100,12 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
98 } 100 }
99 break; 101 break;
100 } 102 }
103 case CIT_DATA_VERSION: {
104 io->u.ci_data_version.dv_data_version = 0;
105 io->u.ci_data_version.dv_flags =
106 parent->u.ci_data_version.dv_flags;
107 break;
108 }
101 case CIT_FAULT: { 109 case CIT_FAULT: {
102 struct cl_object *obj = parent->ci_obj; 110 struct cl_object *obj = parent->ci_obj;
103 loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index); 111 loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
@@ -159,12 +167,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
159 sub->sub_env = ld->ld_emrg[stripe]->emrg_env; 167 sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
160 sub->sub_borrowed = 1; 168 sub->sub_borrowed = 1;
161 } else { 169 } else {
162 void *cookie;
163
164 /* obtain new environment */
165 cookie = cl_env_reenter();
166 sub->sub_env = cl_env_get(&sub->sub_refcheck); 170 sub->sub_env = cl_env_get(&sub->sub_refcheck);
167 cl_env_reexit(cookie);
168 if (IS_ERR(sub->sub_env)) 171 if (IS_ERR(sub->sub_env))
169 result = PTR_ERR(sub->sub_env); 172 result = PTR_ERR(sub->sub_env);
170 173
@@ -337,6 +340,11 @@ static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
337 lio->lis_endpos = OBD_OBJECT_EOF; 340 lio->lis_endpos = OBD_OBJECT_EOF;
338 break; 341 break;
339 342
343 case CIT_DATA_VERSION:
344 lio->lis_pos = 0;
345 lio->lis_endpos = OBD_OBJECT_EOF;
346 break;
347
340 case CIT_FAULT: { 348 case CIT_FAULT: {
341 pgoff_t index = io->u.ci_fault.ft_index; 349 pgoff_t index = io->u.ci_fault.ft_index;
342 350
@@ -514,6 +522,24 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
514 return 0; 522 return 0;
515} 523}
516 524
525static void
526lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
527{
528 struct lov_io *lio = cl2lov_io(env, ios);
529 struct cl_io *parent = lio->lis_cl.cis_io;
530 struct lov_io_sub *sub;
531
532 list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
533 lov_io_end_wrapper(env, sub->sub_io);
534
535 parent->u.ci_data_version.dv_data_version +=
536 sub->sub_io->u.ci_data_version.dv_data_version;
537
538 if (!parent->ci_result)
539 parent->ci_result = sub->sub_io->ci_result;
540 }
541}
542
517static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io) 543static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
518{ 544{
519 cl_io_iter_fini(env, io); 545 cl_io_iter_fini(env, io);
@@ -555,6 +581,65 @@ static void lov_io_unlock(const struct lu_env *env,
555 LASSERT(rc == 0); 581 LASSERT(rc == 0);
556} 582}
557 583
584static int lov_io_read_ahead(const struct lu_env *env,
585 const struct cl_io_slice *ios,
586 pgoff_t start, struct cl_read_ahead *ra)
587{
588 struct lov_io *lio = cl2lov_io(env, ios);
589 struct lov_object *loo = lio->lis_object;
590 struct cl_object *obj = lov2cl(loo);
591 struct lov_layout_raid0 *r0 = lov_r0(loo);
592 unsigned int pps; /* pages per stripe */
593 struct lov_io_sub *sub;
594 pgoff_t ra_end;
595 loff_t suboff;
596 int stripe;
597 int rc;
598
599 stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
600 if (unlikely(!r0->lo_sub[stripe]))
601 return -EIO;
602
603 sub = lov_sub_get(env, lio, stripe);
604 if (IS_ERR(sub))
605 return PTR_ERR(sub);
606
607 lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
608 rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
609 cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
610 ra);
611 lov_sub_put(sub);
612
613 CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
614 PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
615 if (rc)
616 return rc;
617
618 /**
 619	 * Adjust the read-ahead end for the raid0 layout. ra->cra_end is
620 * the maximum page index covered by an underlying DLM lock.
621 * This function converts cra_end from stripe level to file
 622	 * level, and makes sure it does not extend beyond the stripe boundary.
623 */
624 if (r0->lo_nr == 1) /* single stripe file */
625 return 0;
626
627 /* cra_end is stripe level, convert it into file level */
628 ra_end = ra->cra_end;
629 if (ra_end != CL_PAGE_EOF)
630 ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
631
632 pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
633
634 CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n",
635 PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
636 loo->lo_lsm->lsm_stripe_size, stripe, start);
637
638 /* never exceed the end of the stripe */
639 ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
640 return 0;
641}
642
558/** 643/**
559 * lov implementation of cl_operations::cio_submit() method. It takes a list 644 * lov implementation of cl_operations::cio_submit() method. It takes a list
560 * of pages in \a queue, splits it into per-stripe sub-lists, invokes 645 * of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -779,6 +864,15 @@ static const struct cl_io_operations lov_io_ops = {
779 .cio_start = lov_io_start, 864 .cio_start = lov_io_start,
780 .cio_end = lov_io_end 865 .cio_end = lov_io_end
781 }, 866 },
867 [CIT_DATA_VERSION] = {
868 .cio_fini = lov_io_fini,
869 .cio_iter_init = lov_io_iter_init,
870 .cio_iter_fini = lov_io_iter_fini,
871 .cio_lock = lov_io_lock,
872 .cio_unlock = lov_io_unlock,
873 .cio_start = lov_io_start,
874 .cio_end = lov_io_data_version_end,
875 },
782 [CIT_FAULT] = { 876 [CIT_FAULT] = {
783 .cio_fini = lov_io_fini, 877 .cio_fini = lov_io_fini,
784 .cio_iter_init = lov_io_iter_init, 878 .cio_iter_init = lov_io_iter_init,
@@ -801,6 +895,7 @@ static const struct cl_io_operations lov_io_ops = {
801 .cio_fini = lov_io_fini 895 .cio_fini = lov_io_fini
802 } 896 }
803 }, 897 },
898 .cio_read_ahead = lov_io_read_ahead,
804 .cio_submit = lov_io_submit, 899 .cio_submit = lov_io_submit,
805 .cio_commit_async = lov_io_commit_async, 900 .cio_commit_async = lov_io_commit_async,
806}; 901};
@@ -820,6 +915,13 @@ static void lov_empty_io_fini(const struct lu_env *env,
820 wake_up_all(&lov->lo_waitq); 915 wake_up_all(&lov->lo_waitq);
821} 916}
822 917
918static int lov_empty_io_submit(const struct lu_env *env,
919 const struct cl_io_slice *ios,
920 enum cl_req_type crt, struct cl_2queue *queue)
921{
922 return -EBADF;
923}
924
823static void lov_empty_impossible(const struct lu_env *env, 925static void lov_empty_impossible(const struct lu_env *env,
824 struct cl_io_slice *ios) 926 struct cl_io_slice *ios)
825{ 927{
@@ -870,7 +972,7 @@ static const struct cl_io_operations lov_empty_io_ops = {
870 .cio_fini = lov_empty_io_fini 972 .cio_fini = lov_empty_io_fini
871 } 973 }
872 }, 974 },
873 .cio_submit = LOV_EMPTY_IMPOSSIBLE, 975 .cio_submit = lov_empty_io_submit,
874 .cio_commit_async = LOV_EMPTY_IMPOSSIBLE 976 .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
875}; 977};
876 978
@@ -909,6 +1011,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
909 break; 1011 break;
910 case CIT_FSYNC: 1012 case CIT_FSYNC:
911 case CIT_SETATTR: 1013 case CIT_SETATTR:
1014 case CIT_DATA_VERSION:
912 result = 1; 1015 result = 1;
913 break; 1016 break;
914 case CIT_WRITE: 1017 case CIT_WRITE:
@@ -944,6 +1047,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
944 LASSERTF(0, "invalid type %d\n", io->ci_type); 1047 LASSERTF(0, "invalid type %d\n", io->ci_type);
945 case CIT_MISC: 1048 case CIT_MISC:
946 case CIT_FSYNC: 1049 case CIT_FSYNC:
1050 case CIT_DATA_VERSION:
947 result = 1; 1051 result = 1;
948 break; 1052 break;
949 case CIT_SETATTR: 1053 case CIT_SETATTR:
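The read-ahead clamp added in lov_io_read_ahead() keeps the window inside the stripe that contains the start page: with pps pages per stripe, the last permissible index is start + pps - start % pps - 1. A standalone sketch of that clamp with hypothetical sample values:

#include <stdio.h>

typedef unsigned long pgoff_t;

/* Clamp a read-ahead end index so it stays inside the stripe containing
 * 'start'; 'pps' is the number of pages per stripe. */
static pgoff_t ra_clamp(pgoff_t ra_end, pgoff_t start, unsigned int pps)
{
	pgoff_t stripe_last = start + pps - start % pps - 1;

	return ra_end < stripe_last ? ra_end : stripe_last;
}

int main(void)
{
	/* 1 MiB stripes with 4 KiB pages -> 256 pages per stripe.  Page 300
	 * lies in the second stripe (pages 256..511), so a read-ahead end
	 * past 511 is pulled back to 511. */
	printf("%lu\n", ra_clamp(10000, 300, 256)); /* 511 */
	printf("%lu\n", ra_clamp(400, 300, 256));   /* 400 */
	return 0;
}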
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 674af106b50b..391dfd207177 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -104,53 +104,3 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
104 lvb->lvb_ctime = current_ctime; 104 lvb->lvb_ctime = current_ctime;
105 return rc; 105 return rc;
106} 106}
107
108void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
109 struct lov_stripe_md *lsm, int stripeno, int *set)
110{
111 valid &= src->o_valid;
112
113 if (*set) {
114 tgt->o_valid &= valid;
115 if (valid & OBD_MD_FLSIZE) {
116 /* this handles sparse files properly */
117 u64 lov_size;
118
119 lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
120 if (lov_size > tgt->o_size)
121 tgt->o_size = lov_size;
122 }
123 if (valid & OBD_MD_FLBLOCKS)
124 tgt->o_blocks += src->o_blocks;
125 if (valid & OBD_MD_FLBLKSZ)
126 tgt->o_blksize += src->o_blksize;
127 if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
128 tgt->o_ctime = src->o_ctime;
129 if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
130 tgt->o_mtime = src->o_mtime;
131 if (valid & OBD_MD_FLDATAVERSION)
132 tgt->o_data_version += src->o_data_version;
133
134 /* handle flags */
135 if (valid & OBD_MD_FLFLAGS)
136 tgt->o_flags &= src->o_flags;
137 else
138 tgt->o_flags = 0;
139 } else {
140 memcpy(tgt, src, sizeof(*tgt));
141 tgt->o_oi = lsm->lsm_oi;
142 tgt->o_valid = valid;
143 if (valid & OBD_MD_FLSIZE)
144 tgt->o_size = lov_stripe_size(lsm, src->o_size,
145 stripeno);
146 tgt->o_flags = 0;
147 if (valid & OBD_MD_FLFLAGS)
148 tgt->o_flags = src->o_flags;
149 }
150
151 /* data_version needs to be valid on all stripes to be correct! */
152 if (!(valid & OBD_MD_FLDATAVERSION))
153 tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
154
155 *set += 1;
156}
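The removed lov_merge_attrs() merged per-stripe attributes into file-level ones: sizes take the maximum (after mapping stripe offsets to file offsets), block counts accumulate, and timestamps take the newest value. A minimal sketch of that merge rule, using a simplified attribute struct rather than struct obdo:

#include <stdint.h>
#include <stdio.h>

struct attrs {
	uint64_t size;   /* bytes, already mapped to file offsets */
	uint64_t blocks; /* 512-byte blocks used */
	uint64_t mtime;  /* seconds since the epoch */
};

/* Merge one stripe's attributes into the file-level result, following the
 * rules the removed helper applied: the file size is the maximum of the
 * per-stripe sizes, block counts accumulate, timestamps take the newest. */
static void merge_attrs(struct attrs *file, const struct attrs *stripe)
{
	if (stripe->size > file->size)
		file->size = stripe->size;
	file->blocks += stripe->blocks;
	if (stripe->mtime > file->mtime)
		file->mtime = stripe->mtime;
}

int main(void)
{
	struct attrs file = { 0, 0, 0 };
	struct attrs s0 = { 3ull << 20, 6144, 1481600000 };
	struct attrs s1 = { 5ull << 20, 2048, 1481601234 };

	merge_attrs(&file, &s0);
	merge_attrs(&file, &s1);
	printf("size=%llu blocks=%llu mtime=%llu\n",
	       (unsigned long long)file.size,
	       (unsigned long long)file.blocks,
	       (unsigned long long)file.mtime);
	return 0;
}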
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index b23016f7ec26..63b064523c6a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -40,19 +40,20 @@
40#define DEBUG_SUBSYSTEM S_LOV 40#define DEBUG_SUBSYSTEM S_LOV
41#include "../../include/linux/libcfs/libcfs.h" 41#include "../../include/linux/libcfs/libcfs.h"
42 42
43#include "../include/obd_support.h"
44#include "../include/lustre/lustre_ioctl.h"
45#include "../include/lustre_lib.h"
46#include "../include/lustre_net.h"
47#include "../include/lustre/lustre_idl.h" 43#include "../include/lustre/lustre_idl.h"
44#include "../include/lustre/lustre_ioctl.h"
45
46#include "../include/cl_object.h"
48#include "../include/lustre_dlm.h" 47#include "../include/lustre_dlm.h"
48#include "../include/lustre_fid.h"
49#include "../include/lustre_lib.h"
49#include "../include/lustre_mds.h" 50#include "../include/lustre_mds.h"
50#include "../include/obd_class.h" 51#include "../include/lustre_net.h"
51#include "../include/lprocfs_status.h"
52#include "../include/lustre_param.h" 52#include "../include/lustre_param.h"
53#include "../include/cl_object.h" 53#include "../include/lustre_swab.h"
54#include "../include/lustre/ll_fiemap.h" 54#include "../include/lprocfs_status.h"
55#include "../include/lustre_fid.h" 55#include "../include/obd_class.h"
56#include "../include/obd_support.h"
56 57
57#include "lov_internal.h" 58#include "lov_internal.h"
58 59
@@ -826,29 +827,6 @@ out:
826 return rc; 827 return rc;
827} 828}
828 829
829static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
830{
831 struct lov_obd *lov = &obd->u.lov;
832
833 switch (stage) {
834 case OBD_CLEANUP_EARLY: {
835 int i;
836
837 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
838 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
839 continue;
840 obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
841 OBD_CLEANUP_EARLY);
842 }
843 break;
844 }
845 default:
846 break;
847 }
848
849 return 0;
850}
851
852static int lov_cleanup(struct obd_device *obd) 830static int lov_cleanup(struct obd_device *obd)
853{ 831{
854 struct lov_obd *lov = &obd->u.lov; 832 struct lov_obd *lov = &obd->u.lov;
@@ -972,163 +950,6 @@ out:
972 return rc; 950 return rc;
973} 951}
974 952
975#define ASSERT_LSM_MAGIC(lsmp) \
976do { \
977 LASSERT((lsmp)); \
978 LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
979 (lsmp)->lsm_magic == LOV_MAGIC_V3), \
980 "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
981} while (0)
982
983static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
984 void *data, int rc)
985{
986 struct lov_request_set *lovset = (struct lov_request_set *)data;
987 int err;
988
989 /* don't do attribute merge if this async op failed */
990 if (rc)
991 atomic_set(&lovset->set_completes, 0);
992 err = lov_fini_getattr_set(lovset);
993 return rc ? rc : err;
994}
995
996static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
997 struct ptlrpc_request_set *rqset)
998{
999 struct lov_request_set *lovset;
1000 struct lov_obd *lov;
1001 struct lov_request *req;
1002 int rc = 0, err;
1003
1004 LASSERT(oinfo);
1005 ASSERT_LSM_MAGIC(oinfo->oi_md);
1006
1007 if (!exp || !exp->exp_obd)
1008 return -ENODEV;
1009
1010 lov = &exp->exp_obd->u.lov;
1011
1012 rc = lov_prep_getattr_set(exp, oinfo, &lovset);
1013 if (rc)
1014 return rc;
1015
1016 CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
1017 POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
1018 oinfo->oi_md->lsm_stripe_size);
1019
1020 list_for_each_entry(req, &lovset->set_list, rq_link) {
1021 CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
1022 POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
1023 POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
1024 rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
1025 &req->rq_oi, rqset);
1026 if (rc) {
1027 CERROR("%s: getattr objid "DOSTID" subobj"
1028 DOSTID" on OST idx %d: rc = %d\n",
1029 exp->exp_obd->obd_name,
1030 POSTID(&oinfo->oi_oa->o_oi),
1031 POSTID(&req->rq_oi.oi_oa->o_oi),
1032 req->rq_idx, rc);
1033 goto out;
1034 }
1035 }
1036
1037 if (!list_empty(&rqset->set_requests)) {
1038 LASSERT(rc == 0);
1039 LASSERT(!rqset->set_interpret);
1040 rqset->set_interpret = lov_getattr_interpret;
1041 rqset->set_arg = (void *)lovset;
1042 return rc;
1043 }
1044out:
1045 if (rc)
1046 atomic_set(&lovset->set_completes, 0);
1047 err = lov_fini_getattr_set(lovset);
1048 return rc ? rc : err;
1049}
1050
1051static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
1052 void *data, int rc)
1053{
1054 struct lov_request_set *lovset = (struct lov_request_set *)data;
1055 int err;
1056
1057 if (rc)
1058 atomic_set(&lovset->set_completes, 0);
1059 err = lov_fini_setattr_set(lovset);
1060 return rc ? rc : err;
1061}
1062
1063/* If @oti is given, the request goes from MDS and responses from OSTs are not
1064 * needed. Otherwise, a client is waiting for responses.
1065 */
1066static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
1067 struct obd_trans_info *oti,
1068 struct ptlrpc_request_set *rqset)
1069{
1070 struct lov_request_set *set;
1071 struct lov_request *req;
1072 struct lov_obd *lov;
1073 int rc = 0;
1074
1075 LASSERT(oinfo);
1076 ASSERT_LSM_MAGIC(oinfo->oi_md);
1077 if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
1078 LASSERT(oti);
1079 LASSERT(oti->oti_logcookies);
1080 }
1081
1082 if (!exp || !exp->exp_obd)
1083 return -ENODEV;
1084
1085 lov = &exp->exp_obd->u.lov;
1086 rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
1087 if (rc)
1088 return rc;
1089
1090 CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
1091 POSTID(&oinfo->oi_md->lsm_oi),
1092 oinfo->oi_md->lsm_stripe_count,
1093 oinfo->oi_md->lsm_stripe_size);
1094
1095 list_for_each_entry(req, &set->set_list, rq_link) {
1096 if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
1097 oti->oti_logcookies = set->set_cookies + req->rq_stripe;
1098
1099 CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
1100 POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
1101 POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
1102
1103 rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
1104 &req->rq_oi, oti, rqset);
1105 if (rc) {
1106 CERROR("error: setattr objid "DOSTID" subobj"
1107 DOSTID" on OST idx %d: rc = %d\n",
1108 POSTID(&set->set_oi->oi_oa->o_oi),
1109 POSTID(&req->rq_oi.oi_oa->o_oi),
1110 req->rq_idx, rc);
1111 break;
1112 }
1113 }
1114
1115 /* If we are not waiting for responses on async requests, return. */
1116 if (rc || !rqset || list_empty(&rqset->set_requests)) {
1117 int err;
1118
1119 if (rc)
1120 atomic_set(&set->set_completes, 0);
1121 err = lov_fini_setattr_set(set);
1122 return rc ? rc : err;
1123 }
1124
1125 LASSERT(!rqset->set_interpret);
1126 rqset->set_interpret = lov_setattr_interpret;
1127 rqset->set_arg = (void *)set;
1128
1129 return 0;
1130}
1131
1132int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) 953int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
1133{ 954{
1134 struct lov_request_set *lovset = (struct lov_request_set *)data; 955 struct lov_request_set *lovset = (struct lov_request_set *)data;
@@ -1183,7 +1004,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
1183 struct obd_statfs *osfs, __u64 max_age, __u32 flags) 1004 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
1184{ 1005{
1185 struct ptlrpc_request_set *set = NULL; 1006 struct ptlrpc_request_set *set = NULL;
1186 struct obd_info oinfo = { }; 1007 struct obd_info oinfo = {
1008 .oi_osfs = osfs,
1009 .oi_flags = flags,
1010 };
1187 int rc = 0; 1011 int rc = 0;
1188 1012
1189 /* for obdclass we forbid using obd_statfs_rqset, but prefer using async 1013 /* for obdclass we forbid using obd_statfs_rqset, but prefer using async
@@ -1193,8 +1017,6 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
1193 if (!set) 1017 if (!set)
1194 return -ENOMEM; 1018 return -ENOMEM;
1195 1019
1196 oinfo.oi_osfs = osfs;
1197 oinfo.oi_flags = flags;
1198 rc = lov_statfs_async(exp, &oinfo, max_age, set); 1020 rc = lov_statfs_async(exp, &oinfo, max_age, set);
1199 if (rc == 0) 1021 if (rc == 0)
1200 rc = ptlrpc_set_wait(set); 1022 rc = ptlrpc_set_wait(set);
@@ -1235,8 +1057,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1235 1057
1236 /* copy UUID */ 1058 /* copy UUID */
1237 if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), 1059 if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
1238 min((int)data->ioc_plen2, 1060 min_t(unsigned long, data->ioc_plen2,
1239 (int)sizeof(struct obd_uuid)))) 1061 sizeof(struct obd_uuid))))
1240 return -EFAULT; 1062 return -EFAULT;
1241 1063
1242 memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); 1064 memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
@@ -1249,8 +1071,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1249 if (rc) 1071 if (rc)
1250 return rc; 1072 return rc;
1251 if (copy_to_user(data->ioc_pbuf1, &stat_buf, 1073 if (copy_to_user(data->ioc_pbuf1, &stat_buf,
1252 min((int)data->ioc_plen1, 1074 min_t(unsigned long, data->ioc_plen1,
1253 (int)sizeof(stat_buf)))) 1075 sizeof(stat_buf))))
1254 return -EFAULT; 1076 return -EFAULT;
1255 break; 1077 break;
1256 } 1078 }
@@ -1367,8 +1189,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1367 osc_obd->obd_force = obddev->obd_force; 1189 osc_obd->obd_force = obddev->obd_force;
1368 err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, 1190 err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
1369 len, karg, uarg); 1191 len, karg, uarg);
1370 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
1371 return err;
1372 if (err) { 1192 if (err) {
1373 if (lov->lov_tgts[i]->ltd_active) { 1193 if (lov->lov_tgts[i]->ltd_active) {
1374 CDEBUG(err == -ENOTTY ? 1194 CDEBUG(err == -ENOTTY ?
@@ -1391,454 +1211,35 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1391 return rc; 1211 return rc;
1392} 1212}
1393 1213
1394#define FIEMAP_BUFFER_SIZE 4096
1395
1396/**
1397 * Non-zero fe_logical indicates that this is a continuation FIEMAP
1398 * call. The local end offset and the device are sent in the first
1399 * fm_extent. This function calculates the stripe number from the index.
1400 * This function returns a stripe_no on which mapping is to be restarted.
1401 *
1402 * This function returns fm_end_offset which is the in-OST offset at which
1403 * mapping should be restarted. If fm_end_offset=0 is returned then caller
1404 * will re-calculate proper offset in next stripe.
1405 * Note that the first extent is passed to lov_get_info via the value field.
1406 *
1407 * \param fiemap fiemap request header
1408 * \param lsm striping information for the file
1409 * \param fm_start logical start of mapping
1410 * \param fm_end logical end of mapping
1411 * \param start_stripe starting stripe will be returned in this
1412 */
1413static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
1414 struct lov_stripe_md *lsm, u64 fm_start,
1415 u64 fm_end, int *start_stripe)
1416{
1417 u64 local_end = fiemap->fm_extents[0].fe_logical;
1418 u64 lun_start, lun_end;
1419 u64 fm_end_offset;
1420 int stripe_no = -1, i;
1421
1422 if (fiemap->fm_extent_count == 0 ||
1423 fiemap->fm_extents[0].fe_logical == 0)
1424 return 0;
1425
1426 /* Find out stripe_no from ost_index saved in the fe_device */
1427 for (i = 0; i < lsm->lsm_stripe_count; i++) {
1428 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
1429
1430 if (lov_oinfo_is_dummy(oinfo))
1431 continue;
1432
1433 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1434 stripe_no = i;
1435 break;
1436 }
1437 }
1438 if (stripe_no == -1)
1439 return -EINVAL;
1440
1441 /* If we have finished mapping on previous device, shift logical
1442 * offset to start of next device
1443 */
1444 if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
1445 &lun_start, &lun_end)) != 0 &&
1446 local_end < lun_end) {
1447 fm_end_offset = local_end;
1448 *start_stripe = stripe_no;
1449 } else {
1450 /* This is a special value to indicate that caller should
1451 * calculate offset in next stripe.
1452 */
1453 fm_end_offset = 0;
1454 *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
1455 }
1456
1457 return fm_end_offset;
1458}
1459
1460/**
1461 * We calculate on which OST the mapping will end. If the length of mapping
 1462 * is greater than (stripe_size * stripe_count) then the last_stripe
1463 * will be one just before start_stripe. Else we check if the mapping
1464 * intersects each OST and find last_stripe.
1465 * This function returns the last_stripe and also sets the stripe_count
1466 * over which the mapping is spread
1467 *
1468 * \param lsm striping information for the file
1469 * \param fm_start logical start of mapping
1470 * \param fm_end logical end of mapping
1471 * \param start_stripe starting stripe of the mapping
1472 * \param stripe_count the number of stripes across which to map is returned
1473 *
1474 * \retval last_stripe return the last stripe of the mapping
1475 */
1476static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start,
1477 u64 fm_end, int start_stripe,
1478 int *stripe_count)
1479{
1480 int last_stripe;
1481 u64 obd_start, obd_end;
1482 int i, j;
1483
1484 if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
1485 last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
1486 start_stripe - 1;
1487 *stripe_count = lsm->lsm_stripe_count;
1488 } else {
1489 for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
1490 i = (i + 1) % lsm->lsm_stripe_count, j++) {
1491 if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
1492 &obd_start, &obd_end)) == 0)
1493 break;
1494 }
1495 *stripe_count = j;
1496 last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
1497 }
1498
1499 return last_stripe;
1500}
1501
1502/**
1503 * Set fe_device and copy extents from local buffer into main return buffer.
1504 *
1505 * \param fiemap fiemap request header
1506 * \param lcl_fm_ext array of local fiemap extents to be copied
1507 * \param ost_index OST index to be written into the fm_device field for each
1508 extent
1509 * \param ext_count number of extents to be copied
1510 * \param current_extent where to start copying in main extent array
1511 */
1512static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
1513 struct ll_fiemap_extent *lcl_fm_ext,
1514 int ost_index, unsigned int ext_count,
1515 int current_extent)
1516{
1517 char *to;
1518 int ext;
1519
1520 for (ext = 0; ext < ext_count; ext++) {
1521 lcl_fm_ext[ext].fe_device = ost_index;
1522 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1523 }
1524
1525 /* Copy fm_extent's from fm_local to return buffer */
1526 to = (char *)fiemap + fiemap_count_to_size(current_extent);
1527 memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
1528}
1529
1530/**
1531 * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1532 * This also handles the restarting of FIEMAP calls in case mapping overflows
1533 * the available number of extents in single call.
1534 */
1535static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
1536 __u32 *vallen, void *val, struct lov_stripe_md *lsm)
1537{
1538 struct ll_fiemap_info_key *fm_key = key;
1539 struct ll_user_fiemap *fiemap = val;
1540 struct ll_user_fiemap *fm_local = NULL;
1541 struct ll_fiemap_extent *lcl_fm_ext;
1542 int count_local;
1543 unsigned int get_num_extents = 0;
1544 int ost_index = 0, actual_start_stripe, start_stripe;
1545 u64 fm_start, fm_end, fm_length, fm_end_offset;
1546 u64 curr_loc;
1547 int current_extent = 0, rc = 0, i;
1548 /* Whether have we collected enough extents */
1549 bool enough = false;
1550 int ost_eof = 0; /* EOF for object */
1551 int ost_done = 0; /* done with required mapping for this OST? */
1552 int last_stripe;
1553 int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
1554 unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1555
1556 if (!lsm_has_objects(lsm)) {
1557 if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
1558 fm_key->oa.o_size)) {
1559 /*
1560 * released file, return a minimal FIEMAP if
1561 * request fits in file-size.
1562 */
1563 fiemap->fm_mapped_extents = 1;
1564 fiemap->fm_extents[0].fe_logical =
1565 fm_key->fiemap.fm_start;
1566 if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
1567 fm_key->oa.o_size) {
1568 fiemap->fm_extents[0].fe_length =
1569 fm_key->fiemap.fm_length;
1570 } else {
1571 fiemap->fm_extents[0].fe_length =
1572 fm_key->oa.o_size - fm_key->fiemap.fm_start;
1573 fiemap->fm_extents[0].fe_flags |=
1574 (FIEMAP_EXTENT_UNKNOWN |
1575 FIEMAP_EXTENT_LAST);
1576 }
1577 }
1578 rc = 0;
1579 goto out;
1580 }
1581
1582 if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
1583 buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
1584
1585 fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
1586 if (!fm_local) {
1587 rc = -ENOMEM;
1588 goto out;
1589 }
1590 lcl_fm_ext = &fm_local->fm_extents[0];
1591
1592 count_local = fiemap_size_to_count(buffer_size);
1593
1594 memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
1595 fm_start = fiemap->fm_start;
1596 fm_length = fiemap->fm_length;
1597 /* Calculate start stripe, last stripe and length of mapping */
1598 start_stripe = lov_stripe_number(lsm, fm_start);
1599 actual_start_stripe = start_stripe;
1600 fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
1601 fm_start + fm_length - 1);
1602 /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
1603 if (fm_end > fm_key->oa.o_size)
1604 fm_end = fm_key->oa.o_size;
1605
1606 last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
1607 actual_start_stripe,
1608 &stripe_count);
1609
1610 fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
1611 fm_end, &start_stripe);
1612 if (fm_end_offset == -EINVAL) {
1613 rc = -EINVAL;
1614 goto out;
1615 }
1616
1617 if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen)
1618 fiemap->fm_extent_count = fiemap_size_to_count(*vallen);
1619 if (fiemap->fm_extent_count == 0) {
1620 get_num_extents = 1;
1621 count_local = 0;
1622 }
1623 /* Check each stripe */
1624 for (cur_stripe = start_stripe, i = 0; i < stripe_count;
1625 i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
1626 u64 req_fm_len; /* Stores length of required mapping */
1627 u64 len_mapped_single_call;
1628 u64 lun_start, lun_end, obd_object_end;
1629 unsigned int ext_count;
1630
1631 cur_stripe_wrap = cur_stripe;
1632
1633 /* Find out range of mapping on this stripe */
1634 if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
1635 &lun_start, &obd_object_end)) == 0)
1636 continue;
1637
1638 if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
1639 rc = -EIO;
1640 goto out;
1641 }
1642
1643 /* If this is a continuation FIEMAP call and we are on
1644 * starting stripe then lun_start needs to be set to
1645 * fm_end_offset
1646 */
1647 if (fm_end_offset != 0 && cur_stripe == start_stripe)
1648 lun_start = fm_end_offset;
1649
1650 if (fm_length != ~0ULL) {
1651 /* Handle fm_start + fm_length overflow */
1652 if (fm_start + fm_length < fm_start)
1653 fm_length = ~0ULL - fm_start;
1654 lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
1655 cur_stripe);
1656 } else {
1657 lun_end = ~0ULL;
1658 }
1659
1660 if (lun_start == lun_end)
1661 continue;
1662
1663 req_fm_len = obd_object_end - lun_start;
1664 fm_local->fm_length = 0;
1665 len_mapped_single_call = 0;
1666
1667 /* If the output buffer is very large and the objects have many
1668 * extents we may need to loop on a single OST repeatedly
1669 */
1670 ost_eof = 0;
1671 ost_done = 0;
1672 do {
1673 if (get_num_extents == 0) {
1674 /* Don't get too many extents. */
1675 if (current_extent + count_local >
1676 fiemap->fm_extent_count)
1677 count_local = fiemap->fm_extent_count -
1678 current_extent;
1679 }
1680
1681 lun_start += len_mapped_single_call;
1682 fm_local->fm_length = req_fm_len - len_mapped_single_call;
1683 req_fm_len = fm_local->fm_length;
1684 fm_local->fm_extent_count = enough ? 1 : count_local;
1685 fm_local->fm_mapped_extents = 0;
1686 fm_local->fm_flags = fiemap->fm_flags;
1687
1688 fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
1689 ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
1690
1691 if (ost_index < 0 ||
1692 ost_index >= lov->desc.ld_tgt_count) {
1693 rc = -EINVAL;
1694 goto out;
1695 }
1696
1697 /* If OST is inactive, return extent with UNKNOWN flag */
1698 if (!lov->lov_tgts[ost_index]->ltd_active) {
1699 fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
1700 fm_local->fm_mapped_extents = 1;
1701
1702 lcl_fm_ext[0].fe_logical = lun_start;
1703 lcl_fm_ext[0].fe_length = obd_object_end -
1704 lun_start;
1705 lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1706
1707 goto inactive_tgt;
1708 }
1709
1710 fm_local->fm_start = lun_start;
1711 fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1712 memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
1713 *vallen = fiemap_count_to_size(fm_local->fm_extent_count);
1714 rc = obd_get_info(NULL,
1715 lov->lov_tgts[ost_index]->ltd_exp,
1716 keylen, key, vallen, fm_local, lsm);
1717 if (rc != 0)
1718 goto out;
1719
1720inactive_tgt:
1721 ext_count = fm_local->fm_mapped_extents;
1722 if (ext_count == 0) {
1723 ost_done = 1;
1724 /* If last stripe has hole at the end,
1725 * then we need to return
1726 */
1727 if (cur_stripe_wrap == last_stripe) {
1728 fiemap->fm_mapped_extents = 0;
1729 goto finish;
1730 }
1731 break;
1732 } else if (enough) {
1733 /*
1734 * We've collected enough extents and there are
1735 * more extents after it.
1736 */
1737 goto finish;
1738 }
1739
1740 /* If we just need num of extents then go to next device */
1741 if (get_num_extents) {
1742 current_extent += ext_count;
1743 break;
1744 }
1745
1746 len_mapped_single_call =
1747 lcl_fm_ext[ext_count - 1].fe_logical -
1748 lun_start + lcl_fm_ext[ext_count - 1].fe_length;
1749
1750 /* Have we finished mapping on this device? */
1751 if (req_fm_len <= len_mapped_single_call)
1752 ost_done = 1;
1753
1754 /* Clear the EXTENT_LAST flag which can be present on
1755 * last extent
1756 */
1757 if (lcl_fm_ext[ext_count - 1].fe_flags &
1758 FIEMAP_EXTENT_LAST)
1759 lcl_fm_ext[ext_count - 1].fe_flags &=
1760 ~FIEMAP_EXTENT_LAST;
1761
1762 curr_loc = lov_stripe_size(lsm,
1763 lcl_fm_ext[ext_count - 1].fe_logical +
1764 lcl_fm_ext[ext_count - 1].fe_length,
1765 cur_stripe);
1766 if (curr_loc >= fm_key->oa.o_size)
1767 ost_eof = 1;
1768
1769 fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
1770 ost_index, ext_count,
1771 current_extent);
1772
1773 current_extent += ext_count;
1774
1775 /* Ran out of available extents? */
1776 if (current_extent >= fiemap->fm_extent_count)
1777 enough = true;
1778 } while (ost_done == 0 && ost_eof == 0);
1779
1780 if (cur_stripe_wrap == last_stripe)
1781 goto finish;
1782 }
1783
1784finish:
1785 /* Indicate that we are returning device offsets unless file just has
1786 * single stripe
1787 */
1788 if (lsm->lsm_stripe_count > 1)
1789 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1790
1791 if (get_num_extents)
1792 goto skip_last_device_calc;
1793
1794 /* Check if we have reached the last stripe and whether mapping for that
1795 * stripe is done.
1796 */
1797 if (cur_stripe_wrap == last_stripe) {
1798 if (ost_done || ost_eof)
1799 fiemap->fm_extents[current_extent - 1].fe_flags |=
1800 FIEMAP_EXTENT_LAST;
1801 }
1802
1803skip_last_device_calc:
1804 fiemap->fm_mapped_extents = current_extent;
1805
1806out:
1807 kvfree(fm_local);
1808 return rc;
1809}
1810
1811static int lov_get_info(const struct lu_env *env, struct obd_export *exp, 1214static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
1812 __u32 keylen, void *key, __u32 *vallen, void *val, 1215 __u32 keylen, void *key, __u32 *vallen, void *val)
1813 struct lov_stripe_md *lsm)
1814{ 1216{
1815 struct obd_device *obddev = class_exp2obd(exp); 1217 struct obd_device *obddev = class_exp2obd(exp);
1816 struct lov_obd *lov = &obddev->u.lov; 1218 struct lov_obd *lov = &obddev->u.lov;
1817 int rc; 1219 struct lov_desc *ld = &lov->desc;
1220 int rc = 0;
1818 1221
1819 if (!vallen || !val) 1222 if (!vallen || !val)
1820 return -EFAULT; 1223 return -EFAULT;
1821 1224
1822 obd_getref(obddev); 1225 obd_getref(obddev);
1823 1226
1824 if (KEY_IS(KEY_LOVDESC)) { 1227 if (KEY_IS(KEY_MAX_EASIZE)) {
1825 struct lov_desc *desc_ret = val; 1228 u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count,
1826 *desc_ret = lov->desc; 1229 LOV_MAX_STRIPE_COUNT);
1827 1230
1828 rc = 0; 1231 *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3);
1829 goto out; 1232 } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
1830 } else if (KEY_IS(KEY_FIEMAP)) { 1233 u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count,
1831 rc = lov_fiemap(lov, keylen, key, vallen, val, lsm); 1234 LOV_MAX_STRIPE_COUNT);
1832 goto out; 1235
1236 *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3);
1833 } else if (KEY_IS(KEY_TGT_COUNT)) { 1237 } else if (KEY_IS(KEY_TGT_COUNT)) {
1834 *((int *)val) = lov->desc.ld_tgt_count; 1238 *((int *)val) = lov->desc.ld_tgt_count;
1835 rc = 0; 1239 } else {
1836 goto out; 1240 rc = -EINVAL;
1837 } 1241 }
1838 1242
1839 rc = -EINVAL;
1840
1841out:
1842 obd_putref(obddev); 1243 obd_putref(obddev);
1843 return rc; 1244 return rc;
1844} 1245}
@@ -1926,12 +1327,8 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
1926 __u64 bhardlimit = 0; 1327 __u64 bhardlimit = 0;
1927 int i, rc = 0; 1328 int i, rc = 0;
1928 1329
1929 if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON && 1330 if (oqctl->qc_cmd != Q_GETOQUOTA &&
1930 oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF && 1331 oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) {
1931 oqctl->qc_cmd != Q_GETOQUOTA &&
1932 oqctl->qc_cmd != Q_INITQUOTA &&
1933 oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
1934 oqctl->qc_cmd != Q_FINVALIDATE) {
1935 CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); 1332 CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
1936 return -EFAULT; 1333 return -EFAULT;
1937 } 1334 }
@@ -1978,63 +1375,15 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
1978 return rc; 1375 return rc;
1979} 1376}
1980 1377
1981static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
1982 struct obd_quotactl *oqctl)
1983{
1984 struct lov_obd *lov = &obd->u.lov;
1985 int i, rc = 0;
1986
1987 obd_getref(obd);
1988
1989 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
1990 if (!lov->lov_tgts[i])
1991 continue;
1992
1993 /* Skip quota check on the administratively disabled OSTs. */
1994 if (!lov->lov_tgts[i]->ltd_activate) {
1995 CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
1996 i);
1997 continue;
1998 }
1999
2000 if (!lov->lov_tgts[i]->ltd_active) {
2001 CERROR("lov idx %d inactive\n", i);
2002 rc = -EIO;
2003 goto out;
2004 }
2005 }
2006
2007 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
2008 int err;
2009
2010 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
2011 continue;
2012
2013 err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
2014 if (err && !rc)
2015 rc = err;
2016 }
2017
2018out:
2019 obd_putref(obd);
2020
2021 return rc;
2022}
2023
2024static struct obd_ops lov_obd_ops = { 1378static struct obd_ops lov_obd_ops = {
2025 .owner = THIS_MODULE, 1379 .owner = THIS_MODULE,
2026 .setup = lov_setup, 1380 .setup = lov_setup,
2027 .precleanup = lov_precleanup,
2028 .cleanup = lov_cleanup, 1381 .cleanup = lov_cleanup,
2029 /*.process_config = lov_process_config,*/ 1382 /*.process_config = lov_process_config,*/
2030 .connect = lov_connect, 1383 .connect = lov_connect,
2031 .disconnect = lov_disconnect, 1384 .disconnect = lov_disconnect,
2032 .statfs = lov_statfs, 1385 .statfs = lov_statfs,
2033 .statfs_async = lov_statfs_async, 1386 .statfs_async = lov_statfs_async,
2034 .packmd = lov_packmd,
2035 .unpackmd = lov_unpackmd,
2036 .getattr_async = lov_getattr_async,
2037 .setattr_async = lov_setattr_async,
2038 .iocontrol = lov_iocontrol, 1387 .iocontrol = lov_iocontrol,
2039 .get_info = lov_get_info, 1388 .get_info = lov_get_info,
2040 .set_info_async = lov_set_info_async, 1389 .set_info_async = lov_set_info_async,
@@ -2046,7 +1395,6 @@ static struct obd_ops lov_obd_ops = {
2046 .getref = lov_getref, 1395 .getref = lov_getref,
2047 .putref = lov_putref, 1396 .putref = lov_putref,
2048 .quotactl = lov_quotactl, 1397 .quotactl = lov_quotactl,
2049 .quotacheck = lov_quotacheck,
2050}; 1398};
2051 1399
2052struct kmem_cache *lov_oinfo_slab; 1400struct kmem_cache *lov_oinfo_slab;
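The FIEMAP code removed from lov_obd.c splits a file-level mapping request across stripes using the usual RAID0 arithmetic: a file offset picks a stripe round-robin by stripe size, and the number of completed stripe-width rounds gives the offset inside that stripe's OST object. A self-contained sketch of that mapping with hypothetical helper names, not the kernel ones:

#include <stdint.h>
#include <stdio.h>

/* Map a file offset to (stripe number, in-stripe offset) for a RAID0
 * layout with stripe size 'ssize' and 'scount' stripes. */
static unsigned int stripe_number(uint64_t off, uint32_t ssize, uint16_t scount)
{
	return (unsigned int)((off / ssize) % scount);
}

static uint64_t stripe_offset(uint64_t off, uint32_t ssize, uint16_t scount)
{
	uint64_t swidth = (uint64_t)ssize * scount; /* one full round */

	return (off / swidth) * ssize + off % ssize;
}

int main(void)
{
	uint32_t ssize = 1u << 20;  /* 1 MiB stripes */
	uint16_t scount = 4;
	uint64_t off = 9ull << 20;  /* 9 MiB into the file */

	/* 9 MiB is two full 4 MiB rounds plus 1 MiB, so it lands in stripe 1
	 * of the third round at in-stripe offset 2 MiB. */
	printf("stripe %u, in-stripe offset %llu\n",
	       stripe_number(off, ssize, scount),
	       (unsigned long long)stripe_offset(off, ssize, scount));
	return 0;
}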
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 52f736338887..76d4256fa828 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -39,6 +39,11 @@
39 39
40#include "lov_cl_internal.h" 40#include "lov_cl_internal.h"
41 41
42static inline struct lov_device *lov_object_dev(struct lov_object *obj)
43{
44 return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
45}
46
42/** \addtogroup lov 47/** \addtogroup lov
43 * @{ 48 * @{
44 */ 49 */
@@ -51,7 +56,7 @@
51 56
52struct lov_layout_operations { 57struct lov_layout_operations {
53 int (*llo_init)(const struct lu_env *env, struct lov_device *dev, 58 int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
54 struct lov_object *lov, 59 struct lov_object *lov, struct lov_stripe_md *lsm,
55 const struct cl_object_conf *conf, 60 const struct cl_object_conf *conf,
56 union lov_layout_state *state); 61 union lov_layout_state *state);
57 int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, 62 int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
@@ -75,12 +80,11 @@ struct lov_layout_operations {
75 80
76static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov); 81static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
77 82
78void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm) 83static void lov_lsm_put(struct lov_stripe_md *lsm)
79{ 84{
80 if (lsm) 85 if (lsm)
81 lov_free_memmd(&lsm); 86 lov_free_memmd(&lsm);
82} 87}
83EXPORT_SYMBOL(lov_lsm_put);
84 88
85/***************************************************************************** 89/*****************************************************************************
86 * 90 *
@@ -97,17 +101,17 @@ static void lov_install_empty(const struct lu_env *env,
97 */ 101 */
98} 102}
99 103
100static int lov_init_empty(const struct lu_env *env, 104static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
101 struct lov_device *dev, struct lov_object *lov, 105 struct lov_object *lov, struct lov_stripe_md *lsm,
102 const struct cl_object_conf *conf, 106 const struct cl_object_conf *conf,
103 union lov_layout_state *state) 107 union lov_layout_state *state)
104{ 108{
105 return 0; 109 return 0;
106} 110}
107 111
108static void lov_install_raid0(const struct lu_env *env, 112static void lov_install_raid0(const struct lu_env *env,
109 struct lov_object *lov, 113 struct lov_object *lov,
110 union lov_layout_state *state) 114 union lov_layout_state *state)
111{ 115{
112} 116}
113 117
@@ -212,8 +216,8 @@ static int lov_page_slice_fixup(struct lov_object *lov,
212 return cl_object_header(stripe)->coh_page_bufsize; 216 return cl_object_header(stripe)->coh_page_bufsize;
213} 217}
214 218
215static int lov_init_raid0(const struct lu_env *env, 219static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
216 struct lov_device *dev, struct lov_object *lov, 220 struct lov_object *lov, struct lov_stripe_md *lsm,
217 const struct cl_object_conf *conf, 221 const struct cl_object_conf *conf,
218 union lov_layout_state *state) 222 union lov_layout_state *state)
219{ 223{
@@ -223,7 +227,6 @@ static int lov_init_raid0(const struct lu_env *env,
223 struct cl_object *stripe; 227 struct cl_object *stripe;
224 struct lov_thread_info *lti = lov_env_info(env); 228 struct lov_thread_info *lti = lov_env_info(env);
225 struct cl_object_conf *subconf = &lti->lti_stripe_conf; 229 struct cl_object_conf *subconf = &lti->lti_stripe_conf;
226 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
227 struct lu_fid *ofid = &lti->lti_fid; 230 struct lu_fid *ofid = &lti->lti_fid;
228 struct lov_layout_raid0 *r0 = &state->raid0; 231 struct lov_layout_raid0 *r0 = &state->raid0;
229 232
@@ -298,13 +301,11 @@ out:
298 return result; 301 return result;
299} 302}
300 303
301static int lov_init_released(const struct lu_env *env, 304static int lov_init_released(const struct lu_env *env, struct lov_device *dev,
302 struct lov_device *dev, struct lov_object *lov, 305 struct lov_object *lov, struct lov_stripe_md *lsm,
303 const struct cl_object_conf *conf, 306 const struct cl_object_conf *conf,
304 union lov_layout_state *state) 307 union lov_layout_state *state)
305{ 308{
306 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
307
308 LASSERT(lsm); 309 LASSERT(lsm);
309 LASSERT(lsm_is_released(lsm)); 310 LASSERT(lsm_is_released(lsm));
310 LASSERT(!lov->lo_lsm); 311 LASSERT(!lov->lo_lsm);
@@ -313,6 +314,40 @@ static int lov_init_released(const struct lu_env *env,
313 return 0; 314 return 0;
314} 315}
315 316
317static struct cl_object *lov_find_subobj(const struct lu_env *env,
318 struct lov_object *lov,
319 struct lov_stripe_md *lsm,
320 int stripe_idx)
321{
322 struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
323 struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
324 struct lov_thread_info *lti = lov_env_info(env);
325 struct lu_fid *ofid = &lti->lti_fid;
326 struct cl_device *subdev;
327 struct cl_object *result;
328 int ost_idx;
329 int rc;
330
331 if (lov->lo_type != LLT_RAID0) {
332 result = NULL;
333 goto out;
334 }
335
336 ost_idx = oinfo->loi_ost_idx;
337 rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
338 if (rc) {
339 result = NULL;
340 goto out;
341 }
342
343 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
344 result = lov_sub_find(env, subdev, ofid, NULL);
345out:
346 if (!result)
347 result = ERR_PTR(-EINVAL);
348 return result;
349}
350
316static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov, 351static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
317 union lov_layout_state *state) 352 union lov_layout_state *state)
318{ 353{
@@ -687,31 +722,24 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
687} 722}
688 723
689static int lov_layout_change(const struct lu_env *unused, 724static int lov_layout_change(const struct lu_env *unused,
690 struct lov_object *lov, 725 struct lov_object *lov, struct lov_stripe_md *lsm,
691 const struct cl_object_conf *conf) 726 const struct cl_object_conf *conf)
692{ 727{
693 int result; 728 enum lov_layout_type llt = lov_type(lsm);
694 enum lov_layout_type llt = LLT_EMPTY;
695 union lov_layout_state *state = &lov->u; 729 union lov_layout_state *state = &lov->u;
696 const struct lov_layout_operations *old_ops; 730 const struct lov_layout_operations *old_ops;
697 const struct lov_layout_operations *new_ops; 731 const struct lov_layout_operations *new_ops;
698
699 void *cookie;
700 struct lu_env *env; 732 struct lu_env *env;
701 int refcheck; 733 int refcheck;
734 int rc;
702 735
703 LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); 736 LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
704 737
705 if (conf->u.coc_md)
706 llt = lov_type(conf->u.coc_md->lsm);
707 LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
708
709 cookie = cl_env_reenter();
710 env = cl_env_get(&refcheck); 738 env = cl_env_get(&refcheck);
711 if (IS_ERR(env)) { 739 if (IS_ERR(env))
712 cl_env_reexit(cookie);
713 return PTR_ERR(env); 740 return PTR_ERR(env);
714 } 741
742 LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
715 743
716 CDEBUG(D_INODE, DFID" from %s to %s\n", 744 CDEBUG(D_INODE, DFID" from %s to %s\n",
717 PFID(lu_object_fid(lov2lu(lov))), 745 PFID(lu_object_fid(lov2lu(lov))),
@@ -720,38 +748,37 @@ static int lov_layout_change(const struct lu_env *unused,
720 old_ops = &lov_dispatch[lov->lo_type]; 748 old_ops = &lov_dispatch[lov->lo_type];
721 new_ops = &lov_dispatch[llt]; 749 new_ops = &lov_dispatch[llt];
722 750
723 result = cl_object_prune(env, &lov->lo_cl); 751 rc = cl_object_prune(env, &lov->lo_cl);
724 if (result != 0) 752 if (rc)
753 goto out;
754
755 rc = old_ops->llo_delete(env, lov, &lov->u);
756 if (rc)
725 goto out; 757 goto out;
726 758
727 result = old_ops->llo_delete(env, lov, &lov->u); 759 old_ops->llo_fini(env, lov, &lov->u);
728 if (result == 0) {
729 old_ops->llo_fini(env, lov, &lov->u);
730 760
731 LASSERT(atomic_read(&lov->lo_active_ios) == 0); 761 LASSERT(!atomic_read(&lov->lo_active_ios));
732 762
733 lov->lo_type = LLT_EMPTY; 763 lov->lo_type = LLT_EMPTY;
734 /* page bufsize fixup */ 764
735 cl_object_header(&lov->lo_cl)->coh_page_bufsize -= 765 /* page bufsize fixup */
766 cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
736 lov_page_slice_fixup(lov, NULL); 767 lov_page_slice_fixup(lov, NULL);
737 768
738 result = new_ops->llo_init(env, 769 rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state);
739 lu2lov_dev(lov->lo_cl.co_lu.lo_dev), 770 if (rc) {
740 lov, conf, state); 771 new_ops->llo_delete(env, lov, state);
741 if (result == 0) { 772 new_ops->llo_fini(env, lov, state);
742 new_ops->llo_install(env, lov, state); 773 /* this file becomes an EMPTY file. */
743 lov->lo_type = llt; 774 goto out;
744 } else {
745 new_ops->llo_delete(env, lov, state);
746 new_ops->llo_fini(env, lov, state);
747 /* this file becomes an EMPTY file. */
748 }
749 } 775 }
750 776
777 new_ops->llo_install(env, lov, state);
778 lov->lo_type = llt;
751out: 779out:
752 cl_env_put(env, &refcheck); 780 cl_env_put(env, &refcheck);
753 cl_env_reexit(cookie); 781 return rc;
754 return result;
755} 782}
756 783
757/***************************************************************************** 784/*****************************************************************************
@@ -762,26 +789,38 @@ out:
762int lov_object_init(const struct lu_env *env, struct lu_object *obj, 789int lov_object_init(const struct lu_env *env, struct lu_object *obj,
763 const struct lu_object_conf *conf) 790 const struct lu_object_conf *conf)
764{ 791{
765 struct lov_device *dev = lu2lov_dev(obj->lo_dev);
766 struct lov_object *lov = lu2lov(obj); 792 struct lov_object *lov = lu2lov(obj);
793 struct lov_device *dev = lov_object_dev(lov);
767 const struct cl_object_conf *cconf = lu2cl_conf(conf); 794 const struct cl_object_conf *cconf = lu2cl_conf(conf);
768 union lov_layout_state *set = &lov->u; 795 union lov_layout_state *set = &lov->u;
769 const struct lov_layout_operations *ops; 796 const struct lov_layout_operations *ops;
770 int result; 797 struct lov_stripe_md *lsm = NULL;
798 int rc;
771 799
772 init_rwsem(&lov->lo_type_guard); 800 init_rwsem(&lov->lo_type_guard);
773 atomic_set(&lov->lo_active_ios, 0); 801 atomic_set(&lov->lo_active_ios, 0);
774 init_waitqueue_head(&lov->lo_waitq); 802 init_waitqueue_head(&lov->lo_waitq);
775
776 cl_object_page_init(lu2cl(obj), sizeof(struct lov_page)); 803 cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
777 804
805 lov->lo_type = LLT_EMPTY;
806 if (cconf->u.coc_layout.lb_buf) {
807 lsm = lov_unpackmd(dev->ld_lov,
808 cconf->u.coc_layout.lb_buf,
809 cconf->u.coc_layout.lb_len);
810 if (IS_ERR(lsm))
811 return PTR_ERR(lsm);
812 }
813
778 /* no locking is necessary, as object is being created */ 814 /* no locking is necessary, as object is being created */
779 lov->lo_type = lov_type(cconf->u.coc_md->lsm); 815 lov->lo_type = lov_type(lsm);
780 ops = &lov_dispatch[lov->lo_type]; 816 ops = &lov_dispatch[lov->lo_type];
781 result = ops->llo_init(env, dev, lov, cconf, set); 817 rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
782 if (result == 0) 818 if (!rc)
783 ops->llo_install(env, lov, set); 819 ops->llo_install(env, lov, set);
784 return result; 820
821 lov_lsm_put(lsm);
822
823 return rc;
785} 824}
786 825
787static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, 826static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -791,6 +830,15 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
791 struct lov_object *lov = cl2lov(obj); 830 struct lov_object *lov = cl2lov(obj);
792 int result = 0; 831 int result = 0;
793 832
833 if (conf->coc_opc == OBJECT_CONF_SET &&
834 conf->u.coc_layout.lb_buf) {
835 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
836 conf->u.coc_layout.lb_buf,
837 conf->u.coc_layout.lb_len);
838 if (IS_ERR(lsm))
839 return PTR_ERR(lsm);
840 }
841
794 lov_conf_lock(lov); 842 lov_conf_lock(lov);
795 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) { 843 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
796 lov->lo_layout_invalid = true; 844 lov->lo_layout_invalid = true;
@@ -810,8 +858,6 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
810 858
811 LASSERT(conf->coc_opc == OBJECT_CONF_SET); 859 LASSERT(conf->coc_opc == OBJECT_CONF_SET);
812 860
813 if (conf->u.coc_md)
814 lsm = conf->u.coc_md->lsm;
815 if ((!lsm && !lov->lo_lsm) || 861 if ((!lsm && !lov->lo_lsm) ||
816 ((lsm && lov->lo_lsm) && 862 ((lsm && lov->lo_lsm) &&
817 (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) && 863 (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
@@ -829,11 +875,12 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
829 goto out; 875 goto out;
830 } 876 }
831 877
832 result = lov_layout_change(env, lov, conf); 878 result = lov_layout_change(env, lov, lsm, conf);
833 lov->lo_layout_invalid = result != 0; 879 lov->lo_layout_invalid = result != 0;
834 880
835out: 881out:
836 lov_conf_unlock(lov); 882 lov_conf_unlock(lov);
883 lov_lsm_put(lsm);
837 CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n", 884 CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
838 PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid); 885 PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
839 return result; 886 return result;
@@ -911,6 +958,473 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
911 io); 958 io);
912} 959}
913 960
961/**
 962 * We calculate on which OST the mapping will end. If the length of the mapping
 963 * is greater than (stripe_size * stripe_count) then the last_stripe
 964 * will be the one just before start_stripe. Otherwise we check whether the mapping
 965 * intersects each OST and find last_stripe.
966 * This function returns the last_stripe and also sets the stripe_count
967 * over which the mapping is spread
968 *
969 * \param lsm [in] striping information for the file
970 * \param fm_start [in] logical start of mapping
971 * \param fm_end [in] logical end of mapping
972 * \param start_stripe [in] starting stripe of the mapping
 973 * \param stripe_count [out] returns the number of stripes across which the
 974 * mapping is spread
975 *
976 * \retval last_stripe return the last stripe of the mapping
977 */
978static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
979 loff_t fm_start, loff_t fm_end,
980 int start_stripe, int *stripe_count)
981{
982 int last_stripe;
983 loff_t obd_start;
984 loff_t obd_end;
985 int i, j;
986
987 if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
988 last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
989 start_stripe - 1);
990 *stripe_count = lsm->lsm_stripe_count;
991 } else {
992 for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
993 i = (i + 1) % lsm->lsm_stripe_count, j++) {
994 if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end,
995 &obd_start, &obd_end)))
996 break;
997 }
998 *stripe_count = j;
999 last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
1000 }
1001
1002 return last_stripe;
1003}
1004
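For reference, here is a standalone sketch (not part of the patch) of the last-stripe calculation described above. The names demo_lsm, demo_intersects and demo_calc_last_stripe are hypothetical, and the intersection test is simplified to plain round-robin RAID0 geometry where the real code consults lov_stripe_intersects() on the LSM.

#include <stdio.h>

struct demo_lsm {
        unsigned long long stripe_size; /* bytes per stripe chunk */
        int stripe_count;               /* number of OST stripes */
};

/* Simplified check: does stripe i own any byte of [start, end]? */
static int demo_intersects(const struct demo_lsm *lsm, int i,
                           unsigned long long start, unsigned long long end)
{
        unsigned long long step = (unsigned long long)lsm->stripe_count *
                                  lsm->stripe_size;
        unsigned long long off;

        for (off = (unsigned long long)i * lsm->stripe_size; off <= end;
             off += step)
                if (off + lsm->stripe_size > start)
                        return 1;
        return 0;
}

/* Mirrors the fiemap_calc_last_stripe() logic above. */
static int demo_calc_last_stripe(const struct demo_lsm *lsm,
                                 unsigned long long fm_start,
                                 unsigned long long fm_end,
                                 int start_stripe, int *stripe_count)
{
        int last_stripe, i, j;

        if (fm_end - fm_start > lsm->stripe_size * lsm->stripe_count) {
                last_stripe = start_stripe < 1 ? lsm->stripe_count - 1 :
                                                 start_stripe - 1;
                *stripe_count = lsm->stripe_count;
        } else {
                for (j = 0, i = start_stripe; j < lsm->stripe_count;
                     i = (i + 1) % lsm->stripe_count, j++)
                        if (!demo_intersects(lsm, i, fm_start, fm_end))
                                break;
                *stripe_count = j;
                last_stripe = (start_stripe + j - 1) % lsm->stripe_count;
        }
        return last_stripe;
}

int main(void)
{
        struct demo_lsm lsm = { .stripe_size = 1ULL << 20, .stripe_count = 4 };
        int nr;
        int last = demo_calc_last_stripe(&lsm, 0, 3ULL << 20, 0, &nr);

        printf("last_stripe=%d stripe_count=%d\n", last, nr);
        return 0;
}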
1005/**
1006 * Set fe_device and copy extents from local buffer into main return buffer.
1007 *
1008 * \param fiemap [out] fiemap to hold all extents
1009 * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer
1010 * \param ost_index [in] OST index to be written into the fm_device
1011 * field for each extent
1012 * \param ext_count [in] number of extents to be copied
1013 * \param current_extent [in] where to start copying in the extent array
1014 */
1015static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1016 struct fiemap_extent *lcl_fm_ext,
1017 int ost_index, unsigned int ext_count,
1018 int current_extent)
1019{
1020 unsigned int ext;
1021 char *to;
1022
1023 for (ext = 0; ext < ext_count; ext++) {
1024 lcl_fm_ext[ext].fe_device = ost_index;
1025 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1026 }
1027
1028 /* Copy fm_extent's from fm_local to return buffer */
1029 to = (char *)fiemap + fiemap_count_to_size(current_extent);
1030 memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1031}
1032
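For orientation, the destination offset of the memcpy() above is what fiemap_count_to_size() is assumed to compute, i.e. the size of a struct fiemap header plus the extents already filled in. A minimal sketch of that helper (the demo_ prefix is hypothetical, not part of the patch):

static inline size_t demo_fiemap_count_to_size(unsigned int extent_count)
{
        /* header followed by extent_count struct fiemap_extent records */
        return sizeof(struct fiemap) +
               extent_count * sizeof(struct fiemap_extent);
}

so copying at fiemap_count_to_size(current_extent) appends the new extents directly after those already returned to the caller.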
1033#define FIEMAP_BUFFER_SIZE 4096
1034
1035/**
1036 * Non-zero fe_logical indicates that this is a continuation FIEMAP
1037 * call. The local end offset and the device are sent in the first
1038 * fm_extent. This function calculates the stripe number from the index.
1039 * This function returns a stripe_no on which mapping is to be restarted.
1040 *
 1041 * This function returns fm_end_offset, which is the in-OST offset at which
 1042 * mapping should be restarted. If fm_end_offset=0 is returned then the caller
 1043 * will re-calculate the proper offset in the next stripe.
1044 * Note that the first extent is passed to lov_get_info via the value field.
1045 *
1046 * \param fiemap [in] fiemap request header
1047 * \param lsm [in] striping information for the file
1048 * \param fm_start [in] logical start of mapping
1049 * \param fm_end [in] logical end of mapping
 1050 * \param start_stripe [out] starting stripe will be returned in this parameter
1051 */
1052static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1053 struct lov_stripe_md *lsm,
1054 loff_t fm_start, loff_t fm_end,
1055 int *start_stripe)
1056{
1057 loff_t local_end = fiemap->fm_extents[0].fe_logical;
1058 loff_t lun_start, lun_end;
1059 loff_t fm_end_offset;
1060 int stripe_no = -1;
1061 int i;
1062
1063 if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical)
1064 return 0;
1065
1066 /* Find out stripe_no from ost_index saved in the fe_device */
1067 for (i = 0; i < lsm->lsm_stripe_count; i++) {
1068 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
1069
1070 if (lov_oinfo_is_dummy(oinfo))
1071 continue;
1072
1073 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1074 stripe_no = i;
1075 break;
1076 }
1077 }
1078
1079 if (stripe_no == -1)
1080 return -EINVAL;
1081
1082 /*
1083 * If we have finished mapping on previous device, shift logical
1084 * offset to start of next device
1085 */
1086 if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
1087 &lun_start, &lun_end) &&
1088 local_end < lun_end) {
1089 fm_end_offset = local_end;
1090 *start_stripe = stripe_no;
1091 } else {
1092 /* This is a special value to indicate that caller should
1093 * calculate offset in next stripe.
1094 */
1095 fm_end_offset = 0;
1096 *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
1097 }
1098
1099 return fm_end_offset;
1100}
1101
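To make the restart rule above concrete, the following self-contained sketch (demo_resume and demo_calc_resume are hypothetical names, not part of the patch) shows the decision the function makes once stripe_no has been recovered from fe_device: either resume at the saved in-OST offset, or move to the next stripe and let the caller recompute the offset.

#include <stdio.h>

struct demo_resume {
        int start_stripe;               /* stripe to resume mapping on */
        unsigned long long offset;      /* in-OST offset, 0 = recompute in next stripe */
};

/*
 * local_end is fe_logical from the previous call's first extent; lun_end is
 * the end of the mapping on stripe stripe_no as the intersection check would
 * report it.
 */
static struct demo_resume demo_calc_resume(unsigned long long local_end,
                                           unsigned long long lun_end,
                                           int stripe_no, int stripe_count)
{
        struct demo_resume r;

        if (local_end < lun_end) {
                /* Previous call stopped mid-stripe: continue where it left off. */
                r.start_stripe = stripe_no;
                r.offset = local_end;
        } else {
                /* Stripe exhausted: move on, 0 tells the caller to recompute. */
                r.start_stripe = (stripe_no + 1) % stripe_count;
                r.offset = 0;
        }
        return r;
}

int main(void)
{
        struct demo_resume r = demo_calc_resume(1ULL << 19, 1ULL << 20, 2, 4);

        printf("resume on stripe %d at offset %llu\n", r.start_stripe, r.offset);
        return 0;
}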
1102/**
1103 * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1104 * This also handles the restarting of FIEMAP calls in case mapping overflows
1105 * the available number of extents in single call.
1106 *
1107 * \param env [in] lustre environment
1108 * \param obj [in] file object
1109 * \param fmkey [in] fiemap request header and other info
 1110 * \param fiemap [out] fiemap buffer holding retrieved map extents
1111 * \param buflen [in/out] max buffer length of @fiemap, when iterate
1112 * each OST, it is used to limit max map needed
1113 * \retval 0 success
1114 * \retval < 0 error
1115 */
1116static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1117 struct ll_fiemap_info_key *fmkey,
1118 struct fiemap *fiemap, size_t *buflen)
1119{
1120 struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1121 unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1122 struct fiemap_extent *lcl_fm_ext;
1123 struct cl_object *subobj = NULL;
1124 struct fiemap *fm_local = NULL;
1125 struct lov_stripe_md *lsm;
1126 loff_t fm_start;
1127 loff_t fm_end;
1128 loff_t fm_length;
1129 loff_t fm_end_offset;
1130 int count_local;
1131 int ost_index = 0;
1132 int start_stripe;
1133 int current_extent = 0;
1134 int rc = 0;
1135 int last_stripe;
1136 int cur_stripe = 0;
1137 int cur_stripe_wrap = 0;
1138 int stripe_count;
 1139 /* Whether we have collected enough extents */
1140 bool enough = false;
1141 /* EOF for object */
1142 bool ost_eof = false;
1143 /* done with required mapping for this OST? */
1144 bool ost_done = false;
1145
1146 lsm = lov_lsm_addref(cl2lov(obj));
1147 if (!lsm)
1148 return -ENODATA;
1149
1150 /**
1151 * If the stripe_count > 1 and the application does not understand
1152 * DEVICE_ORDER flag, it cannot interpret the extents correctly.
1153 */
1154 if (lsm->lsm_stripe_count > 1 &&
1155 !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1156 rc = -ENOTSUPP;
1157 goto out;
1158 }
1159
1160 if (lsm_is_released(lsm)) {
1161 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1162 /**
1163 * released file, return a minimal FIEMAP if
1164 * request fits in file-size.
1165 */
1166 fiemap->fm_mapped_extents = 1;
1167 fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1168 if (fiemap->fm_start + fiemap->fm_length <
1169 fmkey->lfik_oa.o_size)
1170 fiemap->fm_extents[0].fe_length =
1171 fiemap->fm_length;
1172 else
1173 fiemap->fm_extents[0].fe_length =
1174 fmkey->lfik_oa.o_size -
1175 fiemap->fm_start;
1176 fiemap->fm_extents[0].fe_flags |=
1177 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1178 }
1179 rc = 0;
1180 goto out;
1181 }
1182
1183 if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1184 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1185
1186 fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
1187 if (!fm_local) {
1188 rc = -ENOMEM;
1189 goto out;
1190 }
1191 lcl_fm_ext = &fm_local->fm_extents[0];
1192 count_local = fiemap_size_to_count(buffer_size);
1193
1194 fm_start = fiemap->fm_start;
1195 fm_length = fiemap->fm_length;
1196 /* Calculate start stripe, last stripe and length of mapping */
1197 start_stripe = lov_stripe_number(lsm, fm_start);
1198 fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size :
1199 fm_start + fm_length - 1;
 1200 /* If fm_length != ~0ULL but fm_start + fm_length - 1 exceeds file size */
1201 if (fm_end > fmkey->lfik_oa.o_size)
1202 fm_end = fmkey->lfik_oa.o_size;
1203
1204 last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
1205 start_stripe, &stripe_count);
1206 fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end,
1207 &start_stripe);
1208 if (fm_end_offset == -EINVAL) {
1209 rc = -EINVAL;
1210 goto out;
1211 }
1212
1213 /**
1214 * Requested extent count exceeds the fiemap buffer size, shrink our
1215 * ambition.
1216 */
1217 if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1218 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1219 if (!fiemap->fm_extent_count)
1220 count_local = 0;
1221
1222 /* Check each stripe */
1223 for (cur_stripe = start_stripe; stripe_count > 0;
1224 --stripe_count,
1225 cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
1226 loff_t req_fm_len; /* Stores length of required mapping */
1227 loff_t len_mapped_single_call;
1228 loff_t lun_start;
1229 loff_t lun_end;
1230 loff_t obd_object_end;
1231 unsigned int ext_count;
1232
1233 cur_stripe_wrap = cur_stripe;
1234
1235 /* Find out range of mapping on this stripe */
1236 if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
1237 &lun_start, &obd_object_end)))
1238 continue;
1239
1240 if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
1241 rc = -EIO;
1242 goto out;
1243 }
1244
1245 /*
1246 * If this is a continuation FIEMAP call and we are on
1247 * starting stripe then lun_start needs to be set to
1248 * fm_end_offset
1249 */
1250 if (fm_end_offset && cur_stripe == start_stripe)
1251 lun_start = fm_end_offset;
1252
1253 if (fm_length != ~0ULL) {
1254 /* Handle fm_start + fm_length overflow */
1255 if (fm_start + fm_length < fm_start)
1256 fm_length = ~0ULL - fm_start;
1257 lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
1258 cur_stripe);
1259 } else {
1260 lun_end = ~0ULL;
1261 }
1262
1263 if (lun_start == lun_end)
1264 continue;
1265
1266 req_fm_len = obd_object_end - lun_start;
1267 fm_local->fm_length = 0;
1268 len_mapped_single_call = 0;
1269
1270 /* find lobsub object */
1271 subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1272 cur_stripe);
1273 if (IS_ERR(subobj)) {
1274 rc = PTR_ERR(subobj);
1275 goto out;
1276 }
1277 /*
1278 * If the output buffer is very large and the objects have many
1279 * extents we may need to loop on a single OST repeatedly
1280 */
1281 ost_eof = false;
1282 ost_done = false;
1283 do {
1284 if (fiemap->fm_extent_count > 0) {
1285 /* Don't get too many extents. */
1286 if (current_extent + count_local >
1287 fiemap->fm_extent_count)
1288 count_local = fiemap->fm_extent_count -
1289 current_extent;
1290 }
1291
1292 lun_start += len_mapped_single_call;
1293 fm_local->fm_length = req_fm_len -
1294 len_mapped_single_call;
1295 req_fm_len = fm_local->fm_length;
1296 fm_local->fm_extent_count = enough ? 1 : count_local;
1297 fm_local->fm_mapped_extents = 0;
1298 fm_local->fm_flags = fiemap->fm_flags;
1299
1300 ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
1301
1302 if (ost_index < 0 ||
1303 ost_index >= lov->desc.ld_tgt_count) {
1304 rc = -EINVAL;
1305 goto obj_put;
1306 }
1307 /*
1308 * If OST is inactive, return extent with UNKNOWN
1309 * flag.
1310 */
1311 if (!lov->lov_tgts[ost_index]->ltd_active) {
1312 fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
1313 fm_local->fm_mapped_extents = 1;
1314
1315 lcl_fm_ext[0].fe_logical = lun_start;
1316 lcl_fm_ext[0].fe_length = obd_object_end -
1317 lun_start;
1318 lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1319
1320 goto inactive_tgt;
1321 }
1322
1323 fm_local->fm_start = lun_start;
1324 fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1325 memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local));
1326 *buflen = fiemap_count_to_size(fm_local->fm_extent_count);
1327
1328 rc = cl_object_fiemap(env, subobj, fmkey, fm_local,
1329 buflen);
1330 if (rc)
1331 goto obj_put;
1332inactive_tgt:
1333 ext_count = fm_local->fm_mapped_extents;
1334 if (!ext_count) {
1335 ost_done = true;
 1336 /*
 1337 * If the last stripe has a hole at the end,
 1338 * we need to return
 1339 */
1340 if (cur_stripe_wrap == last_stripe) {
1341 fiemap->fm_mapped_extents = 0;
1342 goto finish;
1343 }
1344 break;
1345 } else if (enough) {
1346 /*
1347 * We've collected enough extents and there are
1348 * more extents after it.
1349 */
1350 goto finish;
1351 }
1352
 1353 /* If we just need the number of extents, go to the next device */
1354 if (!fiemap->fm_extent_count) {
1355 current_extent += ext_count;
1356 break;
1357 }
1358
 1359 /* prepare to copy retrieved map extents */
1360 len_mapped_single_call =
1361 lcl_fm_ext[ext_count - 1].fe_logical -
1362 lun_start + lcl_fm_ext[ext_count - 1].fe_length;
1363
1364 /* Have we finished mapping on this device? */
1365 if (req_fm_len <= len_mapped_single_call)
1366 ost_done = true;
1367
1368 /*
1369 * Clear the EXTENT_LAST flag which can be present on
1370 * the last extent
1371 */
1372 if (lcl_fm_ext[ext_count - 1].fe_flags &
1373 FIEMAP_EXTENT_LAST)
1374 lcl_fm_ext[ext_count - 1].fe_flags &=
1375 ~FIEMAP_EXTENT_LAST;
1376
1377 if (lov_stripe_size(lsm,
1378 lcl_fm_ext[ext_count - 1].fe_logical +
1379 lcl_fm_ext[ext_count - 1].fe_length,
1380 cur_stripe) >= fmkey->lfik_oa.o_size)
1381 ost_eof = true;
1382
1383 fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
1384 ost_index, ext_count,
1385 current_extent);
1386 current_extent += ext_count;
1387
1388 /* Ran out of available extents? */
1389 if (current_extent >= fiemap->fm_extent_count)
1390 enough = true;
1391 } while (!ost_done && !ost_eof);
1392
1393 cl_object_put(env, subobj);
1394 subobj = NULL;
1395
1396 if (cur_stripe_wrap == last_stripe)
1397 goto finish;
1398 } /* for each stripe */
1399finish:
1400 /*
1401 * Indicate that we are returning device offsets unless file just has
1402 * single stripe
1403 */
1404 if (lsm->lsm_stripe_count > 1)
1405 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1406
1407 if (!fiemap->fm_extent_count)
1408 goto skip_last_device_calc;
1409
1410 /*
1411 * Check if we have reached the last stripe and whether mapping for that
1412 * stripe is done.
1413 */
1414 if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof))
1415 fiemap->fm_extents[current_extent - 1].fe_flags |=
1416 FIEMAP_EXTENT_LAST;
1417skip_last_device_calc:
1418 fiemap->fm_mapped_extents = current_extent;
1419obj_put:
1420 if (subobj)
1421 cl_object_put(env, subobj);
1422out:
1423 kvfree(fm_local);
1424 lov_lsm_put(lsm);
1425 return rc;
1426}
1427
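Seen from user space, the continuation behaviour described above is the ordinary FIEMAP protocol: issue FS_IOC_FIEMAP with a bounded extent buffer, then re-issue from the end of the last returned extent until one carries FIEMAP_EXTENT_LAST. A minimal, generic (not Lustre-specific) sketch of such a caller, with a hypothetical default path, might look like this:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

#define DEMO_EXTENTS 32 /* deliberately small to force continuation calls */

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/tmp/demo-file"; /* hypothetical */
        unsigned long long next = 0;
        int fd, last = 0;
        struct fiemap *fm;

        fm = calloc(1, sizeof(*fm) + DEMO_EXTENTS * sizeof(struct fiemap_extent));
        if (!fm)
                return 1;

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open");
                free(fm);
                return 1;
        }

        while (!last) {
                unsigned int i;

                /* Request mapping from where the previous call stopped. */
                memset(fm, 0, sizeof(*fm));
                fm->fm_start = next;
                fm->fm_length = FIEMAP_MAX_OFFSET - next;
                fm->fm_extent_count = DEMO_EXTENTS;

                if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                        perror("FS_IOC_FIEMAP");
                        break;
                }
                if (!fm->fm_mapped_extents)
                        break;

                for (i = 0; i < fm->fm_mapped_extents; i++) {
                        struct fiemap_extent *fe = &fm->fm_extents[i];

                        printf("logical %llu len %llu flags %#x\n",
                               (unsigned long long)fe->fe_logical,
                               (unsigned long long)fe->fe_length,
                               fe->fe_flags);
                        if (fe->fe_flags & FIEMAP_EXTENT_LAST)
                                last = 1;
                        next = fe->fe_logical + fe->fe_length;
                }
        }

        close(fd);
        free(fm);
        return 0;
}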
914static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj, 1428static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
915 struct lov_user_md __user *lum) 1429 struct lov_user_md __user *lum)
916{ 1430{
@@ -923,10 +1437,53 @@ static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
923 return -ENODATA; 1437 return -ENODATA;
924 1438
925 rc = lov_getstripe(cl2lov(obj), lsm, lum); 1439 rc = lov_getstripe(cl2lov(obj), lsm, lum);
926 lov_lsm_put(obj, lsm); 1440 lov_lsm_put(lsm);
927 return rc; 1441 return rc;
928} 1442}
929 1443
1444static int lov_object_layout_get(const struct lu_env *env,
1445 struct cl_object *obj,
1446 struct cl_layout *cl)
1447{
1448 struct lov_object *lov = cl2lov(obj);
1449 struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1450 struct lu_buf *buf = &cl->cl_buf;
1451 ssize_t rc;
1452
1453 if (!lsm) {
1454 cl->cl_size = 0;
1455 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
1456 cl->cl_is_released = false;
1457
1458 return 0;
1459 }
1460
1461 cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
1462 cl->cl_layout_gen = lsm->lsm_layout_gen;
1463 cl->cl_is_released = lsm_is_released(lsm);
1464
1465 rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
1466 lov_lsm_put(lsm);
1467
1468 return rc < 0 ? rc : 0;
1469}
1470
1471static loff_t lov_object_maxbytes(struct cl_object *obj)
1472{
1473 struct lov_object *lov = cl2lov(obj);
1474 struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1475 loff_t maxbytes;
1476
1477 if (!lsm)
1478 return LLONG_MAX;
1479
1480 maxbytes = lsm->lsm_maxbytes;
1481
1482 lov_lsm_put(lsm);
1483
1484 return maxbytes;
1485}
1486
930static const struct cl_object_operations lov_ops = { 1487static const struct cl_object_operations lov_ops = {
931 .coo_page_init = lov_page_init, 1488 .coo_page_init = lov_page_init,
932 .coo_lock_init = lov_lock_init, 1489 .coo_lock_init = lov_lock_init,
@@ -934,7 +1491,10 @@ static const struct cl_object_operations lov_ops = {
934 .coo_attr_get = lov_attr_get, 1491 .coo_attr_get = lov_attr_get,
935 .coo_attr_update = lov_attr_update, 1492 .coo_attr_update = lov_attr_update,
936 .coo_conf_set = lov_conf_set, 1493 .coo_conf_set = lov_conf_set,
937 .coo_getstripe = lov_object_getstripe 1494 .coo_getstripe = lov_object_getstripe,
1495 .coo_layout_get = lov_object_layout_get,
1496 .coo_maxbytes = lov_object_maxbytes,
1497 .coo_fiemap = lov_object_fiemap,
938}; 1498};
939 1499
940static const struct lu_object_operations lov_lu_obj_ops = { 1500static const struct lu_object_operations lov_lu_obj_ops = {
@@ -986,22 +1546,6 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
986 return lsm; 1546 return lsm;
987} 1547}
988 1548
989struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
990{
991 struct lu_object *luobj;
992 struct lov_stripe_md *lsm = NULL;
993
994 if (!clobj)
995 return NULL;
996
997 luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
998 &lov_device_type);
999 if (luobj)
1000 lsm = lov_lsm_addref(lu2lov(luobj));
1001 return lsm;
1002}
1003EXPORT_SYMBOL(lov_lsm_get);
1004
1005int lov_read_and_clear_async_rc(struct cl_object *clob) 1549int lov_read_and_clear_async_rc(struct cl_object *clob)
1006{ 1550{
1007 struct lu_object *luobj; 1551 struct lu_object *luobj;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index be6e9857ce2a..6c93d180aef7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -38,14 +38,17 @@
38 38
39#define DEBUG_SUBSYSTEM S_LOV 39#define DEBUG_SUBSYSTEM S_LOV
40 40
41#include "../include/lustre/lustre_idl.h"
42#include "../include/lustre/lustre_user.h"
43
41#include "../include/lustre_net.h" 44#include "../include/lustre_net.h"
45#include "../include/lustre_swab.h"
42#include "../include/obd.h" 46#include "../include/obd.h"
43#include "../include/obd_class.h" 47#include "../include/obd_class.h"
44#include "../include/obd_support.h" 48#include "../include/obd_support.h"
45#include "../include/lustre/lustre_user.h"
46 49
47#include "lov_internal.h"
48#include "lov_cl_internal.h" 50#include "lov_cl_internal.h"
51#include "lov_internal.h"
49 52
50void lov_dump_lmm_common(int level, void *lmmp) 53void lov_dump_lmm_common(int level, void *lmmp)
51{ 54{
@@ -97,120 +100,54 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
97 le16_to_cpu(lmm->lmm_stripe_count)); 100 le16_to_cpu(lmm->lmm_stripe_count));
98} 101}
99 102
100/* Pack LOV object metadata for disk storage. It is packed in LE byte 103/**
101 * order and is opaque to the networking layer. 104 * Pack LOV striping metadata for disk storage format (in little
105 * endian byte order).
102 * 106 *
103 * XXX In the future, this will be enhanced to get the EA size from the 107 * This follows the getxattr() conventions. If \a buf_size is zero
104 * underlying OSC device(s) to get their EA sizes so we can stack 108 * then return the size needed. If \a buf_size is too small then
105 * LOVs properly. For now lov_mds_md_size() just assumes one u64 109 * return -ERANGE. Otherwise return the size of the result.
106 * per stripe.
107 */ 110 */
108int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp, 111ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
109 struct lov_stripe_md *lsm) 112 size_t buf_size)
110{ 113{
111 struct lov_mds_md_v1 *lmmv1;
112 struct lov_mds_md_v3 *lmmv3;
113 __u16 stripe_count;
114 struct lov_ost_data_v1 *lmm_objects; 114 struct lov_ost_data_v1 *lmm_objects;
115 int lmm_size, lmm_magic; 115 struct lov_mds_md_v1 *lmmv1 = buf;
116 int i; 116 struct lov_mds_md_v3 *lmmv3 = buf;
117 int cplen = 0; 117 size_t lmm_size;
118 118 unsigned int i;
119 if (lsm) {
120 lmm_magic = lsm->lsm_magic;
121 } else {
122 if (lmmp && *lmmp)
123 lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
124 else
125 /* lsm == NULL and lmmp == NULL */
126 lmm_magic = LOV_MAGIC;
127 }
128
129 if ((lmm_magic != LOV_MAGIC_V1) &&
130 (lmm_magic != LOV_MAGIC_V3)) {
131 CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
132 lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
133 return -EINVAL;
134 }
135
136 if (lsm) {
137 /* If we are just sizing the EA, limit the stripe count
138 * to the actual number of OSTs in this filesystem.
139 */
140 if (!lmmp) {
141 stripe_count = lov_get_stripecnt(lov, lmm_magic,
142 lsm->lsm_stripe_count);
143 lsm->lsm_stripe_count = stripe_count;
144 } else if (!lsm_is_released(lsm)) {
145 stripe_count = lsm->lsm_stripe_count;
146 } else {
147 stripe_count = 0;
148 }
149 } else {
150 /*
151 * To calculate maximum easize by active targets at present,
152 * which is exactly the maximum easize to be seen by LOV
153 */
154 stripe_count = lov->desc.ld_active_tgt_count;
155 }
156 119
157 /* XXX LOV STACKING call into osc for sizes */ 120 lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
158 lmm_size = lov_mds_md_size(stripe_count, lmm_magic); 121 if (!buf_size)
159
160 if (!lmmp)
161 return lmm_size; 122 return lmm_size;
162 123
163 if (*lmmp && !lsm) { 124 if (buf_size < lmm_size)
164 stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count); 125 return -ERANGE;
165 lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
166 kvfree(*lmmp);
167 *lmmp = NULL;
168 return 0;
169 }
170
171 if (!*lmmp) {
172 *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
173 if (!*lmmp)
174 return -ENOMEM;
175 }
176
177 CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
178 lmm_magic, lmm_size);
179
180 lmmv1 = *lmmp;
181 lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
182 if (lmm_magic == LOV_MAGIC_V3)
183 lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
184 else
185 lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
186
187 if (!lsm)
188 return lmm_size;
189 126
190 /* lmmv1 and lmmv3 point to the same struct and have the 127 /*
128 * lmmv1 and lmmv3 point to the same struct and have the
191 * same first fields 129 * same first fields
192 */ 130 */
131 lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
193 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi); 132 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
194 lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size); 133 lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
195 lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count); 134 lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
196 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern); 135 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
197 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen); 136 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
137
198 if (lsm->lsm_magic == LOV_MAGIC_V3) { 138 if (lsm->lsm_magic == LOV_MAGIC_V3) {
199 cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, 139 CLASSERT(sizeof(lsm->lsm_pool_name) ==
200 sizeof(lmmv3->lmm_pool_name)); 140 sizeof(lmmv3->lmm_pool_name));
201 if (cplen >= sizeof(lmmv3->lmm_pool_name)) 141 strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
202 return -E2BIG; 142 sizeof(lmmv3->lmm_pool_name));
203 lmm_objects = lmmv3->lmm_objects; 143 lmm_objects = lmmv3->lmm_objects;
204 } else { 144 } else {
205 lmm_objects = lmmv1->lmm_objects; 145 lmm_objects = lmmv1->lmm_objects;
206 } 146 }
207 147
208 for (i = 0; i < stripe_count; i++) { 148 for (i = 0; i < lsm->lsm_stripe_count; i++) {
209 struct lov_oinfo *loi = lsm->lsm_oinfo[i]; 149 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
210 /* XXX LOV STACKING call down to osc_packmd() to do packing */ 150
211 LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
212 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
213 i, stripe_count, loi->loi_ost_idx);
214 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi); 151 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
215 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen); 152 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
216 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx); 153 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
@@ -219,15 +156,6 @@ int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
219 return lmm_size; 156 return lmm_size;
220} 157}
221 158
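A caller following the getxattr()-style convention documented above would typically probe for the size first and then pack for real. This is a hedged sketch only: demo_pack_lsm is a hypothetical helper mirroring what lov_getstripe() in this patch does, and it assumes the usual lov headers so that lov_lsm_pack(), libcfs_kvzalloc() and kvfree() are available.

static ssize_t demo_pack_lsm(const struct lov_stripe_md *lsm,
                             struct lov_mds_md **lmmp)
{
        struct lov_mds_md *lmm;
        ssize_t size;

        /* First call with buf_size == 0 just reports the size needed. */
        size = lov_lsm_pack(lsm, NULL, 0);
        if (size < 0)
                return size;

        lmm = libcfs_kvzalloc(size, GFP_NOFS);
        if (!lmm)
                return -ENOMEM;

        /* Second call does the packing; -ERANGE would mean the buffer is too small. */
        size = lov_lsm_pack(lsm, lmm, size);
        if (size < 0) {
                kvfree(lmm);
                return size;
        }

        *lmmp = lmm;
        return size;
}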
222int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
223 struct lov_stripe_md *lsm)
224{
225 struct obd_device *obd = class_exp2obd(exp);
226 struct lov_obd *lov = &obd->u.lov;
227
228 return lov_obd_packmd(lov, lmmp, lsm);
229}
230
231/* Find the max stripecount we should use */ 159/* Find the max stripecount we should use */
232__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) 160__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
233{ 161{
@@ -270,34 +198,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
270 return rc; 198 return rc;
271} 199}
272 200
273int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, 201struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic)
274 int pattern, int magic)
275{ 202{
276 int i, lsm_size; 203 struct lov_stripe_md *lsm;
204 unsigned int i;
277 205
278 CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count); 206 CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
279 207
280 *lsmp = lsm_alloc_plain(stripe_count, &lsm_size); 208 lsm = lsm_alloc_plain(stripe_count);
281 if (!*lsmp) { 209 if (!lsm) {
282 CERROR("can't allocate lsmp stripe_count %d\n", stripe_count); 210 CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
283 return -ENOMEM; 211 return ERR_PTR(-ENOMEM);
284 } 212 }
285 213
286 atomic_set(&(*lsmp)->lsm_refc, 1); 214 atomic_set(&lsm->lsm_refc, 1);
287 spin_lock_init(&(*lsmp)->lsm_lock); 215 spin_lock_init(&lsm->lsm_lock);
288 (*lsmp)->lsm_magic = magic; 216 lsm->lsm_magic = magic;
289 (*lsmp)->lsm_stripe_count = stripe_count; 217 lsm->lsm_stripe_count = stripe_count;
290 (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count; 218 lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
291 (*lsmp)->lsm_pattern = pattern; 219 lsm->lsm_pattern = pattern;
292 (*lsmp)->lsm_pool_name[0] = '\0'; 220 lsm->lsm_pool_name[0] = '\0';
293 (*lsmp)->lsm_layout_gen = 0; 221 lsm->lsm_layout_gen = 0;
294 if (stripe_count > 0) 222 if (stripe_count > 0)
295 (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0; 223 lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
296 224
297 for (i = 0; i < stripe_count; i++) 225 for (i = 0; i < stripe_count; i++)
298 loi_init((*lsmp)->lsm_oinfo[i]); 226 loi_init(lsm->lsm_oinfo[i]);
299 227
300 return lsm_size; 228 return lsm;
301} 229}
302 230
303int lov_free_memmd(struct lov_stripe_md **lsmp) 231int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -317,56 +245,34 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
317/* Unpack LOV object metadata from disk storage. It is packed in LE byte 245/* Unpack LOV object metadata from disk storage. It is packed in LE byte
318 * order and is opaque to the networking layer. 246 * order and is opaque to the networking layer.
319 */ 247 */
320int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, 248struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
321 struct lov_mds_md *lmm, int lmm_bytes) 249 size_t lmm_size)
322{ 250{
323 struct obd_device *obd = class_exp2obd(exp); 251 struct lov_stripe_md *lsm;
324 struct lov_obd *lov = &obd->u.lov; 252 u16 stripe_count;
325 int rc = 0, lsm_size; 253 u32 pattern;
326 __u16 stripe_count; 254 u32 magic;
327 __u32 magic; 255 int rc;
328 __u32 pattern;
329
330 /* If passed an MDS struct use values from there, otherwise defaults */
331 if (lmm) {
332 rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
333 if (rc)
334 return rc;
335 magic = le32_to_cpu(lmm->lmm_magic);
336 pattern = le32_to_cpu(lmm->lmm_pattern);
337 } else {
338 magic = LOV_MAGIC;
339 stripe_count = lov_get_stripecnt(lov, magic, 0);
340 pattern = LOV_PATTERN_RAID0;
341 }
342 256
343 /* If we aren't passed an lsmp struct, we just want the size */ 257 rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
344 if (!lsmp) { 258 if (rc)
345 /* XXX LOV STACKING call into osc for sizes */ 259 return ERR_PTR(rc);
346 LBUG();
347 return lov_stripe_md_size(stripe_count);
348 }
349 /* If we are passed an allocated struct but nothing to unpack, free */
350 if (*lsmp && !lmm) {
351 lov_free_memmd(lsmp);
352 return 0;
353 }
354 260
355 lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic); 261 magic = le32_to_cpu(lmm->lmm_magic);
356 if (lsm_size < 0) 262 pattern = le32_to_cpu(lmm->lmm_pattern);
357 return lsm_size;
358 263
359 /* If we are passed a pointer but nothing to unpack, we only alloc */ 264 lsm = lov_lsm_alloc(stripe_count, pattern, magic);
360 if (!lmm) 265 if (IS_ERR(lsm))
361 return lsm_size; 266 return lsm;
362 267
363 rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm); 268 LASSERT(lsm_op_find(magic));
269 rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
364 if (rc) { 270 if (rc) {
365 lov_free_memmd(lsmp); 271 lov_free_memmd(&lsm);
366 return rc; 272 return ERR_PTR(rc);
367 } 273 }
368 274
369 return lsm_size; 275 return lsm;
370} 276}
371 277
372/* Retrieve object striping information. 278/* Retrieve object striping information.
@@ -378,15 +284,14 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
378int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, 284int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
379 struct lov_user_md __user *lump) 285 struct lov_user_md __user *lump)
380{ 286{
381 /*
382 * XXX huge struct allocated on stack.
383 */
384 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ 287 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
385 struct lov_obd *lov;
386 struct lov_user_md_v3 lum; 288 struct lov_user_md_v3 lum;
387 struct lov_mds_md *lmmk = NULL; 289 struct lov_mds_md *lmmk;
388 int rc, lmmk_size, lmm_size; 290 u32 stripe_count;
389 int lum_size; 291 ssize_t lmm_size;
292 size_t lmmk_size;
293 size_t lum_size;
294 int rc;
390 mm_segment_t seg; 295 mm_segment_t seg;
391 296
392 if (!lsm) 297 if (!lsm)
@@ -399,6 +304,18 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
399 seg = get_fs(); 304 seg = get_fs();
400 set_fs(KERNEL_DS); 305 set_fs(KERNEL_DS);
401 306
307 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
308 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
309 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
310 rc = -EIO;
311 goto out;
312 }
313
314 if (!lsm_is_released(lsm))
315 stripe_count = lsm->lsm_stripe_count;
316 else
317 stripe_count = 0;
318
402 /* we only need the header part from user space to get lmm_magic and 319 /* we only need the header part from user space to get lmm_magic and
403 * lmm_stripe_count, (the header part is common to v1 and v3) 320 * lmm_stripe_count, (the header part is common to v1 and v3)
404 */ 321 */
@@ -417,32 +334,40 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
417 if (lum.lmm_stripe_count && 334 if (lum.lmm_stripe_count &&
418 (lum.lmm_stripe_count < lsm->lsm_stripe_count)) { 335 (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
419 /* Return right size of stripe to user */ 336 /* Return right size of stripe to user */
420 lum.lmm_stripe_count = lsm->lsm_stripe_count; 337 lum.lmm_stripe_count = stripe_count;
421 rc = copy_to_user(lump, &lum, lum_size); 338 rc = copy_to_user(lump, &lum, lum_size);
422 rc = -EOVERFLOW; 339 rc = -EOVERFLOW;
423 goto out; 340 goto out;
424 } 341 }
425 lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov; 342 lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
426 rc = lov_obd_packmd(lov, &lmmk, lsm); 343
427 if (rc < 0) 344
345 lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
346 if (!lmmk) {
347 rc = -ENOMEM;
428 goto out; 348 goto out;
429 lmmk_size = rc; 349 }
430 lmm_size = rc; 350
431 rc = 0; 351 lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
352 if (lmm_size < 0) {
353 rc = lmm_size;
354 goto out_free;
355 }
432 356
433 /* FIXME: Bug 1185 - copy fields properly when structs change */ 357 /* FIXME: Bug 1185 - copy fields properly when structs change */
434 /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */ 358 /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
435 CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3)); 359 CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
436 CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0])); 360 CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
437 361
438 if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) && 362 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
439 ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) || 363 (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
440 (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) { 364 lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
441 lustre_swab_lov_mds_md(lmmk); 365 lustre_swab_lov_mds_md(lmmk);
442 lustre_swab_lov_user_md_objects( 366 lustre_swab_lov_user_md_objects(
443 (struct lov_user_ost_data *)lmmk->lmm_objects, 367 (struct lov_user_ost_data *)lmmk->lmm_objects,
444 lmmk->lmm_stripe_count); 368 lmmk->lmm_stripe_count);
445 } 369 }
370
446 if (lum.lmm_magic == LOV_USER_MAGIC) { 371 if (lum.lmm_magic == LOV_USER_MAGIC) {
447 /* User request for v1, we need skip lmm_pool_name */ 372 /* User request for v1, we need skip lmm_pool_name */
448 if (lmmk->lmm_magic == LOV_MAGIC_V3) { 373 if (lmmk->lmm_magic == LOV_MAGIC_V3) {
@@ -474,9 +399,11 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
474 ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count; 399 ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
475 if (copy_to_user(lump, lmmk, lmm_size)) 400 if (copy_to_user(lump, lmmk, lmm_size))
476 rc = -EFAULT; 401 rc = -EFAULT;
402 else
403 rc = 0;
477 404
478out_free: 405out_free:
479 kfree(lmmk); 406 kvfree(lmmk);
480out: 407out:
481 set_fs(seg); 408 set_fs(seg);
482 return rc; 409 return rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 00bfabad78eb..62ceb6dfdfdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -49,51 +49,6 @@
49 * 49 *
50 */ 50 */
51 51
52/**
53 * Adjust the stripe index by layout of raid0. @max_index is the maximum
54 * page index covered by an underlying DLM lock.
55 * This function converts max_index from stripe level to file level, and make
56 * sure it's not beyond one stripe.
57 */
58static int lov_raid0_page_is_under_lock(const struct lu_env *env,
59 const struct cl_page_slice *slice,
60 struct cl_io *unused,
61 pgoff_t *max_index)
62{
63 struct lov_object *loo = cl2lov(slice->cpl_obj);
64 struct lov_layout_raid0 *r0 = lov_r0(loo);
65 pgoff_t index = *max_index;
66 unsigned int pps; /* pages per stripe */
67
68 CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
69 PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
70
71 if (index == 0) /* the page is not covered by any lock */
72 return 0;
73
74 if (r0->lo_nr == 1) /* single stripe file */
75 return 0;
76
77 /* max_index is stripe level, convert it into file level */
78 if (index != CL_PAGE_EOF) {
79 int stripeno = lov_page_stripe(slice->cpl_page);
80 *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
81 }
82
83 /* calculate the end of current stripe */
84 pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
85 index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
86
87 CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n",
88 PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
89 loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
90 slice->cpl_index);
91
92 /* never exceed the end of the stripe */
93 *max_index = min_t(pgoff_t, *max_index, index);
94 return 0;
95}
96
97static int lov_raid0_page_print(const struct lu_env *env, 52static int lov_raid0_page_print(const struct lu_env *env,
98 const struct cl_page_slice *slice, 53 const struct cl_page_slice *slice,
99 void *cookie, lu_printer_t printer) 54 void *cookie, lu_printer_t printer)
@@ -104,7 +59,6 @@ static int lov_raid0_page_print(const struct lu_env *env,
104} 59}
105 60
106static const struct cl_page_operations lov_raid0_page_ops = { 61static const struct cl_page_operations lov_raid0_page_ops = {
107 .cpo_is_under_lock = lov_raid0_page_is_under_lock,
108 .cpo_print = lov_raid0_page_print 62 .cpo_print = lov_raid0_page_print
109}; 63};
110 64
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index f8c8a361ef79..7daa8671fdc3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -81,7 +81,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool)
81 * Chapter 6.4. 81 * Chapter 6.4.
82 * Addison Wesley, 1973 82 * Addison Wesley, 1973
83 */ 83 */
84static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask) 84static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
85 unsigned int mask)
85{ 86{
86 int i; 87 int i;
87 __u32 result; 88 __u32 result;
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 09dcaf484c89..d43cc88ae641 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -44,7 +44,6 @@ static void lov_init_set(struct lov_request_set *set)
44 atomic_set(&set->set_completes, 0); 44 atomic_set(&set->set_completes, 0);
45 atomic_set(&set->set_success, 0); 45 atomic_set(&set->set_success, 0);
46 atomic_set(&set->set_finish_checked, 0); 46 atomic_set(&set->set_finish_checked, 0);
47 set->set_cookies = NULL;
48 INIT_LIST_HEAD(&set->set_list); 47 INIT_LIST_HEAD(&set->set_list);
49 atomic_set(&set->set_refcount, 1); 48 atomic_set(&set->set_refcount, 1);
50 init_waitqueue_head(&set->set_waitq); 49 init_waitqueue_head(&set->set_waitq);
@@ -61,8 +60,6 @@ void lov_finish_set(struct lov_request_set *set)
61 rq_link); 60 rq_link);
62 list_del_init(&req->rq_link); 61 list_del_init(&req->rq_link);
63 62
64 if (req->rq_oi.oi_oa)
65 kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa);
66 kfree(req->rq_oi.oi_osfs); 63 kfree(req->rq_oi.oi_osfs);
67 kfree(req); 64 kfree(req);
68 } 65 }
@@ -97,22 +94,6 @@ static void lov_update_set(struct lov_request_set *set,
97 wake_up(&set->set_waitq); 94 wake_up(&set->set_waitq);
98} 95}
99 96
100int lov_update_common_set(struct lov_request_set *set,
101 struct lov_request *req, int rc)
102{
103 struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
104
105 lov_update_set(set, req, rc);
106
107 /* grace error on inactive ost */
108 if (rc && !(lov->lov_tgts[req->rq_idx] &&
109 lov->lov_tgts[req->rq_idx]->ltd_active))
110 rc = 0;
111
112 /* FIXME in raid1 regime, should return 0 */
113 return rc;
114}
115
116static void lov_set_add_req(struct lov_request *req, 97static void lov_set_add_req(struct lov_request *req,
117 struct lov_request_set *set) 98 struct lov_request_set *set)
118{ 99{
@@ -183,279 +164,6 @@ out:
183 return rc; 164 return rc;
184} 165}
185 166
186static int common_attr_done(struct lov_request_set *set)
187{
188 struct lov_request *req;
189 struct obdo *tmp_oa;
190 int rc = 0, attrset = 0;
191
192 if (!set->set_oi->oi_oa)
193 return 0;
194
195 if (!atomic_read(&set->set_success))
196 return -EIO;
197
198 tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
199 if (!tmp_oa) {
200 rc = -ENOMEM;
201 goto out;
202 }
203
204 list_for_each_entry(req, &set->set_list, rq_link) {
205 if (!req->rq_complete || req->rq_rc)
206 continue;
207 if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
208 continue;
209 lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
210 req->rq_oi.oi_oa->o_valid,
211 set->set_oi->oi_md, req->rq_stripe, &attrset);
212 }
213 if (!attrset) {
214 CERROR("No stripes had valid attrs\n");
215 rc = -EIO;
216 }
217 if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
218 (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
219 /* When we take attributes of some epoch, we require all the
220 * ost to be active.
221 */
222 CERROR("Not all the stripes had valid attrs\n");
223 rc = -EIO;
224 goto out;
225 }
226
227 tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
228 memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
229out:
230 if (tmp_oa)
231 kmem_cache_free(obdo_cachep, tmp_oa);
232 return rc;
233}
234
235int lov_fini_getattr_set(struct lov_request_set *set)
236{
237 int rc = 0;
238
239 if (!set)
240 return 0;
241 LASSERT(set->set_exp);
242 if (atomic_read(&set->set_completes))
243 rc = common_attr_done(set);
244
245 lov_put_reqset(set);
246
247 return rc;
248}
249
250/* The callback for osc_getattr_async that finalizes a request info when a
251 * response is received.
252 */
253static int cb_getattr_update(void *cookie, int rc)
254{
255 struct obd_info *oinfo = cookie;
256 struct lov_request *lovreq;
257
258 lovreq = container_of(oinfo, struct lov_request, rq_oi);
259 return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
260}
261
262int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
263 struct lov_request_set **reqset)
264{
265 struct lov_request_set *set;
266 struct lov_obd *lov = &exp->exp_obd->u.lov;
267 int rc = 0, i;
268
269 set = kzalloc(sizeof(*set), GFP_NOFS);
270 if (!set)
271 return -ENOMEM;
272 lov_init_set(set);
273
274 set->set_exp = exp;
275 set->set_oi = oinfo;
276
277 for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
278 struct lov_oinfo *loi;
279 struct lov_request *req;
280
281 loi = oinfo->oi_md->lsm_oinfo[i];
282 if (lov_oinfo_is_dummy(loi))
283 continue;
284
285 if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
286 CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
287 if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) {
288 /* SOM requires all the OSTs to be active. */
289 rc = -EIO;
290 goto out_set;
291 }
292 continue;
293 }
294
295 req = kzalloc(sizeof(*req), GFP_NOFS);
296 if (!req) {
297 rc = -ENOMEM;
298 goto out_set;
299 }
300
301 req->rq_stripe = i;
302 req->rq_idx = loi->loi_ost_idx;
303
304 req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
305 if (!req->rq_oi.oi_oa) {
306 kfree(req);
307 rc = -ENOMEM;
308 goto out_set;
309 }
310 memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
311 sizeof(*req->rq_oi.oi_oa));
312 req->rq_oi.oi_oa->o_oi = loi->loi_oi;
313 req->rq_oi.oi_cb_up = cb_getattr_update;
314
315 lov_set_add_req(req, set);
316 }
317 if (!set->set_count) {
318 rc = -EIO;
319 goto out_set;
320 }
321 *reqset = set;
322 return rc;
323out_set:
324 lov_fini_getattr_set(set);
325 return rc;
326}
327
328int lov_fini_setattr_set(struct lov_request_set *set)
329{
330 int rc = 0;
331
332 if (!set)
333 return 0;
334 LASSERT(set->set_exp);
335 if (atomic_read(&set->set_completes)) {
336 rc = common_attr_done(set);
337 /* FIXME update qos data here */
338 }
339
340 lov_put_reqset(set);
341 return rc;
342}
343
344int lov_update_setattr_set(struct lov_request_set *set,
345 struct lov_request *req, int rc)
346{
347 struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
348 struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
349
350 lov_update_set(set, req, rc);
351
352 /* grace error on inactive ost */
353 if (rc && !(lov->lov_tgts[req->rq_idx] &&
354 lov->lov_tgts[req->rq_idx]->ltd_active))
355 rc = 0;
356
357 if (rc == 0) {
358 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
359 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
360 req->rq_oi.oi_oa->o_ctime;
361 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
362 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
363 req->rq_oi.oi_oa->o_mtime;
364 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
365 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
366 req->rq_oi.oi_oa->o_atime;
367 }
368
369 return rc;
370}
371
372/* The callback for osc_setattr_async that finalizes a request info when a
373 * response is received.
374 */
375static int cb_setattr_update(void *cookie, int rc)
376{
377 struct obd_info *oinfo = cookie;
378 struct lov_request *lovreq;
379
380 lovreq = container_of(oinfo, struct lov_request, rq_oi);
381 return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
382}
383
384int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
385 struct obd_trans_info *oti,
386 struct lov_request_set **reqset)
387{
388 struct lov_request_set *set;
389 struct lov_obd *lov = &exp->exp_obd->u.lov;
390 int rc = 0, i;
391
392 set = kzalloc(sizeof(*set), GFP_NOFS);
393 if (!set)
394 return -ENOMEM;
395 lov_init_set(set);
396
397 set->set_exp = exp;
398 set->set_oi = oinfo;
399 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
400 set->set_cookies = oti->oti_logcookies;
401
402 for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
403 struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
404 struct lov_request *req;
405
406 if (lov_oinfo_is_dummy(loi))
407 continue;
408
409 if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
410 CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
411 continue;
412 }
413
414 req = kzalloc(sizeof(*req), GFP_NOFS);
415 if (!req) {
416 rc = -ENOMEM;
417 goto out_set;
418 }
419 req->rq_stripe = i;
420 req->rq_idx = loi->loi_ost_idx;
421
422 req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
423 if (!req->rq_oi.oi_oa) {
424 kfree(req);
425 rc = -ENOMEM;
426 goto out_set;
427 }
428 memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
429 sizeof(*req->rq_oi.oi_oa));
430 req->rq_oi.oi_oa->o_oi = loi->loi_oi;
431 req->rq_oi.oi_oa->o_stripe_idx = i;
432 req->rq_oi.oi_cb_up = cb_setattr_update;
433
434 if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
435 int off = lov_stripe_offset(oinfo->oi_md,
436 oinfo->oi_oa->o_size, i,
437 &req->rq_oi.oi_oa->o_size);
438
439 if (off < 0 && req->rq_oi.oi_oa->o_size)
440 req->rq_oi.oi_oa->o_size--;
441
442 CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n",
443 i, req->rq_oi.oi_oa->o_size,
444 oinfo->oi_oa->o_size);
445 }
446 lov_set_add_req(req, set);
447 }
448 if (!set->set_count) {
449 rc = -EIO;
450 goto out_set;
451 }
452 *reqset = set;
453 return rc;
454out_set:
455 lov_fini_setattr_set(set);
456 return rc;
457}
458
459#define LOV_U64_MAX ((__u64)~0ULL) 167#define LOV_U64_MAX ((__u64)~0ULL)
460#define LOV_SUM_MAX(tot, add) \ 168#define LOV_SUM_MAX(tot, add) \
461 do { \ 169 do { \
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index b519a1940e1e..5d6536f8a4f7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -44,46 +44,6 @@
44 44
45/***************************************************************************** 45/*****************************************************************************
46 * 46 *
47 * Lovsub transfer operations.
48 *
49 */
50
51static void lovsub_req_completion(const struct lu_env *env,
52 const struct cl_req_slice *slice, int ioret)
53{
54 struct lovsub_req *lsr;
55
56 lsr = cl2lovsub_req(slice);
57 kmem_cache_free(lovsub_req_kmem, lsr);
58}
59
60/**
61 * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
62 * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
63 * field, which is filled there.
64 */
65static void lovsub_req_attr_set(const struct lu_env *env,
66 const struct cl_req_slice *slice,
67 const struct cl_object *obj,
68 struct cl_req_attr *attr, u64 flags)
69{
70 struct lovsub_object *subobj;
71
72 subobj = cl2lovsub(obj);
73 /*
74 * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
75 * unconditionally. It never changes anyway.
76 */
77 attr->cra_oa->o_stripe_idx = subobj->lso_index;
78}
79
80static const struct cl_req_operations lovsub_req_ops = {
81 .cro_attr_set = lovsub_req_attr_set,
82 .cro_completion = lovsub_req_completion
83};
84
85/*****************************************************************************
86 *
87 * Lov-sub device and device type functions. 47 * Lov-sub device and device type functions.
88 * 48 *
89 */ 49 */
@@ -137,32 +97,12 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
137 return next; 97 return next;
138} 98}
139 99
140static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
141 struct cl_req *req)
142{
143 struct lovsub_req *lsr;
144 int result;
145
146 lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
147 if (lsr) {
148 cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
149 result = 0;
150 } else {
151 result = -ENOMEM;
152 }
153 return result;
154}
155
156static const struct lu_device_operations lovsub_lu_ops = { 100static const struct lu_device_operations lovsub_lu_ops = {
157 .ldo_object_alloc = lovsub_object_alloc, 101 .ldo_object_alloc = lovsub_object_alloc,
158 .ldo_process_config = NULL, 102 .ldo_process_config = NULL,
159 .ldo_recovery_complete = NULL 103 .ldo_recovery_complete = NULL
160}; 104};
161 105
162static const struct cl_device_operations lovsub_cl_ops = {
163 .cdo_req_init = lovsub_req_init
164};
165
166static struct lu_device *lovsub_device_alloc(const struct lu_env *env, 106static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
167 struct lu_device_type *t, 107 struct lu_device_type *t,
168 struct lustre_cfg *cfg) 108 struct lustre_cfg *cfg)
@@ -178,7 +118,6 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
178 if (result == 0) { 118 if (result == 0) {
179 d = lovsub2lu_dev(lsd); 119 d = lovsub2lu_dev(lsd);
180 d->ld_ops = &lovsub_lu_ops; 120 d->ld_ops = &lovsub_lu_ops;
181 lsd->acid_cl.cd_ops = &lovsub_cl_ops;
182 } else { 121 } else {
183 d = ERR_PTR(result); 122 d = ERR_PTR(result);
184 } 123 }
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index a2bac7a3b71b..011296ee16e6 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -116,11 +116,31 @@ static int lovsub_object_glimpse(const struct lu_env *env,
116 return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb); 116 return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
117} 117}
118 118
119/**
120 * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub
121 * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
122 * field, which is filled there.
123 */
124static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj,
125 struct cl_req_attr *attr)
126{
127 struct lovsub_object *subobj = cl2lovsub(obj);
128
129 cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr);
130
131 /*
132 * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
133 * unconditionally. It never changes anyway.
134 */
135 attr->cra_oa->o_stripe_idx = subobj->lso_index;
136}
137
119static const struct cl_object_operations lovsub_ops = { 138static const struct cl_object_operations lovsub_ops = {
120 .coo_page_init = lovsub_page_init, 139 .coo_page_init = lovsub_page_init,
121 .coo_lock_init = lovsub_lock_init, 140 .coo_lock_init = lovsub_lock_init,
122 .coo_attr_update = lovsub_attr_update, 141 .coo_attr_update = lovsub_attr_update,
123 .coo_glimpse = lovsub_object_glimpse 142 .coo_glimpse = lovsub_object_glimpse,
143 .coo_req_attr_set = lovsub_req_attr_set
124}; 144};
125 145
126static const struct lu_object_operations lovsub_lu_obj_ops = { 146static const struct lu_object_operations lovsub_lu_obj_ops = {
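
Note: the two lovsub hunks above move the o_stripe_idx bookkeeping from a per-request cl_req slice onto the object's operations table (coo_req_attr_set), delegating to the parent lov object before filling in the sub-object's own index. The standalone userspace C sketch below mirrors only that shape with simplified, locally defined types; none of the names are the real Lustre symbols.

/* Sketch of the pattern in the hunks above: the per-request "slice" that set
 * o_stripe_idx is gone, and the same work now hangs off the object's ops
 * table, first delegating to the parent object, then filling its own field.
 * All types and names here are local to this example.
 */
#include <stdio.h>
#include <stdint.h>

struct req_attr {
	uint32_t stripe_idx;
};

struct object;

struct object_operations {
	void (*req_attr_set)(struct object *obj, struct req_attr *attr);
};

struct object {
	const struct object_operations *ops;
	struct object *parent;	/* e.g. lovsub -> lov "super" object */
	uint32_t index;		/* stripe index of this sub-object */
};

/* Generic entry point: hand off to whichever layer owns the object. */
static void object_req_attr_set(struct object *obj, struct req_attr *attr)
{
	if (obj->ops && obj->ops->req_attr_set)
		obj->ops->req_attr_set(obj, attr);
}

/* Sub-object implementation: delegate upward, then set the stripe index
 * unconditionally, as the removed lovsub_req_attr_set() did. */
static void sub_req_attr_set(struct object *obj, struct req_attr *attr)
{
	if (obj->parent)
		object_req_attr_set(obj->parent, attr);
	attr->stripe_idx = obj->index;
}

static const struct object_operations sub_ops = {
	.req_attr_set = sub_req_attr_set,
};

int main(void)
{
	struct object top = { .ops = NULL, .parent = NULL, .index = 0 };
	struct object sub = { .ops = &sub_ops, .parent = &top, .index = 3 };
	struct req_attr attr = { 0 };

	object_req_attr_set(&sub, &attr);
	printf("stripe_idx = %u\n", (unsigned int)attr.stripe_idx);  /* 3 */
	return 0;
}
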
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index fca9450de57c..9021c465c044 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -36,6 +36,42 @@
36#include "../include/lprocfs_status.h" 36#include "../include/lprocfs_status.h"
37#include "mdc_internal.h" 37#include "mdc_internal.h"
38 38
39static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
40 char *buf)
41{
42 struct obd_device *dev = container_of(kobj, struct obd_device,
43 obd_kobj);
44
45 return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive);
46}
47
48static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
49 const char *buffer, size_t count)
50{
51 struct obd_device *dev = container_of(kobj, struct obd_device,
52 obd_kobj);
53 unsigned long val;
54 int rc;
55
56 rc = kstrtoul(buffer, 10, &val);
57 if (rc)
58 return rc;
59
60 if (val < 0 || val > 1)
61 return -ERANGE;
62
63 /* opposite senses */
64 if (dev->u.cli.cl_import->imp_deactive == val) {
65 rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
66 if (rc)
67 count = rc;
68 } else {
69 CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val);
70 }
71 return count;
72}
73LUSTRE_RW_ATTR(active);
74
39static ssize_t max_rpcs_in_flight_show(struct kobject *kobj, 75static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
40 struct attribute *attr, 76 struct attribute *attr,
41 char *buf) 77 char *buf)
@@ -73,6 +109,64 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
73} 109}
74LUSTRE_RW_ATTR(max_rpcs_in_flight); 110LUSTRE_RW_ATTR(max_rpcs_in_flight);
75 111
112static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj,
113 struct attribute *attr,
114 char *buf)
115{
116 struct obd_device *dev = container_of(kobj, struct obd_device,
117 obd_kobj);
118 u16 max;
119 int len;
120
121 max = dev->u.cli.cl_max_mod_rpcs_in_flight;
122 len = sprintf(buf, "%hu\n", max);
123
124 return len;
125}
126
127static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj,
128 struct attribute *attr,
129 const char *buffer,
130 size_t count)
131{
132 struct obd_device *dev = container_of(kobj, struct obd_device,
133 obd_kobj);
134 u16 val;
135 int rc;
136
137 rc = kstrtou16(buffer, 10, &val);
138 if (rc)
139 return rc;
140
141 rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val);
142 if (rc)
143 count = rc;
144
145 return count;
146}
147LUSTRE_RW_ATTR(max_mod_rpcs_in_flight);
148
149static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v)
150{
151 struct obd_device *dev = seq->private;
152
153 return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq);
154}
155
156static ssize_t mdc_rpc_stats_seq_write(struct file *file,
157 const char __user *buf,
158 size_t len, loff_t *off)
159{
160 struct seq_file *seq = file->private_data;
161 struct obd_device *dev = seq->private;
162 struct client_obd *cli = &dev->u.cli;
163
164 lprocfs_oh_clear(&cli->cl_mod_rpcs_hist);
165
166 return len;
167}
168LPROC_SEQ_FOPS(mdc_rpc_stats);
169
76LPROC_SEQ_FOPS_WR_ONLY(mdc, ping); 170LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
77 171
78LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags); 172LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
@@ -112,11 +206,15 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
112 { "import", &mdc_import_fops, NULL, 0 }, 206 { "import", &mdc_import_fops, NULL, 0 },
113 { "state", &mdc_state_fops, NULL, 0 }, 207 { "state", &mdc_state_fops, NULL, 0 },
114 { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 }, 208 { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 },
209 { .name = "rpc_stats",
210 .fops = &mdc_rpc_stats_fops },
115 { NULL } 211 { NULL }
116}; 212};
117 213
118static struct attribute *mdc_attrs[] = { 214static struct attribute *mdc_attrs[] = {
215 &lustre_attr_active.attr,
119 &lustre_attr_max_rpcs_in_flight.attr, 216 &lustre_attr_max_rpcs_in_flight.attr,
217 &lustre_attr_max_mod_rpcs_in_flight.attr,
120 &lustre_attr_max_pages_per_rpc.attr, 218 &lustre_attr_max_pages_per_rpc.attr,
121 NULL, 219 NULL,
122}; 220};
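
Note: the new active, max_mod_rpcs_in_flight and rpc_stats entries above follow the usual kernel sysfs pattern: a show handler prints the current value, a store handler parses the input with kstrtoul()/kstrtou16(), range-checks it and applies it. The sketch below reproduces only that show/store shape as plain userspace C with local stand-ins for the kernel helpers, so it compiles on its own; it is an illustration of the pattern, not the driver code.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <sys/types.h>

struct client_state {
	uint16_t max_mod_rpcs_in_flight;
};

/* "show": format the current value into the caller's buffer. */
static ssize_t max_mod_rpcs_show(const struct client_state *cli,
				 char *buf, size_t size)
{
	return snprintf(buf, size, "%hu\n", cli->max_mod_rpcs_in_flight);
}

/* "store": parse, range-check, then apply; return bytes consumed or -errno. */
static ssize_t max_mod_rpcs_store(struct client_state *cli,
				  const char *buffer, size_t count)
{
	char *end = NULL;
	unsigned long val = strtoul(buffer, &end, 10);

	if (end == buffer || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (val < 1 || val > UINT16_MAX)
		return -ERANGE;

	cli->max_mod_rpcs_in_flight = (uint16_t)val;
	return (ssize_t)count;	/* sysfs convention: report input consumed */
}

int main(void)
{
	struct client_state cli = { .max_mod_rpcs_in_flight = 7 };
	char buf[16];

	if (max_mod_rpcs_store(&cli, "9\n", 2) < 0)
		return 1;
	max_mod_rpcs_show(&cli, buf, sizeof(buf));
	fputs(buf, stdout);	/* prints "9" */
	return 0;
}
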
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index f446c1c2584b..881c6a0676a6 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -46,7 +46,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
46void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, 46void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
47 struct md_op_data *data, size_t ea_size); 47 struct md_op_data *data, size_t ea_size);
48void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, 48void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
49 void *ea, size_t ealen, void *ea2, size_t ea2len); 49 void *ea, size_t ealen);
50void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, 50void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
51 const void *data, size_t datalen, umode_t mode, uid_t uid, 51 const void *data, size_t datalen, umode_t mode, uid_t uid,
52 gid_t gid, cfs_cap_t capability, __u64 rdev); 52 gid_t gid, cfs_cap_t capability, __u64 rdev);
@@ -75,7 +75,7 @@ int mdc_intent_lock(struct obd_export *exp,
75 __u64 extra_lock_flags); 75 __u64 extra_lock_flags);
76 76
77int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, 77int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
78 const ldlm_policy_data_t *policy, 78 const union ldlm_policy_data *policy,
79 struct lookup_intent *it, struct md_op_data *op_data, 79 struct lookup_intent *it, struct md_op_data *op_data,
80 struct lustre_handle *lockh, __u64 extra_lock_flags); 80 struct lustre_handle *lockh, __u64 extra_lock_flags);
81 81
@@ -105,12 +105,11 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
105 const char *new, size_t newlen, 105 const char *new, size_t newlen,
106 struct ptlrpc_request **request); 106 struct ptlrpc_request **request);
107int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, 107int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
108 void *ea, size_t ealen, void *ea2, size_t ea2len, 108 void *ea, size_t ealen, struct ptlrpc_request **request);
109 struct ptlrpc_request **request, struct md_open_data **mod);
110int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, 109int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
111 struct ptlrpc_request **request); 110 struct ptlrpc_request **request);
112int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, 111int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
113 ldlm_policy_data_t *policy, enum ldlm_mode mode, 112 union ldlm_policy_data *policy, enum ldlm_mode mode,
114 enum ldlm_cancel_flags flags, void *opaque); 113 enum ldlm_cancel_flags flags, void *opaque);
115 114
116int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, 115int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -122,7 +121,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
122 121
123enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, 122enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
124 const struct lu_fid *fid, enum ldlm_type type, 123 const struct lu_fid *fid, enum ldlm_type type,
125 ldlm_policy_data_t *policy, enum ldlm_mode mode, 124 union ldlm_policy_data *policy,
125 enum ldlm_mode mode,
126 struct lustre_handle *lockh); 126 struct lustre_handle *lockh);
127 127
128static inline int mdc_prep_elc_req(struct obd_export *exp, 128static inline int mdc_prep_elc_req(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index aac7e04873e2..f35e1f9afdef 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -139,7 +139,7 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
139 rec->cr_time = op_data->op_mod_time; 139 rec->cr_time = op_data->op_mod_time;
140 rec->cr_suppgid1 = op_data->op_suppgids[0]; 140 rec->cr_suppgid1 = op_data->op_suppgids[0];
141 rec->cr_suppgid2 = op_data->op_suppgids[1]; 141 rec->cr_suppgid2 = op_data->op_suppgids[1];
142 flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS; 142 flags = 0;
143 if (op_data->op_bias & MDS_CREATE_VOLATILE) 143 if (op_data->op_bias & MDS_CREATE_VOLATILE)
144 flags |= MDS_OPEN_VOLATILE; 144 flags |= MDS_OPEN_VOLATILE;
145 set_mrc_cr_flags(rec, flags); 145 set_mrc_cr_flags(rec, flags);
@@ -301,16 +301,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
301static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch, 301static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
302 struct md_op_data *op_data) 302 struct md_op_data *op_data)
303{ 303{
304 memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle)); 304 epoch->mio_handle = op_data->op_handle;
305 epoch->ioepoch = op_data->op_ioepoch; 305 epoch->mio_unused1 = 0;
306 epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS; 306 epoch->mio_unused2 = 0;
307 epoch->mio_padding = 0;
307} 308}
308 309
309void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, 310void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
310 void *ea, size_t ealen, void *ea2, size_t ea2len) 311 void *ea, size_t ealen)
311{ 312{
312 struct mdt_rec_setattr *rec; 313 struct mdt_rec_setattr *rec;
313 struct mdt_ioepoch *epoch;
314 struct lov_user_md *lum = NULL; 314 struct lov_user_md *lum = NULL;
315 315
316 CLASSERT(sizeof(struct mdt_rec_reint) == 316 CLASSERT(sizeof(struct mdt_rec_reint) ==
@@ -318,11 +318,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
318 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); 318 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
319 mdc_setattr_pack_rec(rec, op_data); 319 mdc_setattr_pack_rec(rec, op_data);
320 320
321 if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
322 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
323 mdc_ioepoch_pack(epoch, op_data);
324 }
325
326 if (ealen == 0) 321 if (ealen == 0)
327 return; 322 return;
328 323
@@ -335,12 +330,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
335 } else { 330 } else {
336 memcpy(lum, ea, ealen); 331 memcpy(lum, ea, ealen);
337 } 332 }
338
339 if (ea2len == 0)
340 return;
341
342 memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
343 ea2len);
344} 333}
345 334
346void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) 335void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
@@ -387,6 +376,31 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
387 mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen); 376 mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
388} 377}
389 378
379static void mdc_intent_close_pack(struct ptlrpc_request *req,
380 struct md_op_data *op_data)
381{
382 enum mds_op_bias bias = op_data->op_bias;
383 struct close_data *data;
384 struct ldlm_lock *lock;
385
386 if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP |
387 MDS_RENAME_MIGRATE)))
388 return;
389
390 data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
391 LASSERT(data);
392
393 lock = ldlm_handle2lock(&op_data->op_lease_handle);
394 if (lock) {
395 data->cd_handle = lock->l_remote_handle;
396 LDLM_LOCK_PUT(lock);
397 }
398 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
399
400 data->cd_data_version = op_data->op_data_version;
401 data->cd_fid = op_data->op_fid2;
402}
403
390void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, 404void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
391 const char *old, size_t oldlen, 405 const char *old, size_t oldlen,
392 const char *new, size_t newlen) 406 const char *new, size_t newlen)
@@ -415,6 +429,15 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
415 429
416 if (new) 430 if (new)
417 mdc_pack_name(req, &RMF_SYMTGT, new, newlen); 431 mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
432
433 if (op_data->op_cli_flags & CLI_MIGRATE &&
434 op_data->op_bias & MDS_RENAME_MIGRATE) {
435 struct mdt_ioepoch *epoch;
436
437 mdc_intent_close_pack(req, op_data);
438 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
439 mdc_ioepoch_pack(epoch, op_data);
440 }
418} 441}
419 442
420void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, 443void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
@@ -441,27 +464,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
441 op_data->op_namelen); 464 op_data->op_namelen);
442} 465}
443 466
444static void mdc_hsm_release_pack(struct ptlrpc_request *req,
445 struct md_op_data *op_data)
446{
447 if (op_data->op_bias & MDS_HSM_RELEASE) {
448 struct close_data *data;
449 struct ldlm_lock *lock;
450
451 data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
452
453 lock = ldlm_handle2lock(&op_data->op_lease_handle);
454 if (lock) {
455 data->cd_handle = lock->l_remote_handle;
456 LDLM_LOCK_PUT(lock);
457 }
458 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
459
460 data->cd_data_version = op_data->op_data_version;
461 data->cd_fid = op_data->op_fid2;
462 }
463}
464
465void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data) 467void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
466{ 468{
467 struct mdt_ioepoch *epoch; 469 struct mdt_ioepoch *epoch;
@@ -484,5 +486,5 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
484 rec->sa_valid &= ~MDS_ATTR_ATIME; 486 rec->sa_valid &= ~MDS_ATTR_ATIME;
485 487
486 mdc_ioepoch_pack(epoch, op_data); 488 mdc_ioepoch_pack(epoch, op_data);
487 mdc_hsm_release_pack(req, op_data); 489 mdc_intent_close_pack(req, op_data);
488} 490}
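
Note: in the hunks above the HSM-release-only packer is generalised into mdc_intent_close_pack(), which fires for any of the HSM release, layout swap or rename/migrate bias bits and fills the close data from the lease handle. A minimal userspace sketch of that bitmask gate, with made-up flag values and field names, looks roughly like this:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum op_bias {
	BIAS_HSM_RELEASE    = 1 << 0,
	BIAS_LAYOUT_SWAP    = 1 << 1,
	BIAS_RENAME_MIGRATE = 1 << 2,
};

struct close_data {
	uint64_t data_version;
	uint64_t fid;
};

struct op_data {
	unsigned int bias;
	uint64_t data_version;
	uint64_t fid2;
};

/* Returns true if close data was packed, i.e. the request needs the extra
 * buffer that the intent-close request format carries. */
static bool intent_close_pack(const struct op_data *op, struct close_data *out)
{
	if (!(op->bias & (BIAS_HSM_RELEASE | BIAS_LAYOUT_SWAP |
			  BIAS_RENAME_MIGRATE)))
		return false;

	out->data_version = op->data_version;
	out->fid = op->fid2;
	return true;
}

int main(void)
{
	struct op_data op = { .bias = BIAS_RENAME_MIGRATE,
			      .data_version = 42, .fid2 = 7 };
	struct close_data cd = { 0 };
	bool packed = intent_close_pack(&op, &cd);

	printf("packed: %d (version %llu)\n", packed,
	       (unsigned long long)cd.data_version);
	return 0;
}
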
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index f1f6c082fa42..54ebb9952d66 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -38,10 +38,12 @@
38#include "../include/obd.h" 38#include "../include/obd.h"
39#include "../include/obd_class.h" 39#include "../include/obd_class.h"
40#include "../include/lustre_dlm.h" 40#include "../include/lustre_dlm.h"
41#include "../include/lustre_fid.h" /* fid_res_name_eq() */ 41#include "../include/lustre_fid.h"
42#include "../include/lustre_mdc.h" 42#include "../include/lustre_mdc.h"
43#include "../include/lustre_net.h" 43#include "../include/lustre_net.h"
44#include "../include/lustre_req_layout.h" 44#include "../include/lustre_req_layout.h"
45#include "../include/lustre_swab.h"
46
45#include "mdc_internal.h" 47#include "mdc_internal.h"
46 48
47struct mdc_getattr_args { 49struct mdc_getattr_args {
@@ -131,7 +133,8 @@ int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
131 133
132enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, 134enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
133 const struct lu_fid *fid, enum ldlm_type type, 135 const struct lu_fid *fid, enum ldlm_type type,
134 ldlm_policy_data_t *policy, enum ldlm_mode mode, 136 union ldlm_policy_data *policy,
137 enum ldlm_mode mode,
135 struct lustre_handle *lockh) 138 struct lustre_handle *lockh)
136{ 139{
137 struct ldlm_res_id res_id; 140 struct ldlm_res_id res_id;
@@ -147,7 +150,7 @@ enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
147 150
148int mdc_cancel_unused(struct obd_export *exp, 151int mdc_cancel_unused(struct obd_export *exp,
149 const struct lu_fid *fid, 152 const struct lu_fid *fid,
150 ldlm_policy_data_t *policy, 153 union ldlm_policy_data *policy,
151 enum ldlm_mode mode, 154 enum ldlm_mode mode,
152 enum ldlm_cancel_flags flags, 155 enum ldlm_cancel_flags flags,
153 void *opaque) 156 void *opaque)
@@ -386,8 +389,6 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
386 389
387 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 390 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
388 obddev->u.cli.cl_default_mds_easize); 391 obddev->u.cli.cl_default_mds_easize);
389 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
390 obddev->u.cli.cl_default_mds_cookiesize);
391 ptlrpc_request_set_replen(req); 392 ptlrpc_request_set_replen(req);
392 return req; 393 return req;
393} 394}
@@ -688,20 +689,20 @@ static int mdc_finish_enqueue(struct obd_export *exp,
688 * we don't know in advance the file type. 689 * we don't know in advance the file type.
689 */ 690 */
690int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, 691int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
691 const ldlm_policy_data_t *policy, 692 const union ldlm_policy_data *policy,
692 struct lookup_intent *it, struct md_op_data *op_data, 693 struct lookup_intent *it, struct md_op_data *op_data,
693 struct lustre_handle *lockh, u64 extra_lock_flags) 694 struct lustre_handle *lockh, u64 extra_lock_flags)
694{ 695{
695 static const ldlm_policy_data_t lookup_policy = { 696 static const union ldlm_policy_data lookup_policy = {
696 .l_inodebits = { MDS_INODELOCK_LOOKUP } 697 .l_inodebits = { MDS_INODELOCK_LOOKUP }
697 }; 698 };
698 static const ldlm_policy_data_t update_policy = { 699 static const union ldlm_policy_data update_policy = {
699 .l_inodebits = { MDS_INODELOCK_UPDATE } 700 .l_inodebits = { MDS_INODELOCK_UPDATE }
700 }; 701 };
701 static const ldlm_policy_data_t layout_policy = { 702 static const union ldlm_policy_data layout_policy = {
702 .l_inodebits = { MDS_INODELOCK_LAYOUT } 703 .l_inodebits = { MDS_INODELOCK_LAYOUT }
703 }; 704 };
704 static const ldlm_policy_data_t getxattr_policy = { 705 static const union ldlm_policy_data getxattr_policy = {
705 .l_inodebits = { MDS_INODELOCK_XATTR } 706 .l_inodebits = { MDS_INODELOCK_XATTR }
706 }; 707 };
707 struct obd_device *obddev = class_exp2obd(exp); 708 struct obd_device *obddev = class_exp2obd(exp);
@@ -762,27 +763,22 @@ resend:
762 if (IS_ERR(req)) 763 if (IS_ERR(req))
763 return PTR_ERR(req); 764 return PTR_ERR(req);
764 765
765 if (req && it && it->it_op & IT_CREAT)
766 /* ask ptlrpc not to resend on EINPROGRESS since we have our own
767 * retry logic
768 */
769 req->rq_no_retry_einprogress = 1;
770
771 if (resends) { 766 if (resends) {
772 req->rq_generation_set = 1; 767 req->rq_generation_set = 1;
773 req->rq_import_generation = generation; 768 req->rq_import_generation = generation;
774 req->rq_sent = ktime_get_real_seconds() + resends; 769 req->rq_sent = ktime_get_real_seconds() + resends;
775 } 770 }
776 771
777 /* It is important to obtain rpc_lock first (if applicable), so that 772 /* It is important to obtain modify RPC slot first (if applicable), so
778 * threads that are serialised with rpc_lock are not polluting our 773 * that threads that are waiting for a modify RPC slot are not polluting
779 * rpcs in flight counter. We do not do flock request limiting, though 774 * our rpcs in flight counter.
775 * We do not do flock request limiting, though
780 */ 776 */
781 if (it) { 777 if (it) {
782 mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); 778 mdc_get_mod_rpc_slot(req, it);
783 rc = obd_get_request_slot(&obddev->u.cli); 779 rc = obd_get_request_slot(&obddev->u.cli);
784 if (rc != 0) { 780 if (rc != 0) {
785 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); 781 mdc_put_mod_rpc_slot(req, it);
786 mdc_clear_replay_flag(req, 0); 782 mdc_clear_replay_flag(req, 0);
787 ptlrpc_req_finished(req); 783 ptlrpc_req_finished(req);
788 return rc; 784 return rc;
@@ -809,7 +805,7 @@ resend:
809 } 805 }
810 806
811 obd_put_request_slot(&obddev->u.cli); 807 obd_put_request_slot(&obddev->u.cli);
812 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); 808 mdc_put_mod_rpc_slot(req, it);
813 809
814 if (rc < 0) { 810 if (rc < 0) {
815 CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n", 811 CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
@@ -825,11 +821,12 @@ resend:
825 lockrep->lock_policy_res2 = 821 lockrep->lock_policy_res2 =
826 ptlrpc_status_ntoh(lockrep->lock_policy_res2); 822 ptlrpc_status_ntoh(lockrep->lock_policy_res2);
827 823
828 /* Retry the create infinitely when we get -EINPROGRESS from 824 /*
829 * server. This is required by the new quota design. 825 * Retry infinitely when the server returns -EINPROGRESS for the
826 * intent operation, when server returns -EINPROGRESS for acquiring
827 * intent lock, we'll retry in after_reply().
830 */ 828 */
831 if (it->it_op & IT_CREAT && 829 if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
832 (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
833 mdc_clear_replay_flag(req, rc); 830 mdc_clear_replay_flag(req, rc);
834 ptlrpc_req_finished(req); 831 ptlrpc_req_finished(req);
835 resends++; 832 resends++;
@@ -931,7 +928,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
931 */ 928 */
932 lock = ldlm_handle2lock(lockh); 929 lock = ldlm_handle2lock(lockh);
933 if (lock) { 930 if (lock) {
934 ldlm_policy_data_t policy = lock->l_policy_data; 931 union ldlm_policy_data policy = lock->l_policy_data;
935 932
936 LDLM_DEBUG(lock, "matching against this"); 933 LDLM_DEBUG(lock, "matching against this");
937 934
@@ -967,7 +964,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
967 */ 964 */
968 struct ldlm_res_id res_id; 965 struct ldlm_res_id res_id;
969 struct lustre_handle lockh; 966 struct lustre_handle lockh;
970 ldlm_policy_data_t policy; 967 union ldlm_policy_data policy;
971 enum ldlm_mode mode; 968 enum ldlm_mode mode;
972 969
973 if (it->it_lock_handle) { 970 if (it->it_lock_handle) {
@@ -1169,10 +1166,9 @@ int mdc_intent_getattr_async(struct obd_export *exp,
1169 * for statahead currently. Consider CMD in future, such two bits 1166 * for statahead currently. Consider CMD in future, such two bits
1170 * maybe managed by different MDS, should be adjusted then. 1167 * maybe managed by different MDS, should be adjusted then.
1171 */ 1168 */
1172 ldlm_policy_data_t policy = { 1169 union ldlm_policy_data policy = {
1173 .l_inodebits = { MDS_INODELOCK_LOOKUP | 1170 .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE }
1174 MDS_INODELOCK_UPDATE } 1171 };
1175 };
1176 int rc = 0; 1172 int rc = 0;
1177 __u64 flags = LDLM_FL_HAS_INTENT; 1173 __u64 flags = LDLM_FL_HAS_INTENT;
1178 1174
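
Note: the mdc_locks.c changes above swap the single cl_rpc_lock serialisation for a counted pool of modify RPC slots (mdc_get_mod_rpc_slot()/mdc_put_mod_rpc_slot()), sized by the new max_mod_rpcs_in_flight attribute. The sketch below shows only the slot accounting in single-threaded userspace C; the real helpers also record a slot number in the request and sleep when the pool is full, which is not modelled here.

#include <stdio.h>
#include <stdbool.h>

struct mod_rpc_pool {
	unsigned int max;	/* e.g. the max_mod_rpcs_in_flight knob */
	unsigned int in_flight;
};

/* Try to claim a slot; the kernel version blocks instead of failing. */
static bool mod_rpc_slot_get(struct mod_rpc_pool *pool)
{
	if (pool->in_flight >= pool->max)
		return false;
	pool->in_flight++;
	return true;
}

static void mod_rpc_slot_put(struct mod_rpc_pool *pool)
{
	if (pool->in_flight > 0)
		pool->in_flight--;
}

int main(void)
{
	struct mod_rpc_pool pool = { .max = 2, .in_flight = 0 };

	printf("first:  %d\n", mod_rpc_slot_get(&pool));	/* 1 */
	printf("second: %d\n", mod_rpc_slot_get(&pool));	/* 1 */
	printf("third:  %d\n", mod_rpc_slot_get(&pool));	/* 0: pool full */
	mod_rpc_slot_put(&pool);
	printf("retry:  %d\n", mod_rpc_slot_get(&pool));	/* 1 again */
	return 0;
}
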
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index c921e471fa27..07b168490f09 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -40,17 +40,15 @@
40#include "../include/lustre_fid.h" 40#include "../include/lustre_fid.h"
41 41
42/* mdc_setattr does its own semaphore handling */ 42/* mdc_setattr does its own semaphore handling */
43static int mdc_reint(struct ptlrpc_request *request, 43static int mdc_reint(struct ptlrpc_request *request, int level)
44 struct mdc_rpc_lock *rpc_lock,
45 int level)
46{ 44{
47 int rc; 45 int rc;
48 46
49 request->rq_send_state = level; 47 request->rq_send_state = level;
50 48
51 mdc_get_rpc_lock(rpc_lock, NULL); 49 mdc_get_mod_rpc_slot(request, NULL);
52 rc = ptlrpc_queue_wait(request); 50 rc = ptlrpc_queue_wait(request);
53 mdc_put_rpc_lock(rpc_lock, NULL); 51 mdc_put_mod_rpc_slot(request, NULL);
54 if (rc) 52 if (rc)
55 CDEBUG(D_INFO, "error in handling %d\n", rc); 53 CDEBUG(D_INFO, "error in handling %d\n", rc);
56 else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY)) 54 else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
@@ -68,7 +66,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
68 __u64 bits) 66 __u64 bits)
69{ 67{
70 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; 68 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
71 ldlm_policy_data_t policy = {}; 69 union ldlm_policy_data policy = {};
72 struct ldlm_res_id res_id; 70 struct ldlm_res_id res_id;
73 struct ldlm_resource *res; 71 struct ldlm_resource *res;
74 int count; 72 int count;
@@ -99,13 +97,10 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
99} 97}
100 98
101int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, 99int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
102 void *ea, size_t ealen, void *ea2, size_t ea2len, 100 void *ea, size_t ealen, struct ptlrpc_request **request)
103 struct ptlrpc_request **request, struct md_open_data **mod)
104{ 101{
105 LIST_HEAD(cancels); 102 LIST_HEAD(cancels);
106 struct ptlrpc_request *req; 103 struct ptlrpc_request *req;
107 struct mdc_rpc_lock *rpc_lock;
108 struct obd_device *obd = exp->exp_obd;
109 int count = 0, rc; 104 int count = 0, rc;
110 __u64 bits; 105 __u64 bits;
111 106
@@ -122,12 +117,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
122 ldlm_lock_list_put(&cancels, l_bl_ast, count); 117 ldlm_lock_list_put(&cancels, l_bl_ast, count);
123 return -ENOMEM; 118 return -ENOMEM;
124 } 119 }
125 if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0) 120 req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
126 req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
127 0);
128 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen); 121 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
129 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 122 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);
130 ea2len);
131 123
132 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); 124 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
133 if (rc) { 125 if (rc) {
@@ -135,63 +127,21 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
135 return rc; 127 return rc;
136 } 128 }
137 129
138 rpc_lock = obd->u.cli.cl_rpc_lock;
139
140 if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME)) 130 if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
141 CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n", 131 CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
142 LTIME_S(op_data->op_attr.ia_mtime), 132 LTIME_S(op_data->op_attr.ia_mtime),
143 LTIME_S(op_data->op_attr.ia_ctime)); 133 LTIME_S(op_data->op_attr.ia_ctime));
144 mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len); 134 mdc_setattr_pack(req, op_data, ea, ealen);
145 135
146 ptlrpc_request_set_replen(req); 136 ptlrpc_request_set_replen(req);
147 if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
148 req->rq_import->imp_replayable) {
149 LASSERT(!*mod);
150
151 *mod = obd_mod_alloc();
152 if (!*mod) {
153 DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
154 } else {
155 req->rq_replay = 1;
156 req->rq_cb_data = *mod;
157 (*mod)->mod_open_req = req;
158 req->rq_commit_cb = mdc_commit_open;
159 (*mod)->mod_is_create = true;
160 /**
161 * Take an extra reference on \var mod, it protects \var
162 * mod from being freed on eviction (commit callback is
163 * called despite rq_replay flag).
164 * Will be put on mdc_done_writing().
165 */
166 obd_mod_get(*mod);
167 }
168 }
169
170 rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
171 137
172 /* Save the obtained info in the original RPC for the replay case. */ 138 rc = mdc_reint(req, LUSTRE_IMP_FULL);
173 if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
174 struct mdt_ioepoch *epoch;
175 struct mdt_body *body;
176 139
177 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); 140 if (rc == -ERESTARTSYS)
178 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
179 epoch->handle = body->mbo_handle;
180 epoch->ioepoch = body->mbo_ioepoch;
181 req->rq_replay_cb = mdc_replay_open;
182 /** bug 3633, open may be committed and estale answer is not error */
183 } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
184 rc = 0;
185 } else if (rc == -ERESTARTSYS) {
186 rc = 0; 141 rc = 0;
187 } 142
188 *request = req; 143 *request = req;
189 if (rc && req->rq_commit_cb) { 144
190 /* Put an extra reference on \var mod on error case. */
191 if (mod && *mod)
192 obd_mod_put(*mod);
193 req->rq_commit_cb(req);
194 }
195 return rc; 145 return rc;
196} 146}
197 147
@@ -264,7 +214,7 @@ rebuild:
264 } 214 }
265 level = LUSTRE_IMP_FULL; 215 level = LUSTRE_IMP_FULL;
266 resend: 216 resend:
267 rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level); 217 rc = mdc_reint(req, level);
268 218
269 /* Resend if we were told to. */ 219 /* Resend if we were told to. */
270 if (rc == -ERESTARTSYS) { 220 if (rc == -ERESTARTSYS) {
@@ -332,13 +282,11 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
332 282
333 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 283 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
334 obd->u.cli.cl_default_mds_easize); 284 obd->u.cli.cl_default_mds_easize);
335 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
336 obd->u.cli.cl_default_mds_cookiesize);
337 ptlrpc_request_set_replen(req); 285 ptlrpc_request_set_replen(req);
338 286
339 *request = req; 287 *request = req;
340 288
341 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); 289 rc = mdc_reint(req, LUSTRE_IMP_FULL);
342 if (rc == -ERESTARTSYS) 290 if (rc == -ERESTARTSYS)
343 rc = 0; 291 rc = 0;
344 return rc; 292 return rc;
@@ -348,7 +296,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
348 struct ptlrpc_request **request) 296 struct ptlrpc_request **request)
349{ 297{
350 LIST_HEAD(cancels); 298 LIST_HEAD(cancels);
351 struct obd_device *obd = exp->exp_obd;
352 struct ptlrpc_request *req; 299 struct ptlrpc_request *req;
353 int count = 0, rc; 300 int count = 0, rc;
354 301
@@ -380,7 +327,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
380 mdc_link_pack(req, op_data); 327 mdc_link_pack(req, op_data);
381 ptlrpc_request_set_replen(req); 328 ptlrpc_request_set_replen(req);
382 329
383 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); 330 rc = mdc_reint(req, LUSTRE_IMP_FULL);
384 *request = req; 331 *request = req;
385 if (rc == -ERESTARTSYS) 332 if (rc == -ERESTARTSYS)
386 rc = 0; 333 rc = 0;
@@ -419,7 +366,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
419 MDS_INODELOCK_FULL); 366 MDS_INODELOCK_FULL);
420 367
421 req = ptlrpc_request_alloc(class_exp2cliimp(exp), 368 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
422 &RQF_MDS_REINT_RENAME); 369 op_data->op_cli_flags & CLI_MIGRATE ?
370 &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME);
423 if (!req) { 371 if (!req) {
424 ldlm_lock_list_put(&cancels, l_bl_ast, count); 372 ldlm_lock_list_put(&cancels, l_bl_ast, count);
425 return -ENOMEM; 373 return -ENOMEM;
@@ -435,6 +383,23 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
435 return rc; 383 return rc;
436 } 384 }
437 385
386 if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) {
387 struct md_open_data *mod = op_data->op_data;
388
389 LASSERTF(mod->mod_open_req &&
390 mod->mod_open_req->rq_type != LI_POISON,
391 "POISONED open %p!\n", mod->mod_open_req);
392
393 DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
394 /*
395 * We no longer want to preserve this open for replay even
396 * though the open was committed. b=3632, b=3633
397 */
398 spin_lock(&mod->mod_open_req->rq_lock);
399 mod->mod_open_req->rq_replay = 0;
400 spin_unlock(&mod->mod_open_req->rq_lock);
401 }
402
438 if (exp_connect_cancelset(exp) && req) 403 if (exp_connect_cancelset(exp) && req)
439 ldlm_cli_cancel_list(&cancels, count, req, 0); 404 ldlm_cli_cancel_list(&cancels, count, req, 0);
440 405
@@ -442,11 +407,9 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
442 407
443 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 408 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
444 obd->u.cli.cl_default_mds_easize); 409 obd->u.cli.cl_default_mds_easize);
445 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
446 obd->u.cli.cl_default_mds_cookiesize);
447 ptlrpc_request_set_replen(req); 410 ptlrpc_request_set_replen(req);
448 411
449 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); 412 rc = mdc_reint(req, LUSTRE_IMP_FULL);
450 *request = req; 413 *request = req;
451 if (rc == -ERESTARTSYS) 414 if (rc == -ERESTARTSYS)
452 rc = 0; 415 rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index f56ea643f9bf..2cfd913f9bc5 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -38,15 +38,18 @@
38# include <linux/init.h> 38# include <linux/init.h>
39# include <linux/utsname.h> 39# include <linux/utsname.h>
40 40
41#include "../include/cl_object.h"
42#include "../include/llog_swab.h"
43#include "../include/lprocfs_status.h"
41#include "../include/lustre_acl.h" 44#include "../include/lustre_acl.h"
45#include "../include/lustre_fid.h"
42#include "../include/lustre/lustre_ioctl.h" 46#include "../include/lustre/lustre_ioctl.h"
43#include "../include/obd_class.h" 47#include "../include/lustre_kernelcomm.h"
44#include "../include/lustre_lmv.h" 48#include "../include/lustre_lmv.h"
45#include "../include/lustre_fid.h"
46#include "../include/lprocfs_status.h"
47#include "../include/lustre_param.h"
48#include "../include/lustre_log.h" 49#include "../include/lustre_log.h"
49#include "../include/lustre_kernelcomm.h" 50#include "../include/lustre_param.h"
51#include "../include/lustre_swab.h"
52#include "../include/obd_class.h"
50 53
51#include "mdc_internal.h" 54#include "mdc_internal.h"
52 55
@@ -327,12 +330,12 @@ static int mdc_xattr_common(struct obd_export *exp,
327 330
328 /* make rpc */ 331 /* make rpc */
329 if (opcode == MDS_REINT) 332 if (opcode == MDS_REINT)
330 mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); 333 mdc_get_mod_rpc_slot(req, NULL);
331 334
332 rc = ptlrpc_queue_wait(req); 335 rc = ptlrpc_queue_wait(req);
333 336
334 if (opcode == MDS_REINT) 337 if (opcode == MDS_REINT)
335 mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); 338 mdc_put_mod_rpc_slot(req, NULL);
336 339
337 if (rc) 340 if (rc)
338 ptlrpc_req_finished(req); 341 ptlrpc_req_finished(req);
@@ -420,9 +423,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
420 md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); 423 md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
421 424
422 if (md->body->mbo_valid & OBD_MD_FLEASIZE) { 425 if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
423 int lmmsize;
424 struct lov_mds_md *lmm;
425
426 if (!S_ISREG(md->body->mbo_mode)) { 426 if (!S_ISREG(md->body->mbo_mode)) {
427 CDEBUG(D_INFO, 427 CDEBUG(D_INFO,
428 "OBD_MD_FLEASIZE set, should be a regular file, but is not\n"); 428 "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
@@ -436,28 +436,18 @@ static int mdc_get_lustre_md(struct obd_export *exp,
436 rc = -EPROTO; 436 rc = -EPROTO;
437 goto out; 437 goto out;
438 } 438 }
439 lmmsize = md->body->mbo_eadatasize;
440 lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
441 if (!lmm) {
442 rc = -EPROTO;
443 goto out;
444 }
445
446 rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
447 if (rc < 0)
448 goto out;
449 439
450 if (rc < (typeof(rc))sizeof(*md->lsm)) { 440 md->layout.lb_len = md->body->mbo_eadatasize;
451 CDEBUG(D_INFO, 441 md->layout.lb_buf = req_capsule_server_sized_get(pill,
452 "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n", 442 &RMF_MDT_MD,
453 rc, (int)sizeof(*md->lsm)); 443 md->layout.lb_len);
444 if (!md->layout.lb_buf) {
454 rc = -EPROTO; 445 rc = -EPROTO;
455 goto out; 446 goto out;
456 } 447 }
457
458 } else if (md->body->mbo_valid & OBD_MD_FLDIREA) { 448 } else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
459 int lmvsize; 449 const union lmv_mds_md *lmv;
460 struct lov_mds_md *lmv; 450 size_t lmv_size;
461 451
462 if (!S_ISDIR(md->body->mbo_mode)) { 452 if (!S_ISDIR(md->body->mbo_mode)) {
463 CDEBUG(D_INFO, 453 CDEBUG(D_INFO,
@@ -466,22 +456,21 @@ static int mdc_get_lustre_md(struct obd_export *exp,
466 goto out; 456 goto out;
467 } 457 }
468 458
469 if (md->body->mbo_eadatasize == 0) { 459 lmv_size = md->body->mbo_eadatasize;
460 if (!lmv_size) {
470 CDEBUG(D_INFO, 461 CDEBUG(D_INFO,
471 "OBD_MD_FLDIREA is set, but eadatasize 0\n"); 462 "OBD_MD_FLDIREA is set, but eadatasize 0\n");
472 return -EPROTO; 463 return -EPROTO;
473 } 464 }
474 if (md->body->mbo_valid & OBD_MD_MEA) { 465 if (md->body->mbo_valid & OBD_MD_MEA) {
475 lmvsize = md->body->mbo_eadatasize;
476 lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD, 466 lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
477 lmvsize); 467 lmv_size);
478 if (!lmv) { 468 if (!lmv) {
479 rc = -EPROTO; 469 rc = -EPROTO;
480 goto out; 470 goto out;
481 } 471 }
482 472
483 rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv, 473 rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
484 lmvsize);
485 if (rc < 0) 474 if (rc < 0)
486 goto out; 475 goto out;
487 476
@@ -517,8 +506,6 @@ out:
517#ifdef CONFIG_FS_POSIX_ACL 506#ifdef CONFIG_FS_POSIX_ACL
518 posix_acl_release(md->posix_acl); 507 posix_acl_release(md->posix_acl);
519#endif 508#endif
520 if (md->lsm)
521 obd_free_memmd(dt_exp, &md->lsm);
522 } 509 }
523 return rc; 510 return rc;
524} 511}
@@ -528,10 +515,6 @@ static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
528 return 0; 515 return 0;
529} 516}
530 517
531/**
532 * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
533 * RPC chains.
534 */
535void mdc_replay_open(struct ptlrpc_request *req) 518void mdc_replay_open(struct ptlrpc_request *req)
536{ 519{
537 struct md_open_data *mod = req->rq_cb_data; 520 struct md_open_data *mod = req->rq_cb_data;
@@ -565,15 +548,15 @@ void mdc_replay_open(struct ptlrpc_request *req)
565 __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); 548 __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
566 struct mdt_ioepoch *epoch; 549 struct mdt_ioepoch *epoch;
567 550
568 LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING); 551 LASSERT(opc == MDS_CLOSE);
569 epoch = req_capsule_client_get(&close_req->rq_pill, 552 epoch = req_capsule_client_get(&close_req->rq_pill,
570 &RMF_MDT_EPOCH); 553 &RMF_MDT_EPOCH);
571 LASSERT(epoch); 554 LASSERT(epoch);
572 555
573 if (och) 556 if (och)
574 LASSERT(!memcmp(&old, &epoch->handle, sizeof(old))); 557 LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));
575 DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); 558 DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
576 epoch->handle = body->mbo_handle; 559 epoch->mio_handle = body->mbo_handle;
577 } 560 }
578} 561}
579 562
@@ -715,22 +698,6 @@ static int mdc_clear_open_replay_data(struct obd_export *exp,
715 return 0; 698 return 0;
716} 699}
717 700
718/* Prepares the request for the replay by the given reply */
719static void mdc_close_handle_reply(struct ptlrpc_request *req,
720 struct md_op_data *op_data, int rc) {
721 struct mdt_body *repbody;
722 struct mdt_ioepoch *epoch;
723
724 if (req && rc == -EAGAIN) {
725 repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
726 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
727
728 epoch->flags |= MF_SOM_AU;
729 if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
730 op_data->op_flags |= MF_GETATTR_LOCK;
731 }
732}
733
734static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, 701static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
735 struct md_open_data *mod, struct ptlrpc_request **request) 702 struct md_open_data *mod, struct ptlrpc_request **request)
736{ 703{
@@ -740,9 +707,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
740 int rc; 707 int rc;
741 int saved_rc = 0; 708 int saved_rc = 0;
742 709
743 req_fmt = &RQF_MDS_CLOSE;
744 if (op_data->op_bias & MDS_HSM_RELEASE) { 710 if (op_data->op_bias & MDS_HSM_RELEASE) {
745 req_fmt = &RQF_MDS_RELEASE_CLOSE; 711 req_fmt = &RQF_MDS_INTENT_CLOSE;
746 712
747 /* allocate a FID for volatile file */ 713 /* allocate a FID for volatile file */
748 rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); 714 rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
@@ -752,6 +718,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
752 /* save the errcode and proceed to close */ 718 /* save the errcode and proceed to close */
753 saved_rc = rc; 719 saved_rc = rc;
754 } 720 }
721 } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
722 req_fmt = &RQF_MDS_INTENT_CLOSE;
723 } else {
724 req_fmt = &RQF_MDS_CLOSE;
755 } 725 }
756 726
757 *request = NULL; 727 *request = NULL;
@@ -807,14 +777,12 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
807 777
808 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 778 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
809 obd->u.cli.cl_default_mds_easize); 779 obd->u.cli.cl_default_mds_easize);
810 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
811 obd->u.cli.cl_default_mds_cookiesize);
812 780
813 ptlrpc_request_set_replen(req); 781 ptlrpc_request_set_replen(req);
814 782
815 mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL); 783 mdc_get_mod_rpc_slot(req, NULL);
816 rc = ptlrpc_queue_wait(req); 784 rc = ptlrpc_queue_wait(req);
817 mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); 785 mdc_put_mod_rpc_slot(req, NULL);
818 786
819 if (!req->rq_repmsg) { 787 if (!req->rq_repmsg) {
820 CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, 788 CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
@@ -857,79 +825,9 @@ out:
857 obd_mod_put(mod); 825 obd_mod_put(mod);
858 } 826 }
859 *request = req; 827 *request = req;
860 mdc_close_handle_reply(req, op_data, rc);
861 return rc < 0 ? rc : saved_rc; 828 return rc < 0 ? rc : saved_rc;
862} 829}
863 830
864static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
865 struct md_open_data *mod)
866{
867 struct obd_device *obd = class_exp2obd(exp);
868 struct ptlrpc_request *req;
869 int rc;
870
871 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
872 &RQF_MDS_DONE_WRITING);
873 if (!req)
874 return -ENOMEM;
875
876 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
877 if (rc) {
878 ptlrpc_request_free(req);
879 return rc;
880 }
881
882 if (mod) {
883 LASSERTF(mod->mod_open_req &&
884 mod->mod_open_req->rq_type != LI_POISON,
885 "POISONED setattr %p!\n", mod->mod_open_req);
886
887 mod->mod_close_req = req;
888 DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
889 /* We no longer want to preserve this setattr for replay even
890 * though the open was committed. b=3632, b=3633
891 */
892 spin_lock(&mod->mod_open_req->rq_lock);
893 mod->mod_open_req->rq_replay = 0;
894 spin_unlock(&mod->mod_open_req->rq_lock);
895 }
896
897 mdc_close_pack(req, op_data);
898 ptlrpc_request_set_replen(req);
899
900 mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
901 rc = ptlrpc_queue_wait(req);
902 mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
903
904 if (rc == -ESTALE) {
905 /**
906 * it can be allowed error after 3633 if open or setattr were
907 * committed and server failed before close was sent.
908 * Let's check if mod exists and return no error in that case
909 */
910 if (mod) {
911 if (mod->mod_open_req->rq_committed)
912 rc = 0;
913 }
914 }
915
916 if (mod) {
917 if (rc != 0)
918 mod->mod_close_req = NULL;
919 LASSERT(mod->mod_open_req);
920 mdc_free_open(mod);
921
922 /* Since now, mod is accessed through setattr req only,
923 * thus DW req does not keep a reference on mod anymore.
924 */
925 obd_mod_put(mod);
926 }
927
928 mdc_close_handle_reply(req, op_data, rc);
929 ptlrpc_req_finished(req);
930 return rc;
931}
932
933static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid, 831static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
934 u64 offset, struct page **pages, int npages, 832 u64 offset, struct page **pages, int npages,
935 struct ptlrpc_request **request) 833 struct ptlrpc_request **request)
@@ -959,8 +857,10 @@ restart_bulk:
959 req->rq_request_portal = MDS_READPAGE_PORTAL; 857 req->rq_request_portal = MDS_READPAGE_PORTAL;
960 ptlrpc_at_set_req_timeout(req); 858 ptlrpc_at_set_req_timeout(req);
961 859
962 desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK, 860 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
963 MDS_BULK_PORTAL); 861 PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
862 MDS_BULK_PORTAL,
863 &ptlrpc_bulk_kiov_pin_ops);
964 if (!desc) { 864 if (!desc) {
965 ptlrpc_request_free(req); 865 ptlrpc_request_free(req);
966 return -ENOMEM; 866 return -ENOMEM;
@@ -968,7 +868,7 @@ restart_bulk:
968 868
969 /* NB req now owns desc and will free it when it gets freed */ 869 /* NB req now owns desc and will free it when it gets freed */
970 for (i = 0; i < npages; i++) 870 for (i = 0; i < npages; i++)
971 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); 871 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
972 872
973 mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid); 873 mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
974 874
@@ -1546,7 +1446,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
1546 /* Val is struct getinfo_fid2path result plus path */ 1446 /* Val is struct getinfo_fid2path result plus path */
1547 vallen = sizeof(*gf) + gf->gf_pathlen; 1447 vallen = sizeof(*gf) + gf->gf_pathlen;
1548 1448
1549 rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL); 1449 rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
1550 if (rc != 0 && rc != -EREMOTE) 1450 if (rc != 0 && rc != -EREMOTE)
1551 goto out; 1451 goto out;
1552 1452
@@ -1558,8 +1458,11 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
1558 goto out; 1458 goto out;
1559 } 1459 }
1560 1460
1561 CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n", 1461 CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n",
1562 PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path); 1462 PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
1463 gf->gf_pathlen < 512 ? gf->gf_path :
1464 /* only log the last 512 characters of the path */
1465 gf->gf_path + gf->gf_pathlen - 512);
1563 1466
1564out: 1467out:
1565 kfree(key); 1468 kfree(key);
@@ -1595,7 +1498,9 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
1595 1498
1596 ptlrpc_request_set_replen(req); 1499 ptlrpc_request_set_replen(req);
1597 1500
1598 rc = mdc_queue_wait(req); 1501 mdc_get_mod_rpc_slot(req, NULL);
1502 rc = ptlrpc_queue_wait(req);
1503 mdc_put_mod_rpc_slot(req, NULL);
1599out: 1504out:
1600 ptlrpc_req_finished(req); 1505 ptlrpc_req_finished(req);
1601 return rc; 1506 return rc;
@@ -1773,7 +1678,9 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
1773 1678
1774 ptlrpc_request_set_replen(req); 1679 ptlrpc_request_set_replen(req);
1775 1680
1776 rc = mdc_queue_wait(req); 1681 mdc_get_mod_rpc_slot(req, NULL);
1682 rc = ptlrpc_queue_wait(req);
1683 mdc_put_mod_rpc_slot(req, NULL);
1777out: 1684out:
1778 ptlrpc_req_finished(req); 1685 ptlrpc_req_finished(req);
1779 return rc; 1686 return rc;
@@ -1836,7 +1743,9 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
1836 1743
1837 ptlrpc_request_set_replen(req); 1744 ptlrpc_request_set_replen(req);
1838 1745
1839 rc = mdc_queue_wait(req); 1746 mdc_get_mod_rpc_slot(req, NULL);
1747 rc = ptlrpc_queue_wait(req);
1748 mdc_put_mod_rpc_slot(req, NULL);
1840out: 1749out:
1841 ptlrpc_req_finished(req); 1750 ptlrpc_req_finished(req);
1842 return rc; 1751 return rc;
@@ -1957,10 +1866,8 @@ static int mdc_changelog_send_thread(void *csdata)
1957 1866
1958 /* Send EOF no matter what our result */ 1867 /* Send EOF no matter what our result */
1959 kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags); 1868 kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
1960 if (kuch) { 1869 kuch->kuc_msgtype = CL_EOF;
1961 kuch->kuc_msgtype = CL_EOF; 1870 libcfs_kkuc_msg_put(cs->cs_fp, kuch);
1962 libcfs_kkuc_msg_put(cs->cs_fp, kuch);
1963 }
1964 1871
1965out: 1872out:
1966 fput(cs->cs_fp); 1873 fput(cs->cs_fp);
@@ -2015,52 +1922,6 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
2015static int mdc_ioc_hsm_ct_start(struct obd_export *exp, 1922static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
2016 struct lustre_kernelcomm *lk); 1923 struct lustre_kernelcomm *lk);
2017 1924
2018static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
2019 struct obd_quotactl *oqctl)
2020{
2021 struct client_obd *cli = &exp->exp_obd->u.cli;
2022 struct ptlrpc_request *req;
2023 struct obd_quotactl *body;
2024 int rc;
2025
2026 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
2027 &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
2028 MDS_QUOTACHECK);
2029 if (!req)
2030 return -ENOMEM;
2031
2032 body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
2033 *body = *oqctl;
2034
2035 ptlrpc_request_set_replen(req);
2036
2037 /* the next poll will find -ENODATA, that means quotacheck is
2038 * going on
2039 */
2040 cli->cl_qchk_stat = -ENODATA;
2041 rc = ptlrpc_queue_wait(req);
2042 if (rc)
2043 cli->cl_qchk_stat = rc;
2044 ptlrpc_req_finished(req);
2045 return rc;
2046}
2047
2048static int mdc_quota_poll_check(struct obd_export *exp,
2049 struct if_quotacheck *qchk)
2050{
2051 struct client_obd *cli = &exp->exp_obd->u.cli;
2052 int rc;
2053
2054 qchk->obd_uuid = cli->cl_target_uuid;
2055 memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
2056
2057 rc = cli->cl_qchk_stat;
2058 /* the client is not the previous one */
2059 if (rc == CL_NOT_QUOTACHECKED)
2060 rc = -EINTR;
2061 return rc;
2062}
2063
2064static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, 1925static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
2065 struct obd_quotactl *oqctl) 1926 struct obd_quotactl *oqctl)
2066{ 1927{
@@ -2215,9 +2076,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2215 case IOC_OSC_SET_ACTIVE: 2076 case IOC_OSC_SET_ACTIVE:
2216 rc = ptlrpc_set_import_active(imp, data->ioc_offset); 2077 rc = ptlrpc_set_import_active(imp, data->ioc_offset);
2217 goto out; 2078 goto out;
2218 case OBD_IOC_POLL_QUOTACHECK:
2219 rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
2220 goto out;
2221 case OBD_IOC_PING_TARGET: 2079 case OBD_IOC_PING_TARGET:
2222 rc = ptlrpc_obd_ping(obd); 2080 rc = ptlrpc_obd_ping(obd);
2223 goto out; 2081 goto out;
@@ -2528,8 +2386,7 @@ static int mdc_set_info_async(const struct lu_env *env,
2528} 2386}
2529 2387
2530static int mdc_get_info(const struct lu_env *env, struct obd_export *exp, 2388static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
2531 __u32 keylen, void *key, __u32 *vallen, void *val, 2389 __u32 keylen, void *key, __u32 *vallen, void *val)
2532 struct lov_stripe_md *lsm)
2533{ 2390{
2534 int rc = -EINVAL; 2391 int rc = -EINVAL;
2535 2392
@@ -2733,29 +2590,17 @@ static void mdc_llog_finish(struct obd_device *obd)
2733 2590
2734static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg) 2591static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2735{ 2592{
2736 struct client_obd *cli = &obd->u.cli;
2737 struct lprocfs_static_vars lvars = { NULL }; 2593 struct lprocfs_static_vars lvars = { NULL };
2738 int rc; 2594 int rc;
2739 2595
2740 cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS);
2741 if (!cli->cl_rpc_lock)
2742 return -ENOMEM;
2743 mdc_init_rpc_lock(cli->cl_rpc_lock);
2744
2745 rc = ptlrpcd_addref(); 2596 rc = ptlrpcd_addref();
2746 if (rc < 0) 2597 if (rc < 0)
2747 goto err_rpc_lock; 2598 return rc;
2748
2749 cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
2750 if (!cli->cl_close_lock) {
2751 rc = -ENOMEM;
2752 goto err_ptlrpcd_decref;
2753 }
2754 mdc_init_rpc_lock(cli->cl_close_lock);
2755 2599
2756 rc = client_obd_setup(obd, cfg); 2600 rc = client_obd_setup(obd, cfg);
2757 if (rc) 2601 if (rc)
2758 goto err_close_lock; 2602 goto err_ptlrpcd_decref;
2603
2759 lprocfs_mdc_init_vars(&lvars); 2604 lprocfs_mdc_init_vars(&lvars);
2760 lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); 2605 lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
2761 sptlrpc_lprocfs_cliobd_attach(obd); 2606 sptlrpc_lprocfs_cliobd_attach(obd);
@@ -2769,29 +2614,25 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2769 if (rc) { 2614 if (rc) {
2770 mdc_cleanup(obd); 2615 mdc_cleanup(obd);
2771 CERROR("failed to setup llogging subsystems\n"); 2616 CERROR("failed to setup llogging subsystems\n");
2617 return rc;
2772 } 2618 }
2773 2619
2774 return rc; 2620 return rc;
2775 2621
2776err_close_lock:
2777 kfree(cli->cl_close_lock);
2778err_ptlrpcd_decref: 2622err_ptlrpcd_decref:
2779 ptlrpcd_decref(); 2623 ptlrpcd_decref();
2780err_rpc_lock:
2781 kfree(cli->cl_rpc_lock);
2782 return rc; 2624 return rc;
2783} 2625}
2784 2626
2785/* Initialize the default and maximum LOV EA and cookie sizes. This allows 2627/* Initialize the default and maximum LOV EA sizes. This allows
2786 * us to make MDS RPCs with large enough reply buffers to hold a default 2628 * us to make MDS RPCs with large enough reply buffers to hold a default
2787 * sized EA and cookie without having to calculate this (via a call into the 2629 * sized EA without having to calculate this (via a call into the
2788 * LOV + OSCs) each time we make an RPC. The maximum size is also tracked 2630 * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
2789 * but not used to avoid wastefully vmalloc()'ing large reply buffers when 2631 * but not used to avoid wastefully vmalloc()'ing large reply buffers when
2790 * a large number of stripes is possible. If a larger reply buffer is 2632 * a large number of stripes is possible. If a larger reply buffer is
2791 * required it will be reallocated in the ptlrpc layer due to overflow. 2633 * required it will be reallocated in the ptlrpc layer due to overflow.
2792 */ 2634 */
2793static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, 2635static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
2794 u32 cookiesize, u32 def_cookiesize)
2795{ 2636{
2796 struct obd_device *obd = exp->exp_obd; 2637 struct obd_device *obd = exp->exp_obd;
2797 struct client_obd *cli = &obd->u.cli; 2638 struct client_obd *cli = &obd->u.cli;
@@ -2802,42 +2643,24 @@ static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
2802 if (cli->cl_default_mds_easize < def_easize) 2643 if (cli->cl_default_mds_easize < def_easize)
2803 cli->cl_default_mds_easize = def_easize; 2644 cli->cl_default_mds_easize = def_easize;
2804 2645
2805 if (cli->cl_max_mds_cookiesize < cookiesize)
2806 cli->cl_max_mds_cookiesize = cookiesize;
2807
2808 if (cli->cl_default_mds_cookiesize < def_cookiesize)
2809 cli->cl_default_mds_cookiesize = def_cookiesize;
2810
2811 return 0; 2646 return 0;
2812} 2647}
2813 2648
2814static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) 2649static int mdc_precleanup(struct obd_device *obd)
2815{ 2650{
2816 switch (stage) { 2651 /* Failsafe, ok if racy */
2817 case OBD_CLEANUP_EARLY: 2652 if (obd->obd_type->typ_refcnt <= 1)
2818 break; 2653 libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
2819 case OBD_CLEANUP_EXPORTS:
2820 /* Failsafe, ok if racy */
2821 if (obd->obd_type->typ_refcnt <= 1)
2822 libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
2823 2654
2824 obd_cleanup_client_import(obd); 2655 obd_cleanup_client_import(obd);
2825 ptlrpc_lprocfs_unregister_obd(obd); 2656 ptlrpc_lprocfs_unregister_obd(obd);
2826 lprocfs_obd_cleanup(obd); 2657 lprocfs_obd_cleanup(obd);
2827 2658 mdc_llog_finish(obd);
2828 mdc_llog_finish(obd);
2829 break;
2830 }
2831 return 0; 2659 return 0;
2832} 2660}
2833 2661
2834static int mdc_cleanup(struct obd_device *obd) 2662static int mdc_cleanup(struct obd_device *obd)
2835{ 2663{
2836 struct client_obd *cli = &obd->u.cli;
2837
2838 kfree(cli->cl_rpc_lock);
2839 kfree(cli->cl_close_lock);
2840
2841 ptlrpcd_decref(); 2664 ptlrpcd_decref();
2842 2665
2843 return client_obd_cleanup(obd); 2666 return client_obd_cleanup(obd);
@@ -2881,7 +2704,6 @@ static struct obd_ops mdc_obd_ops = {
2881 .process_config = mdc_process_config, 2704 .process_config = mdc_process_config,
2882 .get_uuid = mdc_get_uuid, 2705 .get_uuid = mdc_get_uuid,
2883 .quotactl = mdc_quotactl, 2706 .quotactl = mdc_quotactl,
2884 .quotacheck = mdc_quotacheck
2885}; 2707};
2886 2708
2887static struct md_ops mdc_md_ops = { 2709static struct md_ops mdc_md_ops = {
@@ -2889,7 +2711,6 @@ static struct md_ops mdc_md_ops = {
2889 .null_inode = mdc_null_inode, 2711 .null_inode = mdc_null_inode,
2890 .close = mdc_close, 2712 .close = mdc_close,
2891 .create = mdc_create, 2713 .create = mdc_create,
2892 .done_writing = mdc_done_writing,
2893 .enqueue = mdc_enqueue, 2714 .enqueue = mdc_enqueue,
2894 .getattr = mdc_getattr, 2715 .getattr = mdc_getattr,
2895 .getattr_name = mdc_getattr_name, 2716 .getattr_name = mdc_getattr_name,
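
The error paths in mdc_setup() above follow the usual goto-based unwinding idiom: resources are released in reverse order of acquisition, and each failure point jumps to the label that frees everything acquired so far. A minimal user-space sketch of the idiom, with hypothetical resource names standing in for the ptlrpcd reference and rpc-lock allocations, could look like this:

	#include <errno.h>
	#include <stdlib.h>

	/* Hypothetical resources; they stand in for the ptlrpcd reference
	 * and the rpc-lock allocation in mdc_setup(). */
	struct res_a { int dummy; };
	struct res_b { int dummy; };

	static int setup(struct res_a **pa, struct res_b **pb)
	{
		struct res_a *a;
		struct res_b *b;
		int rc;

		a = calloc(1, sizeof(*a));
		if (!a)
			return -ENOMEM;		/* nothing acquired yet, plain return */

		b = calloc(1, sizeof(*b));
		if (!b) {
			rc = -ENOMEM;
			goto err_free_a;	/* unwind in reverse acquisition order */
		}

		/* later failure points would add more labels above err_free_a */

		*pa = a;
		*pb = b;
		return 0;

	err_free_a:
		free(a);
		return rc;
	}

With the cl_close_lock and cl_rpc_lock allocations gone, the err_close_lock and err_rpc_lock labels disappear and a client_obd_setup() failure jumps straight to err_ptlrpcd_decref, which is exactly the simplification the hunk above makes.
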
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 23374cae5133..b9c522a3c7a4 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -38,11 +38,13 @@
38#define D_MGC D_CONFIG /*|D_WARNING*/ 38#define D_MGC D_CONFIG /*|D_WARNING*/
39 39
40#include <linux/module.h> 40#include <linux/module.h>
41#include "../include/obd_class.h" 41
42#include "../include/lustre_dlm.h"
43#include "../include/lprocfs_status.h" 42#include "../include/lprocfs_status.h"
44#include "../include/lustre_log.h" 43#include "../include/lustre_dlm.h"
45#include "../include/lustre_disk.h" 44#include "../include/lustre_disk.h"
45#include "../include/lustre_log.h"
46#include "../include/lustre_swab.h"
47#include "../include/obd_class.h"
46 48
47#include "mgc_internal.h" 49#include "mgc_internal.h"
48 50
@@ -373,7 +375,7 @@ out_err:
373 return rc; 375 return rc;
374} 376}
375 377
376DEFINE_MUTEX(llog_process_lock); 378static DEFINE_MUTEX(llog_process_lock);
377 379
378/** Stop watching for updates on this log. 380/** Stop watching for updates on this log.
379 */ 381 */
@@ -684,35 +686,33 @@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
684} 686}
685 687
686static atomic_t mgc_count = ATOMIC_INIT(0); 688static atomic_t mgc_count = ATOMIC_INIT(0);
687static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) 689static int mgc_precleanup(struct obd_device *obd)
688{ 690{
689 int rc = 0; 691 int rc = 0;
690 int temp; 692 int temp;
691 693
692 switch (stage) { 694 if (atomic_dec_and_test(&mgc_count)) {
693 case OBD_CLEANUP_EARLY: 695 LASSERT(rq_state & RQ_RUNNING);
694 break; 696 /* stop requeue thread */
695 case OBD_CLEANUP_EXPORTS: 697 temp = RQ_STOP;
696 if (atomic_dec_and_test(&mgc_count)) { 698 } else {
697 LASSERT(rq_state & RQ_RUNNING); 699 /* wakeup requeue thread to clean our cld */
698 /* stop requeue thread */ 700 temp = RQ_NOW | RQ_PRECLEANUP;
699 temp = RQ_STOP;
700 } else {
701 /* wakeup requeue thread to clean our cld */
702 temp = RQ_NOW | RQ_PRECLEANUP;
703 }
704 spin_lock(&config_list_lock);
705 rq_state |= temp;
706 spin_unlock(&config_list_lock);
707 wake_up(&rq_waitq);
708 if (temp & RQ_STOP)
709 wait_for_completion(&rq_exit);
710 obd_cleanup_client_import(obd);
711 rc = mgc_llog_fini(NULL, obd);
712 if (rc != 0)
713 CERROR("failed to cleanup llogging subsystems\n");
714 break;
715 } 701 }
702
703 spin_lock(&config_list_lock);
704 rq_state |= temp;
705 spin_unlock(&config_list_lock);
706 wake_up(&rq_waitq);
707
708 if (temp & RQ_STOP)
709 wait_for_completion(&rq_exit);
710 obd_cleanup_client_import(obd);
711
712 rc = mgc_llog_fini(NULL, obd);
713 if (rc)
714 CERROR("failed to cleanup llogging subsystems\n");
715
716 return rc; 716 return rc;
717} 717}
718 718
@@ -887,8 +887,8 @@ static int mgc_set_mgs_param(struct obd_export *exp,
887} 887}
888 888
889/* Take a config lock so we can get cancel notifications */ 889/* Take a config lock so we can get cancel notifications */
890static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, 890static int mgc_enqueue(struct obd_export *exp, __u32 type,
891 __u32 type, ldlm_policy_data_t *policy, __u32 mode, 891 union ldlm_policy_data *policy, __u32 mode,
892 __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb, 892 __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
893 void *data, __u32 lvb_len, void *lvb_swabber, 893 void *data, __u32 lvb_len, void *lvb_swabber,
894 struct lustre_handle *lockh) 894 struct lustre_handle *lockh)
@@ -1059,8 +1059,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
1059} 1059}
1060 1060
1061static int mgc_get_info(const struct lu_env *env, struct obd_export *exp, 1061static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
1062 __u32 keylen, void *key, __u32 *vallen, void *val, 1062 __u32 keylen, void *key, __u32 *vallen, void *val)
1063 struct lov_stripe_md *unused)
1064{ 1063{
1065 int rc = -EINVAL; 1064 int rc = -EINVAL;
1066 1065
@@ -1387,15 +1386,17 @@ again:
1387 body->mcb_units = nrpages; 1386 body->mcb_units = nrpages;
1388 1387
1389 /* allocate bulk transfer descriptor */ 1388 /* allocate bulk transfer descriptor */
1390 desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, 1389 desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
1391 MGS_BULK_PORTAL); 1390 PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
1391 MGS_BULK_PORTAL,
1392 &ptlrpc_bulk_kiov_pin_ops);
1392 if (!desc) { 1393 if (!desc) {
1393 rc = -ENOMEM; 1394 rc = -ENOMEM;
1394 goto out; 1395 goto out;
1395 } 1396 }
1396 1397
1397 for (i = 0; i < nrpages; i++) 1398 for (i = 0; i < nrpages; i++)
1398 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); 1399 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
1399 1400
1400 ptlrpc_request_set_replen(req); 1401 ptlrpc_request_set_replen(req);
1401 rc = ptlrpc_queue_wait(req); 1402 rc = ptlrpc_queue_wait(req);
@@ -1553,14 +1554,52 @@ out_free:
1553 return rc; 1554 return rc;
1554} 1555}
1555 1556
1556/** Get a config log from the MGS and process it. 1557static bool mgc_import_in_recovery(struct obd_import *imp)
1557 * This func is called for both clients and servers. 1558{
1558 * Copy the log locally before parsing it if appropriate (non-MGS server) 1559 bool in_recovery = true;
1560
1561 spin_lock(&imp->imp_lock);
1562 if (imp->imp_state == LUSTRE_IMP_FULL ||
1563 imp->imp_state == LUSTRE_IMP_CLOSED)
1564 in_recovery = false;
1565 spin_unlock(&imp->imp_lock);
1566
1567 return in_recovery;
1568}
1569
1570/**
1571 * Get a configuration log from the MGS and process it.
1572 *
1573 * This function is called for both clients and servers to process the
1574 * configuration log from the MGS. The MGC enqueues a DLM lock on the
1575 * log from the MGS, and if the lock gets revoked the MGC will be notified
1576 * by the lock cancellation callback that the config log has changed,
1577 * and will enqueue another MGS lock on it, and then continue processing
1578 * the new additions to the end of the log.
1579 *
1580 * Since the MGC import is not replayable, if the import is being evicted
1581 * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process
1582 * the log until recovery is finished or the import is closed.
1583 *
1584 * Make a local copy of the log before parsing it if appropriate (non-MGS
1585 * server) so that the server can start even when the MGS is down.
1586 *
1587 * There shouldn't be multiple processes running process_log at once --
1588 * sounds like badness. It actually might be fine, as long as they're not
1589 * trying to update from the same log simultaneously, in which case we
1590 * should use a per-log semaphore instead of cld_lock.
1591 *
1592 * \param[in] mgc MGC device by which to fetch the configuration log
1593 * \param[in] cld log processing state (stored in lock callback data)
1594 *
1595 * \retval 0 on success
1596 * \retval negative errno on failure
1559 */ 1597 */
1560int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) 1598int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
1561{ 1599{
1562 struct lustre_handle lockh = { 0 }; 1600 struct lustre_handle lockh = { 0 };
1563 __u64 flags = LDLM_FL_NO_LRU; 1601 __u64 flags = LDLM_FL_NO_LRU;
1602 bool retry = false;
1564 int rc = 0, rcl; 1603 int rc = 0, rcl;
1565 1604
1566 LASSERT(cld); 1605 LASSERT(cld);
@@ -1570,6 +1609,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
1570 * we're not trying to update from the same log 1609 * we're not trying to update from the same log
1571 * simultaneously (in which case we should use a per-log sem.) 1610 * simultaneously (in which case we should use a per-log sem.)
1572 */ 1611 */
1612restart:
1573 mutex_lock(&cld->cld_lock); 1613 mutex_lock(&cld->cld_lock);
1574 if (cld->cld_stopping) { 1614 if (cld->cld_stopping) {
1575 mutex_unlock(&cld->cld_lock); 1615 mutex_unlock(&cld->cld_lock);
@@ -1582,7 +1622,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
1582 cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1); 1622 cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
1583 1623
1584 /* Get the cfg lock on the llog */ 1624 /* Get the cfg lock on the llog */
1585 rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL, 1625 rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
1586 LCK_CR, &flags, NULL, NULL, NULL, 1626 LCK_CR, &flags, NULL, NULL, NULL,
1587 cld, 0, NULL, &lockh); 1627 cld, 0, NULL, &lockh);
1588 if (rcl == 0) { 1628 if (rcl == 0) {
@@ -1593,18 +1633,57 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
1593 } else { 1633 } else {
1594 CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); 1634 CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
1595 1635
1596 /* mark cld_lostlock so that it will requeue 1636 if (rcl == -ESHUTDOWN &&
1597 * after MGC becomes available. 1637 atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
1598 */ 1638 int secs = cfs_time_seconds(obd_timeout);
1599 cld->cld_lostlock = 1; 1639 struct obd_import *imp;
1640 struct l_wait_info lwi;
1641
1642 mutex_unlock(&cld->cld_lock);
1643 imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
1644
1645 /*
 1646 * Let's force the pinger and wait for the import to be
 1647 * connected; note that since the mgc import is non-replayable,
 1648 * a disconnected import state does not mean that
 1649 * "recovery" has stopped, so we keep
 1650 * waiting until the timeout expires or the import state is
 1651 * FULL or closed
1652 */
1653 ptlrpc_pinger_force(imp);
1654
1655 lwi = LWI_TIMEOUT(secs, NULL, NULL);
1656 l_wait_event(imp->imp_recovery_waitq,
1657 !mgc_import_in_recovery(imp), &lwi);
1658
1659 if (imp->imp_state == LUSTRE_IMP_FULL) {
1660 retry = true;
1661 goto restart;
1662 } else {
1663 mutex_lock(&cld->cld_lock);
1664 cld->cld_lostlock = 1;
1665 }
1666 } else {
1667 /* mark cld_lostlock so that it will requeue
1668 * after MGC becomes available.
1669 */
1670 cld->cld_lostlock = 1;
1671 }
1600 /* Get extra reference, it will be put in requeue thread */ 1672 /* Get extra reference, it will be put in requeue thread */
1601 config_log_get(cld); 1673 config_log_get(cld);
1602 } 1674 }
1603 1675
1604 if (cld_is_recover(cld)) { 1676 if (cld_is_recover(cld)) {
1605 rc = 0; /* this is not a fatal error for recover log */ 1677 rc = 0; /* this is not a fatal error for recover log */
1606 if (rcl == 0) 1678 if (!rcl) {
1607 rc = mgc_process_recover_log(mgc, cld); 1679 rc = mgc_process_recover_log(mgc, cld);
1680 if (rc) {
1681 CERROR("%s: recover log %s failed: rc = %d not fatal.\n",
1682 mgc->obd_name, cld->cld_logname, rc);
1683 rc = 0;
1684 cld->cld_lostlock = 1;
1685 }
1686 }
1608 } else { 1687 } else {
1609 rc = mgc_process_cfg_log(mgc, cld, rcl != 0); 1688 rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
1610 } 1689 }
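
The new retry logic in mgc_process_log() boils down to: if the enqueue fails because the import is being evicted, force a reconnect, wait for recovery to finish, and retry the whole enqueue exactly once. A self-contained sketch of that control flow, with hypothetical helpers standing in for mgc_enqueue(), ptlrpc_pinger_force() and the imp_recovery_waitq wait, might look like:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int attempts;

	/* Stand-in for mgc_enqueue(): fail with -ESHUTDOWN on the first
	 * attempt (import being evicted), succeed afterwards. */
	static int try_get_config_lock(void)
	{
		return attempts++ ? 0 : -ESHUTDOWN;
	}

	/* Stand-in for ptlrpc_pinger_force() plus waiting on the recovery
	 * waitqueue; pretend the import reconnects within the timeout. */
	static bool force_reconnect_and_wait(void)
	{
		return true;
	}

	static int process_log(void)
	{
		bool retried = false;
		int rcl;

	restart:
		rcl = try_get_config_lock();
		if (rcl == -ESHUTDOWN && !retried) {
			/* import is being evicted: reconnect, wait, retry once */
			if (force_reconnect_and_wait()) {
				retried = true;
				goto restart;
			}
			/* still not connected: fall through and remember the lost lock */
		}
		/* rcl != 0 here means "process a locally saved copy of the log" */
		return rcl;
	}

	int main(void)
	{
		printf("process_log() -> %d\n", process_log());
		return 0;
	}

The one-shot retried flag matches the retry local in the hunk above and keeps the function from looping forever if the import never reaches LUSTRE_IMP_FULL.
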
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index b42e109b30e0..af570c0db15b 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_LUSTRE_FS) += obdclass.o 1obj-$(CONFIG_LUSTRE_FS) += obdclass.o
2 2
3obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \ 3obdclass-y := linux/linux-module.o linux/linux-sysctl.o \
4 llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ 4 llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
5 genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ 5 genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
6 lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \ 6 lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index e866754a42d5..7b403fbd5f94 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -50,25 +50,6 @@ enum clt_nesting_level {
50}; 50};
51 51
52/** 52/**
53 * Counters used to check correctness of cl_lock interface usage.
54 */
55struct cl_thread_counters {
56 /**
57 * Number of outstanding calls to cl_lock_mutex_get() made by the
58 * current thread. For debugging.
59 */
60 int ctc_nr_locks_locked;
61 /** List of locked locks. */
62 struct lu_ref ctc_locks_locked;
63 /** Number of outstanding holds on locks. */
64 int ctc_nr_held;
65 /** Number of outstanding uses on locks. */
66 int ctc_nr_used;
67 /** Number of held extent locks. */
68 int ctc_nr_locks_acquired;
69};
70
71/**
72 * Thread local state internal for generic cl-code. 53 * Thread local state internal for generic cl-code.
73 */ 54 */
74struct cl_thread_info { 55struct cl_thread_info {
@@ -83,10 +64,6 @@ struct cl_thread_info {
83 */ 64 */
84 struct cl_lock_descr clt_descr; 65 struct cl_lock_descr clt_descr;
85 struct cl_page_list clt_list; 66 struct cl_page_list clt_list;
86 /**
87 * Counters for every level of lock nesting.
88 */
89 struct cl_thread_counters clt_counters[CNL_NR];
90 /** @} debugging */ 67 /** @} debugging */
91 68
92 /* 69 /*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index bc4b7b6b9a20..3f42457b0d7d 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -126,6 +126,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
126 switch (io->ci_type) { 126 switch (io->ci_type) {
127 case CIT_READ: 127 case CIT_READ:
128 case CIT_WRITE: 128 case CIT_WRITE:
129 case CIT_DATA_VERSION:
129 break; 130 break;
130 case CIT_FAULT: 131 case CIT_FAULT:
131 break; 132 break;
@@ -411,7 +412,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
411 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); 412 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
412 } 413 }
413 io->ci_state = CIS_UNLOCKED; 414 io->ci_state = CIS_UNLOCKED;
414 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
415} 415}
416EXPORT_SYMBOL(cl_io_unlock); 416EXPORT_SYMBOL(cl_io_unlock);
417 417
@@ -586,67 +586,32 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
586} 586}
587EXPORT_SYMBOL(cl_io_end); 587EXPORT_SYMBOL(cl_io_end);
588 588
589static const struct cl_page_slice *
590cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
591{
592 const struct cl_page_slice *slice;
593
594 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
595 LINVRNT(slice);
596 return slice;
597}
598
599/** 589/**
600 * Called by read io, when page has to be read from the server. 590 * Called by read io, to decide the readahead extent
601 * 591 *
602 * \see cl_io_operations::cio_read_page() 592 * \see cl_io_operations::cio_read_ahead()
603 */ 593 */
604int cl_io_read_page(const struct lu_env *env, struct cl_io *io, 594int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
605 struct cl_page *page) 595 pgoff_t start, struct cl_read_ahead *ra)
606{ 596{
607 const struct cl_io_slice *scan; 597 const struct cl_io_slice *scan;
608 struct cl_2queue *queue;
609 int result = 0; 598 int result = 0;
610 599
611 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT); 600 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
612 LINVRNT(cl_page_is_owned(page, io));
613 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); 601 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
614 LINVRNT(cl_io_invariant(io)); 602 LINVRNT(cl_io_invariant(io));
615 603
616 queue = &io->ci_queue;
617
618 cl_2queue_init(queue);
619 /*
620 * ->cio_read_page() methods called in the loop below are supposed to
621 * never block waiting for network (the only subtle point is the
622 * creation of new pages for read-ahead that might result in cache
623 * shrinking, but currently only clean pages are shrunk and this
624 * requires no network io).
625 *
626 * Should this ever starts blocking, retry loop would be needed for
627 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
628 */
629 cl_io_for_each(scan, io) { 604 cl_io_for_each(scan, io) {
630 if (scan->cis_iop->cio_read_page) { 605 if (!scan->cis_iop->cio_read_ahead)
631 const struct cl_page_slice *slice; 606 continue;
632 607
633 slice = cl_io_slice_page(scan, page); 608 result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
634 LINVRNT(slice); 609 if (result)
635 result = scan->cis_iop->cio_read_page(env, scan, slice); 610 break;
636 if (result != 0)
637 break;
638 }
639 } 611 }
640 if (result == 0 && queue->c2_qin.pl_nr > 0) 612 return result > 0 ? 0 : result;
641 result = cl_io_submit_rw(env, io, CRT_READ, queue);
642 /*
643 * Unlock unsent pages in case of error.
644 */
645 cl_page_list_disown(env, io, &queue->c2_qin);
646 cl_2queue_fini(env, queue);
647 return result;
648} 613}
649EXPORT_SYMBOL(cl_io_read_page); 614EXPORT_SYMBOL(cl_io_read_ahead);
650 615
651/** 616/**
652 * Commit a list of contiguous pages into writeback cache. 617 * Commit a list of contiguous pages into writeback cache.
@@ -1080,235 +1045,18 @@ struct cl_io *cl_io_top(struct cl_io *io)
1080EXPORT_SYMBOL(cl_io_top); 1045EXPORT_SYMBOL(cl_io_top);
1081 1046
1082/** 1047/**
1083 * Adds request slice to the compound request.
1084 *
1085 * This is called by cl_device_operations::cdo_req_init() methods to add a
1086 * per-layer state to the request. New state is added at the end of
1087 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1088 *
1089 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1090 */
1091void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1092 struct cl_device *dev,
1093 const struct cl_req_operations *ops)
1094{
1095 list_add_tail(&slice->crs_linkage, &req->crq_layers);
1096 slice->crs_dev = dev;
1097 slice->crs_ops = ops;
1098 slice->crs_req = req;
1099}
1100EXPORT_SYMBOL(cl_req_slice_add);
1101
1102static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1103{
1104 unsigned i;
1105
1106 LASSERT(list_empty(&req->crq_pages));
1107 LASSERT(req->crq_nrpages == 0);
1108 LINVRNT(list_empty(&req->crq_layers));
1109 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
1110
1111 if (req->crq_o) {
1112 for (i = 0; i < req->crq_nrobjs; ++i) {
1113 struct cl_object *obj = req->crq_o[i].ro_obj;
1114
1115 if (obj) {
1116 lu_object_ref_del_at(&obj->co_lu,
1117 &req->crq_o[i].ro_obj_ref,
1118 "cl_req", req);
1119 cl_object_put(env, obj);
1120 }
1121 }
1122 kfree(req->crq_o);
1123 }
1124 kfree(req);
1125}
1126
1127static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1128 struct cl_page *page)
1129{
1130 struct cl_device *dev;
1131 struct cl_page_slice *slice;
1132 int result;
1133
1134 result = 0;
1135 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1136 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1137 if (dev->cd_ops->cdo_req_init) {
1138 result = dev->cd_ops->cdo_req_init(env, dev, req);
1139 if (result != 0)
1140 break;
1141 }
1142 }
1143 return result;
1144}
1145
1146/**
1147 * Invokes per-request transfer completion call-backs
1148 * (cl_req_operations::cro_completion()) bottom-to-top.
1149 */
1150void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1151{
1152 struct cl_req_slice *slice;
1153
1154 /*
1155 * for the lack of list_for_each_entry_reverse_safe()...
1156 */
1157 while (!list_empty(&req->crq_layers)) {
1158 slice = list_entry(req->crq_layers.prev,
1159 struct cl_req_slice, crs_linkage);
1160 list_del_init(&slice->crs_linkage);
1161 if (slice->crs_ops->cro_completion)
1162 slice->crs_ops->cro_completion(env, slice, rc);
1163 }
1164 cl_req_free(env, req);
1165}
1166EXPORT_SYMBOL(cl_req_completion);
1167
1168/**
1169 * Allocates new transfer request.
1170 */
1171struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1172 enum cl_req_type crt, int nr_objects)
1173{
1174 struct cl_req *req;
1175
1176 LINVRNT(nr_objects > 0);
1177
1178 req = kzalloc(sizeof(*req), GFP_NOFS);
1179 if (req) {
1180 int result;
1181
1182 req->crq_type = crt;
1183 INIT_LIST_HEAD(&req->crq_pages);
1184 INIT_LIST_HEAD(&req->crq_layers);
1185
1186 req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
1187 GFP_NOFS);
1188 if (req->crq_o) {
1189 req->crq_nrobjs = nr_objects;
1190 result = cl_req_init(env, req, page);
1191 } else {
1192 result = -ENOMEM;
1193 }
1194 if (result != 0) {
1195 cl_req_completion(env, req, result);
1196 req = ERR_PTR(result);
1197 }
1198 } else {
1199 req = ERR_PTR(-ENOMEM);
1200 }
1201 return req;
1202}
1203EXPORT_SYMBOL(cl_req_alloc);
1204
1205/**
1206 * Adds a page to a request.
1207 */
1208void cl_req_page_add(const struct lu_env *env,
1209 struct cl_req *req, struct cl_page *page)
1210{
1211 struct cl_object *obj;
1212 struct cl_req_obj *rqo;
1213 unsigned int i;
1214
1215 LASSERT(list_empty(&page->cp_flight));
1216 LASSERT(!page->cp_req);
1217
1218 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1219 req, req->crq_type, req->crq_nrpages);
1220
1221 list_add_tail(&page->cp_flight, &req->crq_pages);
1222 ++req->crq_nrpages;
1223 page->cp_req = req;
1224 obj = cl_object_top(page->cp_obj);
1225 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1226 if (!rqo->ro_obj) {
1227 rqo->ro_obj = obj;
1228 cl_object_get(obj);
1229 lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
1230 "cl_req", req);
1231 break;
1232 }
1233 }
1234 LASSERT(i < req->crq_nrobjs);
1235}
1236EXPORT_SYMBOL(cl_req_page_add);
1237
1238/**
1239 * Removes a page from a request.
1240 */
1241void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1242{
1243 struct cl_req *req = page->cp_req;
1244
1245 LASSERT(!list_empty(&page->cp_flight));
1246 LASSERT(req->crq_nrpages > 0);
1247
1248 list_del_init(&page->cp_flight);
1249 --req->crq_nrpages;
1250 page->cp_req = NULL;
1251}
1252EXPORT_SYMBOL(cl_req_page_done);
1253
1254/**
1255 * Notifies layers that request is about to depart by calling
1256 * cl_req_operations::cro_prep() top-to-bottom.
1257 */
1258int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1259{
1260 unsigned int i;
1261 int result;
1262 const struct cl_req_slice *slice;
1263
1264 /*
1265 * Check that the caller of cl_req_alloc() didn't lie about the number
1266 * of objects.
1267 */
1268 for (i = 0; i < req->crq_nrobjs; ++i)
1269 LASSERT(req->crq_o[i].ro_obj);
1270
1271 result = 0;
1272 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1273 if (slice->crs_ops->cro_prep) {
1274 result = slice->crs_ops->cro_prep(env, slice);
1275 if (result != 0)
1276 break;
1277 }
1278 }
1279 return result;
1280}
1281EXPORT_SYMBOL(cl_req_prep);
1282
1283/**
1284 * Fills in attributes that are passed to server together with transfer. Only 1048 * Fills in attributes that are passed to server together with transfer. Only
1285 * attributes from \a flags may be touched. This can be called multiple times 1049 * attributes from \a flags may be touched. This can be called multiple times
1286 * for the same request. 1050 * for the same request.
1287 */ 1051 */
1288void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, 1052void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
1289 struct cl_req_attr *attr, u64 flags) 1053 struct cl_req_attr *attr)
1290{ 1054{
1291 const struct cl_req_slice *slice; 1055 struct cl_object *scan;
1292 struct cl_page *page; 1056
1293 unsigned int i; 1057 cl_object_for_each(scan, obj) {
1294 1058 if (scan->co_ops->coo_req_attr_set)
1295 LASSERT(!list_empty(&req->crq_pages)); 1059 scan->co_ops->coo_req_attr_set(env, scan, attr);
1296
1297 /* Take any page to use as a model. */
1298 page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1299
1300 for (i = 0; i < req->crq_nrobjs; ++i) {
1301 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1302 const struct cl_page_slice *scan;
1303 const struct cl_object *obj;
1304
1305 scan = cl_page_at(page,
1306 slice->crs_dev->cd_lu_dev.ld_type);
1307 obj = scan->cpl_obj;
1308 if (slice->crs_ops->cro_attr_set)
1309 slice->crs_ops->cro_attr_set(env, slice, obj,
1310 attr + i, flags);
1311 }
1312 } 1060 }
1313} 1061}
1314EXPORT_SYMBOL(cl_req_attr_set); 1062EXPORT_SYMBOL(cl_req_attr_set);
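
Both cl_io_read_ahead() and the reworked cl_req_attr_set() walk a stack of layers and invoke an optional per-layer hook, skipping layers that do not implement it and stopping at the first layer that returns non-zero. A stripped-down sketch of that dispatch pattern, using hypothetical names rather than the real cl_io_slice/cl_object types, is:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical layer descriptor mirroring the "slice with an
	 * optional hook" shape of the cl_io / cl_object layer stacks. */
	struct layer {
		const char *name;
		int (*read_ahead)(const struct layer *l, unsigned long start);
	};

	/* Walk the stack top to bottom; skip layers without the hook and
	 * stop at the first layer that returns non-zero (positive = handled). */
	static int top_read_ahead(const struct layer *layers, size_t n,
				  unsigned long start)
	{
		size_t i;
		int rc = 0;

		for (i = 0; i < n; i++) {
			if (!layers[i].read_ahead)
				continue;
			rc = layers[i].read_ahead(&layers[i], start);
			if (rc)
				break;
		}
		return rc > 0 ? 0 : rc;
	}

	static int lov_like_hook(const struct layer *l, unsigned long start)
	{
		printf("%s decides the read-ahead window at %lu\n", l->name, start);
		return 1;	/* handled, stop iterating */
	}

	int main(void)
	{
		struct layer stack[] = {
			{ .name = "vvp", .read_ahead = NULL },		/* no hook */
			{ .name = "lov", .read_ahead = lov_like_hook },
			{ .name = "osc", .read_ahead = NULL },
		};

		return top_read_ahead(stack, 3, 4096);
	}
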
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 3199dd4a3b72..f5d4e23c64b7 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -335,7 +335,7 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
335 if (obj->co_ops->coo_getstripe) { 335 if (obj->co_ops->coo_getstripe) {
336 result = obj->co_ops->coo_getstripe(env, obj, uarg); 336 result = obj->co_ops->coo_getstripe(env, obj, uarg);
337 if (result) 337 if (result)
338 break; 338 break;
339 } 339 }
340 } 340 }
341 return result; 341 return result;
@@ -343,6 +343,67 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
343EXPORT_SYMBOL(cl_object_getstripe); 343EXPORT_SYMBOL(cl_object_getstripe);
344 344
345/** 345/**
346 * Get fiemap extents from file object.
347 *
348 * \param env [in] lustre environment
349 * \param obj [in] file object
350 * \param key [in] fiemap request argument
 351 * \param fiemap [out] fiemap extents mapping retrieved

352 * \param buflen [in] max buffer length of @fiemap
353 *
354 * \retval 0 success
355 * \retval < 0 error
356 */
357int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
358 struct ll_fiemap_info_key *key,
359 struct fiemap *fiemap, size_t *buflen)
360{
361 struct lu_object_header *top;
362 int result = 0;
363
364 top = obj->co_lu.lo_header;
365 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
366 if (obj->co_ops->coo_fiemap) {
367 result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
368 buflen);
369 if (result)
370 break;
371 }
372 }
373 return result;
374}
375EXPORT_SYMBOL(cl_object_fiemap);
376
377int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
378 struct cl_layout *cl)
379{
380 struct lu_object_header *top = obj->co_lu.lo_header;
381
382 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
383 if (obj->co_ops->coo_layout_get)
384 return obj->co_ops->coo_layout_get(env, obj, cl);
385 }
386
387 return -EOPNOTSUPP;
388}
389EXPORT_SYMBOL(cl_object_layout_get);
390
391loff_t cl_object_maxbytes(struct cl_object *obj)
392{
393 struct lu_object_header *top = obj->co_lu.lo_header;
394 loff_t maxbytes = LLONG_MAX;
395
396 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
397 if (obj->co_ops->coo_maxbytes)
398 maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
399 maxbytes);
400 }
401
402 return maxbytes;
403}
404EXPORT_SYMBOL(cl_object_maxbytes);
405
406/**
346 * Helper function removing all object locks, and marking object for 407 * Helper function removing all object locks, and marking object for
347 * deletion. All object pages must have been deleted at this point. 408 * deletion. All object pages must have been deleted at this point.
348 * 409 *
@@ -483,36 +544,20 @@ EXPORT_SYMBOL(cl_site_stats_print);
483 * bz20044, bz22683. 544 * bz20044, bz22683.
484 */ 545 */
485 546
486static LIST_HEAD(cl_envs); 547static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
487static unsigned int cl_envs_cached_nr; 548 * for now.
488static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit 549 */
489 * for now. 550static struct cl_env_cache {
490 */ 551 rwlock_t cec_guard;
491static DEFINE_SPINLOCK(cl_envs_guard); 552 unsigned int cec_count;
553 struct list_head cec_envs;
554} *cl_envs = NULL;
492 555
493struct cl_env { 556struct cl_env {
494 void *ce_magic; 557 void *ce_magic;
495 struct lu_env ce_lu; 558 struct lu_env ce_lu;
496 struct lu_context ce_ses; 559 struct lu_context ce_ses;
497 560
498 /**
499 * This allows cl_env to be entered into cl_env_hash which implements
500 * the current thread -> client environment lookup.
501 */
502 struct hlist_node ce_node;
503 /**
504 * Owner for the current cl_env.
505 *
506 * If LL_TASK_CL_ENV is defined, this point to the owning current,
507 * only for debugging purpose ;
508 * Otherwise hash is used, and this is the key for cfs_hash.
509 * Now current thread pid is stored. Note using thread pointer would
510 * lead to unbalanced hash because of its specific allocation locality
511 * and could be varied for different platforms and OSes, even different
512 * OS versions.
513 */
514 void *ce_owner;
515
516 /* 561 /*
517 * Linkage into global list of all client environments. Used for 562 * Linkage into global list of all client environments. Used for
518 * garbage collection. 563 * garbage collection.
@@ -536,122 +581,13 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
536{ 581{
537 LASSERT(cle->ce_ref == 0); 582 LASSERT(cle->ce_ref == 0);
538 LASSERT(cle->ce_magic == &cl_env_init0); 583 LASSERT(cle->ce_magic == &cl_env_init0);
539 LASSERT(!cle->ce_debug && !cle->ce_owner); 584 LASSERT(!cle->ce_debug);
540 585
541 cle->ce_ref = 1; 586 cle->ce_ref = 1;
542 cle->ce_debug = debug; 587 cle->ce_debug = debug;
543 CL_ENV_INC(busy); 588 CL_ENV_INC(busy);
544} 589}
545 590
546/*
547 * The implementation of using hash table to connect cl_env and thread
548 */
549
550static struct cfs_hash *cl_env_hash;
551
552static unsigned cl_env_hops_hash(struct cfs_hash *lh,
553 const void *key, unsigned mask)
554{
555#if BITS_PER_LONG == 64
556 return cfs_hash_u64_hash((__u64)key, mask);
557#else
558 return cfs_hash_u32_hash((__u32)key, mask);
559#endif
560}
561
562static void *cl_env_hops_obj(struct hlist_node *hn)
563{
564 struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
565
566 LASSERT(cle->ce_magic == &cl_env_init0);
567 return (void *)cle;
568}
569
570static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
571{
572 struct cl_env *cle = cl_env_hops_obj(hn);
573
574 LASSERT(cle->ce_owner);
575 return (key == cle->ce_owner);
576}
577
578static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
579{
580 struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
581
582 LASSERT(cle->ce_magic == &cl_env_init0);
583}
584
585static struct cfs_hash_ops cl_env_hops = {
586 .hs_hash = cl_env_hops_hash,
587 .hs_key = cl_env_hops_obj,
588 .hs_keycmp = cl_env_hops_keycmp,
589 .hs_object = cl_env_hops_obj,
590 .hs_get = cl_env_hops_noop,
591 .hs_put_locked = cl_env_hops_noop,
592};
593
594static inline struct cl_env *cl_env_fetch(void)
595{
596 struct cl_env *cle;
597
598 cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
599 LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
600 return cle;
601}
602
603static inline void cl_env_attach(struct cl_env *cle)
604{
605 if (cle) {
606 int rc;
607
608 LASSERT(!cle->ce_owner);
609 cle->ce_owner = (void *)(long)current->pid;
610 rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
611 &cle->ce_node);
612 LASSERT(rc == 0);
613 }
614}
615
616static inline void cl_env_do_detach(struct cl_env *cle)
617{
618 void *cookie;
619
620 LASSERT(cle->ce_owner == (void *)(long)current->pid);
621 cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
622 &cle->ce_node);
623 LASSERT(cookie == cle);
624 cle->ce_owner = NULL;
625}
626
627static int cl_env_store_init(void)
628{
629 cl_env_hash = cfs_hash_create("cl_env",
630 HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
631 HASH_CL_ENV_BKT_BITS, 0,
632 CFS_HASH_MIN_THETA,
633 CFS_HASH_MAX_THETA,
634 &cl_env_hops,
635 CFS_HASH_RW_BKTLOCK);
636 return cl_env_hash ? 0 : -ENOMEM;
637}
638
639static void cl_env_store_fini(void)
640{
641 cfs_hash_putref(cl_env_hash);
642}
643
644static inline struct cl_env *cl_env_detach(struct cl_env *cle)
645{
646 if (!cle)
647 cle = cl_env_fetch();
648
649 if (cle && cle->ce_owner)
650 cl_env_do_detach(cle);
651
652 return cle;
653}
654
655static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) 591static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
656{ 592{
657 struct lu_env *env; 593 struct lu_env *env;
@@ -701,16 +637,20 @@ static struct lu_env *cl_env_obtain(void *debug)
701{ 637{
702 struct cl_env *cle; 638 struct cl_env *cle;
703 struct lu_env *env; 639 struct lu_env *env;
640 int cpu = get_cpu();
704 641
705 spin_lock(&cl_envs_guard); 642 read_lock(&cl_envs[cpu].cec_guard);
706 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); 643 LASSERT(equi(cl_envs[cpu].cec_count == 0,
707 if (cl_envs_cached_nr > 0) { 644 list_empty(&cl_envs[cpu].cec_envs)));
645 if (cl_envs[cpu].cec_count > 0) {
708 int rc; 646 int rc;
709 647
710 cle = container_of(cl_envs.next, struct cl_env, ce_linkage); 648 cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
649 ce_linkage);
711 list_del_init(&cle->ce_linkage); 650 list_del_init(&cle->ce_linkage);
712 cl_envs_cached_nr--; 651 cl_envs[cpu].cec_count--;
713 spin_unlock(&cl_envs_guard); 652 read_unlock(&cl_envs[cpu].cec_guard);
653 put_cpu();
714 654
715 env = &cle->ce_lu; 655 env = &cle->ce_lu;
716 rc = lu_env_refill(env); 656 rc = lu_env_refill(env);
@@ -723,7 +663,8 @@ static struct lu_env *cl_env_obtain(void *debug)
723 env = ERR_PTR(rc); 663 env = ERR_PTR(rc);
724 } 664 }
725 } else { 665 } else {
726 spin_unlock(&cl_envs_guard); 666 read_unlock(&cl_envs[cpu].cec_guard);
667 put_cpu();
727 env = cl_env_new(lu_context_tags_default, 668 env = cl_env_new(lu_context_tags_default,
728 lu_session_tags_default, debug); 669 lu_session_tags_default, debug);
729 } 670 }
@@ -735,27 +676,6 @@ static inline struct cl_env *cl_env_container(struct lu_env *env)
735 return container_of(env, struct cl_env, ce_lu); 676 return container_of(env, struct cl_env, ce_lu);
736} 677}
737 678
738static struct lu_env *cl_env_peek(int *refcheck)
739{
740 struct lu_env *env;
741 struct cl_env *cle;
742
743 CL_ENV_INC(lookup);
744
745 /* check that we don't go far from untrusted pointer */
746 CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
747
748 env = NULL;
749 cle = cl_env_fetch();
750 if (cle) {
751 CL_ENV_INC(hit);
752 env = &cle->ce_lu;
753 *refcheck = ++cle->ce_ref;
754 }
755 CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
756 return env;
757}
758
759/** 679/**
760 * Returns lu_env: if there already is an environment associated with the 680 * Returns lu_env: if there already is an environment associated with the
761 * current thread, it is returned, otherwise, new environment is allocated. 681 * current thread, it is returned, otherwise, new environment is allocated.
@@ -773,17 +693,13 @@ struct lu_env *cl_env_get(int *refcheck)
773{ 693{
774 struct lu_env *env; 694 struct lu_env *env;
775 695
776 env = cl_env_peek(refcheck); 696 env = cl_env_obtain(__builtin_return_address(0));
777 if (!env) { 697 if (!IS_ERR(env)) {
778 env = cl_env_obtain(__builtin_return_address(0)); 698 struct cl_env *cle;
779 if (!IS_ERR(env)) {
780 struct cl_env *cle;
781 699
782 cle = cl_env_container(env); 700 cle = cl_env_container(env);
783 cl_env_attach(cle); 701 *refcheck = cle->ce_ref;
784 *refcheck = cle->ce_ref; 702 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
785 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
786 }
787 } 703 }
788 return env; 704 return env;
789} 705}
@@ -798,7 +714,6 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
798{ 714{
799 struct lu_env *env; 715 struct lu_env *env;
800 716
801 LASSERT(!cl_env_peek(refcheck));
802 env = cl_env_new(tags, tags, __builtin_return_address(0)); 717 env = cl_env_new(tags, tags, __builtin_return_address(0));
803 if (!IS_ERR(env)) { 718 if (!IS_ERR(env)) {
804 struct cl_env *cle; 719 struct cl_env *cle;
@@ -813,7 +728,6 @@ EXPORT_SYMBOL(cl_env_alloc);
813 728
814static void cl_env_exit(struct cl_env *cle) 729static void cl_env_exit(struct cl_env *cle)
815{ 730{
816 LASSERT(!cle->ce_owner);
817 lu_context_exit(&cle->ce_lu.le_ctx); 731 lu_context_exit(&cle->ce_lu.le_ctx);
818 lu_context_exit(&cle->ce_ses); 732 lu_context_exit(&cle->ce_ses);
819} 733}
@@ -826,20 +740,25 @@ static void cl_env_exit(struct cl_env *cle)
826unsigned int cl_env_cache_purge(unsigned int nr) 740unsigned int cl_env_cache_purge(unsigned int nr)
827{ 741{
828 struct cl_env *cle; 742 struct cl_env *cle;
743 unsigned int i;
829 744
830 spin_lock(&cl_envs_guard); 745 for_each_possible_cpu(i) {
831 for (; !list_empty(&cl_envs) && nr > 0; --nr) { 746 write_lock(&cl_envs[i].cec_guard);
832 cle = container_of(cl_envs.next, struct cl_env, ce_linkage); 747 for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
833 list_del_init(&cle->ce_linkage); 748 cle = container_of(cl_envs[i].cec_envs.next,
834 LASSERT(cl_envs_cached_nr > 0); 749 struct cl_env, ce_linkage);
835 cl_envs_cached_nr--; 750 list_del_init(&cle->ce_linkage);
836 spin_unlock(&cl_envs_guard); 751 LASSERT(cl_envs[i].cec_count > 0);
752 cl_envs[i].cec_count--;
753 write_unlock(&cl_envs[i].cec_guard);
837 754
838 cl_env_fini(cle); 755 cl_env_fini(cle);
839 spin_lock(&cl_envs_guard); 756 write_lock(&cl_envs[i].cec_guard);
757 }
758 LASSERT(equi(cl_envs[i].cec_count == 0,
759 list_empty(&cl_envs[i].cec_envs)));
760 write_unlock(&cl_envs[i].cec_guard);
840 } 761 }
841 LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
842 spin_unlock(&cl_envs_guard);
843 return nr; 762 return nr;
844} 763}
845EXPORT_SYMBOL(cl_env_cache_purge); 764EXPORT_SYMBOL(cl_env_cache_purge);
@@ -862,8 +781,9 @@ void cl_env_put(struct lu_env *env, int *refcheck)
862 781
863 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); 782 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
864 if (--cle->ce_ref == 0) { 783 if (--cle->ce_ref == 0) {
784 int cpu = get_cpu();
785
865 CL_ENV_DEC(busy); 786 CL_ENV_DEC(busy);
866 cl_env_detach(cle);
867 cle->ce_debug = NULL; 787 cle->ce_debug = NULL;
868 cl_env_exit(cle); 788 cl_env_exit(cle);
869 /* 789 /*
@@ -872,107 +792,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
872 * Return environment to the cache only when it was allocated 792 * Return environment to the cache only when it was allocated
873 * with the standard tags. 793 * with the standard tags.
874 */ 794 */
875 if (cl_envs_cached_nr < cl_envs_cached_max && 795 if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
876 (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD && 796 (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
877 (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) { 797 (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
878 spin_lock(&cl_envs_guard); 798 read_lock(&cl_envs[cpu].cec_guard);
879 list_add(&cle->ce_linkage, &cl_envs); 799 list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
880 cl_envs_cached_nr++; 800 cl_envs[cpu].cec_count++;
881 spin_unlock(&cl_envs_guard); 801 read_unlock(&cl_envs[cpu].cec_guard);
882 } else { 802 } else {
883 cl_env_fini(cle); 803 cl_env_fini(cle);
884 } 804 }
805 put_cpu();
885 } 806 }
886} 807}
887EXPORT_SYMBOL(cl_env_put); 808EXPORT_SYMBOL(cl_env_put);
888 809
889/** 810/**
890 * Declares a point of re-entrancy.
891 *
892 * \see cl_env_reexit()
893 */
894void *cl_env_reenter(void)
895{
896 return cl_env_detach(NULL);
897}
898EXPORT_SYMBOL(cl_env_reenter);
899
900/**
901 * Exits re-entrancy.
902 */
903void cl_env_reexit(void *cookie)
904{
905 cl_env_detach(NULL);
906 cl_env_attach(cookie);
907}
908EXPORT_SYMBOL(cl_env_reexit);
909
910/**
911 * Setup user-supplied \a env as a current environment. This is to be used to
912 * guaranteed that environment exists even when cl_env_get() fails. It is up
913 * to user to ensure proper concurrency control.
914 *
915 * \see cl_env_unplant()
916 */
917void cl_env_implant(struct lu_env *env, int *refcheck)
918{
919 struct cl_env *cle = cl_env_container(env);
920
921 LASSERT(cle->ce_ref > 0);
922
923 cl_env_attach(cle);
924 cl_env_get(refcheck);
925 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
926}
927EXPORT_SYMBOL(cl_env_implant);
928
929/**
930 * Detach environment installed earlier by cl_env_implant().
931 */
932void cl_env_unplant(struct lu_env *env, int *refcheck)
933{
934 struct cl_env *cle = cl_env_container(env);
935
936 LASSERT(cle->ce_ref > 1);
937
938 CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
939
940 cl_env_detach(cle);
941 cl_env_put(env, refcheck);
942}
943EXPORT_SYMBOL(cl_env_unplant);
944
945struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
946{
947 struct lu_env *env;
948
949 nest->cen_cookie = NULL;
950 env = cl_env_peek(&nest->cen_refcheck);
951 if (env) {
952 if (!cl_io_is_going(env))
953 return env;
954 cl_env_put(env, &nest->cen_refcheck);
955 nest->cen_cookie = cl_env_reenter();
956 }
957 env = cl_env_get(&nest->cen_refcheck);
958 if (IS_ERR(env)) {
959 cl_env_reexit(nest->cen_cookie);
960 return env;
961 }
962
963 LASSERT(!cl_io_is_going(env));
964 return env;
965}
966EXPORT_SYMBOL(cl_env_nested_get);
967
968void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
969{
970 cl_env_put(env, &nest->cen_refcheck);
971 cl_env_reexit(nest->cen_cookie);
972}
973EXPORT_SYMBOL(cl_env_nested_put);
974
975/**
976 * Converts struct ost_lvb to struct cl_attr. 811 * Converts struct ost_lvb to struct cl_attr.
977 * 812 *
978 * \see cl_attr2lvb 813 * \see cl_attr2lvb
@@ -999,6 +834,10 @@ static int cl_env_percpu_init(void)
999 for_each_possible_cpu(i) { 834 for_each_possible_cpu(i) {
1000 struct lu_env *env; 835 struct lu_env *env;
1001 836
837 rwlock_init(&cl_envs[i].cec_guard);
838 INIT_LIST_HEAD(&cl_envs[i].cec_envs);
839 cl_envs[i].cec_count = 0;
840
1002 cle = &cl_env_percpu[i]; 841 cle = &cl_env_percpu[i];
1003 env = &cle->ce_lu; 842 env = &cle->ce_lu;
1004 843
@@ -1066,7 +905,6 @@ void cl_env_percpu_put(struct lu_env *env)
1066 LASSERT(cle->ce_ref == 0); 905 LASSERT(cle->ce_ref == 0);
1067 906
1068 CL_ENV_DEC(busy); 907 CL_ENV_DEC(busy);
1069 cl_env_detach(cle);
1070 cle->ce_debug = NULL; 908 cle->ce_debug = NULL;
1071 909
1072 put_cpu(); 910 put_cpu();
@@ -1080,7 +918,6 @@ struct lu_env *cl_env_percpu_get(void)
1080 cle = &cl_env_percpu[get_cpu()]; 918 cle = &cl_env_percpu[get_cpu()];
1081 cl_env_init0(cle, __builtin_return_address(0)); 919 cl_env_init0(cle, __builtin_return_address(0));
1082 920
1083 cl_env_attach(cle);
1084 return &cle->ce_lu; 921 return &cle->ce_lu;
1085} 922}
1086EXPORT_SYMBOL(cl_env_percpu_get); 923EXPORT_SYMBOL(cl_env_percpu_get);
@@ -1144,51 +981,19 @@ LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
1144static void *cl_key_init(const struct lu_context *ctx, 981static void *cl_key_init(const struct lu_context *ctx,
1145 struct lu_context_key *key) 982 struct lu_context_key *key)
1146{ 983{
1147 struct cl_thread_info *info; 984 return cl0_key_init(ctx, key);
1148
1149 info = cl0_key_init(ctx, key);
1150 if (!IS_ERR(info)) {
1151 size_t i;
1152
1153 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
1154 lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
1155 }
1156 return info;
1157} 985}
1158 986
1159static void cl_key_fini(const struct lu_context *ctx, 987static void cl_key_fini(const struct lu_context *ctx,
1160 struct lu_context_key *key, void *data) 988 struct lu_context_key *key, void *data)
1161{ 989{
1162 struct cl_thread_info *info;
1163 size_t i;
1164
1165 info = data;
1166 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
1167 lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
1168 cl0_key_fini(ctx, key, data); 990 cl0_key_fini(ctx, key, data);
1169} 991}
1170 992
1171static void cl_key_exit(const struct lu_context *ctx,
1172 struct lu_context_key *key, void *data)
1173{
1174 struct cl_thread_info *info = data;
1175 size_t i;
1176
1177 for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
1178 LASSERT(info->clt_counters[i].ctc_nr_held == 0);
1179 LASSERT(info->clt_counters[i].ctc_nr_used == 0);
1180 LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
1181 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
1182 lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
1183 lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
1184 }
1185}
1186
1187static struct lu_context_key cl_key = { 993static struct lu_context_key cl_key = {
1188 .lct_tags = LCT_CL_THREAD, 994 .lct_tags = LCT_CL_THREAD,
1189 .lct_init = cl_key_init, 995 .lct_init = cl_key_init,
1190 .lct_fini = cl_key_fini, 996 .lct_fini = cl_key_fini,
1191 .lct_exit = cl_key_exit
1192}; 997};
1193 998
1194static struct lu_kmem_descr cl_object_caches[] = { 999static struct lu_kmem_descr cl_object_caches[] = {
@@ -1212,13 +1017,15 @@ int cl_global_init(void)
1212{ 1017{
1213 int result; 1018 int result;
1214 1019
1215 result = cl_env_store_init(); 1020 cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
1216 if (result) 1021 if (!cl_envs) {
1217 return result; 1022 result = -ENOMEM;
1023 goto out;
1024 }
1218 1025
1219 result = lu_kmem_init(cl_object_caches); 1026 result = lu_kmem_init(cl_object_caches);
1220 if (result) 1027 if (result)
1221 goto out_store; 1028 goto out_envs;
1222 1029
1223 LU_CONTEXT_KEY_INIT(&cl_key); 1030 LU_CONTEXT_KEY_INIT(&cl_key);
1224 result = lu_context_key_register(&cl_key); 1031 result = lu_context_key_register(&cl_key);
@@ -1228,16 +1035,17 @@ int cl_global_init(void)
1228 result = cl_env_percpu_init(); 1035 result = cl_env_percpu_init();
1229 if (result) 1036 if (result)
1230 /* no cl_env_percpu_fini on error */ 1037 /* no cl_env_percpu_fini on error */
1231 goto out_context; 1038 goto out_keys;
1232 1039
1233 return 0; 1040 return 0;
1234 1041
1235out_context: 1042out_keys:
1236 lu_context_key_degister(&cl_key); 1043 lu_context_key_degister(&cl_key);
1237out_kmem: 1044out_kmem:
1238 lu_kmem_fini(cl_object_caches); 1045 lu_kmem_fini(cl_object_caches);
1239out_store: 1046out_envs:
1240 cl_env_store_fini(); 1047 kfree(cl_envs);
1048out:
1241 return result; 1049 return result;
1242} 1050}
1243 1051
@@ -1249,5 +1057,5 @@ void cl_global_fini(void)
1249 cl_env_percpu_fini(); 1057 cl_env_percpu_fini();
1250 lu_context_key_degister(&cl_key); 1058 lu_context_key_degister(&cl_key);
1251 lu_kmem_fini(cl_object_caches); 1059 lu_kmem_fini(cl_object_caches);
1252 cl_env_store_fini(); 1060 kfree(cl_envs);
1253} 1061}
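
The cl_envs change above replaces one global, spinlock-protected list with an array of per-CPU caches, so that cl_env_get()/cl_env_put() normally only touch the cache of the CPU they run on. A rough user-space approximation of that layout, using an ordinary mutex per cache instead of the kernel's rwlock and get_cpu()/put_cpu() pinning, could be:

	#include <pthread.h>
	#include <stdlib.h>

	#define NR_CACHES	4	/* stands in for num_possible_cpus() */
	#define CACHE_MAX	32	/* stands in for cl_envs_cached_max */

	struct env { int dummy; };	/* opaque cached object */

	static struct env_cache {
		pthread_mutex_t	lock;	/* kernel code uses a rwlock here */
		struct env	*slots[CACHE_MAX];
		unsigned int	count;
	} caches[NR_CACHES];

	static void caches_init(void)
	{
		for (int i = 0; i < NR_CACHES; i++)
			pthread_mutex_init(&caches[i].lock, NULL);
	}

	/* Take a cached object from "this CPU's" cache; NULL means the
	 * caller has to allocate a fresh one. */
	static struct env *cache_get(unsigned int cpu)
	{
		struct env_cache *c = &caches[cpu % NR_CACHES];
		struct env *e = NULL;

		pthread_mutex_lock(&c->lock);
		if (c->count)
			e = c->slots[--c->count];
		pthread_mutex_unlock(&c->lock);
		return e;
	}

	/* Return an object to the local cache, or free it if the cache is full. */
	static void cache_put(unsigned int cpu, struct env *e)
	{
		struct env_cache *c = &caches[cpu % NR_CACHES];

		pthread_mutex_lock(&c->lock);
		if (c->count < CACHE_MAX) {
			c->slots[c->count++] = e;
			e = NULL;
		}
		pthread_mutex_unlock(&c->lock);
		free(e);	/* free(NULL) is a no-op when the object was cached */
	}

Splitting the cache per CPU removes the single cl_envs_guard lock from every get/put and keeps freed environments cache-hot on the CPU that last used them.
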
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 63973ba096da..cd9a40ca4448 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -99,7 +99,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
99 99
100 PASSERT(env, page, list_empty(&page->cp_batch)); 100 PASSERT(env, page, list_empty(&page->cp_batch));
101 PASSERT(env, page, !page->cp_owner); 101 PASSERT(env, page, !page->cp_owner);
102 PASSERT(env, page, !page->cp_req);
103 PASSERT(env, page, page->cp_state == CPS_FREEING); 102 PASSERT(env, page, page->cp_state == CPS_FREEING);
104 103
105 while (!list_empty(&page->cp_layers)) { 104 while (!list_empty(&page->cp_layers)) {
@@ -150,7 +149,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
150 page->cp_type = type; 149 page->cp_type = type;
151 INIT_LIST_HEAD(&page->cp_layers); 150 INIT_LIST_HEAD(&page->cp_layers);
152 INIT_LIST_HEAD(&page->cp_batch); 151 INIT_LIST_HEAD(&page->cp_batch);
153 INIT_LIST_HEAD(&page->cp_flight);
154 lu_ref_init(&page->cp_reference); 152 lu_ref_init(&page->cp_reference);
155 head = o->co_lu.lo_header; 153 head = o->co_lu.lo_header;
156 list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { 154 list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
@@ -390,30 +388,6 @@ EXPORT_SYMBOL(cl_page_at);
390 __result; \ 388 __result; \
391}) 389})
392 390
393#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
394({ \
395 const struct lu_env *__env = (_env); \
396 struct cl_page *__page = (_page); \
397 const struct cl_page_slice *__scan; \
398 int __result; \
399 ptrdiff_t __op = (_op); \
400 int (*__method)_proto; \
401 \
402 __result = 0; \
403 list_for_each_entry_reverse(__scan, &__page->cp_layers, \
404 cpl_linkage) { \
405 __method = *(void **)((char *)__scan->cpl_ops + __op); \
406 if (__method) { \
407 __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
408 if (__result != 0) \
409 break; \
410 } \
411 } \
412 if (__result > 0) \
413 __result = 0; \
414 __result; \
415})
416
417#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \ 391#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
418do { \ 392do { \
419 const struct lu_env *__env = (_env); \ 393 const struct lu_env *__env = (_env); \
@@ -552,7 +526,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
552 io, nonblock); 526 io, nonblock);
553 if (result == 0) { 527 if (result == 0) {
554 PASSERT(env, pg, !pg->cp_owner); 528 PASSERT(env, pg, !pg->cp_owner);
555 PASSERT(env, pg, !pg->cp_req);
556 pg->cp_owner = cl_io_top(io); 529 pg->cp_owner = cl_io_top(io);
557 cl_page_owner_set(pg); 530 cl_page_owner_set(pg);
558 if (pg->cp_state != CPS_FREEING) { 531 if (pg->cp_state != CPS_FREEING) {
@@ -694,7 +667,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
694 PASSERT(env, pg, pg->cp_state != CPS_FREEING); 667 PASSERT(env, pg, pg->cp_state != CPS_FREEING);
695 668
696 /* 669 /*
697 * Severe all ways to obtain new pointers to @pg. 670 * Sever all ways to obtain new pointers to @pg.
698 */ 671 */
699 cl_page_owner_clear(pg); 672 cl_page_owner_clear(pg);
700 673
@@ -845,8 +818,6 @@ void cl_page_completion(const struct lu_env *env,
845 struct cl_sync_io *anchor = pg->cp_sync_io; 818 struct cl_sync_io *anchor = pg->cp_sync_io;
846 819
847 PASSERT(env, pg, crt < CRT_NR); 820 PASSERT(env, pg, crt < CRT_NR);
848 /* cl_page::cp_req already cleared by the caller (osc_completion()) */
849 PASSERT(env, pg, !pg->cp_req);
850 PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); 821 PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
851 822
852 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); 823 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -860,16 +831,8 @@ void cl_page_completion(const struct lu_env *env,
860 if (anchor) { 831 if (anchor) {
861 LASSERT(pg->cp_sync_io == anchor); 832 LASSERT(pg->cp_sync_io == anchor);
862 pg->cp_sync_io = NULL; 833 pg->cp_sync_io = NULL;
863 }
864 /*
865 * As page->cp_obj is pinned by a reference from page->cp_req, it is
866 * safe to call cl_page_put() without risking object destruction in a
867 * non-blocking context.
868 */
869 cl_page_put(env, pg);
870
871 if (anchor)
872 cl_sync_io_note(env, anchor, ioret); 834 cl_sync_io_note(env, anchor, ioret);
835 }
873} 836}
874EXPORT_SYMBOL(cl_page_completion); 837EXPORT_SYMBOL(cl_page_completion);
875 838
@@ -927,29 +890,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
927EXPORT_SYMBOL(cl_page_flush); 890EXPORT_SYMBOL(cl_page_flush);
928 891
929/** 892/**
930 * Checks whether page is protected by any extent lock is at least required
931 * mode.
932 *
933 * \return the same as in cl_page_operations::cpo_is_under_lock() method.
934 * \see cl_page_operations::cpo_is_under_lock()
935 */
936int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
937 struct cl_page *page, pgoff_t *max_index)
938{
939 int rc;
940
941 PINVRNT(env, page, cl_page_invariant(page));
942
943 rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
944 (const struct lu_env *,
945 const struct cl_page_slice *,
946 struct cl_io *, pgoff_t *),
947 io, max_index);
948 return rc;
949}
950EXPORT_SYMBOL(cl_page_is_under_lock);
951
952/**
953 * Tells transfer engine that only part of a page is to be transmitted. 893 * Tells transfer engine that only part of a page is to be transmitted.
954 * 894 *
955 * \see cl_page_operations::cpo_clip() 895 * \see cl_page_operations::cpo_clip()
@@ -974,10 +914,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
974 lu_printer_t printer, const struct cl_page *pg) 914 lu_printer_t printer, const struct cl_page *pg)
975{ 915{
976 (*printer)(env, cookie, 916 (*printer)(env, cookie,
977 "page@%p[%d %p %d %d %p %p]\n", 917 "page@%p[%d %p %d %d %p]\n",
978 pg, atomic_read(&pg->cp_ref), pg->cp_obj, 918 pg, atomic_read(&pg->cp_ref), pg->cp_obj,
979 pg->cp_state, pg->cp_type, 919 pg->cp_state, pg->cp_type,
980 pg->cp_owner, pg->cp_req); 920 pg->cp_owner);
981} 921}
982EXPORT_SYMBOL(cl_page_header_print); 922EXPORT_SYMBOL(cl_page_header_print);
983 923
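
With cp_req gone, cl_page_completion() above only has to notify the optional cl_sync_io anchor, which is essentially a counted completion: the submitter sets the pending count to the number of pages in flight, each completion decrements it, and the waiter wakes when it reaches zero. A minimal pthread-based sketch of that idea (not the real cl_sync_io API) is:

	#include <pthread.h>

	/* Hypothetical counted-completion object playing the role of cl_sync_io. */
	struct sync_anchor {
		pthread_mutex_t	lock;
		pthread_cond_t	done;
		int		nr_pending;	/* pages still in flight */
		int		first_error;	/* first non-zero ioret, if any */
	};

	static void anchor_init(struct sync_anchor *a, int nr)
	{
		pthread_mutex_init(&a->lock, NULL);
		pthread_cond_init(&a->done, NULL);
		a->nr_pending = nr;
		a->first_error = 0;
	}

	/* Called once per completed page, analogous to cl_sync_io_note(). */
	static void anchor_note(struct sync_anchor *a, int ioret)
	{
		pthread_mutex_lock(&a->lock);
		if (ioret && !a->first_error)
			a->first_error = ioret;
		if (--a->nr_pending == 0)
			pthread_cond_signal(&a->done);	/* last completion wakes waiter */
		pthread_mutex_unlock(&a->lock);
	}

	/* The submitter blocks here until every page has completed. */
	static int anchor_wait(struct sync_anchor *a)
	{
		pthread_mutex_lock(&a->lock);
		while (a->nr_pending)
			pthread_cond_wait(&a->done, &a->lock);
		pthread_mutex_unlock(&a->lock);
		return a->first_error;
	}
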
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf8bb2a2f40b..fa0d38ddccb2 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -907,6 +907,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
907 INIT_LIST_HEAD(&imp->imp_sending_list); 907 INIT_LIST_HEAD(&imp->imp_sending_list);
908 INIT_LIST_HEAD(&imp->imp_delayed_list); 908 INIT_LIST_HEAD(&imp->imp_delayed_list);
909 INIT_LIST_HEAD(&imp->imp_committed_list); 909 INIT_LIST_HEAD(&imp->imp_committed_list);
910 INIT_LIST_HEAD(&imp->imp_unreplied_list);
911 imp->imp_known_replied_xid = 0;
910 imp->imp_replay_cursor = &imp->imp_committed_list; 912 imp->imp_replay_cursor = &imp->imp_committed_list;
911 spin_lock_init(&imp->imp_lock); 913 spin_lock_init(&imp->imp_lock);
912 imp->imp_last_success_conn = 0; 914 imp->imp_last_success_conn = 0;
@@ -1408,13 +1410,33 @@ EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
1408int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) 1410int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
1409{ 1411{
1410 struct obd_request_slot_waiter *orsw; 1412 struct obd_request_slot_waiter *orsw;
1413 const char *typ_name;
1411 __u32 old; 1414 __u32 old;
1412 int diff; 1415 int diff;
1416 int rc;
1413 int i; 1417 int i;
1414 1418
1415 if (max > OBD_MAX_RIF_MAX || max < 1) 1419 if (max > OBD_MAX_RIF_MAX || max < 1)
1416 return -ERANGE; 1420 return -ERANGE;
1417 1421
1422 typ_name = cli->cl_import->imp_obd->obd_type->typ_name;
1423 if (!strcmp(typ_name, LUSTRE_MDC_NAME)) {
1424 /*
1425 * adjust max_mod_rpcs_in_flight to ensure it is always
1426 * strictly lower that max_rpcs_in_flight
1427 */
1428 if (max < 2) {
1429 CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n",
1430 cli->cl_import->imp_obd->obd_name);
1431 return -ERANGE;
1432 }
1433 if (max <= cli->cl_max_mod_rpcs_in_flight) {
1434 rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1);
1435 if (rc)
1436 return rc;
1437 }
1438 }
1439
1418 spin_lock(&cli->cl_loi_list_lock); 1440 spin_lock(&cli->cl_loi_list_lock);
1419 old = cli->cl_max_rpcs_in_flight; 1441 old = cli->cl_max_rpcs_in_flight;
1420 cli->cl_max_rpcs_in_flight = max; 1442 cli->cl_max_rpcs_in_flight = max;
@@ -1436,3 +1458,209 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
1436 return 0; 1458 return 0;
1437} 1459}
1438EXPORT_SYMBOL(obd_set_max_rpcs_in_flight); 1460EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
1461
1462int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max)
1463{
1464 struct obd_connect_data *ocd;
1465 u16 maxmodrpcs;
1466 u16 prev;
1467
1468 if (max > OBD_MAX_RIF_MAX || max < 1)
1469 return -ERANGE;
1470
1471 /* cannot exceed or equal max_rpcs_in_flight */
1472 if (max >= cli->cl_max_rpcs_in_flight) {
1473		CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than or equal to max_rpcs_in_flight value (%u)\n",
1474 cli->cl_import->imp_obd->obd_name,
1475 max, cli->cl_max_rpcs_in_flight);
1476 return -ERANGE;
1477 }
1478
1479 /* cannot exceed max modify RPCs in flight supported by the server */
1480 ocd = &cli->cl_import->imp_connect_data;
1481 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1482 maxmodrpcs = ocd->ocd_maxmodrpcs;
1483 else
1484 maxmodrpcs = 1;
1485 if (max > maxmodrpcs) {
1486 CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n",
1487 cli->cl_import->imp_obd->obd_name,
1488 max, maxmodrpcs);
1489 return -ERANGE;
1490 }
1491
1492 spin_lock(&cli->cl_mod_rpcs_lock);
1493
1494 prev = cli->cl_max_mod_rpcs_in_flight;
1495 cli->cl_max_mod_rpcs_in_flight = max;
1496
1497 /* wakeup waiters if limit has been increased */
1498 if (cli->cl_max_mod_rpcs_in_flight > prev)
1499 wake_up(&cli->cl_mod_rpcs_waitq);
1500
1501 spin_unlock(&cli->cl_mod_rpcs_lock);
1502
1503 return 0;
1504}
1505EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight);
1506
1507#define pct(a, b) (b ? (a * 100) / b : 0)
1508
1509int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq)
1510{
1511 unsigned long mod_tot = 0, mod_cum;
1512 struct timespec64 now;
1513 int i;
1514
1515 ktime_get_real_ts64(&now);
1516
1517 spin_lock(&cli->cl_mod_rpcs_lock);
1518
1519 seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n",
1520 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1521 seq_printf(seq, "modify_RPCs_in_flight: %hu\n",
1522 cli->cl_mod_rpcs_in_flight);
1523
1524 seq_puts(seq, "\n\t\t\tmodify\n");
1525 seq_puts(seq, "rpcs in flight rpcs %% cum %%\n");
1526
1527 mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist);
1528
1529 mod_cum = 0;
1530 for (i = 0; i < OBD_HIST_MAX; i++) {
1531 unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
1532
1533 mod_cum += mod;
1534 seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n",
1535 i, mod, pct(mod, mod_tot),
1536 pct(mod_cum, mod_tot));
1537 if (mod_cum == mod_tot)
1538 break;
1539 }
1540
1541 spin_unlock(&cli->cl_mod_rpcs_lock);
1542
1543 return 0;
1544}
1545EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show);
1546#undef pct
1547
1548/*
1549 * The number of modify RPCs sent in parallel is limited
1550 * because the server has a finite number of slots per client to
1551 * store request results and ensure reply reconstruction when needed.
1552 * On the client, this limit is stored in cl_max_mod_rpcs_in_flight,
1553 * which takes into account the server limit and the
1554 * cl_max_rpcs_in_flight value.
1555 * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462),
1556 * one close request is allowed above the maximum.
1557 */
1558static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli,
1559 bool close_req)
1560{
1561 bool avail;
1562
1563 /* A slot is available if
1564	 * - the number of modify RPCs in flight is less than the max, or
1565	 * - it's a close RPC and no other close request is in flight
1566 */
1567 avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight ||
1568 (close_req && !cli->cl_close_rpcs_in_flight);
1569
1570 return avail;
1571}
1572
1573static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli,
1574 bool close_req)
1575{
1576 bool avail;
1577
1578 spin_lock(&cli->cl_mod_rpcs_lock);
1579 avail = obd_mod_rpc_slot_avail_locked(cli, close_req);
1580 spin_unlock(&cli->cl_mod_rpcs_lock);
1581 return avail;
1582}
1583
1584/* Get a modify RPC slot from the obd client @cli according
1585 * to the kind of operation @opc that is going to be sent
1586 * and the intent @it of the operation if it applies.
1587 * If the maximum number of modify RPCs in flight is reached
1588 * the thread is put to sleep.
1589 * Returns the tag to be set in the request message. Tag 0
1590 * is reserved for non-modifying requests.
1591 */
1592u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
1593 struct lookup_intent *it)
1594{
1595 struct l_wait_info lwi = LWI_INTR(NULL, NULL);
1596 bool close_req = false;
1597 u16 i, max;
1598
1599 /* read-only metadata RPCs don't consume a slot on MDT
1600 * for reply reconstruction
1601 */
1602 if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
1603 it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
1604 return 0;
1605
1606 if (opc == MDS_CLOSE)
1607 close_req = true;
1608
1609 do {
1610 spin_lock(&cli->cl_mod_rpcs_lock);
1611 max = cli->cl_max_mod_rpcs_in_flight;
1612 if (obd_mod_rpc_slot_avail_locked(cli, close_req)) {
1613 /* there is a slot available */
1614 cli->cl_mod_rpcs_in_flight++;
1615 if (close_req)
1616 cli->cl_close_rpcs_in_flight++;
1617 lprocfs_oh_tally(&cli->cl_mod_rpcs_hist,
1618 cli->cl_mod_rpcs_in_flight);
1619 /* find a free tag */
1620 i = find_first_zero_bit(cli->cl_mod_tag_bitmap,
1621 max + 1);
1622 LASSERT(i < OBD_MAX_RIF_MAX);
1623 LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap));
1624 spin_unlock(&cli->cl_mod_rpcs_lock);
1625 /* tag 0 is reserved for non-modify RPCs */
1626 return i + 1;
1627 }
1628 spin_unlock(&cli->cl_mod_rpcs_lock);
1629
1630 CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n",
1631 cli->cl_import->imp_obd->obd_name, opc, max);
1632
1633 l_wait_event(cli->cl_mod_rpcs_waitq,
1634 obd_mod_rpc_slot_avail(cli, close_req), &lwi);
1635 } while (true);
1636}
1637EXPORT_SYMBOL(obd_get_mod_rpc_slot);
1638
1639/*
1640 * Put a modify RPC slot from the obd client @cli according
1641 * to the kind of operation @opc that has been sent and the
1642 * intent @it of the operation if it applies.
1643 */
1644void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
1645 struct lookup_intent *it, u16 tag)
1646{
1647 bool close_req = false;
1648
1649 if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
1650 it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
1651 return;
1652
1653 if (opc == MDS_CLOSE)
1654 close_req = true;
1655
1656 spin_lock(&cli->cl_mod_rpcs_lock);
1657 cli->cl_mod_rpcs_in_flight--;
1658 if (close_req)
1659 cli->cl_close_rpcs_in_flight--;
1660 /* release the tag in the bitmap */
1661 LASSERT(tag - 1 < OBD_MAX_RIF_MAX);
1662 LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0);
1663 spin_unlock(&cli->cl_mod_rpcs_lock);
1664 wake_up(&cli->cl_mod_rpcs_waitq);
1665}
1666EXPORT_SYMBOL(obd_put_mod_rpc_slot);
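
The obd_get_mod_rpc_slot()/obd_put_mod_rpc_slot() pair added above implements a small admission scheme: at most cl_max_mod_rpcs_in_flight modify requests run concurrently, one extra slot is kept so a single close request can always proceed, and each admitted request receives a non-zero tag from a bitmap, tag 0 being reserved for non-modifying requests. The following is a minimal user-space sketch of that scheme rather than the kernel code; the names mod_slot_state, get_mod_slot and put_mod_slot are invented for the illustration, and sleeping on a waitqueue is replaced by simply returning 0 when no slot is free.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TAGS 64	/* stand-in for OBD_MAX_RIF_MAX */

struct mod_slot_state {
	unsigned int max_in_flight;	/* like cl_max_mod_rpcs_in_flight */
	unsigned int in_flight;		/* like cl_mod_rpcs_in_flight */
	unsigned int close_in_flight;	/* like cl_close_rpcs_in_flight */
	uint64_t tag_bitmap;		/* like cl_mod_tag_bitmap, one bit per tag */
};

/* A slot is available if the in-flight count is below the limit, or the
 * request is a close and no other close is currently in flight.
 */
static bool slot_avail(const struct mod_slot_state *s, bool close_req)
{
	return s->in_flight < s->max_in_flight ||
	       (close_req && !s->close_in_flight);
}

/* Returns the tag (>= 1) for an admitted request, or 0 when no slot is
 * free; the kernel code sleeps on a waitqueue instead of failing.
 */
static unsigned int get_mod_slot(struct mod_slot_state *s, bool close_req)
{
	unsigned int i;

	if (!slot_avail(s, close_req))
		return 0;
	s->in_flight++;
	if (close_req)
		s->close_in_flight++;
	/* find the first free tag; tag 0 is reserved for non-modify RPCs */
	for (i = 0; i < MAX_TAGS; i++) {
		if (!(s->tag_bitmap & (1ULL << i))) {
			s->tag_bitmap |= 1ULL << i;
			return i + 1;
		}
	}
	return 0;	/* unreachable while max_in_flight + 1 <= MAX_TAGS */
}

static void put_mod_slot(struct mod_slot_state *s, bool close_req, unsigned int tag)
{
	s->in_flight--;
	if (close_req)
		s->close_in_flight--;
	s->tag_bitmap &= ~(1ULL << (tag - 1));	/* release the tag */
}

int main(void)
{
	struct mod_slot_state s = { .max_in_flight = 2 };
	unsigned int t1 = get_mod_slot(&s, false);
	unsigned int t2 = get_mod_slot(&s, false);
	unsigned int t3 = get_mod_slot(&s, false);	/* limit reached: 0 */
	unsigned int t4 = get_mod_slot(&s, true);	/* close uses the extra slot */

	printf("tags: %u %u %u %u\n", t1, t2, t3, t4);
	put_mod_slot(&s, false, t1);
	put_mod_slot(&s, false, t2);
	put_mod_slot(&s, true, t4);
	return 0;
}

Run as a plain C program this prints "tags: 1 2 0 3": the third plain request is refused at the limit of two, while the close request is admitted on the reserved extra slot.
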
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index be09e04b042f..9f5e8299d7e4 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -217,8 +217,8 @@ static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
217 return sprintf(buf, "%s\n", "on"); 217 return sprintf(buf, "%s\n", "on");
218} 218}
219 219
220static ssize_t health_show(struct kobject *kobj, struct attribute *attr, 220static ssize_t
221 char *buf) 221health_check_show(struct kobject *kobj, struct attribute *attr, char *buf)
222{ 222{
223 bool healthy = true; 223 bool healthy = true;
224 int i; 224 int i;
@@ -311,14 +311,14 @@ EXPORT_SYMBOL_GPL(debugfs_lustre_root);
311 311
312LUSTRE_RO_ATTR(version); 312LUSTRE_RO_ATTR(version);
313LUSTRE_RO_ATTR(pinger); 313LUSTRE_RO_ATTR(pinger);
314LUSTRE_RO_ATTR(health); 314LUSTRE_RO_ATTR(health_check);
315LUSTRE_RW_ATTR(jobid_var); 315LUSTRE_RW_ATTR(jobid_var);
316LUSTRE_RW_ATTR(jobid_name); 316LUSTRE_RW_ATTR(jobid_name);
317 317
318static struct attribute *lustre_attrs[] = { 318static struct attribute *lustre_attrs[] = {
319 &lustre_attr_version.attr, 319 &lustre_attr_version.attr,
320 &lustre_attr_pinger.attr, 320 &lustre_attr_pinger.attr,
321 &lustre_attr_health.attr, 321 &lustre_attr_health_check.attr,
322 &lustre_attr_jobid_name.attr, 322 &lustre_attr_jobid_name.attr,
323 &lustre_attr_jobid_var.attr, 323 &lustre_attr_jobid_var.attr,
324 NULL, 324 NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
deleted file mode 100644
index 41b77a30feb3..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2012, Intel Corporation.
27 */
28/*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lustre/obdclass/linux/linux-obdo.c
33 *
34 * Object Devices Class Driver
35 * These are the only exported functions, they provide some generic
36 * infrastructure for managing object devices
37 */
38
39#define DEBUG_SUBSYSTEM S_CLASS
40
41#include <linux/module.h>
42#include "../../include/obd_class.h"
43#include "../../include/lustre/lustre_idl.h"
44
45#include <linux/fs.h>
46
47void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid)
48{
49 valid &= src->o_valid;
50
51 if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
52 CDEBUG(D_INODE,
53 "valid %#llx, cur time %lu/%lu, new %llu/%llu\n",
54 src->o_valid, LTIME_S(dst->i_mtime),
55 LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
56
57 if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
58 LTIME_S(dst->i_atime) = src->o_atime;
59 if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
60 LTIME_S(dst->i_mtime) = src->o_mtime;
61 if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
62 LTIME_S(dst->i_ctime) = src->o_ctime;
63 if (valid & OBD_MD_FLSIZE)
64 i_size_write(dst, src->o_size);
65 /* optimum IO size */
66 if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
67 dst->i_blkbits = ffs(src->o_blksize) - 1;
68
69 if (dst->i_blkbits < PAGE_SHIFT)
70 dst->i_blkbits = PAGE_SHIFT;
71
72 /* allocation of space */
73 if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
74 /*
75 * XXX shouldn't overflow be checked here like in
76 * obdo_to_inode().
77 */
78 dst->i_blocks = src->o_blocks;
79}
80EXPORT_SYMBOL(obdo_refresh_inode);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 43797f106745..736ea1067c93 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -43,8 +43,9 @@
43 43
44#define DEBUG_SUBSYSTEM S_LOG 44#define DEBUG_SUBSYSTEM S_LOG
45 45
46#include "../include/obd_class.h" 46#include "../include/llog_swab.h"
47#include "../include/lustre_log.h" 47#include "../include/lustre_log.h"
48#include "../include/obd_class.h"
48#include "llog_internal.h" 49#include "llog_internal.h"
49 50
50/* 51/*
@@ -80,8 +81,7 @@ static void llog_free_handle(struct llog_handle *loghandle)
80 LASSERT(list_empty(&loghandle->u.phd.phd_entry)); 81 LASSERT(list_empty(&loghandle->u.phd.phd_entry));
81 else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) 82 else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
82 LASSERT(list_empty(&loghandle->u.chd.chd_head)); 83 LASSERT(list_empty(&loghandle->u.chd.chd_head));
83 LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE); 84 kvfree(loghandle->lgh_hdr);
84 kfree(loghandle->lgh_hdr);
85out: 85out:
86 kfree(loghandle); 86 kfree(loghandle);
87} 87}
@@ -115,20 +115,29 @@ static int llog_read_header(const struct lu_env *env,
115 rc = lop->lop_read_header(env, handle); 115 rc = lop->lop_read_header(env, handle);
116 if (rc == LLOG_EEMPTY) { 116 if (rc == LLOG_EEMPTY) {
117 struct llog_log_hdr *llh = handle->lgh_hdr; 117 struct llog_log_hdr *llh = handle->lgh_hdr;
118 size_t len;
118 119
120 /* lrh_len should be initialized in llog_init_handle */
119 handle->lgh_last_idx = 0; /* header is record with index 0 */ 121 handle->lgh_last_idx = 0; /* header is record with index 0 */
120 llh->llh_count = 1; /* for the header record */ 122 llh->llh_count = 1; /* for the header record */
121 llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC; 123 llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
122 llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE; 124 LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE);
123 llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE; 125 llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
124 llh->llh_hdr.lrh_index = 0; 126 llh->llh_hdr.lrh_index = 0;
125 llh->llh_tail.lrt_index = 0;
126 llh->llh_timestamp = ktime_get_real_seconds(); 127 llh->llh_timestamp = ktime_get_real_seconds();
127 if (uuid) 128 if (uuid)
128 memcpy(&llh->llh_tgtuuid, uuid, 129 memcpy(&llh->llh_tgtuuid, uuid,
129 sizeof(llh->llh_tgtuuid)); 130 sizeof(llh->llh_tgtuuid));
130 llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap); 131 llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
131 ext2_set_bit(0, llh->llh_bitmap); 132 /*
133		 * Since updating the llog header might also call this function,
134 * let's reset the bitmap to 0 here
135 */
136 len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
137 memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
138 ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
139 LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
140 LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
132 rc = 0; 141 rc = 0;
133 } 142 }
134 return rc; 143 return rc;
@@ -137,16 +146,19 @@ static int llog_read_header(const struct lu_env *env,
137int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, 146int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
138 int flags, struct obd_uuid *uuid) 147 int flags, struct obd_uuid *uuid)
139{ 148{
149 int chunk_size = handle->lgh_ctxt->loc_chunk_size;
140 enum llog_flag fmt = flags & LLOG_F_EXT_MASK; 150 enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
141 struct llog_log_hdr *llh; 151 struct llog_log_hdr *llh;
142 int rc; 152 int rc;
143 153
144 LASSERT(!handle->lgh_hdr); 154 LASSERT(!handle->lgh_hdr);
145 155
146 llh = kzalloc(sizeof(*llh), GFP_NOFS); 156 LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
157 llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS);
147 if (!llh) 158 if (!llh)
148 return -ENOMEM; 159 return -ENOMEM;
149 handle->lgh_hdr = llh; 160 handle->lgh_hdr = llh;
161 handle->lgh_hdr_size = chunk_size;
150 /* first assign flags to use llog_client_ops */ 162 /* first assign flags to use llog_client_ops */
151 llh->llh_flags = flags; 163 llh->llh_flags = flags;
152 rc = llog_read_header(env, handle, uuid); 164 rc = llog_read_header(env, handle, uuid);
@@ -189,6 +201,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
189 LASSERT(list_empty(&handle->u.chd.chd_head)); 201 LASSERT(list_empty(&handle->u.chd.chd_head));
190 INIT_LIST_HEAD(&handle->u.chd.chd_head); 202 INIT_LIST_HEAD(&handle->u.chd.chd_head);
191 llh->llh_size = sizeof(struct llog_logid_rec); 203 llh->llh_size = sizeof(struct llog_logid_rec);
204 llh->llh_flags |= LLOG_F_IS_FIXSIZE;
192 } else if (!(flags & LLOG_F_IS_PLAIN)) { 205 } else if (!(flags & LLOG_F_IS_PLAIN)) {
193 CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n", 206 CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
194 handle->lgh_ctxt->loc_obd->obd_name, 207 handle->lgh_ctxt->loc_obd->obd_name,
@@ -198,7 +211,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
198 llh->llh_flags |= fmt; 211 llh->llh_flags |= fmt;
199out: 212out:
200 if (rc) { 213 if (rc) {
201 kfree(llh); 214 kvfree(llh);
202 handle->lgh_hdr = NULL; 215 handle->lgh_hdr = NULL;
203 } 216 }
204 return rc; 217 return rc;
@@ -212,15 +225,21 @@ static int llog_process_thread(void *arg)
212 struct llog_log_hdr *llh = loghandle->lgh_hdr; 225 struct llog_log_hdr *llh = loghandle->lgh_hdr;
213 struct llog_process_cat_data *cd = lpi->lpi_catdata; 226 struct llog_process_cat_data *cd = lpi->lpi_catdata;
214 char *buf; 227 char *buf;
215 __u64 cur_offset = LLOG_CHUNK_SIZE; 228 u64 cur_offset, tmp_offset;
216 __u64 last_offset; 229 int chunk_size;
217 int rc = 0, index = 1, last_index; 230 int rc = 0, index = 1, last_index;
218 int saved_index = 0; 231 int saved_index = 0;
219 int last_called_index = 0; 232 int last_called_index = 0;
220 233
221 LASSERT(llh); 234 if (!llh)
235 return -EINVAL;
236
237 cur_offset = llh->llh_hdr.lrh_len;
238 chunk_size = llh->llh_hdr.lrh_len;
239 /* expect chunk_size to be power of two */
240 LASSERT(is_power_of_2(chunk_size));
222 241
223 buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS); 242 buf = libcfs_kvzalloc(chunk_size, GFP_NOFS);
224 if (!buf) { 243 if (!buf) {
225 lpi->lpi_rc = -ENOMEM; 244 lpi->lpi_rc = -ENOMEM;
226 return 0; 245 return 0;
@@ -233,41 +252,53 @@ static int llog_process_thread(void *arg)
233 if (cd && cd->lpcd_last_idx) 252 if (cd && cd->lpcd_last_idx)
234 last_index = cd->lpcd_last_idx; 253 last_index = cd->lpcd_last_idx;
235 else 254 else
236 last_index = LLOG_BITMAP_BYTES * 8 - 1; 255 last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;
237
238 /* Record is not in this buffer. */
239 if (index > last_index)
240 goto out;
241 256
242 while (rc == 0) { 257 while (rc == 0) {
258 unsigned int buf_offset = 0;
243 struct llog_rec_hdr *rec; 259 struct llog_rec_hdr *rec;
260 bool partial_chunk;
261 off_t chunk_offset;
244 262
245 /* skip records not set in bitmap */ 263 /* skip records not set in bitmap */
246 while (index <= last_index && 264 while (index <= last_index &&
247 !ext2_test_bit(index, llh->llh_bitmap)) 265 !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
248 ++index; 266 ++index;
249 267
250 LASSERT(index <= last_index + 1); 268 if (index > last_index)
251 if (index == last_index + 1)
252 break; 269 break;
253repeat: 270
254 CDEBUG(D_OTHER, "index: %d last_index %d\n", 271 CDEBUG(D_OTHER, "index: %d last_index %d\n",
255 index, last_index); 272 index, last_index);
256 273repeat:
257 /* get the buf with our target record; avoid old garbage */ 274 /* get the buf with our target record; avoid old garbage */
258 memset(buf, 0, LLOG_CHUNK_SIZE); 275 memset(buf, 0, chunk_size);
259 last_offset = cur_offset;
260 rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index, 276 rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
261 index, &cur_offset, buf, LLOG_CHUNK_SIZE); 277 index, &cur_offset, buf, chunk_size);
262 if (rc) 278 if (rc)
263 goto out; 279 goto out;
264 280
281 /*
282 * NB: after llog_next_block() call the cur_offset is the
283 * offset of the next block after read one.
284 * The absolute offset of the current chunk is calculated
285 * from cur_offset value and stored in chunk_offset variable.
286 */
287 tmp_offset = cur_offset;
288 if (do_div(tmp_offset, chunk_size)) {
289 partial_chunk = true;
290 chunk_offset = cur_offset & ~(chunk_size - 1);
291 } else {
292 partial_chunk = false;
293 chunk_offset = cur_offset - chunk_size;
294 }
295
265 /* NB: when rec->lrh_len is accessed it is already swabbed 296 /* NB: when rec->lrh_len is accessed it is already swabbed
266 * since it is used at the "end" of the loop and the rec 297 * since it is used at the "end" of the loop and the rec
267 * swabbing is done at the beginning of the loop. 298 * swabbing is done at the beginning of the loop.
268 */ 299 */
269 for (rec = (struct llog_rec_hdr *)buf; 300 for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
270 (char *)rec < buf + LLOG_CHUNK_SIZE; 301 (char *)rec < buf + chunk_size;
271 rec = llog_rec_hdr_next(rec)) { 302 rec = llog_rec_hdr_next(rec)) {
272 CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n", 303 CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
273 rec, rec->lrh_type); 304 rec, rec->lrh_type);
@@ -278,15 +309,29 @@ repeat:
278 CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n", 309 CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
279 rec->lrh_type, rec->lrh_index); 310 rec->lrh_type, rec->lrh_index);
280 311
281 if (rec->lrh_index == 0) { 312 /*
282 /* probably another rec just got added? */ 313 * for partial chunk the end of it is zeroed, check
283 rc = 0; 314 * for index 0 to distinguish it.
284 if (index <= loghandle->lgh_last_idx) 315 */
285 goto repeat; 316 if (partial_chunk && !rec->lrh_index) {
286 goto out; /* no more records */ 317 /* concurrent llog_add() might add new records
318				 * while the llog is being processed; check
319				 * whether that happened and re-read the
320				 * current chunk if so.
321 */
322 if (index > loghandle->lgh_last_idx) {
323 rc = 0;
324 goto out;
325 }
326 CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
327 index, loghandle->lgh_last_idx);
328 /* save offset inside buffer for the re-read */
329 buf_offset = (char *)rec - (char *)buf;
330 cur_offset = chunk_offset;
331 goto repeat;
287 } 332 }
288 if (rec->lrh_len == 0 || 333
289 rec->lrh_len > LLOG_CHUNK_SIZE) { 334 if (!rec->lrh_len || rec->lrh_len > chunk_size) {
290 CWARN("invalid length %d in llog record for index %d/%d\n", 335 CWARN("invalid length %d in llog record for index %d/%d\n",
291 rec->lrh_len, 336 rec->lrh_len,
292 rec->lrh_index, index); 337 rec->lrh_index, index);
@@ -300,32 +345,38 @@ repeat:
300 continue; 345 continue;
301 } 346 }
302 347
348 if (rec->lrh_index != index) {
349 CERROR("%s: Invalid record: index %u but expected %u\n",
350 loghandle->lgh_ctxt->loc_obd->obd_name,
351 rec->lrh_index, index);
352 rc = -ERANGE;
353 goto out;
354 }
355
303 CDEBUG(D_OTHER, 356 CDEBUG(D_OTHER,
304 "lrh_index: %d lrh_len: %d (%d remains)\n", 357 "lrh_index: %d lrh_len: %d (%d remains)\n",
305 rec->lrh_index, rec->lrh_len, 358 rec->lrh_index, rec->lrh_len,
306 (int)(buf + LLOG_CHUNK_SIZE - (char *)rec)); 359 (int)(buf + chunk_size - (char *)rec));
307 360
308 loghandle->lgh_cur_idx = rec->lrh_index; 361 loghandle->lgh_cur_idx = rec->lrh_index;
309 loghandle->lgh_cur_offset = (char *)rec - (char *)buf + 362 loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
310 last_offset; 363 chunk_offset;
311 364
312 /* if set, process the callback on this record */ 365 /* if set, process the callback on this record */
313 if (ext2_test_bit(index, llh->llh_bitmap)) { 366 if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
314 rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec, 367 rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
315 lpi->lpi_cbdata); 368 lpi->lpi_cbdata);
316 last_called_index = index; 369 last_called_index = index;
317 if (rc) 370 if (rc)
318 goto out; 371 goto out;
319 } else {
320 CDEBUG(D_OTHER, "Skipped index %d\n", index);
321 } 372 }
322 373
323 /* next record, still in buffer? */ 374 /* exit if the last index is reached */
324 ++index; 375 if (index >= last_index) {
325 if (index > last_index) {
326 rc = 0; 376 rc = 0;
327 goto out; 377 goto out;
328 } 378 }
379 index++;
329 } 380 }
330 } 381 }
331 382
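
The new offset handling in llog_process_thread() reads a chunk, after which cur_offset points just past the data that llog_next_block() returned; whether that read was a full chunk decides how the start of the chunk is recovered. Below is a small stand-alone check of that arithmetic, assuming only a power-of-two chunk size; the helper chunk_pos() is hypothetical and not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the partial_chunk/chunk_offset computation: cur_offset is the
 * file offset just past the data returned by the block read.
 */
static void chunk_pos(uint64_t cur_offset, uint64_t chunk_size,
		      bool *partial, uint64_t *chunk_offset)
{
	if (cur_offset % chunk_size) {
		*partial = true;				/* read stopped inside a chunk */
		*chunk_offset = cur_offset & ~(chunk_size - 1);	/* round down to chunk start */
	} else {
		*partial = false;				/* a whole chunk was read */
		*chunk_offset = cur_offset - chunk_size;	/* it started one chunk earlier */
	}
}

int main(void)
{
	const uint64_t chunk_size = 8192;	/* must be a power of two */
	const uint64_t offsets[] = { 8192, 16384, 20000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		bool partial;
		uint64_t chunk_offset;

		chunk_pos(offsets[i], chunk_size, &partial, &chunk_offset);
		printf("cur_offset=%llu partial=%d chunk_offset=%llu\n",
		       (unsigned long long)offsets[i], partial,
		       (unsigned long long)chunk_offset);
	}
	return 0;
}

For a chunk size of 8192 this prints chunk offsets 0, 8192 and 16384 for cur_offset values 8192, 16384 and 20000, with only the last read flagged as partial.
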
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index a4277d684614..8574ad401f66 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -158,6 +158,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
158 mutex_init(&ctxt->loc_mutex); 158 mutex_init(&ctxt->loc_mutex);
159 ctxt->loc_exp = class_export_get(disk_obd->obd_self_export); 159 ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
160 ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED; 160 ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
161 ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE;
161 162
162 rc = llog_group_set_ctxt(olg, ctxt, index); 163 rc = llog_group_set_ctxt(olg, ctxt, index);
163 if (rc) { 164 if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 8c4c1b3f1b45..723c212c6747 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -38,6 +38,7 @@
38 38
39#define DEBUG_SUBSYSTEM S_LOG 39#define DEBUG_SUBSYSTEM S_LOG
40 40
41#include "../include/llog_swab.h"
41#include "../include/lustre_log.h" 42#include "../include/lustre_log.h"
42 43
43static void print_llogd_body(struct llogd_body *d) 44static void print_llogd_body(struct llogd_body *d)
@@ -244,7 +245,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
244 __swab32s(&llh->llh_flags); 245 __swab32s(&llh->llh_flags);
245 __swab32s(&llh->llh_size); 246 __swab32s(&llh->llh_size);
246 __swab32s(&llh->llh_cat_idx); 247 __swab32s(&llh->llh_cat_idx);
247 tail = &llh->llh_tail; 248 tail = LLOG_HDR_TAIL(llh);
248 break; 249 break;
249 } 250 }
250 case LLOG_LOGID_MAGIC: 251 case LLOG_LOGID_MAGIC:
@@ -290,8 +291,10 @@ static void print_llog_hdr(struct llog_log_hdr *h)
290 CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags); 291 CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
291 CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size); 292 CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
292 CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx); 293 CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
293 CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index); 294 CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n",
294 CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len); 295 LLOG_HDR_TAIL(h)->lrt_index);
296 CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n",
297 LLOG_HDR_TAIL(h)->lrt_len);
295} 298}
296 299
297void lustre_swab_llog_hdr(struct llog_log_hdr *h) 300void lustre_swab_llog_hdr(struct llog_log_hdr *h)
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 852a5acfefab..2c99717b0aba 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -100,9 +100,13 @@ static const char * const obd_connect_names[] = {
100 "lfsck", 100 "lfsck",
101 "unknown", 101 "unknown",
102 "unlink_close", 102 "unlink_close",
103 "unknown", 103 "multi_mod_rpcs",
104 "dir_stripe", 104 "dir_stripe",
105 "unknown", 105 "subtree",
106 "lock_ahead",
107 "bulk_mbits",
108 "compact_obdo",
109 "second_flags",
106 NULL 110 NULL
107}; 111};
108 112
@@ -127,7 +131,7 @@ EXPORT_SYMBOL(obd_connect_flags2str);
127static void obd_connect_data_seqprint(struct seq_file *m, 131static void obd_connect_data_seqprint(struct seq_file *m,
128 struct obd_connect_data *ocd) 132 struct obd_connect_data *ocd)
129{ 133{
130 int flags; 134 u64 flags;
131 135
132 LASSERT(ocd); 136 LASSERT(ocd);
133 flags = ocd->ocd_connect_flags; 137 flags = ocd->ocd_connect_flags;
@@ -172,6 +176,9 @@ static void obd_connect_data_seqprint(struct seq_file *m,
172 if (flags & OBD_CONNECT_MAXBYTES) 176 if (flags & OBD_CONNECT_MAXBYTES)
173 seq_printf(m, " max_object_bytes: %llx\n", 177 seq_printf(m, " max_object_bytes: %llx\n",
174 ocd->ocd_maxbytes); 178 ocd->ocd_maxbytes);
179 if (flags & OBD_CONNECT_MULTIMODRPCS)
180 seq_printf(m, " max_mod_rpcs: %hu\n",
181 ocd->ocd_maxmodrpcs);
175} 182}
176 183
177int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, 184int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
@@ -396,10 +403,17 @@ int lprocfs_wr_uint(struct file *file, const char __user *buffer,
396 char dummy[MAX_STRING_SIZE + 1], *end; 403 char dummy[MAX_STRING_SIZE + 1], *end;
397 unsigned long tmp; 404 unsigned long tmp;
398 405
399 dummy[MAX_STRING_SIZE] = '\0'; 406 if (count >= sizeof(dummy))
400 if (copy_from_user(dummy, buffer, MAX_STRING_SIZE)) 407 return -EINVAL;
408
409 if (count == 0)
410 return 0;
411
412 if (copy_from_user(dummy, buffer, count))
401 return -EFAULT; 413 return -EFAULT;
402 414
415 dummy[count] = '\0';
416
403 tmp = simple_strtoul(dummy, &end, 0); 417 tmp = simple_strtoul(dummy, &end, 0);
404 if (dummy == end) 418 if (dummy == end)
405 return -EINVAL; 419 return -EINVAL;
@@ -1275,7 +1289,8 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name,
1275EXPORT_SYMBOL_GPL(ldebugfs_register_stats); 1289EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
1276 1290
1277void lprocfs_counter_init(struct lprocfs_stats *stats, int index, 1291void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
1278 unsigned conf, const char *name, const char *units) 1292 unsigned int conf, const char *name,
1293 const char *units)
1279{ 1294{
1280 struct lprocfs_counter_header *header; 1295 struct lprocfs_counter_header *header;
1281 struct lprocfs_counter *percpu_cntr; 1296 struct lprocfs_counter *percpu_cntr;
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 054e567e6c8d..7971562a3efd 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -68,6 +68,7 @@ enum {
68 68
69#define LU_SITE_BITS_MIN 12 69#define LU_SITE_BITS_MIN 12
70#define LU_SITE_BITS_MAX 24 70#define LU_SITE_BITS_MAX 24
71#define LU_SITE_BITS_MAX_CL 19
71/** 72/**
72 * total 256 buckets, we don't want too many buckets because: 73 * total 256 buckets, we don't want too many buckets because:
73 * - consume too much memory 74 * - consume too much memory
@@ -338,7 +339,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
338 struct cfs_hash_bd bd2; 339 struct cfs_hash_bd bd2;
339 struct list_head dispose; 340 struct list_head dispose;
340 int did_sth; 341 int did_sth;
341 unsigned int start; 342 unsigned int start = 0;
342 int count; 343 int count;
343 int bnr; 344 int bnr;
344 unsigned int i; 345 unsigned int i;
@@ -351,7 +352,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
351 * Under LRU list lock, scan LRU list and move unreferenced objects to 352 * Under LRU list lock, scan LRU list and move unreferenced objects to
352 * the dispose list, removing them from LRU and hash table. 353 * the dispose list, removing them from LRU and hash table.
353 */ 354 */
354 start = s->ls_purge_start; 355 if (nr != ~0)
356 start = s->ls_purge_start;
355 bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1; 357 bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
356 again: 358 again:
357 /* 359 /*
@@ -877,6 +879,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
877 unsigned long cache_size; 879 unsigned long cache_size;
878 unsigned long bits; 880 unsigned long bits;
879 881
882 if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME))
883 bits_max = LU_SITE_BITS_MAX_CL;
884
880 /* 885 /*
881 * Calculate hash table size, assuming that we want reasonable 886 * Calculate hash table size, assuming that we want reasonable
882 * performance when 20% of total memory is occupied by cache of 887 * performance when 20% of total memory is occupied by cache of
@@ -909,8 +914,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
909 return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max); 914 return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
910} 915}
911 916
912static unsigned lu_obj_hop_hash(struct cfs_hash *hs, 917static unsigned int lu_obj_hop_hash(struct cfs_hash *hs,
913 const void *key, unsigned mask) 918 const void *key, unsigned int mask)
914{ 919{
915 struct lu_fid *fid = (struct lu_fid *)key; 920 struct lu_fid *fid = (struct lu_fid *)key;
916 __u32 hash; 921 __u32 hash;
@@ -1311,6 +1316,7 @@ enum {
1311static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; 1316static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1312 1317
1313static DEFINE_SPINLOCK(lu_keys_guard); 1318static DEFINE_SPINLOCK(lu_keys_guard);
1319static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
1314 1320
1315/** 1321/**
1316 * Global counter incremented whenever key is registered, unregistered, 1322 * Global counter incremented whenever key is registered, unregistered,
@@ -1318,7 +1324,7 @@ static DEFINE_SPINLOCK(lu_keys_guard);
1318 * lu_context_refill(). No locking is provided, as initialization and shutdown 1324 * lu_context_refill(). No locking is provided, as initialization and shutdown
1319 * are supposed to be externally serialized. 1325 * are supposed to be externally serialized.
1320 */ 1326 */
1321static unsigned key_set_version; 1327static unsigned int key_set_version;
1322 1328
1323/** 1329/**
1324 * Register new key. 1330 * Register new key.
@@ -1385,6 +1391,19 @@ void lu_context_key_degister(struct lu_context_key *key)
1385 ++key_set_version; 1391 ++key_set_version;
1386 spin_lock(&lu_keys_guard); 1392 spin_lock(&lu_keys_guard);
1387 key_fini(&lu_shrink_env.le_ctx, key->lct_index); 1393 key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1394
1395 /**
1396 * Wait until all transient contexts referencing this key have
1397 * run lu_context_key::lct_fini() method.
1398 */
1399 while (atomic_read(&key->lct_used) > 1) {
1400 spin_unlock(&lu_keys_guard);
1401 CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
1402 key->lct_owner ? key->lct_owner->name : "", key,
1403 atomic_read(&key->lct_used));
1404 schedule();
1405 spin_lock(&lu_keys_guard);
1406 }
1388 if (lu_keys[key->lct_index]) { 1407 if (lu_keys[key->lct_index]) {
1389 lu_keys[key->lct_index] = NULL; 1408 lu_keys[key->lct_index] = NULL;
1390 lu_ref_fini(&key->lct_reference); 1409 lu_ref_fini(&key->lct_reference);
@@ -1507,14 +1526,25 @@ void lu_context_key_quiesce(struct lu_context_key *key)
1507 1526
1508 if (!(key->lct_tags & LCT_QUIESCENT)) { 1527 if (!(key->lct_tags & LCT_QUIESCENT)) {
1509 /* 1528 /*
1510 * XXX layering violation.
1511 */
1512 cl_env_cache_purge(~0);
1513 key->lct_tags |= LCT_QUIESCENT;
1514 /*
1515 * XXX memory barrier has to go here. 1529 * XXX memory barrier has to go here.
1516 */ 1530 */
1517 spin_lock(&lu_keys_guard); 1531 spin_lock(&lu_keys_guard);
1532 key->lct_tags |= LCT_QUIESCENT;
1533
1534 /**
1535 * Wait until all lu_context_key::lct_init() methods
1536 * have completed.
1537 */
1538 while (atomic_read(&lu_key_initing_cnt) > 0) {
1539 spin_unlock(&lu_keys_guard);
1540 CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n",
1541 key->lct_owner ? key->lct_owner->name : "",
1542 key, atomic_read(&key->lct_used),
1543 atomic_read(&lu_key_initing_cnt));
1544 schedule();
1545 spin_lock(&lu_keys_guard);
1546 }
1547
1518 list_for_each_entry(ctx, &lu_context_remembered, lc_remember) 1548 list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
1519 key_fini(ctx, key->lct_index); 1549 key_fini(ctx, key->lct_index);
1520 spin_unlock(&lu_keys_guard); 1550 spin_unlock(&lu_keys_guard);
@@ -1546,6 +1576,19 @@ static int keys_fill(struct lu_context *ctx)
1546{ 1576{
1547 unsigned int i; 1577 unsigned int i;
1548 1578
1579 /*
1580 * A serialisation with lu_context_key_quiesce() is needed, but some
1581 * "key->lct_init()" are calling kernel memory allocation routine and
1582 * can't be called while holding a spin_lock.
1583 * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
1584 * to ensure the start of the serialisation.
1585 * An atomic_t variable is still used, in order not to reacquire the
1586 * lock when decrementing the counter.
1587 */
1588 spin_lock(&lu_keys_guard);
1589 atomic_inc(&lu_key_initing_cnt);
1590 spin_unlock(&lu_keys_guard);
1591
1549 LINVRNT(ctx->lc_value); 1592 LINVRNT(ctx->lc_value);
1550 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { 1593 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1551 struct lu_context_key *key; 1594 struct lu_context_key *key;
@@ -1563,12 +1606,19 @@ static int keys_fill(struct lu_context *ctx)
1563 LINVRNT(key->lct_init); 1606 LINVRNT(key->lct_init);
1564 LINVRNT(key->lct_index == i); 1607 LINVRNT(key->lct_index == i);
1565 1608
1609 LASSERT(key->lct_owner);
1610 if (!(ctx->lc_tags & LCT_NOREF) &&
1611 !try_module_get(key->lct_owner)) {
1612 /* module is unloading, skip this key */
1613 continue;
1614 }
1615
1566 value = key->lct_init(ctx, key); 1616 value = key->lct_init(ctx, key);
1567 if (IS_ERR(value)) 1617 if (unlikely(IS_ERR(value))) {
1618 atomic_dec(&lu_key_initing_cnt);
1568 return PTR_ERR(value); 1619 return PTR_ERR(value);
1620 }
1569 1621
1570 if (!(ctx->lc_tags & LCT_NOREF))
1571 try_module_get(key->lct_owner);
1572 lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); 1622 lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1573 atomic_inc(&key->lct_used); 1623 atomic_inc(&key->lct_used);
1574 /* 1624 /*
@@ -1582,6 +1632,7 @@ static int keys_fill(struct lu_context *ctx)
1582 } 1632 }
1583 ctx->lc_version = key_set_version; 1633 ctx->lc_version = key_set_version;
1584 } 1634 }
1635 atomic_dec(&lu_key_initing_cnt);
1585 return 0; 1636 return 0;
1586} 1637}
1587 1638
@@ -1663,6 +1714,9 @@ void lu_context_exit(struct lu_context *ctx)
1663 ctx->lc_state = LCS_LEFT; 1714 ctx->lc_state = LCS_LEFT;
1664 if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { 1715 if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
1665 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { 1716 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1717 /* could race with key quiescency */
1718 if (ctx->lc_tags & LCT_REMEMBER)
1719 spin_lock(&lu_keys_guard);
1666 if (ctx->lc_value[i]) { 1720 if (ctx->lc_value[i]) {
1667 struct lu_context_key *key; 1721 struct lu_context_key *key;
1668 1722
@@ -1671,6 +1725,8 @@ void lu_context_exit(struct lu_context *ctx)
1671 key->lct_exit(ctx, 1725 key->lct_exit(ctx,
1672 key, ctx->lc_value[i]); 1726 key, ctx->lc_value[i]);
1673 } 1727 }
1728 if (ctx->lc_tags & LCT_REMEMBER)
1729 spin_unlock(&lu_keys_guard);
1674 } 1730 }
1675 } 1731 }
1676} 1732}
@@ -1930,7 +1986,7 @@ int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
1930 memset(&stats, 0, sizeof(stats)); 1986 memset(&stats, 0, sizeof(stats));
1931 lu_site_stats_get(s->ls_obj_hash, &stats, 1); 1987 lu_site_stats_get(s->ls_obj_hash, &stats, 1);
1932 1988
1933 seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n", 1989 seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n",
1934 stats.lss_busy, 1990 stats.lss_busy,
1935 stats.lss_total, 1991 stats.lss_total,
1936 stats.lss_populated, 1992 stats.lss_populated,
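
The lu_key_initing_cnt logic added to keys_fill() and lu_context_key_quiesce() above is a counter-drain handshake: the filling side bumps a counter under lu_keys_guard before running the (possibly sleeping) key init outside the lock, and the quiescing side loops, dropping the lock and rescheduling, until the counter has drained back to zero. A stripped-down user-space rendering of the same pattern, with made-up names (guard, initing_cnt, do_fill, do_quiesce) and C11 atomics plus pthreads standing in for the kernel primitives, might look like this:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;	/* counterpart of lu_keys_guard */
static atomic_int initing_cnt;					/* counterpart of lu_key_initing_cnt */

/* Fill side: announce the init under the lock, then allocate outside it,
 * precisely because the allocation may sleep and must not hold the lock.
 */
static void *do_fill(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&guard);
	atomic_fetch_add(&initing_cnt, 1);
	pthread_mutex_unlock(&guard);

	free(malloc(64));		/* stands in for key->lct_init() allocating memory */

	atomic_fetch_sub(&initing_cnt, 1);
	return NULL;
}

/* Quiesce side: wait until every in-progress init has finished, dropping
 * the lock and yielding between checks, as the kernel code does with
 * schedule().
 */
static void do_quiesce(void)
{
	pthread_mutex_lock(&guard);
	while (atomic_load(&initing_cnt) > 0) {
		pthread_mutex_unlock(&guard);
		sched_yield();
		pthread_mutex_lock(&guard);
	}
	/* no init is running here, and none can have started unobserved */
	pthread_mutex_unlock(&guard);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, do_fill, NULL);
	do_quiesce();
	pthread_join(t, NULL);
	printf("initing_cnt=%d\n", atomic_load(&initing_cnt));
	return 0;
}
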
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index bbed1b72d52e..9ca84c7d49de 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -35,12 +35,15 @@
35 */ 35 */
36 36
37#define DEBUG_SUBSYSTEM S_CLASS 37#define DEBUG_SUBSYSTEM S_CLASS
38#include "../include/obd_class.h" 38
39#include <linux/string.h> 39#include <linux/string.h>
40
40#include "../include/lustre/lustre_ioctl.h" 41#include "../include/lustre/lustre_ioctl.h"
41#include "../include/lustre_log.h" 42#include "../include/llog_swab.h"
42#include "../include/lprocfs_status.h" 43#include "../include/lprocfs_status.h"
44#include "../include/lustre_log.h"
43#include "../include/lustre_param.h" 45#include "../include/lustre_param.h"
46#include "../include/obd_class.h"
44 47
45#include "llog_internal.h" 48#include "llog_internal.h"
46 49
@@ -446,7 +449,7 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
446 LASSERT(obd->obd_self_export); 449 LASSERT(obd->obd_self_export);
447 450
448 /* Precleanup, we must make sure all exports get destroyed. */ 451 /* Precleanup, we must make sure all exports get destroyed. */
449 err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS); 452 err = obd_precleanup(obd);
450 if (err) 453 if (err)
451 CERROR("Precleanup %s returned %d\n", 454 CERROR("Precleanup %s returned %d\n",
452 obd->obd_name, err); 455 obd->obd_name, err);
@@ -585,16 +588,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
585} 588}
586 589
587static LIST_HEAD(lustre_profile_list); 590static LIST_HEAD(lustre_profile_list);
591static DEFINE_SPINLOCK(lustre_profile_list_lock);
588 592
589struct lustre_profile *class_get_profile(const char *prof) 593struct lustre_profile *class_get_profile(const char *prof)
590{ 594{
591 struct lustre_profile *lprof; 595 struct lustre_profile *lprof;
592 596
597 spin_lock(&lustre_profile_list_lock);
593 list_for_each_entry(lprof, &lustre_profile_list, lp_list) { 598 list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
594 if (!strcmp(lprof->lp_profile, prof)) { 599 if (!strcmp(lprof->lp_profile, prof)) {
600 lprof->lp_refs++;
601 spin_unlock(&lustre_profile_list_lock);
595 return lprof; 602 return lprof;
596 } 603 }
597 } 604 }
605 spin_unlock(&lustre_profile_list_lock);
598 return NULL; 606 return NULL;
599} 607}
600EXPORT_SYMBOL(class_get_profile); 608EXPORT_SYMBOL(class_get_profile);
@@ -639,7 +647,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
639 } 647 }
640 } 648 }
641 649
650 spin_lock(&lustre_profile_list_lock);
651 lprof->lp_refs = 1;
652 lprof->lp_list_deleted = false;
642 list_add(&lprof->lp_list, &lustre_profile_list); 653 list_add(&lprof->lp_list, &lustre_profile_list);
654 spin_unlock(&lustre_profile_list_lock);
643 return err; 655 return err;
644 656
645free_lp_dt: 657free_lp_dt:
@@ -659,27 +671,59 @@ void class_del_profile(const char *prof)
659 671
660 lprof = class_get_profile(prof); 672 lprof = class_get_profile(prof);
661 if (lprof) { 673 if (lprof) {
674 spin_lock(&lustre_profile_list_lock);
675 /* because get profile increments the ref counter */
676 lprof->lp_refs--;
662 list_del(&lprof->lp_list); 677 list_del(&lprof->lp_list);
663 kfree(lprof->lp_profile); 678 lprof->lp_list_deleted = true;
664 kfree(lprof->lp_dt); 679 spin_unlock(&lustre_profile_list_lock);
665 kfree(lprof->lp_md); 680
666 kfree(lprof); 681 class_put_profile(lprof);
667 } 682 }
668} 683}
669EXPORT_SYMBOL(class_del_profile); 684EXPORT_SYMBOL(class_del_profile);
670 685
686void class_put_profile(struct lustre_profile *lprof)
687{
688 spin_lock(&lustre_profile_list_lock);
689 if (--lprof->lp_refs > 0) {
690 LASSERT(lprof->lp_refs > 0);
691 spin_unlock(&lustre_profile_list_lock);
692 return;
693 }
694 spin_unlock(&lustre_profile_list_lock);
695
696 /* confirm not a negative number */
697 LASSERT(!lprof->lp_refs);
698
699 /*
700	 * At least one class_del_profile() or class_del_profiles() call must
701	 * be made on the target profile or lustre_profile_list will be corrupted
702 */
703 LASSERT(lprof->lp_list_deleted);
704 kfree(lprof->lp_profile);
705 kfree(lprof->lp_dt);
706 kfree(lprof->lp_md);
707 kfree(lprof);
708}
709EXPORT_SYMBOL(class_put_profile);
710
671/* COMPAT_146 */ 711/* COMPAT_146 */
672void class_del_profiles(void) 712void class_del_profiles(void)
673{ 713{
674 struct lustre_profile *lprof, *n; 714 struct lustre_profile *lprof, *n;
675 715
716 spin_lock(&lustre_profile_list_lock);
676 list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) { 717 list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
677 list_del(&lprof->lp_list); 718 list_del(&lprof->lp_list);
678 kfree(lprof->lp_profile); 719 lprof->lp_list_deleted = true;
679 kfree(lprof->lp_dt); 720 spin_unlock(&lustre_profile_list_lock);
680 kfree(lprof->lp_md); 721
681 kfree(lprof); 722 class_put_profile(lprof);
723
724 spin_lock(&lustre_profile_list_lock);
682 } 725 }
726 spin_unlock(&lustre_profile_list_lock);
683} 727}
684EXPORT_SYMBOL(class_del_profiles); 728EXPORT_SYMBOL(class_del_profiles);
685 729
@@ -1406,8 +1450,8 @@ EXPORT_SYMBOL(class_manual_cleanup);
1406 * uuid<->export lustre hash operations 1450 * uuid<->export lustre hash operations
1407 */ 1451 */
1408 1452
1409static unsigned 1453static unsigned int
1410uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask) 1454uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
1411{ 1455{
1412 return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid, 1456 return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
1413 sizeof(((struct obd_uuid *)key)->uuid), mask); 1457 sizeof(((struct obd_uuid *)key)->uuid), mask);
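
The class_get_profile()/class_put_profile() rework above turns profile lifetime into plain reference counting: the list owns one reference, every lookup takes another, deletion only unlinks the entry and drops the list's reference, and the memory is freed by whichever class_put_profile() call brings the count to zero after the entry has left the list. A compact user-space model of that life cycle, with invented names (profile, profile_get, profile_put, profile_del) and a one-entry stand-in for the list:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct profile {
	char name[32];
	int refs;		/* counterpart of lp_refs */
	bool list_deleted;	/* counterpart of lp_list_deleted */
};

static struct profile *the_list;	/* a single entry is enough for the model */

static struct profile *profile_add(const char *name)
{
	struct profile *p = calloc(1, sizeof(*p));

	if (!p)
		abort();
	snprintf(p->name, sizeof(p->name), "%s", name);
	p->refs = 1;		/* the list itself holds one reference */
	the_list = p;
	return p;
}

static struct profile *profile_get(const char *name)
{
	if (the_list && !strcmp(the_list->name, name)) {
		the_list->refs++;
		return the_list;
	}
	return NULL;
}

static void profile_put(struct profile *p)
{
	if (--p->refs > 0)
		return;
	/* freeing is only legal once the entry has been unlinked */
	if (!p->list_deleted)
		abort();
	free(p);
}

static void profile_del(const char *name)
{
	struct profile *p = profile_get(name);

	if (!p)
		return;
	p->refs--;		/* cancel the reference profile_get() just took */
	the_list = NULL;	/* unlink the entry */
	p->list_deleted = true;
	profile_put(p);		/* drop the list's own reference */
}

int main(void)
{
	struct profile *held;

	profile_add("lustre-client");
	held = profile_get("lustre-client");	/* a user still holds it */
	profile_del("lustre-client");		/* unlinks, but memory survives */
	printf("still readable after del: %s\n", held->name);
	profile_put(held);			/* last reference, now freed */
	return 0;
}

The entry deleted while still held stays readable until the holder's final profile_put(); that window is what the lp_list_deleted flag and the LASSERT in the patch guard against freeing too early or while still linked.
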
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 0d3a3b05a637..2283e920d839 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -261,7 +261,7 @@ int lustre_start_mgc(struct super_block *sb)
261 261
262 rc = obd_get_info(NULL, obd->obd_self_export, 262 rc = obd_get_info(NULL, obd->obd_self_export,
263 strlen(KEY_CONN_DATA), KEY_CONN_DATA, 263 strlen(KEY_CONN_DATA), KEY_CONN_DATA,
264 &vallen, data, NULL); 264 &vallen, data);
265 LASSERT(rc == 0); 265 LASSERT(rc == 0);
266 has_ir = OCD_HAS_FLAG(data, IMP_RECOV); 266 has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
267 if (has_ir ^ !(*flags & LMD_FLG_NOIR)) { 267 if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
@@ -382,7 +382,7 @@ int lustre_start_mgc(struct super_block *sb)
382 /* We connect to the MGS at setup, and don't disconnect until cleanup */ 382 /* We connect to the MGS at setup, and don't disconnect until cleanup */
383 data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT | 383 data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
384 OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | 384 OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
385 OBD_CONNECT_LVB_TYPE; 385 OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS;
386 386
387#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE 387#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
388 data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB; 388 data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
@@ -1216,8 +1216,7 @@ static struct file_system_type lustre_fs_type = {
1216 .name = "lustre", 1216 .name = "lustre",
1217 .mount = lustre_mount, 1217 .mount = lustre_mount,
1218 .kill_sb = lustre_kill_super, 1218 .kill_sb = lustre_kill_super,
1219 .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV | 1219 .fs_flags = FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE,
1220 FS_RENAME_DOES_D_MOVE,
1221}; 1220};
1222MODULE_ALIAS_FS("lustre"); 1221MODULE_ALIAS_FS("lustre");
1223 1222
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 79104a66da96..c52b9e07d7dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -124,68 +124,3 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
124 ioobj->ioo_max_brw = 0; 124 ioobj->ioo_max_brw = 0;
125} 125}
126EXPORT_SYMBOL(obdo_to_ioobj); 126EXPORT_SYMBOL(obdo_to_ioobj);
127
128static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
129 u32 valid)
130{
131 valid &= oa->o_valid;
132
133 if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
134 CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n",
135 oa->o_valid, oa->o_mtime, oa->o_ctime);
136
137 attr->ia_valid = 0;
138 if (valid & OBD_MD_FLATIME) {
139 LTIME_S(attr->ia_atime) = oa->o_atime;
140 attr->ia_valid |= ATTR_ATIME;
141 }
142 if (valid & OBD_MD_FLMTIME) {
143 LTIME_S(attr->ia_mtime) = oa->o_mtime;
144 attr->ia_valid |= ATTR_MTIME;
145 }
146 if (valid & OBD_MD_FLCTIME) {
147 LTIME_S(attr->ia_ctime) = oa->o_ctime;
148 attr->ia_valid |= ATTR_CTIME;
149 }
150 if (valid & OBD_MD_FLSIZE) {
151 attr->ia_size = oa->o_size;
152 attr->ia_valid |= ATTR_SIZE;
153 }
154#if 0 /* you shouldn't be able to change a file's type with setattr */
155 if (valid & OBD_MD_FLTYPE) {
156 attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
157 (oa->o_mode & S_IFMT);
158 attr->ia_valid |= ATTR_MODE;
159 }
160#endif
161 if (valid & OBD_MD_FLMODE) {
162 attr->ia_mode = (attr->ia_mode & S_IFMT) |
163 (oa->o_mode & ~S_IFMT);
164 attr->ia_valid |= ATTR_MODE;
165 if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
166 !capable(CFS_CAP_FSETID))
167 attr->ia_mode &= ~S_ISGID;
168 }
169 if (valid & OBD_MD_FLUID) {
170 attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
171 attr->ia_valid |= ATTR_UID;
172 }
173 if (valid & OBD_MD_FLGID) {
174 attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
175 attr->ia_valid |= ATTR_GID;
176 }
177}
178
179void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid)
180{
181 iattr_from_obdo(&op_data->op_attr, oa, valid);
182 if (valid & OBD_MD_FLBLOCKS) {
183 op_data->op_attr_blocks = oa->o_blocks;
184 op_data->op_attr.ia_valid |= ATTR_BLOCKS;
185 }
186 if (valid & OBD_MD_FLFLAGS) {
187 op_data->op_attr_flags = oa->o_flags;
188 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
189 }
190}
191EXPORT_SYMBOL(md_from_obdo);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 505582ff4d1e..549076193bde 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -55,7 +55,7 @@ struct echo_device {
55 struct echo_client_obd *ed_ec; 55 struct echo_client_obd *ed_ec;
56 56
57 struct cl_site ed_site_myself; 57 struct cl_site ed_site_myself;
58 struct cl_site *ed_site; 58 struct lu_site *ed_site;
59 struct lu_device *ed_next; 59 struct lu_device *ed_next;
60}; 60};
61 61
@@ -505,9 +505,6 @@ static const struct lu_device_operations echo_device_lu_ops = {
505 505
506/** @} echo_lu_dev_ops */ 506/** @} echo_lu_dev_ops */
507 507
508static const struct cl_device_operations echo_device_cl_ops = {
509};
510
511/** \defgroup echo_init Setup and teardown 508/** \defgroup echo_init Setup and teardown
512 * 509 *
513 * Init and fini functions for echo client. 510 * Init and fini functions for echo client.
@@ -527,17 +524,19 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
527 } 524 }
528 525
529 rc = lu_site_init_finish(&site->cs_lu); 526 rc = lu_site_init_finish(&site->cs_lu);
530 if (rc) 527 if (rc) {
528 cl_site_fini(site);
531 return rc; 529 return rc;
530 }
532 531
533 ed->ed_site = site; 532 ed->ed_site = &site->cs_lu;
534 return 0; 533 return 0;
535} 534}
536 535
537static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) 536static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
538{ 537{
539 if (ed->ed_site) { 538 if (ed->ed_site) {
540 cl_site_fini(ed->ed_site); 539 lu_site_fini(ed->ed_site);
541 ed->ed_site = NULL; 540 ed->ed_site = NULL;
542 } 541 }
543} 542}
@@ -561,16 +560,10 @@ static void echo_thread_key_fini(const struct lu_context *ctx,
561 kmem_cache_free(echo_thread_kmem, info); 560 kmem_cache_free(echo_thread_kmem, info);
562} 561}
563 562
564static void echo_thread_key_exit(const struct lu_context *ctx,
565 struct lu_context_key *key, void *data)
566{
567}
568
569static struct lu_context_key echo_thread_key = { 563static struct lu_context_key echo_thread_key = {
570 .lct_tags = LCT_CL_THREAD, 564 .lct_tags = LCT_CL_THREAD,
571 .lct_init = echo_thread_key_init, 565 .lct_init = echo_thread_key_init,
572 .lct_fini = echo_thread_key_fini, 566 .lct_fini = echo_thread_key_fini,
573 .lct_exit = echo_thread_key_exit
574}; 567};
575 568
576static void *echo_session_key_init(const struct lu_context *ctx, 569static void *echo_session_key_init(const struct lu_context *ctx,
@@ -592,16 +585,10 @@ static void echo_session_key_fini(const struct lu_context *ctx,
592 kmem_cache_free(echo_session_kmem, session); 585 kmem_cache_free(echo_session_kmem, session);
593} 586}
594 587
595static void echo_session_key_exit(const struct lu_context *ctx,
596 struct lu_context_key *key, void *data)
597{
598}
599
600static struct lu_context_key echo_session_key = { 588static struct lu_context_key echo_session_key = {
601 .lct_tags = LCT_SESSION, 589 .lct_tags = LCT_SESSION,
602 .lct_init = echo_session_key_init, 590 .lct_init = echo_session_key_init,
603 .lct_fini = echo_session_key_fini, 591 .lct_fini = echo_session_key_fini,
604 .lct_exit = echo_session_key_exit
605}; 592};
606 593
607LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key); 594LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
@@ -630,7 +617,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
630 goto out_free; 617 goto out_free;
631 618
632 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops; 619 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
633 cd->cd_ops = &echo_device_cl_ops;
634 620
635 obd = class_name2obd(lustre_cfg_string(cfg, 0)); 621 obd = class_name2obd(lustre_cfg_string(cfg, 0));
636 LASSERT(obd); 622 LASSERT(obd);
@@ -674,7 +660,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
674 goto out_cleanup; 660 goto out_cleanup;
675 } 661 }
676 662
677 next->ld_site = &ed->ed_site->cs_lu; 663 next->ld_site = ed->ed_site;
678 rc = next->ld_type->ldt_ops->ldto_device_init(env, next, 664 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
679 next->ld_type->ldt_name, 665 next->ld_type->ldt_name,
680 NULL); 666 NULL);
@@ -741,7 +727,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
741 CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", 727 CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
742 ed, next); 728 ed, next);
743 729
744 lu_site_purge(env, &ed->ed_site->cs_lu, -1); 730 lu_site_purge(env, ed->ed_site, -1);
745 731
746 /* check if there are objects still alive. 732 /* check if there are objects still alive.
747 * It shouldn't have any object because lu_site_purge would cleanup 733 * It shouldn't have any object because lu_site_purge would cleanup
@@ -754,7 +740,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
754 spin_unlock(&ec->ec_lock); 740 spin_unlock(&ec->ec_lock);
755 741
756 /* purge again */ 742 /* purge again */
757 lu_site_purge(env, &ed->ed_site->cs_lu, -1); 743 lu_site_purge(env, ed->ed_site, -1);
758 744
759 CDEBUG(D_INFO, 745 CDEBUG(D_INFO,
760 "Waiting for the reference of echo object to be dropped\n"); 746 "Waiting for the reference of echo object to be dropped\n");
@@ -766,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
766 CERROR("echo_client still has objects at cleanup time, wait for 1 second\n"); 752 CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
767 set_current_state(TASK_UNINTERRUPTIBLE); 753 set_current_state(TASK_UNINTERRUPTIBLE);
768 schedule_timeout(cfs_time_seconds(1)); 754 schedule_timeout(cfs_time_seconds(1));
769 lu_site_purge(env, &ed->ed_site->cs_lu, -1); 755 lu_site_purge(env, ed->ed_site, -1);
770 spin_lock(&ec->ec_lock); 756 spin_lock(&ec->ec_lock);
771 } 757 }
772 spin_unlock(&ec->ec_lock); 758 spin_unlock(&ec->ec_lock);
@@ -780,11 +766,13 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
780 while (next) 766 while (next)
781 next = next->ld_type->ldt_ops->ldto_device_free(env, next); 767 next = next->ld_type->ldt_ops->ldto_device_free(env, next);
782 768
783 LASSERT(ed->ed_site == lu2cl_site(d->ld_site)); 769 LASSERT(ed->ed_site == d->ld_site);
784 echo_site_fini(env, ed); 770 echo_site_fini(env, ed);
785 cl_device_fini(&ed->ed_cl); 771 cl_device_fini(&ed->ed_cl);
786 kfree(ed); 772 kfree(ed);
787 773
774 cl_env_cache_purge(~0);
775
788 return NULL; 776 return NULL;
789} 777}
790 778
@@ -1100,7 +1088,7 @@ out:
1100static u64 last_object_id; 1088static u64 last_object_id;
1101 1089
1102static int echo_create_object(const struct lu_env *env, struct echo_device *ed, 1090static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
1103 struct obdo *oa, struct obd_trans_info *oti) 1091 struct obdo *oa)
1104{ 1092{
1105 struct echo_object *eco; 1093 struct echo_object *eco;
1106 struct echo_client_obd *ec = ed->ed_ec; 1094 struct echo_client_obd *ec = ed->ed_ec;
@@ -1117,7 +1105,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
1117 if (!ostid_id(&oa->o_oi)) 1105 if (!ostid_id(&oa->o_oi))
1118 ostid_set_id(&oa->o_oi, ++last_object_id); 1106 ostid_set_id(&oa->o_oi, ++last_object_id);
1119 1107
1120 rc = obd_create(env, ec->ec_exp, oa, oti); 1108 rc = obd_create(env, ec->ec_exp, oa);
1121 if (rc != 0) { 1109 if (rc != 0) {
1122 CERROR("Cannot create objects: rc = %d\n", rc); 1110 CERROR("Cannot create objects: rc = %d\n", rc);
1123 goto failed; 1111 goto failed;
@@ -1137,7 +1125,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
1137 1125
1138 failed: 1126 failed:
1139 if (created && rc) 1127 if (created && rc)
1140 obd_destroy(env, ec->ec_exp, oa, oti); 1128 obd_destroy(env, ec->ec_exp, oa);
1141 if (rc) 1129 if (rc)
1142 CERROR("create object failed with: rc = %d\n", rc); 1130 CERROR("create object failed with: rc = %d\n", rc);
1143 return rc; 1131 return rc;
@@ -1237,8 +1225,7 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
1237 1225
1238static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, 1226static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1239 struct echo_object *eco, u64 offset, 1227 struct echo_object *eco, u64 offset,
1240 u64 count, int async, 1228 u64 count, int async)
1241 struct obd_trans_info *oti)
1242{ 1229{
1243 u32 npages; 1230 u32 npages;
1244 struct brw_page *pga; 1231 struct brw_page *pga;
@@ -1332,12 +1319,11 @@ static int echo_client_prep_commit(const struct lu_env *env,
1332 struct obd_export *exp, int rw, 1319 struct obd_export *exp, int rw,
1333 struct obdo *oa, struct echo_object *eco, 1320 struct obdo *oa, struct echo_object *eco,
1334 u64 offset, u64 count, 1321 u64 offset, u64 count,
1335 u64 batch, struct obd_trans_info *oti, 1322 u64 batch, int async)
1336 int async)
1337{ 1323{
1338 struct obd_ioobj ioo; 1324 struct obd_ioobj ioo;
1339 struct niobuf_local *lnb; 1325 struct niobuf_local *lnb;
1340 struct niobuf_remote *rnb; 1326 struct niobuf_remote rnb;
1341 u64 off; 1327 u64 off;
1342 u64 npages, tot_pages; 1328 u64 npages, tot_pages;
1343 int i, ret = 0, brw_flags = 0; 1329 int i, ret = 0, brw_flags = 0;
@@ -1349,9 +1335,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
1349 tot_pages = count >> PAGE_SHIFT; 1335 tot_pages = count >> PAGE_SHIFT;
1350 1336
1351 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); 1337 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
1352 rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); 1338 if (!lnb) {
1353
1354 if (!lnb || !rnb) {
1355 ret = -ENOMEM; 1339 ret = -ENOMEM;
1356 goto out; 1340 goto out;
1357 } 1341 }
@@ -1363,26 +1347,22 @@ static int echo_client_prep_commit(const struct lu_env *env,
1363 1347
1364 off = offset; 1348 off = offset;
1365 1349
1366 for (; tot_pages; tot_pages -= npages) { 1350 for (; tot_pages > 0; tot_pages -= npages) {
1367 int lpages; 1351 int lpages;
1368 1352
1369 if (tot_pages < npages) 1353 if (tot_pages < npages)
1370 npages = tot_pages; 1354 npages = tot_pages;
1371 1355
1372 for (i = 0; i < npages; i++, off += PAGE_SIZE) { 1356 rnb.rnb_offset = off;
1373 rnb[i].rnb_offset = off; 1357 rnb.rnb_len = npages * PAGE_SIZE;
1374 rnb[i].rnb_len = PAGE_SIZE; 1358 rnb.rnb_flags = brw_flags;
1375 rnb[i].rnb_flags = brw_flags; 1359 ioo.ioo_bufcnt = 1;
1376 } 1360 off += npages * PAGE_SIZE;
1377
1378 ioo.ioo_bufcnt = npages;
1379 1361
1380 lpages = npages; 1362 lpages = npages;
1381 ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages, 1363 ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
1382 lnb, oti);
1383 if (ret != 0) 1364 if (ret != 0)
1384 goto out; 1365 goto out;
1385 LASSERT(lpages == npages);
1386 1366
1387 for (i = 0; i < lpages; i++) { 1367 for (i = 0; i < lpages; i++) {
1388 struct page *page = lnb[i].lnb_page; 1368 struct page *page = lnb[i].lnb_page;
@@ -1401,24 +1381,21 @@ static int echo_client_prep_commit(const struct lu_env *env,
1401 1381
1402 if (rw == OBD_BRW_WRITE) 1382 if (rw == OBD_BRW_WRITE)
1403 echo_client_page_debug_setup(page, rw, 1383 echo_client_page_debug_setup(page, rw,
1404 ostid_id(&oa->o_oi), 1384 ostid_id(&oa->o_oi),
1405 rnb[i].rnb_offset, 1385 lnb[i].lnb_file_offset,
1406 rnb[i].rnb_len); 1386 lnb[i].lnb_len);
1407 else 1387 else
1408 echo_client_page_debug_check(page, 1388 echo_client_page_debug_check(page,
1409 ostid_id(&oa->o_oi), 1389 ostid_id(&oa->o_oi),
1410 rnb[i].rnb_offset, 1390 lnb[i].lnb_file_offset,
1411 rnb[i].rnb_len); 1391 lnb[i].lnb_len);
1412 } 1392 }
1413 1393
1414 ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, 1394 ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
1415 rnb, npages, lnb, oti, ret); 1395 ret);
1416 if (ret != 0) 1396 if (ret != 0)
1417 goto out; 1397 goto out;
1418 1398
1419 /* Reset oti otherwise it would confuse ldiskfs. */
1420 memset(oti, 0, sizeof(*oti));
1421
1422 /* Reuse env context. */ 1399 /* Reuse env context. */
1423 lu_context_exit((struct lu_context *)&env->le_ctx); 1400 lu_context_exit((struct lu_context *)&env->le_ctx);
1424 lu_context_enter((struct lu_context *)&env->le_ctx); 1401 lu_context_enter((struct lu_context *)&env->le_ctx);
@@ -1426,14 +1403,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
1426 1403
1427out: 1404out:
1428 kfree(lnb); 1405 kfree(lnb);
1429 kfree(rnb);
1430 return ret; 1406 return ret;
1431} 1407}
1432 1408
1433static int echo_client_brw_ioctl(const struct lu_env *env, int rw, 1409static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
1434 struct obd_export *exp, 1410 struct obd_export *exp,
1435 struct obd_ioctl_data *data, 1411 struct obd_ioctl_data *data)
1436 struct obd_trans_info *dummy_oti)
1437{ 1412{
1438 struct obd_device *obd = class_exp2obd(exp); 1413 struct obd_device *obd = class_exp2obd(exp);
1439 struct echo_device *ed = obd2echo_dev(obd); 1414 struct echo_device *ed = obd2echo_dev(obd);
@@ -1470,15 +1445,13 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
1470 case 1: 1445 case 1:
1471 /* fall through */ 1446 /* fall through */
1472 case 2: 1447 case 2:
1473 rc = echo_client_kbrw(ed, rw, oa, 1448 rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
1474 eco, data->ioc_offset, 1449 data->ioc_count, async);
1475 data->ioc_count, async, dummy_oti);
1476 break; 1450 break;
1477 case 3: 1451 case 3:
1478 rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, 1452 rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
1479 eco, data->ioc_offset, 1453 data->ioc_offset, data->ioc_count,
1480 data->ioc_count, data->ioc_plen1, 1454 data->ioc_plen1, async);
1481 dummy_oti, async);
1482 break; 1455 break;
1483 default: 1456 default:
1484 rc = -EINVAL; 1457 rc = -EINVAL;
@@ -1496,16 +1469,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1496 struct echo_client_obd *ec = ed->ed_ec; 1469 struct echo_client_obd *ec = ed->ed_ec;
1497 struct echo_object *eco; 1470 struct echo_object *eco;
1498 struct obd_ioctl_data *data = karg; 1471 struct obd_ioctl_data *data = karg;
1499 struct obd_trans_info dummy_oti;
1500 struct lu_env *env; 1472 struct lu_env *env;
1501 struct oti_req_ack_lock *ack_lock;
1502 struct obdo *oa; 1473 struct obdo *oa;
1503 struct lu_fid fid; 1474 struct lu_fid fid;
1504 int rw = OBD_BRW_READ; 1475 int rw = OBD_BRW_READ;
1505 int rc = 0; 1476 int rc = 0;
1506 int i;
1507
1508 memset(&dummy_oti, 0, sizeof(dummy_oti));
1509 1477
1510 oa = &data->ioc_obdo1; 1478 oa = &data->ioc_obdo1;
1511 if (!(oa->o_valid & OBD_MD_FLGROUP)) { 1479 if (!(oa->o_valid & OBD_MD_FLGROUP)) {
@@ -1535,7 +1503,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1535 goto out; 1503 goto out;
1536 } 1504 }
1537 1505
1538 rc = echo_create_object(env, ed, oa, &dummy_oti); 1506 rc = echo_create_object(env, ed, oa);
1539 goto out; 1507 goto out;
1540 1508
1541 case OBD_IOC_DESTROY: 1509 case OBD_IOC_DESTROY:
@@ -1546,7 +1514,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1546 1514
1547 rc = echo_get_object(&eco, ed, oa); 1515 rc = echo_get_object(&eco, ed, oa);
1548 if (rc == 0) { 1516 if (rc == 0) {
1549 rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti); 1517 rc = obd_destroy(env, ec->ec_exp, oa);
1550 if (rc == 0) 1518 if (rc == 0)
1551 eco->eo_deleted = 1; 1519 eco->eo_deleted = 1;
1552 echo_put_object(eco); 1520 echo_put_object(eco);
@@ -1556,11 +1524,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1556 case OBD_IOC_GETATTR: 1524 case OBD_IOC_GETATTR:
1557 rc = echo_get_object(&eco, ed, oa); 1525 rc = echo_get_object(&eco, ed, oa);
1558 if (rc == 0) { 1526 if (rc == 0) {
1559 struct obd_info oinfo = { 1527 rc = obd_getattr(env, ec->ec_exp, oa);
1560 .oi_oa = oa,
1561 };
1562
1563 rc = obd_getattr(env, ec->ec_exp, &oinfo);
1564 echo_put_object(eco); 1528 echo_put_object(eco);
1565 } 1529 }
1566 goto out; 1530 goto out;
@@ -1573,11 +1537,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1573 1537
1574 rc = echo_get_object(&eco, ed, oa); 1538 rc = echo_get_object(&eco, ed, oa);
1575 if (rc == 0) { 1539 if (rc == 0) {
1576 struct obd_info oinfo = { 1540 rc = obd_setattr(env, ec->ec_exp, oa);
1577 .oi_oa = oa,
1578 };
1579
1580 rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
1581 echo_put_object(eco); 1541 echo_put_object(eco);
1582 } 1542 }
1583 goto out; 1543 goto out;
@@ -1591,7 +1551,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1591 rw = OBD_BRW_WRITE; 1551 rw = OBD_BRW_WRITE;
1592 /* fall through */ 1552 /* fall through */
1593 case OBD_IOC_BRW_READ: 1553 case OBD_IOC_BRW_READ:
1594 rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti); 1554 rc = echo_client_brw_ioctl(env, rw, exp, data);
1595 goto out; 1555 goto out;
1596 1556
1597 default: 1557 default:
@@ -1604,14 +1564,6 @@ out:
1604 lu_env_fini(env); 1564 lu_env_fini(env);
1605 kfree(env); 1565 kfree(env);
1606 1566
1607 /* XXX this should be in a helper also called by target_send_reply */
1608 for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
1609 i++, ack_lock++) {
1610 if (!ack_lock->mode)
1611 break;
1612 ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
1613 }
1614
1615 return rc; 1567 return rc;
1616} 1568}
1617 1569
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index f0062d44ee03..575b2969ad83 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,7 +162,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
162 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ 162 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
163 163
164 if (pages_number <= 0 || 164 if (pages_number <= 0 ||
165 pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || 165 pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
166 pages_number > totalram_pages / 4) /* 1/4 of RAM */ 166 pages_number > totalram_pages / 4) /* 1/4 of RAM */
167 return -ERANGE; 167 return -ERANGE;
168 168
@@ -183,10 +183,12 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
183 183
184 seq_printf(m, 184 seq_printf(m,
185 "used_mb: %ld\n" 185 "used_mb: %ld\n"
186 "busy_cnt: %ld\n", 186 "busy_cnt: %ld\n"
187 "reclaim: %llu\n",
187 (atomic_long_read(&cli->cl_lru_in_list) + 188 (atomic_long_read(&cli->cl_lru_in_list) +
188 atomic_long_read(&cli->cl_lru_busy)) >> shift, 189 atomic_long_read(&cli->cl_lru_busy)) >> shift,
189 atomic_long_read(&cli->cl_lru_busy)); 190 atomic_long_read(&cli->cl_lru_busy),
191 cli->cl_lru_reclaim);
190 192
191 return 0; 193 return 0;
192} 194}
@@ -585,7 +587,8 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
585 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); 587 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
586 /* max_pages_per_rpc must be chunk aligned */ 588 /* max_pages_per_rpc must be chunk aligned */
587 val = (val + ~chunk_mask) & chunk_mask; 589 val = (val + ~chunk_mask) & chunk_mask;
588 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { 590 if (!val || (ocd->ocd_brw_size &&
591 val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
589 return -ERANGE; 592 return -ERANGE;
590 } 593 }
591 spin_lock(&cli->cl_loi_list_lock); 594 spin_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219add98..b0f030c6c9c9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -360,6 +360,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
360 360
361 RB_CLEAR_NODE(&ext->oe_node); 361 RB_CLEAR_NODE(&ext->oe_node);
362 ext->oe_obj = obj; 362 ext->oe_obj = obj;
363 cl_object_get(osc2cl(obj));
363 atomic_set(&ext->oe_refc, 1); 364 atomic_set(&ext->oe_refc, 1);
364 atomic_set(&ext->oe_users, 0); 365 atomic_set(&ext->oe_users, 0);
365 INIT_LIST_HEAD(&ext->oe_link); 366 INIT_LIST_HEAD(&ext->oe_link);
@@ -398,6 +399,7 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
398 LDLM_LOCK_PUT(ext->oe_dlmlock); 399 LDLM_LOCK_PUT(ext->oe_dlmlock);
399 ext->oe_dlmlock = NULL; 400 ext->oe_dlmlock = NULL;
400 } 401 }
402 cl_object_put(env, osc2cl(ext->oe_obj));
401 osc_extent_free(ext); 403 osc_extent_free(ext);
402 } 404 }
403} 405}
@@ -959,7 +961,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
959 if (rc == -ETIMEDOUT) { 961 if (rc == -ETIMEDOUT) {
960 OSC_EXTENT_DUMP(D_ERROR, ext, 962 OSC_EXTENT_DUMP(D_ERROR, ext,
961 "%s: wait ext to %u timedout, recovery in progress?\n", 963 "%s: wait ext to %u timedout, recovery in progress?\n",
962 osc_export(obj)->exp_obd->obd_name, state); 964 cli_name(osc_cli(obj)), state);
963 965
964 lwi = LWI_INTR(NULL, NULL); 966 lwi = LWI_INTR(NULL, NULL);
965 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), 967 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -977,7 +979,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
977static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, 979static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
978 bool partial) 980 bool partial)
979{ 981{
980 struct cl_env_nest nest;
981 struct lu_env *env; 982 struct lu_env *env;
982 struct cl_io *io; 983 struct cl_io *io;
983 struct osc_object *obj = ext->oe_obj; 984 struct osc_object *obj = ext->oe_obj;
@@ -990,6 +991,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
990 int grants = 0; 991 int grants = 0;
991 int nr_pages = 0; 992 int nr_pages = 0;
992 int rc = 0; 993 int rc = 0;
994 int refcheck;
993 995
994 LASSERT(sanity_check(ext) == 0); 996 LASSERT(sanity_check(ext) == 0);
995 EASSERT(ext->oe_state == OES_TRUNC, ext); 997 EASSERT(ext->oe_state == OES_TRUNC, ext);
@@ -999,7 +1001,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
999 * We can't use that env from osc_cache_truncate_start() because 1001 * We can't use that env from osc_cache_truncate_start() because
1000 * it's from lov_io_sub and not fully initialized. 1002 * it's from lov_io_sub and not fully initialized.
1001 */ 1003 */
1002 env = cl_env_nested_get(&nest); 1004 env = cl_env_get(&refcheck);
1003 io = &osc_env_info(env)->oti_io; 1005 io = &osc_env_info(env)->oti_io;
1004 io->ci_obj = cl_object_top(osc2cl(obj)); 1006 io->ci_obj = cl_object_top(osc2cl(obj));
1005 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); 1007 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
@@ -1085,7 +1087,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
1085 1087
1086out: 1088out:
1087 cl_io_fini(env, io); 1089 cl_io_fini(env, io);
1088 cl_env_nested_put(&nest, env); 1090 cl_env_put(env, &refcheck);
1089 return rc; 1091 return rc;
1090} 1092}
1091 1093
@@ -1327,7 +1329,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1327{ 1329{
1328 struct osc_page *opg = oap2osc_page(oap); 1330 struct osc_page *opg = oap2osc_page(oap);
1329 struct cl_page *page = oap2cl_page(oap); 1331 struct cl_page *page = oap2cl_page(oap);
1330 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
1331 enum cl_req_type crt; 1332 enum cl_req_type crt;
1332 int srvlock; 1333 int srvlock;
1333 1334
@@ -1338,25 +1339,10 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1338 "cp_state:%u, cmd:%d\n", page->cp_state, cmd); 1339 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
1339 LASSERT(opg->ops_transfer_pinned); 1340 LASSERT(opg->ops_transfer_pinned);
1340 1341
1341 /*
1342 * page->cp_req can be NULL if io submission failed before
1343 * cl_req was allocated.
1344 */
1345 if (page->cp_req)
1346 cl_req_page_done(env, page);
1347 LASSERT(!page->cp_req);
1348
1349 crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; 1342 crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
1350 /* Clear opg->ops_transfer_pinned before VM lock is released. */ 1343 /* Clear opg->ops_transfer_pinned before VM lock is released. */
1351 opg->ops_transfer_pinned = 0; 1344 opg->ops_transfer_pinned = 0;
1352 1345
1353 spin_lock(&obj->oo_seatbelt);
1354 LASSERT(opg->ops_submitter);
1355 LASSERT(!list_empty(&opg->ops_inflight));
1356 list_del_init(&opg->ops_inflight);
1357 opg->ops_submitter = NULL;
1358 spin_unlock(&obj->oo_seatbelt);
1359
1360 opg->ops_submit_time = 0; 1346 opg->ops_submit_time = 0;
1361 srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK; 1347 srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
1362 1348
@@ -1380,16 +1366,17 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1380 lu_ref_del(&page->cp_reference, "transfer", page); 1366 lu_ref_del(&page->cp_reference, "transfer", page);
1381 1367
1382 cl_page_completion(env, page, crt, rc); 1368 cl_page_completion(env, page, crt, rc);
1369 cl_page_put(env, page);
1383 1370
1384 return 0; 1371 return 0;
1385} 1372}
1386 1373
1387#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \ 1374#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
1388 struct client_obd *__tmp = (cli); \ 1375 struct client_obd *__tmp = (cli); \
1389 CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \ 1376 CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
1390 "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \ 1377 "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
1391 "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \ 1378 "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
1392 __tmp->cl_import->imp_obd->obd_name, \ 1379 cli_name(__tmp), \
1393 __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \ 1380 __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
1394 atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \ 1381 atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
1395 __tmp->cl_lost_grant, __tmp->cl_avail_grant, \ 1382 __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
@@ -1627,7 +1614,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1627 osc_io_unplug_async(env, cli, NULL); 1614 osc_io_unplug_async(env, cli, NULL);
1628 1615
1629 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n", 1616 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
1630 cli->cl_import->imp_obd->obd_name, &ocw, oap); 1617 cli_name(cli), &ocw, oap);
1631 1618
1632 rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi); 1619 rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1633 1620
@@ -1671,7 +1658,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1671 break; 1658 break;
1672 default: 1659 default:
1673 CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n", 1660 CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
1674 cli->cl_import->imp_obd->obd_name, &ocw, rc); 1661 cli_name(cli), &ocw, rc);
1675 break; 1662 break;
1676 } 1663 }
1677out: 1664out:
@@ -1931,7 +1918,8 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
1931 } 1918 }
1932 1919
1933 if (tmp->oe_srvlock != ext->oe_srvlock || 1920 if (tmp->oe_srvlock != ext->oe_srvlock ||
1934 !tmp->oe_grants != !ext->oe_grants) 1921 !tmp->oe_grants != !ext->oe_grants ||
1922 tmp->oe_no_merge || ext->oe_no_merge)
1935 return 0; 1923 return 0;
1936 1924
1937 /* remove break for strict check */ 1925 /* remove break for strict check */
@@ -2250,14 +2238,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2250 return 0; 2238 return 0;
2251 2239
2252 if (!async) { 2240 if (!async) {
2253 /* disable osc_lru_shrink() temporarily to avoid
2254 * potential stack overrun problem. LU-2859
2255 */
2256 atomic_inc(&cli->cl_lru_shrinkers);
2257 spin_lock(&cli->cl_loi_list_lock); 2241 spin_lock(&cli->cl_loi_list_lock);
2258 osc_check_rpcs(env, cli); 2242 osc_check_rpcs(env, cli);
2259 spin_unlock(&cli->cl_loi_list_lock); 2243 spin_unlock(&cli->cl_loi_list_lock);
2260 atomic_dec(&cli->cl_lru_shrinkers);
2261 } else { 2244 } else {
2262 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); 2245 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
2263 LASSERT(cli->cl_writeback_work); 2246 LASSERT(cli->cl_writeback_work);
@@ -2479,7 +2462,6 @@ int osc_teardown_async_page(const struct lu_env *env,
2479 struct osc_object *obj, struct osc_page *ops) 2462 struct osc_object *obj, struct osc_page *ops)
2480{ 2463{
2481 struct osc_async_page *oap = &ops->ops_oap; 2464 struct osc_async_page *oap = &ops->ops_oap;
2482 struct osc_extent *ext = NULL;
2483 int rc = 0; 2465 int rc = 0;
2484 2466
2485 LASSERT(oap->oap_magic == OAP_MAGIC); 2467 LASSERT(oap->oap_magic == OAP_MAGIC);
@@ -2487,12 +2469,15 @@ int osc_teardown_async_page(const struct lu_env *env,
2487 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n", 2469 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2488 oap, ops, osc_index(oap2osc(oap))); 2470 oap, ops, osc_index(oap2osc(oap)));
2489 2471
2490 osc_object_lock(obj);
2491 if (!list_empty(&oap->oap_rpc_item)) { 2472 if (!list_empty(&oap->oap_rpc_item)) {
2492 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap); 2473 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2493 rc = -EBUSY; 2474 rc = -EBUSY;
2494 } else if (!list_empty(&oap->oap_pending_item)) { 2475 } else if (!list_empty(&oap->oap_pending_item)) {
2476 struct osc_extent *ext = NULL;
2477
2478 osc_object_lock(obj);
2495 ext = osc_extent_lookup(obj, osc_index(oap2osc(oap))); 2479 ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
2480 osc_object_unlock(obj);
2496 /* only truncated pages are allowed to be taken out. 2481 /* only truncated pages are allowed to be taken out.
2497 * See osc_extent_truncate() and osc_cache_truncate_start() 2482 * See osc_extent_truncate() and osc_cache_truncate_start()
2498 * for details. 2483 * for details.
@@ -2502,10 +2487,9 @@ int osc_teardown_async_page(const struct lu_env *env,
2502 osc_index(oap2osc(oap))); 2487 osc_index(oap2osc(oap)));
2503 rc = -EBUSY; 2488 rc = -EBUSY;
2504 } 2489 }
2490 if (ext)
2491 osc_extent_put(env, ext);
2505 } 2492 }
2506 osc_object_unlock(obj);
2507 if (ext)
2508 osc_extent_put(env, ext);
2509 return rc; 2493 return rc;
2510} 2494}
2511 2495
@@ -2666,11 +2650,13 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2666 struct osc_async_page *oap, *tmp; 2650 struct osc_async_page *oap, *tmp;
2667 int page_count = 0; 2651 int page_count = 0;
2668 int mppr = cli->cl_max_pages_per_rpc; 2652 int mppr = cli->cl_max_pages_per_rpc;
2653 bool can_merge = true;
2669 pgoff_t start = CL_PAGE_EOF; 2654 pgoff_t start = CL_PAGE_EOF;
2670 pgoff_t end = 0; 2655 pgoff_t end = 0;
2671 2656
2672 list_for_each_entry(oap, list, oap_pending_item) { 2657 list_for_each_entry(oap, list, oap_pending_item) {
2673 pgoff_t index = osc_index(oap2osc(oap)); 2658 struct osc_page *opg = oap2osc_page(oap);
2659 pgoff_t index = osc_index(opg);
2674 2660
2675 if (index > end) 2661 if (index > end)
2676 end = index; 2662 end = index;
@@ -2678,6 +2664,9 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2678 start = index; 2664 start = index;
2679 ++page_count; 2665 ++page_count;
2680 mppr <<= (page_count > mppr); 2666 mppr <<= (page_count > mppr);
2667
2668 if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
2669 can_merge = false;
2681 } 2670 }
2682 2671
2683 ext = osc_extent_alloc(obj); 2672 ext = osc_extent_alloc(obj);
@@ -2691,6 +2680,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2691 2680
2692 ext->oe_rw = !!(cmd & OBD_BRW_READ); 2681 ext->oe_rw = !!(cmd & OBD_BRW_READ);
2693 ext->oe_sync = 1; 2682 ext->oe_sync = 1;
2683 ext->oe_no_merge = !can_merge;
2694 ext->oe_urgent = 1; 2684 ext->oe_urgent = 1;
2695 ext->oe_start = start; 2685 ext->oe_start = start;
2696 ext->oe_end = end; 2686 ext->oe_end = end;
@@ -3158,7 +3148,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
3158 struct cl_page *page = ops->ops_cl.cpl_page; 3148 struct cl_page *page = ops->ops_cl.cpl_page;
3159 3149
3160 /* refresh non-overlapped index */ 3150 /* refresh non-overlapped index */
3161 tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0); 3151 tmp = osc_dlmlock_at_pgoff(env, osc, index,
3152 OSC_DAP_FL_TEST_LOCK);
3162 if (tmp) { 3153 if (tmp) {
3163 __u64 end = tmp->l_policy_data.l_extent.end; 3154 __u64 end = tmp->l_policy_data.l_extent.end;
3164 /* Cache the first-non-overlapped index so as to skip 3155 /* Cache the first-non-overlapped index so as to skip
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 9c8de15c309c..cce55a9689f0 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -77,7 +77,6 @@ struct osc_io {
77 77
78 /** write osc_lock for this IO, used by osc_extent_find(). */ 78 /** write osc_lock for this IO, used by osc_extent_find(). */
79 struct osc_lock *oi_write_osclock; 79 struct osc_lock *oi_write_osclock;
80 struct obd_info oi_info;
81 struct obdo oi_oa; 80 struct obdo oi_oa;
82 struct osc_async_cbargs { 81 struct osc_async_cbargs {
83 bool opc_rpc_sent; 82 bool opc_rpc_sent;
@@ -87,13 +86,6 @@ struct osc_io {
87}; 86};
88 87
89/** 88/**
90 * State of transfer for osc.
91 */
92struct osc_req {
93 struct cl_req_slice or_cl;
94};
95
96/**
97 * State maintained by osc layer for the duration of a system call. 89 * State maintained by osc layer for the duration of a system call.
98 */ 90 */
99struct osc_session { 91struct osc_session {
@@ -103,7 +95,7 @@ struct osc_session {
103#define OTI_PVEC_SIZE 256 95#define OTI_PVEC_SIZE 256
104struct osc_thread_info { 96struct osc_thread_info {
105 struct ldlm_res_id oti_resname; 97 struct ldlm_res_id oti_resname;
106 ldlm_policy_data_t oti_policy; 98 union ldlm_policy_data oti_policy;
107 struct cl_lock_descr oti_descr; 99 struct cl_lock_descr oti_descr;
108 struct cl_attr oti_attr; 100 struct cl_attr oti_attr;
109 struct lustre_handle oti_handle; 101 struct lustre_handle oti_handle;
@@ -116,6 +108,7 @@ struct osc_thread_info {
116 pgoff_t oti_next_index; 108 pgoff_t oti_next_index;
117 pgoff_t oti_fn_index; /* first non-overlapped index */ 109 pgoff_t oti_fn_index; /* first non-overlapped index */
118 struct cl_sync_io oti_anchor; 110 struct cl_sync_io oti_anchor;
111 struct cl_req_attr oti_req_attr;
119}; 112};
120 113
121struct osc_object { 114struct osc_object {
@@ -127,16 +120,6 @@ struct osc_object {
127 int oo_contended; 120 int oo_contended;
128 unsigned long oo_contention_time; 121 unsigned long oo_contention_time;
129 /** 122 /**
130 * List of pages in transfer.
131 */
132 struct list_head oo_inflight[CRT_NR];
133 /**
134 * Lock, protecting osc_page::ops_inflight, because a seat-belt is
135 * locked during take-off and landing.
136 */
137 spinlock_t oo_seatbelt;
138
139 /**
140 * used by the osc to keep track of what objects to build into rpcs. 123 * used by the osc to keep track of what objects to build into rpcs.
141 * Protected by client_obd->cli_loi_list_lock. 124 * Protected by client_obd->cli_loi_list_lock.
142 */ 125 */
@@ -364,15 +347,6 @@ struct osc_page {
364 */ 347 */
365 struct list_head ops_lru; 348 struct list_head ops_lru;
366 /** 349 /**
367 * Linkage into a per-osc_object list of pages in flight. For
368 * debugging.
369 */
370 struct list_head ops_inflight;
371 /**
372 * Thread that submitted this page for transfer. For debugging.
373 */
374 struct task_struct *ops_submitter;
375 /**
376 * Submit time - the time when the page is starting RPC. For debugging. 350 * Submit time - the time when the page is starting RPC. For debugging.
377 */ 351 */
378 unsigned long ops_submit_time; 352 unsigned long ops_submit_time;
@@ -382,7 +356,6 @@ extern struct kmem_cache *osc_lock_kmem;
382extern struct kmem_cache *osc_object_kmem; 356extern struct kmem_cache *osc_object_kmem;
383extern struct kmem_cache *osc_thread_kmem; 357extern struct kmem_cache *osc_thread_kmem;
384extern struct kmem_cache *osc_session_kmem; 358extern struct kmem_cache *osc_session_kmem;
385extern struct kmem_cache *osc_req_kmem;
386extern struct kmem_cache *osc_extent_kmem; 359extern struct kmem_cache *osc_extent_kmem;
387 360
388extern struct lu_device_type osc_device_type; 361extern struct lu_device_type osc_device_type;
@@ -396,15 +369,14 @@ int osc_lock_init(const struct lu_env *env,
396 const struct cl_io *io); 369 const struct cl_io *io);
397int osc_io_init(const struct lu_env *env, 370int osc_io_init(const struct lu_env *env,
398 struct cl_object *obj, struct cl_io *io); 371 struct cl_object *obj, struct cl_io *io);
399int osc_req_init(const struct lu_env *env, struct cl_device *dev,
400 struct cl_req *req);
401struct lu_object *osc_object_alloc(const struct lu_env *env, 372struct lu_object *osc_object_alloc(const struct lu_env *env,
402 const struct lu_object_header *hdr, 373 const struct lu_object_header *hdr,
403 struct lu_device *dev); 374 struct lu_device *dev);
404int osc_page_init(const struct lu_env *env, struct cl_object *obj, 375int osc_page_init(const struct lu_env *env, struct cl_object *obj,
405 struct cl_page *page, pgoff_t ind); 376 struct cl_page *page, pgoff_t ind);
406 377
407void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj, 378void osc_index2policy(union ldlm_policy_data *policy,
379 const struct cl_object *obj,
408 pgoff_t start, pgoff_t end); 380 pgoff_t start, pgoff_t end);
409int osc_lvb_print(const struct lu_env *env, void *cookie, 381int osc_lvb_print(const struct lu_env *env, void *cookie,
410 lu_printer_t p, const struct ost_lvb *lvb); 382 lu_printer_t p, const struct ost_lvb *lvb);
@@ -554,6 +526,16 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
554 return (struct osc_page *)container_of(oap, struct osc_page, ops_oap); 526 return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
555} 527}
556 528
529static inline struct osc_page *
530osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
531{
532 const struct cl_page_slice *slice;
533
534 LASSERT(osc);
535 slice = cl_object_page_slice(&osc->oo_cl, page);
536 return cl2osc_page(slice);
537}
538
557static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice) 539static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
558{ 540{
559 LINVRNT(osc_is_object(&slice->cls_obj->co_lu)); 541 LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
@@ -615,6 +597,10 @@ struct osc_extent {
615 oe_rw:1, 597 oe_rw:1,
616 /** sync extent, queued by osc_queue_sync_pages() */ 598 /** sync extent, queued by osc_queue_sync_pages() */
617 oe_sync:1, 599 oe_sync:1,
600 /** set if this extent has partial, sync pages.
601 * Extents with partial page(s) can't merge with others in RPC
602 */
603 oe_no_merge:1,
618 oe_srvlock:1, 604 oe_srvlock:1,
619 oe_memalloc:1, 605 oe_memalloc:1,
620 /** an ACTIVE extent is going to be truncated, so when this extent 606 /** an ACTIVE extent is going to be truncated, so when this extent
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 83d30c135ba4..c5d62aeaeab5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -29,7 +29,7 @@
29 * This file is part of Lustre, http://www.lustre.org/ 29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc. 30 * Lustre is a trademark of Sun Microsystems, Inc.
31 * 31 *
32 * Implementation of cl_device, cl_req for OSC layer. 32 * Implementation of cl_device, for OSC layer.
33 * 33 *
34 * Author: Nikita Danilov <nikita.danilov@sun.com> 34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 */ 35 */
@@ -49,7 +49,6 @@ struct kmem_cache *osc_lock_kmem;
49struct kmem_cache *osc_object_kmem; 49struct kmem_cache *osc_object_kmem;
50struct kmem_cache *osc_thread_kmem; 50struct kmem_cache *osc_thread_kmem;
51struct kmem_cache *osc_session_kmem; 51struct kmem_cache *osc_session_kmem;
52struct kmem_cache *osc_req_kmem;
53struct kmem_cache *osc_extent_kmem; 52struct kmem_cache *osc_extent_kmem;
54struct kmem_cache *osc_quota_kmem; 53struct kmem_cache *osc_quota_kmem;
55 54
@@ -75,11 +74,6 @@ struct lu_kmem_descr osc_caches[] = {
75 .ckd_size = sizeof(struct osc_session) 74 .ckd_size = sizeof(struct osc_session)
76 }, 75 },
77 { 76 {
78 .ckd_cache = &osc_req_kmem,
79 .ckd_name = "osc_req_kmem",
80 .ckd_size = sizeof(struct osc_req)
81 },
82 {
83 .ckd_cache = &osc_extent_kmem, 77 .ckd_cache = &osc_extent_kmem,
84 .ckd_name = "osc_extent_kmem", 78 .ckd_name = "osc_extent_kmem",
85 .ckd_size = sizeof(struct osc_extent) 79 .ckd_size = sizeof(struct osc_extent)
@@ -94,8 +88,6 @@ struct lu_kmem_descr osc_caches[] = {
94 } 88 }
95}; 89};
96 90
97struct lock_class_key osc_ast_guard_class;
98
99/***************************************************************************** 91/*****************************************************************************
100 * 92 *
101 * Type conversions. 93 * Type conversions.
@@ -178,10 +170,6 @@ static const struct lu_device_operations osc_lu_ops = {
178 .ldo_recovery_complete = NULL 170 .ldo_recovery_complete = NULL
179}; 171};
180 172
181static const struct cl_device_operations osc_cl_ops = {
182 .cdo_req_init = osc_req_init
183};
184
185static int osc_device_init(const struct lu_env *env, struct lu_device *d, 173static int osc_device_init(const struct lu_env *env, struct lu_device *d,
186 const char *name, struct lu_device *next) 174 const char *name, struct lu_device *next)
187{ 175{
@@ -220,7 +208,6 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
220 cl_device_init(&od->od_cl, t); 208 cl_device_init(&od->od_cl, t);
221 d = osc2lu_dev(od); 209 d = osc2lu_dev(od);
222 d->ld_ops = &osc_lu_ops; 210 d->ld_ops = &osc_lu_ops;
223 od->od_cl.cd_ops = &osc_cl_ops;
224 211
225 /* Setup OSC OBD */ 212 /* Setup OSC OBD */
226 obd = class_name2obd(lustre_cfg_string(cfg, 0)); 213 obd = class_name2obd(lustre_cfg_string(cfg, 0));
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 67fe0a254991..688783dcc1e4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -107,26 +107,24 @@ typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
107 int rc); 107 int rc);
108 108
109int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, 109int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
110 __u64 *flags, ldlm_policy_data_t *policy, 110 __u64 *flags, union ldlm_policy_data *policy,
111 struct ost_lvb *lvb, int kms_valid, 111 struct ost_lvb *lvb, int kms_valid,
112 osc_enqueue_upcall_f upcall, 112 osc_enqueue_upcall_f upcall,
113 void *cookie, struct ldlm_enqueue_info *einfo, 113 void *cookie, struct ldlm_enqueue_info *einfo,
114 struct ptlrpc_request_set *rqset, int async, int agl); 114 struct ptlrpc_request_set *rqset, int async, int agl);
115int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
116 115
117int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, 116int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
118 __u32 type, ldlm_policy_data_t *policy, __u32 mode, 117 __u32 type, union ldlm_policy_data *policy, __u32 mode,
119 __u64 *flags, void *data, struct lustre_handle *lockh, 118 __u64 *flags, void *data, struct lustre_handle *lockh,
120 int unref); 119 int unref);
121 120
122int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, 121int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
123 struct obd_trans_info *oti, 122 obd_enqueue_update_f upcall, void *cookie,
124 obd_enqueue_update_f upcall, void *cookie, 123 struct ptlrpc_request_set *rqset);
125 struct ptlrpc_request_set *rqset); 124int osc_punch_base(struct obd_export *exp, struct obdo *oa,
126int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
127 obd_enqueue_update_f upcall, void *cookie, 125 obd_enqueue_update_f upcall, void *cookie,
128 struct ptlrpc_request_set *rqset); 126 struct ptlrpc_request_set *rqset);
129int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, 127int osc_sync_base(struct osc_object *exp, struct obdo *oa,
130 obd_enqueue_update_f upcall, void *cookie, 128 obd_enqueue_update_f upcall, void *cookie,
131 struct ptlrpc_request_set *rqset); 129 struct ptlrpc_request_set *rqset);
132 130
@@ -135,7 +133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
135 struct list_head *ext_list, int cmd); 133 struct list_head *ext_list, int cmd);
136long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, 134long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
137 long target, bool force); 135 long target, bool force);
138long osc_lru_reclaim(struct client_obd *cli); 136long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
139 137
140unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock); 138unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
141 139
@@ -157,6 +155,11 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli)
157 return cli->cl_r_in_flight + cli->cl_w_in_flight; 155 return cli->cl_r_in_flight + cli->cl_w_in_flight;
158} 156}
159 157
158static inline char *cli_name(struct client_obd *cli)
159{
160 return cli->cl_import->imp_obd->obd_name;
161}
162
160struct osc_device { 163struct osc_device {
161 struct cl_device od_cl; 164 struct cl_device od_cl;
162 struct obd_export *od_exp; 165 struct obd_export *od_exp;
@@ -192,15 +195,27 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
192int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]); 195int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
193int osc_quotactl(struct obd_device *unused, struct obd_export *exp, 196int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
194 struct obd_quotactl *oqctl); 197 struct obd_quotactl *oqctl);
195int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
196 struct obd_quotactl *oqctl);
197int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
198void osc_inc_unstable_pages(struct ptlrpc_request *req); 198void osc_inc_unstable_pages(struct ptlrpc_request *req);
199void osc_dec_unstable_pages(struct ptlrpc_request *req); 199void osc_dec_unstable_pages(struct ptlrpc_request *req);
200bool osc_over_unstable_soft_limit(struct client_obd *cli); 200bool osc_over_unstable_soft_limit(struct client_obd *cli);
201 201
202/**
203 * Bit flags for osc_dlm_lock_at_pageoff().
204 */
205enum osc_dap_flags {
206 /**
207 * Just check if the desired lock exists, it won't hold reference
208 * count on lock.
209 */
210 OSC_DAP_FL_TEST_LOCK = BIT(0),
211 /**
212 * Return the lock even if it is being canceled.
213 */
214 OSC_DAP_FL_CANCELING = BIT(1),
215};
216
202struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, 217struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
203 struct osc_object *obj, pgoff_t index, 218 struct osc_object *obj, pgoff_t index,
204 int pending, int canceling); 219 enum osc_dap_flags flags);
205 220
206#endif /* OSC_INTERNAL_H */ 221#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 8a559cbcdd0c..228a97c098fe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -49,12 +49,6 @@
49 * 49 *
50 */ 50 */
51 51
52static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
53{
54 LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
55 return container_of0(slice, struct osc_req, or_cl);
56}
57
58static struct osc_io *cl2osc_io(const struct lu_env *env, 52static struct osc_io *cl2osc_io(const struct lu_env *env,
59 const struct cl_io_slice *slice) 53 const struct cl_io_slice *slice)
60{ 54{
@@ -64,20 +58,6 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
64 return oio; 58 return oio;
65} 59}
66 60
67static struct osc_page *osc_cl_page_osc(struct cl_page *page,
68 struct osc_object *osc)
69{
70 const struct cl_page_slice *slice;
71
72 if (osc)
73 slice = cl_object_page_slice(&osc->oo_cl, page);
74 else
75 slice = cl_page_at(page, &osc_device_type);
76 LASSERT(slice);
77
78 return cl2osc_page(slice);
79}
80
81/***************************************************************************** 61/*****************************************************************************
82 * 62 *
83 * io operations. 63 * io operations.
@@ -88,6 +68,45 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
88{ 68{
89} 69}
90 70
71static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
72{
73 struct ldlm_lock *dlmlock = cbdata;
74 struct lustre_handle lockh;
75
76 ldlm_lock2handle(dlmlock, &lockh);
77 ldlm_lock_decref(&lockh, LCK_PR);
78 LDLM_LOCK_PUT(dlmlock);
79}
80
81static int osc_io_read_ahead(const struct lu_env *env,
82 const struct cl_io_slice *ios,
83 pgoff_t start, struct cl_read_ahead *ra)
84{
85 struct osc_object *osc = cl2osc(ios->cis_obj);
86 struct ldlm_lock *dlmlock;
87 int result = -ENODATA;
88
89 dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
90 if (dlmlock) {
91 LASSERT(dlmlock->l_ast_data == osc);
92 if (dlmlock->l_req_mode != LCK_PR) {
93 struct lustre_handle lockh;
94
95 ldlm_lock2handle(dlmlock, &lockh);
96 ldlm_lock_addref(&lockh, LCK_PR);
97 ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
98 }
99
100 ra->cra_end = cl_index(osc2cl(osc),
101 dlmlock->l_policy_data.l_extent.end);
102 ra->cra_release = osc_read_ahead_release;
103 ra->cra_cbdata = dlmlock;
104 result = 0;
105 }
106
107 return result;
108}
109
91/** 110/**
92 * An implementation of cl_io_operations::cio_io_submit() method for osc 111 * An implementation of cl_io_operations::cio_io_submit() method for osc
93 * layer. Iterates over pages in the in-queue, prepares each for io by calling 112 * layer. Iterates over pages in the in-queue, prepares each for io by calling
@@ -334,7 +353,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
334 npages = max_pages; 353 npages = max_pages;
335 354
336 c = atomic_long_read(cli->cl_lru_left); 355 c = atomic_long_read(cli->cl_lru_left);
337 if (c < npages && osc_lru_reclaim(cli) > 0) 356 if (c < npages && osc_lru_reclaim(cli, npages) > 0)
338 c = atomic_long_read(cli->cl_lru_left); 357 c = atomic_long_read(cli->cl_lru_left);
339 while (c >= npages) { 358 while (c >= npages) {
340 if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) { 359 if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
@@ -343,6 +362,17 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
343 } 362 }
344 c = atomic_long_read(cli->cl_lru_left); 363 c = atomic_long_read(cli->cl_lru_left);
345 } 364 }
365 if (atomic_long_read(cli->cl_lru_left) < max_pages) {
366 /*
367 * If there aren't enough pages in the per-OSC LRU then
368 * wake up the LRU thread to try and clear out space, so
369 * we don't block if pages are being dirtied quickly.
370 */
371 CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
372 cli_name(cli), atomic_long_read(cli->cl_lru_left),
373 max_pages);
374 (void)ptlrpcd_queue_work(cli->cl_lru_work);
375 }
346 376
347 return 0; 377 return 0;
348} 378}
@@ -446,7 +476,6 @@ static int osc_io_setattr_start(const struct lu_env *env,
446 __u64 size = io->u.ci_setattr.sa_attr.lvb_size; 476 __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
447 unsigned int ia_valid = io->u.ci_setattr.sa_valid; 477 unsigned int ia_valid = io->u.ci_setattr.sa_valid;
448 int result = 0; 478 int result = 0;
449 struct obd_info oinfo = { };
450 479
451 /* truncate cache dirty pages first */ 480 /* truncate cache dirty pages first */
452 if (cl_io_is_trunc(io)) 481 if (cl_io_is_trunc(io))
@@ -486,11 +515,19 @@ static int osc_io_setattr_start(const struct lu_env *env,
486 oa->o_oi = loi->loi_oi; 515 oa->o_oi = loi->loi_oi;
487 obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid); 516 obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
488 oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index; 517 oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
489 oa->o_mtime = attr->cat_mtime; 518 oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
490 oa->o_atime = attr->cat_atime; 519 if (ia_valid & ATTR_CTIME) {
491 oa->o_ctime = attr->cat_ctime; 520 oa->o_valid |= OBD_MD_FLCTIME;
492 oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME | 521 oa->o_ctime = attr->cat_ctime;
493 OBD_MD_FLCTIME | OBD_MD_FLMTIME; 522 }
523 if (ia_valid & ATTR_ATIME) {
524 oa->o_valid |= OBD_MD_FLATIME;
525 oa->o_atime = attr->cat_atime;
526 }
527 if (ia_valid & ATTR_MTIME) {
528 oa->o_valid |= OBD_MD_FLMTIME;
529 oa->o_mtime = attr->cat_mtime;
530 }
494 if (ia_valid & ATTR_SIZE) { 531 if (ia_valid & ATTR_SIZE) {
495 oa->o_size = size; 532 oa->o_size = size;
496 oa->o_blocks = OBD_OBJECT_EOF; 533 oa->o_blocks = OBD_OBJECT_EOF;
@@ -503,19 +540,21 @@ static int osc_io_setattr_start(const struct lu_env *env,
503 } else { 540 } else {
504 LASSERT(oio->oi_lockless == 0); 541 LASSERT(oio->oi_lockless == 0);
505 } 542 }
543 if (ia_valid & ATTR_ATTR_FLAG) {
544 oa->o_flags = io->u.ci_setattr.sa_attr_flags;
545 oa->o_valid |= OBD_MD_FLFLAGS;
546 }
506 547
507 oinfo.oi_oa = oa;
508 init_completion(&cbargs->opc_sync); 548 init_completion(&cbargs->opc_sync);
509 549
510 if (ia_valid & ATTR_SIZE) 550 if (ia_valid & ATTR_SIZE)
511 result = osc_punch_base(osc_export(cl2osc(obj)), 551 result = osc_punch_base(osc_export(cl2osc(obj)),
512 &oinfo, osc_async_upcall, 552 oa, osc_async_upcall,
513 cbargs, PTLRPCD_SET); 553 cbargs, PTLRPCD_SET);
514 else 554 else
515 result = osc_setattr_async_base(osc_export(cl2osc(obj)), 555 result = osc_setattr_async(osc_export(cl2osc(obj)),
516 &oinfo, NULL, 556 oa, osc_async_upcall,
517 osc_async_upcall, 557 cbargs, PTLRPCD_SET);
518 cbargs, PTLRPCD_SET);
519 cbargs->opc_rpc_sent = result == 0; 558 cbargs->opc_rpc_sent = result == 0;
520 } 559 }
521 return result; 560 return result;
@@ -557,6 +596,107 @@ static void osc_io_setattr_end(const struct lu_env *env,
557 } 596 }
558} 597}
559 598
599struct osc_data_version_args {
600 struct osc_io *dva_oio;
601};
602
603static int
604osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
605 void *arg, int rc)
606{
607 struct osc_data_version_args *dva = arg;
608 struct osc_io *oio = dva->dva_oio;
609 const struct ost_body *body;
610
611 if (rc < 0)
612 goto out;
613
614 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
615 if (!body) {
616 rc = -EPROTO;
617 goto out;
618 }
619
620 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
621 &body->oa);
622out:
623 oio->oi_cbarg.opc_rc = rc;
624 complete(&oio->oi_cbarg.opc_sync);
625
626 return 0;
627}
628
629static int osc_io_data_version_start(const struct lu_env *env,
630 const struct cl_io_slice *slice)
631{
632 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
633 struct osc_io *oio = cl2osc_io(env, slice);
634 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
635 struct osc_object *obj = cl2osc(slice->cis_obj);
636 struct obd_export *exp = osc_export(obj);
637 struct lov_oinfo *loi = obj->oo_oinfo;
638 struct osc_data_version_args *dva;
639 struct obdo *oa = &oio->oi_oa;
640 struct ptlrpc_request *req;
641 struct ost_body *body;
642 int rc;
643
644 memset(oa, 0, sizeof(*oa));
645 oa->o_oi = loi->loi_oi;
646 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
647
648 if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
649 oa->o_valid |= OBD_MD_FLFLAGS;
650 oa->o_flags |= OBD_FL_SRVLOCK;
651 if (dv->dv_flags & LL_DV_WR_FLUSH)
652 oa->o_flags |= OBD_FL_FLUSH;
653 }
654
655 init_completion(&cbargs->opc_sync);
656
657 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
658 if (!req)
659 return -ENOMEM;
660
661 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
662 if (rc < 0) {
663 ptlrpc_request_free(req);
664 return rc;
665 }
666
667 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
668 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
669
670 ptlrpc_request_set_replen(req);
671 req->rq_interpret_reply = osc_data_version_interpret;
672 CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
673 dva = ptlrpc_req_async_args(req);
674 dva->dva_oio = oio;
675
676 ptlrpcd_add_req(req);
677
678 return 0;
679}
680
681static void osc_io_data_version_end(const struct lu_env *env,
682 const struct cl_io_slice *slice)
683{
684 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
685 struct osc_io *oio = cl2osc_io(env, slice);
686 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
687
688 wait_for_completion(&cbargs->opc_sync);
689
690 if (cbargs->opc_rc) {
691 slice->cis_io->ci_result = cbargs->opc_rc;
692 } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
693 slice->cis_io->ci_result = -EOPNOTSUPP;
694 } else {
695 dv->dv_data_version = oio->oi_oa.o_data_version;
696 slice->cis_io->ci_result = 0;
697 }
698}
699
560static int osc_io_read_start(const struct lu_env *env, 700static int osc_io_read_start(const struct lu_env *env,
561 const struct cl_io_slice *slice) 701 const struct cl_io_slice *slice)
562{ 702{
@@ -595,7 +735,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
595{ 735{
596 struct osc_io *oio = osc_env_io(env); 736 struct osc_io *oio = osc_env_io(env);
597 struct obdo *oa = &oio->oi_oa; 737 struct obdo *oa = &oio->oi_oa;
598 struct obd_info *oinfo = &oio->oi_info;
599 struct lov_oinfo *loi = obj->oo_oinfo; 738 struct lov_oinfo *loi = obj->oo_oinfo;
600 struct osc_async_cbargs *cbargs = &oio->oi_cbarg; 739 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
601 int rc = 0; 740 int rc = 0;
@@ -611,12 +750,9 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
611 750
612 obdo_set_parent_fid(oa, fio->fi_fid); 751 obdo_set_parent_fid(oa, fio->fi_fid);
613 752
614 memset(oinfo, 0, sizeof(*oinfo));
615 oinfo->oi_oa = oa;
616 init_completion(&cbargs->opc_sync); 753 init_completion(&cbargs->opc_sync);
617 754
618 rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs, 755 rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
619 PTLRPCD_SET);
620 return rc; 756 return rc;
621} 757}
622 758
@@ -710,6 +846,10 @@ static const struct cl_io_operations osc_io_ops = {
710 .cio_start = osc_io_setattr_start, 846 .cio_start = osc_io_setattr_start,
711 .cio_end = osc_io_setattr_end 847 .cio_end = osc_io_setattr_end
712 }, 848 },
849 [CIT_DATA_VERSION] = {
850 .cio_start = osc_io_data_version_start,
851 .cio_end = osc_io_data_version_end,
852 },
713 [CIT_FAULT] = { 853 [CIT_FAULT] = {
714 .cio_start = osc_io_fault_start, 854 .cio_start = osc_io_fault_start,
715 .cio_end = osc_io_end, 855 .cio_end = osc_io_end,
@@ -724,6 +864,7 @@ static const struct cl_io_operations osc_io_ops = {
724 .cio_fini = osc_io_fini 864 .cio_fini = osc_io_fini
725 } 865 }
726 }, 866 },
867 .cio_read_ahead = osc_io_read_ahead,
727 .cio_submit = osc_io_submit, 868 .cio_submit = osc_io_submit,
728 .cio_commit_async = osc_io_commit_async 869 .cio_commit_async = osc_io_commit_async
729}; 870};
@@ -734,103 +875,6 @@ static const struct cl_io_operations osc_io_ops = {
734 * 875 *
735 */ 876 */
736 877
737static int osc_req_prep(const struct lu_env *env,
738 const struct cl_req_slice *slice)
739{
740 return 0;
741}
742
743static void osc_req_completion(const struct lu_env *env,
744 const struct cl_req_slice *slice, int ioret)
745{
746 struct osc_req *or;
747
748 or = cl2osc_req(slice);
749 kmem_cache_free(osc_req_kmem, or);
750}
751
752/**
753 * Implementation of struct cl_req_operations::cro_attr_set() for osc
754 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
755 * fields.
756 */
757static void osc_req_attr_set(const struct lu_env *env,
758 const struct cl_req_slice *slice,
759 const struct cl_object *obj,
760 struct cl_req_attr *attr, u64 flags)
761{
762 struct lov_oinfo *oinfo;
763 struct cl_req *clerq;
764 struct cl_page *apage; /* _some_ page in @clerq */
765 struct ldlm_lock *lock; /* _some_ lock protecting @apage */
766 struct osc_page *opg;
767 struct obdo *oa;
768 struct ost_lvb *lvb;
769
770 oinfo = cl2osc(obj)->oo_oinfo;
771 lvb = &oinfo->loi_lvb;
772 oa = attr->cra_oa;
773
774 if ((flags & OBD_MD_FLMTIME) != 0) {
775 oa->o_mtime = lvb->lvb_mtime;
776 oa->o_valid |= OBD_MD_FLMTIME;
777 }
778 if ((flags & OBD_MD_FLATIME) != 0) {
779 oa->o_atime = lvb->lvb_atime;
780 oa->o_valid |= OBD_MD_FLATIME;
781 }
782 if ((flags & OBD_MD_FLCTIME) != 0) {
783 oa->o_ctime = lvb->lvb_ctime;
784 oa->o_valid |= OBD_MD_FLCTIME;
785 }
786 if (flags & OBD_MD_FLGROUP) {
787 ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
788 oa->o_valid |= OBD_MD_FLGROUP;
789 }
790 if (flags & OBD_MD_FLID) {
791 ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
792 oa->o_valid |= OBD_MD_FLID;
793 }
794 if (flags & OBD_MD_FLHANDLE) {
795 clerq = slice->crs_req;
796 LASSERT(!list_empty(&clerq->crq_pages));
797 apage = container_of(clerq->crq_pages.next,
798 struct cl_page, cp_flight);
799 opg = osc_cl_page_osc(apage, NULL);
800 lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
801 1, 1);
802 if (!lock && !opg->ops_srvlock) {
803 struct ldlm_resource *res;
804 struct ldlm_res_id *resname;
805
806 CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
807
808 resname = &osc_env_info(env)->oti_resname;
809 ostid_build_res_name(&oinfo->loi_oi, resname);
810 res = ldlm_resource_get(
811 osc_export(cl2osc(obj))->exp_obd->obd_namespace,
812 NULL, resname, LDLM_EXTENT, 0);
813 ldlm_resource_dump(D_ERROR, res);
814
815 dump_stack();
816 LBUG();
817 }
818
819 /* check for lockless io. */
820 if (lock) {
821 oa->o_handle = lock->l_remote_handle;
822 oa->o_valid |= OBD_MD_FLHANDLE;
823 LDLM_LOCK_PUT(lock);
824 }
825 }
826}
827
828static const struct cl_req_operations osc_req_ops = {
829 .cro_prep = osc_req_prep,
830 .cro_attr_set = osc_req_attr_set,
831 .cro_completion = osc_req_completion
832};
833
834int osc_io_init(const struct lu_env *env, 878int osc_io_init(const struct lu_env *env,
835 struct cl_object *obj, struct cl_io *io) 879 struct cl_object *obj, struct cl_io *io)
836{ 880{
@@ -841,20 +885,4 @@ int osc_io_init(const struct lu_env *env,
841 return 0; 885 return 0;
842} 886}
843 887
844int osc_req_init(const struct lu_env *env, struct cl_device *dev,
845 struct cl_req *req)
846{
847 struct osc_req *or;
848 int result;
849
850 or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
851 if (or) {
852 cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
853 result = 0;
854 } else {
855 result = -ENOMEM;
856 }
857 return result;
858}
859
860/** @} osc */ 888/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 39a8a5851603..5f799a4c78f9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -145,7 +145,7 @@ static void osc_lock_fini(const struct lu_env *env,
145 145
146static void osc_lock_build_policy(const struct lu_env *env, 146static void osc_lock_build_policy(const struct lu_env *env,
147 const struct cl_lock *lock, 147 const struct cl_lock *lock,
148 ldlm_policy_data_t *policy) 148 union ldlm_policy_data *policy)
149{ 149{
150 const struct cl_lock_descr *d = &lock->cll_descr; 150 const struct cl_lock_descr *d = &lock->cll_descr;
151 151
@@ -188,7 +188,7 @@ static void osc_lock_lvb_update(const struct lu_env *env,
188 struct cl_object *obj = osc2cl(osc); 188 struct cl_object *obj = osc2cl(osc);
189 struct lov_oinfo *oinfo = osc->oo_oinfo; 189 struct lov_oinfo *oinfo = osc->oo_oinfo;
190 struct cl_attr *attr = &osc_env_info(env)->oti_attr; 190 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
191 unsigned valid; 191 unsigned int valid;
192 192
193 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; 193 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
194 if (!lvb) 194 if (!lvb)
@@ -294,10 +294,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
294 struct osc_lock *oscl = cookie; 294 struct osc_lock *oscl = cookie;
295 struct cl_lock_slice *slice = &oscl->ols_cl; 295 struct cl_lock_slice *slice = &oscl->ols_cl;
296 struct lu_env *env; 296 struct lu_env *env;
297 struct cl_env_nest nest;
298 int rc; 297 int rc;
298 int refcheck;
299 299
300 env = cl_env_nested_get(&nest); 300 env = cl_env_get(&refcheck);
301 /* should never happen, similar to osc_ldlm_blocking_ast(). */ 301 /* should never happen, similar to osc_ldlm_blocking_ast(). */
302 LASSERT(!IS_ERR(env)); 302 LASSERT(!IS_ERR(env));
303 303
@@ -336,7 +336,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
336 336
337 if (oscl->ols_owner) 337 if (oscl->ols_owner)
338 cl_sync_io_note(env, oscl->ols_owner, rc); 338 cl_sync_io_note(env, oscl->ols_owner, rc);
339 cl_env_nested_put(&nest, env); 339 cl_env_put(env, &refcheck);
340 340
341 return rc; 341 return rc;
342} 342}
@@ -347,9 +347,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
347 struct osc_object *osc = cookie; 347 struct osc_object *osc = cookie;
348 struct ldlm_lock *dlmlock; 348 struct ldlm_lock *dlmlock;
349 struct lu_env *env; 349 struct lu_env *env;
350 struct cl_env_nest nest; 350 int refcheck;
351 351
352 env = cl_env_nested_get(&nest); 352 env = cl_env_get(&refcheck);
353 LASSERT(!IS_ERR(env)); 353 LASSERT(!IS_ERR(env));
354 354
355 if (errcode == ELDLM_LOCK_MATCHED) { 355 if (errcode == ELDLM_LOCK_MATCHED) {
@@ -374,7 +374,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
374 374
375out: 375out:
376 cl_object_put(env, osc2cl(osc)); 376 cl_object_put(env, osc2cl(osc));
377 cl_env_nested_put(&nest, env); 377 cl_env_put(env, &refcheck);
378 return ldlm_error2errno(errcode); 378 return ldlm_error2errno(errcode);
379} 379}
380 380
@@ -382,11 +382,11 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
382 enum cl_lock_mode mode, int discard) 382 enum cl_lock_mode mode, int discard)
383{ 383{
384 struct lu_env *env; 384 struct lu_env *env;
385 struct cl_env_nest nest; 385 int refcheck;
386 int rc = 0; 386 int rc = 0;
387 int rc2 = 0; 387 int rc2 = 0;
388 388
389 env = cl_env_nested_get(&nest); 389 env = cl_env_get(&refcheck);
390 if (IS_ERR(env)) 390 if (IS_ERR(env))
391 return PTR_ERR(env); 391 return PTR_ERR(env);
392 392
@@ -404,7 +404,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
404 if (rc == 0 && rc2 < 0) 404 if (rc == 0 && rc2 < 0)
405 rc = rc2; 405 rc = rc2;
406 406
407 cl_env_nested_put(&nest, env); 407 cl_env_put(env, &refcheck);
408 return rc; 408 return rc;
409} 409}
410 410
@@ -536,7 +536,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
536 } 536 }
537 case LDLM_CB_CANCELING: { 537 case LDLM_CB_CANCELING: {
538 struct lu_env *env; 538 struct lu_env *env;
539 struct cl_env_nest nest; 539 int refcheck;
540 540
541 /* 541 /*
542 * This can be called in the context of outer IO, e.g., 542 * This can be called in the context of outer IO, e.g.,
@@ -549,14 +549,14 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
549 * new environment has to be created to not corrupt outer 549 * new environment has to be created to not corrupt outer
550 * context. 550 * context.
551 */ 551 */
552 env = cl_env_nested_get(&nest); 552 env = cl_env_get(&refcheck);
553 if (IS_ERR(env)) { 553 if (IS_ERR(env)) {
554 result = PTR_ERR(env); 554 result = PTR_ERR(env);
555 break; 555 break;
556 } 556 }
557 557
558 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); 558 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
559 cl_env_nested_put(&nest, env); 559 cl_env_put(env, &refcheck);
560 break; 560 break;
561 } 561 }
562 default: 562 default:
@@ -568,61 +568,63 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
568static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) 568static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
569{ 569{
570 struct ptlrpc_request *req = data; 570 struct ptlrpc_request *req = data;
571 struct cl_env_nest nest;
572 struct lu_env *env; 571 struct lu_env *env;
573 struct ost_lvb *lvb; 572 struct ost_lvb *lvb;
574 struct req_capsule *cap; 573 struct req_capsule *cap;
574 struct cl_object *obj = NULL;
575 int result; 575 int result;
576 int refcheck;
576 577
577 LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); 578 LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
578 579
579 env = cl_env_nested_get(&nest); 580 env = cl_env_get(&refcheck);
580 if (!IS_ERR(env)) { 581 if (IS_ERR(env)) {
581 struct cl_object *obj = NULL; 582 result = PTR_ERR(env);
583 goto out;
584 }
582 585
583 lock_res_and_lock(dlmlock); 586 lock_res_and_lock(dlmlock);
584 if (dlmlock->l_ast_data) { 587 if (dlmlock->l_ast_data) {
585 obj = osc2cl(dlmlock->l_ast_data); 588 obj = osc2cl(dlmlock->l_ast_data);
586 cl_object_get(obj); 589 cl_object_get(obj);
587 } 590 }
588 unlock_res_and_lock(dlmlock); 591 unlock_res_and_lock(dlmlock);
589 592
590 if (obj) { 593 if (obj) {
591 /* Do not grab the mutex of cl_lock for glimpse. 594 /* Do not grab the mutex of cl_lock for glimpse.
592 * See LU-1274 for details. 595 * See LU-1274 for details.
593 * BTW, it's okay for cl_lock to be cancelled during 596 * BTW, it's okay for cl_lock to be cancelled during
594 * this period because server can handle this race. 597 * this period because server can handle this race.
595 * See ldlm_server_glimpse_ast() for details. 598 * See ldlm_server_glimpse_ast() for details.
596 * cl_lock_mutex_get(env, lock); 599 * cl_lock_mutex_get(env, lock);
597 */ 600 */
598 cap = &req->rq_pill; 601 cap = &req->rq_pill;
599 req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); 602 req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
600 req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, 603 req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
601 sizeof(*lvb)); 604 sizeof(*lvb));
602 result = req_capsule_server_pack(cap); 605 result = req_capsule_server_pack(cap);
603 if (result == 0) { 606 if (result == 0) {
604 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); 607 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
605 result = cl_object_glimpse(env, obj, lvb); 608 result = cl_object_glimpse(env, obj, lvb);
606 } 609 }
607 if (!exp_connect_lvb_type(req->rq_export)) 610 if (!exp_connect_lvb_type(req->rq_export)) {
608 req_capsule_shrink(&req->rq_pill, 611 req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
609 &RMF_DLM_LVB, 612 sizeof(struct ost_lvb_v1),
610 sizeof(struct ost_lvb_v1), 613 RCL_SERVER);
611 RCL_SERVER);
612 cl_object_put(env, obj);
613 } else {
614 /*
615 * These errors are normal races, so we don't want to
616 * fill the console with messages by calling
617 * ptlrpc_error()
618 */
619 lustre_pack_reply(req, 1, NULL, NULL);
620 result = -ELDLM_NO_LOCK_DATA;
621 } 614 }
622 cl_env_nested_put(&nest, env); 615 cl_object_put(env, obj);
623 } else { 616 } else {
624 result = PTR_ERR(env); 617 /*
618 * These errors are normal races, so we don't want to
619 * fill the console with messages by calling
620 * ptlrpc_error()
621 */
622 lustre_pack_reply(req, 1, NULL, NULL);
623 result = -ELDLM_NO_LOCK_DATA;
625 } 624 }
625 cl_env_put(env, &refcheck);
626
627out:
626 req->rq_status = result; 628 req->rq_status = result;
627 return result; 629 return result;
628} 630}
@@ -677,12 +679,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
677 */ 679 */
678unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) 680unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
679{ 681{
680 struct cl_env_nest nest;
681 struct lu_env *env; 682 struct lu_env *env;
682 struct osc_object *obj; 683 struct osc_object *obj;
683 struct osc_lock *oscl; 684 struct osc_lock *oscl;
684 unsigned long weight; 685 unsigned long weight;
685 bool found = false; 686 bool found = false;
687 int refcheck;
686 688
687 might_sleep(); 689 might_sleep();
688 /* 690 /*
@@ -692,7 +694,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 692	 * the upper context because cl_lock_put doesn't modify environment	 694	 * the upper context because cl_lock_put doesn't modify environment
693 * variables. But just in case .. 695 * variables. But just in case ..
694 */ 696 */
695 env = cl_env_nested_get(&nest); 697 env = cl_env_get(&refcheck);
696 if (IS_ERR(env)) 698 if (IS_ERR(env))
697 /* Mostly because lack of memory, do not eliminate this lock */ 699 /* Mostly because lack of memory, do not eliminate this lock */
698 return 1; 700 return 1;
@@ -722,7 +724,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
722 weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent); 724 weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
723 725
724out: 726out:
725 cl_env_nested_put(&nest, env); 727 cl_env_put(env, &refcheck);
726 return weight; 728 return weight;
727} 729}
728 730
@@ -912,7 +914,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
912 struct osc_lock *oscl = cl2osc_lock(slice); 914 struct osc_lock *oscl = cl2osc_lock(slice);
913 struct cl_lock *lock = slice->cls_lock; 915 struct cl_lock *lock = slice->cls_lock;
914 struct ldlm_res_id *resname = &info->oti_resname; 916 struct ldlm_res_id *resname = &info->oti_resname;
915 ldlm_policy_data_t *policy = &info->oti_policy; 917 union ldlm_policy_data *policy = &info->oti_policy;
916 osc_enqueue_upcall_f upcall = osc_lock_upcall; 918 osc_enqueue_upcall_f upcall = osc_lock_upcall;
917 void *cookie = oscl; 919 void *cookie = oscl;
918 bool async = false; 920 bool async = false;
@@ -1009,7 +1011,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
1009 1011
1010 if (olck->ols_hold) { 1012 if (olck->ols_hold) {
1011 olck->ols_hold = 0; 1013 olck->ols_hold = 0;
1012 osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode); 1014 ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
1013 olck->ols_handle.cookie = 0ULL; 1015 olck->ols_handle.cookie = 0ULL;
1014 } 1016 }
1015 1017
@@ -1180,11 +1182,11 @@ int osc_lock_init(const struct lu_env *env,
1180 */ 1182 */
1181struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, 1183struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
1182 struct osc_object *obj, pgoff_t index, 1184 struct osc_object *obj, pgoff_t index,
1183 int pending, int canceling) 1185 enum osc_dap_flags dap_flags)
1184{ 1186{
1185 struct osc_thread_info *info = osc_env_info(env); 1187 struct osc_thread_info *info = osc_env_info(env);
1186 struct ldlm_res_id *resname = &info->oti_resname; 1188 struct ldlm_res_id *resname = &info->oti_resname;
1187 ldlm_policy_data_t *policy = &info->oti_policy; 1189 union ldlm_policy_data *policy = &info->oti_policy;
1188 struct lustre_handle lockh; 1190 struct lustre_handle lockh;
1189 struct ldlm_lock *lock = NULL; 1191 struct ldlm_lock *lock = NULL;
1190 enum ldlm_mode mode; 1192 enum ldlm_mode mode;
@@ -1194,17 +1196,18 @@ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
1194 osc_index2policy(policy, osc2cl(obj), index, index); 1196 osc_index2policy(policy, osc2cl(obj), index, index);
1195 policy->l_extent.gid = LDLM_GID_ANY; 1197 policy->l_extent.gid = LDLM_GID_ANY;
1196 1198
1197 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK; 1199 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
1198 if (pending) 1200 if (dap_flags & OSC_DAP_FL_TEST_LOCK)
1199 flags |= LDLM_FL_CBPENDING; 1201 flags |= LDLM_FL_TEST_LOCK;
1202
1200 /* 1203 /*
1201 * It is fine to match any group lock since there could be only one 1204 * It is fine to match any group lock since there could be only one
 1202	 * with a unique gid and it conflicts with all other lock modes too	 1205	 * with a unique gid and it conflicts with all other lock modes too
1203 */ 1206 */
1204again: 1207again:
1205 mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace, 1208 mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
1206 flags, resname, LDLM_EXTENT, policy, 1209 LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
1207 LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling); 1210 dap_flags & OSC_DAP_FL_CANCELING);
1208 if (mode != 0) { 1211 if (mode != 0) {
1209 lock = ldlm_handle2lock(&lockh); 1212 lock = ldlm_handle2lock(&lockh);
1210 /* RACE: the lock is cancelled so let's try again */ 1213 /* RACE: the lock is cancelled so let's try again */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index aae3a2d4243f..e0c3324857dd 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -71,13 +71,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
71{ 71{
72 struct osc_object *osc = lu2osc(obj); 72 struct osc_object *osc = lu2osc(obj);
73 const struct cl_object_conf *cconf = lu2cl_conf(conf); 73 const struct cl_object_conf *cconf = lu2cl_conf(conf);
74 int i;
75 74
76 osc->oo_oinfo = cconf->u.coc_oinfo; 75 osc->oo_oinfo = cconf->u.coc_oinfo;
77 spin_lock_init(&osc->oo_seatbelt);
78 for (i = 0; i < CRT_NR; ++i)
79 INIT_LIST_HEAD(&osc->oo_inflight[i]);
80
81 INIT_LIST_HEAD(&osc->oo_ready_item); 76 INIT_LIST_HEAD(&osc->oo_ready_item);
82 INIT_LIST_HEAD(&osc->oo_hp_ready_item); 77 INIT_LIST_HEAD(&osc->oo_hp_ready_item);
83 INIT_LIST_HEAD(&osc->oo_write_item); 78 INIT_LIST_HEAD(&osc->oo_write_item);
@@ -103,10 +98,6 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
103static void osc_object_free(const struct lu_env *env, struct lu_object *obj) 98static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
104{ 99{
105 struct osc_object *osc = lu2osc(obj); 100 struct osc_object *osc = lu2osc(obj);
106 int i;
107
108 for (i = 0; i < CRT_NR; ++i)
109 LASSERT(list_empty(&osc->oo_inflight[i]));
110 101
111 LASSERT(list_empty(&osc->oo_ready_item)); 102 LASSERT(list_empty(&osc->oo_ready_item));
112 LASSERT(list_empty(&osc->oo_hp_ready_item)); 103 LASSERT(list_empty(&osc->oo_hp_ready_item));
@@ -218,6 +209,94 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
218 return 0; 209 return 0;
219} 210}
220 211
212static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
213 struct ll_fiemap_info_key *fmkey,
214 struct fiemap *fiemap, size_t *buflen)
215{
216 struct obd_export *exp = osc_export(cl2osc(obj));
217 union ldlm_policy_data policy;
218 struct ptlrpc_request *req;
219 struct lustre_handle lockh;
220 struct ldlm_res_id resid;
221 enum ldlm_mode mode = 0;
222 struct fiemap *reply;
223 char *tmp;
224 int rc;
225
226 fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
227 if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
228 goto skip_locking;
229
230 policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
231
232 if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
233 fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
234 policy.l_extent.end = OBD_OBJECT_EOF;
235 else
236 policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
237 fmkey->lfik_fiemap.fm_length +
238 PAGE_SIZE - 1) & PAGE_MASK;
239
240 ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
241 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
242 LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
243 &resid, LDLM_EXTENT, &policy,
244 LCK_PR | LCK_PW, &lockh, 0);
245 if (mode) { /* lock is cached on client */
246 if (mode != LCK_PR) {
247 ldlm_lock_addref(&lockh, LCK_PR);
248 ldlm_lock_decref(&lockh, LCK_PW);
249 }
 250	} else { /* no cached lock, need to acquire a lock on the server side */
251 fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
252 fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
253 }
254
255skip_locking:
256 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
257 &RQF_OST_GET_INFO_FIEMAP);
258 if (!req) {
259 rc = -ENOMEM;
260 goto drop_lock;
261 }
262
263 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
264 sizeof(*fmkey));
265 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
266 *buflen);
267 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
268 *buflen);
269
270 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
271 if (rc) {
272 ptlrpc_request_free(req);
273 goto drop_lock;
274 }
275 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
276 memcpy(tmp, fmkey, sizeof(*fmkey));
277 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
278 memcpy(tmp, fiemap, *buflen);
279 ptlrpc_request_set_replen(req);
280
281 rc = ptlrpc_queue_wait(req);
282 if (rc)
283 goto fini_req;
284
285 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
286 if (!reply) {
287 rc = -EPROTO;
288 goto fini_req;
289 }
290
291 memcpy(fiemap, reply, *buflen);
292fini_req:
293 ptlrpc_req_finished(req);
294drop_lock:
295 if (mode)
296 ldlm_lock_decref(&lockh, LCK_PR);
297 return rc;
298}
299
221void osc_object_set_contended(struct osc_object *obj) 300void osc_object_set_contended(struct osc_object *obj)
222{ 301{
223 obj->oo_contention_time = cfs_time_current(); 302 obj->oo_contention_time = cfs_time_current();
@@ -256,6 +335,76 @@ int osc_object_is_contended(struct osc_object *obj)
256 return 1; 335 return 1;
257} 336}
258 337
338/**
339 * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
340 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
341 * fields.
342 */
343static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
344 struct cl_req_attr *attr)
345{
346 u64 flags = attr->cra_flags;
347 struct lov_oinfo *oinfo;
348 struct ost_lvb *lvb;
349 struct obdo *oa;
350
351 oinfo = cl2osc(obj)->oo_oinfo;
352 lvb = &oinfo->loi_lvb;
353 oa = attr->cra_oa;
354
355 if (flags & OBD_MD_FLMTIME) {
356 oa->o_mtime = lvb->lvb_mtime;
357 oa->o_valid |= OBD_MD_FLMTIME;
358 }
359 if (flags & OBD_MD_FLATIME) {
360 oa->o_atime = lvb->lvb_atime;
361 oa->o_valid |= OBD_MD_FLATIME;
362 }
363 if (flags & OBD_MD_FLCTIME) {
364 oa->o_ctime = lvb->lvb_ctime;
365 oa->o_valid |= OBD_MD_FLCTIME;
366 }
367 if (flags & OBD_MD_FLGROUP) {
368 ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
369 oa->o_valid |= OBD_MD_FLGROUP;
370 }
371 if (flags & OBD_MD_FLID) {
372 ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
373 oa->o_valid |= OBD_MD_FLID;
374 }
375 if (flags & OBD_MD_FLHANDLE) {
376 struct ldlm_lock *lock;
377 struct osc_page *opg;
378
379 opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
380 lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
381 OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
382 if (!lock && !opg->ops_srvlock) {
383 struct ldlm_resource *res;
384 struct ldlm_res_id *resname;
385
386 CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
387 "uncovered page!\n");
388
389 resname = &osc_env_info(env)->oti_resname;
390 ostid_build_res_name(&oinfo->loi_oi, resname);
391 res = ldlm_resource_get(
392 osc_export(cl2osc(obj))->exp_obd->obd_namespace,
393 NULL, resname, LDLM_EXTENT, 0);
394 ldlm_resource_dump(D_ERROR, res);
395
396 LBUG();
397 }
398
399 /* check for lockless io. */
400 if (lock) {
401 oa->o_handle = lock->l_remote_handle;
402 oa->o_valid |= OBD_MD_FLHANDLE;
403 LDLM_LOCK_PUT(lock);
404 }
405 }
406}
407
259static const struct cl_object_operations osc_ops = { 408static const struct cl_object_operations osc_ops = {
260 .coo_page_init = osc_page_init, 409 .coo_page_init = osc_page_init,
261 .coo_lock_init = osc_lock_init, 410 .coo_lock_init = osc_lock_init,
@@ -263,7 +412,9 @@ static const struct cl_object_operations osc_ops = {
263 .coo_attr_get = osc_attr_get, 412 .coo_attr_get = osc_attr_get,
264 .coo_attr_update = osc_attr_update, 413 .coo_attr_update = osc_attr_update,
265 .coo_glimpse = osc_object_glimpse, 414 .coo_glimpse = osc_object_glimpse,
266 .coo_prune = osc_object_prune 415 .coo_prune = osc_object_prune,
416 .coo_fiemap = osc_object_fiemap,
417 .coo_req_attr_set = osc_req_attr_set
267}; 418};
268 419
269static const struct lu_object_operations osc_lu_obj_ops = { 420static const struct lu_object_operations osc_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2a7a70aa9e80..e356e4af08e1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -37,6 +37,7 @@
37 37
38#define DEBUG_SUBSYSTEM S_OSC 38#define DEBUG_SUBSYSTEM S_OSC
39 39
40#include <linux/math64.h>
40#include "osc_cl_internal.h" 41#include "osc_cl_internal.h"
41 42
42static void osc_lru_del(struct client_obd *cli, struct osc_page *opg); 43static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
@@ -86,11 +87,6 @@ static void osc_page_transfer_add(const struct lu_env *env,
86 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); 87 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
87 88
88 osc_lru_use(osc_cli(obj), opg); 89 osc_lru_use(osc_cli(obj), opg);
89
90 spin_lock(&obj->oo_seatbelt);
91 list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
92 opg->ops_submitter = current;
93 spin_unlock(&obj->oo_seatbelt);
94} 90}
95 91
96int osc_page_cache_add(const struct lu_env *env, 92int osc_page_cache_add(const struct lu_env *env,
@@ -109,7 +105,8 @@ int osc_page_cache_add(const struct lu_env *env,
109 return result; 105 return result;
110} 106}
111 107
112void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj, 108void osc_index2policy(union ldlm_policy_data *policy,
109 const struct cl_object *obj,
113 pgoff_t start, pgoff_t end) 110 pgoff_t start, pgoff_t end)
114{ 111{
115 memset(policy, 0, sizeof(*policy)); 112 memset(policy, 0, sizeof(*policy));
@@ -117,25 +114,6 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
117 policy->l_extent.end = cl_offset(obj, end + 1) - 1; 114 policy->l_extent.end = cl_offset(obj, end + 1) - 1;
118} 115}
119 116
120static int osc_page_is_under_lock(const struct lu_env *env,
121 const struct cl_page_slice *slice,
122 struct cl_io *unused, pgoff_t *max_index)
123{
124 struct osc_page *opg = cl2osc_page(slice);
125 struct ldlm_lock *dlmlock;
126 int result = -ENODATA;
127
128 dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
129 osc_index(opg), 1, 0);
130 if (dlmlock) {
131 *max_index = cl_index(slice->cpl_obj,
132 dlmlock->l_policy_data.l_extent.end);
133 LDLM_LOCK_PUT(dlmlock);
134 result = 0;
135 }
136 return result;
137}
138
139static const char *osc_list(struct list_head *head) 117static const char *osc_list(struct list_head *head)
140{ 118{
141 return list_empty(head) ? "-" : "+"; 119 return list_empty(head) ? "-" : "+";
@@ -158,7 +136,7 @@ static int osc_page_print(const struct lu_env *env,
158 struct osc_object *obj = cl2osc(slice->cpl_obj); 136 struct osc_object *obj = cl2osc(slice->cpl_obj);
159 struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli; 137 struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
160 138
161 return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n", 139 return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
162 opg, osc_index(opg), 140 opg, osc_index(opg),
163 /* 1 */ 141 /* 1 */
164 oap->oap_magic, oap->oap_cmd, 142 oap->oap_magic, oap->oap_cmd,
@@ -170,8 +148,7 @@ static int osc_page_print(const struct lu_env *env,
170 oap->oap_async_flags, oap->oap_brw_flags, 148 oap->oap_async_flags, oap->oap_brw_flags,
171 oap->oap_request, oap->oap_cli, obj, 149 oap->oap_request, oap->oap_cli, obj,
172 /* 3 */ 150 /* 3 */
173 osc_list(&opg->ops_inflight), 151 opg->ops_transfer_pinned,
174 opg->ops_submitter, opg->ops_transfer_pinned,
175 osc_submit_duration(opg), opg->ops_srvlock, 152 osc_submit_duration(opg), opg->ops_srvlock,
176 /* 4 */ 153 /* 4 */
177 cli->cl_r_in_flight, cli->cl_w_in_flight, 154 cli->cl_r_in_flight, cli->cl_w_in_flight,
@@ -210,14 +187,6 @@ static void osc_page_delete(const struct lu_env *env,
210 LASSERT(0); 187 LASSERT(0);
211 } 188 }
212 189
213 spin_lock(&obj->oo_seatbelt);
214 if (opg->ops_submitter) {
215 LASSERT(!list_empty(&opg->ops_inflight));
216 list_del_init(&opg->ops_inflight);
217 opg->ops_submitter = NULL;
218 }
219 spin_unlock(&obj->oo_seatbelt);
220
221 osc_lru_del(osc_cli(obj), opg); 190 osc_lru_del(osc_cli(obj), opg);
222 191
223 if (slice->cpl_page->cp_type == CPT_CACHEABLE) { 192 if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
@@ -276,7 +245,6 @@ static int osc_page_flush(const struct lu_env *env,
276static const struct cl_page_operations osc_page_ops = { 245static const struct cl_page_operations osc_page_ops = {
277 .cpo_print = osc_page_print, 246 .cpo_print = osc_page_print,
278 .cpo_delete = osc_page_delete, 247 .cpo_delete = osc_page_delete,
279 .cpo_is_under_lock = osc_page_is_under_lock,
280 .cpo_clip = osc_page_clip, 248 .cpo_clip = osc_page_clip,
281 .cpo_cancel = osc_page_cancel, 249 .cpo_cancel = osc_page_cancel,
282 .cpo_flush = osc_page_flush 250 .cpo_flush = osc_page_flush
@@ -301,10 +269,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
301 cl_page_slice_add(page, &opg->ops_cl, obj, index, 269 cl_page_slice_add(page, &opg->ops_cl, obj, index,
302 &osc_page_ops); 270 &osc_page_ops);
303 } 271 }
304 /* ops_inflight and ops_lru are the same field, but it doesn't
305 * hurt to initialize it twice :-)
306 */
307 INIT_LIST_HEAD(&opg->ops_inflight);
308 INIT_LIST_HEAD(&opg->ops_lru); 272 INIT_LIST_HEAD(&opg->ops_lru);
309 273
310 /* reserve an LRU space for this page */ 274 /* reserve an LRU space for this page */
@@ -362,16 +326,27 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
362 * OSC to free slots voluntarily to maintain a reasonable number of free slots 326 * OSC to free slots voluntarily to maintain a reasonable number of free slots
363 * at any time. 327 * at any time.
364 */ 328 */
365
366static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); 329static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
367/* LRU pages are freed in batch mode. OSC should at least free this 330
368 * number of pages to avoid running out of LRU budget, and.. 331/**
332 * LRU pages are freed in batch mode. OSC should at least free this
333 * number of pages to avoid running out of LRU slots.
334 */
335static inline int lru_shrink_min(struct client_obd *cli)
336{
337 return cli->cl_max_pages_per_rpc * 2;
338}
339
340/**
 341 * free at most this number, otherwise it will take too long to finish.
369 */ 342 */
370static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ 343static inline int lru_shrink_max(struct client_obd *cli)
371/* free this number at most otherwise it will take too long time to finish. */ 344{
372static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */ 345 return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
346}
373 347
374/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, 348/**
 349 * Check if we can free LRU slots from this OSC. If there exist LRU waiters,
375 * we should free slots aggressively. In this way, slots are freed in a steady 350 * we should free slots aggressively. In this way, slots are freed in a steady
376 * step to maintain fairness among OSCs. 351 * step to maintain fairness among OSCs.
377 * 352 *
@@ -388,13 +363,20 @@ static int osc_cache_too_much(struct client_obd *cli)
388 /* if it's going to run out LRU slots, we should free some, but not 363 /* if it's going to run out LRU slots, we should free some, but not
389 * too much to maintain fairness among OSCs. 364 * too much to maintain fairness among OSCs.
390 */ 365 */
391 if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { 366 if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
392 if (pages >= budget) 367 if (pages >= budget)
393 return lru_shrink_max; 368 return lru_shrink_max(cli);
394 else if (pages >= budget / 2) 369 else if (pages >= budget / 2)
395 return lru_shrink_min; 370 return lru_shrink_min(cli);
396 } else if (pages >= budget * 2) { 371 } else {
397 return lru_shrink_min; 372 time64_t duration = ktime_get_real_seconds();
373
374 /* knock out pages by duration of no IO activity */
375 duration -= cli->cl_lru_last_used;
376 duration >>= 6; /* approximately 1 minute */
377 if (duration > 0 &&
378 pages >= div64_s64((s64)budget, duration))
379 return lru_shrink_min(cli);
398 } 380 }
399 return 0; 381 return 0;
400} 382}
@@ -402,11 +384,21 @@ static int osc_cache_too_much(struct client_obd *cli)
402int lru_queue_work(const struct lu_env *env, void *data) 384int lru_queue_work(const struct lu_env *env, void *data)
403{ 385{
404 struct client_obd *cli = data; 386 struct client_obd *cli = data;
387 int count;
405 388
406 CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli); 389 CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
407 390
408 if (osc_cache_too_much(cli)) 391 count = osc_cache_too_much(cli);
409 osc_lru_shrink(env, cli, lru_shrink_max, true); 392 if (count > 0) {
393 int rc = osc_lru_shrink(env, cli, count, false);
394
395 CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
396 cli_name(cli), rc, count);
397 if (rc >= count) {
398 CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
399 ptlrpcd_queue_work(cli->cl_lru_work);
400 }
401 }
410 402
411 return 0; 403 return 0;
412} 404}
@@ -433,10 +425,10 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
433 list_splice_tail(&lru, &cli->cl_lru_list); 425 list_splice_tail(&lru, &cli->cl_lru_list);
434 atomic_long_sub(npages, &cli->cl_lru_busy); 426 atomic_long_sub(npages, &cli->cl_lru_busy);
435 atomic_long_add(npages, &cli->cl_lru_in_list); 427 atomic_long_add(npages, &cli->cl_lru_in_list);
428 cli->cl_lru_last_used = ktime_get_real_seconds();
436 spin_unlock(&cli->cl_lru_list_lock); 429 spin_unlock(&cli->cl_lru_list_lock);
437 430
438 /* XXX: May set force to be true for better performance */ 431 if (waitqueue_active(&osc_lru_waitq))
439 if (osc_cache_too_much(cli))
440 (void)ptlrpcd_queue_work(cli->cl_lru_work); 432 (void)ptlrpcd_queue_work(cli->cl_lru_work);
441 } 433 }
442} 434}
@@ -469,8 +461,10 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
469 * this osc occupies too many LRU pages and kernel is 461 * this osc occupies too many LRU pages and kernel is
470 * stealing one of them. 462 * stealing one of them.
471 */ 463 */
472 if (!memory_pressure_get()) 464 if (osc_cache_too_much(cli)) {
465 CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
473 (void)ptlrpcd_queue_work(cli->cl_lru_work); 466 (void)ptlrpcd_queue_work(cli->cl_lru_work);
467 }
474 wake_up(&osc_lru_waitq); 468 wake_up(&osc_lru_waitq);
475 } else { 469 } else {
476 LASSERT(list_empty(&opg->ops_lru)); 470 LASSERT(list_empty(&opg->ops_lru));
@@ -502,6 +496,7 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
502 struct cl_page *page = pvec[i]; 496 struct cl_page *page = pvec[i];
503 497
504 LASSERT(cl_page_is_owned(page, io)); 498 LASSERT(cl_page_is_owned(page, io));
499 cl_page_delete(env, page);
505 cl_page_discard(env, io, page); 500 cl_page_discard(env, io, page);
506 cl_page_disown(env, io, page); 501 cl_page_disown(env, io, page);
507 cl_page_put(env, page); 502 cl_page_put(env, page);
@@ -542,7 +537,6 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
542 struct cl_object *clobj = NULL; 537 struct cl_object *clobj = NULL;
543 struct cl_page **pvec; 538 struct cl_page **pvec;
544 struct osc_page *opg; 539 struct osc_page *opg;
545 struct osc_page *temp;
546 int maxscan = 0; 540 int maxscan = 0;
547 long count = 0; 541 long count = 0;
548 int index = 0; 542 int index = 0;
@@ -552,6 +546,8 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
552 if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0) 546 if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
553 return 0; 547 return 0;
554 548
549 CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
550 cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
555 if (!force) { 551 if (!force) {
556 if (atomic_read(&cli->cl_lru_shrinkers) > 0) 552 if (atomic_read(&cli->cl_lru_shrinkers) > 0)
557 return -EBUSY; 553 return -EBUSY;
@@ -568,14 +564,21 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
568 io = &osc_env_info(env)->oti_io; 564 io = &osc_env_info(env)->oti_io;
569 565
570 spin_lock(&cli->cl_lru_list_lock); 566 spin_lock(&cli->cl_lru_list_lock);
567 if (force)
568 cli->cl_lru_reclaim++;
571 maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list)); 569 maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
572 list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { 570 while (!list_empty(&cli->cl_lru_list)) {
573 struct cl_page *page; 571 struct cl_page *page;
574 bool will_free = false; 572 bool will_free = false;
575 573
574 if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
575 break;
576
576 if (--maxscan < 0) 577 if (--maxscan < 0)
577 break; 578 break;
578 579
580 opg = list_entry(cli->cl_lru_list.next, struct osc_page,
581 ops_lru);
579 page = opg->ops_cl.cpl_page; 582 page = opg->ops_cl.cpl_page;
580 if (lru_page_busy(cli, page)) { 583 if (lru_page_busy(cli, page)) {
581 list_move_tail(&opg->ops_lru, &cli->cl_lru_list); 584 list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
@@ -662,34 +665,43 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
662 return count > 0 ? count : rc; 665 return count > 0 ? count : rc;
663} 666}
664 667
665long osc_lru_reclaim(struct client_obd *cli) 668/**
669 * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
670 * \@npages of LRU slots. For performance consideration, it's better to drop
671 * LRU pages in batch. Therefore, the actual number is adjusted at least
672 * max_pages_per_rpc.
673 */
674long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
666{ 675{
667 struct cl_env_nest nest;
668 struct lu_env *env; 676 struct lu_env *env;
669 struct cl_client_cache *cache = cli->cl_cache; 677 struct cl_client_cache *cache = cli->cl_cache;
670 int max_scans; 678 int max_scans;
679 int refcheck;
671 long rc = 0; 680 long rc = 0;
672 681
673 LASSERT(cache); 682 LASSERT(cache);
674 683
675 env = cl_env_nested_get(&nest); 684 env = cl_env_get(&refcheck);
676 if (IS_ERR(env)) 685 if (IS_ERR(env))
677 return 0; 686 return 0;
678 687
679 rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false); 688 npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
680 if (rc != 0) { 689 CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
681 if (rc == -EBUSY) 690 cli_name(cli), npages);
682 rc = 0; 691 rc = osc_lru_shrink(env, cli, npages, true);
683 692 if (rc >= npages) {
684 CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n", 693 CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
685 cli->cl_import->imp_obd->obd_name, rc, cli); 694 cli_name(cli), rc, npages);
695 if (osc_cache_too_much(cli) > 0)
696 ptlrpcd_queue_work(cli->cl_lru_work);
686 goto out; 697 goto out;
698 } else if (rc > 0) {
699 npages -= rc;
687 } 700 }
688 701
689 CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n", 702 CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
690 cli->cl_import->imp_obd->obd_name, cli, 703 cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
691 atomic_long_read(&cli->cl_lru_in_list), 704 atomic_long_read(&cli->cl_lru_busy), npages);
692 atomic_long_read(&cli->cl_lru_busy));
693 705
694 /* Reclaim LRU slots from other client_obd as it can't free enough 706 /* Reclaim LRU slots from other client_obd as it can't free enough
695 * from its own. This should rarely happen. 707 * from its own. This should rarely happen.
@@ -706,7 +718,7 @@ long osc_lru_reclaim(struct client_obd *cli)
706 cl_lru_osc); 718 cl_lru_osc);
707 719
708 CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n", 720 CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
709 cli->cl_import->imp_obd->obd_name, cli, 721 cli_name(cli), cli,
710 atomic_long_read(&cli->cl_lru_in_list), 722 atomic_long_read(&cli->cl_lru_in_list),
711 atomic_long_read(&cli->cl_lru_busy)); 723 atomic_long_read(&cli->cl_lru_busy));
712 724
@@ -714,19 +726,20 @@ long osc_lru_reclaim(struct client_obd *cli)
714 if (osc_cache_too_much(cli) > 0) { 726 if (osc_cache_too_much(cli) > 0) {
715 spin_unlock(&cache->ccc_lru_lock); 727 spin_unlock(&cache->ccc_lru_lock);
716 728
717 rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), 729 rc = osc_lru_shrink(env, cli, npages, true);
718 true);
719 spin_lock(&cache->ccc_lru_lock); 730 spin_lock(&cache->ccc_lru_lock);
720 if (rc != 0) 731 if (rc >= npages)
721 break; 732 break;
733 if (rc > 0)
734 npages -= rc;
722 } 735 }
723 } 736 }
724 spin_unlock(&cache->ccc_lru_lock); 737 spin_unlock(&cache->ccc_lru_lock);
725 738
726out: 739out:
727 cl_env_nested_put(&nest, env); 740 cl_env_put(env, &refcheck);
728 CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n", 741 CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
729 cli->cl_import->imp_obd->obd_name, cli, rc); 742 cli_name(cli), cli, rc);
730 return rc; 743 return rc;
731} 744}
732 745
@@ -756,7 +769,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
756 LASSERT(atomic_long_read(cli->cl_lru_left) >= 0); 769 LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
757 while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) { 770 while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
758 /* run out of LRU spaces, try to drop some by itself */ 771 /* run out of LRU spaces, try to drop some by itself */
759 rc = osc_lru_reclaim(cli); 772 rc = osc_lru_reclaim(cli, 1);
760 if (rc < 0) 773 if (rc < 0)
761 break; 774 break;
762 if (rc > 0) 775 if (rc > 0)
@@ -796,8 +809,10 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
796 int count = 0; 809 int count = 0;
797 int i; 810 int i;
798 811
812 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
813
799 for (i = 0; i < page_count; i++) { 814 for (i = 0; i < page_count; i++) {
800 pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page); 815 pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page);
801 816
802 if (likely(pgdat == last)) { 817 if (likely(pgdat == last)) {
803 ++count; 818 ++count;
@@ -857,7 +872,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
857 if (!unstable_count) 872 if (!unstable_count)
858 wake_up_all(&cli->cl_cache->ccc_unstable_waitq); 873 wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
859 874
860 if (osc_cache_too_much(cli)) 875 if (waitqueue_active(&osc_lru_waitq))
861 (void)ptlrpcd_queue_work(cli->cl_lru_work); 876 (void)ptlrpcd_queue_work(cli->cl_lru_work);
862} 877}
863 878
@@ -913,8 +928,7 @@ bool osc_over_unstable_soft_limit(struct client_obd *cli)
913 928
914 CDEBUG(D_CACHE, 929 CDEBUG(D_CACHE,
915 "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n", 930 "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
916 cli->cl_import->imp_obd->obd_name, cli, 931 cli_name(cli), cli, unstable_nr, osc_unstable_count);
917 unstable_nr, osc_unstable_count);
918 932
919 /* 933 /*
920 * If the LRU slots are in shortage - 25% remaining AND this OSC 934 * If the LRU slots are in shortage - 25% remaining AND this OSC
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 194d8ede40a2..fed4da63ee45 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -106,7 +106,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
106 } 106 }
107 107
108 CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n", 108 CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
109 cli->cl_import->imp_obd->obd_name, 109 cli_name(cli),
110 type == USRQUOTA ? "user" : "group", 110 type == USRQUOTA ? "user" : "group",
111 qid[type], rc); 111 qid[type], rc);
112 } else { 112 } else {
@@ -122,7 +122,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
122 kmem_cache_free(osc_quota_kmem, oqi); 122 kmem_cache_free(osc_quota_kmem, oqi);
123 123
124 CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n", 124 CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
125 cli->cl_import->imp_obd->obd_name, 125 cli_name(cli),
126 type == USRQUOTA ? "user" : "group", 126 type == USRQUOTA ? "user" : "group",
127 qid[type], oqi); 127 qid[type], oqi);
128 } 128 }
@@ -134,8 +134,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
134/* 134/*
135 * Hash operations for uid/gid <-> osc_quota_info 135 * Hash operations for uid/gid <-> osc_quota_info
136 */ 136 */
137static unsigned 137static unsigned int
138oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask) 138oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
139{ 139{
140 return cfs_hash_u32_hash(*((__u32 *)key), mask); 140 return cfs_hash_u32_hash(*((__u32 *)key), mask);
141} 141}
@@ -281,47 +281,3 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
281 281
282 return rc; 282 return rc;
283} 283}
284
285int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
286 struct obd_quotactl *oqctl)
287{
288 struct client_obd *cli = &exp->exp_obd->u.cli;
289 struct ptlrpc_request *req;
290 struct obd_quotactl *body;
291 int rc;
292
293 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
294 &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
295 OST_QUOTACHECK);
296 if (!req)
297 return -ENOMEM;
298
299 body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
300 *body = *oqctl;
301
302 ptlrpc_request_set_replen(req);
303
304 /* the next poll will find -ENODATA, that means quotacheck is going on
305 */
306 cli->cl_qchk_stat = -ENODATA;
307 rc = ptlrpc_queue_wait(req);
308 if (rc)
309 cli->cl_qchk_stat = rc;
310 ptlrpc_req_finished(req);
311 return rc;
312}
313
314int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
315{
316 struct client_obd *cli = &exp->exp_obd->u.cli;
317 int rc;
318
319 qchk->obd_uuid = cli->cl_target_uuid;
320 memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
321
322 rc = cli->cl_qchk_stat;
323 /* the client is not the previous one */
324 if (rc == CL_NOT_QUOTACHECKED)
325 rc = -EINTR;
326 return rc;
327}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 749781f022e2..7143564ae7e7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -68,7 +68,6 @@ struct osc_brw_async_args {
68 struct client_obd *aa_cli; 68 struct client_obd *aa_cli;
69 struct list_head aa_oaps; 69 struct list_head aa_oaps;
70 struct list_head aa_exts; 70 struct list_head aa_exts;
71 struct cl_req *aa_clerq;
72}; 71};
73 72
74struct osc_async_args { 73struct osc_async_args {
@@ -82,7 +81,8 @@ struct osc_setattr_args {
82}; 81};
83 82
84struct osc_fsync_args { 83struct osc_fsync_args {
85 struct obd_info *fa_oi; 84 struct osc_object *fa_obj;
85 struct obdo *fa_oa;
86 obd_enqueue_update_f fa_upcall; 86 obd_enqueue_update_f fa_upcall;
87 void *fa_cookie; 87 void *fa_cookie;
88}; 88};
@@ -103,140 +103,19 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count);
103static int brw_interpret(const struct lu_env *env, 103static int brw_interpret(const struct lu_env *env,
104 struct ptlrpc_request *req, void *data, int rc); 104 struct ptlrpc_request *req, void *data, int rc);
105 105
106/* Unpack OSC object metadata from disk storage (LE byte order). */
107static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
108 struct lov_mds_md *lmm, int lmm_bytes)
109{
110 int lsm_size;
111 struct obd_import *imp = class_exp2cliimp(exp);
112
113 if (lmm) {
114 if (lmm_bytes < sizeof(*lmm)) {
115 CERROR("%s: lov_mds_md too small: %d, need %d\n",
116 exp->exp_obd->obd_name, lmm_bytes,
117 (int)sizeof(*lmm));
118 return -EINVAL;
119 }
120 /* XXX LOV_MAGIC etc check? */
121
122 if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
123 CERROR("%s: zero lmm_object_id: rc = %d\n",
124 exp->exp_obd->obd_name, -EINVAL);
125 return -EINVAL;
126 }
127 }
128
129 lsm_size = lov_stripe_md_size(1);
130 if (!lsmp)
131 return lsm_size;
132
133 if (*lsmp && !lmm) {
134 kfree((*lsmp)->lsm_oinfo[0]);
135 kfree(*lsmp);
136 *lsmp = NULL;
137 return 0;
138 }
139
140 if (!*lsmp) {
141 *lsmp = kzalloc(lsm_size, GFP_NOFS);
142 if (unlikely(!*lsmp))
143 return -ENOMEM;
144 (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
145 GFP_NOFS);
146 if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
147 kfree(*lsmp);
148 return -ENOMEM;
149 }
150 loi_init((*lsmp)->lsm_oinfo[0]);
151 } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
152 return -EBADF;
153 }
154
155 if (lmm)
156 /* XXX zero *lsmp? */
157 ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
158
159 if (imp &&
160 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
161 (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
162 else
163 (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
164
165 return lsm_size;
166}
167
168static inline void osc_pack_req_body(struct ptlrpc_request *req, 106static inline void osc_pack_req_body(struct ptlrpc_request *req,
169 struct obd_info *oinfo) 107 struct obdo *oa)
170{ 108{
171 struct ost_body *body; 109 struct ost_body *body;
172 110
173 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); 111 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
174 LASSERT(body); 112 LASSERT(body);
175 113
176 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, 114 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
177 oinfo->oi_oa);
178}
179
180static int osc_getattr_interpret(const struct lu_env *env,
181 struct ptlrpc_request *req,
182 struct osc_async_args *aa, int rc)
183{
184 struct ost_body *body;
185
186 if (rc != 0)
187 goto out;
188
189 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
190 if (body) {
191 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
192 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
193 aa->aa_oi->oi_oa, &body->oa);
194
195 /* This should really be sent by the OST */
196 aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
197 aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
198 } else {
199 CDEBUG(D_INFO, "can't unpack ost_body\n");
200 rc = -EPROTO;
201 aa->aa_oi->oi_oa->o_valid = 0;
202 }
203out:
204 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
205 return rc;
206}
207
208static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
209 struct ptlrpc_request_set *set)
210{
211 struct ptlrpc_request *req;
212 struct osc_async_args *aa;
213 int rc;
214
215 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
216 if (!req)
217 return -ENOMEM;
218
219 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
220 if (rc) {
221 ptlrpc_request_free(req);
222 return rc;
223 }
224
225 osc_pack_req_body(req, oinfo);
226
227 ptlrpc_request_set_replen(req);
228 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
229
230 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
231 aa = ptlrpc_req_async_args(req);
232 aa->aa_oi = oinfo;
233
234 ptlrpc_set_add_req(set, req);
235 return 0;
236} 115}
237 116
238static int osc_getattr(const struct lu_env *env, struct obd_export *exp, 117static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
239 struct obd_info *oinfo) 118 struct obdo *oa)
240{ 119{
241 struct ptlrpc_request *req; 120 struct ptlrpc_request *req;
242 struct ost_body *body; 121 struct ost_body *body;
@@ -252,7 +131,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
252 return rc; 131 return rc;
253 } 132 }
254 133
255 osc_pack_req_body(req, oinfo); 134 osc_pack_req_body(req, oa);
256 135
257 ptlrpc_request_set_replen(req); 136 ptlrpc_request_set_replen(req);
258 137
@@ -267,11 +146,11 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
267 } 146 }
268 147
269 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); 148 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
270 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, 149 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
271 &body->oa); 150 &body->oa);
272 151
273 oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd); 152 oa->o_blksize = cli_brw_size(exp->exp_obd);
274 oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ; 153 oa->o_valid |= OBD_MD_FLBLKSZ;
275 154
276 out: 155 out:
277 ptlrpc_req_finished(req); 156 ptlrpc_req_finished(req);
@@ -279,13 +158,13 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
279} 158}
280 159
281static int osc_setattr(const struct lu_env *env, struct obd_export *exp, 160static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
282 struct obd_info *oinfo, struct obd_trans_info *oti) 161 struct obdo *oa)
283{ 162{
284 struct ptlrpc_request *req; 163 struct ptlrpc_request *req;
285 struct ost_body *body; 164 struct ost_body *body;
286 int rc; 165 int rc;
287 166
288 LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); 167 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
289 168
290 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); 169 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
291 if (!req) 170 if (!req)
@@ -297,7 +176,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
297 return rc; 176 return rc;
298 } 177 }
299 178
300 osc_pack_req_body(req, oinfo); 179 osc_pack_req_body(req, oa);
301 180
302 ptlrpc_request_set_replen(req); 181 ptlrpc_request_set_replen(req);
303 182
@@ -311,7 +190,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
311 goto out; 190 goto out;
312 } 191 }
313 192
314 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, 193 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
315 &body->oa); 194 &body->oa);
316 195
317out: 196out:
@@ -341,10 +220,9 @@ out:
341 return rc; 220 return rc;
342} 221}
343 222
344int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, 223int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
345 struct obd_trans_info *oti, 224 obd_enqueue_update_f upcall, void *cookie,
346 obd_enqueue_update_f upcall, void *cookie, 225 struct ptlrpc_request_set *rqset)
347 struct ptlrpc_request_set *rqset)
348{ 226{
349 struct ptlrpc_request *req; 227 struct ptlrpc_request *req;
350 struct osc_setattr_args *sa; 228 struct osc_setattr_args *sa;
@@ -360,10 +238,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
360 return rc; 238 return rc;
361 } 239 }
362 240
363 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) 241 osc_pack_req_body(req, oa);
364 oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
365
366 osc_pack_req_body(req, oinfo);
367 242
368 ptlrpc_request_set_replen(req); 243 ptlrpc_request_set_replen(req);
369 244
@@ -377,7 +252,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
377 252
378 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); 253 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
379 sa = ptlrpc_req_async_args(req); 254 sa = ptlrpc_req_async_args(req);
380 sa->sa_oa = oinfo->oi_oa; 255 sa->sa_oa = oa;
381 sa->sa_upcall = upcall; 256 sa->sa_upcall = upcall;
382 sa->sa_cookie = cookie; 257 sa->sa_cookie = cookie;
383 258
@@ -390,16 +265,8 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
390 return 0; 265 return 0;
391} 266}
392 267
393static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
394 struct obd_trans_info *oti,
395 struct ptlrpc_request_set *rqset)
396{
397 return osc_setattr_async_base(exp, oinfo, oti,
398 oinfo->oi_cb_up, oinfo, rqset);
399}
400
401static int osc_create(const struct lu_env *env, struct obd_export *exp, 268static int osc_create(const struct lu_env *env, struct obd_export *exp,
402 struct obdo *oa, struct obd_trans_info *oti) 269 struct obdo *oa)
403{ 270{
404 struct ptlrpc_request *req; 271 struct ptlrpc_request *req;
405 struct ost_body *body; 272 struct ost_body *body;
@@ -428,15 +295,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
428 295
429 ptlrpc_request_set_replen(req); 296 ptlrpc_request_set_replen(req);
430 297
431 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
432 oa->o_flags == OBD_FL_DELORPHAN) {
433 DEBUG_REQ(D_HA, req,
434 "delorphan from OST integration");
435 /* Don't resend the delorphan req */
436 req->rq_no_resend = 1;
437 req->rq_no_delay = 1;
438 }
439
440 rc = ptlrpc_queue_wait(req); 298 rc = ptlrpc_queue_wait(req);
441 if (rc) 299 if (rc)
442 goto out_req; 300 goto out_req;
@@ -453,12 +311,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
453 oa->o_blksize = cli_brw_size(exp->exp_obd); 311 oa->o_blksize = cli_brw_size(exp->exp_obd);
454 oa->o_valid |= OBD_MD_FLBLKSZ; 312 oa->o_valid |= OBD_MD_FLBLKSZ;
455 313
456 if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
457 if (!oti->oti_logcookies)
458 oti->oti_logcookies = &oti->oti_onecookie;
459 *oti->oti_logcookies = oa->o_lcookie;
460 }
461
462 CDEBUG(D_HA, "transno: %lld\n", 314 CDEBUG(D_HA, "transno: %lld\n",
463 lustre_msg_get_transno(req->rq_repmsg)); 315 lustre_msg_get_transno(req->rq_repmsg));
464out_req: 316out_req:
@@ -467,7 +319,7 @@ out:
467 return rc; 319 return rc;
468} 320}
469 321
470int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, 322int osc_punch_base(struct obd_export *exp, struct obdo *oa,
471 obd_enqueue_update_f upcall, void *cookie, 323 obd_enqueue_update_f upcall, void *cookie,
472 struct ptlrpc_request_set *rqset) 324 struct ptlrpc_request_set *rqset)
473{ 325{
@@ -491,14 +343,14 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
491 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); 343 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
492 LASSERT(body); 344 LASSERT(body);
493 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, 345 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
494 oinfo->oi_oa); 346 oa);
495 347
496 ptlrpc_request_set_replen(req); 348 ptlrpc_request_set_replen(req);
497 349
498 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; 350 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
499 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); 351 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
500 sa = ptlrpc_req_async_args(req); 352 sa = ptlrpc_req_async_args(req);
501 sa->sa_oa = oinfo->oi_oa; 353 sa->sa_oa = oa;
502 sa->sa_upcall = upcall; 354 sa->sa_upcall = upcall;
503 sa->sa_cookie = cookie; 355 sa->sa_cookie = cookie;
504 if (rqset == PTLRPCD_SET) 356 if (rqset == PTLRPCD_SET)
@@ -513,8 +365,11 @@ static int osc_sync_interpret(const struct lu_env *env,
513 struct ptlrpc_request *req, 365 struct ptlrpc_request *req,
514 void *arg, int rc) 366 void *arg, int rc)
515{ 367{
368 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
516 struct osc_fsync_args *fa = arg; 369 struct osc_fsync_args *fa = arg;
370 unsigned long valid = 0;
517 struct ost_body *body; 371 struct ost_body *body;
372 struct cl_object *obj;
518 373
519 if (rc) 374 if (rc)
520 goto out; 375 goto out;
@@ -526,16 +381,30 @@ static int osc_sync_interpret(const struct lu_env *env,
526 goto out; 381 goto out;
527 } 382 }
528 383
529 *fa->fa_oi->oi_oa = body->oa; 384 *fa->fa_oa = body->oa;
385 obj = osc2cl(fa->fa_obj);
386
387 /* Update osc object's blocks attribute */
388 cl_object_attr_lock(obj);
389 if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
390 attr->cat_blocks = body->oa.o_blocks;
391 valid |= CAT_BLOCKS;
392 }
393
394 if (valid)
395 cl_object_attr_update(env, obj, attr, valid);
396 cl_object_attr_unlock(obj);
397
530out: 398out:
531 rc = fa->fa_upcall(fa->fa_cookie, rc); 399 rc = fa->fa_upcall(fa->fa_cookie, rc);
532 return rc; 400 return rc;
533} 401}
534 402
535int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, 403int osc_sync_base(struct osc_object *obj, struct obdo *oa,
536 obd_enqueue_update_f upcall, void *cookie, 404 obd_enqueue_update_f upcall, void *cookie,
537 struct ptlrpc_request_set *rqset) 405 struct ptlrpc_request_set *rqset)
538{ 406{
407 struct obd_export *exp = osc_export(obj);
539 struct ptlrpc_request *req; 408 struct ptlrpc_request *req;
540 struct ost_body *body; 409 struct ost_body *body;
541 struct osc_fsync_args *fa; 410 struct osc_fsync_args *fa;
@@ -555,14 +424,15 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
555 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); 424 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
556 LASSERT(body); 425 LASSERT(body);
557 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, 426 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
558 oinfo->oi_oa); 427 oa);
559 428
560 ptlrpc_request_set_replen(req); 429 ptlrpc_request_set_replen(req);
561 req->rq_interpret_reply = osc_sync_interpret; 430 req->rq_interpret_reply = osc_sync_interpret;
562 431
563 CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args)); 432 CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
564 fa = ptlrpc_req_async_args(req); 433 fa = ptlrpc_req_async_args(req);
565 fa->fa_oi = oinfo; 434 fa->fa_obj = obj;
435 fa->fa_oa = oa;
566 fa->fa_upcall = upcall; 436 fa->fa_upcall = upcall;
567 fa->fa_cookie = cookie; 437 fa->fa_cookie = cookie;
568 438
@@ -639,19 +509,8 @@ static int osc_can_send_destroy(struct client_obd *cli)
639 return 0; 509 return 0;
640} 510}
641 511
642/* Destroy requests can be async always on the client, and we don't even really
643 * care about the return code since the client cannot do anything at all about
644 * a destroy failure.
645 * When the MDS is unlinking a filename, it saves the file objects into a
646 * recovery llog, and these object records are cancelled when the OST reports
647 * they were destroyed and sync'd to disk (i.e. transaction committed).
648 * If the client dies, or the OST is down when the object should be destroyed,
649 * the records are not cancelled, and when the OST reconnects to the MDS next,
650 * it will retrieve the llog unlink logs and then sends the log cancellation
651 * cookies to the MDS after committing destroy transactions.
652 */
653static int osc_destroy(const struct lu_env *env, struct obd_export *exp, 512static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
654 struct obdo *oa, struct obd_trans_info *oti) 513 struct obdo *oa)
655{ 514{
656 struct client_obd *cli = &exp->exp_obd->u.cli; 515 struct client_obd *cli = &exp->exp_obd->u.cli;
657 struct ptlrpc_request *req; 516 struct ptlrpc_request *req;
@@ -683,32 +542,22 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
683 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ 542 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
684 ptlrpc_at_set_req_timeout(req); 543 ptlrpc_at_set_req_timeout(req);
685 544
686 if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
687 oa->o_lcookie = *oti->oti_logcookies;
688 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); 545 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
689 LASSERT(body); 546 LASSERT(body);
690 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); 547 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
691 548
692 ptlrpc_request_set_replen(req); 549 ptlrpc_request_set_replen(req);
693 550
694 /* If osc_destroy is for destroying the unlink orphan, 551 req->rq_interpret_reply = osc_destroy_interpret;
695 * sent from MDT to OST, which should not be blocked here, 552 if (!osc_can_send_destroy(cli)) {
696 * because the process might be triggered by ptlrpcd, and 553 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
697 * it is not good to block ptlrpcd thread (b=16006 554
698 **/ 555 /*
699 if (!(oa->o_flags & OBD_FL_DELORPHAN)) { 556 * Wait until the number of on-going destroy RPCs drops
700 req->rq_interpret_reply = osc_destroy_interpret; 557 * under max_rpc_in_flight
701 if (!osc_can_send_destroy(cli)) { 558 */
702 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, 559 l_wait_event_exclusive(cli->cl_destroy_waitq,
703 NULL); 560 osc_can_send_destroy(cli), &lwi);
704
705 /*
706 * Wait until the number of on-going destroy RPCs drops
707 * under max_rpc_in_flight
708 */
709 l_wait_event_exclusive(cli->cl_destroy_waitq,
710 osc_can_send_destroy(cli), &lwi);
711 }
712 } 561 }
713 562
714 /* Do not wait for response */ 563 /* Do not wait for response */
@@ -734,14 +583,13 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
734 oa->o_undirty = 0; 583 oa->o_undirty = 0;
735 } else if (unlikely(atomic_long_read(&obd_dirty_pages) - 584 } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
736 atomic_long_read(&obd_dirty_transit_pages) > 585 atomic_long_read(&obd_dirty_transit_pages) >
737 (obd_max_dirty_pages + 1))) { 586 (long)(obd_max_dirty_pages + 1))) {
738 /* The atomic_read() allowing the atomic_inc() are 587 /* The atomic_read() allowing the atomic_inc() are
739 * not covered by a lock thus they may safely race and trip 588 * not covered by a lock thus they may safely race and trip
740 * this CERROR() unless we add in a small fudge factor (+1). 589 * this CERROR() unless we add in a small fudge factor (+1).
741 */ 590 */
742 CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n", 591 CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n",
743 cli->cl_import->imp_obd->obd_name, 592 cli_name(cli), atomic_long_read(&obd_dirty_pages),
744 atomic_long_read(&obd_dirty_pages),
745 atomic_long_read(&obd_dirty_transit_pages), 593 atomic_long_read(&obd_dirty_transit_pages),
746 obd_max_dirty_pages); 594 obd_max_dirty_pages);
747 oa->o_undirty = 0; 595 oa->o_undirty = 0;
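
The (long) cast introduced in osc_announce_cached() above matters because the right-hand side of the comparison is unsigned: without the cast, the signed difference of the two dirty-page counters is converted to unsigned long, so a transiently negative value compares as a huge number and spuriously takes the error branch. A standalone illustration of that promotion, with invented values (the demo_ helper below is not part of this patch):

static int demo_dirty_check(void)
{
	long dirty = 5;           /* stands in for obd_dirty_pages */
	long transit = 8;         /* stands in for obd_dirty_transit_pages */
	unsigned long max = 1000; /* stands in for obd_max_dirty_pages */

	/*
	 * The conversion that happens implicitly without the cast, written
	 * out: the long result (-3) becomes ULONG_MAX - 2, so the "over the
	 * limit" branch fires even though nothing exceeds the limit.
	 */
	int spurious = (unsigned long)(dirty - transit) > max + 1;  /* 1 */

	/* With the (long) cast added by this hunk the comparison is signed: */
	int correct = dirty - transit > (long)(max + 1);            /* 0 */

	return spurious - correct;
}
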
@@ -936,12 +784,10 @@ static int osc_add_shrink_grant(struct client_obd *client)
936 osc_grant_shrink_grant_cb, NULL, 784 osc_grant_shrink_grant_cb, NULL,
937 &client->cl_grant_shrink_list); 785 &client->cl_grant_shrink_list);
938 if (rc) { 786 if (rc) {
939 CERROR("add grant client %s error %d\n", 787 CERROR("add grant client %s error %d\n", cli_name(client), rc);
940 client->cl_import->imp_obd->obd_name, rc);
941 return rc; 788 return rc;
942 } 789 }
943 CDEBUG(D_CACHE, "add grant client %s\n", 790 CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
944 client->cl_import->imp_obd->obd_name);
945 osc_update_next_shrink(client); 791 osc_update_next_shrink(client);
946 return 0; 792 return 0;
947} 793}
@@ -970,23 +816,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
970 cli->cl_avail_grant = ocd->ocd_grant - 816 cli->cl_avail_grant = ocd->ocd_grant -
971 (cli->cl_dirty_pages << PAGE_SHIFT); 817 (cli->cl_dirty_pages << PAGE_SHIFT);
972 818
973 if (cli->cl_avail_grant < 0) {
974 CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
975 cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
976 ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
977 /* workaround for servers which do not have the patch from
978 * LU-2679
979 */
980 cli->cl_avail_grant = ocd->ocd_grant;
981 }
982
983 /* determine the appropriate chunk size used by osc_extent. */ 819 /* determine the appropriate chunk size used by osc_extent. */
984 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); 820 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
985 spin_unlock(&cli->cl_loi_list_lock); 821 spin_unlock(&cli->cl_loi_list_lock);
986 822
987 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", 823 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
988 cli->cl_import->imp_obd->obd_name, 824 cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant,
989 cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits); 825 cli->cl_chunkbits);
990 826
991 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK && 827 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
992 list_empty(&cli->cl_grant_shrink_list)) 828 list_empty(&cli->cl_grant_shrink_list))
@@ -1072,9 +908,9 @@ static int check_write_rcs(struct ptlrpc_request *req,
1072static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) 908static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1073{ 909{
1074 if (p1->flag != p2->flag) { 910 if (p1->flag != p2->flag) {
1075 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE | 911 unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
1076 OBD_BRW_SYNC | OBD_BRW_ASYNC | 912 OBD_BRW_SYNC | OBD_BRW_ASYNC |
1077 OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC); 913 OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
1078 914
1079 /* warn if we try to combine flags that we don't know to be 915 /* warn if we try to combine flags that we don't know to be
1080 * safe to combine 916 * safe to combine
@@ -1097,7 +933,6 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
1097 int i = 0; 933 int i = 0;
1098 struct cfs_crypto_hash_desc *hdesc; 934 struct cfs_crypto_hash_desc *hdesc;
1099 unsigned int bufsize; 935 unsigned int bufsize;
1100 int err;
1101 unsigned char cfs_alg = cksum_obd2cfs(cksum_type); 936 unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
1102 937
1103 LASSERT(pg_count > 0); 938 LASSERT(pg_count > 0);
@@ -1139,7 +974,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
1139 } 974 }
1140 975
1141 bufsize = sizeof(cksum); 976 bufsize = sizeof(cksum);
1142 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize); 977 cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
1143 978
1144 /* For sending we only compute the wrong checksum instead 979 /* For sending we only compute the wrong checksum instead
1145 * of corrupting the data so it is still correct on a redo 980 * of corrupting the data so it is still correct on a redo
@@ -1151,8 +986,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
1151} 986}
1152 987
1153static int osc_brw_prep_request(int cmd, struct client_obd *cli, 988static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1154 struct obdo *oa, 989 struct obdo *oa, u32 page_count,
1155 struct lov_stripe_md *lsm, u32 page_count,
1156 struct brw_page **pga, 990 struct brw_page **pga,
1157 struct ptlrpc_request **reqp, 991 struct ptlrpc_request **reqp,
1158 int reserve, 992 int reserve,
@@ -1210,8 +1044,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1210 1044
1211 desc = ptlrpc_prep_bulk_imp(req, page_count, 1045 desc = ptlrpc_prep_bulk_imp(req, page_count,
1212 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS, 1046 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1213 opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK, 1047 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1214 OST_BULK_PORTAL); 1048 PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL,
1049 &ptlrpc_bulk_kiov_pin_ops);
1215 1050
1216 if (!desc) { 1051 if (!desc) {
1217 rc = -ENOMEM; 1052 rc = -ENOMEM;
@@ -1259,7 +1094,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1259 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) == 1094 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1260 (pg->flag & OBD_BRW_SRVLOCK)); 1095 (pg->flag & OBD_BRW_SRVLOCK));
1261 1096
1262 ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count); 1097 desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count);
1263 requested_nob += pg->count; 1098 requested_nob += pg->count;
1264 1099
1265 if (i > 0 && can_merge_pages(pg_prev, pg)) { 1100 if (i > 0 && can_merge_pages(pg_prev, pg)) {
@@ -1569,7 +1404,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
1569 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) == 1404 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1570 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ, 1405 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
1571 aa->aa_cli, aa->aa_oa, 1406 aa->aa_cli, aa->aa_oa,
1572 NULL /* lsm unused by osc currently */,
1573 aa->aa_page_count, aa->aa_ppga, 1407 aa->aa_page_count, aa->aa_ppga,
1574 &new_req, 0, 1); 1408 &new_req, 0, 1);
1575 if (rc) 1409 if (rc)
@@ -1764,8 +1598,6 @@ static int brw_interpret(const struct lu_env *env,
1764 LASSERT(list_empty(&aa->aa_exts)); 1598 LASSERT(list_empty(&aa->aa_exts));
1765 LASSERT(list_empty(&aa->aa_oaps)); 1599 LASSERT(list_empty(&aa->aa_oaps));
1766 1600
1767 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
1768 req->rq_bulk->bd_nob_transferred);
1769 osc_release_ppga(aa->aa_ppga, aa->aa_page_count); 1601 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
1770 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); 1602 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
1771 1603
@@ -1818,9 +1650,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1818 struct osc_brw_async_args *aa = NULL; 1650 struct osc_brw_async_args *aa = NULL;
1819 struct obdo *oa = NULL; 1651 struct obdo *oa = NULL;
1820 struct osc_async_page *oap; 1652 struct osc_async_page *oap;
1821 struct osc_async_page *tmp; 1653 struct osc_object *obj = NULL;
1822 struct cl_req *clerq = NULL;
1823 enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
1824 struct cl_req_attr *crattr = NULL; 1654 struct cl_req_attr *crattr = NULL;
1825 u64 starting_offset = OBD_OBJECT_EOF; 1655 u64 starting_offset = OBD_OBJECT_EOF;
1826 u64 ending_offset = 0; 1656 u64 ending_offset = 0;
@@ -1828,6 +1658,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1828 int mem_tight = 0; 1658 int mem_tight = 0;
1829 int page_count = 0; 1659 int page_count = 0;
1830 bool soft_sync = false; 1660 bool soft_sync = false;
1661 bool interrupted = false;
1831 int i; 1662 int i;
1832 int rc; 1663 int rc;
1833 struct ost_body *body; 1664 struct ost_body *body;
@@ -1839,32 +1670,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1839 list_for_each_entry(ext, ext_list, oe_link) { 1670 list_for_each_entry(ext, ext_list, oe_link) {
1840 LASSERT(ext->oe_state == OES_RPC); 1671 LASSERT(ext->oe_state == OES_RPC);
1841 mem_tight |= ext->oe_memalloc; 1672 mem_tight |= ext->oe_memalloc;
1842 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 1673 page_count += ext->oe_nr_pages;
1843 ++page_count; 1674 if (!obj)
1844 list_add_tail(&oap->oap_rpc_item, &rpc_list); 1675 obj = ext->oe_obj;
1845 if (starting_offset > oap->oap_obj_off)
1846 starting_offset = oap->oap_obj_off;
1847 else
1848 LASSERT(oap->oap_page_off == 0);
1849 if (ending_offset < oap->oap_obj_off + oap->oap_count)
1850 ending_offset = oap->oap_obj_off +
1851 oap->oap_count;
1852 else
1853 LASSERT(oap->oap_page_off + oap->oap_count ==
1854 PAGE_SIZE);
1855 }
1856 } 1676 }
1857 1677
1858 soft_sync = osc_over_unstable_soft_limit(cli); 1678 soft_sync = osc_over_unstable_soft_limit(cli);
1859 if (mem_tight) 1679 if (mem_tight)
1860 mpflag = cfs_memory_pressure_get_and_set(); 1680 mpflag = cfs_memory_pressure_get_and_set();
1861 1681
1862 crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
1863 if (!crattr) {
1864 rc = -ENOMEM;
1865 goto out;
1866 }
1867
1868 pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); 1682 pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
1869 if (!pga) { 1683 if (!pga) {
1870 rc = -ENOMEM; 1684 rc = -ENOMEM;
@@ -1878,44 +1692,46 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1878 } 1692 }
1879 1693
1880 i = 0; 1694 i = 0;
1881 list_for_each_entry(oap, &rpc_list, oap_rpc_item) { 1695 list_for_each_entry(ext, ext_list, oe_link) {
1882 struct cl_page *page = oap2cl_page(oap); 1696 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1883 1697 if (mem_tight)
1884 if (!clerq) { 1698 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
1885 clerq = cl_req_alloc(env, page, crt, 1699 if (soft_sync)
1886 1 /* only 1-object rpcs for now */); 1700 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
1887 if (IS_ERR(clerq)) { 1701 pga[i] = &oap->oap_brw_page;
1888 rc = PTR_ERR(clerq); 1702 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
1889 goto out; 1703 i++;
1890 } 1704
1705 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1706 if (starting_offset == OBD_OBJECT_EOF ||
1707 starting_offset > oap->oap_obj_off)
1708 starting_offset = oap->oap_obj_off;
1709 else
1710 LASSERT(!oap->oap_page_off);
1711 if (ending_offset < oap->oap_obj_off + oap->oap_count)
1712 ending_offset = oap->oap_obj_off +
1713 oap->oap_count;
1714 else
1715 LASSERT(oap->oap_page_off + oap->oap_count ==
1716 PAGE_SIZE);
1717 if (oap->oap_interrupted)
1718 interrupted = true;
1891 } 1719 }
1892 if (mem_tight)
1893 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
1894 if (soft_sync)
1895 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
1896 pga[i] = &oap->oap_brw_page;
1897 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
1898 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1899 pga[i]->pg, oap->oap_page->index, oap,
1900 pga[i]->flag);
1901 i++;
1902 cl_req_page_add(env, clerq, page);
1903 } 1720 }
1904 1721
1905 /* always get the data for the obdo for the rpc */ 1722 /* first page in the list */
1906 LASSERT(clerq); 1723 oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
1907 crattr->cra_oa = oa;
1908 cl_req_attr_set(env, clerq, crattr, ~0ULL);
1909 1724
1910 rc = cl_req_prep(env, clerq); 1725 crattr = &osc_env_info(env)->oti_req_attr;
1911 if (rc != 0) { 1726 memset(crattr, 0, sizeof(*crattr));
1912 CERROR("cl_req_prep failed: %d\n", rc); 1727 crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
1913 goto out; 1728 crattr->cra_flags = ~0ULL;
1914 } 1729 crattr->cra_page = oap2cl_page(oap);
1730 crattr->cra_oa = oa;
1731 cl_req_attr_set(env, osc2cl(obj), crattr);
1915 1732
1916 sort_brw_pages(pga, page_count); 1733 sort_brw_pages(pga, page_count);
1917 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, 1734 rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
1918 pga, &req, 1, 0);
1919 if (rc != 0) { 1735 if (rc != 0) {
1920 CERROR("prep_req failed: %d\n", rc); 1736 CERROR("prep_req failed: %d\n", rc);
1921 goto out; 1737 goto out;
@@ -1924,8 +1740,10 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1924 req->rq_commit_cb = brw_commit; 1740 req->rq_commit_cb = brw_commit;
1925 req->rq_interpret_reply = brw_interpret; 1741 req->rq_interpret_reply = brw_interpret;
1926 1742
1927 if (mem_tight != 0) 1743 req->rq_memalloc = mem_tight != 0;
1928 req->rq_memalloc = 1; 1744 oap->oap_request = ptlrpc_request_addref(req);
1745 if (interrupted && !req->rq_intr)
1746 ptlrpc_mark_interrupted(req);
1929 1747
1930 /* Need to update the timestamps after the request is built in case 1748 /* Need to update the timestamps after the request is built in case
1931 * we race with setattr (locally or in queue at OST). If OST gets 1749 * we race with setattr (locally or in queue at OST). If OST gets
@@ -1935,9 +1753,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1935 */ 1753 */
1936 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); 1754 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1937 crattr->cra_oa = &body->oa; 1755 crattr->cra_oa = &body->oa;
1938 cl_req_attr_set(env, clerq, crattr, 1756 crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
1939 OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME); 1757 cl_req_attr_set(env, osc2cl(obj), crattr);
1940
1941 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid); 1758 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
1942 1759
1943 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); 1760 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
@@ -1946,24 +1763,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1946 list_splice_init(&rpc_list, &aa->aa_oaps); 1763 list_splice_init(&rpc_list, &aa->aa_oaps);
1947 INIT_LIST_HEAD(&aa->aa_exts); 1764 INIT_LIST_HEAD(&aa->aa_exts);
1948 list_splice_init(ext_list, &aa->aa_exts); 1765 list_splice_init(ext_list, &aa->aa_exts);
1949 aa->aa_clerq = clerq;
1950
1951 /* queued sync pages can be torn down while the pages
1952 * were between the pending list and the rpc
1953 */
1954 tmp = NULL;
1955 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1956 /* only one oap gets a request reference */
1957 if (!tmp)
1958 tmp = oap;
1959 if (oap->oap_interrupted && !req->rq_intr) {
1960 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
1961 oap, req);
1962 ptlrpc_mark_interrupted(req);
1963 }
1964 }
1965 if (tmp)
1966 tmp->oap_request = ptlrpc_request_addref(req);
1967 1766
1968 spin_lock(&cli->cl_loi_list_lock); 1767 spin_lock(&cli->cl_loi_list_lock);
1969 starting_offset >>= PAGE_SHIFT; 1768 starting_offset >>= PAGE_SHIFT;
@@ -1985,6 +1784,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1985 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight", 1784 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
1986 page_count, aa, cli->cl_r_in_flight, 1785 page_count, aa, cli->cl_r_in_flight,
1987 cli->cl_w_in_flight); 1786 cli->cl_w_in_flight);
1787 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
1988 1788
1989 ptlrpcd_add_req(req); 1789 ptlrpcd_add_req(req);
1990 rc = 0; 1790 rc = 0;
@@ -1993,8 +1793,6 @@ out:
1993 if (mem_tight != 0) 1793 if (mem_tight != 0)
1994 cfs_memory_pressure_restore(mpflag); 1794 cfs_memory_pressure_restore(mpflag);
1995 1795
1996 kfree(crattr);
1997
1998 if (rc != 0) { 1796 if (rc != 0) {
1999 LASSERT(!req); 1797 LASSERT(!req);
2000 1798
@@ -2010,22 +1808,15 @@ out:
2010 list_del_init(&ext->oe_link); 1808 list_del_init(&ext->oe_link);
2011 osc_extent_finish(env, ext, 0, rc); 1809 osc_extent_finish(env, ext, 0, rc);
2012 } 1810 }
2013 if (clerq && !IS_ERR(clerq))
2014 cl_req_completion(env, clerq, rc);
2015 } 1811 }
2016 return rc; 1812 return rc;
2017} 1813}
2018 1814
2019static int osc_set_lock_data_with_check(struct ldlm_lock *lock, 1815static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2020 struct ldlm_enqueue_info *einfo)
2021{ 1816{
2022 void *data = einfo->ei_cbdata;
2023 int set = 0; 1817 int set = 0;
2024 1818
2025 LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); 1819 LASSERT(lock);
2026 LASSERT(lock->l_resource->lr_type == einfo->ei_type);
2027 LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
2028 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
2029 1820
2030 lock_res_and_lock(lock); 1821 lock_res_and_lock(lock);
2031 1822
@@ -2039,21 +1830,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
2039 return set; 1830 return set;
2040} 1831}
2041 1832
2042static int osc_set_data_with_check(struct lustre_handle *lockh,
2043 struct ldlm_enqueue_info *einfo)
2044{
2045 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2046 int set = 0;
2047
2048 if (lock) {
2049 set = osc_set_lock_data_with_check(lock, einfo);
2050 LDLM_LOCK_PUT(lock);
2051 } else
2052 CERROR("lockh %p, data %p - client evicted?\n",
2053 lockh, einfo->ei_cbdata);
2054 return set;
2055}
2056
2057static int osc_enqueue_fini(struct ptlrpc_request *req, 1833static int osc_enqueue_fini(struct ptlrpc_request *req,
2058 osc_enqueue_upcall_f upcall, void *cookie, 1834 osc_enqueue_upcall_f upcall, void *cookie,
2059 struct lustre_handle *lockh, enum ldlm_mode mode, 1835 struct lustre_handle *lockh, enum ldlm_mode mode,
@@ -2153,7 +1929,7 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
2153 * release locks just after they are obtained. 1929 * release locks just after they are obtained.
2154 */ 1930 */
2155int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, 1931int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2156 __u64 *flags, ldlm_policy_data_t *policy, 1932 __u64 *flags, union ldlm_policy_data *policy,
2157 struct ost_lvb *lvb, int kms_valid, 1933 struct ost_lvb *lvb, int kms_valid,
2158 osc_enqueue_upcall_f upcall, void *cookie, 1934 osc_enqueue_upcall_f upcall, void *cookie,
2159 struct ldlm_enqueue_info *einfo, 1935 struct ldlm_enqueue_info *einfo,
@@ -2219,7 +1995,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2219 ldlm_lock_decref(&lockh, mode); 1995 ldlm_lock_decref(&lockh, mode);
2220 LDLM_LOCK_PUT(matched); 1996 LDLM_LOCK_PUT(matched);
2221 return -ECANCELED; 1997 return -ECANCELED;
2222 } else if (osc_set_lock_data_with_check(matched, einfo)) { 1998 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2223 *flags |= LDLM_FL_LVB_READY; 1999 *flags |= LDLM_FL_LVB_READY;
2224 /* We already have a lock, and it's referenced. */ 2000 /* We already have a lock, and it's referenced. */
2225 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED); 2001 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
@@ -2304,7 +2080,7 @@ no_match:
2304} 2080}
2305 2081
2306int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, 2082int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2307 __u32 type, ldlm_policy_data_t *policy, __u32 mode, 2083 __u32 type, union ldlm_policy_data *policy, __u32 mode,
2308 __u64 *flags, void *data, struct lustre_handle *lockh, 2084 __u64 *flags, void *data, struct lustre_handle *lockh,
2309 int unref) 2085 int unref)
2310{ 2086{
@@ -2331,31 +2107,20 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2331 rc |= LCK_PW; 2107 rc |= LCK_PW;
2332 rc = ldlm_lock_match(obd->obd_namespace, lflags, 2108 rc = ldlm_lock_match(obd->obd_namespace, lflags,
2333 res_id, type, policy, rc, lockh, unref); 2109 res_id, type, policy, rc, lockh, unref);
2334 if (rc) { 2110 if (!rc || lflags & LDLM_FL_TEST_LOCK)
2335 if (data) {
2336 if (!osc_set_data_with_check(lockh, data)) {
2337 if (!(lflags & LDLM_FL_TEST_LOCK))
2338 ldlm_lock_decref(lockh, rc);
2339 return 0;
2340 }
2341 }
2342 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
2343 ldlm_lock_addref(lockh, LCK_PR);
2344 ldlm_lock_decref(lockh, LCK_PW);
2345 }
2346 return rc; 2111 return rc;
2347 }
2348 return rc;
2349}
2350 2112
2351int osc_cancel_base(struct lustre_handle *lockh, __u32 mode) 2113 if (data) {
2352{ 2114 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2353 if (unlikely(mode == LCK_GROUP))
2354 ldlm_lock_decref_and_cancel(lockh, mode);
2355 else
2356 ldlm_lock_decref(lockh, mode);
2357 2115
2358 return 0; 2116 LASSERT(lock);
2117 if (!osc_set_lock_data(lock, data)) {
2118 ldlm_lock_decref(lockh, rc);
2119 rc = 0;
2120 }
2121 LDLM_LOCK_PUT(lock);
2122 }
2123 return rc;
2359} 2124}
2360 2125
2361static int osc_statfs_interpret(const struct lu_env *env, 2126static int osc_statfs_interpret(const struct lu_env *env,
@@ -2526,9 +2291,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2526 err = ptlrpc_set_import_active(obd->u.cli.cl_import, 2291 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2527 data->ioc_offset); 2292 data->ioc_offset);
2528 goto out; 2293 goto out;
2529 case OBD_IOC_POLL_QUOTACHECK:
2530 err = osc_quota_poll_check(exp, karg);
2531 goto out;
2532 case OBD_IOC_PING_TARGET: 2294 case OBD_IOC_PING_TARGET:
2533 err = ptlrpc_obd_ping(obd); 2295 err = ptlrpc_obd_ping(obd);
2534 goto out; 2296 goto out;
@@ -2543,103 +2305,6 @@ out:
2543 return err; 2305 return err;
2544} 2306}
2545 2307
2546static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
2547 u32 keylen, void *key, __u32 *vallen, void *val,
2548 struct lov_stripe_md *lsm)
2549{
2550 if (!vallen || !val)
2551 return -EFAULT;
2552
2553 if (KEY_IS(KEY_FIEMAP)) {
2554 struct ll_fiemap_info_key *fm_key = key;
2555 struct ldlm_res_id res_id;
2556 ldlm_policy_data_t policy;
2557 struct lustre_handle lockh;
2558 enum ldlm_mode mode = 0;
2559 struct ptlrpc_request *req;
2560 struct ll_user_fiemap *reply;
2561 char *tmp;
2562 int rc;
2563
2564 if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
2565 goto skip_locking;
2566
2567 policy.l_extent.start = fm_key->fiemap.fm_start &
2568 PAGE_MASK;
2569
2570 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2571 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
2572 policy.l_extent.end = OBD_OBJECT_EOF;
2573 else
2574 policy.l_extent.end = (fm_key->fiemap.fm_start +
2575 fm_key->fiemap.fm_length +
2576 PAGE_SIZE - 1) & PAGE_MASK;
2577
2578 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2579 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
2580 LDLM_FL_BLOCK_GRANTED |
2581 LDLM_FL_LVB_READY,
2582 &res_id, LDLM_EXTENT, &policy,
2583 LCK_PR | LCK_PW, &lockh, 0);
2584 if (mode) { /* lock is cached on client */
2585 if (mode != LCK_PR) {
2586 ldlm_lock_addref(&lockh, LCK_PR);
2587 ldlm_lock_decref(&lockh, LCK_PW);
2588 }
2589 } else { /* no cached lock, needs acquire lock on server side */
2590 fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
2591 fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
2592 }
2593
2594skip_locking:
2595 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2596 &RQF_OST_GET_INFO_FIEMAP);
2597 if (!req) {
2598 rc = -ENOMEM;
2599 goto drop_lock;
2600 }
2601
2602 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
2603 RCL_CLIENT, keylen);
2604 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2605 RCL_CLIENT, *vallen);
2606 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2607 RCL_SERVER, *vallen);
2608
2609 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2610 if (rc) {
2611 ptlrpc_request_free(req);
2612 goto drop_lock;
2613 }
2614
2615 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
2616 memcpy(tmp, key, keylen);
2617 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2618 memcpy(tmp, val, *vallen);
2619
2620 ptlrpc_request_set_replen(req);
2621 rc = ptlrpc_queue_wait(req);
2622 if (rc)
2623 goto fini_req;
2624
2625 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2626 if (!reply) {
2627 rc = -EPROTO;
2628 goto fini_req;
2629 }
2630
2631 memcpy(val, reply, *vallen);
2632fini_req:
2633 ptlrpc_req_finished(req);
2634drop_lock:
2635 if (mode)
2636 ldlm_lock_decref(&lockh, LCK_PR);
2637 return rc;
2638 }
2639
2640 return -EINVAL;
2641}
2642
2643static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, 2308static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2644 u32 keylen, void *key, u32 vallen, 2309 u32 keylen, void *key, u32 vallen,
2645 void *val, struct ptlrpc_request_set *set) 2310 void *val, struct ptlrpc_request_set *set)
@@ -2999,47 +2664,33 @@ out_ptlrpcd:
2999 return rc; 2664 return rc;
3000} 2665}
3001 2666
3002static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) 2667static int osc_precleanup(struct obd_device *obd)
3003{ 2668{
3004 switch (stage) { 2669 struct client_obd *cli = &obd->u.cli;
3005 case OBD_CLEANUP_EARLY: { 2670
3006 struct obd_import *imp; 2671 /* LU-464
3007 2672 * for echo client, export may be on zombie list, wait for
3008 imp = obd->u.cli.cl_import; 2673 * zombie thread to cull it, because cli.cl_import will be
3009 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name); 2674 * cleared in client_disconnect_export():
3010 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */ 2675 * class_export_destroy() -> obd_cleanup() ->
3011 ptlrpc_deactivate_import(imp); 2676 * echo_device_free() -> echo_client_cleanup() ->
3012 spin_lock(&imp->imp_lock); 2677 * obd_disconnect() -> osc_disconnect() ->
3013 imp->imp_pingable = 0; 2678 * client_disconnect_export()
3014 spin_unlock(&imp->imp_lock); 2679 */
3015 break; 2680 obd_zombie_barrier();
2681 if (cli->cl_writeback_work) {
2682 ptlrpcd_destroy_work(cli->cl_writeback_work);
2683 cli->cl_writeback_work = NULL;
3016 } 2684 }
3017 case OBD_CLEANUP_EXPORTS: { 2685
3018 struct client_obd *cli = &obd->u.cli; 2686 if (cli->cl_lru_work) {
3019 /* LU-464 2687 ptlrpcd_destroy_work(cli->cl_lru_work);
3020 * for echo client, export may be on zombie list, wait for 2688 cli->cl_lru_work = NULL;
3021 * zombie thread to cull it, because cli.cl_import will be
3022 * cleared in client_disconnect_export():
3023 * class_export_destroy() -> obd_cleanup() ->
3024 * echo_device_free() -> echo_client_cleanup() ->
3025 * obd_disconnect() -> osc_disconnect() ->
3026 * client_disconnect_export()
3027 */
3028 obd_zombie_barrier();
3029 if (cli->cl_writeback_work) {
3030 ptlrpcd_destroy_work(cli->cl_writeback_work);
3031 cli->cl_writeback_work = NULL;
3032 }
3033 if (cli->cl_lru_work) {
3034 ptlrpcd_destroy_work(cli->cl_lru_work);
3035 cli->cl_lru_work = NULL;
3036 }
3037 obd_cleanup_client_import(obd);
3038 ptlrpc_lprocfs_unregister_obd(obd);
3039 lprocfs_obd_cleanup(obd);
3040 break;
3041 }
3042 } 2689 }
2690
2691 obd_cleanup_client_import(obd);
2692 ptlrpc_lprocfs_unregister_obd(obd);
2693 lprocfs_obd_cleanup(obd);
3043 return 0; 2694 return 0;
3044} 2695}
3045 2696
@@ -3104,24 +2755,18 @@ static struct obd_ops osc_obd_ops = {
3104 .disconnect = osc_disconnect, 2755 .disconnect = osc_disconnect,
3105 .statfs = osc_statfs, 2756 .statfs = osc_statfs,
3106 .statfs_async = osc_statfs_async, 2757 .statfs_async = osc_statfs_async,
3107 .unpackmd = osc_unpackmd,
3108 .create = osc_create, 2758 .create = osc_create,
3109 .destroy = osc_destroy, 2759 .destroy = osc_destroy,
3110 .getattr = osc_getattr, 2760 .getattr = osc_getattr,
3111 .getattr_async = osc_getattr_async,
3112 .setattr = osc_setattr, 2761 .setattr = osc_setattr,
3113 .setattr_async = osc_setattr_async,
3114 .iocontrol = osc_iocontrol, 2762 .iocontrol = osc_iocontrol,
3115 .get_info = osc_get_info,
3116 .set_info_async = osc_set_info_async, 2763 .set_info_async = osc_set_info_async,
3117 .import_event = osc_import_event, 2764 .import_event = osc_import_event,
3118 .process_config = osc_process_config, 2765 .process_config = osc_process_config,
3119 .quotactl = osc_quotactl, 2766 .quotactl = osc_quotactl,
3120 .quotacheck = osc_quotacheck,
3121}; 2767};
3122 2768
3123extern struct lu_kmem_descr osc_caches[]; 2769extern struct lu_kmem_descr osc_caches[];
3124extern struct lock_class_key osc_ast_guard_class;
3125 2770
3126static int __init osc_init(void) 2771static int __init osc_init(void)
3127{ 2772{
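
Taken together, the osc_request.c hunks above replace the struct obd_info plumbing with plain obdo/osc_object arguments (osc_punch_base(), osc_sync_base()) and drop the obd_ops methods that no longer have callers. As a rough sketch only — the helper and its demo_ names below are invented for illustration and are not part of this patch — a fsync-style caller of the new osc_sync_base() signature would look something like:

static int demo_sync_upcall(void *cookie, int rc)
{
	/* completion callback invoked from osc_sync_interpret() */
	complete((struct completion *)cookie);
	return rc;
}

static int demo_fsync_object(struct osc_object *osc, u64 start, u64 end)
{
	struct completion done;
	struct obdo *oa;
	int rc;

	oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
	if (!oa)
		return -ENOMEM;

	init_completion(&done);
	oa->o_oi = osc->oo_oinfo->loi_oi;
	oa->o_size = start;
	oa->o_blocks = end;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP |
		      OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

	/* the export is now derived inside osc_sync_base() via osc_export() */
	rc = osc_sync_base(osc, oa, demo_sync_upcall, &done, PTLRPCD_SET);
	if (!rc)
		wait_for_completion(&done);

	kmem_cache_free(obdo_cachep, oa);
	return rc;
}
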
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 8c51d51a678b..804741362bc0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -43,6 +43,18 @@
43 43
44#include "ptlrpc_internal.h" 44#include "ptlrpc_internal.h"
45 45
46const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
47 .add_kiov_frag = ptlrpc_prep_bulk_page_pin,
48 .release_frags = ptlrpc_release_bulk_page_pin,
49};
50EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
51
52const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
53 .add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
54 .release_frags = NULL,
55};
56EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
57
46static int ptlrpc_send_new_req(struct ptlrpc_request *req); 58static int ptlrpc_send_new_req(struct ptlrpc_request *req);
47static int ptlrpcd_check_work(struct ptlrpc_request *req); 59static int ptlrpcd_check_work(struct ptlrpc_request *req);
48static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async); 60static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
@@ -95,24 +107,43 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
95 * Allocate and initialize new bulk descriptor on the sender. 107 * Allocate and initialize new bulk descriptor on the sender.
96 * Returns pointer to the descriptor or NULL on error. 108 * Returns pointer to the descriptor or NULL on error.
97 */ 109 */
98struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, 110struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
99 unsigned type, unsigned portal) 111 unsigned int max_brw,
112 enum ptlrpc_bulk_op_type type,
113 unsigned int portal,
114 const struct ptlrpc_bulk_frag_ops *ops)
100{ 115{
101 struct ptlrpc_bulk_desc *desc; 116 struct ptlrpc_bulk_desc *desc;
102 int i; 117 int i;
103 118
104 desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]), 119 /* ensure that only one of KIOV or IOVEC is set but not both */
105 GFP_NOFS); 120 LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) ||
121 (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag));
122
123 desc = kzalloc(sizeof(*desc), GFP_NOFS);
106 if (!desc) 124 if (!desc)
107 return NULL; 125 return NULL;
108 126
127 if (type & PTLRPC_BULK_BUF_KIOV) {
128 GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)),
129 GFP_NOFS);
130 if (!GET_KIOV(desc))
131 goto free_desc;
132 } else {
133 GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)),
134 GFP_NOFS);
135 if (!GET_KVEC(desc))
136 goto free_desc;
137 }
138
109 spin_lock_init(&desc->bd_lock); 139 spin_lock_init(&desc->bd_lock);
110 init_waitqueue_head(&desc->bd_waitq); 140 init_waitqueue_head(&desc->bd_waitq);
111 desc->bd_max_iov = npages; 141 desc->bd_max_iov = nfrags;
112 desc->bd_iov_count = 0; 142 desc->bd_iov_count = 0;
113 desc->bd_portal = portal; 143 desc->bd_portal = portal;
114 desc->bd_type = type; 144 desc->bd_type = type;
115 desc->bd_md_count = 0; 145 desc->bd_md_count = 0;
146 desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops;
116 LASSERT(max_brw > 0); 147 LASSERT(max_brw > 0);
117 desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); 148 desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
118 /* 149 /*
@@ -123,24 +154,31 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
123 LNetInvalidateHandle(&desc->bd_mds[i]); 154 LNetInvalidateHandle(&desc->bd_mds[i]);
124 155
125 return desc; 156 return desc;
157free_desc:
158 kfree(desc);
159 return NULL;
126} 160}
127 161
128/** 162/**
129 * Prepare bulk descriptor for specified outgoing request \a req that 163 * Prepare bulk descriptor for specified outgoing request \a req that
130 * can fit \a npages * pages. \a type is bulk type. \a portal is where 164 * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
131 * the bulk to be sent. Used on client-side. 165 * the bulk to be sent. Used on client-side.
132 * Returns pointer to newly allocated initialized bulk descriptor or NULL on 166 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
133 * error. 167 * error.
134 */ 168 */
135struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, 169struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
136 unsigned npages, unsigned max_brw, 170 unsigned int nfrags,
137 unsigned type, unsigned portal) 171 unsigned int max_brw,
172 unsigned int type,
173 unsigned int portal,
174 const struct ptlrpc_bulk_frag_ops *ops)
138{ 175{
139 struct obd_import *imp = req->rq_import; 176 struct obd_import *imp = req->rq_import;
140 struct ptlrpc_bulk_desc *desc; 177 struct ptlrpc_bulk_desc *desc;
141 178
142 LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); 179 LASSERT(ptlrpc_is_bulk_op_passive(type));
143 desc = ptlrpc_new_bulk(npages, max_brw, type, portal); 180
181 desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
144 if (!desc) 182 if (!desc)
145 return NULL; 183 return NULL;
146 184
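
With ptlrpc_new_bulk() and ptlrpc_prep_bulk_imp() reworked as above, a client-side caller now passes the transfer direction ORed with the buffer kind plus a frag-ops table, and attaches pages through the descriptor's ops rather than calling ptlrpc_prep_bulk_page_pin() directly; this mirrors the osc_brw_prep_request() hunk earlier in this diff. A condensed sketch (write direction; the demo_ wrapper is invented, and the client_obd, request, page array and count are assumed to come from the caller):

static int demo_attach_write_bulk(struct client_obd *cli,
				  struct ptlrpc_request *req,
				  struct brw_page **pga, u32 page_count)
{
	struct ptlrpc_bulk_desc *desc;
	u32 i;

	desc = ptlrpc_prep_bulk_imp(req, page_count,
			cli->cl_import->imp_connect_data.ocd_brw_size >>
				LNET_MTU_BITS,
			PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV,
			OST_BULK_PORTAL, &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		struct brw_page *pg = pga[i];

		/* fragments are now attached through the ops table */
		desc->bd_frag_ops->add_kiov_frag(desc, pg->pg,
						 pg->off & ~PAGE_MASK,
						 pg->count);
	}
	return 0;
}
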
@@ -158,56 +196,82 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
158} 196}
159EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); 197EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
160 198
161/**
162 * Add a page \a page to the bulk descriptor \a desc.
163 * Data to transfer in the page starts at offset \a pageoffset and
164 * amount of data to transfer from the page is \a len
165 */
166void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, 199void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
167 struct page *page, int pageoffset, int len, int pin) 200 struct page *page, int pageoffset, int len, int pin)
168{ 201{
202 struct bio_vec *kiov;
203
169 LASSERT(desc->bd_iov_count < desc->bd_max_iov); 204 LASSERT(desc->bd_iov_count < desc->bd_max_iov);
170 LASSERT(page); 205 LASSERT(page);
171 LASSERT(pageoffset >= 0); 206 LASSERT(pageoffset >= 0);
172 LASSERT(len > 0); 207 LASSERT(len > 0);
173 LASSERT(pageoffset + len <= PAGE_SIZE); 208 LASSERT(pageoffset + len <= PAGE_SIZE);
209 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
210
211 kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
174 212
175 desc->bd_nob += len; 213 desc->bd_nob += len;
176 214
177 if (pin) 215 if (pin)
178 get_page(page); 216 get_page(page);
179 217
180 ptlrpc_add_bulk_page(desc, page, pageoffset, len); 218 kiov->bv_page = page;
219 kiov->bv_offset = pageoffset;
220 kiov->bv_len = len;
221
222 desc->bd_iov_count++;
181} 223}
182EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); 224EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
183 225
184/** 226int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
185 * Uninitialize and free bulk descriptor \a desc. 227 void *frag, int len)
186 * Works on bulk descriptors both from server and client side.
187 */
188void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
189{ 228{
190 int i; 229 struct kvec *iovec;
230
231 LASSERT(desc->bd_iov_count < desc->bd_max_iov);
232 LASSERT(frag);
233 LASSERT(len > 0);
234 LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
191 235
236 iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
237
238 desc->bd_nob += len;
239
240 iovec->iov_base = frag;
241 iovec->iov_len = len;
242
243 desc->bd_iov_count++;
244
245 return desc->bd_nob;
246}
247EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
248
249void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
250{
192 LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ 251 LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
193 LASSERT(desc->bd_md_count == 0); /* network hands off */ 252 LASSERT(desc->bd_md_count == 0); /* network hands off */
194 LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); 253 LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
254 LASSERT(desc->bd_frag_ops);
195 255
196 sptlrpc_enc_pool_put_pages(desc); 256 if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
257 sptlrpc_enc_pool_put_pages(desc);
197 258
198 if (desc->bd_export) 259 if (desc->bd_export)
199 class_export_put(desc->bd_export); 260 class_export_put(desc->bd_export);
200 else 261 else
201 class_import_put(desc->bd_import); 262 class_import_put(desc->bd_import);
202 263
203 if (unpin) { 264 if (desc->bd_frag_ops->release_frags)
204 for (i = 0; i < desc->bd_iov_count; i++) 265 desc->bd_frag_ops->release_frags(desc);
205 put_page(desc->bd_iov[i].bv_page); 266
206 } 267 if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
268 kfree(GET_KIOV(desc));
269 else
270 kfree(GET_KVEC(desc));
207 271
208 kfree(desc); 272 kfree(desc);
209} 273}
210EXPORT_SYMBOL(__ptlrpc_free_bulk); 274EXPORT_SYMBOL(ptlrpc_free_bulk);
211 275
212/** 276/**
213 * Set server timelimit for this req, i.e. how long are we willing to wait 277 * Set server timelimit for this req, i.e. how long are we willing to wait
@@ -589,6 +653,42 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
589 spin_unlock(&pool->prp_lock); 653 spin_unlock(&pool->prp_lock);
590} 654}
591 655
656void ptlrpc_add_unreplied(struct ptlrpc_request *req)
657{
658 struct obd_import *imp = req->rq_import;
659 struct list_head *tmp;
660 struct ptlrpc_request *iter;
661
662 assert_spin_locked(&imp->imp_lock);
663 LASSERT(list_empty(&req->rq_unreplied_list));
664
665 /* unreplied list is sorted by xid in ascending order */
666 list_for_each_prev(tmp, &imp->imp_unreplied_list) {
667 iter = list_entry(tmp, struct ptlrpc_request,
668 rq_unreplied_list);
669
670 LASSERT(req->rq_xid != iter->rq_xid);
671 if (req->rq_xid < iter->rq_xid)
672 continue;
673 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
674 return;
675 }
676 list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
677}
678
679void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
680{
681 req->rq_xid = ptlrpc_next_xid();
682 ptlrpc_add_unreplied(req);
683}
684
685static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
686{
687 spin_lock(&req->rq_import->imp_lock);
688 ptlrpc_assign_next_xid_nolock(req);
689 spin_unlock(&req->rq_import->imp_lock);
690}
691
592int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, 692int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
593 __u32 version, int opcode, char **bufs, 693 __u32 version, int opcode, char **bufs,
594 struct ptlrpc_cli_ctx *ctx) 694 struct ptlrpc_cli_ctx *ctx)
@@ -637,8 +737,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
637 737
638 ptlrpc_at_set_req_timeout(request); 738 ptlrpc_at_set_req_timeout(request);
639 739
640 request->rq_xid = ptlrpc_next_xid();
641 lustre_msg_set_opc(request->rq_reqmsg, opcode); 740 lustre_msg_set_opc(request->rq_reqmsg, opcode);
741 ptlrpc_assign_next_xid(request);
642 742
643 /* Let's setup deadline for req/reply/bulk unlink for opcode. */ 743 /* Let's setup deadline for req/reply/bulk unlink for opcode. */
644 if (cfs_fail_val == opcode) { 744 if (cfs_fail_val == opcode) {
@@ -1129,7 +1229,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
1129 lnet_nid_t nid = imp->imp_connection->c_peer.nid; 1229 lnet_nid_t nid = imp->imp_connection->c_peer.nid;
1130 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); 1230 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1131 1231
1132 if (ptlrpc_console_allow(req)) 1232 /* -EAGAIN is normal when using POSIX flocks */
1233 if (ptlrpc_console_allow(req) &&
1234 !(opc == LDLM_ENQUEUE && err == -EAGAIN))
1133 LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n", 1235 LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
1134 imp->imp_obd->obd_name, 1236 imp->imp_obd->obd_name,
1135 ll_opcode2str(opc), 1237 ll_opcode2str(opc),
@@ -1166,6 +1268,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
1166 versions[0], versions[1]); 1268 versions[0], versions[1]);
1167} 1269}
1168 1270
1271__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1272{
1273 struct ptlrpc_request *req;
1274
1275 assert_spin_locked(&imp->imp_lock);
1276 if (list_empty(&imp->imp_unreplied_list))
1277 return 0;
1278
1279 req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
1280 rq_unreplied_list);
1281 LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1282
1283 if (imp->imp_known_replied_xid < req->rq_xid - 1)
1284 imp->imp_known_replied_xid = req->rq_xid - 1;
1285
1286 return req->rq_xid - 1;
1287}
1288
1169/** 1289/**
1170 * Callback function called when client receives RPC reply for \a req. 1290 * Callback function called when client receives RPC reply for \a req.
1171 * Returns 0 on success or error code. 1291 * Returns 0 on success or error code.
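
ptlrpc_add_unreplied() keeps imp_unreplied_list sorted by XID, so the list head is always the lowest XID still waiting for a reply, and ptlrpc_known_replied_xid() reports one less than that. A worked example of the invariant, with XID values invented for illustration:

/*
 * Hypothetical import state (ascending XIDs on imp_unreplied_list):
 *
 *     107 -> 109 -> 112
 *
 * Every request with XID <= 106 has been replied to, so
 *
 *     ptlrpc_known_replied_xid(imp) == 107 - 1 == 106
 *
 * and imp_known_replied_xid is raised to 106 if it was lower. Once the
 * reply for 107 arrives and that request leaves the list, the head becomes
 * 109 and the reported value becomes 108.
 */
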
@@ -1180,6 +1300,7 @@ static int after_reply(struct ptlrpc_request *req)
1180 int rc; 1300 int rc;
1181 struct timespec64 work_start; 1301 struct timespec64 work_start;
1182 long timediff; 1302 long timediff;
1303 u64 committed;
1183 1304
1184 LASSERT(obd); 1305 LASSERT(obd);
1185 /* repbuf must be unlinked */ 1306 /* repbuf must be unlinked */
@@ -1206,6 +1327,10 @@ static int after_reply(struct ptlrpc_request *req)
1206 return 0; 1327 return 0;
1207 } 1328 }
1208 1329
1330 ktime_get_real_ts64(&work_start);
1331 timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
1332 (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
1333 NSEC_PER_USEC;
1209 /* 1334 /*
1210 * NB Until this point, the whole of the incoming message, 1335 * NB Until this point, the whole of the incoming message,
1211 * including buflens, status etc is in the sender's byte order. 1336 * including buflens, status etc is in the sender's byte order.
@@ -1235,13 +1360,6 @@ static int after_reply(struct ptlrpc_request *req)
1235 spin_unlock(&req->rq_lock); 1360 spin_unlock(&req->rq_lock);
1236 req->rq_nr_resend++; 1361 req->rq_nr_resend++;
1237 1362
1238 /* allocate new xid to avoid reply reconstruction */
1239 if (!req->rq_bulk) {
1240 /* new xid is already allocated for bulk in ptlrpc_check_set() */
1241 req->rq_xid = ptlrpc_next_xid();
1242 DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
1243 }
1244
1245 /* Readjust the timeout for current conditions */ 1363 /* Readjust the timeout for current conditions */
1246 ptlrpc_at_set_req_timeout(req); 1364 ptlrpc_at_set_req_timeout(req);
1247 /* 1365 /*
@@ -1255,13 +1373,14 @@ static int after_reply(struct ptlrpc_request *req)
1255 else 1373 else
1256 req->rq_sent = now + req->rq_nr_resend; 1374 req->rq_sent = now + req->rq_nr_resend;
1257 1375
1376 /* Resend for EINPROGRESS will use a new XID */
1377 spin_lock(&imp->imp_lock);
1378 list_del_init(&req->rq_unreplied_list);
1379 spin_unlock(&imp->imp_lock);
1380
1258 return 0; 1381 return 0;
1259 } 1382 }
1260 1383
1261 ktime_get_real_ts64(&work_start);
1262 timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
1263 (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
1264 NSEC_PER_USEC;
1265 if (obd->obd_svc_stats) { 1384 if (obd->obd_svc_stats) {
1266 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, 1385 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1267 timediff); 1386 timediff);
@@ -1338,10 +1457,9 @@ static int after_reply(struct ptlrpc_request *req)
1338 } 1457 }
1339 1458
1340 /* Replay-enabled imports return commit-status information. */ 1459 /* Replay-enabled imports return commit-status information. */
1341 if (lustre_msg_get_last_committed(req->rq_repmsg)) { 1460 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1342 imp->imp_peer_committed_transno = 1461 if (likely(committed > imp->imp_peer_committed_transno))
1343 lustre_msg_get_last_committed(req->rq_repmsg); 1462 imp->imp_peer_committed_transno = committed;
1344 }
1345 1463
1346 ptlrpc_free_committed(imp); 1464 ptlrpc_free_committed(imp);
1347 1465
@@ -1373,9 +1491,17 @@ static int after_reply(struct ptlrpc_request *req)
1373static int ptlrpc_send_new_req(struct ptlrpc_request *req) 1491static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1374{ 1492{
1375 struct obd_import *imp = req->rq_import; 1493 struct obd_import *imp = req->rq_import;
1494 u64 min_xid = 0;
1376 int rc; 1495 int rc;
1377 1496
1378 LASSERT(req->rq_phase == RQ_PHASE_NEW); 1497 LASSERT(req->rq_phase == RQ_PHASE_NEW);
1498
1499 /* do not try to go further if there is not enough memory in enc_pool */
1500 if (req->rq_sent && req->rq_bulk)
1501 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1502 pool_is_at_full_capacity())
1503 return -ENOMEM;
1504
1379 if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) && 1505 if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1380 (!req->rq_generation_set || 1506 (!req->rq_generation_set ||
1381 req->rq_import_generation == imp->imp_generation)) 1507 req->rq_import_generation == imp->imp_generation))
@@ -1385,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1385 1511
1386 spin_lock(&imp->imp_lock); 1512 spin_lock(&imp->imp_lock);
1387 1513
1514 LASSERT(req->rq_xid);
1515 LASSERT(!list_empty(&req->rq_unreplied_list));
1516
1388 if (!req->rq_generation_set) 1517 if (!req->rq_generation_set)
1389 req->rq_import_generation = imp->imp_generation; 1518 req->rq_import_generation = imp->imp_generation;
1390 1519
@@ -1414,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1414 LASSERT(list_empty(&req->rq_list)); 1543 LASSERT(list_empty(&req->rq_list));
1415 list_add_tail(&req->rq_list, &imp->imp_sending_list); 1544 list_add_tail(&req->rq_list, &imp->imp_sending_list);
1416 atomic_inc(&req->rq_import->imp_inflight); 1545 atomic_inc(&req->rq_import->imp_inflight);
1546
1547 /* find the known replied XID from the unreplied list, CONNECT
1548 * and DISCONNECT requests are skipped to make the sanity check
1549 * on server side happy. see process_req_last_xid().
1550 *
1551 * For CONNECT: Because replay requests have lower XID, it'll
1552 * break the sanity check if CONNECT bump the exp_last_xid on
1553 * server.
1554 *
1555 * For DISCONNECT: Since client will abort inflight RPC before
1556 * sending DISCONNECT, DISCONNECT may carry an XID which higher
1557 * than the inflight RPC.
1558 */
1559 if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1560 min_xid = ptlrpc_known_replied_xid(imp);
1417 spin_unlock(&imp->imp_lock); 1561 spin_unlock(&imp->imp_lock);
1418 1562
1563 lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1564
1419 lustre_msg_set_status(req->rq_reqmsg, current_pid()); 1565 lustre_msg_set_status(req->rq_reqmsg, current_pid());
1420 1566
1421 rc = sptlrpc_req_refresh_ctx(req, -1); 1567 rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1438,6 +1584,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1438 lustre_msg_get_opc(req->rq_reqmsg)); 1584 lustre_msg_get_opc(req->rq_reqmsg));
1439 1585
1440 rc = ptl_send_rpc(req, 0); 1586 rc = ptl_send_rpc(req, 0);
1587 if (rc == -ENOMEM) {
1588 spin_lock(&imp->imp_lock);
1589 if (!list_empty(&req->rq_list)) {
1590 list_del_init(&req->rq_list);
1591 atomic_dec(&req->rq_import->imp_inflight);
1592 }
1593 spin_unlock(&imp->imp_lock);
1594 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1595 return rc;
1596 }
1441 if (rc) { 1597 if (rc) {
1442 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); 1598 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
1443 spin_lock(&req->rq_lock); 1599 spin_lock(&req->rq_lock);
@@ -1688,18 +1844,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1688 spin_lock(&req->rq_lock); 1844 spin_lock(&req->rq_lock);
1689 req->rq_resend = 1; 1845 req->rq_resend = 1;
1690 spin_unlock(&req->rq_lock); 1846 spin_unlock(&req->rq_lock);
1691 if (req->rq_bulk) { 1847 if (req->rq_bulk &&
1692 __u64 old_xid; 1848 !ptlrpc_unregister_bulk(req, 1))
1693 1849 continue;
1694 if (!ptlrpc_unregister_bulk(req, 1))
1695 continue;
1696
1697 /* ensure previous bulk fails */
1698 old_xid = req->rq_xid;
1699 req->rq_xid = ptlrpc_next_xid();
1700 CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
1701 old_xid, req->rq_xid);
1702 }
1703 } 1850 }
1704 /* 1851 /*
1705 * rq_wait_ctx is only touched by ptlrpcd, 1852 * rq_wait_ctx is only touched by ptlrpcd,
@@ -1727,6 +1874,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1727 } 1874 }
1728 1875
1729 rc = ptl_send_rpc(req, 0); 1876 rc = ptl_send_rpc(req, 0);
1877 if (rc == -ENOMEM) {
1878 spin_lock(&imp->imp_lock);
1879 if (!list_empty(&req->rq_list))
1880 list_del_init(&req->rq_list);
1881 spin_unlock(&imp->imp_lock);
1882 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1883 continue;
1884 }
1730 if (rc) { 1885 if (rc) {
1731 DEBUG_REQ(D_HA, req, 1886 DEBUG_REQ(D_HA, req,
1732 "send failed: rc = %d", rc); 1887 "send failed: rc = %d", rc);
@@ -1850,6 +2005,7 @@ interpret:
1850 list_del_init(&req->rq_list); 2005 list_del_init(&req->rq_list);
1851 atomic_dec(&imp->imp_inflight); 2006 atomic_dec(&imp->imp_inflight);
1852 } 2007 }
2008 list_del_init(&req->rq_unreplied_list);
1853 spin_unlock(&imp->imp_lock); 2009 spin_unlock(&imp->imp_lock);
1854 2010
1855 atomic_dec(&set->set_remaining); 2011 atomic_dec(&set->set_remaining);
@@ -2247,6 +2403,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2247 if (!locked) 2403 if (!locked)
2248 spin_lock(&request->rq_import->imp_lock); 2404 spin_lock(&request->rq_import->imp_lock);
2249 list_del_init(&request->rq_replay_list); 2405 list_del_init(&request->rq_replay_list);
2406 list_del_init(&request->rq_unreplied_list);
2250 if (!locked) 2407 if (!locked)
2251 spin_unlock(&request->rq_import->imp_lock); 2408 spin_unlock(&request->rq_import->imp_lock);
2252 } 2409 }
@@ -2266,7 +2423,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2266 request->rq_import = NULL; 2423 request->rq_import = NULL;
2267 } 2424 }
2268 if (request->rq_bulk) 2425 if (request->rq_bulk)
2269 ptlrpc_free_bulk_pin(request->rq_bulk); 2426 ptlrpc_free_bulk(request->rq_bulk);
2270 2427
2271 if (request->rq_reqbuf || request->rq_clrbuf) 2428 if (request->rq_reqbuf || request->rq_clrbuf)
2272 sptlrpc_cli_free_reqbuf(request); 2429 sptlrpc_cli_free_reqbuf(request);
@@ -2542,14 +2699,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
2542 req->rq_resend = 1; 2699 req->rq_resend = 1;
2543 req->rq_net_err = 0; 2700 req->rq_net_err = 0;
2544 req->rq_timedout = 0; 2701 req->rq_timedout = 0;
2545 if (req->rq_bulk) {
2546 __u64 old_xid = req->rq_xid;
2547
2548 /* ensure previous bulk fails */
2549 req->rq_xid = ptlrpc_next_xid();
2550 CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
2551 old_xid, req->rq_xid);
2552 }
2553 ptlrpc_client_wake_req(req); 2702 ptlrpc_client_wake_req(req);
2554 spin_unlock(&req->rq_lock); 2703 spin_unlock(&req->rq_lock);
2555} 2704}
@@ -2592,6 +2741,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2592 2741
2593 lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); 2742 lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
2594 2743
2744 spin_lock(&req->rq_lock);
2745 req->rq_resend = 0;
2746 spin_unlock(&req->rq_lock);
2747
2595 LASSERT(imp->imp_replayable); 2748 LASSERT(imp->imp_replayable);
2596 /* Balanced in ptlrpc_free_committed, usually. */ 2749 /* Balanced in ptlrpc_free_committed, usually. */
2597 ptlrpc_request_addref(req); 2750 ptlrpc_request_addref(req);
@@ -2667,8 +2820,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
2667 2820
2668 atomic_dec(&imp->imp_replay_inflight); 2821 atomic_dec(&imp->imp_replay_inflight);
2669 2822
2670 if (!ptlrpc_client_replied(req)) { 2823 /*
 2671 CERROR("request replay timed out, restarting recovery\n"); 2824	 * Note: if it is a bulk replay (MDS-MDS replay), then even if
 2825	 * the server got the request but the bulk transfer timed out,
 2826	 * let's replay the bulk req again
2827 */
2828 if (!ptlrpc_client_replied(req) ||
2829 (req->rq_bulk &&
2830 lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
2831 DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
2672 rc = -ETIMEDOUT; 2832 rc = -ETIMEDOUT;
2673 goto out; 2833 goto out;
2674 } 2834 }
@@ -2939,6 +3099,48 @@ __u64 ptlrpc_next_xid(void)
2939} 3099}
2940 3100
2941/** 3101/**
 3102 * If the request has a newly allocated XID (new request or EINPROGRESS
 3103 * resend), use this XID as the bulk matchbits; otherwise allocate new
 3104 * matchbits for the request so the previous bulk fails, avoiding problems
 3105 * with lost replies and several transfers landing in the same buffer from
 3106 * different sending attempts.
3107 */
3108void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
3109{
3110 struct ptlrpc_bulk_desc *bd = req->rq_bulk;
3111
3112 LASSERT(bd);
3113
3114 if (!req->rq_resend) {
3115 /* this request has a new xid, just use it as bulk matchbits */
3116 req->rq_mbits = req->rq_xid;
3117
3118 } else { /* needs to generate a new matchbits for resend */
3119 u64 old_mbits = req->rq_mbits;
3120
3121 if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
3122 OBD_CONNECT_BULK_MBITS)) {
3123 req->rq_mbits = ptlrpc_next_xid();
3124 } else {
3125 /* old version transfers rq_xid to peer as matchbits */
3126 req->rq_mbits = ptlrpc_next_xid();
3127 req->rq_xid = req->rq_mbits;
3128 }
3129
3130 CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
3131 old_mbits, req->rq_mbits);
3132 }
3133
3134 /*
3135 * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
 3136	 * that the server can infer the number of bulks that were prepared,
3137 * see LU-1431
3138 */
3139 req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
3140 LNET_MAX_IOV) - 1;
3141}
3142
3143/**
2942 * Get a glimpse at what next xid value might have been. 3144 * Get a glimpse at what next xid value might have been.
2943 * Returns possible next xid. 3145 * Returns possible next xid.
2944 */ 3146 */
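
For readers skimming the hunk above: the last statement of ptlrpc_set_bulk_mbits() advances rq_mbits to the matchbits of the final bulk MD. A minimal stand-alone sketch of that arithmetic (not part of the patch; LNET_MAX_IOV is replaced here by an illustrative constant):

#include <stdio.h>
#include <stdint.h>

#define FAKE_LNET_MAX_IOV 256	/* illustrative value, not the real constant */

int main(void)
{
	uint64_t xid = 0x100;		/* freshly assigned request XID        */
	int iov_count = 600;		/* fragments in the bulk descriptor    */
	uint64_t mbits = xid;		/* new request: mbits start at the XID */
	int total_md = (iov_count + FAKE_LNET_MAX_IOV - 1) / FAKE_LNET_MAX_IOV;

	/* rq_mbits is advanced to the matchbits of the final bulk MD */
	mbits += total_md - 1;

	/* the first MD's matchbits can be recovered from the last one */
	printf("MDs=%d first=x%llx last=x%llx\n", total_md,
	       (unsigned long long)(mbits - total_md + 1),
	       (unsigned long long)mbits);
	return 0;
}

With 600 fragments and a 256-fragment MD limit this prints MDs=3 first=x100 last=x102, which is how the receiver can infer the number of bulks from the single rq_mbits value.
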
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 7b020d60c9e5..6c7c8b68a909 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -152,8 +152,8 @@ void ptlrpc_connection_fini(void)
152/* 152/*
153 * Hash operations for net_peer<->connection 153 * Hash operations for net_peer<->connection
154 */ 154 */
155static unsigned 155static unsigned int
156conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask) 156conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
157{ 157{
158 return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask); 158 return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
159} 159}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 283dfb296d35..49f3e6368415 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -182,9 +182,9 @@ void client_bulk_callback(lnet_event_t *ev)
182 struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; 182 struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
183 struct ptlrpc_request *req; 183 struct ptlrpc_request *req;
184 184
185 LASSERT((desc->bd_type == BULK_PUT_SINK && 185 LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
186 ev->type == LNET_EVENT_PUT) || 186 ev->type == LNET_EVENT_PUT) ||
187 (desc->bd_type == BULK_GET_SOURCE && 187 (ptlrpc_is_bulk_get_source(desc->bd_type) &&
188 ev->type == LNET_EVENT_GET) || 188 ev->type == LNET_EVENT_GET) ||
189 ev->type == LNET_EVENT_UNLINK); 189 ev->type == LNET_EVENT_UNLINK);
190 LASSERT(ev->unlinked); 190 LASSERT(ev->unlinked);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a23d0a05b574..e8280194001c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -396,7 +396,7 @@ void ptlrpc_activate_import(struct obd_import *imp)
396} 396}
397EXPORT_SYMBOL(ptlrpc_activate_import); 397EXPORT_SYMBOL(ptlrpc_activate_import);
398 398
399static void ptlrpc_pinger_force(struct obd_import *imp) 399void ptlrpc_pinger_force(struct obd_import *imp)
400{ 400{
401 CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd), 401 CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
402 ptlrpc_import_state_name(imp->imp_state)); 402 ptlrpc_import_state_name(imp->imp_state));
@@ -408,6 +408,7 @@ static void ptlrpc_pinger_force(struct obd_import *imp)
408 if (imp->imp_state != LUSTRE_IMP_CONNECTING) 408 if (imp->imp_state != LUSTRE_IMP_CONNECTING)
409 ptlrpc_pinger_wake_up(); 409 ptlrpc_pinger_wake_up();
410} 410}
411EXPORT_SYMBOL(ptlrpc_pinger_force);
411 412
412void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) 413void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
413{ 414{
@@ -621,7 +622,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
621 spin_unlock(&imp->imp_lock); 622 spin_unlock(&imp->imp_lock);
622 CERROR("already connected\n"); 623 CERROR("already connected\n");
623 return 0; 624 return 0;
624 } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) { 625 } else if (imp->imp_state == LUSTRE_IMP_CONNECTING ||
626 imp->imp_connected) {
625 spin_unlock(&imp->imp_lock); 627 spin_unlock(&imp->imp_lock);
626 CERROR("already connecting\n"); 628 CERROR("already connecting\n");
627 return -EALREADY; 629 return -EALREADY;
@@ -691,8 +693,6 @@ int ptlrpc_connect_import(struct obd_import *imp)
691 request->rq_timeout = INITIAL_CONNECT_TIMEOUT; 693 request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
692 lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); 694 lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
693 695
694 lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
695
696 request->rq_no_resend = 1; 696 request->rq_no_resend = 1;
697 request->rq_no_delay = 1; 697 request->rq_no_delay = 1;
698 request->rq_send_state = LUSTRE_IMP_CONNECTING; 698 request->rq_send_state = LUSTRE_IMP_CONNECTING;
@@ -859,6 +859,17 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
859 client_adjust_max_dirty(cli); 859 client_adjust_max_dirty(cli);
860 860
861 /* 861 /*
 862	 * Update the client's max modify RPCs in flight with the value returned
863 * by the server
864 */
865 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
866 cli->cl_max_mod_rpcs_in_flight = min(
867 cli->cl_max_mod_rpcs_in_flight,
868 ocd->ocd_maxmodrpcs);
869 else
870 cli->cl_max_mod_rpcs_in_flight = 1;
871
872 /*
862 * Reset ns_connect_flags only for initial connect. It might be 873 * Reset ns_connect_flags only for initial connect. It might be
863 * changed in while using FS and if we reset it in reconnect 874 * changed in while using FS and if we reset it in reconnect
864 * this leads to losing user settings done before such as 875 * this leads to losing user settings done before such as
@@ -873,8 +884,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
873 ocd->ocd_connect_flags; 884 ocd->ocd_connect_flags;
874 } 885 }
875 886
876 if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && 887 if (ocd->ocd_connect_flags & OBD_CONNECT_AT)
877 (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
878 /* 888 /*
879 * We need a per-message support flag, because 889 * We need a per-message support flag, because
880 * a. we don't know if the incoming connect reply 890 * a. we don't know if the incoming connect reply
@@ -889,16 +899,45 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
889 else 899 else
890 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; 900 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
891 901
892 if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) && 902 imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
893 (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
894 imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
895 else
896 imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
897 903
898 return 0; 904 return 0;
899} 905}
900 906
901/** 907/**
 908 * Add all replay requests back to the unreplied list before starting
 909 * replay, so that we can make sure the known replied XID only ever
 910 * increases, even while replaying requests.
911 */
912static void ptlrpc_prepare_replay(struct obd_import *imp)
913{
914 struct ptlrpc_request *req;
915
916 if (imp->imp_state != LUSTRE_IMP_REPLAY ||
917 imp->imp_resend_replay)
918 return;
919
920 /*
 921	 * If the server was restarted during replay, the requests may
 922	 * have been added to the unreplied list in a former replay.
923 */
924 spin_lock(&imp->imp_lock);
925
926 list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) {
927 if (list_empty(&req->rq_unreplied_list))
928 ptlrpc_add_unreplied(req);
929 }
930
931 list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) {
932 if (list_empty(&req->rq_unreplied_list))
933 ptlrpc_add_unreplied(req);
934 }
935
936 imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
937 spin_unlock(&imp->imp_lock);
938}
939
940/**
902 * interpret_reply callback for connect RPCs. 941 * interpret_reply callback for connect RPCs.
903 * Looks into returned status of connect operation and decides 942 * Looks into returned status of connect operation and decides
904 * what to do with the import - i.e enter recovery, promote it to 943 * what to do with the import - i.e enter recovery, promote it to
@@ -933,6 +972,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
933 ptlrpc_maybe_ping_import_soon(imp); 972 ptlrpc_maybe_ping_import_soon(imp);
934 goto out; 973 goto out;
935 } 974 }
975
976 /*
 977	 * LU-7558: indicate that we are interpreting the connect reply;
 978	 * ptlrpc_connect_import() will not try to reconnect until
 979	 * the interpret finishes.
980 */
981 imp->imp_connected = 1;
936 spin_unlock(&imp->imp_lock); 982 spin_unlock(&imp->imp_lock);
937 983
938 LASSERT(imp->imp_conn_current); 984 LASSERT(imp->imp_conn_current);
@@ -967,6 +1013,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
967 1013
968 spin_unlock(&imp->imp_lock); 1014 spin_unlock(&imp->imp_lock);
969 1015
1016 if (!exp) {
1017 /* This could happen if export is cleaned during the
1018 * connect attempt
1019 */
1020 CERROR("%s: missing export after connect\n",
1021 imp->imp_obd->obd_name);
1022 rc = -ENODEV;
1023 goto out;
1024 }
1025
970 /* check that server granted subset of flags we asked for. */ 1026 /* check that server granted subset of flags we asked for. */
971 if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) != 1027 if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
972 ocd->ocd_connect_flags) { 1028 ocd->ocd_connect_flags) {
@@ -977,15 +1033,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
977 goto out; 1033 goto out;
978 } 1034 }
979 1035
980 if (!exp) {
981 /* This could happen if export is cleaned during the
982 * connect attempt
983 */
984 CERROR("%s: missing export after connect\n",
985 imp->imp_obd->obd_name);
986 rc = -ENODEV;
987 goto out;
988 }
989 old_connect_flags = exp_connect_flags(exp); 1036 old_connect_flags = exp_connect_flags(exp);
990 exp->exp_connect_data = *ocd; 1037 exp->exp_connect_data = *ocd;
991 imp->imp_obd->obd_self_export->exp_connect_data = *ocd; 1038 imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
@@ -1124,6 +1171,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
1124 imp->imp_remote_handle = 1171 imp->imp_remote_handle =
1125 *lustre_msg_get_handle(request->rq_repmsg); 1172 *lustre_msg_get_handle(request->rq_repmsg);
1126 imp->imp_last_replay_transno = 0; 1173 imp->imp_last_replay_transno = 0;
1174 imp->imp_replay_cursor = &imp->imp_committed_list;
1127 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); 1175 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
1128 } else { 1176 } else {
1129 DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)", 1177 DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
@@ -1147,18 +1195,25 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
1147 } 1195 }
1148 1196
1149finish: 1197finish:
1198 ptlrpc_prepare_replay(imp);
1150 rc = ptlrpc_import_recovery_state_machine(imp); 1199 rc = ptlrpc_import_recovery_state_machine(imp);
1151 if (rc == -ENOTCONN) { 1200 if (rc == -ENOTCONN) {
1152 CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n", 1201 CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
1153 obd2cli_tgt(imp->imp_obd), 1202 obd2cli_tgt(imp->imp_obd),
1154 imp->imp_connection->c_remote_uuid.uuid); 1203 imp->imp_connection->c_remote_uuid.uuid);
1155 ptlrpc_connect_import(imp); 1204 ptlrpc_connect_import(imp);
1205 spin_lock(&imp->imp_lock);
1206 imp->imp_connected = 0;
1156 imp->imp_connect_tried = 1; 1207 imp->imp_connect_tried = 1;
1208 spin_unlock(&imp->imp_lock);
1157 return 0; 1209 return 0;
1158 } 1210 }
1159 1211
1160out: 1212out:
1213 spin_lock(&imp->imp_lock);
1214 imp->imp_connected = 0;
1161 imp->imp_connect_tried = 1; 1215 imp->imp_connect_tried = 1;
1216 spin_unlock(&imp->imp_lock);
1162 1217
1163 if (rc != 0) { 1218 if (rc != 0) {
1164 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); 1219 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
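
The OBD_CONNECT_MULTIMODRPCS hunk above clamps the client's modifying-RPCs-in-flight limit to what the server advertises, falling back to one when the server lacks the feature. A rough stand-alone sketch of that negotiation, using toy types and a made-up flag bit in place of the real obd_connect_data/client_obd structures:

#include <stdio.h>

struct toy_ocd { unsigned long long flags; unsigned short maxmodrpcs; };
struct toy_cli { unsigned short max_mod_rpcs_in_flight; };

#define TOY_CONNECT_MULTIMODRPCS 0x1ULL	/* illustrative flag bit only */

static void toy_set_flags(struct toy_cli *cli, const struct toy_ocd *ocd)
{
	/* same shape as the hunk above: clamp to the server limit, or fall
	 * back to a single modifying RPC when the server lacks the feature */
	if (ocd->flags & TOY_CONNECT_MULTIMODRPCS)
		cli->max_mod_rpcs_in_flight =
			cli->max_mod_rpcs_in_flight < ocd->maxmodrpcs ?
			cli->max_mod_rpcs_in_flight : ocd->maxmodrpcs;
	else
		cli->max_mod_rpcs_in_flight = 1;
}

int main(void)
{
	struct toy_cli cli = { .max_mod_rpcs_in_flight = 8 };
	struct toy_ocd ocd = { .flags = TOY_CONNECT_MULTIMODRPCS, .maxmodrpcs = 5 };

	toy_set_flags(&cli, &ocd);
	printf("negotiated %u modifying RPCs in flight\n",
	       cli.max_mod_rpcs_in_flight);
	return 0;
}
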
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 839ef3e80c1a..99d7c667df28 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -48,14 +48,14 @@
48 48
49#include <linux/module.h> 49#include <linux/module.h>
50 50
51/* LUSTRE_VERSION_CODE */
52#include "../include/lustre_ver.h"
53
54#include "../include/obd_support.h"
55/* lustre_swab_mdt_body */
56#include "../include/lustre/lustre_idl.h" 51#include "../include/lustre/lustre_idl.h"
57/* obd2cli_tgt() (required by DEBUG_REQ()) */ 52
53#include "../include/llog_swab.h"
54#include "../include/lustre_debug.h"
55#include "../include/lustre_swab.h"
56#include "../include/lustre_ver.h"
58#include "../include/obd.h" 57#include "../include/obd.h"
58#include "../include/obd_support.h"
59 59
60/* __REQ_LAYOUT_USER__ */ 60/* __REQ_LAYOUT_USER__ */
61#endif 61#endif
@@ -121,7 +121,7 @@ static const struct req_msg_field *mdt_close_client[] = {
121 &RMF_CAPA1 121 &RMF_CAPA1
122}; 122};
123 123
124static const struct req_msg_field *mdt_release_close_client[] = { 124static const struct req_msg_field *mdt_intent_close_client[] = {
125 &RMF_PTLRPC_BODY, 125 &RMF_PTLRPC_BODY,
126 &RMF_MDT_EPOCH, 126 &RMF_MDT_EPOCH,
127 &RMF_REC_REINT, 127 &RMF_REC_REINT,
@@ -257,6 +257,18 @@ static const struct req_msg_field *mds_reint_rename_client[] = {
257 &RMF_DLM_REQ 257 &RMF_DLM_REQ
258}; 258};
259 259
260static const struct req_msg_field *mds_reint_migrate_client[] = {
261 &RMF_PTLRPC_BODY,
262 &RMF_REC_REINT,
263 &RMF_CAPA1,
264 &RMF_CAPA2,
265 &RMF_NAME,
266 &RMF_SYMTGT,
267 &RMF_DLM_REQ,
268 &RMF_MDT_EPOCH,
269 &RMF_CLOSE_DATA
270};
271
260static const struct req_msg_field *mds_last_unlink_server[] = { 272static const struct req_msg_field *mds_last_unlink_server[] = {
261 &RMF_PTLRPC_BODY, 273 &RMF_PTLRPC_BODY,
262 &RMF_MDT_BODY, 274 &RMF_MDT_BODY,
@@ -666,10 +678,9 @@ static struct req_format *req_formats[] = {
666 &RQF_MDS_GETXATTR, 678 &RQF_MDS_GETXATTR,
667 &RQF_MDS_SYNC, 679 &RQF_MDS_SYNC,
668 &RQF_MDS_CLOSE, 680 &RQF_MDS_CLOSE,
669 &RQF_MDS_RELEASE_CLOSE, 681 &RQF_MDS_INTENT_CLOSE,
670 &RQF_MDS_READPAGE, 682 &RQF_MDS_READPAGE,
671 &RQF_MDS_WRITEPAGE, 683 &RQF_MDS_WRITEPAGE,
672 &RQF_MDS_DONE_WRITING,
673 &RQF_MDS_REINT, 684 &RQF_MDS_REINT,
674 &RQF_MDS_REINT_CREATE, 685 &RQF_MDS_REINT_CREATE,
675 &RQF_MDS_REINT_CREATE_ACL, 686 &RQF_MDS_REINT_CREATE_ACL,
@@ -679,9 +690,9 @@ static struct req_format *req_formats[] = {
679 &RQF_MDS_REINT_UNLINK, 690 &RQF_MDS_REINT_UNLINK,
680 &RQF_MDS_REINT_LINK, 691 &RQF_MDS_REINT_LINK,
681 &RQF_MDS_REINT_RENAME, 692 &RQF_MDS_REINT_RENAME,
693 &RQF_MDS_REINT_MIGRATE,
682 &RQF_MDS_REINT_SETATTR, 694 &RQF_MDS_REINT_SETATTR,
683 &RQF_MDS_REINT_SETXATTR, 695 &RQF_MDS_REINT_SETXATTR,
684 &RQF_MDS_QUOTACHECK,
685 &RQF_MDS_QUOTACTL, 696 &RQF_MDS_QUOTACTL,
686 &RQF_MDS_HSM_PROGRESS, 697 &RQF_MDS_HSM_PROGRESS,
687 &RQF_MDS_HSM_CT_REGISTER, 698 &RQF_MDS_HSM_CT_REGISTER,
@@ -691,10 +702,8 @@ static struct req_format *req_formats[] = {
691 &RQF_MDS_HSM_ACTION, 702 &RQF_MDS_HSM_ACTION,
692 &RQF_MDS_HSM_REQUEST, 703 &RQF_MDS_HSM_REQUEST,
693 &RQF_MDS_SWAP_LAYOUTS, 704 &RQF_MDS_SWAP_LAYOUTS,
694 &RQF_QC_CALLBACK,
695 &RQF_OST_CONNECT, 705 &RQF_OST_CONNECT,
696 &RQF_OST_DISCONNECT, 706 &RQF_OST_DISCONNECT,
697 &RQF_OST_QUOTACHECK,
698 &RQF_OST_QUOTACTL, 707 &RQF_OST_QUOTACTL,
699 &RQF_OST_GETATTR, 708 &RQF_OST_GETATTR,
700 &RQF_OST_SETATTR, 709 &RQF_OST_SETATTR,
@@ -1180,14 +1189,6 @@ struct req_format RQF_LOG_CANCEL =
1180 DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty); 1189 DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
1181EXPORT_SYMBOL(RQF_LOG_CANCEL); 1190EXPORT_SYMBOL(RQF_LOG_CANCEL);
1182 1191
1183struct req_format RQF_MDS_QUOTACHECK =
1184 DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
1185EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
1186
1187struct req_format RQF_OST_QUOTACHECK =
1188 DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
1189EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
1190
1191struct req_format RQF_MDS_QUOTACTL = 1192struct req_format RQF_MDS_QUOTACTL =
1192 DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only); 1193 DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
1193EXPORT_SYMBOL(RQF_MDS_QUOTACTL); 1194EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
@@ -1196,10 +1197,6 @@ struct req_format RQF_OST_QUOTACTL =
1196 DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only); 1197 DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
1197EXPORT_SYMBOL(RQF_OST_QUOTACTL); 1198EXPORT_SYMBOL(RQF_OST_QUOTACTL);
1198 1199
1199struct req_format RQF_QC_CALLBACK =
1200 DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
1201EXPORT_SYMBOL(RQF_QC_CALLBACK);
1202
1203struct req_format RQF_MDS_GETSTATUS = 1200struct req_format RQF_MDS_GETSTATUS =
1204 DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); 1201 DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
1205EXPORT_SYMBOL(RQF_MDS_GETSTATUS); 1202EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1270,6 +1267,11 @@ struct req_format RQF_MDS_REINT_RENAME =
1270 mds_last_unlink_server); 1267 mds_last_unlink_server);
1271EXPORT_SYMBOL(RQF_MDS_REINT_RENAME); 1268EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
1272 1269
1270struct req_format RQF_MDS_REINT_MIGRATE =
1271 DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client,
1272 mds_last_unlink_server);
1273EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE);
1274
1273struct req_format RQF_MDS_REINT_SETATTR = 1275struct req_format RQF_MDS_REINT_SETATTR =
1274 DEFINE_REQ_FMT0("MDS_REINT_SETATTR", 1276 DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
1275 mds_reint_setattr_client, mds_setattr_server); 1277 mds_reint_setattr_client, mds_setattr_server);
@@ -1381,15 +1383,10 @@ struct req_format RQF_MDS_CLOSE =
1381 mdt_close_client, mds_last_unlink_server); 1383 mdt_close_client, mds_last_unlink_server);
1382EXPORT_SYMBOL(RQF_MDS_CLOSE); 1384EXPORT_SYMBOL(RQF_MDS_CLOSE);
1383 1385
1384struct req_format RQF_MDS_RELEASE_CLOSE = 1386struct req_format RQF_MDS_INTENT_CLOSE =
1385 DEFINE_REQ_FMT0("MDS_CLOSE", 1387 DEFINE_REQ_FMT0("MDS_CLOSE",
1386 mdt_release_close_client, mds_last_unlink_server); 1388 mdt_intent_close_client, mds_last_unlink_server);
1387EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE); 1389EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE);
1388
1389struct req_format RQF_MDS_DONE_WRITING =
1390 DEFINE_REQ_FMT0("MDS_DONE_WRITING",
1391 mdt_close_client, mdt_body_only);
1392EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
1393 1390
1394struct req_format RQF_MDS_READPAGE = 1391struct req_format RQF_MDS_READPAGE =
1395 DEFINE_REQ_FMT0("MDS_READPAGE", 1392 DEFINE_REQ_FMT0("MDS_READPAGE",
@@ -1874,13 +1871,14 @@ static void *__req_capsule_get(struct req_capsule *pill,
1874 getter = (field->rmf_flags & RMF_F_STRING) ? 1871 getter = (field->rmf_flags & RMF_F_STRING) ?
1875 (typeof(getter))lustre_msg_string : lustre_msg_buf; 1872 (typeof(getter))lustre_msg_string : lustre_msg_buf;
1876 1873
1877 if (field->rmf_flags & RMF_F_STRUCT_ARRAY) { 1874 if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) {
1878 /* 1875 /*
1879 * We've already asserted that field->rmf_size > 0 in 1876 * We've already asserted that field->rmf_size > 0 in
1880 * req_layout_init(). 1877 * req_layout_init().
1881 */ 1878 */
1882 len = lustre_msg_buflen(msg, offset); 1879 len = lustre_msg_buflen(msg, offset);
1883 if ((len % field->rmf_size) != 0) { 1880 if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
1881 (len % field->rmf_size)) {
1884 CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n", 1882 CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
1885 field->rmf_name, len, field->rmf_size, loc); 1883 field->rmf_name, len, field->rmf_size, loc);
1886 return NULL; 1884 return NULL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 0f55c01feba8..110d9f505787 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -287,8 +287,13 @@ static int llog_client_read_header(const struct lu_env *env,
287 goto out; 287 goto out;
288 } 288 }
289 289
290 memcpy(handle->lgh_hdr, hdr, sizeof(*hdr)); 290 if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) {
291 handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index; 291 rc = -EFAULT;
292 goto out;
293 }
294
295 memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len);
296 handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
292 297
293 /* sanity checks */ 298 /* sanity checks */
294 llh_hdr = &handle->lgh_hdr->llh_hdr; 299 llh_hdr = &handle->lgh_hdr->llh_hdr;
@@ -296,9 +301,14 @@ static int llog_client_read_header(const struct lu_env *env,
296 CERROR("bad log header magic: %#x (expecting %#x)\n", 301 CERROR("bad log header magic: %#x (expecting %#x)\n",
297 llh_hdr->lrh_type, LLOG_HDR_MAGIC); 302 llh_hdr->lrh_type, LLOG_HDR_MAGIC);
298 rc = -EIO; 303 rc = -EIO;
299 } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) { 304 } else if (llh_hdr->lrh_len !=
300 CERROR("incorrectly sized log header: %#x (expecting %#x)\n", 305 LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len ||
301 llh_hdr->lrh_len, LLOG_CHUNK_SIZE); 306 (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) ||
307 llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
308 llh_hdr->lrh_len > handle->lgh_hdr_size) {
309 CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n",
310 llh_hdr->lrh_len,
311 LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len);
302 CERROR("you may need to re-run lconf --write_conf.\n"); 312 CERROR("you may need to re-run lconf --write_conf.\n");
303 rc = -EIO; 313 rc = -EIO;
304 } 314 }
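
The tightened check in llog_client_read_header() above only accepts a chunk size that is a power of two, at least the minimum chunk size, and no larger than the preallocated header buffer. A small stand-alone sketch of that predicate (with an assumed 8192 standing in for LLOG_MIN_CHUNK_SIZE):

#include <stdio.h>
#include <stdint.h>

#define TOY_MIN_CHUNK_SIZE 8192		/* stand-in for LLOG_MIN_CHUNK_SIZE */

/* mirrors the new sanity check: the chunk size must be a power of two,
 * at least the minimum chunk size, and no larger than the local buffer */
static int toy_chunk_size_ok(uint32_t len, uint32_t bufsize)
{
	if (len & (len - 1))		/* not a power of two */
		return 0;
	if (len < TOY_MIN_CHUNK_SIZE || len > bufsize)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_chunk_size_ok(8192, 32768),	/* 1: valid         */
	       toy_chunk_size_ok(12000, 32768),	/* 0: not pow2      */
	       toy_chunk_size_ok(65536, 32768));	/* 0: exceeds buffer */
	return 0;
}
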
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 9bad57d65db4..f87478180013 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -479,8 +479,8 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
479 struct ptlrpc_nrs_policy *policy; 479 struct ptlrpc_nrs_policy *policy;
480 struct ptlrpc_nrs_pol_info *infos; 480 struct ptlrpc_nrs_pol_info *infos;
481 struct ptlrpc_nrs_pol_info tmp; 481 struct ptlrpc_nrs_pol_info tmp;
482 unsigned num_pols; 482 unsigned int num_pols;
483 unsigned pol_idx = 0; 483 unsigned int pol_idx = 0;
484 bool hp = false; 484 bool hp = false;
485 int i; 485 int i;
486 int rc = 0; 486 int rc = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 9c937398a085..da1209e40f03 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -114,7 +114,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
114 int rc2; 114 int rc2;
115 int posted_md; 115 int posted_md;
116 int total_md; 116 int total_md;
117 __u64 xid; 117 u64 mbits;
118 lnet_handle_me_t me_h; 118 lnet_handle_me_t me_h;
119 lnet_md_t md; 119 lnet_md_t md;
120 120
@@ -127,8 +127,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
127 LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); 127 LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
128 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); 128 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
129 LASSERT(desc->bd_req); 129 LASSERT(desc->bd_req);
130 LASSERT(desc->bd_type == BULK_PUT_SINK || 130 LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
131 desc->bd_type == BULK_GET_SOURCE);
132 131
133 /* cleanup the state of the bulk for it will be reused */ 132 /* cleanup the state of the bulk for it will be reused */
134 if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY) 133 if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
@@ -143,40 +142,37 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
143 LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback); 142 LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
144 LASSERT(desc->bd_cbid.cbid_arg == desc); 143 LASSERT(desc->bd_cbid.cbid_arg == desc);
145 144
146 /* An XID is only used for a single request from the client. 145 total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
147 * For retried bulk transfers, a new XID will be allocated in 146 /* rq_mbits is matchbits of the final bulk */
148 * in ptlrpc_check_set() if it needs to be resent, so it is not 147 mbits = req->rq_mbits - total_md + 1;
149 * using the same RDMA match bits after an error. 148
150 * 149 LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
151 * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The 150 "first mbits = x%llu, last mbits = x%llu\n",
152 * first bulk XID is power-of-two aligned before rq_xid. LU-1431 151 mbits, req->rq_mbits);
153 */
154 xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
155 LASSERTF(!(desc->bd_registered && 152 LASSERTF(!(desc->bd_registered &&
156 req->rq_send_state != LUSTRE_IMP_REPLAY) || 153 req->rq_send_state != LUSTRE_IMP_REPLAY) ||
157 xid != desc->bd_last_xid, 154 mbits != desc->bd_last_mbits,
158 "registered: %d rq_xid: %llu bd_last_xid: %llu\n", 155 "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
159 desc->bd_registered, xid, desc->bd_last_xid); 156 desc->bd_registered, mbits, desc->bd_last_mbits);
160 157
161 total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
162 desc->bd_registered = 1; 158 desc->bd_registered = 1;
163 desc->bd_last_xid = xid; 159 desc->bd_last_mbits = mbits;
164 desc->bd_md_count = total_md; 160 desc->bd_md_count = total_md;
165 md.user_ptr = &desc->bd_cbid; 161 md.user_ptr = &desc->bd_cbid;
166 md.eq_handle = ptlrpc_eq_h; 162 md.eq_handle = ptlrpc_eq_h;
167 md.threshold = 1; /* PUT or GET */ 163 md.threshold = 1; /* PUT or GET */
168 164
169 for (posted_md = 0; posted_md < total_md; posted_md++, xid++) { 165 for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
170 md.options = PTLRPC_MD_OPTIONS | 166 md.options = PTLRPC_MD_OPTIONS |
171 ((desc->bd_type == BULK_GET_SOURCE) ? 167 (ptlrpc_is_bulk_op_get(desc->bd_type) ?
172 LNET_MD_OP_GET : LNET_MD_OP_PUT); 168 LNET_MD_OP_GET : LNET_MD_OP_PUT);
173 ptlrpc_fill_bulk_md(&md, desc, posted_md); 169 ptlrpc_fill_bulk_md(&md, desc, posted_md);
174 170
175 rc = LNetMEAttach(desc->bd_portal, peer, xid, 0, 171 rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
176 LNET_UNLINK, LNET_INS_AFTER, &me_h); 172 LNET_UNLINK, LNET_INS_AFTER, &me_h);
177 if (rc != 0) { 173 if (rc != 0) {
178 CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n", 174 CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
179 desc->bd_import->imp_obd->obd_name, xid, 175 desc->bd_import->imp_obd->obd_name, mbits,
180 posted_md, rc); 176 posted_md, rc);
181 break; 177 break;
182 } 178 }
@@ -186,7 +182,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
186 &desc->bd_mds[posted_md]); 182 &desc->bd_mds[posted_md]);
187 if (rc != 0) { 183 if (rc != 0) {
188 CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n", 184 CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
189 desc->bd_import->imp_obd->obd_name, xid, 185 desc->bd_import->imp_obd->obd_name, mbits,
190 posted_md, rc); 186 posted_md, rc);
191 rc2 = LNetMEUnlink(me_h); 187 rc2 = LNetMEUnlink(me_h);
192 LASSERT(rc2 == 0); 188 LASSERT(rc2 == 0);
@@ -205,27 +201,19 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
205 return -ENOMEM; 201 return -ENOMEM;
206 } 202 }
207 203
208 /* Set rq_xid to matchbits of the final bulk so that server can
209 * infer the number of bulks that were prepared
210 */
211 req->rq_xid = --xid;
212 LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
213 "bd_last_xid = x%llu, rq_xid = x%llu\n",
214 desc->bd_last_xid, req->rq_xid);
215
216 spin_lock(&desc->bd_lock); 204 spin_lock(&desc->bd_lock);
217 /* Holler if peer manages to touch buffers before he knows the xid */ 205 /* Holler if peer manages to touch buffers before he knows the mbits */
218 if (desc->bd_md_count != total_md) 206 if (desc->bd_md_count != total_md)
219 CWARN("%s: Peer %s touched %d buffers while I registered\n", 207 CWARN("%s: Peer %s touched %d buffers while I registered\n",
220 desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), 208 desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
221 total_md - desc->bd_md_count); 209 total_md - desc->bd_md_count);
222 spin_unlock(&desc->bd_lock); 210 spin_unlock(&desc->bd_lock);
223 211
224 CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n", 212 CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
225 desc->bd_md_count, 213 desc->bd_md_count,
226 desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink", 214 ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
227 desc->bd_iov_count, desc->bd_nob, 215 desc->bd_iov_count, desc->bd_nob,
228 desc->bd_last_xid, req->rq_xid, desc->bd_portal); 216 desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
229 217
230 return 0; 218 return 0;
231} 219}
@@ -521,6 +509,39 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
521 lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt); 509 lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
522 lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags); 510 lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
523 511
512 /*
 513	 * If this is the first resend of the request for EINPROGRESS,
 514	 * we need to allocate a new XID (see after_reply()); this differs
 515	 * from a resend caused by a reply timeout.
516 */
517 if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) {
518 __u64 min_xid = 0;
519 /*
520 * resend for EINPROGRESS, allocate new xid to avoid reply
521 * reconstruction
522 */
523 spin_lock(&imp->imp_lock);
524 ptlrpc_assign_next_xid_nolock(request);
525 request->rq_mbits = request->rq_xid;
526 min_xid = ptlrpc_known_replied_xid(imp);
527 spin_unlock(&imp->imp_lock);
528
529 lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
530 DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS");
531 } else if (request->rq_bulk) {
532 ptlrpc_set_bulk_mbits(request);
533 lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
534 }
535
536 if (list_empty(&request->rq_unreplied_list) ||
537 request->rq_xid <= imp->imp_known_replied_xid) {
538 DEBUG_REQ(D_ERROR, request,
539 "xid: %llu, replied: %llu, list_empty:%d\n",
540 request->rq_xid, imp->imp_known_replied_xid,
541 list_empty(&request->rq_unreplied_list));
542 LBUG();
543 }
544
524 /** 545 /**
525 * For enabled AT all request should have AT_SUPPORT in the 546 * For enabled AT all request should have AT_SUPPORT in the
526 * FULL import state when OBD_CONNECT_AT is set 547 * FULL import state when OBD_CONNECT_AT is set
@@ -537,8 +558,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
537 mpflag = cfs_memory_pressure_get_and_set(); 558 mpflag = cfs_memory_pressure_get_and_set();
538 559
539 rc = sptlrpc_cli_wrap_request(request); 560 rc = sptlrpc_cli_wrap_request(request);
540 if (rc) 561 if (rc) {
562 /*
563 * set rq_sent so that this request is treated
564 * as a delayed send in the upper layers
565 */
566 if (rc == -ENOMEM)
567 request->rq_sent = ktime_get_seconds();
541 goto out; 568 goto out;
569 }
542 570
543 /* bulk register should be done after wrap_request() */ 571 /* bulk register should be done after wrap_request() */
544 if (request->rq_bulk) { 572 if (request->rq_bulk) {
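
ptl_send_rpc() now refuses to send anything whose XID is not strictly above imp_known_replied_xid, and stamps the lowest known-replied XID into EINPROGRESS resends. The sketch below is only an assumed model of that invariant (the real ptlrpc_known_replied_xid() is declared in ptlrpc_internal.h but its body is not shown in this diff): with the unreplied list kept sorted by XID, everything below its first entry must already have been replied to.

#include <stdio.h>
#include <stdint.h>

/* Toy model: the "known replied XID" is one less than the smallest XID
 * still awaiting a reply, or the last assigned XID when nothing is
 * outstanding.  List handling is replaced by a plain sorted array. */
static uint64_t toy_known_replied_xid(const uint64_t *unreplied, int n,
				      uint64_t last_assigned_xid)
{
	if (n == 0)
		return last_assigned_xid;	/* nothing outstanding */
	return unreplied[0] - 1;		/* array is sorted by XID */
}

int main(void)
{
	uint64_t unreplied[] = { 0x105, 0x108, 0x10a };

	printf("known replied xid: x%llx\n",
	       (unsigned long long)toy_known_replied_xid(unreplied, 3, 0x10a));
	return 0;
}
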
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index d88faf61e740..7b6ffb195834 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -82,16 +82,9 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
82 82
83static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) 83static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
84{ 84{
85 struct ptlrpc_nrs *nrs = policy->pol_nrs; 85 if (policy->pol_desc->pd_ops->op_policy_stop)
86
87 if (policy->pol_desc->pd_ops->op_policy_stop) {
88 spin_unlock(&nrs->nrs_lock);
89
90 policy->pol_desc->pd_ops->op_policy_stop(policy); 86 policy->pol_desc->pd_ops->op_policy_stop(policy);
91 87
92 spin_lock(&nrs->nrs_lock);
93 }
94
95 LASSERT(list_empty(&policy->pol_list_queued)); 88 LASSERT(list_empty(&policy->pol_list_queued));
96 LASSERT(policy->pol_req_queued == 0 && 89 LASSERT(policy->pol_req_queued == 0 &&
97 policy->pol_req_started == 0); 90 policy->pol_req_started == 0);
@@ -619,6 +612,12 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
619 goto out; 612 goto out;
620 } 613 }
621 614
615 if (policy->pol_state != NRS_POL_STATE_STARTED &&
616 policy->pol_state != NRS_POL_STATE_STOPPED) {
617 rc = -EAGAIN;
618 goto out;
619 }
620
622 switch (opc) { 621 switch (opc) {
623 /** 622 /**
624 * Unknown opcode, pass it down to the policy-specific control 623 * Unknown opcode, pass it down to the policy-specific control
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 871768511e8c..13f00b7cbbe5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -42,11 +42,14 @@
42 42
43#include "../../include/linux/libcfs/libcfs.h" 43#include "../../include/linux/libcfs/libcfs.h"
44 44
45#include "../include/obd_support.h" 45#include "../include/lustre/ll_fiemap.h"
46#include "../include/obd_class.h" 46
47#include "../include/llog_swab.h"
47#include "../include/lustre_net.h" 48#include "../include/lustre_net.h"
49#include "../include/lustre_swab.h"
48#include "../include/obd_cksum.h" 50#include "../include/obd_cksum.h"
49#include "../include/lustre/ll_fiemap.h" 51#include "../include/obd_support.h"
52#include "../include/obd_class.h"
50 53
51#include "ptlrpc_internal.h" 54#include "ptlrpc_internal.h"
52 55
@@ -942,6 +945,25 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg)
942} 945}
943EXPORT_SYMBOL(lustre_msg_get_opc); 946EXPORT_SYMBOL(lustre_msg_get_opc);
944 947
948__u16 lustre_msg_get_tag(struct lustre_msg *msg)
949{
950 switch (msg->lm_magic) {
951 case LUSTRE_MSG_MAGIC_V2: {
952 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
953
954 if (!pb) {
955 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
956 return 0;
957 }
958 return pb->pb_tag;
959 }
960 default:
961 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
962 return 0;
963 }
964}
965EXPORT_SYMBOL(lustre_msg_get_tag);
966
945__u64 lustre_msg_get_last_committed(struct lustre_msg *msg) 967__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
946{ 968{
947 switch (msg->lm_magic) { 969 switch (msg->lm_magic) {
@@ -1236,6 +1258,37 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1236 } 1258 }
1237} 1259}
1238 1260
1261void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid)
1262{
1263 switch (msg->lm_magic) {
1264 case LUSTRE_MSG_MAGIC_V2: {
1265 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1266
1267 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1268 pb->pb_last_xid = last_xid;
1269 return;
1270 }
1271 default:
1272 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1273 }
1274}
1275
1276void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1277{
1278 switch (msg->lm_magic) {
1279 case LUSTRE_MSG_MAGIC_V2: {
1280 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1281
1282 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1283 pb->pb_tag = tag;
1284 return;
1285 }
1286 default:
1287 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1288 }
1289}
1290EXPORT_SYMBOL(lustre_msg_set_tag);
1291
1239void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions) 1292void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1240{ 1293{
1241 switch (msg->lm_magic) { 1294 switch (msg->lm_magic) {
@@ -1373,6 +1426,21 @@ void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1373 } 1426 }
1374} 1427}
1375 1428
1429void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1430{
1431 switch (msg->lm_magic) {
1432 case LUSTRE_MSG_MAGIC_V2: {
1433 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1434
1435 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1436 pb->pb_mbits = mbits;
1437 return;
1438 }
1439 default:
1440 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1441 }
1442}
1443
1376void ptlrpc_request_set_replen(struct ptlrpc_request *req) 1444void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1377{ 1445{
1378 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER); 1446 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
@@ -1442,7 +1510,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
1442 __swab32s(&b->pb_opc); 1510 __swab32s(&b->pb_opc);
1443 __swab32s(&b->pb_status); 1511 __swab32s(&b->pb_status);
1444 __swab64s(&b->pb_last_xid); 1512 __swab64s(&b->pb_last_xid);
1445 __swab64s(&b->pb_last_seen); 1513 __swab16s(&b->pb_tag);
1446 __swab64s(&b->pb_last_committed); 1514 __swab64s(&b->pb_last_committed);
1447 __swab64s(&b->pb_transno); 1515 __swab64s(&b->pb_transno);
1448 __swab32s(&b->pb_flags); 1516 __swab32s(&b->pb_flags);
@@ -1456,7 +1524,12 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
1456 __swab64s(&b->pb_pre_versions[1]); 1524 __swab64s(&b->pb_pre_versions[1]);
1457 __swab64s(&b->pb_pre_versions[2]); 1525 __swab64s(&b->pb_pre_versions[2]);
1458 __swab64s(&b->pb_pre_versions[3]); 1526 __swab64s(&b->pb_pre_versions[3]);
1459 CLASSERT(offsetof(typeof(*b), pb_padding) != 0); 1527 __swab64s(&b->pb_mbits);
1528 CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
1529 CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
1530 CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
1531 CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
1532 CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
1460 /* While we need to maintain compatibility between 1533 /* While we need to maintain compatibility between
1461 * clients and servers without ptlrpc_body_v2 (< 2.3) 1534 * clients and servers without ptlrpc_body_v2 (< 2.3)
1462 * do not swab any fields beyond pb_jobid, as we are 1535 * do not swab any fields beyond pb_jobid, as we are
@@ -1492,8 +1565,12 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
1492 __swab32s(&ocd->ocd_max_easize); 1565 __swab32s(&ocd->ocd_max_easize);
1493 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) 1566 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1494 __swab64s(&ocd->ocd_maxbytes); 1567 __swab64s(&ocd->ocd_maxbytes);
1568 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1569 __swab16s(&ocd->ocd_maxmodrpcs);
1570 CLASSERT(offsetof(typeof(*ocd), padding0));
1495 CLASSERT(offsetof(typeof(*ocd), padding1) != 0); 1571 CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
1496 CLASSERT(offsetof(typeof(*ocd), padding2) != 0); 1572 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1573 __swab64s(&ocd->ocd_connect_flags2);
1497 CLASSERT(offsetof(typeof(*ocd), padding3) != 0); 1574 CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
1498 CLASSERT(offsetof(typeof(*ocd), padding4) != 0); 1575 CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
1499 CLASSERT(offsetof(typeof(*ocd), padding5) != 0); 1576 CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
@@ -1666,7 +1743,7 @@ void lustre_swab_mdt_body(struct mdt_body *b)
1666 __swab32s(&b->mbo_eadatasize); 1743 __swab32s(&b->mbo_eadatasize);
1667 __swab32s(&b->mbo_aclsize); 1744 __swab32s(&b->mbo_aclsize);
1668 __swab32s(&b->mbo_max_mdsize); 1745 __swab32s(&b->mbo_max_mdsize);
1669 __swab32s(&b->mbo_max_cookiesize); 1746 CLASSERT(offsetof(typeof(*b), mbo_unused3));
1670 __swab32s(&b->mbo_uid_h); 1747 __swab32s(&b->mbo_uid_h);
1671 __swab32s(&b->mbo_gid_h); 1748 __swab32s(&b->mbo_gid_h);
1672 CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0); 1749 CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
@@ -1675,9 +1752,10 @@ void lustre_swab_mdt_body(struct mdt_body *b)
1675void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b) 1752void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
1676{ 1753{
1677 /* handle is opaque */ 1754 /* handle is opaque */
1678 __swab64s(&b->ioepoch); 1755 /* mio_handle is opaque */
1679 __swab32s(&b->flags); 1756 CLASSERT(offsetof(typeof(*b), mio_unused1));
1680 CLASSERT(offsetof(typeof(*b), padding) != 0); 1757 CLASSERT(offsetof(typeof(*b), mio_unused2));
1758 CLASSERT(offsetof(typeof(*b), mio_padding));
1681} 1759}
1682 1760
1683void lustre_swab_mgs_target_info(struct mgs_target_info *mti) 1761void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
@@ -1772,7 +1850,7 @@ void lustre_swab_fid2path(struct getinfo_fid2path *gf)
1772} 1850}
1773EXPORT_SYMBOL(lustre_swab_fid2path); 1851EXPORT_SYMBOL(lustre_swab_fid2path);
1774 1852
1775static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent) 1853static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
1776{ 1854{
1777 __swab64s(&fm_extent->fe_logical); 1855 __swab64s(&fm_extent->fe_logical);
1778 __swab64s(&fm_extent->fe_physical); 1856 __swab64s(&fm_extent->fe_physical);
@@ -1781,7 +1859,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
1781 __swab32s(&fm_extent->fe_device); 1859 __swab32s(&fm_extent->fe_device);
1782} 1860}
1783 1861
1784void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) 1862void lustre_swab_fiemap(struct fiemap *fiemap)
1785{ 1863{
1786 __u32 i; 1864 __u32 i;
1787 1865
@@ -1938,7 +2016,7 @@ static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
1938 __swab64s(&id->name[i]); 2016 __swab64s(&id->name[i]);
1939} 2017}
1940 2018
1941static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) 2019static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
1942{ 2020{
1943 /* the lock data is a union and the first two fields are always an 2021 /* the lock data is a union and the first two fields are always an
1944 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock 2022 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2062,8 +2140,6 @@ static void dump_obdo(struct obdo *oa)
2062 if (valid & OBD_MD_FLHANDLE) 2140 if (valid & OBD_MD_FLHANDLE)
2063 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n", 2141 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
2064 oa->o_handle.cookie); 2142 oa->o_handle.cookie);
2065 if (valid & OBD_MD_FLCOOKIE)
2066 CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
2067} 2143}
2068 2144
2069void dump_ost_body(struct ost_body *ob) 2145void dump_ost_body(struct ost_body *ob)
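
lustre_msg_get_tag(), lustre_msg_set_tag() and lustre_msg_set_mbits() above all follow the same idiom: every on-wire field is reached through a switch on lm_magic, so a message with an unexpected magic fails loudly instead of being misread. A toy reproduction of that accessor shape (types, field layout and the magic value are stand-ins, not the wire format):

#include <stdio.h>
#include <stdint.h>

#define TOY_MSG_MAGIC_V2 0x2u	/* illustrative stand-in, not the real magic */

struct toy_body { uint16_t pb_tag; };
struct toy_msg  { uint32_t lm_magic; struct toy_body body; };

static uint16_t toy_msg_get_tag(const struct toy_msg *msg)
{
	switch (msg->lm_magic) {
	case TOY_MSG_MAGIC_V2:
		return msg->body.pb_tag;
	default:
		/* unknown magic: complain and return a safe default */
		fprintf(stderr, "incorrect message magic: %08x\n", msg->lm_magic);
		return 0;
	}
}

int main(void)
{
	struct toy_msg msg = { .lm_magic = TOY_MSG_MAGIC_V2,
			       .body = { .pb_tag = 7 } };

	printf("tag = %u\n", toy_msg_get_tag(&msg));
	return 0;
}
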
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 5b9fb11c0b6b..94e9fa85d774 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -43,6 +43,8 @@
43void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, 43void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
44 int mdidx) 44 int mdidx)
45{ 45{
46 int offset = mdidx * LNET_MAX_IOV;
47
46 CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON); 48 CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
47 49
48 LASSERT(mdidx < desc->bd_md_max_brw); 50 LASSERT(mdidx < desc->bd_md_max_brw);
@@ -50,23 +52,20 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
50 LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | 52 LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
51 LNET_MD_PHYS))); 53 LNET_MD_PHYS)));
52 54
53 md->options |= LNET_MD_KIOV;
54 md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV); 55 md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
55 md->length = min_t(unsigned int, LNET_MAX_IOV, md->length); 56 md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
56 if (desc->bd_enc_iov)
57 md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
58 else
59 md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
60}
61
62void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
63 int pageoffset, int len)
64{
65 lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
66
67 kiov->bv_page = page;
68 kiov->bv_offset = pageoffset;
69 kiov->bv_len = len;
70 57
71 desc->bd_iov_count++; 58 if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
59 md->options |= LNET_MD_KIOV;
60 if (GET_ENC_KIOV(desc))
61 md->start = &BD_GET_ENC_KIOV(desc, offset);
62 else
63 md->start = &BD_GET_KIOV(desc, offset);
64 } else {
65 md->options |= LNET_MD_IOVEC;
66 if (GET_ENC_KVEC(desc))
67 md->start = &BD_GET_ENC_KVEC(desc, offset);
68 else
69 md->start = &BD_GET_KVEC(desc, offset);
70 }
72} 71}
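
The reworked ptlrpc_fill_bulk_md() computes, for MD index mdidx, how many fragments that MD covers: at most LNET_MAX_IOV, with the last MD taking whatever is left over. A stand-alone sketch of just that clamp (again with an illustrative constant in place of LNET_MAX_IOV):

#include <stdio.h>

#define FAKE_LNET_MAX_IOV 256	/* illustrative only */

/* mirrors the length clamp above: each MD covers at most LNET_MAX_IOV
 * fragments, and the final MD gets the remainder */
static int toy_md_length(int iov_count, int mdidx)
{
	int len = iov_count - mdidx * FAKE_LNET_MAX_IOV;

	if (len < 0)
		len = 0;
	if (len > FAKE_LNET_MAX_IOV)
		len = FAKE_LNET_MAX_IOV;
	return len;
}

int main(void)
{
	int iov_count = 600, mdidx;

	/* 600 fragments split across 3 MDs: 256, 256, 88 */
	for (mdidx = 0; mdidx < 3; mdidx++)
		printf("md %d covers %d fragments\n",
		       mdidx, toy_md_length(iov_count, mdidx));
	return 0;
}
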
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index f14d193287da..e0f859ca6223 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -55,8 +55,11 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc);
55/* client.c */ 55/* client.c */
56void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, 56void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
57 unsigned int service_time); 57 unsigned int service_time);
58struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, 58struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
59 unsigned type, unsigned portal); 59 unsigned int max_brw,
60 enum ptlrpc_bulk_op_type type,
61 unsigned int portal,
62 const struct ptlrpc_bulk_frag_ops *ops);
60int ptlrpc_request_cache_init(void); 63int ptlrpc_request_cache_init(void);
61void ptlrpc_request_cache_fini(void); 64void ptlrpc_request_cache_fini(void);
62struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags); 65struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
@@ -67,6 +70,10 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
67int ptlrpc_expired_set(void *data); 70int ptlrpc_expired_set(void *data);
68int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); 71int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
69void ptlrpc_resend_req(struct ptlrpc_request *request); 72void ptlrpc_resend_req(struct ptlrpc_request *request);
73void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
74void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
75__u64 ptlrpc_known_replied_xid(struct obd_import *imp);
76void ptlrpc_add_unreplied(struct ptlrpc_request *req);
70 77
71/* events.c */ 78/* events.c */
72int ptlrpc_init_portals(void); 79int ptlrpc_init_portals(void);
@@ -226,8 +233,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
226/* pers.c */ 233/* pers.c */
227void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, 234void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
228 int mdcnt); 235 int mdcnt);
229void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
230 int pageoffset, int len);
231 236
232/* pack_generic.c */ 237/* pack_generic.c */
233struct ptlrpc_reply_state * 238struct ptlrpc_reply_state *
@@ -322,6 +327,7 @@ static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
322 327
323 INIT_LIST_HEAD(&cr->cr_set_chain); 328 INIT_LIST_HEAD(&cr->cr_set_chain);
324 INIT_LIST_HEAD(&cr->cr_ctx_chain); 329 INIT_LIST_HEAD(&cr->cr_ctx_chain);
330 INIT_LIST_HEAD(&cr->cr_unreplied_list);
325 init_waitqueue_head(&cr->cr_reply_waitq); 331 init_waitqueue_head(&cr->cr_reply_waitq);
326 init_waitqueue_head(&cr->cr_set_waitq); 332 init_waitqueue_head(&cr->cr_set_waitq);
327} 333}
@@ -338,4 +344,24 @@ static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
338 INIT_LIST_HEAD(&sr->sr_hist_list); 344 INIT_LIST_HEAD(&sr->sr_hist_list);
339} 345}
340 346
347static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req)
348{
349 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT ||
350 lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT ||
351 lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT)
352 return true;
353 else
354 return false;
355}
356
357static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req)
358{
359 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT ||
360 lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT ||
361 lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT)
362 return true;
363 else
364 return false;
365}
366
341#endif /* PTLRPC_INTERNAL_H */ 367#endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 405faf0dc9fc..c00449036884 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -111,7 +111,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
111 * all of it's requests being replayed, it's safe to 111 * all of it's requests being replayed, it's safe to
112 * use a cursor to accelerate the search 112 * use a cursor to accelerate the search
113 */ 113 */
114 imp->imp_replay_cursor = imp->imp_replay_cursor->next; 114 if (!imp->imp_resend_replay ||
115 imp->imp_replay_cursor == &imp->imp_committed_list)
116 imp->imp_replay_cursor = imp->imp_replay_cursor->next;
115 117
116 while (imp->imp_replay_cursor != 118 while (imp->imp_replay_cursor !=
117 &imp->imp_committed_list) { 119 &imp->imp_committed_list) {
@@ -155,10 +157,24 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
155 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); 157 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
156 158
157 spin_lock(&imp->imp_lock); 159 spin_lock(&imp->imp_lock);
160 /* The resend replay request may have been removed from the
161 * unreplied list.
162 */
163 if (req && imp->imp_resend_replay &&
164 list_empty(&req->rq_unreplied_list)) {
165 ptlrpc_add_unreplied(req);
166 imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
167 }
168
158 imp->imp_resend_replay = 0; 169 imp->imp_resend_replay = 0;
159 spin_unlock(&imp->imp_lock); 170 spin_unlock(&imp->imp_lock);
160 171
161 if (req) { 172 if (req) {
 173		/* The request should have been added back to the unreplied list
174 * by ptlrpc_prepare_replay().
175 */
176 LASSERT(!list_empty(&req->rq_unreplied_list));
177
162 rc = ptlrpc_replay_req(req); 178 rc = ptlrpc_replay_req(req);
163 if (rc) { 179 if (rc) {
164 CERROR("recovery replay error %d for req %llu\n", 180 CERROR("recovery replay error %d for req %llu\n",
@@ -194,7 +210,13 @@ int ptlrpc_resend(struct obd_import *imp)
194 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, 210 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
195 "req %p bad\n", req); 211 "req %p bad\n", req);
196 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); 212 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
197 if (!ptlrpc_no_resend(req)) 213
214 /*
215 * If the request is allowed to be sent during replay and it
 216		 * has not timed out yet, then it does not need to be resent.
217 */
218 if (!ptlrpc_no_resend(req) &&
219 (req->rq_timedout || !req->rq_allow_replay))
198 ptlrpc_resend_req(req); 220 ptlrpc_resend_req(req);
199 } 221 }
200 spin_unlock(&imp->imp_lock); 222 spin_unlock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index a7416cd9ac71..e860df7c45a2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -379,7 +379,7 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
379 379
380 if (!req->rq_cli_ctx) { 380 if (!req->rq_cli_ctx) {
381 CERROR("req %p: fail to get context\n", req); 381 CERROR("req %p: fail to get context\n", req);
382 return -ENOMEM; 382 return -ECONNREFUSED;
383 } 383 }
384 384
385 return 0; 385 return 0;
@@ -515,6 +515,13 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
515 515
516 set_current_state(TASK_INTERRUPTIBLE); 516 set_current_state(TASK_INTERRUPTIBLE);
517 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC)); 517 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
518 } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
519 /*
520 * new ctx not up to date yet
521 */
522 CDEBUG(D_SEC,
523 "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
524 newctx, newctx->cc_flags);
518 } else { 525 } else {
519 /* 526 /*
520 * it's possible newctx == oldctx if we're switching 527 * it's possible newctx == oldctx if we're switching
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index b2cc5ea6cb93..2fe9085e2034 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -108,6 +108,7 @@ static struct ptlrpc_enc_page_pool {
108 unsigned long epp_st_lowfree; /* lowest free pages reached */ 108 unsigned long epp_st_lowfree; /* lowest free pages reached */
109 unsigned int epp_st_max_wqlen; /* highest waitqueue length */ 109 unsigned int epp_st_max_wqlen; /* highest waitqueue length */
110 unsigned long epp_st_max_wait; /* in jiffies */ 110 unsigned long epp_st_max_wait; /* in jiffies */
111 unsigned long epp_st_outofmem; /* # of out of mem requests */
111 /* 112 /*
112 * pointers to pools 113 * pointers to pools
113 */ 114 */
@@ -139,7 +140,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
139 "cache missing: %lu\n" 140 "cache missing: %lu\n"
140 "low free mark: %lu\n" 141 "low free mark: %lu\n"
141 "max waitqueue depth: %u\n" 142 "max waitqueue depth: %u\n"
142 "max wait time: %ld/%lu\n", 143 "max wait time: %ld/%lu\n"
144 "out of mem: %lu\n",
143 totalram_pages, 145 totalram_pages,
144 PAGES_PER_POOL, 146 PAGES_PER_POOL,
145 page_pools.epp_max_pages, 147 page_pools.epp_max_pages,
@@ -158,7 +160,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
158 page_pools.epp_st_lowfree, 160 page_pools.epp_st_lowfree,
159 page_pools.epp_st_max_wqlen, 161 page_pools.epp_st_max_wqlen,
160 page_pools.epp_st_max_wait, 162 page_pools.epp_st_max_wait,
161 msecs_to_jiffies(MSEC_PER_SEC)); 163 msecs_to_jiffies(MSEC_PER_SEC),
164 page_pools.epp_st_outofmem);
162 165
163 spin_unlock(&page_pools.epp_lock); 166 spin_unlock(&page_pools.epp_lock);
164 167
@@ -306,12 +309,30 @@ static inline void enc_pools_wakeup(void)
306 } 309 }
307} 310}
308 311
312/*
313 * Export the number of free pages in the pool
314 */
315int get_free_pages_in_pool(void)
316{
317 return page_pools.epp_free_pages;
318}
319
320/*
321 * Let outside world know if enc_pool full capacity is reached
322 */
323int pool_is_at_full_capacity(void)
324{
325 return (page_pools.epp_total_pages == page_pools.epp_max_pages);
326}
327
309void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) 328void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
310{ 329{
311 int p_idx, g_idx; 330 int p_idx, g_idx;
312 int i; 331 int i;
313 332
314 if (!desc->bd_enc_iov) 333 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
334
335 if (!GET_ENC_KIOV(desc))
315 return; 336 return;
316 337
317 LASSERT(desc->bd_iov_count > 0); 338 LASSERT(desc->bd_iov_count > 0);
@@ -326,12 +347,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
326 LASSERT(page_pools.epp_pools[p_idx]); 347 LASSERT(page_pools.epp_pools[p_idx]);
327 348
328 for (i = 0; i < desc->bd_iov_count; i++) { 349 for (i = 0; i < desc->bd_iov_count; i++) {
329 LASSERT(desc->bd_enc_iov[i].bv_page); 350 LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
330 LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]); 351 LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
331 LASSERT(!page_pools.epp_pools[p_idx][g_idx]); 352 LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
332 353
333 page_pools.epp_pools[p_idx][g_idx] = 354 page_pools.epp_pools[p_idx][g_idx] =
334 desc->bd_enc_iov[i].bv_page; 355 BD_GET_ENC_KIOV(desc, i).bv_page;
335 356
336 if (++g_idx == PAGES_PER_POOL) { 357 if (++g_idx == PAGES_PER_POOL) {
337 p_idx++; 358 p_idx++;
@@ -345,8 +366,8 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
345 366
346 spin_unlock(&page_pools.epp_lock); 367 spin_unlock(&page_pools.epp_lock);
347 368
348 kfree(desc->bd_enc_iov); 369 kfree(GET_ENC_KIOV(desc));
349 desc->bd_enc_iov = NULL; 370 GET_ENC_KIOV(desc) = NULL;
350 } 371 }
351 372
352 static inline void enc_pools_alloc(void) 373 static inline void enc_pools_alloc(void)
@@ -404,6 +425,7 @@ int sptlrpc_enc_pool_init(void)
404 page_pools.epp_st_lowfree = 0; 425 page_pools.epp_st_lowfree = 0;
405 page_pools.epp_st_max_wqlen = 0; 426 page_pools.epp_st_max_wqlen = 0;
406 page_pools.epp_st_max_wait = 0; 427 page_pools.epp_st_max_wait = 0;
428 page_pools.epp_st_outofmem = 0;
407 429
408 enc_pools_alloc(); 430 enc_pools_alloc();
409 if (!page_pools.epp_pools) 431 if (!page_pools.epp_pools)
@@ -431,13 +453,14 @@ void sptlrpc_enc_pool_fini(void)
431 453
432 if (page_pools.epp_st_access > 0) { 454 if (page_pools.epp_st_access > 0) {
433 CDEBUG(D_SEC, 455 CDEBUG(D_SEC,
434 "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n", 456 "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n",
435 page_pools.epp_st_max_pages, page_pools.epp_st_grows, 457 page_pools.epp_st_max_pages, page_pools.epp_st_grows,
436 page_pools.epp_st_grow_fails, 458 page_pools.epp_st_grow_fails,
437 page_pools.epp_st_shrinks, page_pools.epp_st_access, 459 page_pools.epp_st_shrinks, page_pools.epp_st_access,
438 page_pools.epp_st_missings, page_pools.epp_st_max_wqlen, 460 page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
439 page_pools.epp_st_max_wait, 461 page_pools.epp_st_max_wait,
440 msecs_to_jiffies(MSEC_PER_SEC)); 462 msecs_to_jiffies(MSEC_PER_SEC),
463 page_pools.epp_st_outofmem);
441 } 464 }
442 } 465 }
443 466
@@ -520,10 +543,11 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
520 hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]); 543 hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
521 544
522 for (i = 0; i < desc->bd_iov_count; i++) { 545 for (i = 0; i < desc->bd_iov_count; i++) {
523 cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page, 546 cfs_crypto_hash_update_page(hdesc,
524 desc->bd_iov[i].bv_offset & 547 BD_GET_KIOV(desc, i).bv_page,
548 BD_GET_KIOV(desc, i).bv_offset &
525 ~PAGE_MASK, 549 ~PAGE_MASK,
526 desc->bd_iov[i].bv_len); 550 BD_GET_KIOV(desc, i).bv_len);
527 } 551 }
528 552
529 if (hashsize > buflen) { 553 if (hashsize > buflen) {
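
The sec_bulk.c changes above add a per-pool out-of-memory counter (epp_st_outofmem) and two small accessors, get_free_pages_in_pool() and pool_is_at_full_capacity(), so callers can inspect pool occupancy before asking for encryption pages. A standalone sketch of the same bookkeeping idea follows; the struct, field names and numbers are made up for illustration and are not the Lustre encryption-pool layout.

/* Minimal userspace sketch of pool-occupancy accessors; not the Lustre API. */
#include <stdio.h>

struct page_pool_stats {
	unsigned long total_pages;   /* pages currently owned by the pool */
	unsigned long max_pages;     /* hard cap the pool may grow to */
	unsigned long free_pages;    /* pages available for new requests */
	unsigned long st_outofmem;   /* requests failed for lack of pages */
};

static struct page_pool_stats pool = { 64, 128, 16, 0 };

static unsigned long pool_free_pages(void)
{
	return pool.free_pages;
}

static int pool_is_full(void)
{
	return pool.total_pages == pool.max_pages;
}

int main(void)
{
	if (!pool_free_pages() && pool_is_full())
		pool.st_outofmem++;	/* cannot grow, record the failure */

	printf("free=%lu full=%d outofmem=%lu\n",
	       pool_free_pages(), pool_is_full(), pool.st_outofmem);
	return 0;
}
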
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index cd305bcb334a..c5e7a2309fce 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -153,14 +153,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
153 char *ptr; 153 char *ptr;
154 unsigned int off, i; 154 unsigned int off, i;
155 155
156 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
157
156 for (i = 0; i < desc->bd_iov_count; i++) { 158 for (i = 0; i < desc->bd_iov_count; i++) {
157 if (desc->bd_iov[i].bv_len == 0) 159 if (!BD_GET_KIOV(desc, i).bv_len)
158 continue; 160 continue;
159 161
160 ptr = kmap(desc->bd_iov[i].bv_page); 162 ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
161 off = desc->bd_iov[i].bv_offset & ~PAGE_MASK; 163 off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
162 ptr[off] ^= 0x1; 164 ptr[off] ^= 0x1;
163 kunmap(desc->bd_iov[i].bv_page); 165 kunmap(BD_GET_KIOV(desc, i).bv_page);
164 return; 166 return;
165 } 167 }
166 } 168 }
@@ -352,11 +354,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
352 354
353 /* fix the actual data size */ 355 /* fix the actual data size */
354 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { 356 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
355 if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) { 357 struct bio_vec bv_desc = BD_GET_KIOV(desc, i);
356 desc->bd_iov[i].bv_len = 358
357 desc->bd_nob_transferred - nob; 359 if (bv_desc.bv_len + nob > desc->bd_nob_transferred)
358 } 360 bv_desc.bv_len = desc->bd_nob_transferred - nob;
359 nob += desc->bd_iov[i].bv_len; 361 nob += bv_desc.bv_len;
360 } 362 }
361 363
362 rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, 364 rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
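
In the plain_cli_unwrap_bulk hunk above, the per-iov accesses now go through a local struct bio_vec taken from BD_GET_KIOV(). When reading it, keep in mind that struct assignment in C copies by value, so a field written on the local copy is not reflected in the descriptor's own element unless it is stored back. The sketch below shows only that language rule, with a simplified stand-in type rather than the kernel's bio_vec.

/* C struct-assignment semantics with a hypothetical kiov type. */
#include <assert.h>
#include <stddef.h>

struct kiov { size_t len; };

int main(void)
{
	struct kiov iov[1] = { { .len = 4096 } };

	struct kiov bv = iov[0];   /* copies the element by value */
	bv.len = 512;              /* modifies only the local copy */
	assert(iov[0].len == 4096);

	iov[0].len = 512;          /* writing back means touching the array */
	assert(iov[0].len == 512);
	return 0;
}
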
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f39308eebb..70c70558e177 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -343,9 +343,9 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
343 struct ptlrpc_service_conf *conf) 343 struct ptlrpc_service_conf *conf)
344 { 344 {
345 struct ptlrpc_service_thr_conf *tc = &conf->psc_thr; 345 struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
346 unsigned init; 346 unsigned int init;
347 unsigned total; 347 unsigned int total;
348 unsigned nthrs; 348 unsigned int nthrs;
349 int weight; 349 int weight;
350 350
351 /* 351 /*
@@ -2541,8 +2541,9 @@ int ptlrpc_hr_init(void)
2541 2541
2542 hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i); 2542 hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
2543 hrp->hrp_nthrs /= weight; 2543 hrp->hrp_nthrs /= weight;
2544 if (hrp->hrp_nthrs == 0)
2545 hrp->hrp_nthrs = 1;
2544 2546
2545 LASSERT(hrp->hrp_nthrs > 0);
2546 hrp->hrp_thrs = 2547 hrp->hrp_thrs =
2547 kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, 2548 kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
2548 cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, 2549 cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
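
The ptlrpc_hr_init hunk above replaces an LASSERT with a clamp: after integer division by the weight, hrp_nthrs can legitimately round down to zero, so it is bumped to one instead of tripping an assertion. A tiny illustration of the rounding problem and the clamp, with invented numbers:

/* Integer division can yield zero threads; clamp to at least one. */
#include <stdio.h>

int main(void)
{
	unsigned int cpt_weight = 1;   /* CPUs in this partition (example) */
	unsigned int weight = 4;       /* divisor, e.g. hyper-threads per core */
	unsigned int nthrs;

	nthrs = cpt_weight / weight;   /* integer division: 1 / 4 == 0 */
	if (nthrs == 0)
		nthrs = 1;             /* always start at least one thread */

	printf("nthrs=%u\n", nthrs);
	return 0;
}
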
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index b05b1f935e4c..a04e36cf6dd4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -195,49 +195,29 @@ void lustre_assert_wire_constants(void)
195 LASSERTF(REINT_MAX == 10, "found %lld\n", 195 LASSERTF(REINT_MAX == 10, "found %lld\n",
196 (long long)REINT_MAX); 196 (long long)REINT_MAX);
197 LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n", 197 LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
198 (unsigned)DISP_IT_EXECD); 198 (unsigned int)DISP_IT_EXECD);
199 LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n", 199 LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
200 (unsigned)DISP_LOOKUP_EXECD); 200 (unsigned int)DISP_LOOKUP_EXECD);
201 LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n", 201 LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
202 (unsigned)DISP_LOOKUP_NEG); 202 (unsigned int)DISP_LOOKUP_NEG);
203 LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n", 203 LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
204 (unsigned)DISP_LOOKUP_POS); 204 (unsigned int)DISP_LOOKUP_POS);
205 LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n", 205 LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
206 (unsigned)DISP_OPEN_CREATE); 206 (unsigned int)DISP_OPEN_CREATE);
207 LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n", 207 LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
208 (unsigned)DISP_OPEN_OPEN); 208 (unsigned int)DISP_OPEN_OPEN);
209 LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n", 209 LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
210 (unsigned)DISP_ENQ_COMPLETE); 210 (unsigned int)DISP_ENQ_COMPLETE);
211 LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n", 211 LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
212 (unsigned)DISP_ENQ_OPEN_REF); 212 (unsigned int)DISP_ENQ_OPEN_REF);
213 LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n", 213 LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
214 (unsigned)DISP_ENQ_CREATE_REF); 214 (unsigned int)DISP_ENQ_CREATE_REF);
215 LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n", 215 LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
216 (unsigned)DISP_OPEN_LOCK); 216 (unsigned int)DISP_OPEN_LOCK);
217 LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n", 217 LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
218 (long long)MDS_STATUS_CONN); 218 (long long)MDS_STATUS_CONN);
219 LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n", 219 LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
220 (long long)MDS_STATUS_LOV); 220 (long long)MDS_STATUS_LOV);
221 LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
222 (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
223 LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
224 (unsigned)MF_SOM_CHANGE);
225 LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
226 (unsigned)MF_EPOCH_OPEN);
227 LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
228 (unsigned)MF_EPOCH_CLOSE);
229 LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
230 (unsigned)MF_MDC_CANCEL_FID1);
231 LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
232 (unsigned)MF_MDC_CANCEL_FID2);
233 LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
234 (unsigned)MF_MDC_CANCEL_FID3);
235 LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
236 (unsigned)MF_MDC_CANCEL_FID4);
237 LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
238 (unsigned)MF_SOM_AU);
239 LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
240 (unsigned)MF_GETATTR_LOCK);
241 LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n", 221 LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
242 (long long)MDS_ATTR_MODE); 222 (long long)MDS_ATTR_MODE);
243 LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n", 223 LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
@@ -420,15 +400,13 @@ void lustre_assert_wire_constants(void)
420 LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n", 400 LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
421 (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid)); 401 (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
422 LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n", 402 LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
423 (unsigned)LMAI_RELEASED); 403 (unsigned int)LMAI_RELEASED);
424 LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n", 404 LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
425 (unsigned)LMAC_HSM); 405 (unsigned int)LMAC_HSM);
426 LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
427 (unsigned)LMAC_SOM);
428 LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n", 406 LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
429 (unsigned)LMAC_NOT_IN_OI); 407 (unsigned int)LMAC_NOT_IN_OI);
430 LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", 408 LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
431 (unsigned)LMAC_FID_ON_OST); 409 (unsigned int)LMAC_FID_ON_OST);
432 410
433 /* Checks for struct ost_id */ 411 /* Checks for struct ost_id */
434 LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", 412 LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -478,11 +456,11 @@ void lustre_assert_wire_constants(void)
478 LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n", 456 LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
479 (long long)FID_SEQ_LOV_DEFAULT); 457 (long long)FID_SEQ_LOV_DEFAULT);
480 LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n", 458 LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
481 (unsigned)FID_OID_SPECIAL_BFL); 459 (unsigned int)FID_OID_SPECIAL_BFL);
482 LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n", 460 LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
483 (unsigned)FID_OID_DOT_LUSTRE); 461 (unsigned int)FID_OID_DOT_LUSTRE);
484 LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n", 462 LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
485 (unsigned)FID_OID_DOT_LUSTRE_OBF); 463 (unsigned int)FID_OID_DOT_LUSTRE_OBF);
486 464
487 /* Checks for struct lu_dirent */ 465 /* Checks for struct lu_dirent */
488 LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n", 466 LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
@@ -512,11 +490,11 @@ void lustre_assert_wire_constants(void)
512 LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n", 490 LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
513 (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0])); 491 (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
514 LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n", 492 LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
515 (unsigned)LUDA_FID); 493 (unsigned int)LUDA_FID);
516 LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n", 494 LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
517 (unsigned)LUDA_TYPE); 495 (unsigned int)LUDA_TYPE);
518 LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n", 496 LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
519 (unsigned)LUDA_64BITHASH); 497 (unsigned int)LUDA_64BITHASH);
520 498
521 /* Checks for struct luda_type */ 499 /* Checks for struct luda_type */
522 LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n", 500 LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
@@ -635,10 +613,18 @@ void lustre_assert_wire_constants(void)
635 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid)); 613 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
636 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n", 614 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
637 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid)); 615 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
638 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n", 616 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n",
639 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen)); 617 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag));
640 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n", 618 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n",
641 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen)); 619 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag));
620 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n",
621 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0));
622 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n",
623 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0));
624 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n",
625 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1));
626 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n",
627 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1));
642 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n", 628 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
643 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed)); 629 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
644 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n", 630 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
@@ -680,10 +666,22 @@ void lustre_assert_wire_constants(void)
680 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions)); 666 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
681 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n", 667 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
682 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions)); 668 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
683 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n", 669 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n",
684 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding)); 670 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits));
685 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n", 671 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n",
686 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding)); 672 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits));
673 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n",
674 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0));
675 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n",
676 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0));
677 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n",
678 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1));
679 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n",
680 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1));
681 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n",
682 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2));
683 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n",
684 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2));
687 CLASSERT(LUSTRE_JOBID_SIZE == 32); 685 CLASSERT(LUSTRE_JOBID_SIZE == 32);
688 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n", 686 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
689 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid)); 687 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
@@ -713,10 +711,18 @@ void lustre_assert_wire_constants(void)
713 (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid)); 711 (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
714 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n", 712 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
715 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid)); 713 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
716 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n", 714 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n",
717 (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen)); 715 (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag));
718 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n", 716 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n",
719 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen)); 717 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag));
718 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n",
719 (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0));
720 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n",
721 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0));
722 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n",
723 (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1));
724 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n",
725 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1));
720 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n", 726 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
721 (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed)); 727 (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
722 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n", 728 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
@@ -757,10 +763,22 @@ void lustre_assert_wire_constants(void)
757 (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions)); 763 (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
758 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n", 764 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
759 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions)); 765 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
760 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n", 766 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n",
761 (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding)); 767 (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits));
762 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n", 768 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n",
763 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding)); 769 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits));
770 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n",
771 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0));
772 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n",
773 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0));
774 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n",
775 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1));
776 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n",
777 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1));
778 LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n",
779 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2));
780 LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n",
781 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2));
764 LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n", 782 LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
765 (long long)MSG_PTLRPC_BODY_OFF); 783 (long long)MSG_PTLRPC_BODY_OFF);
766 LASSERTF(REQ_REC_OFF == 1, "found %lld\n", 784 LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
@@ -802,41 +820,41 @@ void lustre_assert_wire_constants(void)
802 LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n", 820 LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
803 (long long)MSGHDR_CKSUM_INCOMPAT18); 821 (long long)MSGHDR_CKSUM_INCOMPAT18);
804 LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n", 822 LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
805 (unsigned)MSG_OP_FLAG_MASK); 823 (unsigned int)MSG_OP_FLAG_MASK);
806 LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n", 824 LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
807 (long long)MSG_OP_FLAG_SHIFT); 825 (long long)MSG_OP_FLAG_SHIFT);
808 LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n", 826 LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
809 (unsigned)MSG_GEN_FLAG_MASK); 827 (unsigned int)MSG_GEN_FLAG_MASK);
810 LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n", 828 LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
811 (unsigned)MSG_LAST_REPLAY); 829 (unsigned int)MSG_LAST_REPLAY);
812 LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n", 830 LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
813 (unsigned)MSG_RESENT); 831 (unsigned int)MSG_RESENT);
814 LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n", 832 LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
815 (unsigned)MSG_REPLAY); 833 (unsigned int)MSG_REPLAY);
816 LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n", 834 LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
817 (unsigned)MSG_DELAY_REPLAY); 835 (unsigned int)MSG_DELAY_REPLAY);
818 LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n", 836 LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
819 (unsigned)MSG_VERSION_REPLAY); 837 (unsigned int)MSG_VERSION_REPLAY);
820 LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n", 838 LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
821 (unsigned)MSG_REQ_REPLAY_DONE); 839 (unsigned int)MSG_REQ_REPLAY_DONE);
822 LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n", 840 LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
823 (unsigned)MSG_LOCK_REPLAY_DONE); 841 (unsigned int)MSG_LOCK_REPLAY_DONE);
824 LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n", 842 LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
825 (unsigned)MSG_CONNECT_RECOVERING); 843 (unsigned int)MSG_CONNECT_RECOVERING);
826 LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n", 844 LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
827 (unsigned)MSG_CONNECT_RECONNECT); 845 (unsigned int)MSG_CONNECT_RECONNECT);
828 LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n", 846 LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
829 (unsigned)MSG_CONNECT_REPLAYABLE); 847 (unsigned int)MSG_CONNECT_REPLAYABLE);
830 LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n", 848 LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
831 (unsigned)MSG_CONNECT_LIBCLIENT); 849 (unsigned int)MSG_CONNECT_LIBCLIENT);
832 LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n", 850 LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
833 (unsigned)MSG_CONNECT_INITIAL); 851 (unsigned int)MSG_CONNECT_INITIAL);
834 LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n", 852 LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
835 (unsigned)MSG_CONNECT_ASYNC); 853 (unsigned int)MSG_CONNECT_ASYNC);
836 LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n", 854 LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
837 (unsigned)MSG_CONNECT_NEXT_VER); 855 (unsigned int)MSG_CONNECT_NEXT_VER);
838 LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n", 856 LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
839 (unsigned)MSG_CONNECT_TRANSNO); 857 (unsigned int)MSG_CONNECT_TRANSNO);
840 858
841 /* Checks for struct obd_connect_data */ 859 /* Checks for struct obd_connect_data */
842 LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n", 860 LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
@@ -905,14 +923,22 @@ void lustre_assert_wire_constants(void)
905 (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes)); 923 (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
906 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n", 924 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
907 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes)); 925 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
908 LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n", 926 LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n",
927 (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs));
928 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n",
929 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs));
930 LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n",
931 (long long)(int)offsetof(struct obd_connect_data, padding0));
932 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n",
933 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0));
934 LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n",
909 (long long)(int)offsetof(struct obd_connect_data, padding1)); 935 (long long)(int)offsetof(struct obd_connect_data, padding1));
910 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n", 936 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n",
911 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1)); 937 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
912 LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n", 938 LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n",
913 (long long)(int)offsetof(struct obd_connect_data, padding2)); 939 (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2));
914 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n", 940 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n",
915 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2)); 941 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2));
916 LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n", 942 LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
917 (long long)(int)offsetof(struct obd_connect_data, padding3)); 943 (long long)(int)offsetof(struct obd_connect_data, padding3));
918 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n", 944 LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
@@ -1075,14 +1101,24 @@ void lustre_assert_wire_constants(void)
1075 OBD_CONNECT_LFSCK); 1101 OBD_CONNECT_LFSCK);
1076 LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n", 1102 LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
1077 OBD_CONNECT_UNLINK_CLOSE); 1103 OBD_CONNECT_UNLINK_CLOSE);
1104 LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n",
1105 OBD_CONNECT_MULTIMODRPCS);
1078 LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n", 1106 LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
1079 OBD_CONNECT_DIR_STRIPE); 1107 OBD_CONNECT_DIR_STRIPE);
1108 LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n",
1109 OBD_CONNECT_SUBTREE);
1110 LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n",
1111 OBD_CONNECT_LOCK_AHEAD);
1112 LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n",
1113 OBD_CONNECT_OBDOPACK);
1114 LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n",
1115 OBD_CONNECT_FLAGS2);
1080 LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n", 1116 LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
1081 (unsigned)OBD_CKSUM_CRC32); 1117 (unsigned int)OBD_CKSUM_CRC32);
1082 LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n", 1118 LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
1083 (unsigned)OBD_CKSUM_ADLER); 1119 (unsigned int)OBD_CKSUM_ADLER);
1084 LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n", 1120 LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
1085 (unsigned)OBD_CKSUM_CRC32C); 1121 (unsigned int)OBD_CKSUM_CRC32C);
1086 1122
1087 /* Checks for struct obdo */ 1123 /* Checks for struct obdo */
1088 LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n", 1124 LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
@@ -1239,8 +1275,6 @@ void lustre_assert_wire_constants(void)
1239 OBD_MD_FLCKSUM); 1275 OBD_MD_FLCKSUM);
1240 LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n", 1276 LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
1241 OBD_MD_FLQOS); 1277 OBD_MD_FLQOS);
1242 LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
1243 OBD_MD_FLCOOKIE);
1244 LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n", 1278 LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
1245 OBD_MD_FLGROUP); 1279 OBD_MD_FLGROUP);
1246 LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n", 1280 LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
@@ -1394,13 +1428,13 @@ void lustre_assert_wire_constants(void)
1394 (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0])); 1428 (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
1395 CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0)); 1429 CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0));
1396 LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n", 1430 LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
1397 (unsigned)LOV_PATTERN_RAID0); 1431 (unsigned int)LOV_PATTERN_RAID0);
1398 LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n", 1432 LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
1399 (unsigned)LOV_PATTERN_RAID1); 1433 (unsigned int)LOV_PATTERN_RAID1);
1400 LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n", 1434 LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
1401 (unsigned)LOV_PATTERN_FIRST); 1435 (unsigned int)LOV_PATTERN_FIRST);
1402 LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n", 1436 LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
1403 (unsigned)LOV_PATTERN_CMOBD); 1437 (unsigned int)LOV_PATTERN_CMOBD);
1404 1438
1405 /* Checks for struct lmv_mds_md_v1 */ 1439 /* Checks for struct lmv_mds_md_v1 */
1406 LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n", 1440 LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
@@ -1542,6 +1576,8 @@ void lustre_assert_wire_constants(void)
1542 (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt)); 1576 (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
1543 LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n", 1577 LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
1544 (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt)); 1578 (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
1579 LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n",
1580 (long long)IOOBJ_MAX_BRW_BITS);
1545 1581
1546 /* Checks for union lquota_id */ 1582 /* Checks for union lquota_id */
1547 LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", 1583 LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
@@ -1817,10 +1853,10 @@ void lustre_assert_wire_constants(void)
1817 (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize)); 1853 (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
1818 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n", 1854 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
1819 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize)); 1855 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
1820 LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n", 1856 LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n",
1821 (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize)); 1857 (long long)(int)offsetof(struct mdt_body, mbo_unused3));
1822 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n", 1858 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n",
1823 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize)); 1859 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3));
1824 LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n", 1860 LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
1825 (long long)(int)offsetof(struct mdt_body, mbo_uid_h)); 1861 (long long)(int)offsetof(struct mdt_body, mbo_uid_h));
1826 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n", 1862 LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
@@ -1857,12 +1893,6 @@ void lustre_assert_wire_constants(void)
1857 MDS_FMODE_CLOSED); 1893 MDS_FMODE_CLOSED);
1858 LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n", 1894 LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
1859 MDS_FMODE_EXEC); 1895 MDS_FMODE_EXEC);
1860 LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
1861 MDS_FMODE_EPOCH);
1862 LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
1863 MDS_FMODE_TRUNC);
1864 LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
1865 MDS_FMODE_SOM);
1866 LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n", 1896 LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
1867 MDS_OPEN_CREATED); 1897 MDS_OPEN_CREATED);
1868 LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n", 1898 LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
@@ -1905,10 +1935,20 @@ void lustre_assert_wire_constants(void)
1905 LUSTRE_IMMUTABLE_FL); 1935 LUSTRE_IMMUTABLE_FL);
1906 LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n", 1936 LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
1907 LUSTRE_APPEND_FL); 1937 LUSTRE_APPEND_FL);
1938 LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n",
1939 LUSTRE_NODUMP_FL);
1908 LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n", 1940 LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
1909 LUSTRE_NOATIME_FL); 1941 LUSTRE_NOATIME_FL);
1942 LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n",
1943 LUSTRE_INDEX_FL);
1910 LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n", 1944 LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
1911 LUSTRE_DIRSYNC_FL); 1945 LUSTRE_DIRSYNC_FL);
1946 LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n",
1947 LUSTRE_TOPDIR_FL);
1948 LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n",
1949 LUSTRE_DIRECTIO_FL);
1950 LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n",
1951 LUSTRE_INLINE_DATA_FL);
1912 LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n", 1952 LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
1913 MDS_INODELOCK_LOOKUP); 1953 MDS_INODELOCK_LOOKUP);
1914 LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n", 1954 LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
@@ -1921,22 +1961,22 @@ void lustre_assert_wire_constants(void)
1921 /* Checks for struct mdt_ioepoch */ 1961 /* Checks for struct mdt_ioepoch */
1922 LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n", 1962 LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
1923 (long long)(int)sizeof(struct mdt_ioepoch)); 1963 (long long)(int)sizeof(struct mdt_ioepoch));
1924 LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n", 1964 LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n",
1925 (long long)(int)offsetof(struct mdt_ioepoch, handle)); 1965 (long long)(int)offsetof(struct mdt_ioepoch, mio_handle));
1926 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n", 1966 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n",
1927 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle)); 1967 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle));
1928 LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n", 1968 LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n",
1929 (long long)(int)offsetof(struct mdt_ioepoch, ioepoch)); 1969 (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1));
1930 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n", 1970 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n",
1931 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch)); 1971 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1));
1932 LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n", 1972 LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n",
1933 (long long)(int)offsetof(struct mdt_ioepoch, flags)); 1973 (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2));
1934 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n", 1974 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n",
1935 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags)); 1975 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2));
1936 LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n", 1976 LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n",
1937 (long long)(int)offsetof(struct mdt_ioepoch, padding)); 1977 (long long)(int)offsetof(struct mdt_ioepoch, mio_padding));
1938 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n", 1978 LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n",
1939 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding)); 1979 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding));
1940 1980
1941 /* Checks for struct mdt_rec_setattr */ 1981 /* Checks for struct mdt_rec_setattr */
1942 LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n", 1982 LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
@@ -3520,21 +3560,21 @@ void lustre_assert_wire_constants(void)
3520 LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n", 3560 LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
3521 (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx)); 3561 (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
3522 3562
3523 /* Checks for struct ll_fiemap_info_key */ 3563 /* Checks for struct fiemap_info_key */
3524 LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n", 3564 LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
3525 (long long)(int)sizeof(struct ll_fiemap_info_key)); 3565 (long long)(int)sizeof(struct ll_fiemap_info_key));
3526 LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n", 3566 LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n",
3527 (long long)(int)offsetof(struct ll_fiemap_info_key, name[8])); 3567 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8]));
3528 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n", 3568 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n",
3529 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8])); 3569 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]));
3530 LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n", 3570 LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n",
3531 (long long)(int)offsetof(struct ll_fiemap_info_key, oa)); 3571 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa));
3532 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n", 3572 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n",
3533 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa)); 3573 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa));
3534 LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n", 3574 LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n",
3535 (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap)); 3575 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap));
3536 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n", 3576 LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n",
3537 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap)); 3577 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap));
3538 3578
3539 /* Checks for struct mgs_target_info */ 3579 /* Checks for struct mgs_target_info */
3540 LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n", 3580 LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
@@ -3670,64 +3710,64 @@ void lustre_assert_wire_constants(void)
3670 LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n", 3710 LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
3671 (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0])); 3711 (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
3672 3712
3673 /* Checks for struct ll_user_fiemap */ 3713 /* Checks for struct fiemap */
3674 LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n", 3714 LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n",
3675 (long long)(int)sizeof(struct ll_user_fiemap)); 3715 (long long)(int)sizeof(struct fiemap));
3676 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n", 3716 LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n",
3677 (long long)(int)offsetof(struct ll_user_fiemap, fm_start)); 3717 (long long)(int)offsetof(struct fiemap, fm_start));
3678 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n", 3718 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n",
3679 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start)); 3719 (long long)(int)sizeof(((struct fiemap *)0)->fm_start));
3680 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n", 3720 LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n",
3681 (long long)(int)offsetof(struct ll_user_fiemap, fm_length)); 3721 (long long)(int)offsetof(struct fiemap, fm_length));
3682 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n", 3722 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n",
3683 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length)); 3723 (long long)(int)sizeof(((struct fiemap *)0)->fm_length));
3684 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n", 3724 LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n",
3685 (long long)(int)offsetof(struct ll_user_fiemap, fm_flags)); 3725 (long long)(int)offsetof(struct fiemap, fm_flags));
3686 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n", 3726 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n",
3687 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags)); 3727 (long long)(int)sizeof(((struct fiemap *)0)->fm_flags));
3688 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n", 3728 LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n",
3689 (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents)); 3729 (long long)(int)offsetof(struct fiemap, fm_mapped_extents));
3690 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n", 3730 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
3691 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents)); 3731 (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents));
3692 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n", 3732 LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n",
3693 (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count)); 3733 (long long)(int)offsetof(struct fiemap, fm_extent_count));
3694 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n", 3734 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
3695 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count)); 3735 (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count));
3696 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n", 3736 LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n",
3697 (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved)); 3737 (long long)(int)offsetof(struct fiemap, fm_reserved));
3698 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n", 3738 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n",
3699 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved)); 3739 (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved));
3700 LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n", 3740 LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n",
3701 (long long)(int)offsetof(struct ll_user_fiemap, fm_extents)); 3741 (long long)(int)offsetof(struct fiemap, fm_extents));
3702 LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n", 3742 LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n",
3703 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents)); 3743 (long long)(int)sizeof(((struct fiemap *)0)->fm_extents));
3704 CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001); 3744 CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
3705 CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002); 3745 CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
3706 CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000); 3746 CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
3707 3747
3708 /* Checks for struct ll_fiemap_extent */ 3748 /* Checks for struct fiemap_extent */
3709 LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n", 3749 LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n",
3710 (long long)(int)sizeof(struct ll_fiemap_extent)); 3750 (long long)(int)sizeof(struct fiemap_extent));
3711 LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n", 3751 LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n",
3712 (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical)); 3752 (long long)(int)offsetof(struct fiemap_extent, fe_logical));
3713 LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n", 3753 LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
3714 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical)); 3754 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical));
3715 LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n", 3755 LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n",
3716 (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical)); 3756 (long long)(int)offsetof(struct fiemap_extent, fe_physical));
3717 LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n", 3757 LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
3718 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical)); 3758 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical));
3719 LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n", 3759 LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n",
3720 (long long)(int)offsetof(struct ll_fiemap_extent, fe_length)); 3760 (long long)(int)offsetof(struct fiemap_extent, fe_length));
3721 LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n", 3761 LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
3722 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length)); 3762 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length));
3723 LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n", 3763 LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n",
3724 (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags)); 3764 (long long)(int)offsetof(struct fiemap_extent, fe_flags));
3725 LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n", 3765 LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
3726 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags)); 3766 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags));
3727 LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n", 3767 LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n",
3728 (long long)(int)offsetof(struct ll_fiemap_extent, fe_device)); 3768 (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0]));
3729 LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n", 3769 LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n",
3730 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device)); 3770 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]));
3731 CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001); 3771 CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
3732 CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002); 3772 CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
3733 CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004); 3773 CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
@@ -4093,9 +4133,9 @@ void lustre_assert_wire_constants(void)
4093 LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n", 4133 LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
4094 (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len)); 4134 (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
4095 LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n", 4135 LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
4096 (unsigned)HSM_FORCE_ACTION); 4136 (unsigned int)HSM_FORCE_ACTION);
4097 LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n", 4137 LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
4098 (unsigned)HSM_GHOST_COPY); 4138 (unsigned int)HSM_GHOST_COPY);
4099 4139
4100 /* Checks for struct hsm_user_request */ 4140 /* Checks for struct hsm_user_request */
4101 LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n", 4141 LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 20206ba965af..8691c6543a9c 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -11,7 +11,7 @@ Description:
11 Shows if the lustre module has pinger support. 11 Shows if the lustre module has pinger support.
12 "on" means yes and "off" means no. 12 "on" means yes and "off" means no.
13 13
14What: /sys/fs/lustre/health 14What: /sys/fs/lustre/health_check
15Date: May 2015 15Date: May 2015
16Contact: "Oleg Drokin" <oleg.drokin@intel.com> 16Contact: "Oleg Drokin" <oleg.drokin@intel.com>
17Description: 17Description:
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 4d9bd02ede47..c5116c058cea 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2542,7 +2542,7 @@ static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
2542 return err; 2542 return err;
2543} 2543}
2544 2544
2545static struct v4l2_ioctl_ops bcm2048_ioctl_ops = { 2545static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
2546 .vidioc_querycap = bcm2048_vidioc_querycap, 2546 .vidioc_querycap = bcm2048_vidioc_querycap,
2547 .vidioc_g_input = bcm2048_vidioc_g_input, 2547 .vidioc_g_input = bcm2048_vidioc_g_input,
2548 .vidioc_s_input = bcm2048_vidioc_s_input, 2548 .vidioc_s_input = bcm2048_vidioc_s_input,
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index fedeb3c3549e..c72c3f09f175 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -336,7 +336,8 @@ static int init(struct cxd *ci)
336 break; 336 break;
337#endif 337#endif
338 /* TOSTRT = 8, Mode B (gated clock), falling Edge, 338 /* TOSTRT = 8, Mode B (gated clock), falling Edge,
339 * Serial, POL=HIGH, MSB */ 339 * Serial, POL=HIGH, MSB
340 */
340 status = write_reg(ci, 0x0A, 0xA7); 341 status = write_reg(ci, 0x0A, 0xA7);
341 if (status < 0) 342 if (status < 0)
342 break; 343 break;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8be9f854510f..c34bf4621767 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1143,8 +1143,8 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1143 /* Initialize buffer */ 1143 /* Initialize buffer */
1144 vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage); 1144 vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
1145 if (vb2_plane_vaddr(vb, 0) && 1145 if (vb2_plane_vaddr(vb, 0) &&
1146 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) 1146 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
1147 return -EINVAL; 1147 return -EINVAL;
1148 1148
1149 addr = vb2_dma_contig_plane_dma_addr(vb, 0); 1149 addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1150 /* Make sure user addresses are aligned to 32 bytes */ 1150 /* Make sure user addresses are aligned to 32 bytes */
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 4678ae10b030..920c4a1290f4 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -103,7 +103,8 @@ struct sasem_context {
103 103
104 struct tx_t { 104 struct tx_t {
105 unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data 105 unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
106 * buffer */ 106 * buffer
107 */
107 struct completion finished; /* wait for write to finish */ 108 struct completion finished; /* wait for write to finish */
108 atomic_t busy; /* write in progress */ 109 atomic_t busy; /* write in progress */
109 int status; /* status of tx completion */ 110 int status; /* status of tx completion */
@@ -295,7 +296,8 @@ static int vfd_close(struct inode *inode, struct file *file)
295 if (!context->dev_present && !context->ir_isopen) { 296 if (!context->dev_present && !context->ir_isopen) {
296 /* Device disconnected before close and IR port is 297 /* Device disconnected before close and IR port is
297 * not open. If IR port is open, context will be 298 * not open. If IR port is open, context will be
298 * deleted by ir_close. */ 299 * deleted by ir_close.
300 */
299 mutex_unlock(&context->ctx_lock); 301 mutex_unlock(&context->ctx_lock);
300 delete_context(context); 302 delete_context(context);
301 return retval; 303 return retval;
@@ -397,7 +399,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
397 399
398 /* Nine 8 byte packets to be sent */ 400 /* Nine 8 byte packets to be sent */
399 /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0" 401 /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
400 * will clear the VFD */ 402 * will clear the VFD
403 */
401 for (i = 0; i < 9; i++) { 404 for (i = 0; i < 9; i++) {
402 switch (i) { 405 switch (i) {
403 case 0: 406 case 0:
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 3551aed589c0..34aac3e2eb87 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1157,8 +1157,8 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
1157 1157
1158 /* Send the code */ 1158 /* Send the code */
1159 if (ret == 0) { 1159 if (ret == 0) {
1160 ret = send_code(tx, (unsigned)command >> 16, 1160 ret = send_code(tx, (unsigned int)command >> 16,
1161 (unsigned)command & 0xFFFF); 1161 (unsigned int)command & 0xFFFF);
1162 if (ret == -EPROTO) { 1162 if (ret == -EPROTO) {
1163 mutex_unlock(&ir->ir_lock); 1163 mutex_unlock(&ir->ir_lock);
1164 mutex_unlock(&tx->client_lock); 1164 mutex_unlock(&tx->client_lock);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index aaca39d751a5..f71d5f2f179f 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -224,7 +224,7 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
224 fmtidx = 3; 224 fmtidx = 3;
225 break; 225 break;
226 default: 226 default:
227 WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n", 227 WARN(1, "CSI2: pixel format %08x unsupported!\n",
228 fmt->code); 228 fmt->code);
229 return 0; 229 return 0;
230 } 230 }
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 1780a08b73c9..aef962b6af31 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -22,7 +22,6 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/timer.h> 24#include <linux/timer.h>
25#include <linux/version.h>
26#include <linux/workqueue.h> 25#include <linux/workqueue.h>
27#include <media/cec.h> 26#include <media/cec.h>
28 27
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
index 214344866a6b..b22394ac4ec4 100644
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -108,11 +108,11 @@
108 108
109/* Constants for CEC_BIT_TOUT_THRESH register */ 109/* Constants for CEC_BIT_TOUT_THRESH register */
110#define CEC_SBIT_TOUT_47MS BIT(1) 110#define CEC_SBIT_TOUT_47MS BIT(1)
111#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1) 111#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
112#define CEC_SBIT_TOUT_50MS BIT(2) 112#define CEC_SBIT_TOUT_50MS BIT(2)
113#define CEC_DBIT_TOUT_27MS BIT(0) 113#define CEC_DBIT_TOUT_27MS BIT(0)
114#define CEC_DBIT_TOUT_28MS BIT(1) 114#define CEC_DBIT_TOUT_28MS BIT(1)
115#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1) 115#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
116 116
117/* Constants for CEC_BIT_PULSE_THRESH register */ 117/* Constants for CEC_BIT_PULSE_THRESH register */
118#define CEC_BIT_LPULSE_03MS BIT(1) 118#define CEC_BIT_LPULSE_03MS BIT(1)
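A side note on the two CEC threshold fixes above: the added parentheses matter because BIT(0) | BIT(1) is not a single value once the macro is expanded next to a higher-precedence operator. A minimal sketch of the failure mode (the variable and function names are illustrative only, not taken from the driver):

    u32 timeout_bits = 0x2;                 /* only BIT(1) set */

    /* old definition: #define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1) */
    if (timeout_bits & CEC_SBIT_TOUT_48MS)  /* expands to: timeout_bits & BIT(0) | BIT(1) */
            handle_timeout();               /* '&' binds tighter than '|', so this is
                                             * (timeout_bits & BIT(0)) | BIT(1) == 0x2,
                                             * i.e. the branch is always taken */

    /* new definition (BIT(0) | BIT(1)): the test is the intended mask check,
     * timeout_bits & 0x3, and behaves as written. */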
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 4659a6450c04..ce1764cba5f0 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -67,10 +67,10 @@ struct net_dev_context {
67 struct most_interface *iface; 67 struct most_interface *iface;
68 bool channels_opened; 68 bool channels_opened;
69 bool is_mamac; 69 bool is_mamac;
70 unsigned char link_stat;
71 struct net_device *dev; 70 struct net_device *dev;
72 struct net_dev_channel rx; 71 struct net_dev_channel rx;
73 struct net_dev_channel tx; 72 struct net_dev_channel tx;
73 struct completion mac_compl;
74 struct list_head list; 74 struct list_head list;
75}; 75};
76 76
@@ -181,6 +181,7 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p)
181static int most_nd_open(struct net_device *dev) 181static int most_nd_open(struct net_device *dev)
182{ 182{
183 struct net_dev_context *nd = dev->ml_priv; 183 struct net_dev_context *nd = dev->ml_priv;
184 long ret;
184 185
185 netdev_info(dev, "open net device\n"); 186 netdev_info(dev, "open net device\n");
186 187
@@ -202,16 +203,30 @@ static int most_nd_open(struct net_device *dev)
202 return -EBUSY; 203 return -EBUSY;
203 } 204 }
204 205
205 nd->channels_opened = true; 206 if (!is_valid_ether_addr(dev->dev_addr)) {
206
207 if (nd->is_mamac) {
208 nd->link_stat = 1;
209 netif_wake_queue(dev);
210 } else {
211 nd->iface->request_netinfo(nd->iface, nd->tx.ch_id); 207 nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
208 ret = wait_for_completion_interruptible_timeout(
209 &nd->mac_compl, msecs_to_jiffies(5000));
210 if (!ret) {
211 netdev_err(dev, "mac timeout\n");
212 ret = -EBUSY;
213 goto err;
214 }
215
216 if (ret < 0) {
217 netdev_warn(dev, "mac waiting interrupted\n");
218 goto err;
219 }
212 } 220 }
213 221
222 nd->channels_opened = true;
223 netif_wake_queue(dev);
214 return 0; 224 return 0;
225
226err:
227 most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
228 most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
229 return ret;
215} 230}
216 231
217static int most_nd_stop(struct net_device *dev) 232static int most_nd_stop(struct net_device *dev)
@@ -277,7 +292,6 @@ static const struct net_device_ops most_nd_ops = {
277 292
278static void most_nd_setup(struct net_device *dev) 293static void most_nd_setup(struct net_device *dev)
279{ 294{
280 netdev_info(dev, "setup net device\n");
281 ether_setup(dev); 295 ether_setup(dev);
282 dev->netdev_ops = &most_nd_ops; 296 dev->netdev_ops = &most_nd_ops;
283} 297}
@@ -332,6 +346,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
332 if (!nd) 346 if (!nd)
333 return -ENOMEM; 347 return -ENOMEM;
334 348
349 init_completion(&nd->mac_compl);
335 nd->iface = iface; 350 nd->iface = iface;
336 351
337 spin_lock_irqsave(&list_lock, flags); 352 spin_lock_irqsave(&list_lock, flags);
@@ -548,8 +563,7 @@ void most_deliver_netinfo(struct most_interface *iface,
548{ 563{
549 struct net_dev_context *nd; 564 struct net_dev_context *nd;
550 struct net_device *dev; 565 struct net_device *dev;
551 566 const u8 *m = mac_addr;
552 pr_info("Received netinfo from %s\n", iface->description);
553 567
554 nd = get_net_dev_context(iface); 568 nd = get_net_dev_context(iface);
555 if (!nd) 569 if (!nd)
@@ -559,15 +573,16 @@ void most_deliver_netinfo(struct most_interface *iface,
559 if (!dev) 573 if (!dev)
560 return; 574 return;
561 575
562 if (mac_addr) 576 if (m && is_valid_ether_addr(m)) {
563 ether_addr_copy(dev->dev_addr, mac_addr); 577 if (!is_valid_ether_addr(dev->dev_addr)) {
564 578 netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
565 if (nd->link_stat != link_stat) { 579 m[0], m[1], m[2], m[3], m[4], m[5]);
566 nd->link_stat = link_stat; 580 ether_addr_copy(dev->dev_addr, m);
567 if (nd->link_stat) 581 complete(&nd->mac_compl);
568 netif_wake_queue(dev); 582 } else if (!ether_addr_equal(dev->dev_addr, m)) {
569 else 583 netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
570 netif_stop_queue(dev); 584 m[0], m[1], m[2], m[3], m[4], m[5]);
585 }
571 } 586 }
572} 587}
573EXPORT_SYMBOL(most_deliver_netinfo); 588EXPORT_SYMBOL(most_deliver_netinfo);
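The networking.c changes above swap the old link_stat flag for a completion: most_nd_open() now requests the netinfo and sleeps until most_deliver_netinfo() has copied a valid MAC address into the net_device and signalled mac_compl. Pulled out of the driver plumbing, the wait looks roughly like this (wait_for_mac() is a hypothetical helper name; the five-second timeout and the return-code handling mirror the hunk above):

    /* caller side, ndo_open path */
    static int wait_for_mac(struct net_dev_context *nd, struct net_device *dev)
    {
            long ret;

            nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
            ret = wait_for_completion_interruptible_timeout(&nd->mac_compl,
                                                            msecs_to_jiffies(5000));
            if (!ret)               /* 0: the timeout expired, no MAC was delivered */
                    return -EBUSY;
            if (ret < 0)            /* -ERESTARTSYS: the sleep was interrupted */
                    return ret;
            return 0;               /* >0: complete() fired, dev->dev_addr is valid */
    }

    /* producer side, netinfo callback: publish the MAC once, then wake the opener */
    ether_addr_copy(dev->dev_addr, m);
    complete(&nd->mac_compl);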
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 78b2c3dd9bb3..35aee9fbbf02 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -306,14 +306,11 @@ static int deliver_netinfo_thread(void *data)
306static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo) 306static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
307{ 307{
308 u8 *data = mbo->virt_address; 308 u8 *data = mbo->virt_address;
309 u8 *mac = dev->mac_addrs;
310 309
311 pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]); 310 pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
312 dev->link_state = data[18]; 311 dev->link_state = data[18];
313 pr_info("NIState: %d\n", dev->link_state); 312 pr_info("NIState: %d\n", dev->link_state);
314 memcpy(mac, data + 19, 6); 313 memcpy(dev->mac_addrs, data + 19, 6);
315 pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
316 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
317 dev->deliver_netinfo++; 314 dev->deliver_netinfo++;
318 wake_up_interruptible(&dev->netinfo_waitq); 315 wake_up_interruptible(&dev->netinfo_waitq);
319} 316}
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 26c9adb29308..d6db0bd65be0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -97,9 +97,7 @@ struct clear_hold_work {
97 * @cap: channel capabilities 97 * @cap: channel capabilities
98 * @conf: channel configuration 98 * @conf: channel configuration
99 * @dci: direct communication interface of hardware 99 * @dci: direct communication interface of hardware
100 * @hw_addr: MAC address of hardware
101 * @ep_address: endpoint address table 100 * @ep_address: endpoint address table
102 * @link_stat: link status of hardware
103 * @description: device description 101 * @description: device description
104 * @suffix: suffix for channel name 102 * @suffix: suffix for channel name
105 * @channel_lock: synchronize channel access 103 * @channel_lock: synchronize channel access
@@ -117,9 +115,7 @@ struct most_dev {
117 struct most_channel_capability *cap; 115 struct most_channel_capability *cap;
118 struct most_channel_config *conf; 116 struct most_channel_config *conf;
119 struct most_dci_obj *dci; 117 struct most_dci_obj *dci;
120 u8 hw_addr[6];
121 u8 *ep_address; 118 u8 *ep_address;
122 u16 link_stat;
123 char description[MAX_STRING_LEN]; 119 char description[MAX_STRING_LEN];
124 char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN]; 120 char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
125 spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */ 121 spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
@@ -186,28 +182,9 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
186 5 * HZ); 182 5 * HZ);
187} 183}
188 184
189/** 185static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
190 * free_anchored_buffers - free device's anchored items
191 * @mdev: the device
192 * @channel: channel ID
193 * @status: status of MBO termination
194 */
195static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
196 enum mbo_status_flags status)
197{ 186{
198 struct mbo *mbo; 187 return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
199 struct urb *urb;
200
201 while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
202 mbo = urb->context;
203 usb_kill_urb(urb);
204 if (mbo && mbo->complete) {
205 mbo->status = status;
206 mbo->processed_length = 0;
207 mbo->complete(mbo);
208 }
209 usb_free_urb(urb);
210 }
211} 188}
212 189
213/** 190/**
@@ -278,7 +255,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
278 cancel_work_sync(&mdev->clear_work[channel].ws); 255 cancel_work_sync(&mdev->clear_work[channel].ws);
279 256
280 mutex_lock(&mdev->io_mutex); 257 mutex_lock(&mdev->io_mutex);
281 free_anchored_buffers(mdev, channel, MBO_E_CLOSE); 258 usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
282 if (mdev->padding_active[channel]) 259 if (mdev->padding_active[channel])
283 mdev->padding_active[channel] = false; 260 mdev->padding_active[channel] = false;
284 261
@@ -377,33 +354,27 @@ static void hdm_write_completion(struct urb *urb)
377 unsigned long flags; 354 unsigned long flags;
378 355
379 spin_lock_irqsave(lock, flags); 356 spin_lock_irqsave(lock, flags);
380 if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
381 !mdev->is_channel_healthy[channel]) {
382 spin_unlock_irqrestore(lock, flags);
383 return;
384 }
385 357
386 if (unlikely(urb->status && urb->status != -ESHUTDOWN)) { 358 mbo->processed_length = 0;
387 mbo->processed_length = 0; 359 mbo->status = MBO_E_INVAL;
360 if (likely(mdev->is_channel_healthy[channel])) {
388 switch (urb->status) { 361 switch (urb->status) {
362 case 0:
363 case -ESHUTDOWN:
364 mbo->processed_length = urb->actual_length;
365 mbo->status = MBO_SUCCESS;
366 break;
389 case -EPIPE: 367 case -EPIPE:
390 dev_warn(dev, "Broken OUT pipe detected\n"); 368 dev_warn(dev, "Broken OUT pipe detected\n");
391 mdev->is_channel_healthy[channel] = false; 369 mdev->is_channel_healthy[channel] = false;
392 spin_unlock_irqrestore(lock, flags);
393 mdev->clear_work[channel].pipe = urb->pipe; 370 mdev->clear_work[channel].pipe = urb->pipe;
394 schedule_work(&mdev->clear_work[channel].ws); 371 schedule_work(&mdev->clear_work[channel].ws);
395 return; 372 break;
396 case -ENODEV: 373 case -ENODEV:
397 case -EPROTO: 374 case -EPROTO:
398 mbo->status = MBO_E_CLOSE; 375 mbo->status = MBO_E_CLOSE;
399 break; 376 break;
400 default:
401 mbo->status = MBO_E_INVAL;
402 break;
403 } 377 }
404 } else {
405 mbo->status = MBO_SUCCESS;
406 mbo->processed_length = urb->actual_length;
407 } 378 }
408 379
409 spin_unlock_irqrestore(lock, flags); 380 spin_unlock_irqrestore(lock, flags);
@@ -531,40 +502,35 @@ static void hdm_read_completion(struct urb *urb)
531 unsigned long flags; 502 unsigned long flags;
532 503
533 spin_lock_irqsave(lock, flags); 504 spin_lock_irqsave(lock, flags);
534 if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
535 !mdev->is_channel_healthy[channel]) {
536 spin_unlock_irqrestore(lock, flags);
537 return;
538 }
539 505
540 if (unlikely(urb->status && urb->status != -ESHUTDOWN)) { 506 mbo->processed_length = 0;
541 mbo->processed_length = 0; 507 mbo->status = MBO_E_INVAL;
508 if (likely(mdev->is_channel_healthy[channel])) {
542 switch (urb->status) { 509 switch (urb->status) {
510 case 0:
511 case -ESHUTDOWN:
512 mbo->processed_length = urb->actual_length;
513 mbo->status = MBO_SUCCESS;
514 if (mdev->padding_active[channel] &&
515 hdm_remove_padding(mdev, channel, mbo)) {
516 mbo->processed_length = 0;
517 mbo->status = MBO_E_INVAL;
518 }
519 break;
543 case -EPIPE: 520 case -EPIPE:
544 dev_warn(dev, "Broken IN pipe detected\n"); 521 dev_warn(dev, "Broken IN pipe detected\n");
545 mdev->is_channel_healthy[channel] = false; 522 mdev->is_channel_healthy[channel] = false;
546 spin_unlock_irqrestore(lock, flags);
547 mdev->clear_work[channel].pipe = urb->pipe; 523 mdev->clear_work[channel].pipe = urb->pipe;
548 schedule_work(&mdev->clear_work[channel].ws); 524 schedule_work(&mdev->clear_work[channel].ws);
549 return; 525 break;
550 case -ENODEV: 526 case -ENODEV:
551 case -EPROTO: 527 case -EPROTO:
552 mbo->status = MBO_E_CLOSE; 528 mbo->status = MBO_E_CLOSE;
553 break; 529 break;
554 case -EOVERFLOW: 530 case -EOVERFLOW:
555 dev_warn(dev, "Babble on IN pipe detected\n"); 531 dev_warn(dev, "Babble on IN pipe detected\n");
556 default:
557 mbo->status = MBO_E_INVAL;
558 break; 532 break;
559 } 533 }
560 } else {
561 mbo->processed_length = urb->actual_length;
562 mbo->status = MBO_SUCCESS;
563 if (mdev->padding_active[channel] &&
564 hdm_remove_padding(mdev, channel, mbo)) {
565 mbo->processed_length = 0;
566 mbo->status = MBO_E_INVAL;
567 }
568 } 534 }
569 535
570 spin_unlock_irqrestore(lock, flags); 536 spin_unlock_irqrestore(lock, flags);
@@ -668,6 +634,15 @@ _error:
668 * @iface: interface 634 * @iface: interface
669 * @channel: channel ID 635 * @channel: channel ID
670 * @conf: structure that holds the configuration information 636 * @conf: structure that holds the configuration information
637 *
638 * The attached network interface controller (NIC) supports a padding mode
639 * to avoid short packets on USB, hence increasing the performance due to a
640 * lower interrupt load. This mode is default for synchronous data and can
641 * be switched on for isochronous data. In case padding is active the
642 * driver needs to know the frame size of the payload in order to calculate
643 * the number of bytes it needs to pad when transmitting or to cut off when
644 * receiving data.
645 *
671 */ 646 */
672static int hdm_configure_channel(struct most_interface *iface, int channel, 647static int hdm_configure_channel(struct most_interface *iface, int channel,
673 struct most_channel_config *conf) 648 struct most_channel_config *conf)
@@ -701,6 +676,11 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
701 !(conf->data_type == MOST_CH_ISOC && 676 !(conf->data_type == MOST_CH_ISOC &&
702 conf->packets_per_xact != 0xFF)) { 677 conf->packets_per_xact != 0xFF)) {
703 mdev->padding_active[channel] = false; 678 mdev->padding_active[channel] = false;
679 /*
680 * Since the NIC's padding mode is not going to be
681 * used, we can skip the frame size calculations and
682 * move directly on to exit.
683 */
704 goto exit; 684 goto exit;
705 } 685 }
706 686
@@ -734,56 +714,12 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
734 - conf->buffer_size; 714 - conf->buffer_size;
735exit: 715exit:
736 mdev->conf[channel] = *conf; 716 mdev->conf[channel] = *conf;
737 return 0; 717 if (conf->data_type == MOST_CH_ASYNC) {
738} 718 u16 ep = mdev->ep_address[channel];
739 719
740/** 720 if (start_sync_ep(mdev->usb_device, ep) < 0)
741 * hdm_update_netinfo - retrieve latest networking information 721 dev_warn(dev, "sync for ep%02x failed", ep);
742 * @mdev: device interface
743 *
744 * This triggers the USB vendor requests to read the hardware address and
745 * the current link status of the attached device.
746 */
747static int hdm_update_netinfo(struct most_dev *mdev)
748{
749 struct usb_device *usb_device = mdev->usb_device;
750 struct device *dev = &usb_device->dev;
751 u16 hi, mi, lo, link;
752
753 if (!is_valid_ether_addr(mdev->hw_addr)) {
754 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
755 dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
756 return -EFAULT;
757 }
758
759 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
760 dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
761 return -EFAULT;
762 }
763
764 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
765 dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
766 return -EFAULT;
767 }
768
769 mutex_lock(&mdev->io_mutex);
770 mdev->hw_addr[0] = hi >> 8;
771 mdev->hw_addr[1] = hi;
772 mdev->hw_addr[2] = mi >> 8;
773 mdev->hw_addr[3] = mi;
774 mdev->hw_addr[4] = lo >> 8;
775 mdev->hw_addr[5] = lo;
776 mutex_unlock(&mdev->io_mutex);
777 }
778
779 if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
780 dev_err(dev, "Vendor request \"link status\" failed\n");
781 return -EFAULT;
782 } 722 }
783
784 mutex_lock(&mdev->io_mutex);
785 mdev->link_stat = link;
786 mutex_unlock(&mdev->io_mutex);
787 return 0; 723 return 0;
788} 724}
789 725
@@ -807,7 +743,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel)
807} 743}
808 744
809/** 745/**
810 * link_stat_timer_handler - add work to link_stat work queue 746 * link_stat_timer_handler - schedule work obtaining mac address and link status
811 * @data: pointer to USB device instance 747 * @data: pointer to USB device instance
812 * 748 *
813 * The handler runs in interrupt context. That's why we need to defer the 749 * The handler runs in interrupt context. That's why we need to defer the
@@ -823,33 +759,47 @@ static void link_stat_timer_handler(unsigned long data)
823} 759}
824 760
825/** 761/**
826 * wq_netinfo - work queue function 762 * wq_netinfo - work queue function to deliver latest networking information
827 * @wq_obj: object that holds data for our deferred work to do 763 * @wq_obj: object that holds data for our deferred work to do
828 * 764 *
829 * This retrieves the network interface status of the USB INIC 765 * This retrieves the network interface status of the USB INIC
830 * and compares it with the current status. If the status has
831 * changed, it updates the status of the core.
832 */ 766 */
833static void wq_netinfo(struct work_struct *wq_obj) 767static void wq_netinfo(struct work_struct *wq_obj)
834{ 768{
835 struct most_dev *mdev = to_mdev_from_work(wq_obj); 769 struct most_dev *mdev = to_mdev_from_work(wq_obj);
836 int i, prev_link_stat = mdev->link_stat; 770 struct usb_device *usb_device = mdev->usb_device;
837 u8 prev_hw_addr[6]; 771 struct device *dev = &usb_device->dev;
772 u16 hi, mi, lo, link;
773 u8 hw_addr[6];
838 774
839 for (i = 0; i < 6; i++) 775 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
840 prev_hw_addr[i] = mdev->hw_addr[i]; 776 dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
777 return;
778 }
779
780 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
781 dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
782 return;
783 }
841 784
842 if (hdm_update_netinfo(mdev) < 0) 785 if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
786 dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
787 return;
788 }
789
790 if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
791 dev_err(dev, "Vendor request 'link status' failed\n");
843 return; 792 return;
844 if (prev_link_stat != mdev->link_stat || 793 }
845 prev_hw_addr[0] != mdev->hw_addr[0] || 794
846 prev_hw_addr[1] != mdev->hw_addr[1] || 795 hw_addr[0] = hi >> 8;
847 prev_hw_addr[2] != mdev->hw_addr[2] || 796 hw_addr[1] = hi;
848 prev_hw_addr[3] != mdev->hw_addr[3] || 797 hw_addr[2] = mi >> 8;
849 prev_hw_addr[4] != mdev->hw_addr[4] || 798 hw_addr[3] = mi;
850 prev_hw_addr[5] != mdev->hw_addr[5]) 799 hw_addr[4] = lo >> 8;
851 most_deliver_netinfo(&mdev->iface, mdev->link_stat, 800 hw_addr[5] = lo;
852 &mdev->hw_addr[0]); 801
802 most_deliver_netinfo(&mdev->iface, link, hw_addr);
853} 803}
854 804
855/** 805/**
@@ -867,7 +817,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
867 817
868 mutex_lock(&mdev->io_mutex); 818 mutex_lock(&mdev->io_mutex);
869 most_stop_enqueue(&mdev->iface, channel); 819 most_stop_enqueue(&mdev->iface, channel);
870 free_anchored_buffers(mdev, channel, MBO_E_INVAL); 820 usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
871 if (usb_clear_halt(mdev->usb_device, pipe)) 821 if (usb_clear_halt(mdev->usb_device, pipe))
872 dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n"); 822 dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
873 823
@@ -1053,6 +1003,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
1053 u16 val; 1003 u16 val;
1054 u16 reg_addr; 1004 u16 reg_addr;
1055 const char *name = attr->attr.name; 1005 const char *name = attr->attr.name;
1006 struct usb_device *usb_dev = dci_obj->usb_device;
1056 int err = kstrtou16(buf, 16, &val); 1007 int err = kstrtou16(buf, 16, &val);
1057 1008
1058 if (err) 1009 if (err)
@@ -1063,18 +1014,15 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
1063 return count; 1014 return count;
1064 } 1015 }
1065 1016
1066 if (!strcmp(name, "arb_value")) { 1017 if (!strcmp(name, "arb_value"))
1067 reg_addr = dci_obj->reg_addr; 1018 err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
1068 } else if (!strcmp(name, "sync_ep")) { 1019 else if (!strcmp(name, "sync_ep"))
1069 u16 ep = val; 1020 err = start_sync_ep(usb_dev, val);
1070 1021 else if (!get_static_reg_addr(ro_regs, name, &reg_addr))
1071 reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16; 1022 err = drci_wr_reg(usb_dev, reg_addr, val);
1072 val = 1; 1023 else
1073 } else if (get_static_reg_addr(ro_regs, name, &reg_addr)) {
1074 return -EFAULT; 1024 return -EFAULT;
1075 }
1076 1025
1077 err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
1078 if (err < 0) 1026 if (err < 0)
1079 return err; 1027 return err;
1080 1028
@@ -1186,7 +1134,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
1186 struct most_channel_capability *tmp_cap; 1134 struct most_channel_capability *tmp_cap;
1187 struct usb_endpoint_descriptor *ep_desc; 1135 struct usb_endpoint_descriptor *ep_desc;
1188 int ret = 0; 1136 int ret = 0;
1189 int err;
1190 1137
1191 if (!mdev) 1138 if (!mdev)
1192 goto exit_ENOMEM; 1139 goto exit_ENOMEM;
@@ -1262,13 +1209,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
1262 tmp_cap++; 1209 tmp_cap++;
1263 init_usb_anchor(&mdev->busy_urbs[i]); 1210 init_usb_anchor(&mdev->busy_urbs[i]);
1264 spin_lock_init(&mdev->channel_lock[i]); 1211 spin_lock_init(&mdev->channel_lock[i]);
1265 err = drci_wr_reg(usb_dev,
1266 DRCI_REG_BASE + DRCI_COMMAND +
1267 ep_desc->bEndpointAddress * 16,
1268 1);
1269 if (err < 0)
1270 dev_warn(dev, "DCI Sync for EP %02x failed",
1271 ep_desc->bEndpointAddress);
1272 } 1212 }
1273 dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n", 1213 dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
1274 le16_to_cpu(usb_dev->descriptor.idVendor), 1214 le16_to_cpu(usb_dev->descriptor.idVendor),
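The hdm_usb.c rework above drops the hand-rolled free_anchored_buffers() walk: usb_kill_anchored_urbs() kills every URB still held on the channel's anchor, and each killed URB still passes through its normal completion handler, which now starts from pessimistic defaults (MBO_E_INVAL, length 0) and only upgrades the MBO when the channel is healthy and the URB status allows it. The per-endpoint DCI sync write is likewise factored into start_sync_ep() and issued from hdm_configure_channel() for asynchronous channels instead of unconditionally at probe time. Condensed, the poison path becomes roughly this (illustrative, error handling trimmed):

    mutex_lock(&mdev->io_mutex);
    usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);  /* completion handlers still run
                                                         * for every killed URB and fill
                                                         * in the MBO state themselves */
    mdev->padding_active[channel] = false;
    mutex_unlock(&mdev->io_mutex);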
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 329109c0024f..191404bc5906 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -342,7 +342,7 @@ static ssize_t show_channel_starving(struct most_c_obj *c,
342} 342}
343 343
344#define create_show_channel_attribute(val) \ 344#define create_show_channel_attribute(val) \
345 static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL) 345 static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
346 346
347create_show_channel_attribute(available_directions); 347create_show_channel_attribute(available_directions);
348create_show_channel_attribute(available_datatypes); 348create_show_channel_attribute(available_datatypes);
@@ -494,9 +494,7 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
494} 494}
495 495
496#define create_channel_attribute(value) \ 496#define create_channel_attribute(value) \
497 static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \ 497 static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
498 show_##value, \
499 store_##value)
500 498
501create_channel_attribute(set_buffer_size); 499create_channel_attribute(set_buffer_size);
502create_channel_attribute(set_number_of_buffers); 500create_channel_attribute(set_number_of_buffers);
@@ -690,7 +688,7 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj,
690} 688}
691 689
692#define create_inst_attribute(value) \ 690#define create_inst_attribute(value) \
693 static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL) 691 static MOST_INST_ATTR(value, 0444, show_##value, NULL)
694 692
695create_inst_attribute(description); 693create_inst_attribute(description);
696create_inst_attribute(interface); 694create_inst_attribute(interface);
@@ -763,8 +761,6 @@ struct most_aim_obj {
763 struct kobject kobj; 761 struct kobject kobj;
764 struct list_head list; 762 struct list_head list;
765 struct most_aim *driver; 763 struct most_aim *driver;
766 char add_link[STRING_SIZE];
767 char remove_link[STRING_SIZE];
768}; 764};
769 765
770#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj) 766#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
@@ -851,7 +847,7 @@ static void most_aim_release(struct kobject *kobj)
851 kfree(aim_obj); 847 kfree(aim_obj);
852} 848}
853 849
854static ssize_t show_add_link(struct most_aim_obj *aim_obj, 850static ssize_t add_link_show(struct most_aim_obj *aim_obj,
855 struct most_aim_attribute *attr, 851 struct most_aim_attribute *attr,
856 char *buf) 852 char *buf)
857{ 853{
@@ -885,16 +881,16 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj,
885 * 881 *
886 * Examples: 882 * Examples:
887 * 883 *
888 * Input: "mdev0:ch0@ep_81:my_channel\n" or 884 * Input: "mdev0:ch6:my_channel\n" or
889 * "mdev0:ch0@ep_81:my_channel" 885 * "mdev0:ch6:my_channel"
890 * 886 *
891 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel" 887 * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
892 * 888 *
893 * Input: "mdev0:ch0@ep_81\n" 889 * Input: "mdev1:ep81\n"
894 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "" 890 * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
895 * 891 *
896 * Input: "mdev0:ch0@ep_81" 892 * Input: "mdev1:ep81"
897 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL 893 * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
898 */ 894 */
899static int split_string(char *buf, char **a, char **b, char **c) 895static int split_string(char *buf, char **a, char **b, char **c)
900{ 896{
@@ -962,13 +958,13 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
962 * Searches for a pair of device and channel and probes the AIM 958 * Searches for a pair of device and channel and probes the AIM
963 * 959 *
964 * Example: 960 * Example:
965 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link 961 * (1) echo "mdev0:ch6:my_rxchannel" >add_link
966 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link 962 * (2) echo "mdev1:ep81" >add_link
967 * 963 *
968 * (1) would create the device node /dev/my_rxchannel 964 * (1) would create the device node /dev/my_rxchannel
969 * (2) would create the device node /dev/mdev0-ch0@ep_81 965 * (2) would create the device node /dev/mdev1-ep81
970 */ 966 */
971static ssize_t store_add_link(struct most_aim_obj *aim_obj, 967static ssize_t add_link_store(struct most_aim_obj *aim_obj,
972 struct most_aim_attribute *attr, 968 struct most_aim_attribute *attr,
973 const char *buf, 969 const char *buf,
974 size_t len) 970 size_t len)
@@ -984,7 +980,6 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
984 size_t max_len = min_t(size_t, len + 1, STRING_SIZE); 980 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
985 981
986 strlcpy(buffer, buf, max_len); 982 strlcpy(buffer, buf, max_len);
987 strlcpy(aim_obj->add_link, buf, max_len);
988 983
989 ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod); 984 ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
990 if (ret) 985 if (ret)
@@ -1019,14 +1014,7 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
1019} 1014}
1020 1015
1021static struct most_aim_attribute most_aim_attr_add_link = 1016static struct most_aim_attribute most_aim_attr_add_link =
1022 __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link); 1017 __ATTR_RW(add_link);
1023
1024static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
1025 struct most_aim_attribute *attr,
1026 char *buf)
1027{
1028 return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
1029}
1030 1018
1031/** 1019/**
1032 * store_remove_link - store function for remove_link attribute 1020 * store_remove_link - store function for remove_link attribute
@@ -1036,9 +1024,9 @@ static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
1036 * @len: buffer length 1024 * @len: buffer length
1037 * 1025 *
1038 * Example: 1026 * Example:
1039 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link 1027 * echo "mdev0:ep81" >remove_link
1040 */ 1028 */
1041static ssize_t store_remove_link(struct most_aim_obj *aim_obj, 1029static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
1042 struct most_aim_attribute *attr, 1030 struct most_aim_attribute *attr,
1043 const char *buf, 1031 const char *buf,
1044 size_t len) 1032 size_t len)
@@ -1051,7 +1039,6 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
1051 size_t max_len = min_t(size_t, len + 1, STRING_SIZE); 1039 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
1052 1040
1053 strlcpy(buffer, buf, max_len); 1041 strlcpy(buffer, buf, max_len);
1054 strlcpy(aim_obj->remove_link, buf, max_len);
1055 ret = split_string(buffer, &mdev, &mdev_ch, NULL); 1042 ret = split_string(buffer, &mdev, &mdev_ch, NULL);
1056 if (ret) 1043 if (ret)
1057 return ret; 1044 return ret;
@@ -1070,8 +1057,7 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
1070} 1057}
1071 1058
1072static struct most_aim_attribute most_aim_attr_remove_link = 1059static struct most_aim_attribute most_aim_attr_remove_link =
1073 __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link, 1060 __ATTR_WO(remove_link);
1074 store_remove_link);
1075 1061
1076static struct attribute *most_aim_def_attrs[] = { 1062static struct attribute *most_aim_def_attrs[] = {
1077 &most_aim_attr_add_link.attr, 1063 &most_aim_attr_add_link.attr,
@@ -1761,9 +1747,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
1761 1747
1762 if (!name_suffix) 1748 if (!name_suffix)
1763 snprintf(channel_name, STRING_SIZE, "ch%d", i); 1749 snprintf(channel_name, STRING_SIZE, "ch%d", i);
1764 else if (name_suffix[0] == '@')
1765 snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
1766 name_suffix);
1767 else 1750 else
1768 snprintf(channel_name, STRING_SIZE, "%s", name_suffix); 1751 snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
1769 1752
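Most of the core.c churn above is sysfs cleanup: literal octal modes replace the S_IRUGO/S_IWUSR spellings (0444 and 0644 are the same bits), and the add_link/remove_link attributes switch to the __ATTR_RW()/__ATTR_WO() helpers. Those helpers derive the handler names from the attribute name, which is why the functions are renamed to the _show/_store suffix form. In outline (a sketch of the convention, not the full driver code):

    /* __ATTR_RW(add_link) produces an __ATTR() initialiser with mode 0644
     * (S_IRUGO | S_IWUSR) wired to add_link_show()/add_link_store(), so the
     * handlers must follow the <name>_show / <name>_store naming scheme. */
    static struct most_aim_attribute most_aim_attr_add_link = __ATTR_RW(add_link);

    /* __ATTR_WO(remove_link) is write-only (mode 0200) and only needs
     * remove_link_store(); the old show_remove_link() handler goes away. */
    static struct most_aim_attribute most_aim_attr_remove_link = __ATTR_WO(remove_link);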
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 552a7dcbf50b..fb0928a4fb97 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -172,29 +172,31 @@ static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
172/* 172/*
173 * Ethtool operation 173 * Ethtool operation
174 */ 174 */
175static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 175static int xlr_get_link_ksettings(struct net_device *ndev,
176 struct ethtool_link_ksettings *ecmd)
176{ 177{
177 struct xlr_net_priv *priv = netdev_priv(ndev); 178 struct xlr_net_priv *priv = netdev_priv(ndev);
178 struct phy_device *phydev = xlr_get_phydev(priv); 179 struct phy_device *phydev = xlr_get_phydev(priv);
179 180
180 if (!phydev) 181 if (!phydev)
181 return -ENODEV; 182 return -ENODEV;
182 return phy_ethtool_gset(phydev, ecmd); 183 return phy_ethtool_ksettings_get(phydev, ecmd);
183} 184}
184 185
185static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 186static int xlr_set_link_ksettings(struct net_device *ndev,
187 const struct ethtool_link_ksettings *ecmd)
186{ 188{
187 struct xlr_net_priv *priv = netdev_priv(ndev); 189 struct xlr_net_priv *priv = netdev_priv(ndev);
188 struct phy_device *phydev = xlr_get_phydev(priv); 190 struct phy_device *phydev = xlr_get_phydev(priv);
189 191
190 if (!phydev) 192 if (!phydev)
191 return -ENODEV; 193 return -ENODEV;
192 return phy_ethtool_sset(phydev, ecmd); 194 return phy_ethtool_ksettings_set(phydev, ecmd);
193} 195}
194 196
195static const struct ethtool_ops xlr_ethtool_ops = { 197static const struct ethtool_ops xlr_ethtool_ops = {
196 .get_settings = xlr_get_settings, 198 .get_link_ksettings = xlr_get_link_ksettings,
197 .set_settings = xlr_set_settings, 199 .set_link_ksettings = xlr_set_link_ksettings,
198}; 200};
199 201
200/* 202/*
@@ -1005,10 +1007,8 @@ static int xlr_net_probe(struct platform_device *pdev)
1005 */ 1007 */
1006 adapter = (struct xlr_adapter *) 1008 adapter = (struct xlr_adapter *)
1007 devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL); 1009 devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
1008 if (!adapter) { 1010 if (!adapter)
1009 err = -ENOMEM; 1011 return -ENOMEM;
1010 return err;
1011 }
1012 1012
1013 /* 1013 /*
1014 * XLR and XLS have 1 and 2 NAE controller respectively 1014 * XLR and XLS have 1 and 2 NAE controller respectively
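The xlr_net.c change above is the standard migration from the legacy ethtool get_settings/set_settings pair to the link_ksettings interface; for a PHY-backed driver the callbacks simply delegate to phy_ethtool_ksettings_get()/_set(). The new API moves the scalar link parameters into a .base sub-struct and turns the supported/advertising masks into bitmaps, so a consumer of the new callback reads them roughly like this (illustrative only; show_link() is a hypothetical consumer, not part of the driver):

    static void show_link(struct net_device *dev)
    {
            struct ethtool_link_ksettings ecmd;

            /* the driver callback added above fills this in via phylib */
            if (dev->ethtool_ops->get_link_ksettings(dev, &ecmd))
                    return;

            netdev_info(dev, "speed %u, %s duplex\n",
                        ecmd.base.speed,
                        ecmd.base.duplex == DUPLEX_FULL ? "full" : "half");
    }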
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788dbd86..36109cec8706 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -308,7 +308,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
308 mod_timer(&pmlmepriv->scan_to_timer, 308 mod_timer(&pmlmepriv->scan_to_timer,
309 jiffies + msecs_to_jiffies(SCANNING_TIMEOUT)); 309 jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
310 310
311 rtw_led_control(padapter, LED_CTL_SITE_SURVEY); 311 LedControl8188eu(padapter, LED_CTL_SITE_SURVEY);
312 312
313 pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ 313 pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
314 } else { 314 } else {
@@ -335,7 +335,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
335 u8 res = _SUCCESS; 335 u8 res = _SUCCESS;
336 336
337 337
338 rtw_led_control(padapter, LED_CTL_START_TO_LINK); 338 LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
339 339
340 if (pmlmepriv->assoc_ssid.SsidLength == 0) 340 if (pmlmepriv->assoc_ssid.SsidLength == 0)
341 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); 341 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
@@ -379,7 +379,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
379 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 379 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
380 380
381 381
382 rtw_led_control(padapter, LED_CTL_START_TO_LINK); 382 LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
383 383
384 if (pmlmepriv->assoc_ssid.SsidLength == 0) 384 if (pmlmepriv->assoc_ssid.SsidLength == 0)
385 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n")); 385 RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 14461cf34037..c1478cff5854 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -30,7 +30,7 @@ void BlinkTimerCallback(unsigned long data)
30 if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped)) 30 if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
31 return; 31 return;
32 32
33 schedule_work(&(pLed->BlinkWorkItem)); 33 schedule_work(&pLed->BlinkWorkItem);
34} 34}
35 35
36/* */ 36/* */
@@ -60,7 +60,6 @@ void ResetLedStatus(struct LED_871x *pLed)
60 60
61 pLed->bLedNoLinkBlinkInProgress = false; 61 pLed->bLedNoLinkBlinkInProgress = false;
62 pLed->bLedLinkBlinkInProgress = false; 62 pLed->bLedLinkBlinkInProgress = false;
63 pLed->bLedStartToLinkBlinkInProgress = false;
64 pLed->bLedScanBlinkInProgress = false; 63 pLed->bLedScanBlinkInProgress = false;
65} 64}
66 65
@@ -72,10 +71,10 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
72 71
73 ResetLedStatus(pLed); 72 ResetLedStatus(pLed);
74 73
75 setup_timer(&(pLed->BlinkTimer), BlinkTimerCallback, 74 setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
76 (unsigned long)pLed); 75 (unsigned long)pLed);
77 76
78 INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback); 77 INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
79} 78}
80 79
81 80
@@ -85,8 +84,8 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
85/* */ 84/* */
86void DeInitLed871x(struct LED_871x *pLed) 85void DeInitLed871x(struct LED_871x *pLed)
87{ 86{
88 cancel_work_sync(&(pLed->BlinkWorkItem)); 87 cancel_work_sync(&pLed->BlinkWorkItem);
89 del_timer_sync(&(pLed->BlinkTimer)); 88 del_timer_sync(&pLed->BlinkTimer);
90 ResetLedStatus(pLed); 89 ResetLedStatus(pLed);
91} 90}
92 91
@@ -99,7 +98,7 @@ void DeInitLed871x(struct LED_871x *pLed)
99static void SwLedBlink1(struct LED_871x *pLed) 98static void SwLedBlink1(struct LED_871x *pLed)
100{ 99{
101 struct adapter *padapter = pLed->padapter; 100 struct adapter *padapter = pLed->padapter;
102 struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); 101 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
103 u8 bStopBlinking = false; 102 u8 bStopBlinking = false;
104 103
105 /* Change LED according to BlinkingLedState specified. */ 104 /* Change LED according to BlinkingLedState specified. */
@@ -247,9 +246,9 @@ static void SwLedBlink1(struct LED_871x *pLed)
247 /* ALPHA, added by chiyoko, 20090106 */ 246 /* ALPHA, added by chiyoko, 20090106 */
248static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction) 247static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
249{ 248{
250 struct led_priv *ledpriv = &(padapter->ledpriv); 249 struct led_priv *ledpriv = &padapter->ledpriv;
251 struct LED_871x *pLed = &(ledpriv->SwLed0); 250 struct LED_871x *pLed = &ledpriv->SwLed0;
252 struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); 251 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
253 252
254 switch (LedAction) { 253 switch (LedAction) {
255 case LED_CTL_POWER_ON: 254 case LED_CTL_POWER_ON:
@@ -259,11 +258,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
259 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) 258 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
260 return; 259 return;
261 if (pLed->bLedLinkBlinkInProgress) { 260 if (pLed->bLedLinkBlinkInProgress) {
262 del_timer_sync(&(pLed->BlinkTimer)); 261 del_timer_sync(&pLed->BlinkTimer);
263 pLed->bLedLinkBlinkInProgress = false; 262 pLed->bLedLinkBlinkInProgress = false;
264 } 263 }
265 if (pLed->bLedBlinkInProgress) { 264 if (pLed->bLedBlinkInProgress) {
266 del_timer_sync(&(pLed->BlinkTimer)); 265 del_timer_sync(&pLed->BlinkTimer);
267 pLed->bLedBlinkInProgress = false; 266 pLed->bLedBlinkInProgress = false;
268 } 267 }
269 268
@@ -282,11 +281,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
282 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) 281 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
283 return; 282 return;
284 if (pLed->bLedNoLinkBlinkInProgress) { 283 if (pLed->bLedNoLinkBlinkInProgress) {
285 del_timer_sync(&(pLed->BlinkTimer)); 284 del_timer_sync(&pLed->BlinkTimer);
286 pLed->bLedNoLinkBlinkInProgress = false; 285 pLed->bLedNoLinkBlinkInProgress = false;
287 } 286 }
288 if (pLed->bLedBlinkInProgress) { 287 if (pLed->bLedBlinkInProgress) {
289 del_timer_sync(&(pLed->BlinkTimer)); 288 del_timer_sync(&pLed->BlinkTimer);
290 pLed->bLedBlinkInProgress = false; 289 pLed->bLedBlinkInProgress = false;
291 } 290 }
292 pLed->bLedLinkBlinkInProgress = true; 291 pLed->bLedLinkBlinkInProgress = true;
@@ -306,15 +305,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
306 if (IS_LED_WPS_BLINKING(pLed)) 305 if (IS_LED_WPS_BLINKING(pLed))
307 return; 306 return;
308 if (pLed->bLedNoLinkBlinkInProgress) { 307 if (pLed->bLedNoLinkBlinkInProgress) {
309 del_timer_sync(&(pLed->BlinkTimer)); 308 del_timer_sync(&pLed->BlinkTimer);
310 pLed->bLedNoLinkBlinkInProgress = false; 309 pLed->bLedNoLinkBlinkInProgress = false;
311 } 310 }
312 if (pLed->bLedLinkBlinkInProgress) { 311 if (pLed->bLedLinkBlinkInProgress) {
313 del_timer_sync(&(pLed->BlinkTimer)); 312 del_timer_sync(&pLed->BlinkTimer);
314 pLed->bLedLinkBlinkInProgress = false; 313 pLed->bLedLinkBlinkInProgress = false;
315 } 314 }
316 if (pLed->bLedBlinkInProgress) { 315 if (pLed->bLedBlinkInProgress) {
317 del_timer_sync(&(pLed->BlinkTimer)); 316 del_timer_sync(&pLed->BlinkTimer);
318 pLed->bLedBlinkInProgress = false; 317 pLed->bLedBlinkInProgress = false;
319 } 318 }
320 pLed->bLedScanBlinkInProgress = true; 319 pLed->bLedScanBlinkInProgress = true;
@@ -326,7 +325,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
326 pLed->BlinkingLedState = RTW_LED_ON; 325 pLed->BlinkingLedState = RTW_LED_ON;
327 mod_timer(&pLed->BlinkTimer, jiffies + 326 mod_timer(&pLed->BlinkTimer, jiffies +
328 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); 327 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
329 } 328 }
330 break; 329 break;
331 case LED_CTL_TX: 330 case LED_CTL_TX:
332 case LED_CTL_RX: 331 case LED_CTL_RX:
@@ -334,11 +333,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
334 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) 333 if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
335 return; 334 return;
336 if (pLed->bLedNoLinkBlinkInProgress) { 335 if (pLed->bLedNoLinkBlinkInProgress) {
337 del_timer_sync(&(pLed->BlinkTimer)); 336 del_timer_sync(&pLed->BlinkTimer);
338 pLed->bLedNoLinkBlinkInProgress = false; 337 pLed->bLedNoLinkBlinkInProgress = false;
339 } 338 }
340 if (pLed->bLedLinkBlinkInProgress) { 339 if (pLed->bLedLinkBlinkInProgress) {
341 del_timer_sync(&(pLed->BlinkTimer)); 340 del_timer_sync(&pLed->BlinkTimer);
342 pLed->bLedLinkBlinkInProgress = false; 341 pLed->bLedLinkBlinkInProgress = false;
343 } 342 }
344 pLed->bLedBlinkInProgress = true; 343 pLed->bLedBlinkInProgress = true;
@@ -354,21 +353,21 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
354 break; 353 break;
355 case LED_CTL_START_WPS: /* wait until xinpin finish */ 354 case LED_CTL_START_WPS: /* wait until xinpin finish */
356 case LED_CTL_START_WPS_BOTTON: 355 case LED_CTL_START_WPS_BOTTON:
357 if (!pLed->bLedWPSBlinkInProgress) { 356 if (!pLed->bLedWPSBlinkInProgress) {
358 if (pLed->bLedNoLinkBlinkInProgress) { 357 if (pLed->bLedNoLinkBlinkInProgress) {
359 del_timer_sync(&(pLed->BlinkTimer)); 358 del_timer_sync(&pLed->BlinkTimer);
360 pLed->bLedNoLinkBlinkInProgress = false; 359 pLed->bLedNoLinkBlinkInProgress = false;
361 } 360 }
362 if (pLed->bLedLinkBlinkInProgress) { 361 if (pLed->bLedLinkBlinkInProgress) {
363 del_timer_sync(&(pLed->BlinkTimer)); 362 del_timer_sync(&pLed->BlinkTimer);
364 pLed->bLedLinkBlinkInProgress = false; 363 pLed->bLedLinkBlinkInProgress = false;
365 } 364 }
366 if (pLed->bLedBlinkInProgress) { 365 if (pLed->bLedBlinkInProgress) {
367 del_timer_sync(&(pLed->BlinkTimer)); 366 del_timer_sync(&pLed->BlinkTimer);
368 pLed->bLedBlinkInProgress = false; 367 pLed->bLedBlinkInProgress = false;
369 } 368 }
370 if (pLed->bLedScanBlinkInProgress) { 369 if (pLed->bLedScanBlinkInProgress) {
371 del_timer_sync(&(pLed->BlinkTimer)); 370 del_timer_sync(&pLed->BlinkTimer);
372 pLed->bLedScanBlinkInProgress = false; 371 pLed->bLedScanBlinkInProgress = false;
373 } 372 }
374 pLed->bLedWPSBlinkInProgress = true; 373 pLed->bLedWPSBlinkInProgress = true;
@@ -379,27 +378,27 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
379 pLed->BlinkingLedState = RTW_LED_ON; 378 pLed->BlinkingLedState = RTW_LED_ON;
380 mod_timer(&pLed->BlinkTimer, jiffies + 379 mod_timer(&pLed->BlinkTimer, jiffies +
381 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); 380 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
382 } 381 }
383 break; 382 break;
384 case LED_CTL_STOP_WPS: 383 case LED_CTL_STOP_WPS:
385 if (pLed->bLedNoLinkBlinkInProgress) { 384 if (pLed->bLedNoLinkBlinkInProgress) {
386 del_timer_sync(&(pLed->BlinkTimer)); 385 del_timer_sync(&pLed->BlinkTimer);
387 pLed->bLedNoLinkBlinkInProgress = false; 386 pLed->bLedNoLinkBlinkInProgress = false;
388 } 387 }
389 if (pLed->bLedLinkBlinkInProgress) { 388 if (pLed->bLedLinkBlinkInProgress) {
390 del_timer_sync(&(pLed->BlinkTimer)); 389 del_timer_sync(&pLed->BlinkTimer);
391 pLed->bLedLinkBlinkInProgress = false; 390 pLed->bLedLinkBlinkInProgress = false;
392 } 391 }
393 if (pLed->bLedBlinkInProgress) { 392 if (pLed->bLedBlinkInProgress) {
394 del_timer_sync(&(pLed->BlinkTimer)); 393 del_timer_sync(&pLed->BlinkTimer);
395 pLed->bLedBlinkInProgress = false; 394 pLed->bLedBlinkInProgress = false;
396 } 395 }
397 if (pLed->bLedScanBlinkInProgress) { 396 if (pLed->bLedScanBlinkInProgress) {
398 del_timer_sync(&(pLed->BlinkTimer)); 397 del_timer_sync(&pLed->BlinkTimer);
399 pLed->bLedScanBlinkInProgress = false; 398 pLed->bLedScanBlinkInProgress = false;
400 } 399 }
401 if (pLed->bLedWPSBlinkInProgress) 400 if (pLed->bLedWPSBlinkInProgress)
402 del_timer_sync(&(pLed->BlinkTimer)); 401 del_timer_sync(&pLed->BlinkTimer);
403 else 402 else
404 pLed->bLedWPSBlinkInProgress = true; 403 pLed->bLedWPSBlinkInProgress = true;
405 pLed->CurrLedState = LED_BLINK_WPS_STOP; 404 pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -415,7 +414,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
415 break; 414 break;
416 case LED_CTL_STOP_WPS_FAIL: 415 case LED_CTL_STOP_WPS_FAIL:
417 if (pLed->bLedWPSBlinkInProgress) { 416 if (pLed->bLedWPSBlinkInProgress) {
418 del_timer_sync(&(pLed->BlinkTimer)); 417 del_timer_sync(&pLed->BlinkTimer);
419 pLed->bLedWPSBlinkInProgress = false; 418 pLed->bLedWPSBlinkInProgress = false;
420 } 419 }
421 pLed->bLedNoLinkBlinkInProgress = true; 420 pLed->bLedNoLinkBlinkInProgress = true;
@@ -431,23 +430,23 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
431 pLed->CurrLedState = RTW_LED_OFF; 430 pLed->CurrLedState = RTW_LED_OFF;
432 pLed->BlinkingLedState = RTW_LED_OFF; 431 pLed->BlinkingLedState = RTW_LED_OFF;
433 if (pLed->bLedNoLinkBlinkInProgress) { 432 if (pLed->bLedNoLinkBlinkInProgress) {
434 del_timer_sync(&(pLed->BlinkTimer)); 433 del_timer_sync(&pLed->BlinkTimer);
435 pLed->bLedNoLinkBlinkInProgress = false; 434 pLed->bLedNoLinkBlinkInProgress = false;
436 } 435 }
437 if (pLed->bLedLinkBlinkInProgress) { 436 if (pLed->bLedLinkBlinkInProgress) {
438 del_timer_sync(&(pLed->BlinkTimer)); 437 del_timer_sync(&pLed->BlinkTimer);
439 pLed->bLedLinkBlinkInProgress = false; 438 pLed->bLedLinkBlinkInProgress = false;
440 } 439 }
441 if (pLed->bLedBlinkInProgress) { 440 if (pLed->bLedBlinkInProgress) {
442 del_timer_sync(&(pLed->BlinkTimer)); 441 del_timer_sync(&pLed->BlinkTimer);
443 pLed->bLedBlinkInProgress = false; 442 pLed->bLedBlinkInProgress = false;
444 } 443 }
445 if (pLed->bLedWPSBlinkInProgress) { 444 if (pLed->bLedWPSBlinkInProgress) {
446 del_timer_sync(&(pLed->BlinkTimer)); 445 del_timer_sync(&pLed->BlinkTimer);
447 pLed->bLedWPSBlinkInProgress = false; 446 pLed->bLedWPSBlinkInProgress = false;
448 } 447 }
449 if (pLed->bLedScanBlinkInProgress) { 448 if (pLed->bLedScanBlinkInProgress) {
450 del_timer_sync(&(pLed->BlinkTimer)); 449 del_timer_sync(&pLed->BlinkTimer);
451 pLed->bLedScanBlinkInProgress = false; 450 pLed->bLedScanBlinkInProgress = false;
452 } 451 }
453 SwLedOff(padapter, pLed); 452 SwLedOff(padapter, pLed);
@@ -475,15 +474,10 @@ void BlinkHandler(struct LED_871x *pLed)
475 474
476void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction) 475void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
477{ 476{
478 struct led_priv *ledpriv = &(padapter->ledpriv);
479
480 if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) || 477 if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
481 (!padapter->hw_init_completed)) 478 (!padapter->hw_init_completed))
482 return; 479 return;
483 480
484 if (!ledpriv->bRegUseLed)
485 return;
486
487 if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on && 481 if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
488 padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) && 482 padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
489 (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX || 483 (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd05010f..032f783b0d83 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -801,7 +801,7 @@ void rtw_indicate_connect(struct adapter *padapter)
801 if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) { 801 if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
802 set_fwstate(pmlmepriv, _FW_LINKED); 802 set_fwstate(pmlmepriv, _FW_LINKED);
803 803
804 rtw_led_control(padapter, LED_CTL_LINK); 804 LedControl8188eu(padapter, LED_CTL_LINK);
805 805
806 rtw_os_indicate_connect(padapter); 806 rtw_os_indicate_connect(padapter);
807 } 807 }
@@ -833,7 +833,7 @@ void rtw_indicate_disconnect(struct adapter *padapter)
833 rtw_os_indicate_disconnect(padapter); 833 rtw_os_indicate_disconnect(padapter);
834 834
835 _clr_fwstate_(pmlmepriv, _FW_LINKED); 835 _clr_fwstate_(pmlmepriv, _FW_LINKED);
836 rtw_led_control(padapter, LED_CTL_NO_LINK); 836 LedControl8188eu(padapter, LED_CTL_NO_LINK);
837 rtw_clear_scan_deny(padapter); 837 rtw_clear_scan_deny(padapter);
838 } 838 }
839 839
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index fb13df586441..d9c114776cab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -133,7 +133,9 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
133 {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */ 133 {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
134 }; 134 };
135 135
136 static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */ 136 static const struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {
137 0x03
138 }; /* use the combination for max channel numbers */
137 139
138 /* 140 /*
139 * Search the @param channel_num in given @param channel_set 141 * Search the @param channel_num in given @param channel_set
@@ -667,10 +669,10 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
667 get_rate_set(padapter, bssrate, &bssrate_len); 669 get_rate_set(padapter, bssrate, &bssrate_len);
668 670
669 if (bssrate_len > 8) { 671 if (bssrate_len > 8) {
670 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen)); 672 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
671 pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); 673 pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
672 } else { 674 } else {
673 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen)); 675 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
674 } 676 }
675 677
676 /* add wps_ie for wps2.0 */ 678 /* add wps_ie for wps2.0 */
@@ -999,7 +1001,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
999 } 1001 }
1000 1002
1001 if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) 1003 if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
1002 pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen)); 1004 pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
1003 1005
1004 /* add WPS IE ie for wps 2.0 */ 1006 /* add WPS IE ie for wps 2.0 */
1005 if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) { 1007 if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -1120,10 +1122,10 @@ static void issue_assocreq(struct adapter *padapter)
1120 1122
1121 1123
1122 if (bssrate_len > 8) { 1124 if (bssrate_len > 8) {
1123 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen)); 1125 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
1124 pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); 1126 pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
1125 } else { 1127 } else {
1126 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen)); 1128 pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
1127 } 1129 }
1128 1130
1129 /* RSN */ 1131 /* RSN */
@@ -1165,7 +1167,7 @@ static void issue_assocreq(struct adapter *padapter)
1165 memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16); 1167 memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
1166 break; 1168 break;
1167 } 1169 }
1168 pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen)); 1170 pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
1169 } 1171 }
1170 } 1172 }
1171 1173
@@ -1194,7 +1196,7 @@ static void issue_assocreq(struct adapter *padapter)
1194 } 1196 }
1195 1197
1196 if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) 1198 if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
1197 pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen)); 1199 pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
1198 1200
1199 pattrib->last_txcmdsz = pattrib->pktlen; 1201 pattrib->last_txcmdsz = pattrib->pktlen;
1200 dump_mgntframe(padapter, pmgntframe); 1202 dump_mgntframe(padapter, pmgntframe);
@@ -2644,7 +2646,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
2644 ret = rtw_check_bcn_info(padapter, pframe, len); 2646 ret = rtw_check_bcn_info(padapter, pframe, len);
2645 if (!ret) { 2647 if (!ret) {
2646 DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n "); 2648 DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
2647 receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535); 2649 receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
2648 return _SUCCESS; 2650 return _SUCCESS;
2649 } 2651 }
2650 /* update WMM, ERP in the beacon */ 2652 /* update WMM, ERP in the beacon */
@@ -2802,7 +2804,7 @@ static unsigned int OnAuth(struct adapter *padapter,
2802 /* checking for challenging txt... */ 2804 /* checking for challenging txt... */
2803 DBG_88E("checking for challenging txt...\n"); 2805 DBG_88E("checking for challenging txt...\n");
2804 2806
2805 p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len, 2807 p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
2806 len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4); 2808 len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
2807 2809
2808 if ((p == NULL) || (ie_len <= 0)) { 2810 if ((p == NULL) || (ie_len <= 0)) {
@@ -3046,7 +3048,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
3046 memcpy(supportRate, p+2, ie_len); 3048 memcpy(supportRate, p+2, ie_len);
3047 supportRateNum = ie_len; 3049 supportRateNum = ie_len;
3048 3050
3049 p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len, 3051 p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
3050 pkt_len - WLAN_HDR_A3_LEN - ie_offset); 3052 pkt_len - WLAN_HDR_A3_LEN - ie_offset);
3051 if (p != NULL) { 3053 if (p != NULL) {
3052 if (supportRateNum <= sizeof(supportRate)) { 3054 if (supportRateNum <= sizeof(supportRate)) {
@@ -3146,7 +3148,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
3146 if (pmlmepriv->wps_beacon_ie) { 3148 if (pmlmepriv->wps_beacon_ie) {
3147 u8 selected_registrar = 0; 3149 u8 selected_registrar = 0;
3148 3150
3149 rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR , &selected_registrar, NULL); 3151 rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
3150 3152
3151 if (!selected_registrar) { 3153 if (!selected_registrar) {
3152 DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n"); 3154 DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
@@ -3511,7 +3513,7 @@ static unsigned int OnDeAuth(struct adapter *padapter,
3511 DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n", 3513 DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
3512 reason, GetAddr3Ptr(pframe)); 3514 reason, GetAddr3Ptr(pframe));
3513 3515
3514 receive_disconnect(padapter, GetAddr3Ptr(pframe) , reason); 3516 receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
3515 } 3517 }
3516 pmlmepriv->LinkDetectInfo.bBusyTraffic = false; 3518 pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
3517 return _SUCCESS; 3519 return _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 0b70fe7d3b72..4032121a06f3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -56,7 +56,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
56 if (check_fwstate(pmlmepriv, _FW_LINKED)) { 56 if (check_fwstate(pmlmepriv, _FW_LINKED)) {
57 _clr_fwstate_(pmlmepriv, _FW_LINKED); 57 _clr_fwstate_(pmlmepriv, _FW_LINKED);
58 58
59 rtw_led_control(padapter, LED_CTL_NO_LINK); 59 LedControl8188eu(padapter, LED_CTL_NO_LINK);
60 60
61 rtw_os_indicate_disconnect(padapter); 61 rtw_os_indicate_disconnect(padapter);
62 62
@@ -94,7 +94,7 @@ static int rtw_hw_resume(struct adapter *padapter)
94 pwrpriv->bips_processing = true; 94 pwrpriv->bips_processing = true;
95 rtw_reset_drv_sw(padapter); 95 rtw_reset_drv_sw(padapter);
96 96
97 if (pm_netdev_open(pnetdev, false) != 0) { 97 if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) {
98 mutex_unlock(&pwrpriv->mutex_lock); 98 mutex_unlock(&pwrpriv->mutex_lock);
99 goto error_exit; 99 goto error_exit;
100 } 100 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index b87cbbbee054..3e6edb63d36b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -66,16 +66,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
66 66
67 precvpriv->adapter = padapter; 67 precvpriv->adapter = padapter;
68 68
69 precvpriv->free_recvframe_cnt = NR_RECVFRAME;
70
71 precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ); 69 precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
72 70
73 if (!precvpriv->pallocated_frame_buf) 71 if (!precvpriv->pallocated_frame_buf)
74 return _FAIL; 72 return _FAIL;
75 73
76 precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ); 74 precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
77
78 precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
79 75
80 for (i = 0; i < NR_RECVFRAME; i++) { 76 for (i = 0; i < NR_RECVFRAME; i++) {
81 INIT_LIST_HEAD(&(precvframe->list)); 77 INIT_LIST_HEAD(&(precvframe->list));
@@ -83,15 +79,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
83 list_add_tail(&(precvframe->list), 79 list_add_tail(&(precvframe->list),
84 &(precvpriv->free_recv_queue.queue)); 80 &(precvpriv->free_recv_queue.queue));
85 81
86 rtw_os_recv_resource_alloc(precvframe); 82 precvframe->pkt = NULL;
87
88 precvframe->len = 0; 83 precvframe->len = 0;
89 84
90 precvframe->adapter = padapter; 85 precvframe->adapter = padapter;
91 precvframe++; 86 precvframe++;
92 } 87 }
93 precvpriv->rx_pending_cnt = 1;
94
95 res = rtw_hal_init_recv_priv(padapter); 88 res = rtw_hal_init_recv_priv(padapter);
96 89
97 setup_timer(&precvpriv->signal_stat_timer, 90 setup_timer(&precvpriv->signal_stat_timer,
@@ -120,20 +113,11 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
120 struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue) 113 struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
121 { 114 {
122 struct recv_frame *hdr; 115 struct recv_frame *hdr;
123 struct adapter *padapter;
124 struct recv_priv *precvpriv;
125 116
126 hdr = list_first_entry_or_null(&pfree_recv_queue->queue, 117 hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
127 struct recv_frame, list); 118 struct recv_frame, list);
128 if (hdr) { 119 if (hdr)
129 list_del_init(&hdr->list); 120 list_del_init(&hdr->list);
130 padapter = hdr->adapter;
131 if (padapter) {
132 precvpriv = &padapter->recvpriv;
133 if (pfree_recv_queue == &precvpriv->free_recv_queue)
134 precvpriv->free_recvframe_cnt--;
135 }
136 }
137 121
138 return hdr; 122 return hdr;
139 } 123 }
@@ -154,13 +138,8 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
154 int rtw_free_recvframe(struct recv_frame *precvframe, 138 int rtw_free_recvframe(struct recv_frame *precvframe,
155 struct __queue *pfree_recv_queue) 139 struct __queue *pfree_recv_queue)
156{ 140{
157 struct adapter *padapter;
158 struct recv_priv *precvpriv;
159
160 if (!precvframe) 141 if (!precvframe)
161 return _FAIL; 142 return _FAIL;
162 padapter = precvframe->adapter;
163 precvpriv = &padapter->recvpriv;
164 if (precvframe->pkt) { 143 if (precvframe->pkt) {
165 dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */ 144 dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
166 precvframe->pkt = NULL; 145 precvframe->pkt = NULL;
@@ -174,29 +153,16 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
174 153
175 list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue)); 154 list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
176 155
177 if (padapter != NULL) { 156 spin_unlock_bh(&pfree_recv_queue->lock);
178 if (pfree_recv_queue == &precvpriv->free_recv_queue)
179 precvpriv->free_recvframe_cnt++;
180 }
181
182 spin_unlock_bh(&pfree_recv_queue->lock);
183 157
184 return _SUCCESS; 158 return _SUCCESS;
185 } 159 }
186 160
187 int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue) 161 int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
188 { 162 {
189 struct adapter *padapter = precvframe->adapter;
190 struct recv_priv *precvpriv = &padapter->recvpriv;
191
192 list_del_init(&(precvframe->list)); 163 list_del_init(&(precvframe->list));
193 list_add_tail(&(precvframe->list), get_list_head(queue)); 164 list_add_tail(&(precvframe->list), get_list_head(queue));
194 165
195 if (padapter != NULL) {
196 if (queue == &precvpriv->free_recv_queue)
197 precvpriv->free_recvframe_cnt++;
198 }
199
200 return _SUCCESS; 166 return _SUCCESS;
201 } 167 }
202 168
@@ -1294,7 +1260,7 @@ static int validate_recv_frame(struct adapter *adapter,
1294 retval = _FAIL; /* only data frame return _SUCCESS */ 1260 retval = _FAIL; /* only data frame return _SUCCESS */
1295 break; 1261 break;
1296 case WIFI_DATA_TYPE: /* data */ 1262 case WIFI_DATA_TYPE: /* data */
1297 rtw_led_control(adapter, LED_CTL_RX); 1263 LedControl8188eu(adapter, LED_CTL_RX);
1298 pattrib->qos = (subtype & BIT(7)) ? 1 : 0; 1264 pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
1299 retval = validate_recv_data_frame(adapter, precv_frame); 1265 retval = validate_recv_data_frame(adapter, precv_frame);
1300 if (retval == _FAIL) { 1266 if (retval == _FAIL) {
@@ -1989,7 +1955,7 @@ static int recv_func_posthandle(struct adapter *padapter,
1989 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; 1955 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
1990 1956
1991 /* DATA FRAME */ 1957 /* DATA FRAME */
1992 rtw_led_control(padapter, LED_CTL_RX); 1958 LedControl8188eu(padapter, LED_CTL_RX);
1993 1959
1994 prframe = decryptor(padapter, prframe); 1960 prframe = decryptor(padapter, prframe);
1995 if (prframe == NULL) { 1961 if (prframe == NULL) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index a71e25294add..941d1a069d20 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
310 /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */ 310 /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
311 for (i = 0; i < 16; i++) { 311 for (i = 0; i < 16; i++) {
312 struct list_head *phead, *plist; 312 struct list_head *phead, *plist;
313 struct recv_frame *prhdr;
314 struct recv_frame *prframe; 313 struct recv_frame *prframe;
315 struct __queue *ppending_recvframe_queue; 314 struct __queue *ppending_recvframe_queue;
316 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; 315 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -327,8 +326,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
327 plist = phead->next; 326 plist = phead->next;
328 327
329 while (!list_empty(phead)) { 328 while (!list_empty(phead)) {
330 prhdr = container_of(plist, struct recv_frame, list); 329 prframe = container_of(plist, struct recv_frame, list);
331 prframe = (struct recv_frame *)prhdr;
332 330
333 plist = plist->next; 331 plist = plist->next;
334 332
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 0f8b8e0bffdf..b60b126b860e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -220,7 +220,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
220 struct adapter *padapter = pxmitpriv->adapter; 220 struct adapter *padapter = pxmitpriv->adapter;
221 struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf; 221 struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
222 struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; 222 struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
223 u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
224 u32 num_xmit_extbuf = NR_XMIT_EXTBUFF; 223 u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
225 224
226 if (pxmitpriv->pxmit_frame_buf == NULL) 225 if (pxmitpriv->pxmit_frame_buf == NULL)
@@ -233,7 +232,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
233 } 232 }
234 233
235 for (i = 0; i < NR_XMITBUFF; i++) { 234 for (i = 0; i < NR_XMITBUFF; i++) {
236 rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); 235 rtw_os_xmit_resource_free(pxmitbuf);
237 pxmitbuf++; 236 pxmitbuf++;
238 } 237 }
239 238
@@ -243,7 +242,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
243 /* free xmit extension buff */ 242 /* free xmit extension buff */
244 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; 243 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
245 for (i = 0; i < num_xmit_extbuf; i++) { 244 for (i = 0; i < num_xmit_extbuf; i++) {
246 rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ)); 245 rtw_os_xmit_resource_free(pxmitbuf);
247 pxmitbuf++; 246 pxmitbuf++;
248 } 247 }
249 248
@@ -1064,7 +1063,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
1064 1063
1065 frg_inx++; 1064 frg_inx++;
1066 1065
1067 if (bmcst || rtw_endofpktfile(&pktfile)) { 1066 if (bmcst || pktfile.pkt_len == 0) {
1068 pattrib->nr_frags = frg_inx; 1067 pattrib->nr_frags = frg_inx;
1069 1068
1070 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) + 1069 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1677,7 +1676,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
1677 } 1676 }
1678 pxmitframe->pkt = *ppkt; 1677 pxmitframe->pkt = *ppkt;
1679 1678
1680 rtw_led_control(padapter, LED_CTL_TX); 1679 LedControl8188eu(padapter, LED_CTL_TX);
1681 1680
1682 pxmitframe->attrib.qsel = pxmitframe->attrib.priority; 1681 pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
1683 1682
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index d983a8029f4c..16476e735011 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -991,7 +991,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
991 { 991 {
992 pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true; 992 pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
993 pDM_Odm->RFCalibrateInfo.TXPowercount = 0; 993 pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
994 pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
995 if (*(pDM_Odm->mp_mode) != 1) 994 if (*(pDM_Odm->mp_mode) != 1)
996 pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true; 995 pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
997 MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl); 996 MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 5192ef70bcfc..35c91e06cc47 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -40,12 +40,11 @@ static u32 cal_bit_shift(u32 bitmask)
40 40
41 u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask) 41 u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask)
42 { 42 {
43 u32 return_value = 0, original_value, bit_shift; 43 u32 original_value, bit_shift;
44 44
45 original_value = usb_read32(adapt, regaddr); 45 original_value = usb_read32(adapt, regaddr);
46 bit_shift = cal_bit_shift(bitmask); 46 bit_shift = cal_bit_shift(bitmask);
47 return_value = (original_value & bitmask) >> bit_shift; 47 return (original_value & bitmask) >> bit_shift;
48 return return_value;
49 } 48 }
50 49
51 void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data) 50 void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data)
@@ -119,12 +118,11 @@ static void rf_serial_write(struct adapter *adapt,
119 u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path, 118 u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
120 u32 reg_addr, u32 bit_mask) 119 u32 reg_addr, u32 bit_mask)
121 { 120 {
122 u32 original_value, readback_value, bit_shift; 121 u32 original_value, bit_shift;
123 122
124 original_value = rf_serial_read(adapt, rf_path, reg_addr); 123 original_value = rf_serial_read(adapt, rf_path, reg_addr);
125 bit_shift = cal_bit_shift(bit_mask); 124 bit_shift = cal_bit_shift(bit_mask);
126 readback_value = (original_value & bit_mask) >> bit_shift; 125 return (original_value & bit_mask) >> bit_shift;
127 return readback_value;
128 } 126 }
129 127
130 void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path, 128 void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
@@ -210,13 +208,6 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
210 u8 reg_bw_opmode; 208 u8 reg_bw_opmode;
211 u8 reg_prsr_rsc; 209 u8 reg_prsr_rsc;
212 210
213 if (hal_data->rf_chip == RF_PSEUDO_11N)
214 return;
215
216 /* There is no 40MHz mode in RF_8225. */
217 if (hal_data->rf_chip == RF_8225)
218 return;
219
220 if (adapt->bDriverStopped) 211 if (adapt->bDriverStopped)
221 return; 212 return;
222 213
@@ -265,8 +256,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
265 } 256 }
266 257
267 /* Set RF related register */ 258 /* Set RF related register */
268 if (hal_data->rf_chip == RF_6052) 259 rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
269 rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
270 } 260 }
271 261
272 void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth, 262 void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
@@ -286,7 +276,6 @@ void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
286 276
287 static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel) 277 static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
288 { 278 {
289 u8 rf_path;
290 u32 param1, param2; 279 u32 param1, param2;
291 struct hal_data_8188e *hal_data = adapt->HalData; 280 struct hal_data_8188e *hal_data = adapt->HalData;
292 281
@@ -294,12 +283,10 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
294 283
295 param1 = RF_CHNLBW; 284 param1 = RF_CHNLBW;
296 param2 = channel; 285 param2 = channel;
297 for (rf_path = 0; rf_path < hal_data->NumTotalRFPath; rf_path++) { 286 hal_data->RfRegChnlVal[0] = (hal_data->RfRegChnlVal[0] &
298 hal_data->RfRegChnlVal[rf_path] = (hal_data->RfRegChnlVal[rf_path] & 287 0xfffffc00) | param2;
299 0xfffffc00) | param2; 288 phy_set_rf_reg(adapt, 0, param1,
300 phy_set_rf_reg(adapt, (enum rf_radio_path)rf_path, param1, 289 bRFRegOffsetMask, hal_data->RfRegChnlVal[0]);
301 bRFRegOffsetMask, hal_data->RfRegChnlVal[rf_path]);
302 }
303 } 290 }
304 291
305 void rtw_hal_set_chan(struct adapter *adapt, u8 channel) 292 void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
@@ -307,9 +294,6 @@ void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
307 struct hal_data_8188e *hal_data = adapt->HalData; 294 struct hal_data_8188e *hal_data = adapt->HalData;
308 u8 tmpchannel = hal_data->CurrentChannel; 295 u8 tmpchannel = hal_data->CurrentChannel;
309 296
310 if (hal_data->rf_chip == RF_PSEUDO_11N)
311 return;
312
313 if (channel == 0) 297 if (channel == 0)
314 channel = 1; 298 channel = 1;
315 299
@@ -407,9 +391,8 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
407 s8 ofdm_index[2], cck_index = 0; 391 s8 ofdm_index[2], cck_index = 0;
408 s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0; 392 s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
409 u32 i = 0, j = 0; 393 u32 i = 0, j = 0;
410 bool is2t = false;
411 394
412 u8 ofdm_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */ 395 u8 ofdm_min_index = 6; /* OFDM BB Swing should be less than +3.0dB */
413 s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = { 396 s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = {
414 /* 2.4G, decrease power */ 397 /* 2.4G, decrease power */
415 {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, 398 {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
@@ -427,18 +410,12 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
427 dm_txpwr_track_setpwr(dm_odm); 410 dm_txpwr_track_setpwr(dm_odm);
428 411
429 dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++; 412 dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
430 dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
431 413
432 dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317; 414 dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
433 415
434 thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A, 416 thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A,
435 RF_T_METER_88E, 0xfc00); 417 RF_T_METER_88E, 0xfc00);
436 418
437 if (is2t)
438 rf = 2;
439 else
440 rf = 1;
441
442 if (thermal_val) { 419 if (thermal_val) {
443 /* Query OFDM path A default setting */ 420 /* Query OFDM path A default setting */
444 ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D; 421 ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
@@ -450,17 +427,6 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
450 } 427 }
451 } 428 }
452 429
453 /* Query OFDM path B default setting */
454 if (is2t) {
455 ele_d = phy_query_bb_reg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
456 for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
457 if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) {
458 ofdm_index_old[1] = (u8)i;
459 break;
460 }
461 }
462 }
463
464 /* Query CCK default setting From 0xa24 */ 430 /* Query CCK default setting From 0xa24 */
465 temp_cck = dm_odm->RFCalibrateInfo.RegA24; 431 temp_cck = dm_odm->RFCalibrateInfo.RegA24;
466 432
@@ -479,8 +445,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
479 dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val; 445 dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val;
480 dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val; 446 dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val;
481 447
482 for (i = 0; i < rf; i++) 448 dm_odm->RFCalibrateInfo.OFDM_index[0] = ofdm_index_old[0];
483 dm_odm->RFCalibrateInfo.OFDM_index[i] = ofdm_index_old[i];
484 dm_odm->RFCalibrateInfo.CCK_index = cck_index_old; 449 dm_odm->RFCalibrateInfo.CCK_index = cck_index_old;
485 } 450 }
486 451
@@ -539,13 +504,11 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
539 offset = index_mapping_NUM_88E-1; 504 offset = index_mapping_NUM_88E-1;
540 505
541 /* Updating ofdm_index values with new OFDM / CCK offset */ 506 /* Updating ofdm_index values with new OFDM / CCK offset */
542 for (i = 0; i < rf; i++) { 507 ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset];
543 ofdm_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + ofdm_index_mapping[j][offset]; 508 if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1)
544 if (ofdm_index[i] > OFDM_TABLE_SIZE_92D-1) 509 ofdm_index[0] = OFDM_TABLE_SIZE_92D-1;
545 ofdm_index[i] = OFDM_TABLE_SIZE_92D-1; 510 else if (ofdm_index[0] < ofdm_min_index)
546 else if (ofdm_index[i] < ofdm_min_index) 511 ofdm_index[0] = ofdm_min_index;
547 ofdm_index[i] = ofdm_min_index;
548 }
549 512
550 cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset]; 513 cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset];
551 if (cck_index > CCK_TABLE_SIZE-1) 514 if (cck_index > CCK_TABLE_SIZE-1)
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 2f3edf0f850a..8f8c9de6a9bc 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -61,8 +61,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
61 (powerlevel[idx1]<<8) | 61 (powerlevel[idx1]<<8) |
62 (powerlevel[idx1]<<16) | 62 (powerlevel[idx1]<<16) |
63 (powerlevel[idx1]<<24); 63 (powerlevel[idx1]<<24);
64 if (tx_agc[idx1] > 0x20 && hal_data->ExternalPA)
65 tx_agc[idx1] = 0x20;
66 } 64 }
67 } else { 65 } else {
68 if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) { 66 if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
@@ -139,17 +137,15 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
139 (powerbase0<<8) | powerbase0; 137 (powerbase0<<8) | powerbase0;
140 *(ofdmbase+i) = powerbase0; 138 *(ofdmbase+i) = powerbase0;
141 } 139 }
142 for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) { 140 /* Check HT20 to HT40 diff */
143 /* Check HT20 to HT40 diff */ 141 if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
144 if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) 142 powerlevel[0] = pwr_level_bw20[0];
145 powerlevel[i] = pwr_level_bw20[i]; 143 else
146 else 144 powerlevel[0] = pwr_level_bw40[0];
147 powerlevel[i] = pwr_level_bw40[i]; 145 powerbase1 = powerlevel[0];
148 powerbase1 = powerlevel[i]; 146 powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
149 powerbase1 = (powerbase1<<24) | (powerbase1<<16) | 147 (powerbase1<<8) | powerbase1;
150 (powerbase1<<8) | powerbase1; 148 *mcs_base = powerbase1;
151 *(mcs_base+i) = powerbase1;
152 }
153 } 149 }
154 static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, 150 static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
155 u8 index, u32 *powerbase0, u32 *powerbase1, 151 u8 index, u32 *powerbase0, u32 *powerbase1,
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index dde64417e66a..9712d7b74345 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -230,79 +230,33 @@ static bool rf6052_conf_para(struct adapter *adapt)
230 { 230 {
231 struct hal_data_8188e *hal_data = adapt->HalData; 231 struct hal_data_8188e *hal_data = adapt->HalData;
232 u32 u4val = 0; 232 u32 u4val = 0;
233 u8 rfpath;
234 bool rtstatus = true; 233 bool rtstatus = true;
235 struct bb_reg_def *pphyreg; 234 struct bb_reg_def *pphyreg;
236 235
237 for (rfpath = 0; rfpath < hal_data->NumTotalRFPath; rfpath++) { 236 pphyreg = &hal_data->PHYRegDef[RF90_PATH_A];
238 pphyreg = &hal_data->PHYRegDef[rfpath]; 237 u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV);
239 238
240 switch (rfpath) { 239 phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
241 case RF90_PATH_A: 240 udelay(1);
242 case RF90_PATH_C:
243 u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
244 BRFSI_RFENV);
245 break;
246 case RF90_PATH_B:
247 case RF90_PATH_D:
248 u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
249 BRFSI_RFENV << 16);
250 break;
251 }
252 241
253 phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); 242 phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
254 udelay(1); 243 udelay(1);
255 244
256 phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1); 245 phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREADDREAALENGTH, 0x0);
257 udelay(1); 246 udelay(1);
258 247
259 phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, 248 phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREDATALENGTH, 0x0);
260 B3WIREADDREAALENGTH, 0x0); 249 udelay(1);
261 udelay(1);
262 250
263 phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, 251 rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
264 B3WIREDATALENGTH, 0x0);
265 udelay(1);
266 252
267 switch (rfpath) { 253 phy_set_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV, u4val);
268 case RF90_PATH_A:
269 rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
270 break;
271 case RF90_PATH_B:
272 rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
273 break;
274 case RF90_PATH_C:
275 break;
276 case RF90_PATH_D:
277 break;
278 }
279
280 switch (rfpath) {
281 case RF90_PATH_A:
282 case RF90_PATH_C:
283 phy_set_bb_reg(adapt, pphyreg->rfintfs,
284 BRFSI_RFENV, u4val);
285 break;
286 case RF90_PATH_B:
287 case RF90_PATH_D:
288 phy_set_bb_reg(adapt, pphyreg->rfintfs,
289 BRFSI_RFENV << 16, u4val);
290 break;
291 }
292
293 if (!rtstatus)
294 return false;
295 }
296 254
297 return rtstatus; 255 return rtstatus;
298 } 256 }
299 257
300 static bool rtl88e_phy_rf6052_config(struct adapter *adapt) 258 static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
301 { 259 {
302 struct hal_data_8188e *hal_data = adapt->HalData;
303
304 hal_data->NumTotalRFPath = 1;
305
306 return rf6052_conf_para(adapt); 260 return rf6052_conf_para(adapt);
307 } 261 }
308 262
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 385bc2f56f2f..0ce7db723a5d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -135,7 +135,6 @@ void rtw_hal_read_chip_version(struct adapter *padapter)
135 dump_chip_info(ChipVersion); 135 dump_chip_info(ChipVersion);
136 136
137 pHalData->VersionID = ChipVersion; 137 pHalData->VersionID = ChipVersion;
138 pHalData->NumTotalRFPath = 1;
139 } 138 }
140 139
141 void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet) 140 void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -470,7 +469,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
470 { 469 {
471 struct hal_data_8188e *pHalData = padapter->HalData; 470 struct hal_data_8188e *pHalData = padapter->HalData;
472 struct txpowerinfo24g pwrInfo24G; 471 struct txpowerinfo24g pwrInfo24G;
473 u8 rfPath, ch, group; 472 u8 ch, group;
474 u8 bIn24G, TxCount; 473 u8 bIn24G, TxCount;
475 474
476 Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail); 475 Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
@@ -478,34 +477,32 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
478 if (!AutoLoadFail) 477 if (!AutoLoadFail)
479 pHalData->bTXPowerDataReadFromEEPORM = true; 478 pHalData->bTXPowerDataReadFromEEPORM = true;
480 479
481 for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) { 480 for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
482 for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { 481 bIn24G = Hal_GetChnlGroup88E(ch, &group);
483 bIn24G = Hal_GetChnlGroup88E(ch, &group); 482 if (bIn24G) {
484 if (bIn24G) { 483 pHalData->Index24G_CCK_Base[0][ch] = pwrInfo24G.IndexCCK_Base[0][group];
485 pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group]; 484 if (ch == 14)
486 if (ch == 14) 485 pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4];
487 pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4]; 486 else
488 else 487 pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group];
489 pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
490 }
491 if (bIn24G) {
492 DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
493 DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]);
494 DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]);
495 }
496 } 488 }
497 for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { 489 if (bIn24G) {
498 pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount]; 490 DBG_88E("======= Path %d, Channel %d =======\n", 0, ch);
499 pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount]; 491 DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]);
500 pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount]; 492 DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]);
501 pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
502 DBG_88E("======= TxCount %d =======\n", TxCount);
503 DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
504 DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
505 DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
506 DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
507 } 493 }
508 } 494 }
495 for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
496 pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount];
497 pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
498 pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
499 pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount];
500 DBG_88E("======= TxCount %d =======\n", TxCount);
501 DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]);
502 DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]);
503 DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]);
504 DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]);
505 }
509 506
510 /* 2010/10/19 MH Add Regulator recognize for CU. */ 507 /* 2010/10/19 MH Add Regulator recognize for CU. */
511 if (!AutoLoadFail) { 508 if (!AutoLoadFail) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 780666a755ee..12879afb992e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -46,16 +46,12 @@ void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
46 46
47 LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */ 47 LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */
48 48
49 if (padapter->HalData->bLedOpenDrain) { 49 /* Open-drain arrangement for controlling the LED) */
50 /* Open-drain arrangement for controlling the LED) */ 50 LedCfg &= 0x90; /* Set to software control. */
51 LedCfg &= 0x90; /* Set to software control. */ 51 usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
52 usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3))); 52 LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
53 LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG); 53 LedCfg &= 0xFE;
54 LedCfg &= 0xFE; 54 usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
55 usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
56 } else {
57 usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
58 }
59 exit: 55 exit:
60 pLed->bLedOn = false; 56 pLed->bLedOn = false;
61 } 57 }
@@ -69,10 +65,6 @@ void rtw_hal_sw_led_init(struct adapter *padapter)
69 { 65 {
70 struct led_priv *pledpriv = &(padapter->ledpriv); 66 struct led_priv *pledpriv = &(padapter->ledpriv);
71 67
72 pledpriv->bRegUseLed = true;
73 pledpriv->LedControlHandler = LedControl8188eu;
74 padapter->HalData->bLedOpenDrain = true;
75
76 InitLed871x(padapter, &(pledpriv->SwLed0)); 68 InitLed871x(padapter, &(pledpriv->SwLed0));
77 } 69 }
78 70
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d0495a16ff79..0fc093eb7a77 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -37,19 +37,15 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
37 /* init recv_buf */ 37 /* init recv_buf */
38 _rtw_init_queue(&precvpriv->free_recv_buf_queue); 38 _rtw_init_queue(&precvpriv->free_recv_buf_queue);
39 39
40 precvpriv->pallocated_recv_buf = 40 precvpriv->precv_buf =
41 kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL); 41 kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL);
42 if (!precvpriv->pallocated_recv_buf) { 42 if (!precvpriv->precv_buf) {
43 res = _FAIL; 43 res = _FAIL;
44 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 44 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
45 ("alloc recv_buf fail!\n")); 45 ("alloc recv_buf fail!\n"));
46 goto exit; 46 goto exit;
47 } 47 }
48 48 precvbuf = precvpriv->precv_buf;
49 precvpriv->precv_buf = precvpriv->pallocated_recv_buf;
50
51
52 precvbuf = (struct recv_buf *)precvpriv->precv_buf;
53 49
54 for (i = 0; i < NR_RECVBUFF; i++) { 50 for (i = 0; i < NR_RECVBUFF; i++) {
55 res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf); 51 res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
@@ -58,27 +54,18 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
58 precvbuf->adapter = padapter; 54 precvbuf->adapter = padapter;
59 precvbuf++; 55 precvbuf++;
60 } 56 }
61 precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
62 skb_queue_head_init(&precvpriv->rx_skb_queue); 57 skb_queue_head_init(&precvpriv->rx_skb_queue);
63 { 58 {
64 int i; 59 int i;
65 size_t tmpaddr = 0;
66 size_t alignm = 0;
67 struct sk_buff *pskb = NULL; 60 struct sk_buff *pskb = NULL;
68 61
69 skb_queue_head_init(&precvpriv->free_recv_skb_queue); 62 skb_queue_head_init(&precvpriv->free_recv_skb_queue);
70 63
71 for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) { 64 for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
72 pskb = __netdev_alloc_skb(padapter->pnetdev, 65 pskb = __netdev_alloc_skb(padapter->pnetdev,
73 MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, 66 MAX_RECVBUF_SZ, GFP_KERNEL);
74 GFP_KERNEL);
75 if (pskb) { 67 if (pskb) {
76 kmemleak_not_leak(pskb); 68 kmemleak_not_leak(pskb);
77 pskb->dev = padapter->pnetdev;
78 tmpaddr = (size_t)pskb->data;
79 alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
80 skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignm));
81
82 skb_queue_tail(&precvpriv->free_recv_skb_queue, 69 skb_queue_tail(&precvpriv->free_recv_skb_queue,
83 pskb); 70 pskb);
84 } 71 }
@@ -95,14 +82,14 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
95 struct recv_buf *precvbuf; 82 struct recv_buf *precvbuf;
96 struct recv_priv *precvpriv = &padapter->recvpriv; 83 struct recv_priv *precvpriv = &padapter->recvpriv;
97 84
98 precvbuf = (struct recv_buf *)precvpriv->precv_buf; 85 precvbuf = precvpriv->precv_buf;
99 86
100 for (i = 0; i < NR_RECVBUFF; i++) { 87 for (i = 0; i < NR_RECVBUFF; i++) {
101 usb_free_urb(precvbuf->purb); 88 usb_free_urb(precvbuf->purb);
102 precvbuf++; 89 precvbuf++;
103 } 90 }
104 91
105 kfree(precvpriv->pallocated_recv_buf); 92 kfree(precvpriv->precv_buf);
106 93
107 if (skb_queue_len(&precvpriv->rx_skb_queue)) 94 if (skb_queue_len(&precvpriv->rx_skb_queue))
108 DBG_88E(KERN_WARNING "rx_skb_queue not empty\n"); 95 DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 7692ca495ee5..3675edb61942 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -562,9 +562,6 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
562 562
563 /* Rx aggregation setting */ 563 /* Rx aggregation setting */
564 usb_AggSettingRxUpdate(Adapter); 564 usb_AggSettingRxUpdate(Adapter);
565
566 /* 201/12/10 MH Add for USB agg mode dynamic switch. */
567 Adapter->HalData->UsbRxHighSpeedMode = false;
568} 565}
569 566
570static void _InitBeaconParameters(struct adapter *Adapter) 567static void _InitBeaconParameters(struct adapter *Adapter)
570 static void _InitBeaconParameters(struct adapter *Adapter) 567 static void _InitBeaconParameters(struct adapter *Adapter)
604 phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1); 601 phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
605 } 602 }
606 603
607 enum {
608 Antenna_Lfet = 1,
609 Antenna_Right = 2,
610 };
611
612 static void _InitAntenna_Selection(struct adapter *Adapter) 604 static void _InitAntenna_Selection(struct adapter *Adapter)
613 { 605 {
614 struct hal_data_8188e *haldata = Adapter->HalData; 606 struct hal_data_8188e *haldata = Adapter->HalData;
@@ -994,19 +986,16 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
994 RT_TRACE(_module_hci_hal_init_c_, _drv_info_, 986 RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
995 ("===> usb_inirp_init\n")); 987 ("===> usb_inirp_init\n"));
996 988
997 precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
998
999 /* issue Rx irp to receive data */ 989 /* issue Rx irp to receive data */
1000 precvbuf = (struct recv_buf *)precvpriv->precv_buf; 990 precvbuf = precvpriv->precv_buf;
1001 for (i = 0; i < NR_RECVBUFF; i++) { 991 for (i = 0; i < NR_RECVBUFF; i++) {
1002 if (usb_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) { 992 if (usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf) == false) {
1003 RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n")); 993 RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
1004 status = _FAIL; 994 status = _FAIL;
1005 goto exit; 995 goto exit;
1006 } 996 }
1007 997
1008 precvbuf++; 998 precvbuf++;
1009 precvpriv->free_recv_buf_queue_cnt--;
1010 } 999 }
1011 1000
1012 exit: 1001 exit:
@@ -1107,18 +1096,12 @@ static void _ReadPROMContent(
1107 readAdapterInfo_8188EU(Adapter); 1096 readAdapterInfo_8188EU(Adapter);
1108 } 1097 }
1109 1098
1110 static void _ReadRFType(struct adapter *Adapter)
1111 {
1112 Adapter->HalData->rf_chip = RF_6052;
1113 }
1114
1115 void rtw_hal_read_chip_info(struct adapter *Adapter) 1099 void rtw_hal_read_chip_info(struct adapter *Adapter)
1116 { 1100 {
1117 unsigned long start = jiffies; 1101 unsigned long start = jiffies;
1118 1102
1119 MSG_88E("====> %s\n", __func__); 1103 MSG_88E("====> %s\n", __func__);
1120 1104
1121 _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
1122 _ReadPROMContent(Adapter); 1105 _ReadPROMContent(Adapter);
1123 1106
1124 MSG_88E("<==== %s in %d ms\n", __func__, 1107 MSG_88E("<==== %s in %d ms\n", __func__,
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 0976a761b280..550ad62e7064 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -99,17 +99,6 @@ enum phy_rate_tx_offset_area {
99 RA_OFFSET_HT_CCK, 99 RA_OFFSET_HT_CCK,
100 }; 100 };
101 101
102 /* BB/RF related */
103 enum RF_TYPE_8190P {
104 RF_TYPE_MIN, /* 0 */
105 RF_8225 = 1, /* 1 11b/g RF for verification only */
106 RF_8256 = 2, /* 2 11b/g/n */
107 RF_8258 = 3, /* 3 11a/b/g/n RF */
108 RF_6052 = 4, /* 4 11b/g/n RF */
109 /* TODO: We should remove this psudo PHY RF after we get new RF. */
110 RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
111 };
112
113 struct bb_reg_def { 102 struct bb_reg_def {
114 u32 rfintfs; /* set software control: */ 103 u32 rfintfs; /* set software control: */
115 /* 0x870~0x877[8 bytes] */ 104 /* 0x870~0x877[8 bytes] */
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 32326fd1dd24..e86419e525d8 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -156,8 +156,6 @@ struct adapter {
156 u8 hw_init_completed; 156 u8 hw_init_completed;
157 157
158 void *cmdThread; 158 void *cmdThread;
159 void (*intf_start)(struct adapter *adapter);
160 void (*intf_stop)(struct adapter *adapter);
161 struct net_device *pnetdev; 159 struct net_device *pnetdev;
162 struct net_device *pmondev; 160 struct net_device *pmondev;
163 161
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index fa032b0c12ff..e1114a95d442 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -190,6 +190,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
190 190
191 u32 rtw_hal_inirp_init(struct adapter *padapter); 191 u32 rtw_hal_inirp_init(struct adapter *padapter);
192 void rtw_hal_inirp_deinit(struct adapter *padapter); 192 void rtw_hal_inirp_deinit(struct adapter *padapter);
193 void usb_intf_stop(struct adapter *padapter);
193 194
194 s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe); 195 s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
195 s32 rtw_hal_mgnt_xmit(struct adapter *padapter, 196 s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 805f52e108b2..4fb3bb07ceaa 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -80,11 +80,6 @@
80 #define DM_DIG_FA_TH2_LPS 30 /* 30 lps */ 80 #define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
81 #define RSSI_OFFSET_DIG 0x05; 81 #define RSSI_OFFSET_DIG 0x05;
82 82
83 /* ANT Test */
84 #define ANTTESTALL 0x00 /* Ant A or B will be Testing */
85 #define ANTTESTA 0x01 /* Ant A will be Testing */
86 #define ANTTESTB 0x02 /* Ant B will be testing */
87
88 struct rtw_dig { 83 struct rtw_dig {
89 u8 Dig_Enable_Flag; 84 u8 Dig_Enable_Flag;
90 u8 Dig_Ext_Port_Stage; 85 u8 Dig_Ext_Port_Stage;
@@ -590,7 +585,6 @@ struct odm_rf_cal {
590 s32 RegEBC; 585 s32 RegEBC;
591 586
592 u8 TXPowercount; 587 u8 TXPowercount;
593 bool bTXPowerTrackingInit;
594 bool bTXPowerTracking; 588 bool bTXPowerTracking;
595 u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking 589 u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
596 * as default */ 590 * as default */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index dbd7dc4f87dd..97d3d8504184 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -35,7 +35,8 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
35 struct net_device *rtw_init_netdev(struct adapter *padapter); 35 struct net_device *rtw_init_netdev(struct adapter *padapter);
36 u16 rtw_recv_select_queue(struct sk_buff *skb); 36 u16 rtw_recv_select_queue(struct sk_buff *skb);
37 37
38 int pm_netdev_open(struct net_device *pnetdev, u8 bnormal); 38 int netdev_open(struct net_device *pnetdev);
39 int ips_netdrv_open(struct adapter *padapter);
39 void rtw_ips_dev_unload(struct adapter *padapter); 40 void rtw_ips_dev_unload(struct adapter *padapter);
40 int rtw_ips_pwr_up(struct adapter *padapter); 41 int rtw_ips_pwr_up(struct adapter *padapter);
41 void rtw_ips_pwr_down(struct adapter *padapter); 42 void rtw_ips_pwr_down(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 7550d58f6b5b..9b43a1314bd5 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -29,8 +29,6 @@ int rtw_recv_indicatepkt(struct adapter *adapter,
29 29
30 void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup); 30 void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
31 31
32 void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
33
34 int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf); 32 int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
35 33
36 void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl); 34 void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7c81e3f3d12e..9330361da4ad 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -200,10 +200,6 @@ struct hal_data_8188e {
200 200
201 u16 BasicRateSet; 201 u16 BasicRateSet;
202 202
203 /* rf_ctrl */
204 u8 rf_chip;
205 u8 NumTotalRFPath;
206
207 u8 BoardType; 203 u8 BoardType;
208 204
209 /* EEPROM setting. */ 205 /* EEPROM setting. */
@@ -265,14 +261,6 @@ struct hal_data_8188e {
265 u32 CCKTxPowerLevelOriginalOffset; 261 u32 CCKTxPowerLevelOriginalOffset;
266 262
267 u8 CrystalCap; 263 u8 CrystalCap;
268 u32 AntennaTxPath; /* Antenna path Tx */
269 u32 AntennaRxPath; /* Antenna path Rx */
270 u8 BluetoothCoexist;
271 u8 ExternalPA;
272
273 u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
274
275 u8 b1x1RecvCombine; /* for 1T1R receive combining */
276 264
277 u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */ 265 u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
278 266
@@ -316,14 +304,6 @@ struct hal_data_8188e {
316 u8 OutEpQueueSel; 304 u8 OutEpQueueSel;
317 u8 OutEpNumber; 305 u8 OutEpNumber;
318 306
319 /* Add for USB aggreation mode dynamic shceme. */
320 bool UsbRxHighSpeedMode;
321
322 /* 2010/11/22 MH Add for slim combo debug mode selective. */
323 /* This is used for fix the drawback of CU TSMC-A/UMC-A cut.
324 * HW auto suspend ability. Close BT clock. */
325 bool SlimComboDbg;
326
327 u16 EfuseUsedBytes; 307 u16 EfuseUsedBytes;
328 308
329 /* Auto FSM to Turn On, include clock, isolation, power control 309 /* Auto FSM to Turn On, include clock, isolation, power control
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 80832a5f0732..0d8bf51c72a9 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -51,9 +51,7 @@ enum rx_packet_type {
51 }; 51 };
52 52
53 #define INTERRUPT_MSG_FORMAT_LEN 60 53 #define INTERRUPT_MSG_FORMAT_LEN 60
54 void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
55 void rtl8188eu_recv_tasklet(void *priv); 54 void rtl8188eu_recv_tasklet(void *priv);
56 void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
57 void rtl8188e_process_phy_info(struct adapter *padapter, 55 void rtl8188e_process_phy_info(struct adapter *padapter,
58 struct recv_frame *prframe); 56 struct recv_frame *prframe);
59 void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy); 57 void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index f2054ef70358..607d1ba56a46 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -70,12 +70,9 @@ struct LED_871x {
70 70
71 struct timer_list BlinkTimer; /* Timer object for led blinking. */ 71 struct timer_list BlinkTimer; /* Timer object for led blinking. */
72 72
73 u8 bSWLedCtrl;
74
75 /* ALPHA, added by chiyoko, 20090106 */ 73 /* ALPHA, added by chiyoko, 20090106 */
76 u8 bLedNoLinkBlinkInProgress; 74 u8 bLedNoLinkBlinkInProgress;
77 u8 bLedLinkBlinkInProgress; 75 u8 bLedLinkBlinkInProgress;
78 u8 bLedStartToLinkBlinkInProgress;
79 u8 bLedScanBlinkInProgress; 76 u8 bLedScanBlinkInProgress;
80 struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to 77 struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
81 * manipulate H/W to blink LED. */ 78 * manipulate H/W to blink LED. */
@@ -91,18 +88,9 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
91struct led_priv { 88struct led_priv {
92 /* add for led control */ 89 /* add for led control */
93 struct LED_871x SwLed0; 90 struct LED_871x SwLed0;
94 u8 bRegUseLed;
95 void (*LedControlHandler)(struct adapter *padapter,
96 enum LED_CTL_MODE LedAction);
97 /* add for led control */ 91 /* add for led control */
98}; 92};
99 93
100#define rtw_led_control(adapt, action) \
101 do { \
102 if ((adapt)->ledpriv.LedControlHandler) \
103 (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
104 } while (0)
105
106void BlinkTimerCallback(unsigned long data); 94void BlinkTimerCallback(unsigned long data);
107void BlinkWorkItemCallback(struct work_struct *work); 95void BlinkWorkItemCallback(struct work_struct *work);
108 96
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 9434b869c5e9..18fb7e7b2273 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -504,7 +504,7 @@ void rtw_scan_abort(struct adapter *adapter);
504int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, 504int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
505 uint in_len); 505 uint in_len);
506int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, 506int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
507 uint in_len, uint initial_out_len); 507 uint in_len, uint initial_out_len);
508void rtw_init_registrypriv_dev_network(struct adapter *adapter); 508void rtw_init_registrypriv_dev_network(struct adapter *adapter);
509 509
510void rtw_update_registrypriv_dev_network(struct adapter *adapter); 510void rtw_update_registrypriv_dev_network(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 49d973881a04..052af7b891da 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -139,8 +139,6 @@ struct rx_pkt_attrib {
139#define SN_EQUAL(a, b) (a == b) 139#define SN_EQUAL(a, b) (a == b)
140#define REORDER_WAIT_TIME (50) /* (ms) */ 140#define REORDER_WAIT_TIME (50) /* (ms) */
141 141
142#define RECVBUFF_ALIGN_SZ 8
143
144#define RXDESC_SIZE 24 142#define RXDESC_SIZE 24
145#define RXDESC_OFFSET RXDESC_SIZE 143#define RXDESC_OFFSET RXDESC_SIZE
146 144
@@ -166,9 +164,7 @@ struct recv_priv {
166 struct __queue free_recv_queue; 164 struct __queue free_recv_queue;
167 struct __queue recv_pending_queue; 165 struct __queue recv_pending_queue;
168 struct __queue uc_swdec_pending_queue; 166 struct __queue uc_swdec_pending_queue;
169 u8 *pallocated_frame_buf; 167 void *pallocated_frame_buf;
170 u8 *precv_frame_buf;
171 uint free_recvframe_cnt;
172 struct adapter *adapter; 168 struct adapter *adapter;
173 u32 bIsAnyNonBEPkts; 169 u32 bIsAnyNonBEPkts;
174 u64 rx_bytes; 170 u64 rx_bytes;
@@ -176,17 +172,12 @@ struct recv_priv {
176 u64 rx_drop; 172 u64 rx_drop;
177 u64 last_rx_bytes; 173 u64 last_rx_bytes;
178 174
179 uint ff_hwaddr;
180 u8 rx_pending_cnt;
181
182 struct tasklet_struct irq_prepare_beacon_tasklet; 175 struct tasklet_struct irq_prepare_beacon_tasklet;
183 struct tasklet_struct recv_tasklet; 176 struct tasklet_struct recv_tasklet;
184 struct sk_buff_head free_recv_skb_queue; 177 struct sk_buff_head free_recv_skb_queue;
185 struct sk_buff_head rx_skb_queue; 178 struct sk_buff_head rx_skb_queue;
186 u8 *pallocated_recv_buf; 179 struct recv_buf *precv_buf; /* 4 alignment */
187 u8 *precv_buf; /* 4 alignment */
188 struct __queue free_recv_buf_queue; 180 struct __queue free_recv_buf_queue;
189 u32 free_recv_buf_queue_cnt;
190 /* For display the phy informatiom */ 181 /* For display the phy informatiom */
191 u8 is_signal_dbg; /* for debug */ 182 u8 is_signal_dbg; /* for debug */
192 u8 signal_strength_dbg; /* for debug */ 183 u8 signal_strength_dbg; /* for debug */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 78d9b6e035bf..fb586365d2e5 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -53,7 +53,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr);
53u16 usb_read16(struct adapter *adapter, u32 addr); 53u16 usb_read16(struct adapter *adapter, u32 addr);
54u32 usb_read32(struct adapter *adapter, u32 addr); 54u32 usb_read32(struct adapter *adapter, u32 addr);
55 55
56u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem); 56u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf);
57void usb_read_port_cancel(struct adapter *adapter); 57void usb_read_port_cancel(struct adapter *adapter);
58 58
59int usb_write8(struct adapter *adapter, u32 addr, u8 val); 59int usb_write8(struct adapter *adapter, u32 addr, u8 val);
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index f96ca6af934d..959ef4b3066c 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -41,13 +41,11 @@ void rtw_os_xmit_schedule(struct adapter *padapter);
41 41
42int rtw_os_xmit_resource_alloc(struct adapter *padapter, 42int rtw_os_xmit_resource_alloc(struct adapter *padapter,
43 struct xmit_buf *pxmitbuf, u32 alloc_sz); 43 struct xmit_buf *pxmitbuf, u32 alloc_sz);
44void rtw_os_xmit_resource_free(struct adapter *padapter, 44void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf);
45 struct xmit_buf *pxmitbuf, u32 free_sz);
46 45
47uint rtw_remainder_len(struct pkt_file *pfile); 46uint rtw_remainder_len(struct pkt_file *pfile);
48void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile); 47void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
49uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen); 48uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
50int rtw_endofpktfile(struct pkt_file *pfile);
51 49
52void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt); 50void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
53void rtw_os_xmit_complete(struct adapter *padapter, 51void rtw_os_xmit_complete(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 40691f1ec507..8fc3fadf065f 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -144,7 +144,6 @@ static bool rtw_monitor_enable;
144module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444); 144module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444);
145MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)"); 145MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)");
146 146
147static int netdev_open(struct net_device *pnetdev);
148static int netdev_close(struct net_device *pnetdev); 147static int netdev_close(struct net_device *pnetdev);
149 148
150static void loadparam(struct adapter *padapter, struct net_device *pnetdev) 149static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
@@ -596,10 +595,9 @@ static int _netdev_open(struct net_device *pnetdev)
596 pr_info("can't init mlme_ext_priv\n"); 595 pr_info("can't init mlme_ext_priv\n");
597 goto netdev_open_error; 596 goto netdev_open_error;
598 } 597 }
599 if (padapter->intf_start) 598 rtw_hal_inirp_init(padapter);
600 padapter->intf_start(padapter);
601 599
602 rtw_led_control(padapter, LED_CTL_NO_LINK); 600 LedControl8188eu(padapter, LED_CTL_NO_LINK);
603 601
604 padapter->bup = true; 602 padapter->bup = true;
605 } 603 }
@@ -630,7 +628,7 @@ netdev_open_error:
630 return -1; 628 return -1;
631} 629}
632 630
633static int netdev_open(struct net_device *pnetdev) 631int netdev_open(struct net_device *pnetdev)
634{ 632{
635 int ret; 633 int ret;
636 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); 634 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -642,7 +640,7 @@ static int netdev_open(struct net_device *pnetdev)
642 return ret; 640 return ret;
643} 641}
644 642
645static int ips_netdrv_open(struct adapter *padapter) 643int ips_netdrv_open(struct adapter *padapter)
646{ 644{
647 int status = _SUCCESS; 645 int status = _SUCCESS;
648 646
@@ -658,8 +656,7 @@ static int ips_netdrv_open(struct adapter *padapter)
658 goto netdev_open_error; 656 goto netdev_open_error;
659 } 657 }
660 658
661 if (padapter->intf_start) 659 rtw_hal_inirp_init(padapter);
662 padapter->intf_start(padapter);
663 660
664 rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); 661 rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
665 mod_timer(&padapter->mlmepriv.dynamic_chk_timer, 662 mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
@@ -684,7 +681,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
684 681
685 result = ips_netdrv_open(padapter); 682 result = ips_netdrv_open(padapter);
686 683
687 rtw_led_control(padapter, LED_CTL_NO_LINK); 684 LedControl8188eu(padapter, LED_CTL_NO_LINK);
688 685
689 DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n", 686 DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n",
690 jiffies_to_msecs(jiffies - start_time)); 687 jiffies_to_msecs(jiffies - start_time));
@@ -699,7 +696,7 @@ void rtw_ips_pwr_down(struct adapter *padapter)
699 696
700 padapter->net_closed = true; 697 padapter->net_closed = true;
701 698
702 rtw_led_control(padapter, LED_CTL_POWER_OFF); 699 LedControl8188eu(padapter, LED_CTL_POWER_OFF);
703 700
704 rtw_ips_dev_unload(padapter); 701 rtw_ips_dev_unload(padapter);
705 DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n", 702 DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n",
@@ -712,25 +709,13 @@ void rtw_ips_dev_unload(struct adapter *padapter)
712 709
713 rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL); 710 rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
714 711
715 if (padapter->intf_stop) 712 usb_intf_stop(padapter);
716 padapter->intf_stop(padapter);
717 713
718 /* s5. */ 714 /* s5. */
719 if (!padapter->bSurpriseRemoved) 715 if (!padapter->bSurpriseRemoved)
720 rtw_hal_deinit(padapter); 716 rtw_hal_deinit(padapter);
721} 717}
722 718
723int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
724{
725 int status;
726
727 if (bnormal)
728 status = netdev_open(pnetdev);
729 else
730 status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
731 return status;
732}
733
734static int netdev_close(struct net_device *pnetdev) 719static int netdev_close(struct net_device *pnetdev)
735{ 720{
736 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); 721 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -763,7 +748,7 @@ static int netdev_close(struct net_device *pnetdev)
763 /* s2-4. */ 748 /* s2-4. */
764 rtw_free_network_queue(padapter, true); 749 rtw_free_network_queue(padapter, true);
765 /* Close LED */ 750 /* Close LED */
766 rtw_led_control(padapter, LED_CTL_POWER_OFF); 751 LedControl8188eu(padapter, LED_CTL_POWER_OFF);
767 } 752 }
768 753
769 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n")); 754 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 7cd2655f27fe..6ff836f481da 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -12,8 +12,6 @@
12 * more details. 12 * more details.
13 * 13 *
14 ******************************************************************************/ 14 ******************************************************************************/
15
16
17#define _OSDEP_SERVICE_C_ 15#define _OSDEP_SERVICE_C_
18 16
19#include <osdep_service.h> 17#include <osdep_service.h>
@@ -24,9 +22,10 @@
24#include <rtw_ioctl_set.h> 22#include <rtw_ioctl_set.h>
25 23
26/* 24/*
27* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE 25 * Translate the OS dependent @param error_code to OS independent
28* @return: one of RTW_STATUS_CODE 26 * RTW_STATUS_CODE
29*/ 27 * @return: one of RTW_STATUS_CODE
28 */
30inline int RTW_STATUS_CODE(int error_code) 29inline int RTW_STATUS_CODE(int error_code)
31{ 30{
32 if (error_code >= 0) 31 if (error_code >= 0)
@@ -43,22 +42,20 @@ void *rtw_malloc2d(int h, int w, int size)
43{ 42{
44 int j; 43 int j;
45 44
46 void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL); 45 void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL);
47 if (!a) { 46 if (!a)
48 pr_info("%s: alloc memory fail!\n", __func__); 47 goto out;
49 return NULL;
50 }
51 48
52 for (j = 0; j < h; j++) 49 for (j = 0; j < h; j++)
53 a[j] = ((char *)(a+h)) + j*w*size; 50 a[j] = ((char *)(a + h)) + j * w * size;
54 51out:
55 return a; 52 return a;
56} 53}
57 54
58void _rtw_init_queue(struct __queue *pqueue) 55void _rtw_init_queue(struct __queue *pqueue)
59{ 56{
60 INIT_LIST_HEAD(&(pqueue->queue)); 57 INIT_LIST_HEAD(&pqueue->queue);
61 spin_lock_init(&(pqueue->lock)); 58 spin_lock_init(&pqueue->lock);
62} 59}
63 60
64struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv) 61struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 103cdb4ed073..b85824ec5354 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -21,12 +21,6 @@
21#include <osdep_intf.h> 21#include <osdep_intf.h>
22#include <usb_ops_linux.h> 22#include <usb_ops_linux.h>
23 23
24/* alloc os related resource in struct recv_frame */
25void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
26{
27 precvframe->pkt = NULL;
28}
29
30/* alloc os related resource in struct recv_buf */ 24/* alloc os related resource in struct recv_buf */
31int rtw_os_recvbuf_resource_alloc(struct adapter *padapter, 25int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
32 struct recv_buf *precvbuf) 26 struct recv_buf *precvbuf)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6bbe87f..c6316ffa64d3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -141,16 +141,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
141 141
142} 142}
143 143
144static void usb_intf_start(struct adapter *padapter) 144void usb_intf_stop(struct adapter *padapter)
145{
146 RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
147
148 rtw_hal_inirp_init(padapter);
149
150 RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
151}
152
153static void usb_intf_stop(struct adapter *padapter)
154{ 145{
155 RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n")); 146 RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
156 147
@@ -183,8 +174,7 @@ static void rtw_dev_unload(struct adapter *padapter)
183 if (padapter->xmitpriv.ack_tx) 174 if (padapter->xmitpriv.ack_tx)
184 rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP); 175 rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
185 /* s3. */ 176 /* s3. */
186 if (padapter->intf_stop) 177 usb_intf_stop(padapter);
187 padapter->intf_stop(padapter);
188 /* s4. */ 178 /* s4. */
189 if (!padapter->pwrctrlpriv.bInternalAutoSuspend) 179 if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
190 rtw_stop_drv_threads(padapter); 180 rtw_stop_drv_threads(padapter);
@@ -294,7 +284,7 @@ static int rtw_resume_process(struct adapter *padapter)
294 pwrpriv->bkeepfwalive = false; 284 pwrpriv->bkeepfwalive = false;
295 285
296 pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive); 286 pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
297 if (pm_netdev_open(pnetdev, true) != 0) { 287 if (netdev_open(pnetdev) != 0) {
298 mutex_unlock(&pwrpriv->mutex_lock); 288 mutex_unlock(&pwrpriv->mutex_lock);
299 goto exit; 289 goto exit;
300 } 290 }
@@ -366,9 +356,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
366 if (!padapter->HalData) 356 if (!padapter->HalData)
367 DBG_88E("cant not alloc memory for HAL DATA\n"); 357 DBG_88E("cant not alloc memory for HAL DATA\n");
368 358
369 padapter->intf_start = &usb_intf_start;
370 padapter->intf_stop = &usb_intf_stop;
371
372 /* step read_chip_version */ 359 /* step read_chip_version */
373 rtw_hal_read_chip_version(padapter); 360 rtw_hal_read_chip_version(padapter);
374 361
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index d0d591501b73..e2dbe1b4afd3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -167,27 +167,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
167 } 167 }
168 if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */ 168 if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
169 if (pattrib->physt) 169 if (pattrib->physt)
170 update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status); 170 update_recvframe_phyinfo_88e(precvframe, pphy_status);
171 if (rtw_recv_entry(precvframe) != _SUCCESS) { 171 if (rtw_recv_entry(precvframe) != _SUCCESS) {
172 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 172 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
173 ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n")); 173 ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
174 } 174 }
175 } else { 175 } else if (pattrib->pkt_rpt_type == TX_REPORT1) {
176 /* enqueue recvframe to txrtp queue */ 176 /* CCX-TXRPT ack for xmit mgmt frames. */
177 if (pattrib->pkt_rpt_type == TX_REPORT1) { 177 handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
178 /* CCX-TXRPT ack for xmit mgmt frames. */ 178 rtw_free_recvframe(precvframe, pfree_recv_queue);
179 handle_txrpt_ccx_88e(adapt, precvframe->rx_data); 179 } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
180 } else if (pattrib->pkt_rpt_type == TX_REPORT2) { 180 ODM_RA_TxRPT2Handle_8188E(
181 ODM_RA_TxRPT2Handle_8188E( 181 &haldata->odmpriv,
182 &haldata->odmpriv, 182 precvframe->rx_data,
183 precvframe->rx_data, 183 pattrib->pkt_len,
184 pattrib->pkt_len, 184 pattrib->MacIDValidEntry[0],
185 pattrib->MacIDValidEntry[0], 185 pattrib->MacIDValidEntry[1]
186 pattrib->MacIDValidEntry[1] 186 );
187 ); 187 rtw_free_recvframe(precvframe, pfree_recv_queue);
188 } else if (pattrib->pkt_rpt_type == HIS_REPORT) { 188 } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
189 interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data); 189 interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
190 }
191 rtw_free_recvframe(precvframe, pfree_recv_queue); 190 rtw_free_recvframe(precvframe, pfree_recv_queue);
192 } 191 }
193 pkt_cnt--; 192 pkt_cnt--;
@@ -253,7 +252,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
253 /* Acquire IO memory for vendorreq */ 252 /* Acquire IO memory for vendorreq */
254 pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC); 253 pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
255 254
256 if (pIo_buf == NULL) { 255 if (!pIo_buf) {
257 DBG_88E("[%s] pIo_buf == NULL\n", __func__); 256 DBG_88E("[%s] pIo_buf == NULL\n", __func__);
258 status = -ENOMEM; 257 status = -ENOMEM;
259 goto release_mutex; 258 goto release_mutex;
@@ -384,8 +383,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
384 383
385 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n")); 384 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
386 385
387 precvpriv->rx_pending_cnt--;
388
389 if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) { 386 if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
390 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, 387 RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
391 ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", 388 ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
@@ -403,7 +400,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
403 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, 400 RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
404 ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n")); 401 ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
405 precvbuf->reuse = true; 402 precvbuf->reuse = true;
406 usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); 403 usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
407 DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__); 404 DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
408 } else { 405 } else {
409 skb_put(precvbuf->pskb, purb->actual_length); 406 skb_put(precvbuf->pskb, purb->actual_length);
@@ -414,7 +411,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
414 411
415 precvbuf->pskb = NULL; 412 precvbuf->pskb = NULL;
416 precvbuf->reuse = false; 413 precvbuf->reuse = false;
417 usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); 414 usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
418 } 415 }
419 } else { 416 } else {
420 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status)); 417 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
@@ -437,7 +434,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
437 case -EOVERFLOW: 434 case -EOVERFLOW:
438 adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL; 435 adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
439 precvbuf->reuse = true; 436 precvbuf->reuse = true;
440 usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); 437 usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
441 break; 438 break;
442 case -EINPROGRESS: 439 case -EINPROGRESS:
443 DBG_88E("ERROR: URB IS IN PROGRESS!\n"); 440 DBG_88E("ERROR: URB IS IN PROGRESS!\n");
@@ -448,17 +445,14 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
448 } 445 }
449} 446}
450 447
451u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem) 448u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
452{ 449{
453 struct urb *purb = NULL; 450 struct urb *purb = NULL;
454 struct recv_buf *precvbuf = (struct recv_buf *)rmem;
455 struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter); 451 struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
456 struct recv_priv *precvpriv = &adapter->recvpriv; 452 struct recv_priv *precvpriv = &adapter->recvpriv;
457 struct usb_device *pusbd = pdvobj->pusbdev; 453 struct usb_device *pusbd = pdvobj->pusbdev;
458 int err; 454 int err;
459 unsigned int pipe; 455 unsigned int pipe;
460 size_t tmpaddr = 0;
461 size_t alignment = 0;
462 u32 ret = _SUCCESS; 456 u32 ret = _SUCCESS;
463 457
464 458
@@ -483,22 +477,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
483 477
484 /* re-assign for linux based on skb */ 478 /* re-assign for linux based on skb */
485 if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) { 479 if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
486 precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ); 480 precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
487 if (precvbuf->pskb == NULL) { 481 if (precvbuf->pskb == NULL) {
488 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n")); 482 RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
489 DBG_88E("#### usb_read_port() alloc_skb fail!#####\n"); 483 DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
490 return _FAIL; 484 return _FAIL;
491 } 485 }
492
493 tmpaddr = (size_t)precvbuf->pskb->data;
494 alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
495 skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
496 } else { /* reuse skb */ 486 } else { /* reuse skb */
497 precvbuf->reuse = false; 487 precvbuf->reuse = false;
498 } 488 }
499 489
500 precvpriv->rx_pending_cnt++;
501
502 purb = precvbuf->purb; 490 purb = precvbuf->purb;
503 491
504 /* translate DMA FIFO addr to pipehandle */ 492 /* translate DMA FIFO addr to pipehandle */
@@ -528,7 +516,7 @@ void rtw_hal_inirp_deinit(struct adapter *padapter)
528 int i; 516 int i;
529 struct recv_buf *precvbuf; 517 struct recv_buf *precvbuf;
530 518
531 precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; 519 precvbuf = padapter->recvpriv.precv_buf;
532 520
533 DBG_88E("%s\n", __func__); 521 DBG_88E("%s\n", __func__);
534 522
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 4b1b04e00715..e097c619ed1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -59,11 +59,6 @@ uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
59 return len; 59 return len;
60} 60}
61 61
62int rtw_endofpktfile(struct pkt_file *pfile)
63{
64 return pfile->pkt_len == 0;
65}
66
67int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz) 62int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
68{ 63{
69 int i; 64 int i;
@@ -85,8 +80,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
85 return _SUCCESS; 80 return _SUCCESS;
86} 81}
87 82
88void rtw_os_xmit_resource_free(struct adapter *padapter, 83void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf)
89 struct xmit_buf *pxmitbuf, u32 free_sz)
90{ 84{
91 int i; 85 int i;
92 86
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 25725b158eca..017fe04ebe2d 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -11,7 +11,7 @@
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * wlanfae <wlanfae@realtek.com> 13 * wlanfae <wlanfae@realtek.com>
14******************************************************************************/ 14 ******************************************************************************/
15#include "dot11d.h" 15#include "dot11d.h"
16 16
17struct channel_list { 17struct channel_list {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index f9003a28cae2..757ffd4f2f89 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -49,7 +49,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
49 else 49 else
50 skb = dev_alloc_skb(frag_length + 4); 50 skb = dev_alloc_skb(frag_length + 4);
51 51
52 if (skb == NULL) { 52 if (!skb) {
53 rt_status = false; 53 rt_status = false;
54 goto Failed; 54 goto Failed;
55 } 55 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9aaa85526eb8..bbe399010be1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -202,7 +202,5 @@ bool rtl92e_init_fw(struct net_device *dev)
202 202
203download_firmware_fail: 203download_firmware_fail:
204 netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__); 204 netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__);
205 rt_status = false; 205 return false;
206 return rt_status;
207
208} 206}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5f53fbd565ef..8a9172aa8178 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -367,7 +367,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
367 } 367 }
368} 368}
369 369
370static struct rtllib_qos_parameters def_qos_parameters = { 370static const struct rtllib_qos_parameters def_qos_parameters = {
371 {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)}, 371 {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
372 {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)}, 372 {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
373 {2, 2, 2, 2}, 373 {2, 2, 2, 2},
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c7fd1b1653d6..20260af49ee7 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -11,7 +11,7 @@
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * wlanfae <wlanfae@realtek.com> 13 * wlanfae <wlanfae@realtek.com>
14******************************************************************************/ 14 ******************************************************************************/
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index dd9c0c868361..cded0f43cd33 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -11,7 +11,7 @@
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * wlanfae <wlanfae@realtek.com> 13 * wlanfae <wlanfae@realtek.com>
14******************************************************************************/ 14 ******************************************************************************/
15#include "rtllib.h" 15#include "rtllib.h"
16#include "rtl819x_HT.h" 16#include "rtl819x_HT.h"
17u8 MCS_FILTER_ALL[16] = { 17u8 MCS_FILTER_ALL[16] = {
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e490ab..48bbd9e8a52f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -11,7 +11,7 @@
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * wlanfae <wlanfae@realtek.com> 13 * wlanfae <wlanfae@realtek.com>
14******************************************************************************/ 14 ******************************************************************************/
15#include "rtllib.h" 15#include "rtllib.h"
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include "rtl819x_TS.h" 17#include "rtl819x_TS.h"
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index c743182b933e..e5ba7d1a809f 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
130 ETH_ALEN /* WDS */ + 130 ETH_ALEN /* WDS */ +
131 /* QOS Control */ 131 /* QOS Control */
132 (RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0)); 132 (RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0));
133 if (skb == NULL) 133 if (!skb)
134 return NULL; 134 return NULL;
135 135
136 entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; 136 entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -986,7 +986,7 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
986 ether_addr_copy(src, hdr->addr4); 986 ether_addr_copy(src, hdr->addr4);
987 ether_addr_copy(bssid, ieee->current_network.bssid); 987 ether_addr_copy(bssid, ieee->current_network.bssid);
988 break; 988 break;
989 case 0: 989 default:
990 ether_addr_copy(dst, hdr->addr1); 990 ether_addr_copy(dst, hdr->addr1);
991 ether_addr_copy(src, hdr->addr2); 991 ether_addr_copy(src, hdr->addr2);
992 ether_addr_copy(bssid, hdr->addr3); 992 ether_addr_copy(bssid, hdr->addr3);
@@ -1201,6 +1201,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
1201 if (crypt && !(fc & RTLLIB_FCTL_WEP) && 1201 if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
1202 rtllib_is_eapol_frame(ieee, skb, hdrlen)) { 1202 rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
1203 struct eapol *eap = (struct eapol *)(skb->data + 24); 1203 struct eapol *eap = (struct eapol *)(skb->data + 24);
1204
1204 netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n", 1205 netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n",
1205 eap_get_type(eap->type)); 1206 eap_get_type(eap->type));
1206 } 1207 }
@@ -1430,7 +1431,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
1430 /* skb: hdr + (possible reassembled) full plaintext payload */ 1431 /* skb: hdr + (possible reassembled) full plaintext payload */
1431 payload = skb->data + hdrlen; 1432 payload = skb->data + hdrlen;
1432 rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC); 1433 rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
1433 if (rxb == NULL) 1434 if (!rxb)
1434 goto rx_dropped; 1435 goto rx_dropped;
1435 1436
1436 /* to parse amsdu packets */ 1437 /* to parse amsdu packets */
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index da74dc49b95e..1430ba27b049 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1524,6 +1524,7 @@ static void rtllib_associate_complete_wq(void *data)
1524 struct rtllib_device, 1524 struct rtllib_device,
1525 associate_complete_wq); 1525 associate_complete_wq);
1526 struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl); 1526 struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
1527
1527 netdev_info(ieee->dev, "Associated successfully\n"); 1528 netdev_info(ieee->dev, "Associated successfully\n");
1528 if (!ieee->is_silent_reset) { 1529 if (!ieee->is_silent_reset) {
1529 netdev_info(ieee->dev, "normal associate\n"); 1530 netdev_info(ieee->dev, "normal associate\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 6fa96d57d316..e68850897adf 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -553,7 +553,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
553 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ 553 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
554 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ 554 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
555 break; 555 break;
556 case 0: 556 default:
557 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ 557 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
558 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ 558 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
559 break; 559 break;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 89cbc077a48d..82f654305414 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -129,7 +129,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
129 8 /* WEP */ + 129 8 /* WEP */ +
130 ETH_ALEN /* WDS */ + 130 ETH_ALEN /* WDS */ +
131 (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */); 131 (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
132 if (skb == NULL) 132 if (!skb)
133 return NULL; 133 return NULL;
134 134
135 entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; 135 entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -1079,7 +1079,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1079 memcpy(src, hdr->addr4, ETH_ALEN); 1079 memcpy(src, hdr->addr4, ETH_ALEN);
1080 memcpy(bssid, ieee->current_network.bssid, ETH_ALEN); 1080 memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
1081 break; 1081 break;
1082 case 0: 1082 default:
1083 memcpy(dst, hdr->addr1, ETH_ALEN); 1083 memcpy(dst, hdr->addr1, ETH_ALEN);
1084 memcpy(src, hdr->addr2, ETH_ALEN); 1084 memcpy(src, hdr->addr2, ETH_ALEN);
1085 memcpy(bssid, hdr->addr3, ETH_ALEN); 1085 memcpy(bssid, hdr->addr3, ETH_ALEN);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index c9ea50daffff..b8a170978434 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -63,15 +63,6 @@ static inline u32 end_of_queue_search(struct list_head *head,
63 return (head == plist); 63 return (head == plist);
64} 64}
65 65
66static inline void sleep_schedulable(int ms)
67{
68 u32 delta;
69
70 delta = msecs_to_jiffies(ms);/*(ms)*/
71 set_current_state(TASK_INTERRUPTIBLE);
72 schedule_timeout(delta);
73}
74
75static inline void flush_signals_thread(void) 66static inline void flush_signals_thread(void)
76{ 67{
77 if (signal_pending(current)) 68 if (signal_pending(current))
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 57d5d2db3c77..84456bb560ef 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -68,14 +68,14 @@ struct fw_priv { /*8-bytes alignment required*/
68 unsigned char signature_0; /*0x12: CE product, 0x92: IT product*/ 68 unsigned char signature_0; /*0x12: CE product, 0x92: IT product*/
69 unsigned char signature_1; /*0x87: CE product, 0x81: IT product*/ 69 unsigned char signature_1; /*0x87: CE product, 0x81: IT product*/
70 unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP, 70 unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP,
71 * 0x12: 72S-U, 03:SDIO 71 * 0x12: 72S-U, 03:SDIO
72 */ 72 */
73 unsigned char chip_version; /*the same value as register value*/ 73 unsigned char chip_version; /*the same value as register value*/
74 unsigned char customer_ID_0; /*customer ID low byte*/ 74 unsigned char customer_ID_0; /*customer ID low byte*/
75 unsigned char customer_ID_1; /*customer ID high byte*/ 75 unsigned char customer_ID_1; /*customer ID high byte*/
76 unsigned char rf_config; /*0x11: 1T1R, 0x12: 1T2R, 0x92: 1T2R turbo, 76 unsigned char rf_config; /*0x11: 1T1R, 0x12: 1T2R, 0x92: 1T2R turbo,
77 * 0x22: 2T2R 77 * 0x22: 2T2R
78 */ 78 */
79 unsigned char usb_ep_num; /* 4: 4EP, 6: 6EP, 11: 11EP*/ 79 unsigned char usb_ep_num; /* 4: 4EP, 6: 6EP, 11: 11EP*/
80 /*--- long word 1 ----*/ 80 /*--- long word 1 ----*/
81 unsigned char regulatory_class_0; /*regulatory class bit map 0*/ 81 unsigned char regulatory_class_0; /*regulatory class bit map 0*/
@@ -99,8 +99,8 @@ struct fw_priv { /*8-bytes alignment required*/
99 unsigned char qos_en; /*1: QoS enable*/ 99 unsigned char qos_en; /*1: QoS enable*/
100 unsigned char bw_40MHz_en; /*1: 40MHz BW enable*/ 100 unsigned char bw_40MHz_en; /*1: 40MHz BW enable*/
101 unsigned char AMSDU2AMPDU_en; /*1: 4181 convert AMSDU to AMPDU, 101 unsigned char AMSDU2AMPDU_en; /*1: 4181 convert AMSDU to AMPDU,
102 * 0: disable 102 * 0: disable
103 */ 103 */
104 unsigned char AMPDU_en; /*1: 11n AMPDU enable*/ 104 unsigned char AMPDU_en; /*1: 11n AMPDU enable*/
105 unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/ 105 unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/
106 unsigned char aggregation_offload; /*1: FW offloads,0: driver handles*/ 106 unsigned char aggregation_offload; /*1: FW offloads,0: driver handles*/
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index a8e237e480c9..317aeeed38e8 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -355,7 +355,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
355 } 355 }
356 pLed->bLedScanBlinkInProgress = false; 356 pLed->bLedScanBlinkInProgress = false;
357 } else { 357 } else {
358 if (pLed->bLedOn) 358 if (pLed->bLedOn)
359 pLed->BlinkingLedState = LED_STATE_OFF; 359 pLed->BlinkingLedState = LED_STATE_OFF;
360 else 360 else
361 pLed->BlinkingLedState = LED_STATE_ON; 361 pLed->BlinkingLedState = LED_STATE_ON;
@@ -390,7 +390,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
390 pLed->BlinkTimes = 0; 390 pLed->BlinkTimes = 0;
391 pLed->bLedBlinkInProgress = false; 391 pLed->bLedBlinkInProgress = false;
392 } else { 392 } else {
393 if (pLed->bLedOn) 393 if (pLed->bLedOn)
394 pLed->BlinkingLedState = LED_STATE_OFF; 394 pLed->BlinkingLedState = LED_STATE_OFF;
395 else 395 else
396 pLed->BlinkingLedState = LED_STATE_ON; 396 pLed->BlinkingLedState = LED_STATE_ON;
@@ -460,7 +460,7 @@ static void SwLedBlink2(struct LED_871x *pLed)
460 } 460 }
461 pLed->bLedScanBlinkInProgress = false; 461 pLed->bLedScanBlinkInProgress = false;
462 } else { 462 } else {
463 if (pLed->bLedOn) 463 if (pLed->bLedOn)
464 pLed->BlinkingLedState = LED_STATE_OFF; 464 pLed->BlinkingLedState = LED_STATE_OFF;
465 else 465 else
466 pLed->BlinkingLedState = LED_STATE_ON; 466 pLed->BlinkingLedState = LED_STATE_ON;
@@ -667,7 +667,7 @@ static void SwLedBlink4(struct LED_871x *pLed)
667 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); 667 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
668 pLed->bLedBlinkInProgress = false; 668 pLed->bLedBlinkInProgress = false;
669 } else { 669 } else {
670 if (pLed->bLedOn) 670 if (pLed->bLedOn)
671 pLed->BlinkingLedState = LED_STATE_OFF; 671 pLed->BlinkingLedState = LED_STATE_OFF;
672 else 672 else
673 pLed->BlinkingLedState = LED_STATE_ON; 673 pLed->BlinkingLedState = LED_STATE_ON;
@@ -764,7 +764,7 @@ static void SwLedBlink5(struct LED_871x *pLed)
764 msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA)); 764 msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
765 pLed->bLedBlinkInProgress = false; 765 pLed->bLedBlinkInProgress = false;
766 } else { 766 } else {
767 if (pLed->bLedOn) 767 if (pLed->bLedOn)
768 pLed->BlinkingLedState = LED_STATE_OFF; 768 pLed->BlinkingLedState = LED_STATE_OFF;
769 else 769 else
770 pLed->BlinkingLedState = LED_STATE_ON; 770 pLed->BlinkingLedState = LED_STATE_ON;
@@ -946,7 +946,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
946 if (psitesurveyctrl->traffic_busy && 946 if (psitesurveyctrl->traffic_busy &&
947 check_fwstate(pmlmepriv, _FW_LINKED)) 947 check_fwstate(pmlmepriv, _FW_LINKED))
948 ; /* dummy branch */ 948 ; /* dummy branch */
949 else if (!pLed->bLedScanBlinkInProgress) { 949 else if (!pLed->bLedScanBlinkInProgress) {
950 if (IS_LED_WPS_BLINKING(pLed)) 950 if (IS_LED_WPS_BLINKING(pLed))
951 return; 951 return;
952 if (pLed->bLedNoLinkBlinkInProgress) { 952 if (pLed->bLedNoLinkBlinkInProgress) {
@@ -970,7 +970,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
970 pLed->BlinkingLedState = LED_STATE_ON; 970 pLed->BlinkingLedState = LED_STATE_ON;
971 mod_timer(&pLed->BlinkTimer, jiffies + 971 mod_timer(&pLed->BlinkTimer, jiffies +
972 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); 972 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
973 } 973 }
974 break; 974 break;
975 case LED_CTL_TX: 975 case LED_CTL_TX:
976 case LED_CTL_RX: 976 case LED_CTL_RX:
@@ -1000,7 +1000,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
1000 1000
1001 case LED_CTL_START_WPS: /*wait until xinpin finish */ 1001 case LED_CTL_START_WPS: /*wait until xinpin finish */
1002 case LED_CTL_START_WPS_BOTTON: 1002 case LED_CTL_START_WPS_BOTTON:
1003 if (!pLed->bLedWPSBlinkInProgress) { 1003 if (!pLed->bLedWPSBlinkInProgress) {
1004 if (pLed->bLedNoLinkBlinkInProgress) { 1004 if (pLed->bLedNoLinkBlinkInProgress) {
1005 del_timer(&pLed->BlinkTimer); 1005 del_timer(&pLed->BlinkTimer);
1006 pLed->bLedNoLinkBlinkInProgress = false; 1006 pLed->bLedNoLinkBlinkInProgress = false;
@@ -1113,9 +1113,9 @@ static void SwLedControlMode2(struct _adapter *padapter,
1113 1113
1114 switch (LedAction) { 1114 switch (LedAction) {
1115 case LED_CTL_SITE_SURVEY: 1115 case LED_CTL_SITE_SURVEY:
1116 if (pmlmepriv->sitesurveyctrl.traffic_busy) 1116 if (pmlmepriv->sitesurveyctrl.traffic_busy)
1117 ; /* dummy branch */ 1117 ; /* dummy branch */
1118 else if (!pLed->bLedScanBlinkInProgress) { 1118 else if (!pLed->bLedScanBlinkInProgress) {
1119 if (IS_LED_WPS_BLINKING(pLed)) 1119 if (IS_LED_WPS_BLINKING(pLed))
1120 return; 1120 return;
1121 1121
@@ -1132,7 +1132,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
1132 pLed->BlinkingLedState = LED_STATE_ON; 1132 pLed->BlinkingLedState = LED_STATE_ON;
1133 mod_timer(&pLed->BlinkTimer, jiffies + 1133 mod_timer(&pLed->BlinkTimer, jiffies +
1134 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); 1134 msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
1135 } 1135 }
1136 break; 1136 break;
1137 1137
1138 case LED_CTL_TX: 1138 case LED_CTL_TX:
@@ -1186,7 +1186,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
1186 pLed->BlinkingLedState = LED_STATE_ON; 1186 pLed->BlinkingLedState = LED_STATE_ON;
1187 mod_timer(&pLed->BlinkTimer, 1187 mod_timer(&pLed->BlinkTimer,
1188 jiffies + msecs_to_jiffies(0)); 1188 jiffies + msecs_to_jiffies(0));
1189 } 1189 }
1190 break; 1190 break;
1191 1191
1192 case LED_CTL_STOP_WPS: 1192 case LED_CTL_STOP_WPS:
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index b7ee5e63af33..04638f1e4e88 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -72,8 +72,11 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
72 ((addr_t)(pcmdpriv->cmd_allocated_buf) & 72 ((addr_t)(pcmdpriv->cmd_allocated_buf) &
73 (CMDBUFF_ALIGN_SZ - 1)); 73 (CMDBUFF_ALIGN_SZ - 1));
74 pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC); 74 pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
75 if (!pcmdpriv->rsp_allocated_buf) 75 if (!pcmdpriv->rsp_allocated_buf) {
76 kfree(pcmdpriv->cmd_allocated_buf);
77 pcmdpriv->cmd_allocated_buf = NULL;
76 return _FAIL; 78 return _FAIL;
79 }
77 pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - 80 pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 -
78 ((addr_t)(pcmdpriv->rsp_allocated_buf) & 3); 81 ((addr_t)(pcmdpriv->rsp_allocated_buf) & 3);
79 pcmdpriv->cmd_issued_cnt = 0; 82 pcmdpriv->cmd_issued_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 475e7904fe45..590acb5aea3d 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -588,9 +588,9 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
588 netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n"); 588 netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
589 cnt += buf[cnt + 1] + 2; 589 cnt += buf[cnt + 1] + 2;
590 break; 590 break;
591 } else {
592 cnt += buf[cnt + 1] + 2;
593 } 591 }
592
593 cnt += buf[cnt + 1] + 2;
594 } 594 }
595 } 595 }
596 } 596 }
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 0aaf2aab6dd0..01a150446f5a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -139,9 +139,10 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
139 if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, 139 if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
140 ETH_ALEN)) { 140 ETH_ALEN)) {
141 if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) 141 if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
142 goto _Abort_Set_BSSID; /* driver is in 142 /* driver is in
143 * WIFI_ADHOC_MASTER_STATE 143 * WIFI_ADHOC_MASTER_STATE
144 */ 144 */
145 goto _Abort_Set_BSSID;
145 } else { 146 } else {
146 r8712_disassoc_cmd(padapter); 147 r8712_disassoc_cmd(padapter);
147 if (check_fwstate(pmlmepriv, _FW_LINKED)) 148 if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -203,9 +204,10 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
203 WIFI_ADHOC_STATE); 204 WIFI_ADHOC_STATE);
204 } 205 }
205 } else { 206 } else {
206 goto _Abort_Set_SSID; /* driver is in 207 /* driver is in
207 * WIFI_ADHOC_MASTER_STATE 208 * WIFI_ADHOC_MASTER_STATE
208 */ 209 */
210 goto _Abort_Set_SSID;
209 } 211 }
210 } 212 }
211 } else { 213 } else {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c1feef3da26c..35cbdc71cad4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -137,11 +137,10 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv,
137} 137}
138 138
139 139
140/* 140/* return the wlan_network with the matching addr
141 return the wlan_network with the matching addr 141 * Shall be called under atomic context...
142 Shall be called under atomic context... 142 * to avoid possible racing condition...
143 to avoid possible racing condition... 143 */
144*/
145static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue, 144static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
146 u8 *addr) 145 u8 *addr)
147{ 146{
@@ -239,11 +238,10 @@ void r8712_free_network_queue(struct _adapter *dev)
239} 238}
240 239
241/* 240/*
242 return the wlan_network with the matching addr 241 * return the wlan_network with the matching addr
243 242 * Shall be called under atomic context...
244 Shall be called under atomic context... 243 * to avoid possible racing condition...
245 to avoid possible racing condition... 244 */
246*/
247static struct wlan_network *r8712_find_network(struct __queue *scanned_queue, 245static struct wlan_network *r8712_find_network(struct __queue *scanned_queue,
248 u8 *addr) 246 u8 *addr)
249{ 247{
@@ -369,9 +367,7 @@ static void update_current_network(struct _adapter *adapter,
369 } 367 }
370} 368}
371 369
372/* 370/* Caller must hold pmlmepriv->lock first */
373Caller must hold pmlmepriv->lock first.
374*/
375static void update_scanned_network(struct _adapter *adapter, 371static void update_scanned_network(struct _adapter *adapter,
376 struct wlan_bssid_ex *target) 372 struct wlan_bssid_ex *target)
377{ 373{
@@ -651,8 +647,8 @@ void r8712_free_assoc_resources(struct _adapter *adapter)
651} 647}
652 648
653/* 649/*
654*r8712_indicate_connect: the caller has to lock pmlmepriv->lock 650 * r8712_indicate_connect: the caller has to lock pmlmepriv->lock
655*/ 651 */
656void r8712_indicate_connect(struct _adapter *padapter) 652void r8712_indicate_connect(struct _adapter *padapter)
657{ 653{
658 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 654 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -668,8 +664,8 @@ void r8712_indicate_connect(struct _adapter *padapter)
668 664
669 665
670/* 666/*
671*r8712_ind_disconnect: the caller has to lock pmlmepriv->lock 667 * r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
672*/ 668 */
673void r8712_ind_disconnect(struct _adapter *padapter) 669void r8712_ind_disconnect(struct _adapter *padapter)
674{ 670{
675 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 671 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1347,8 +1343,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
1347 (!memcmp(psecuritypriv->PMKIDList[i].Bssid, 1343 (!memcmp(psecuritypriv->PMKIDList[i].Bssid,
1348 bssid, ETH_ALEN))) 1344 bssid, ETH_ALEN)))
1349 break; 1345 break;
1350 else 1346 i++;
1351 i++; 1347
1352 } while (i < NUM_PMKID_CACHE); 1348 } while (i < NUM_PMKID_CACHE);
1353 1349
1354 if (i == NUM_PMKID_CACHE) { 1350 if (i == NUM_PMKID_CACHE) {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index ddaaab058b2f..53a23234c598 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -162,24 +162,6 @@ static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
162 spin_unlock_irqrestore(&pmlmepriv->lock, irqL); 162 spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
163} 163}
164 164
165static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
166{
167 unsigned long irqL;
168
169 spin_lock_irqsave(&pmlmepriv->lock, irqL);
170 pmlmepriv->num_of_scanned++;
171 spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
172}
173
174static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
175{
176 unsigned long irqL;
177
178 spin_lock_irqsave(&pmlmepriv->lock, irqL);
179 pmlmepriv->num_of_scanned--;
180 spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
181}
182
183static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, 165static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv,
184 sint val) 166 sint val)
185{ 167{
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index d464c136dd98..e42fc1404c35 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -190,19 +190,15 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
190} 190}
191 191
192/* 192/*
193Caller: r8712_cmd_thread 193 * Caller: r8712_cmd_thread
194 194 * Check if the fw_pwrstate is okay for issuing cmd.
195Check if the fw_pwrstate is okay for issuing cmd. 195 * If not (cpwm should be is less than P2 state), then the sub-routine
196If not (cpwm should be is less than P2 state), then the sub-routine 196 * will raise the cpwm to be greater than or equal to P2.
197will raise the cpwm to be greater than or equal to P2. 197 * Calling Context: Passive
198 198 * Return Value:
199Calling Context: Passive 199 * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
200 200 * _FAIL: r8712_cmd_thread can not do anything.
201Return Value: 201 */
202
203_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
204_FAIL: r8712_cmd_thread can not do anything.
205*/
206sint r8712_register_cmd_alive(struct _adapter *padapter) 202sint r8712_register_cmd_alive(struct _adapter *padapter)
207{ 203{
208 uint res = _SUCCESS; 204 uint res = _SUCCESS;
@@ -219,13 +215,11 @@ sint r8712_register_cmd_alive(struct _adapter *padapter)
219} 215}
220 216
221/* 217/*
222Caller: ISR 218 * Caller: ISR
223 219 * If ISR's txdone,
224If ISR's txdone, 220 * No more pkts for TX,
225No more pkts for TX, 221 * Then driver shall call this fun. to power down firmware again.
226Then driver shall call this fun. to power down firmware again. 222 */
227*/
228
229void r8712_unregister_cmd_alive(struct _adapter *padapter) 223void r8712_unregister_cmd_alive(struct _adapter *padapter)
230{ 224{
231 struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv; 225 struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index cbd2e51ba42b..35c721a50598 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -125,13 +125,10 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
125} 125}
126 126
127/* 127/*
128caller : defrag; recvframe_chk_defrag in recv_thread (passive) 128 * caller : defrag; recvframe_chk_defrag in recv_thread (passive)
129pframequeue: defrag_queue : will be accessed in recv_thread (passive) 129 * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
130 130 * using spin_lock to protect
131using spin_lock to protect 131 */
132
133*/
134
135void r8712_free_recvframe_queue(struct __queue *pframequeue, 132void r8712_free_recvframe_queue(struct __queue *pframequeue,
136 struct __queue *pfree_recv_queue) 133 struct __queue *pfree_recv_queue)
137{ 134{
@@ -405,7 +402,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
405 } 402 }
406 403
407 /* filter packets that SA is myself or multicast or broadcast */ 404 /* filter packets that SA is myself or multicast or broadcast */
408 if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) 405 if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
409 return _FAIL; 406 return _FAIL;
410 407
411 /* da should be for me */ 408 /* da should be for me */
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 09242425dad4..a7f04a4b089d 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -159,8 +159,8 @@ static u32 getcrc32(u8 *buf, u32 len)
159} 159}
160 160
161/* 161/*
162 Need to consider the fragment situation 162 * Need to consider the fragment situation
163*/ 163 */
164void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe) 164void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
165{ /* exclude ICV */ 165{ /* exclude ICV */
166 unsigned char crc[4]; 166 unsigned char crc[4];
@@ -467,22 +467,22 @@ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
467}; 467};
468 468
469/* 469/*
470********************************************************************** 470 **********************************************************************
471* Routine: Phase 1 -- generate P1K, given TA, TK, IV32 471 * Routine: Phase 1 -- generate P1K, given TA, TK, IV32
472* 472 *
473* Inputs: 473 * Inputs:
474* tk[] = temporal key [128 bits] 474 * tk[] = temporal key [128 bits]
475* ta[] = transmitter's MAC address [ 48 bits] 475 * ta[] = transmitter's MAC address [ 48 bits]
476* iv32 = upper 32 bits of IV [ 32 bits] 476 * iv32 = upper 32 bits of IV [ 32 bits]
477* Output: 477 * Output:
478* p1k[] = Phase 1 key [ 80 bits] 478 * p1k[] = Phase 1 key [ 80 bits]
479* 479 *
480* Note: 480 * Note:
481* This function only needs to be called every 2**16 packets, 481 * This function only needs to be called every 2**16 packets,
482* although in theory it could be called every packet. 482 * although in theory it could be called every packet.
483* 483 *
484********************************************************************** 484 **********************************************************************
485*/ 485 */
486static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) 486static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
487{ 487{
488 sint i; 488 sint i;
@@ -506,28 +506,28 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
506} 506}
507 507
508/* 508/*
509********************************************************************** 509 **********************************************************************
510* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16 510 * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
511* 511 *
512* Inputs: 512 * Inputs:
513* tk[] = Temporal key [128 bits] 513 * tk[] = Temporal key [128 bits]
514* p1k[] = Phase 1 output key [ 80 bits] 514 * p1k[] = Phase 1 output key [ 80 bits]
515* iv16 = low 16 bits of IV counter [ 16 bits] 515 * iv16 = low 16 bits of IV counter [ 16 bits]
516* Output: 516 * Output:
517* rc4key[] = the key used to encrypt the packet [128 bits] 517 * rc4key[] = the key used to encrypt the packet [128 bits]
518* 518 *
519* Note: 519 * Note:
520* The value {TA,IV32,IV16} for Phase1/Phase2 must be unique 520 * The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
521* across all packets using the same key TK value. Then, for a 521 * across all packets using the same key TK value. Then, for a
522* given value of TK[], this TKIP48 construction guarantees that 522 * given value of TK[], this TKIP48 construction guarantees that
523* the final RC4KEY value is unique across all packets. 523 * the final RC4KEY value is unique across all packets.
524* 524 *
525* Suggested implementation optimization: if PPK[] is "overlaid" 525 * Suggested implementation optimization: if PPK[] is "overlaid"
526* appropriately on RC4KEY[], there is no need for the final 526 * appropriately on RC4KEY[], there is no need for the final
527* for loop below that copies the PPK[] result into RC4KEY[]. 527 * for loop below that copies the PPK[] result into RC4KEY[].
528* 528 *
529********************************************************************** 529 **********************************************************************
530*/ 530 */
531static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16) 531static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
532{ 532{
533 sint i; 533 sint i;
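
The phase1/phase2 block comments reformatted above spell out the TKIP key-mixing interface: phase1 turns (TK, TA, IV32) into an 80-bit P1K and only needs rerunning when IV32 changes, while phase2 turns (TK, P1K, IV16) into the per-packet 128-bit RC4 key. A minimal sketch of a caller driving the two routines, assuming a hypothetical per-peer context struct; none of these names come from the rtl8712 driver itself:

struct tkip_mix_ctx {
	u16 p1k[5];	/* cached Phase 1 key, 80 bits */
	u32 iv32;	/* IV32 the cached p1k[] was derived from */
	int p1k_valid;
};

static void tkip_derive_rc4key(struct tkip_mix_ctx *ctx, const u8 *tk,
			       const u8 *ta, u32 iv32, u16 iv16, u8 *rc4key)
{
	/* Phase 1 only needs recomputing when IV32 changes (every 2**16 packets). */
	if (!ctx->p1k_valid || ctx->iv32 != iv32) {
		phase1(ctx->p1k, tk, ta, iv32);
		ctx->iv32 = iv32;
		ctx->p1k_valid = 1;
	}
	/* Phase 2 runs per packet and yields the 128-bit RC4 key. */
	phase2(rc4key, tk, ctx->p1k, iv16);
}
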
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364c8a7c..4ab82ba9bb3f 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
71 memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); 71 memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
72 spin_lock_init(&pxmitpriv->lock); 72 spin_lock_init(&pxmitpriv->lock);
73 /* 73 /*
74 Please insert all the queue initialization using _init_queue below 74 *Please insert all the queue initialization using _init_queue below
75 */ 75 */
76 pxmitpriv->adapter = padapter; 76 pxmitpriv->adapter = padapter;
77 _init_queue(&pxmitpriv->be_pending); 77 _init_queue(&pxmitpriv->be_pending);
78 _init_queue(&pxmitpriv->bk_pending); 78 _init_queue(&pxmitpriv->bk_pending);
@@ -83,10 +83,10 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
83 _init_queue(&pxmitpriv->apsd_queue); 83 _init_queue(&pxmitpriv->apsd_queue);
84 _init_queue(&pxmitpriv->free_xmit_queue); 84 _init_queue(&pxmitpriv->free_xmit_queue);
85 /* 85 /*
86 Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME, 86 * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
87 and initialize free_xmit_frame below. 87 * and initialize free_xmit_frame below.
88 Please also apply free_txobj to link_up all the xmit_frames... 88 * Please also apply free_txobj to link_up all the xmit_frames...
89 */ 89 */
90 pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, 90 pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
91 GFP_ATOMIC); 91 GFP_ATOMIC);
92 if (!pxmitpriv->pallocated_frame_buf) { 92 if (!pxmitpriv->pallocated_frame_buf) {
@@ -109,8 +109,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
109 } 109 }
110 pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME; 110 pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
111 /* 111 /*
112 init xmit hw_txqueue 112 * init xmit hw_txqueue
113 */ 113 */
114 _r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX); 114 _r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
115 _r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX); 115 _r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
116 _r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX); 116 _r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
@@ -128,8 +128,11 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
128 _init_queue(&pxmitpriv->pending_xmitbuf_queue); 128 _init_queue(&pxmitpriv->pending_xmitbuf_queue);
129 pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, 129 pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
130 GFP_ATOMIC); 130 GFP_ATOMIC);
131 if (!pxmitpriv->pallocated_xmitbuf) 131 if (!pxmitpriv->pallocated_xmitbuf) {
132 kfree(pxmitpriv->pallocated_frame_buf);
133 pxmitpriv->pallocated_frame_buf = NULL;
132 return _FAIL; 134 return _FAIL;
135 }
133 pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - 136 pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
134 ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3); 137 ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
135 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; 138 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
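
The hunk above also fixes a leak: if the xmit_buf allocation fails, the earlier frame-buffer allocation is now freed and its pointer cleared before returning _FAIL. A generic sketch of that unwind pattern, with placeholder names, sizes, and error codes rather than the driver's actual fields:

#include <linux/slab.h>

static int alloc_two_bufs(void **first, void **second,
			  size_t first_sz, size_t second_sz)
{
	*first = kmalloc(first_sz, GFP_ATOMIC);
	if (!*first)
		return -ENOMEM;

	*second = kmalloc(second_sz, GFP_ATOMIC);
	if (!*second) {
		/* undo the earlier allocation so the caller never sees
		 * a half-initialised pair or a dangling pointer
		 */
		kfree(*first);
		*first = NULL;
		return -ENOMEM;
	}
	return 0;
}
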
@@ -777,24 +780,23 @@ int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
777} 780}
778 781
779/* 782/*
780Calling context: 783 * Calling context:
7811. OS_TXENTRY 784 * 1. OS_TXENTRY
7822. RXENTRY (rx_thread or RX_ISR/RX_CallBack) 785 * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
783 786 *
784If we turn on USE_RXTHREAD, then, no need for critical section. 787 * If we turn on USE_RXTHREAD, then, no need for critical section.
785Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... 788 * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
786 789 *
787Must be very very cautious... 790 * Must be very very cautious...
788 791 *
789*/ 792 */
790
791struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv) 793struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
792{ 794{
793 /* 795 /*
794 Please remember to use all the osdep_service api, 796 * Please remember to use all the osdep_service api,
795 and lock/unlock or _enter/_exit critical to protect 797 * and lock/unlock or _enter/_exit critical to protect
796 pfree_xmit_queue 798 * pfree_xmit_queue
797 */ 799 */
798 unsigned long irqL; 800 unsigned long irqL;
799 struct xmit_frame *pxframe; 801 struct xmit_frame *pxframe;
800 struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; 802 struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index d899d0c6d3a6..40927277f498 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -261,12 +261,6 @@ struct xmit_priv {
261 uint free_xmitbuf_cnt; 261 uint free_xmitbuf_cnt;
262}; 262};
263 263
264static inline struct __queue *get_free_xmit_queue(
265 struct xmit_priv *pxmitpriv)
266{
267 return &(pxmitpriv->free_xmit_queue);
268}
269
270int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, 264int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
271 struct xmit_buf *pxmitbuf); 265 struct xmit_buf *pxmitbuf);
272struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv); 266struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index f27df0b4cb44..28d56c5d1449 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -432,31 +432,36 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
432 432
433 if (CHECK_PID(chip, 0x5208)) { 433 if (CHECK_PID(chip, 0x5208)) {
434 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, 434 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
435 MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD); 435 MS_D1_PD | MS_D2_PD | MS_CLK_PD |
436 MS_D6_PD);
436 if (retval) { 437 if (retval) {
437 rtsx_trace(chip); 438 rtsx_trace(chip);
438 return retval; 439 return retval;
439 } 440 }
440 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, 441 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
441 MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD); 442 MS_D3_PD | MS_D0_PD | MS_BS_PD |
443 XD_D4_PD);
442 if (retval) { 444 if (retval) {
443 rtsx_trace(chip); 445 rtsx_trace(chip);
444 return retval; 446 return retval;
445 } 447 }
446 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, 448 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
447 MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 449 MS_D7_PD | XD_CE_PD | XD_CLE_PD |
450 XD_CD_PU);
448 if (retval) { 451 if (retval) {
449 rtsx_trace(chip); 452 rtsx_trace(chip);
450 return retval; 453 return retval;
451 } 454 }
452 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, 455 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
453 XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); 456 XD_RDY_PD | SD_D3_PD | SD_D2_PD |
457 XD_ALE_PD);
454 if (retval) { 458 if (retval) {
455 rtsx_trace(chip); 459 rtsx_trace(chip);
456 return retval; 460 return retval;
457 } 461 }
458 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, 462 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
459 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 463 MS_INS_PU | SD_WP_PD | SD_CD_PU |
464 SD_CMD_PD);
460 if (retval) { 465 if (retval) {
461 rtsx_trace(chip); 466 rtsx_trace(chip);
462 return retval; 467 return retval;
@@ -507,17 +512,17 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
507 512
508 if (CHECK_PID(chip, 0x5208)) { 513 if (CHECK_PID(chip, 0x5208)) {
509 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 514 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
510 MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD); 515 MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
511 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 516 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
512 MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD); 517 MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
513 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 518 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
514 MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 519 MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
515 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 520 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
516 XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); 521 XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
517 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 522 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
518 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 523 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
519 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 524 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
520 MS_D5_PD | MS_D4_PD); 525 MS_D5_PD | MS_D4_PD);
521 } else if (CHECK_PID(chip, 0x5288)) { 526 } else if (CHECK_PID(chip, 0x5288)) {
522 if (CHECK_BARO_PKG(chip, QFN)) { 527 if (CHECK_BARO_PKG(chip, QFN)) {
523 rtsx_add_cmd(chip, WRITE_REG_CMD, 528 rtsx_add_cmd(chip, WRITE_REG_CMD,
@@ -616,14 +621,20 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
616 621
617 if (chip->asic_code) { 622 if (chip->asic_code) {
618 retval = rtsx_write_register(chip, MS_CFG, 0xFF, 623 retval = rtsx_write_register(chip, MS_CFG, 0xFF,
619 SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); 624 SAMPLE_TIME_RISING |
625 PUSH_TIME_DEFAULT |
626 NO_EXTEND_TOGGLE |
627 MS_BUS_WIDTH_1);
620 if (retval) { 628 if (retval) {
621 rtsx_trace(chip); 629 rtsx_trace(chip);
622 return retval; 630 return retval;
623 } 631 }
624 } else { 632 } else {
625 retval = rtsx_write_register(chip, MS_CFG, 0xFF, 633 retval = rtsx_write_register(chip, MS_CFG, 0xFF,
626 SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); 634 SAMPLE_TIME_FALLING |
635 PUSH_TIME_DEFAULT |
636 NO_EXTEND_TOGGLE |
637 MS_BUS_WIDTH_1);
627 if (retval) { 638 if (retval) {
628 rtsx_trace(chip); 639 rtsx_trace(chip);
629 return retval; 640 return retval;
@@ -665,7 +676,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
665 676
666 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { 677 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
667 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG, 678 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
668 6, NO_WAIT_INT); 679 6, NO_WAIT_INT);
669 if (retval == STATUS_SUCCESS) 680 if (retval == STATUS_SUCCESS)
670 break; 681 break;
671 } 682 }
@@ -765,7 +776,7 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
765 776
766 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { 777 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
767 retval = ms_read_bytes(chip, GET_INT, 1, 778 retval = ms_read_bytes(chip, GET_INT, 1,
768 NO_WAIT_INT, &val, 1); 779 NO_WAIT_INT, &val, 1);
769 if (retval == STATUS_SUCCESS) 780 if (retval == STATUS_SUCCESS)
770 break; 781 break;
771 } 782 }
@@ -794,9 +805,9 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
794 } 805 }
795 806
796 if (val & INT_REG_ERR) { 807 if (val & INT_REG_ERR) {
797 if (val & INT_REG_CMDNK) 808 if (val & INT_REG_CMDNK) {
798 chip->card_wp |= (MS_CARD); 809 chip->card_wp |= (MS_CARD);
799 else { 810 } else {
800 rtsx_trace(chip); 811 rtsx_trace(chip);
801 return STATUS_FAIL; 812 return STATUS_FAIL;
802 } 813 }
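
The hunk above braces the single-statement branch because its else branch already needs braces; kernel style requires that when any branch of a conditional is braced, all branches are. A trivial standalone illustration with an invented flag and field, not the rts5208 code:

static int note_write_protect(unsigned int status, unsigned int *card_wp)
{
	if (status & 0x01) {		/* e.g. a CMDNK-style flag */
		*card_wp |= 0x08;	/* mark the card write protected */
	} else {
		/* multi-statement branch: braces required on both sides */
		*card_wp &= ~0x08;
		return -1;
	}
	return 0;
}
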
@@ -861,7 +872,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
861 872
862 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { 873 for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
863 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 874 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
864 1, NO_WAIT_INT); 875 1, NO_WAIT_INT);
865 if (retval != STATUS_SUCCESS) { 876 if (retval != STATUS_SUCCESS) {
866 rtsx_trace(chip); 877 rtsx_trace(chip);
867 return STATUS_FAIL; 878 return STATUS_FAIL;
@@ -1061,8 +1072,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
1061 return STATUS_FAIL; 1072 return STATUS_FAIL;
1062 } 1073 }
1063 retval = ms_transfer_data(chip, MS_TM_AUTO_READ, 1074 retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
1064 PRO_READ_LONG_DATA, 0x40, WAIT_INT, 1075 PRO_READ_LONG_DATA, 0x40, WAIT_INT,
1065 0, 0, buf, 64 * 512); 1076 0, 0, buf, 64 * 512);
1066 if (retval == STATUS_SUCCESS) 1077 if (retval == STATUS_SUCCESS)
1067 break; 1078 break;
1068 1079
@@ -1087,7 +1098,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
1087 break; 1098 break;
1088 1099
1089 retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, 1100 retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
1090 PRO_READ_LONG_DATA, 0, WAIT_INT); 1101 PRO_READ_LONG_DATA, 0, WAIT_INT);
1091 if (retval != STATUS_SUCCESS) { 1102 if (retval != STATUS_SUCCESS) {
1092 kfree(buf); 1103 kfree(buf);
1093 rtsx_trace(chip); 1104 rtsx_trace(chip);
@@ -1121,7 +1132,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
1121 1132
1122#ifdef SUPPORT_MSXC 1133#ifdef SUPPORT_MSXC
1123 if ((buf[cur_addr_off + 8] == 0x10) || 1134 if ((buf[cur_addr_off + 8] == 0x10) ||
1124 (buf[cur_addr_off + 8] == 0x13)) { 1135 (buf[cur_addr_off + 8] == 0x13)) {
1125#else 1136#else
1126 if (buf[cur_addr_off + 8] == 0x10) { 1137 if (buf[cur_addr_off + 8] == 0x10) {
1127#endif 1138#endif
@@ -1264,7 +1275,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
1264 1275
1265 if (device_type != 0x00) { 1276 if (device_type != 0x00) {
1266 if ((device_type == 0x01) || (device_type == 0x02) || 1277 if ((device_type == 0x01) || (device_type == 0x02) ||
1267 (device_type == 0x03)) { 1278 (device_type == 0x03)) {
1268 chip->card_wp |= MS_CARD; 1279 chip->card_wp |= MS_CARD;
1269 } else { 1280 } else {
1270 rtsx_trace(chip); 1281 rtsx_trace(chip);
@@ -1298,7 +1309,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
1298 1309
1299#ifdef SUPPORT_MAGIC_GATE 1310#ifdef SUPPORT_MAGIC_GATE
1300static int mg_set_tpc_para_sub(struct rtsx_chip *chip, 1311static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
1301 int type, u8 mg_entry_num); 1312 int type, u8 mg_entry_num);
1302#endif 1313#endif
1303 1314
1304static int reset_ms_pro(struct rtsx_chip *chip) 1315static int reset_ms_pro(struct rtsx_chip *chip)
@@ -1317,7 +1328,7 @@ static int reset_ms_pro(struct rtsx_chip *chip)
1317#endif 1328#endif
1318 1329
1319#ifdef XC_POWERCLASS 1330#ifdef XC_POWERCLASS
1320Retry: 1331retry:
1321#endif 1332#endif
1322 retval = ms_pro_reset_flow(chip, 1); 1333 retval = ms_pro_reset_flow(chip, 1);
1323 if (retval != STATUS_SUCCESS) { 1334 if (retval != STATUS_SUCCESS) {
@@ -1365,10 +1376,10 @@ Retry:
1365 change_power_class = power_class_mode; 1376 change_power_class = power_class_mode;
1366 if (change_power_class) { 1377 if (change_power_class) {
1367 retval = msxc_change_power(chip, 1378 retval = msxc_change_power(chip,
1368 change_power_class); 1379 change_power_class);
1369 if (retval != STATUS_SUCCESS) { 1380 if (retval != STATUS_SUCCESS) {
1370 change_power_class--; 1381 change_power_class--;
1371 goto Retry; 1382 goto retry;
1372 } 1383 }
1373 } 1384 }
1374 } 1385 }
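
The hunk above lowercases the goto label (Retry -> retry) in the path that drops to a lower power class and re-runs the reset. A compressed sketch of that retry flow, using the driver's helpers by name but otherwise illustrative rather than a verbatim extract:

static int msxc_power_up_retry(struct rtsx_chip *chip, u8 power_class)
{
	int retval;

retry:
	retval = ms_pro_reset_flow(chip, 1);
	if (retval != STATUS_SUCCESS)
		return retval;

	if (power_class) {
		retval = msxc_change_power(chip, power_class);
		if (retval != STATUS_SUCCESS) {
			power_class--;	/* fall back one class and retry */
			goto retry;
		}
	}
	return STATUS_SUCCESS;
}
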
@@ -1418,14 +1429,14 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
1418} 1429}
1419 1430
1420static int ms_read_extra_data(struct rtsx_chip *chip, 1431static int ms_read_extra_data(struct rtsx_chip *chip,
1421 u16 block_addr, u8 page_num, u8 *buf, int buf_len) 1432 u16 block_addr, u8 page_num, u8 *buf, int buf_len)
1422{ 1433{
1423 struct ms_info *ms_card = &chip->ms_card; 1434 struct ms_info *ms_card = &chip->ms_card;
1424 int retval, i; 1435 int retval, i;
1425 u8 val, data[10]; 1436 u8 val, data[10];
1426 1437
1427 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 1438 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
1428 SystemParm, 6); 1439 SystemParm, 6);
1429 if (retval != STATUS_SUCCESS) { 1440 if (retval != STATUS_SUCCESS) {
1430 rtsx_trace(chip); 1441 rtsx_trace(chip);
1431 return STATUS_FAIL; 1442 return STATUS_FAIL;
@@ -1488,7 +1499,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
1488 } 1499 }
1489 1500
1490 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, 1501 retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
1491 MS_EXTRA_SIZE, SystemParm, 6); 1502 MS_EXTRA_SIZE, SystemParm,
1503 6);
1492 if (retval != STATUS_SUCCESS) { 1504 if (retval != STATUS_SUCCESS) {
1493 rtsx_trace(chip); 1505 rtsx_trace(chip);
1494 return STATUS_FAIL; 1506 return STATUS_FAIL;
@@ -1497,7 +1509,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
1497 } 1509 }
1498 1510
1499 retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT, 1511 retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
1500 data, MS_EXTRA_SIZE); 1512 data, MS_EXTRA_SIZE);
1501 if (retval != STATUS_SUCCESS) { 1513 if (retval != STATUS_SUCCESS) {
1502 rtsx_trace(chip); 1514 rtsx_trace(chip);
1503 return STATUS_FAIL; 1515 return STATUS_FAIL;
@@ -1512,8 +1524,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
1512 return STATUS_SUCCESS; 1524 return STATUS_SUCCESS;
1513} 1525}
1514 1526
1515static int ms_write_extra_data(struct rtsx_chip *chip, 1527static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
1516 u16 block_addr, u8 page_num, u8 *buf, int buf_len) 1528 u8 page_num, u8 *buf, int buf_len)
1517{ 1529{
1518 struct ms_info *ms_card = &chip->ms_card; 1530 struct ms_info *ms_card = &chip->ms_card;
1519 int retval, i; 1531 int retval, i;
@@ -1525,7 +1537,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
1525 } 1537 }
1526 1538
1527 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 1539 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
1528 SystemParm, 6 + MS_EXTRA_SIZE); 1540 SystemParm, 6 + MS_EXTRA_SIZE);
1529 if (retval != STATUS_SUCCESS) { 1541 if (retval != STATUS_SUCCESS) {
1530 rtsx_trace(chip); 1542 rtsx_trace(chip);
1531 return STATUS_FAIL; 1543 return STATUS_FAIL;
@@ -1588,7 +1600,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
1588 u8 val, data[6]; 1600 u8 val, data[6];
1589 1601
1590 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 1602 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
1591 SystemParm, 6); 1603 SystemParm, 6);
1592 if (retval != STATUS_SUCCESS) { 1604 if (retval != STATUS_SUCCESS) {
1593 rtsx_trace(chip); 1605 rtsx_trace(chip);
1594 return STATUS_FAIL; 1606 return STATUS_FAIL;
@@ -1651,7 +1663,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
1651 } 1663 }
1652 1664
1653 retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA, 1665 retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
1654 0, NO_WAIT_INT); 1666 0, NO_WAIT_INT);
1655 if (retval != STATUS_SUCCESS) { 1667 if (retval != STATUS_SUCCESS) {
1656 rtsx_trace(chip); 1668 rtsx_trace(chip);
1657 return STATUS_FAIL; 1669 return STATUS_FAIL;
@@ -1678,7 +1690,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
1678 } 1690 }
1679 1691
1680 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 1692 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
1681 SystemParm, 7); 1693 SystemParm, 7);
1682 if (retval != STATUS_SUCCESS) { 1694 if (retval != STATUS_SUCCESS) {
1683 rtsx_trace(chip); 1695 rtsx_trace(chip);
1684 return STATUS_FAIL; 1696 return STATUS_FAIL;
@@ -1742,7 +1754,7 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
1742 u8 val, data[6]; 1754 u8 val, data[6];
1743 1755
1744 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 1756 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
1745 SystemParm, 6); 1757 SystemParm, 6);
1746 if (retval != STATUS_SUCCESS) { 1758 if (retval != STATUS_SUCCESS) {
1747 rtsx_trace(chip); 1759 rtsx_trace(chip);
1748 return STATUS_FAIL; 1760 return STATUS_FAIL;
@@ -1844,7 +1856,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
1844 } 1856 }
1845 1857
1846 retval = ms_write_extra_data(chip, phy_blk, i, 1858 retval = ms_write_extra_data(chip, phy_blk, i,
1847 extra, MS_EXTRA_SIZE); 1859 extra, MS_EXTRA_SIZE);
1848 if (retval != STATUS_SUCCESS) { 1860 if (retval != STATUS_SUCCESS) {
1849 rtsx_trace(chip); 1861 rtsx_trace(chip);
1850 return STATUS_FAIL; 1862 return STATUS_FAIL;
@@ -1855,7 +1867,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
1855} 1867}
1856 1868
1857static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, 1869static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
1858 u16 log_blk, u8 start_page, u8 end_page) 1870 u16 log_blk, u8 start_page, u8 end_page)
1859{ 1871{
1860 struct ms_info *ms_card = &chip->ms_card; 1872 struct ms_info *ms_card = &chip->ms_card;
1861 bool uncorrect_flag = false; 1873 bool uncorrect_flag = false;
@@ -1915,7 +1927,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
1915 ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE); 1927 ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
1916 1928
1917 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, 1929 retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
1918 MS_EXTRA_SIZE, SystemParm, 6); 1930 MS_EXTRA_SIZE, SystemParm, 6);
1919 if (retval != STATUS_SUCCESS) { 1931 if (retval != STATUS_SUCCESS) {
1920 rtsx_trace(chip); 1932 rtsx_trace(chip);
1921 return STATUS_FAIL; 1933 return STATUS_FAIL;
@@ -1971,9 +1983,9 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
1971 } 1983 }
1972 1984
1973 retval = ms_transfer_tpc(chip, 1985 retval = ms_transfer_tpc(chip,
1974 MS_TM_NORMAL_READ, 1986 MS_TM_NORMAL_READ,
1975 READ_PAGE_DATA, 1987 READ_PAGE_DATA,
1976 0, NO_WAIT_INT); 1988 0, NO_WAIT_INT);
1977 if (retval != STATUS_SUCCESS) { 1989 if (retval != STATUS_SUCCESS) {
1978 rtsx_trace(chip); 1990 rtsx_trace(chip);
1979 return STATUS_FAIL; 1991 return STATUS_FAIL;
@@ -1981,20 +1993,24 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
1981 1993
1982 if (uncorrect_flag) { 1994 if (uncorrect_flag) {
1983 ms_set_page_status(log_blk, setPS_NG, 1995 ms_set_page_status(log_blk, setPS_NG,
1984 extra, MS_EXTRA_SIZE); 1996 extra,
1997 MS_EXTRA_SIZE);
1985 if (i == 0) 1998 if (i == 0)
1986 extra[0] &= 0xEF; 1999 extra[0] &= 0xEF;
1987 2000
1988 ms_write_extra_data(chip, old_blk, i, 2001 ms_write_extra_data(chip, old_blk, i,
1989 extra, MS_EXTRA_SIZE); 2002 extra,
2003 MS_EXTRA_SIZE);
1990 dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n", 2004 dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n",
1991 i, extra[0]); 2005 i, extra[0]);
1992 MS_SET_BAD_BLOCK_FLG(ms_card); 2006 MS_SET_BAD_BLOCK_FLG(ms_card);
1993 2007
1994 ms_set_page_status(log_blk, setPS_Error, 2008 ms_set_page_status(log_blk, setPS_Error,
1995 extra, MS_EXTRA_SIZE); 2009 extra,
2010 MS_EXTRA_SIZE);
1996 ms_write_extra_data(chip, new_blk, i, 2011 ms_write_extra_data(chip, new_blk, i,
1997 extra, MS_EXTRA_SIZE); 2012 extra,
2013 MS_EXTRA_SIZE);
1998 continue; 2014 continue;
1999 } 2015 }
2000 2016
@@ -2021,8 +2037,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
2021 } 2037 }
2022 } 2038 }
2023 2039
2024 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, 2040 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
2025 MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE)); 2041 SystemParm, (6 + MS_EXTRA_SIZE));
2026 2042
2027 ms_set_err_code(chip, MS_NO_ERROR); 2043 ms_set_err_code(chip, MS_NO_ERROR);
2028 2044
@@ -2085,7 +2101,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
2085 2101
2086 if (i == 0) { 2102 if (i == 0) {
2087 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, 2103 retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
2088 MS_EXTRA_SIZE, SystemParm, 7); 2104 MS_EXTRA_SIZE, SystemParm,
2105 7);
2089 if (retval != STATUS_SUCCESS) { 2106 if (retval != STATUS_SUCCESS) {
2090 rtsx_trace(chip); 2107 rtsx_trace(chip);
2091 return STATUS_FAIL; 2108 return STATUS_FAIL;
@@ -2121,7 +2138,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
2121 2138
2122 ms_set_err_code(chip, MS_NO_ERROR); 2139 ms_set_err_code(chip, MS_NO_ERROR);
2123 retval = ms_read_bytes(chip, GET_INT, 1, 2140 retval = ms_read_bytes(chip, GET_INT, 1,
2124 NO_WAIT_INT, &val, 1); 2141 NO_WAIT_INT, &val, 1);
2125 if (retval != STATUS_SUCCESS) { 2142 if (retval != STATUS_SUCCESS) {
2126 rtsx_trace(chip); 2143 rtsx_trace(chip);
2127 return STATUS_FAIL; 2144 return STATUS_FAIL;
@@ -2361,7 +2378,7 @@ RE_SEARCH:
2361 } 2378 }
2362 2379
2363 retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1, 2380 retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
2364 NO_WAIT_INT); 2381 NO_WAIT_INT);
2365 if (retval != STATUS_SUCCESS) { 2382 if (retval != STATUS_SUCCESS) {
2366 rtsx_trace(chip); 2383 rtsx_trace(chip);
2367 return STATUS_FAIL; 2384 return STATUS_FAIL;
@@ -2369,7 +2386,9 @@ RE_SEARCH:
2369 2386
2370 retval = rtsx_write_register(chip, MS_CFG, 2387 retval = rtsx_write_register(chip, MS_CFG,
2371 0x58 | MS_NO_CHECK_INT, 2388 0x58 | MS_NO_CHECK_INT,
2372 MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT); 2389 MS_BUS_WIDTH_4 |
2390 PUSH_TIME_ODD |
2391 MS_NO_CHECK_INT);
2373 if (retval) { 2392 if (retval) {
2374 rtsx_trace(chip); 2393 rtsx_trace(chip);
2375 return retval; 2394 return retval;
@@ -2474,7 +2493,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
2474} 2493}
2475 2494
2476static void ms_set_l2p_tbl(struct rtsx_chip *chip, 2495static void ms_set_l2p_tbl(struct rtsx_chip *chip,
2477 int seg_no, u16 log_off, u16 phy_blk) 2496 int seg_no, u16 log_off, u16 phy_blk)
2478{ 2497{
2479 struct ms_info *ms_card = &chip->ms_card; 2498 struct ms_info *ms_card = &chip->ms_card;
2480 struct zone_entry *segment; 2499 struct zone_entry *segment;
@@ -2530,7 +2549,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
2530 7934}; 2549 7934};
2531 2550
2532static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk, 2551static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
2533 u16 log_off, u8 us1, u8 us2) 2552 u16 log_off, u8 us1, u8 us2)
2534{ 2553{
2535 struct ms_info *ms_card = &chip->ms_card; 2554 struct ms_info *ms_card = &chip->ms_card;
2536 struct zone_entry *segment; 2555 struct zone_entry *segment;
@@ -2627,7 +2646,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2627 2646
2628 disable_cnt = segment->disable_count; 2647 disable_cnt = segment->disable_count;
2629 2648
2630 segment->get_index = segment->set_index = 0; 2649 segment->get_index = 0;
2650 segment->set_index = 0;
2631 segment->unused_blk_cnt = 0; 2651 segment->unused_blk_cnt = 0;
2632 2652
2633 for (phy_blk = start; phy_blk < end; phy_blk++) { 2653 for (phy_blk = start; phy_blk < end; phy_blk++) {
@@ -2646,7 +2666,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2646 } 2666 }
2647 2667
2648 retval = ms_read_extra_data(chip, phy_blk, 0, 2668 retval = ms_read_extra_data(chip, phy_blk, 0,
2649 extra, MS_EXTRA_SIZE); 2669 extra, MS_EXTRA_SIZE);
2650 if (retval != STATUS_SUCCESS) { 2670 if (retval != STATUS_SUCCESS) {
2651 dev_dbg(rtsx_dev(chip), "read extra data fail\n"); 2671 dev_dbg(rtsx_dev(chip), "read extra data fail\n");
2652 ms_set_bad_block(chip, phy_blk); 2672 ms_set_bad_block(chip, phy_blk);
@@ -2685,7 +2705,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2685 } 2705 }
2686 2706
2687 if ((log_blk < ms_start_idx[seg_no]) || 2707 if ((log_blk < ms_start_idx[seg_no]) ||
2688 (log_blk >= ms_start_idx[seg_no + 1])) { 2708 (log_blk >= ms_start_idx[seg_no + 1])) {
2689 if (!(chip->card_wp & MS_CARD)) { 2709 if (!(chip->card_wp & MS_CARD)) {
2690 retval = ms_erase_block(chip, phy_blk); 2710 retval = ms_erase_block(chip, phy_blk);
2691 if (retval != STATUS_SUCCESS) 2711 if (retval != STATUS_SUCCESS)
@@ -2705,7 +2725,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2705 us1 = extra[0] & 0x10; 2725 us1 = extra[0] & 0x10;
2706 tmp_blk = segment->l2p_table[idx]; 2726 tmp_blk = segment->l2p_table[idx];
2707 retval = ms_read_extra_data(chip, tmp_blk, 0, 2727 retval = ms_read_extra_data(chip, tmp_blk, 0,
2708 extra, MS_EXTRA_SIZE); 2728 extra, MS_EXTRA_SIZE);
2709 if (retval != STATUS_SUCCESS) 2729 if (retval != STATUS_SUCCESS)
2710 continue; 2730 continue;
2711 us2 = extra[0] & 0x10; 2731 us2 = extra[0] & 0x10;
@@ -2774,7 +2794,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
2774 2794
2775 phy_blk = ms_get_unused_block(chip, 0); 2795 phy_blk = ms_get_unused_block(chip, 0);
2776 retval = ms_copy_page(chip, tmp_blk, phy_blk, 2796 retval = ms_copy_page(chip, tmp_blk, phy_blk,
2777 log_blk, 0, ms_card->page_off + 1); 2797 log_blk, 0,
2798 ms_card->page_off + 1);
2778 if (retval != STATUS_SUCCESS) { 2799 if (retval != STATUS_SUCCESS) {
2779 rtsx_trace(chip); 2800 rtsx_trace(chip);
2780 return STATUS_FAIL; 2801 return STATUS_FAIL;
@@ -2861,7 +2882,7 @@ int reset_ms_card(struct rtsx_chip *chip)
2861} 2882}
2862 2883
2863static int mspro_set_rw_cmd(struct rtsx_chip *chip, 2884static int mspro_set_rw_cmd(struct rtsx_chip *chip,
2864 u32 start_sec, u16 sec_cnt, u8 cmd) 2885 u32 start_sec, u16 sec_cnt, u8 cmd)
2865{ 2886{
2866 int retval, i; 2887 int retval, i;
2867 u8 data[8]; 2888 u8 data[8];
@@ -2932,8 +2953,8 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
2932} 2953}
2933 2954
2934static int mspro_rw_multi_sector(struct scsi_cmnd *srb, 2955static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
2935 struct rtsx_chip *chip, u32 start_sector, 2956 struct rtsx_chip *chip, u32 start_sector,
2936 u16 sector_cnt) 2957 u16 sector_cnt)
2937{ 2958{
2938 struct ms_info *ms_card = &chip->ms_card; 2959 struct ms_info *ms_card = &chip->ms_card;
2939 bool mode_2k = false; 2960 bool mode_2k = false;
@@ -2992,12 +3013,13 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
2992 } 3013 }
2993 3014
2994 if (ms_card->seq_mode) { 3015 if (ms_card->seq_mode) {
2995 if ((ms_card->pre_dir != srb->sc_data_direction) 3016 if ((ms_card->pre_dir != srb->sc_data_direction) ||
2996 || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector) 3017 ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
2997 || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) 3018 start_sector) ||
2998 || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) 3019 (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
2999 || !(val & MS_INT_BREQ) 3020 (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) ||
3000 || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) { 3021 !(val & MS_INT_BREQ) ||
3022 ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
3001 ms_card->seq_mode = 0; 3023 ms_card->seq_mode = 0;
3002 ms_card->total_sec_cnt = 0; 3024 ms_card->total_sec_cnt = 0;
3003 if (val & MS_INT_BREQ) { 3025 if (val & MS_INT_BREQ) {
@@ -3007,7 +3029,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
3007 return STATUS_FAIL; 3029 return STATUS_FAIL;
3008 } 3030 }
3009 3031
3010 rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH); 3032 rtsx_write_register(chip, RBCTL, RB_FLUSH,
3033 RB_FLUSH);
3011 } 3034 }
3012 } 3035 }
3013 } 3036 }
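
The hunk above rewraps the long sequential-mode check so each clause ends its line with || and the continuations align under the opening parenthesis. A small standalone illustration of the same wrapping style, with invented names standing in for the driver's state:

static int seq_mode_must_reset(int pre_dir, int cur_dir,
			       u32 pre_addr, u16 pre_cnt, u32 start_sector,
			       u32 total_cnt, u16 sector_cnt)
{
	return (pre_dir != cur_dir) ||
	       ((pre_addr + pre_cnt) != start_sector) ||
	       ((total_cnt + sector_cnt) > 0xFE00);
}
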
@@ -3038,8 +3061,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
3038 } 3061 }
3039 3062
3040 retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt, 3063 retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
3041 WAIT_INT, mode_2k, scsi_sg_count(srb), 3064 WAIT_INT, mode_2k, scsi_sg_count(srb),
3042 scsi_sglist(srb), scsi_bufflen(srb)); 3065 scsi_sglist(srb), scsi_bufflen(srb));
3043 if (retval != STATUS_SUCCESS) { 3066 if (retval != STATUS_SUCCESS) {
3044 ms_card->seq_mode = 0; 3067 ms_card->seq_mode = 0;
3045 rtsx_read_register(chip, MS_TRANS_CFG, &val); 3068 rtsx_read_register(chip, MS_TRANS_CFG, &val);
@@ -3076,7 +3099,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
3076} 3099}
3077 3100
3078static int mspro_read_format_progress(struct rtsx_chip *chip, 3101static int mspro_read_format_progress(struct rtsx_chip *chip,
3079 const int short_data_len) 3102 const int short_data_len)
3080{ 3103{
3081 struct ms_info *ms_card = &chip->ms_card; 3104 struct ms_info *ms_card = &chip->ms_card;
3082 int retval, i; 3105 int retval, i;
@@ -3102,7 +3125,8 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
3102 } 3125 }
3103 3126
3104 if (!(tmp & MS_INT_BREQ)) { 3127 if (!(tmp & MS_INT_BREQ)) {
3105 if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) { 3128 if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK |
3129 MS_INT_ERR)) == MS_INT_CED) {
3106 ms_card->format_status = FORMAT_SUCCESS; 3130 ms_card->format_status = FORMAT_SUCCESS;
3107 return STATUS_SUCCESS; 3131 return STATUS_SUCCESS;
3108 } 3132 }
@@ -3117,7 +3141,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
3117 cnt = (u8)short_data_len; 3141 cnt = (u8)short_data_len;
3118 3142
3119 retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, 3143 retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
3120 MS_NO_CHECK_INT); 3144 MS_NO_CHECK_INT);
3121 if (retval != STATUS_SUCCESS) { 3145 if (retval != STATUS_SUCCESS) {
3122 ms_card->format_status = FORMAT_FAIL; 3146 ms_card->format_status = FORMAT_FAIL;
3123 rtsx_trace(chip); 3147 rtsx_trace(chip);
@@ -3125,7 +3149,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
3125 } 3149 }
3126 3150
3127 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT, 3151 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
3128 data, 8); 3152 data, 8);
3129 if (retval != STATUS_SUCCESS) { 3153 if (retval != STATUS_SUCCESS) {
3130 ms_card->format_status = FORMAT_FAIL; 3154 ms_card->format_status = FORMAT_FAIL;
3131 rtsx_trace(chip); 3155 rtsx_trace(chip);
@@ -3204,7 +3228,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
3204 int i; 3228 int i;
3205 3229
3206 if (ms_card->pro_under_formatting && 3230 if (ms_card->pro_under_formatting &&
3207 (rtsx_get_stat(chip) != RTSX_STAT_SS)) { 3231 (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
3208 rtsx_set_stat(chip, RTSX_STAT_RUN); 3232 rtsx_set_stat(chip, RTSX_STAT_RUN);
3209 3233
3210 for (i = 0; i < 65535; i++) { 3234 for (i = 0; i < 65535; i++) {
@@ -3216,7 +3240,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
3216} 3240}
3217 3241
3218int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, 3242int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3219 int short_data_len, bool quick_format) 3243 int short_data_len, bool quick_format)
3220{ 3244{
3221 struct ms_info *ms_card = &chip->ms_card; 3245 struct ms_info *ms_card = &chip->ms_card;
3222 int retval, i; 3246 int retval, i;
@@ -3305,9 +3329,9 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3305} 3329}
3306 3330
3307static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, 3331static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3308 u16 log_blk, u8 start_page, u8 end_page, 3332 u16 log_blk, u8 start_page, u8 end_page,
3309 u8 *buf, unsigned int *index, 3333 u8 *buf, unsigned int *index,
3310 unsigned int *offset) 3334 unsigned int *offset)
3311{ 3335{
3312 struct ms_info *ms_card = &chip->ms_card; 3336 struct ms_info *ms_card = &chip->ms_card;
3313 int retval, i; 3337 int retval, i;
@@ -3315,7 +3339,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3315 u8 *ptr; 3339 u8 *ptr;
3316 3340
3317 retval = ms_read_extra_data(chip, phy_blk, start_page, 3341 retval = ms_read_extra_data(chip, phy_blk, start_page,
3318 extra, MS_EXTRA_SIZE); 3342 extra, MS_EXTRA_SIZE);
3319 if (retval == STATUS_SUCCESS) { 3343 if (retval == STATUS_SUCCESS) {
3320 if ((extra[1] & 0x30) != 0x30) { 3344 if ((extra[1] & 0x30) != 0x30) {
3321 ms_set_err_code(chip, MS_FLASH_READ_ERROR); 3345 ms_set_err_code(chip, MS_FLASH_READ_ERROR);
@@ -3325,7 +3349,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3325 } 3349 }
3326 3350
3327 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 3351 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
3328 SystemParm, 6); 3352 SystemParm, 6);
3329 if (retval != STATUS_SUCCESS) { 3353 if (retval != STATUS_SUCCESS) {
3330 rtsx_trace(chip); 3354 rtsx_trace(chip);
3331 return STATUS_FAIL; 3355 return STATUS_FAIL;
@@ -3389,11 +3413,17 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3389 if (retval != STATUS_SUCCESS) { 3413 if (retval != STATUS_SUCCESS) {
3390 if (!(chip->card_wp & MS_CARD)) { 3414 if (!(chip->card_wp & MS_CARD)) {
3391 reset_ms(chip); 3415 reset_ms(chip);
3392 ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE); 3416 ms_set_page_status
3393 ms_write_extra_data(chip, phy_blk, 3417 (log_blk, setPS_NG,
3394 page_addr, extra, MS_EXTRA_SIZE); 3418 extra,
3419 MS_EXTRA_SIZE);
3420 ms_write_extra_data
3421 (chip, phy_blk,
3422 page_addr, extra,
3423 MS_EXTRA_SIZE);
3395 } 3424 }
3396 ms_set_err_code(chip, MS_FLASH_READ_ERROR); 3425 ms_set_err_code(chip,
3426 MS_FLASH_READ_ERROR);
3397 rtsx_trace(chip); 3427 rtsx_trace(chip);
3398 return STATUS_FAIL; 3428 return STATUS_FAIL;
3399 } 3429 }
@@ -3420,7 +3450,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3420 } 3450 }
3421 3451
3422 retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, 3452 retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
3423 &val, 1); 3453 &val, 1);
3424 if (retval != STATUS_SUCCESS) { 3454 if (retval != STATUS_SUCCESS) {
3425 rtsx_trace(chip); 3455 rtsx_trace(chip);
3426 return STATUS_FAIL; 3456 return STATUS_FAIL;
@@ -3441,23 +3471,24 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3441 3471
3442 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA); 3472 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
3443 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 3473 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
3444 0xFF, trans_cfg); 3474 0xFF, trans_cfg);
3445 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 3475 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
3446 0x01, RING_BUFFER); 3476 0x01, RING_BUFFER);
3447 3477
3448 trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512); 3478 trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
3449 3479
3450 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, 3480 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
3451 MS_TRANSFER_START | MS_TM_NORMAL_READ); 3481 MS_TRANSFER_START | MS_TM_NORMAL_READ);
3452 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, 3482 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
3453 MS_TRANSFER_END, MS_TRANSFER_END); 3483 MS_TRANSFER_END, MS_TRANSFER_END);
3454 3484
3455 rtsx_send_cmd_no_wait(chip); 3485 rtsx_send_cmd_no_wait(chip);
3456 3486
3457 retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 3487 retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
3458 512, scsi_sg_count(chip->srb), 3488 scsi_sg_count(chip->srb),
3459 index, offset, DMA_FROM_DEVICE, 3489 index, offset,
3460 chip->ms_timeout); 3490 DMA_FROM_DEVICE,
3491 chip->ms_timeout);
3461 if (retval < 0) { 3492 if (retval < 0) {
3462 if (retval == -ETIMEDOUT) { 3493 if (retval == -ETIMEDOUT) {
3463 ms_set_err_code(chip, MS_TO_ERROR); 3494 ms_set_err_code(chip, MS_TO_ERROR);
@@ -3489,7 +3520,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
3489} 3520}
3490 3521
3491static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, 3522static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3492 u16 new_blk, u16 log_blk, u8 start_page, 3523 u16 new_blk, u16 log_blk, u8 start_page,
3493 u8 end_page, u8 *buf, unsigned int *index, 3524 u8 end_page, u8 *buf, unsigned int *index,
3494 unsigned int *offset) 3525 unsigned int *offset)
3495{ 3526{
@@ -3500,7 +3531,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3500 3531
3501 if (!start_page) { 3532 if (!start_page) {
3502 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 3533 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
3503 SystemParm, 7); 3534 SystemParm, 7);
3504 if (retval != STATUS_SUCCESS) { 3535 if (retval != STATUS_SUCCESS) {
3505 rtsx_trace(chip); 3536 rtsx_trace(chip);
3506 return STATUS_FAIL; 3537 return STATUS_FAIL;
@@ -3534,7 +3565,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3534 3565
3535 ms_set_err_code(chip, MS_NO_ERROR); 3566 ms_set_err_code(chip, MS_NO_ERROR);
3536 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1, 3567 retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
3537 NO_WAIT_INT); 3568 NO_WAIT_INT);
3538 if (retval != STATUS_SUCCESS) { 3569 if (retval != STATUS_SUCCESS) {
3539 rtsx_trace(chip); 3570 rtsx_trace(chip);
3540 return STATUS_FAIL; 3571 return STATUS_FAIL;
@@ -3542,7 +3573,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3542 } 3573 }
3543 3574
3544 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, 3575 retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
3545 SystemParm, (6 + MS_EXTRA_SIZE)); 3576 SystemParm, (6 + MS_EXTRA_SIZE));
3546 if (retval != STATUS_SUCCESS) { 3577 if (retval != STATUS_SUCCESS) {
3547 rtsx_trace(chip); 3578 rtsx_trace(chip);
3548 return STATUS_FAIL; 3579 return STATUS_FAIL;
@@ -3630,25 +3661,26 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3630 rtsx_init_cmd(chip); 3661 rtsx_init_cmd(chip);
3631 3662
3632 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 3663 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
3633 0xFF, WRITE_PAGE_DATA); 3664 0xFF, WRITE_PAGE_DATA);
3634 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 3665 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
3635 0xFF, WAIT_INT); 3666 0xFF, WAIT_INT);
3636 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 3667 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
3637 0x01, RING_BUFFER); 3668 0x01, RING_BUFFER);
3638 3669
3639 trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); 3670 trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
3640 3671
3641 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, 3672 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
3642 MS_TRANSFER_START | MS_TM_NORMAL_WRITE); 3673 MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
3643 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, 3674 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
3644 MS_TRANSFER_END, MS_TRANSFER_END); 3675 MS_TRANSFER_END, MS_TRANSFER_END);
3645 3676
3646 rtsx_send_cmd_no_wait(chip); 3677 rtsx_send_cmd_no_wait(chip);
3647 3678
3648 retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 3679 retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
3649 512, scsi_sg_count(chip->srb), 3680 scsi_sg_count(chip->srb),
3650 index, offset, DMA_TO_DEVICE, 3681 index, offset,
3651 chip->ms_timeout); 3682 DMA_TO_DEVICE,
3683 chip->ms_timeout);
3652 if (retval < 0) { 3684 if (retval < 0) {
3653 ms_set_err_code(chip, MS_TO_ERROR); 3685 ms_set_err_code(chip, MS_TO_ERROR);
3654 rtsx_clear_ms_error(chip); 3686 rtsx_clear_ms_error(chip);
@@ -3677,7 +3709,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3677 if (page_addr == (end_page - 1)) { 3709 if (page_addr == (end_page - 1)) {
3678 if (!(val & INT_REG_CED)) { 3710 if (!(val & INT_REG_CED)) {
3679 retval = ms_send_cmd(chip, BLOCK_END, 3711 retval = ms_send_cmd(chip, BLOCK_END,
3680 WAIT_INT); 3712 WAIT_INT);
3681 if (retval != STATUS_SUCCESS) { 3713 if (retval != STATUS_SUCCESS) {
3682 rtsx_trace(chip); 3714 rtsx_trace(chip);
3683 return STATUS_FAIL; 3715 return STATUS_FAIL;
@@ -3685,7 +3717,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3685 } 3717 }
3686 3718
3687 retval = ms_read_bytes(chip, GET_INT, 1, 3719 retval = ms_read_bytes(chip, GET_INT, 1,
3688 NO_WAIT_INT, &val, 1); 3720 NO_WAIT_INT, &val, 1);
3689 if (retval != STATUS_SUCCESS) { 3721 if (retval != STATUS_SUCCESS) {
3690 rtsx_trace(chip); 3722 rtsx_trace(chip);
3691 return STATUS_FAIL; 3723 return STATUS_FAIL;
@@ -3693,7 +3725,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3693 } 3725 }
3694 3726
3695 if ((page_addr == (end_page - 1)) || 3727 if ((page_addr == (end_page - 1)) ||
3696 (page_addr == ms_card->page_off)) { 3728 (page_addr == ms_card->page_off)) {
3697 if (!(val & INT_REG_CED)) { 3729 if (!(val & INT_REG_CED)) {
3698 ms_set_err_code(chip, 3730 ms_set_err_code(chip,
3699 MS_FLASH_WRITE_ERROR); 3731 MS_FLASH_WRITE_ERROR);
@@ -3711,13 +3743,13 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
3711} 3743}
3712 3744
3713static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, 3745static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
3714 u16 log_blk, u8 page_off) 3746 u16 log_blk, u8 page_off)
3715{ 3747{
3716 struct ms_info *ms_card = &chip->ms_card; 3748 struct ms_info *ms_card = &chip->ms_card;
3717 int retval, seg_no; 3749 int retval, seg_no;
3718 3750
3719 retval = ms_copy_page(chip, old_blk, new_blk, log_blk, 3751 retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
3720 page_off, ms_card->page_off + 1); 3752 page_off, ms_card->page_off + 1);
3721 if (retval != STATUS_SUCCESS) { 3753 if (retval != STATUS_SUCCESS) {
3722 rtsx_trace(chip); 3754 rtsx_trace(chip);
3723 return STATUS_FAIL; 3755 return STATUS_FAIL;
@@ -3740,13 +3772,13 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
3740} 3772}
3741 3773
3742static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, 3774static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
3743 u16 log_blk, u8 start_page) 3775 u16 log_blk, u8 start_page)
3744{ 3776{
3745 int retval; 3777 int retval;
3746 3778
3747 if (start_page) { 3779 if (start_page) {
3748 retval = ms_copy_page(chip, old_blk, new_blk, log_blk, 3780 retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
3749 0, start_page); 3781 0, start_page);
3750 if (retval != STATUS_SUCCESS) { 3782 if (retval != STATUS_SUCCESS) {
3751 rtsx_trace(chip); 3783 rtsx_trace(chip);
3752 return STATUS_FAIL; 3784 return STATUS_FAIL;
@@ -3772,7 +3804,7 @@ int ms_delay_write(struct rtsx_chip *chip)
3772 3804
3773 delay_write->delay_write_flag = 0; 3805 delay_write->delay_write_flag = 0;
3774 retval = ms_finish_write(chip, 3806 retval = ms_finish_write(chip,
3775 delay_write->old_phyblock, 3807 delay_write->old_phyblock,
3776 delay_write->new_phyblock, 3808 delay_write->new_phyblock,
3777 delay_write->logblock, 3809 delay_write->logblock,
3778 delay_write->pageoff); 3810 delay_write->pageoff);
@@ -3790,13 +3822,13 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3790{ 3822{
3791 if (srb->sc_data_direction == DMA_FROM_DEVICE) 3823 if (srb->sc_data_direction == DMA_FROM_DEVICE)
3792 set_sense_type(chip, SCSI_LUN(srb), 3824 set_sense_type(chip, SCSI_LUN(srb),
3793 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 3825 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
3794 else 3826 else
3795 set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR); 3827 set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
3796} 3828}
3797 3829
3798static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, 3830static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3799 u32 start_sector, u16 sector_cnt) 3831 u32 start_sector, u16 sector_cnt)
3800{ 3832{
3801 struct ms_info *ms_card = &chip->ms_card; 3833 struct ms_info *ms_card = &chip->ms_card;
3802 unsigned int lun = SCSI_LUN(srb); 3834 unsigned int lun = SCSI_LUN(srb);
@@ -3843,16 +3875,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3843 if (srb->sc_data_direction == DMA_TO_DEVICE) { 3875 if (srb->sc_data_direction == DMA_TO_DEVICE) {
3844#ifdef MS_DELAY_WRITE 3876#ifdef MS_DELAY_WRITE
3845 if (delay_write->delay_write_flag && 3877 if (delay_write->delay_write_flag &&
3846 (delay_write->logblock == log_blk) && 3878 (delay_write->logblock == log_blk) &&
3847 (start_page > delay_write->pageoff)) { 3879 (start_page > delay_write->pageoff)) {
3848 delay_write->delay_write_flag = 0; 3880 delay_write->delay_write_flag = 0;
3849 retval = ms_copy_page(chip, 3881 retval = ms_copy_page(chip,
3850 delay_write->old_phyblock, 3882 delay_write->old_phyblock,
3851 delay_write->new_phyblock, log_blk, 3883 delay_write->new_phyblock,
3852 delay_write->pageoff, start_page); 3884 log_blk,
3885 delay_write->pageoff, start_page);
3853 if (retval != STATUS_SUCCESS) { 3886 if (retval != STATUS_SUCCESS) {
3854 set_sense_type(chip, lun, 3887 set_sense_type(chip, lun,
3855 SENSE_TYPE_MEDIA_WRITE_ERR); 3888 SENSE_TYPE_MEDIA_WRITE_ERR);
3856 rtsx_trace(chip); 3889 rtsx_trace(chip);
3857 return STATUS_FAIL; 3890 return STATUS_FAIL;
3858 } 3891 }
@@ -3868,32 +3901,35 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3868 retval = ms_delay_write(chip); 3901 retval = ms_delay_write(chip);
3869 if (retval != STATUS_SUCCESS) { 3902 if (retval != STATUS_SUCCESS) {
3870 set_sense_type(chip, lun, 3903 set_sense_type(chip, lun,
3871 SENSE_TYPE_MEDIA_WRITE_ERR); 3904 SENSE_TYPE_MEDIA_WRITE_ERR);
3872 rtsx_trace(chip); 3905 rtsx_trace(chip);
3873 return STATUS_FAIL; 3906 return STATUS_FAIL;
3874 } 3907 }
3875#endif 3908#endif
3876 old_blk = ms_get_l2p_tbl(chip, seg_no, 3909 old_blk = ms_get_l2p_tbl
3877 log_blk - ms_start_idx[seg_no]); 3910 (chip, seg_no,
3911 log_blk - ms_start_idx[seg_no]);
3878 new_blk = ms_get_unused_block(chip, seg_no); 3912 new_blk = ms_get_unused_block(chip, seg_no);
3879 if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) { 3913 if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
3880 set_sense_type(chip, lun, 3914 set_sense_type(chip, lun,
3881 SENSE_TYPE_MEDIA_WRITE_ERR); 3915 SENSE_TYPE_MEDIA_WRITE_ERR);
3882 rtsx_trace(chip); 3916 rtsx_trace(chip);
3883 return STATUS_FAIL; 3917 return STATUS_FAIL;
3884 } 3918 }
3885 3919
3886 retval = ms_prepare_write(chip, old_blk, new_blk, 3920 retval = ms_prepare_write(chip, old_blk, new_blk,
3887 log_blk, start_page); 3921 log_blk, start_page);
3888 if (retval != STATUS_SUCCESS) { 3922 if (retval != STATUS_SUCCESS) {
3889 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { 3923 if (detect_card_cd(chip, MS_CARD) !=
3890 set_sense_type(chip, lun, 3924 STATUS_SUCCESS) {
3925 set_sense_type
3926 (chip, lun,
3891 SENSE_TYPE_MEDIA_NOT_PRESENT); 3927 SENSE_TYPE_MEDIA_NOT_PRESENT);
3892 rtsx_trace(chip); 3928 rtsx_trace(chip);
3893 return STATUS_FAIL; 3929 return STATUS_FAIL;
3894 } 3930 }
3895 set_sense_type(chip, lun, 3931 set_sense_type(chip, lun,
3896 SENSE_TYPE_MEDIA_WRITE_ERR); 3932 SENSE_TYPE_MEDIA_WRITE_ERR);
3897 rtsx_trace(chip); 3933 rtsx_trace(chip);
3898 return STATUS_FAIL; 3934 return STATUS_FAIL;
3899 } 3935 }
@@ -3906,21 +3942,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3906 if (retval != STATUS_SUCCESS) { 3942 if (retval != STATUS_SUCCESS) {
3907 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { 3943 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
3908 set_sense_type(chip, lun, 3944 set_sense_type(chip, lun,
3909 SENSE_TYPE_MEDIA_NOT_PRESENT); 3945 SENSE_TYPE_MEDIA_NOT_PRESENT);
3910 rtsx_trace(chip); 3946 rtsx_trace(chip);
3911 return STATUS_FAIL; 3947 return STATUS_FAIL;
3912 } 3948 }
3913 set_sense_type(chip, lun, 3949 set_sense_type(chip, lun,
3914 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 3950 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
3915 rtsx_trace(chip); 3951 rtsx_trace(chip);
3916 return STATUS_FAIL; 3952 return STATUS_FAIL;
3917 } 3953 }
3918#endif 3954#endif
3919 old_blk = ms_get_l2p_tbl(chip, seg_no, 3955 old_blk = ms_get_l2p_tbl(chip, seg_no,
3920 log_blk - ms_start_idx[seg_no]); 3956 log_blk - ms_start_idx[seg_no]);
3921 if (old_blk == 0xFFFF) { 3957 if (old_blk == 0xFFFF) {
3922 set_sense_type(chip, lun, 3958 set_sense_type(chip, lun,
3923 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 3959 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
3924 rtsx_trace(chip); 3960 rtsx_trace(chip);
3925 return STATUS_FAIL; 3961 return STATUS_FAIL;
3926 } 3962 }
@@ -3942,19 +3978,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3942 3978
3943 if (srb->sc_data_direction == DMA_FROM_DEVICE) { 3979 if (srb->sc_data_direction == DMA_FROM_DEVICE) {
3944 retval = ms_read_multiple_pages(chip, 3980 retval = ms_read_multiple_pages(chip,
3945 old_blk, log_blk, start_page, end_page, 3981 old_blk, log_blk,
3946 ptr, &index, &offset); 3982 start_page, end_page,
3983 ptr, &index, &offset);
3947 } else { 3984 } else {
3948 retval = ms_write_multiple_pages(chip, old_blk, 3985 retval = ms_write_multiple_pages(chip, old_blk, new_blk,
3949 new_blk, log_blk, start_page, end_page, 3986 log_blk, start_page,
3950 ptr, &index, &offset); 3987 end_page, ptr, &index,
3988 &offset);
3951 } 3989 }
3952 3990
3953 if (retval != STATUS_SUCCESS) { 3991 if (retval != STATUS_SUCCESS) {
3954 toggle_gpio(chip, 1); 3992 toggle_gpio(chip, 1);
3955 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { 3993 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
3956 set_sense_type(chip, lun, 3994 set_sense_type(chip, lun,
3957 SENSE_TYPE_MEDIA_NOT_PRESENT); 3995 SENSE_TYPE_MEDIA_NOT_PRESENT);
3958 rtsx_trace(chip); 3996 rtsx_trace(chip);
3959 return STATUS_FAIL; 3997 return STATUS_FAIL;
3960 } 3998 }
@@ -3970,8 +4008,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3970 ms_set_unused_block(chip, old_blk); 4008 ms_set_unused_block(chip, old_blk);
3971 4009
3972 ms_set_l2p_tbl(chip, seg_no, 4010 ms_set_l2p_tbl(chip, seg_no,
3973 log_blk - ms_start_idx[seg_no], 4011 log_blk - ms_start_idx[seg_no],
3974 new_blk); 4012 new_blk);
3975 } 4013 }
3976 } 4014 }
3977 4015
@@ -3995,14 +4033,14 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
3995 if (retval != STATUS_SUCCESS) { 4033 if (retval != STATUS_SUCCESS) {
3996 chip->card_fail |= MS_CARD; 4034 chip->card_fail |= MS_CARD;
3997 set_sense_type(chip, lun, 4035 set_sense_type(chip, lun,
3998 SENSE_TYPE_MEDIA_NOT_PRESENT); 4036 SENSE_TYPE_MEDIA_NOT_PRESENT);
3999 rtsx_trace(chip); 4037 rtsx_trace(chip);
4000 return STATUS_FAIL; 4038 return STATUS_FAIL;
4001 } 4039 }
4002 } 4040 }
4003 4041
4004 old_blk = ms_get_l2p_tbl(chip, seg_no, 4042 old_blk = ms_get_l2p_tbl(chip, seg_no,
4005 log_blk - ms_start_idx[seg_no]); 4043 log_blk - ms_start_idx[seg_no]);
4006 if (old_blk == 0xFFFF) { 4044 if (old_blk == 0xFFFF) {
4007 ms_rw_fail(srb, chip); 4045 ms_rw_fail(srb, chip);
4008 rtsx_trace(chip); 4046 rtsx_trace(chip);
@@ -4034,10 +4072,12 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
4034 delay_write->pageoff = end_page; 4072 delay_write->pageoff = end_page;
4035#else 4073#else
4036 retval = ms_finish_write(chip, old_blk, new_blk, 4074 retval = ms_finish_write(chip, old_blk, new_blk,
4037 log_blk, end_page); 4075 log_blk, end_page);
4038 if (retval != STATUS_SUCCESS) { 4076 if (retval != STATUS_SUCCESS) {
4039 if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { 4077 if (detect_card_cd(chip, MS_CARD) !=
4040 set_sense_type(chip, lun, 4078 STATUS_SUCCESS) {
4079 set_sense_type
4080 (chip, lun,
4041 SENSE_TYPE_MEDIA_NOT_PRESENT); 4081 SENSE_TYPE_MEDIA_NOT_PRESENT);
4042 rtsx_trace(chip); 4082 rtsx_trace(chip);
4043 return STATUS_FAIL; 4083 return STATUS_FAIL;
@@ -4057,17 +4097,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
4057} 4097}
4058 4098
4059int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 4099int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
4060 u32 start_sector, u16 sector_cnt) 4100 u32 start_sector, u16 sector_cnt)
4061{ 4101{
4062 struct ms_info *ms_card = &chip->ms_card; 4102 struct ms_info *ms_card = &chip->ms_card;
4063 int retval; 4103 int retval;
4064 4104
4065 if (CHK_MSPRO(ms_card)) 4105 if (CHK_MSPRO(ms_card))
4066 retval = mspro_rw_multi_sector(srb, chip, start_sector, 4106 retval = mspro_rw_multi_sector(srb, chip, start_sector,
4067 sector_cnt); 4107 sector_cnt);
4068 else 4108 else
4069 retval = ms_rw_multi_sector(srb, chip, start_sector, 4109 retval = ms_rw_multi_sector(srb, chip, start_sector,
4070 sector_cnt); 4110 sector_cnt);
4071 4111
4072 return retval; 4112 return retval;
4073} 4113}
@@ -4189,7 +4229,7 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
4189} 4229}
4190 4230
4191static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type, 4231static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
4192 u8 mg_entry_num) 4232 u8 mg_entry_num)
4193{ 4233{
4194 int retval; 4234 int retval;
4195 u8 buf[6]; 4235 u8 buf[6];
@@ -4306,7 +4346,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4306 } 4346 }
4307 4347
4308 retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, 4348 retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
4309 3, WAIT_INT, 0, 0, buf + 4, 1536); 4349 3, WAIT_INT, 0, 0, buf + 4, 1536);
4310 if (retval != STATUS_SUCCESS) { 4350 if (retval != STATUS_SUCCESS) {
4311 set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); 4351 set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
4312 rtsx_clear_ms_error(chip); 4352 rtsx_clear_ms_error(chip);
@@ -4354,7 +4394,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4354 } 4394 }
4355 4395
4356 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, 4396 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
4357 buf, 32); 4397 buf, 32);
4358 if (retval != STATUS_SUCCESS) { 4398 if (retval != STATUS_SUCCESS) {
4359 set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); 4399 set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
4360 rtsx_trace(chip); 4400 rtsx_trace(chip);
@@ -4437,7 +4477,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4437 } 4477 }
4438 4478
4439 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, 4479 retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
4440 buf1, 32); 4480 buf1, 32);
4441 if (retval != STATUS_SUCCESS) { 4481 if (retval != STATUS_SUCCESS) {
4442 set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); 4482 set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
4443 rtsx_trace(chip); 4483 rtsx_trace(chip);
@@ -4560,7 +4600,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4560 } 4600 }
4561 4601
4562 retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, 4602 retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
4563 2, WAIT_INT, 0, 0, buf + 4, 1024); 4603 2, WAIT_INT, 0, 0, buf + 4, 1024);
4564 if (retval != STATUS_SUCCESS) { 4604 if (retval != STATUS_SUCCESS) {
4565 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 4605 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
4566 rtsx_clear_ms_error(chip); 4606 rtsx_clear_ms_error(chip);
@@ -4615,11 +4655,12 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4615 if (retval != STATUS_SUCCESS) { 4655 if (retval != STATUS_SUCCESS) {
4616 if (ms_card->mg_auth == 0) { 4656 if (ms_card->mg_auth == 0) {
4617 if ((buf[5] & 0xC0) != 0) 4657 if ((buf[5] & 0xC0) != 0)
4618 set_sense_type(chip, lun, 4658 set_sense_type
4659 (chip, lun,
4619 SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); 4660 SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
4620 else 4661 else
4621 set_sense_type(chip, lun, 4662 set_sense_type(chip, lun,
4622 SENSE_TYPE_MG_WRITE_ERR); 4663 SENSE_TYPE_MG_WRITE_ERR);
4623 } else { 4664 } else {
4624 set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); 4665 set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
4625 } 4666 }
@@ -4634,17 +4675,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4634 rtsx_init_cmd(chip); 4675 rtsx_init_cmd(chip);
4635 4676
4636 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 4677 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
4637 0xFF, PRO_WRITE_LONG_DATA); 4678 0xFF, PRO_WRITE_LONG_DATA);
4638 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT); 4679 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
4639 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 4680 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
4640 0x01, RING_BUFFER); 4681 0x01, RING_BUFFER);
4641 4682
4642 trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); 4683 trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
4643 4684
4644 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, 4685 rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
4645 MS_TRANSFER_START | MS_TM_NORMAL_WRITE); 4686 MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
4646 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, 4687 rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
4647 MS_TRANSFER_END, MS_TRANSFER_END); 4688 MS_TRANSFER_END, MS_TRANSFER_END);
4648 4689
4649 rtsx_send_cmd_no_wait(chip); 4690 rtsx_send_cmd_no_wait(chip);
4650 4691
@@ -4654,13 +4695,15 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4654 rtsx_clear_ms_error(chip); 4695 rtsx_clear_ms_error(chip);
4655 if (ms_card->mg_auth == 0) { 4696 if (ms_card->mg_auth == 0) {
4656 if ((buf[5] & 0xC0) != 0) 4697 if ((buf[5] & 0xC0) != 0)
4657 set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); 4698 set_sense_type
4699 (chip, lun,
4700 SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
4658 else 4701 else
4659 set_sense_type(chip, lun, 4702 set_sense_type(chip, lun,
4660 SENSE_TYPE_MG_WRITE_ERR); 4703 SENSE_TYPE_MG_WRITE_ERR);
4661 } else { 4704 } else {
4662 set_sense_type(chip, lun, 4705 set_sense_type(chip, lun,
4663 SENSE_TYPE_MG_WRITE_ERR); 4706 SENSE_TYPE_MG_WRITE_ERR);
4664 } 4707 }
4665 retval = STATUS_FAIL; 4708 retval = STATUS_FAIL;
4666 rtsx_trace(chip); 4709 rtsx_trace(chip);
@@ -4669,16 +4712,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4669 } 4712 }
4670#else 4713#else
4671 retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA, 4714 retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
4672 2, WAIT_INT, 0, 0, buf + 4, 1024); 4715 2, WAIT_INT, 0, 0, buf + 4, 1024);
4673 if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) { 4716 if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
4674 rtsx_clear_ms_error(chip); 4717 rtsx_clear_ms_error(chip);
4675 if (ms_card->mg_auth == 0) { 4718 if (ms_card->mg_auth == 0) {
4676 if ((buf[5] & 0xC0) != 0) 4719 if ((buf[5] & 0xC0) != 0)
4677 set_sense_type(chip, lun, 4720 set_sense_type
4678 SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); 4721 (chip, lun,
4722 SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
4679 else 4723 else
4680 set_sense_type(chip, lun, 4724 set_sense_type(chip, lun,
4681 SENSE_TYPE_MG_WRITE_ERR); 4725 SENSE_TYPE_MG_WRITE_ERR);
4682 } else { 4726 } else {
4683 set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); 4727 set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
4684 } 4728 }
@@ -4706,11 +4750,12 @@ void ms_cleanup_work(struct rtsx_chip *chip)
4706 } 4750 }
4707 if (CHK_MSHG(ms_card)) { 4751 if (CHK_MSHG(ms_card)) {
4708 rtsx_write_register(chip, MS_CFG, 4752 rtsx_write_register(chip, MS_CFG,
4709 MS_2K_SECTOR_MODE, 0x00); 4753 MS_2K_SECTOR_MODE, 0x00);
4710 } 4754 }
4711 } 4755 }
4712#ifdef MS_DELAY_WRITE 4756#ifdef MS_DELAY_WRITE
4713 else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) { 4757 else if ((!CHK_MSPRO(ms_card)) &&
4758 ms_card->delay_write.delay_write_flag) {
4714 dev_dbg(rtsx_dev(chip), "MS: delay write\n"); 4759 dev_dbg(rtsx_dev(chip), "MS: delay write\n");
4715 ms_delay_write(chip); 4760 ms_delay_write(chip);
4716 ms_card->cleanup_counter = 0; 4761 ms_card->cleanup_counter = 0;
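The ms.c hunks above are almost entirely 80-column re-wraps: long calls and conditions are split so continuation lines sit under the opening parenthesis, with operators kept at line ends. A standalone sketch of that wrapping convention follows; it is not driver code, and the structure and function names are invented for illustration.

/*
 * Standalone sketch (not part of the patch): wrapping a long condition
 * the way the ms.c hunks above do.  Names are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct card_state {
	bool pro_card;
	bool delay_write_flag;
};

static bool needs_delayed_flush(const struct card_state *card)
{
	/* Long condition split across lines, operator left at line end. */
	if (!card->pro_card &&
	    card->delay_write_flag)
		return true;

	return false;
}

int main(void)
{
	struct card_state card = { .pro_card = false, .delay_write_flag = true };

	printf("flush needed: %d\n", needs_delayed_flush(&card));
	return 0;
}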
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index d7686399df97..71f98cc03eed 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -202,9 +202,9 @@ void mspro_polling_format_status(struct rtsx_chip *chip);
202void mspro_stop_seq_mode(struct rtsx_chip *chip); 202void mspro_stop_seq_mode(struct rtsx_chip *chip);
203int reset_ms_card(struct rtsx_chip *chip); 203int reset_ms_card(struct rtsx_chip *chip);
204int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 204int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
205 u32 start_sector, u16 sector_cnt); 205 u32 start_sector, u16 sector_cnt);
206int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, 206int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
207 int short_data_len, bool quick_format); 207 int short_data_len, bool quick_format);
208void ms_free_l2p_tbl(struct rtsx_chip *chip); 208void ms_free_l2p_tbl(struct rtsx_chip *chip);
209void ms_cleanup_work(struct rtsx_chip *chip); 209void ms_cleanup_work(struct rtsx_chip *chip);
210int ms_power_off_card3v3(struct rtsx_chip *chip); 210int ms_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 5d65a5cdc748..68d75d0d5efd 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -107,8 +107,10 @@ static int slave_configure(struct scsi_device *sdev)
107 * the actual value or the modified one, depending on where the 107 * the actual value or the modified one, depending on where the
108 * data comes from. 108 * data comes from.
109 */ 109 */
110 if (sdev->scsi_level < SCSI_2) 110 if (sdev->scsi_level < SCSI_2) {
111 sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2; 111 sdev->scsi_level = SCSI_2;
112 sdev->sdev_target->scsi_level = SCSI_2;
113 }
112 114
113 return 0; 115 return 0;
114} 116}
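The slave_configure() hunk above splits a chained assignment into two explicit statements inside braces, which is the usual checkpatch-friendly form. A minimal standalone sketch of the same pattern follows; the types, names, and the local SCSI_2 constant are hypothetical stand-ins, not the driver's definitions.

/*
 * Standalone sketch (not part of the patch): one assignment per statement
 * instead of a = b = X;  Hypothetical names for illustration only.
 */
#include <stdio.h>

struct target { int scsi_level; };
struct device { int scsi_level; struct target *tgt; };

#define SCSI_2 3	/* illustrative value, not the real header constant */

static void bump_scsi_level(struct device *dev)
{
	if (dev->scsi_level < SCSI_2) {
		/* Two statements are easier to read and to diff later. */
		dev->scsi_level = SCSI_2;
		dev->tgt->scsi_level = SCSI_2;
	}
}

int main(void)
{
	struct target tgt = { .scsi_level = 1 };
	struct device dev = { .scsi_level = 1, .tgt = &tgt };

	bump_scsi_level(&dev);
	printf("%d %d\n", dev.scsi_level, tgt.scsi_level);
	return 0;
}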
@@ -120,12 +122,15 @@ static int slave_configure(struct scsi_device *sdev)
120/* we use this macro to help us write into the buffer */ 122/* we use this macro to help us write into the buffer */
121#undef SPRINTF 123#undef SPRINTF
122#define SPRINTF(args...) \ 124#define SPRINTF(args...) \
123 do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0) 125 do { \
126 if (pos < buffer + length) \
127 pos += sprintf(pos, ## args); \
128 } while (0)
124 129
125/* queue a command */ 130/* queue a command */
126/* This is always called with scsi_lock(host) held */ 131/* This is always called with scsi_lock(host) held */
127static int queuecommand_lck(struct scsi_cmnd *srb, 132static int queuecommand_lck(struct scsi_cmnd *srb,
128 void (*done)(struct scsi_cmnd *)) 133 void (*done)(struct scsi_cmnd *))
129{ 134{
130 struct rtsx_dev *dev = host_to_rtsx(srb->device->host); 135 struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
131 struct rtsx_chip *chip = dev->chip; 136 struct rtsx_chip *chip = dev->chip;
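The SPRINTF hunk above keeps the macro body wrapped in do { ... } while (0) while re-indenting it. That wrapper is what makes a multi-statement macro behave as a single statement, so it stays correct in an unbraced if/else. Below is a standalone userspace sketch of the idiom; the APPEND name and buffer layout are invented, and C99 __VA_ARGS__ stands in for the GNU-style "## args" used in the driver.

/*
 * Standalone sketch (not part of the patch): a do { } while (0) wrapper
 * around a conditional append, safe to use as one statement.
 */
#include <stdio.h>

#define APPEND(pos, buf, len, ...)					\
	do {								\
		if ((pos) < (buf) + (len))				\
			(pos) += sprintf((pos), __VA_ARGS__);		\
	} while (0)

int main(void)
{
	char buf[64];
	char *pos = buf;
	int verbose = 1;

	buf[0] = '\0';

	/* Works in an unbraced if/else precisely because of the wrapper. */
	if (verbose)
		APPEND(pos, buf, sizeof(buf), "state=%s ", "idle");
	else
		APPEND(pos, buf, sizeof(buf), "- ");

	APPEND(pos, buf, sizeof(buf), "count=%d", 3);
	printf("%s\n", buf);
	return 0;
}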
@@ -313,7 +318,7 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
313 return 0; 318 return 0;
314 319
315 /* lock the device pointers */ 320 /* lock the device pointers */
316 mutex_lock(&(dev->dev_mutex)); 321 mutex_lock(&dev->dev_mutex);
317 322
318 chip = dev->chip; 323 chip = dev->chip;
319 324
@@ -349,7 +354,7 @@ static int rtsx_resume(struct pci_dev *pci)
349 chip = dev->chip; 354 chip = dev->chip;
350 355
351 /* lock the device pointers */ 356 /* lock the device pointers */
352 mutex_lock(&(dev->dev_mutex)); 357 mutex_lock(&dev->dev_mutex);
353 358
354 pci_set_power_state(pci, PCI_D0); 359 pci_set_power_state(pci, PCI_D0);
355 pci_restore_state(pci); 360 pci_restore_state(pci);
@@ -418,7 +423,7 @@ static int rtsx_control_thread(void *__dev)
418 break; 423 break;
419 424
420 /* lock the device pointers */ 425 /* lock the device pointers */
421 mutex_lock(&(dev->dev_mutex)); 426 mutex_lock(&dev->dev_mutex);
422 427
423 /* if the device has disconnected, we are free to exit */ 428 /* if the device has disconnected, we are free to exit */
424 if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) { 429 if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -433,7 +438,7 @@ static int rtsx_control_thread(void *__dev)
433 /* has the command aborted ? */ 438 /* has the command aborted ? */
434 if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { 439 if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
435 chip->srb->result = DID_ABORT << 16; 440 chip->srb->result = DID_ABORT << 16;
436 goto SkipForAbort; 441 goto skip_for_abort;
437 } 442 }
438 443
439 scsi_unlock(host); 444 scsi_unlock(host);
@@ -480,12 +485,12 @@ static int rtsx_control_thread(void *__dev)
480 else if (chip->srb->result != DID_ABORT << 16) { 485 else if (chip->srb->result != DID_ABORT << 16) {
481 chip->srb->scsi_done(chip->srb); 486 chip->srb->scsi_done(chip->srb);
482 } else { 487 } else {
483SkipForAbort: 488skip_for_abort:
484 dev_err(&dev->pci->dev, "scsi command aborted\n"); 489 dev_err(&dev->pci->dev, "scsi command aborted\n");
485 } 490 }
486 491
487 if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { 492 if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
488 complete(&(dev->notify)); 493 complete(&dev->notify);
489 494
490 rtsx_set_stat(chip, RTSX_STAT_IDLE); 495 rtsx_set_stat(chip, RTSX_STAT_IDLE);
491 } 496 }
@@ -519,9 +524,9 @@ static int rtsx_polling_thread(void *__dev)
519{ 524{
520 struct rtsx_dev *dev = __dev; 525 struct rtsx_dev *dev = __dev;
521 struct rtsx_chip *chip = dev->chip; 526 struct rtsx_chip *chip = dev->chip;
522 struct sd_info *sd_card = &(chip->sd_card); 527 struct sd_info *sd_card = &chip->sd_card;
523 struct xd_info *xd_card = &(chip->xd_card); 528 struct xd_info *xd_card = &chip->xd_card;
524 struct ms_info *ms_card = &(chip->ms_card); 529 struct ms_info *ms_card = &chip->ms_card;
525 530
526 sd_card->cleanup_counter = 0; 531 sd_card->cleanup_counter = 0;
527 xd_card->cleanup_counter = 0; 532 xd_card->cleanup_counter = 0;
@@ -531,12 +536,11 @@ static int rtsx_polling_thread(void *__dev)
531 wait_timeout((delay_use + 5) * 1000); 536 wait_timeout((delay_use + 5) * 1000);
532 537
533 for (;;) { 538 for (;;) {
534
535 set_current_state(TASK_INTERRUPTIBLE); 539 set_current_state(TASK_INTERRUPTIBLE);
536 schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL)); 540 schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
537 541
538 /* lock the device pointers */ 542 /* lock the device pointers */
539 mutex_lock(&(dev->dev_mutex)); 543 mutex_lock(&dev->dev_mutex);
540 544
541 /* if the device has disconnected, we are free to exit */ 545 /* if the device has disconnected, we are free to exit */
542 if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) { 546 if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -550,7 +554,7 @@ static int rtsx_polling_thread(void *__dev)
550 mspro_polling_format_status(chip); 554 mspro_polling_format_status(chip);
551 555
552 /* lock the device pointers */ 556 /* lock the device pointers */
553 mutex_lock(&(dev->dev_mutex)); 557 mutex_lock(&dev->dev_mutex);
554 558
555 rtsx_polling_func(chip); 559 rtsx_polling_func(chip);
556 560
@@ -597,7 +601,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
597 dev->trans_result = TRANS_RESULT_FAIL; 601 dev->trans_result = TRANS_RESULT_FAIL;
598 if (dev->done) 602 if (dev->done)
599 complete(dev->done); 603 complete(dev->done);
600 goto Exit; 604 goto exit;
601 } 605 }
602 } 606 }
603 607
@@ -619,7 +623,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
619 } 623 }
620 } 624 }
621 625
622Exit: 626exit:
623 spin_unlock(&dev->reg_lock); 627 spin_unlock(&dev->reg_lock);
624 return IRQ_HANDLED; 628 return IRQ_HANDLED;
625} 629}
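The two hunks above rename the CamelCase goto targets (SkipForAbort, Exit) to lowercase labels, the usual kernel style for a single cleanup/exit path. The standalone sketch below shows the shape of that pattern with a descriptive lowercase label; the function and names are illustrative only.

/*
 * Standalone sketch (not part of the patch): a lowercase goto label named
 * after the cleanup it performs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int build_report(const char *name, char **out)
{
	char *buf;

	buf = malloc(128);
	if (!buf)
		return -1;

	if (strlen(name) >= 64)
		goto free_buf;	/* label describes the unwind action */

	snprintf(buf, 128, "report for %s", name);
	*out = buf;
	return 0;

free_buf:
	free(buf);
	return -1;
}

int main(void)
{
	char *report = NULL;

	if (build_report("ms_card", &report) == 0) {
		printf("%s\n", report);
		free(report);
	}
	return 0;
}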
@@ -724,9 +728,10 @@ static int rtsx_scan_thread(void *__dev)
724 dev_info(&dev->pci->dev, 728 dev_info(&dev->pci->dev,
725 "%s: waiting for device to settle before scanning\n", 729 "%s: waiting for device to settle before scanning\n",
726 CR_DRIVER_NAME); 730 CR_DRIVER_NAME);
727 wait_event_interruptible_timeout(dev->delay_wait, 731 wait_event_interruptible_timeout
728 rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT), 732 (dev->delay_wait,
729 delay_use * HZ); 733 rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
734 delay_use * HZ);
730 } 735 }
731 736
732 /* If the device is still connected, perform the scanning */ 737 /* If the device is still connected, perform the scanning */
@@ -844,7 +849,7 @@ static void rtsx_init_options(struct rtsx_chip *chip)
844} 849}
845 850
846static int rtsx_probe(struct pci_dev *pci, 851static int rtsx_probe(struct pci_dev *pci,
847 const struct pci_device_id *pci_id) 852 const struct pci_device_id *pci_id)
848{ 853{
849 struct Scsi_Host *host; 854 struct Scsi_Host *host;
850 struct rtsx_dev *dev; 855 struct rtsx_dev *dev;
@@ -879,18 +884,18 @@ static int rtsx_probe(struct pci_dev *pci,
879 dev = host_to_rtsx(host); 884 dev = host_to_rtsx(host);
880 memset(dev, 0, sizeof(struct rtsx_dev)); 885 memset(dev, 0, sizeof(struct rtsx_dev));
881 886
882 dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL); 887 dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
883 if (!dev->chip) { 888 if (!dev->chip) {
884 err = -ENOMEM; 889 err = -ENOMEM;
885 goto errout; 890 goto errout;
886 } 891 }
887 892
888 spin_lock_init(&dev->reg_lock); 893 spin_lock_init(&dev->reg_lock);
889 mutex_init(&(dev->dev_mutex)); 894 mutex_init(&dev->dev_mutex);
890 init_completion(&dev->cmnd_ready); 895 init_completion(&dev->cmnd_ready);
891 init_completion(&dev->control_exit); 896 init_completion(&dev->control_exit);
892 init_completion(&dev->polling_exit); 897 init_completion(&dev->polling_exit);
893 init_completion(&(dev->notify)); 898 init_completion(&dev->notify);
894 init_completion(&dev->scanning_done); 899 init_completion(&dev->scanning_done);
895 init_waitqueue_head(&dev->delay_wait); 900 init_waitqueue_head(&dev->delay_wait);
896 901
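Two related cleanups run through the rtsx.c hunks above: kzalloc(sizeof(struct rtsx_chip), ...) becomes kzalloc(sizeof(*dev->chip), ...), and the redundant parentheses in &(dev->dev_mutex) are dropped. Sizing an allocation by the pointed-to object keeps it correct even if the pointer's type later changes. A standalone sketch follows, with calloc() standing in for kzalloc() and invented structure names.

/*
 * Standalone sketch (not part of the patch): allocate by sizeof(*ptr)
 * rather than by a named struct type.
 */
#include <stdio.h>
#include <stdlib.h>

struct chip_state {
	int irq;
	unsigned long flags;
};

struct device_ctx {
	struct chip_state *chip;
};

int main(void)
{
	struct device_ctx dev;

	/* sizeof(*dev.chip) follows the pointer's target type automatically */
	dev.chip = calloc(1, sizeof(*dev.chip));
	if (!dev.chip)
		return 1;

	dev.chip->irq = 16;
	printf("irq=%d\n", dev.chip->irq);
	free(dev.chip);
	return 0;
}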
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index e725b10ed087..575e5734f2a5 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -149,7 +149,7 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
149 149
150 getnstimeofday64(&ts64); 150 getnstimeofday64(&ts64);
151 151
152 tv_usec = ts64.tv_nsec/NSEC_PER_USEC; 152 tv_usec = ts64.tv_nsec / NSEC_PER_USEC;
153 153
154 timeval_buf[0] = (u8)(ts64.tv_sec >> 24); 154 timeval_buf[0] = (u8)(ts64.tv_sec >> 24);
155 timeval_buf[1] = (u8)(ts64.tv_sec >> 16); 155 timeval_buf[1] = (u8)(ts64.tv_sec >> 16);
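The rtsx.h hunk above only adds spaces around the '/' operator; the computed value is unchanged. A trivial standalone sketch of the same expression style follows, with NSEC_PER_USEC defined locally so the example is self-contained.

/*
 * Standalone sketch (not part of the patch): spaces around binary
 * operators, same arithmetic as before.
 */
#include <stdio.h>

#define NSEC_PER_USEC	1000L

int main(void)
{
	long tv_nsec = 987654321L;
	long tv_usec = tv_nsec / NSEC_PER_USEC;	/* spaced binary '/' */

	printf("usec=%ld\n", tv_usec);
	return 0;
}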
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 97717744962d..a6b7bffc6714 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -33,11 +33,11 @@
33 33
34void do_remaining_work(struct rtsx_chip *chip) 34void do_remaining_work(struct rtsx_chip *chip)
35{ 35{
36 struct sd_info *sd_card = &(chip->sd_card); 36 struct sd_info *sd_card = &chip->sd_card;
37#ifdef XD_DELAY_WRITE 37#ifdef XD_DELAY_WRITE
38 struct xd_info *xd_card = &(chip->xd_card); 38 struct xd_info *xd_card = &chip->xd_card;
39#endif 39#endif
40 struct ms_info *ms_card = &(chip->ms_card); 40 struct ms_info *ms_card = &chip->ms_card;
41 41
42 if (chip->card_ready & SD_CARD) { 42 if (chip->card_ready & SD_CARD) {
43 if (sd_card->seq_mode) { 43 if (sd_card->seq_mode) {
@@ -100,9 +100,9 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
100 if ((reg1 & 0xC0) && (reg2 & 0xC0)) { 100 if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
101 chip->sd_int = 1; 101 chip->sd_int = 1;
102 rtsx_write_register(chip, SDIO_CTRL, 0xFF, 102 rtsx_write_register(chip, SDIO_CTRL, 0xFF,
103 SDIO_BUS_CTRL | SDIO_CD_CTRL); 103 SDIO_BUS_CTRL | SDIO_CD_CTRL);
104 rtsx_write_register(chip, PWR_GATE_CTRL, 104 rtsx_write_register(chip, PWR_GATE_CTRL,
105 LDO3318_PWR_MASK, LDO_ON); 105 LDO3318_PWR_MASK, LDO_ON);
106 } 106 }
107} 107}
108 108
@@ -133,7 +133,7 @@ void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
133 if (!chip->sdio_aspm) { 133 if (!chip->sdio_aspm) {
134 dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n"); 134 dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
135 rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, 135 rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
136 0x30 | (chip->aspm_level[1] << 2)); 136 0x30 | (chip->aspm_level[1] << 2));
137 chip->sdio_aspm = 1; 137 chip->sdio_aspm = 1;
138 } 138 }
139 } else { 139 } else {
@@ -154,7 +154,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
154 chip->sd_reset_counter, chip->card2lun[SD_CARD]); 154 chip->sd_reset_counter, chip->card2lun[SD_CARD]);
155 155
156 if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) { 156 if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
157 clear_bit(SD_NR, &(chip->need_reset)); 157 clear_bit(SD_NR, &chip->need_reset);
158 chip->sd_reset_counter = 0; 158 chip->sd_reset_counter = 0;
159 chip->sd_show_cnt = 0; 159 chip->sd_show_cnt = 0;
160 return; 160 return;
@@ -169,7 +169,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
169 if (chip->need_release & SD_CARD) 169 if (chip->need_release & SD_CARD)
170 return; 170 return;
171 if (retval == STATUS_SUCCESS) { 171 if (retval == STATUS_SUCCESS) {
172 clear_bit(SD_NR, &(chip->need_reset)); 172 clear_bit(SD_NR, &chip->need_reset);
173 chip->sd_reset_counter = 0; 173 chip->sd_reset_counter = 0;
174 chip->sd_show_cnt = 0; 174 chip->sd_show_cnt = 0;
175 chip->card_ready |= SD_CARD; 175 chip->card_ready |= SD_CARD;
@@ -177,7 +177,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
177 chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw; 177 chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
178 } else { 178 } else {
179 if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) { 179 if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
180 clear_bit(SD_NR, &(chip->need_reset)); 180 clear_bit(SD_NR, &chip->need_reset);
181 chip->sd_reset_counter = 0; 181 chip->sd_reset_counter = 0;
182 chip->sd_show_cnt = 0; 182 chip->sd_show_cnt = 0;
183 } else { 183 } else {
@@ -208,7 +208,7 @@ void do_reset_xd_card(struct rtsx_chip *chip)
208 chip->xd_reset_counter, chip->card2lun[XD_CARD]); 208 chip->xd_reset_counter, chip->card2lun[XD_CARD]);
209 209
210 if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) { 210 if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
211 clear_bit(XD_NR, &(chip->need_reset)); 211 clear_bit(XD_NR, &chip->need_reset);
212 chip->xd_reset_counter = 0; 212 chip->xd_reset_counter = 0;
213 chip->xd_show_cnt = 0; 213 chip->xd_show_cnt = 0;
214 return; 214 return;
@@ -223,14 +223,14 @@ void do_reset_xd_card(struct rtsx_chip *chip)
223 if (chip->need_release & XD_CARD) 223 if (chip->need_release & XD_CARD)
224 return; 224 return;
225 if (retval == STATUS_SUCCESS) { 225 if (retval == STATUS_SUCCESS) {
226 clear_bit(XD_NR, &(chip->need_reset)); 226 clear_bit(XD_NR, &chip->need_reset);
227 chip->xd_reset_counter = 0; 227 chip->xd_reset_counter = 0;
228 chip->card_ready |= XD_CARD; 228 chip->card_ready |= XD_CARD;
229 chip->card_fail &= ~XD_CARD; 229 chip->card_fail &= ~XD_CARD;
230 chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw; 230 chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
231 } else { 231 } else {
232 if (chip->xd_reset_counter >= MAX_RESET_CNT) { 232 if (chip->xd_reset_counter >= MAX_RESET_CNT) {
233 clear_bit(XD_NR, &(chip->need_reset)); 233 clear_bit(XD_NR, &chip->need_reset);
234 chip->xd_reset_counter = 0; 234 chip->xd_reset_counter = 0;
235 chip->xd_show_cnt = 0; 235 chip->xd_show_cnt = 0;
236 } else { 236 } else {
@@ -256,7 +256,7 @@ void do_reset_ms_card(struct rtsx_chip *chip)
256 chip->ms_reset_counter, chip->card2lun[MS_CARD]); 256 chip->ms_reset_counter, chip->card2lun[MS_CARD]);
257 257
258 if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) { 258 if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
259 clear_bit(MS_NR, &(chip->need_reset)); 259 clear_bit(MS_NR, &chip->need_reset);
260 chip->ms_reset_counter = 0; 260 chip->ms_reset_counter = 0;
261 chip->ms_show_cnt = 0; 261 chip->ms_show_cnt = 0;
262 return; 262 return;
@@ -271,14 +271,14 @@ void do_reset_ms_card(struct rtsx_chip *chip)
271 if (chip->need_release & MS_CARD) 271 if (chip->need_release & MS_CARD)
272 return; 272 return;
273 if (retval == STATUS_SUCCESS) { 273 if (retval == STATUS_SUCCESS) {
274 clear_bit(MS_NR, &(chip->need_reset)); 274 clear_bit(MS_NR, &chip->need_reset);
275 chip->ms_reset_counter = 0; 275 chip->ms_reset_counter = 0;
276 chip->card_ready |= MS_CARD; 276 chip->card_ready |= MS_CARD;
277 chip->card_fail &= ~MS_CARD; 277 chip->card_fail &= ~MS_CARD;
278 chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw; 278 chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
279 } else { 279 } else {
280 if (chip->ms_reset_counter >= MAX_RESET_CNT) { 280 if (chip->ms_reset_counter >= MAX_RESET_CNT) {
281 clear_bit(MS_NR, &(chip->need_reset)); 281 clear_bit(MS_NR, &chip->need_reset);
282 chip->ms_reset_counter = 0; 282 chip->ms_reset_counter = 0;
283 chip->ms_show_cnt = 0; 283 chip->ms_show_cnt = 0;
284 } else { 284 } else {
@@ -300,7 +300,7 @@ static void release_sdio(struct rtsx_chip *chip)
300{ 300{
301 if (chip->sd_io) { 301 if (chip->sd_io) {
302 rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR, 302 rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
303 SD_STOP | SD_CLR_ERR); 303 SD_STOP | SD_CLR_ERR);
304 304
305 if (chip->chip_insert_with_sdio) { 305 if (chip->chip_insert_with_sdio) {
306 chip->chip_insert_with_sdio = 0; 306 chip->chip_insert_with_sdio = 0;
@@ -369,7 +369,7 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
369 rtsx_disable_aspm(chip); 369 rtsx_disable_aspm(chip);
370 370
371 if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio) 371 if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
372 clear_bit(SD_NR, &(chip->need_reset)); 372 clear_bit(SD_NR, &chip->need_reset);
373 373
374 if (chip->need_reset & XD_CARD) { 374 if (chip->need_reset & XD_CARD) {
375 chip->card_exist |= XD_CARD; 375 chip->card_exist |= XD_CARD;
@@ -381,8 +381,8 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
381 } 381 }
382 if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) { 382 if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
383 if (chip->card_exist & XD_CARD) { 383 if (chip->card_exist & XD_CARD) {
384 clear_bit(SD_NR, &(chip->need_reset)); 384 clear_bit(SD_NR, &chip->need_reset);
385 clear_bit(MS_NR, &(chip->need_reset)); 385 clear_bit(MS_NR, &chip->need_reset);
386 } 386 }
387 } 387 }
388 if (chip->need_reset & SD_CARD) { 388 if (chip->need_reset & SD_CARD) {
@@ -449,7 +449,7 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
449 449
450#ifdef DISABLE_CARD_INT 450#ifdef DISABLE_CARD_INT
451void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, 451void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
452 unsigned long *need_release) 452 unsigned long *need_release)
453{ 453{
454 u8 release_map = 0, reset_map = 0; 454 u8 release_map = 0, reset_map = 0;
455 455
@@ -502,13 +502,13 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
502 502
503 reset_map = 0; 503 reset_map = 0;
504 if (!(chip->card_exist & XD_CARD) && 504 if (!(chip->card_exist & XD_CARD) &&
505 (xd_cnt > (DEBOUNCE_CNT-1))) 505 (xd_cnt > (DEBOUNCE_CNT - 1)))
506 reset_map |= XD_CARD; 506 reset_map |= XD_CARD;
507 if (!(chip->card_exist & SD_CARD) && 507 if (!(chip->card_exist & SD_CARD) &&
508 (sd_cnt > (DEBOUNCE_CNT-1))) 508 (sd_cnt > (DEBOUNCE_CNT - 1)))
509 reset_map |= SD_CARD; 509 reset_map |= SD_CARD;
510 if (!(chip->card_exist & MS_CARD) && 510 if (!(chip->card_exist & MS_CARD) &&
511 (ms_cnt > (DEBOUNCE_CNT-1))) 511 (ms_cnt > (DEBOUNCE_CNT - 1)))
512 reset_map |= MS_CARD; 512 reset_map |= MS_CARD;
513 } 513 }
514 514
@@ -531,23 +531,23 @@ void rtsx_init_cards(struct rtsx_chip *chip)
531 } 531 }
532 532
533#ifdef DISABLE_CARD_INT 533#ifdef DISABLE_CARD_INT
534 card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release)); 534 card_cd_debounce(chip, &chip->need_reset, &chip->need_release);
535#endif 535#endif
536 536
537 if (chip->need_release) { 537 if (chip->need_release) {
538 if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) { 538 if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
539 if (chip->int_reg & XD_EXIST) { 539 if (chip->int_reg & XD_EXIST) {
540 clear_bit(SD_NR, &(chip->need_release)); 540 clear_bit(SD_NR, &chip->need_release);
541 clear_bit(MS_NR, &(chip->need_release)); 541 clear_bit(MS_NR, &chip->need_release);
542 } 542 }
543 } 543 }
544 544
545 if (!(chip->card_exist & SD_CARD) && !chip->sd_io) 545 if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
546 clear_bit(SD_NR, &(chip->need_release)); 546 clear_bit(SD_NR, &chip->need_release);
547 if (!(chip->card_exist & XD_CARD)) 547 if (!(chip->card_exist & XD_CARD))
548 clear_bit(XD_NR, &(chip->need_release)); 548 clear_bit(XD_NR, &chip->need_release);
549 if (!(chip->card_exist & MS_CARD)) 549 if (!(chip->card_exist & MS_CARD))
550 clear_bit(MS_NR, &(chip->need_release)); 550 clear_bit(MS_NR, &chip->need_release);
551 551
552 dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n", 552 dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n",
553 (unsigned int)(chip->need_release)); 553 (unsigned int)(chip->need_release));
@@ -556,8 +556,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
556 if (chip->need_release) { 556 if (chip->need_release) {
557 if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER)) 557 if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
558 rtsx_write_register(chip, OCPCLR, 558 rtsx_write_register(chip, OCPCLR,
559 CARD_OC_INT_CLR | CARD_OC_CLR, 559 CARD_OC_INT_CLR |
560 CARD_OC_INT_CLR | CARD_OC_CLR); 560 CARD_OC_CLR,
561 CARD_OC_INT_CLR |
562 CARD_OC_CLR);
561 chip->ocp_stat = 0; 563 chip->ocp_stat = 0;
562 } 564 }
563#endif 565#endif
@@ -567,7 +569,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
567 } 569 }
568 570
569 if (chip->need_release & SD_CARD) { 571 if (chip->need_release & SD_CARD) {
570 clear_bit(SD_NR, &(chip->need_release)); 572 clear_bit(SD_NR, &chip->need_release);
571 chip->card_exist &= ~SD_CARD; 573 chip->card_exist &= ~SD_CARD;
572 chip->card_ejected &= ~SD_CARD; 574 chip->card_ejected &= ~SD_CARD;
573 chip->card_fail &= ~SD_CARD; 575 chip->card_fail &= ~SD_CARD;
@@ -580,7 +582,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
580 } 582 }
581 583
582 if (chip->need_release & XD_CARD) { 584 if (chip->need_release & XD_CARD) {
583 clear_bit(XD_NR, &(chip->need_release)); 585 clear_bit(XD_NR, &chip->need_release);
584 chip->card_exist &= ~XD_CARD; 586 chip->card_exist &= ~XD_CARD;
585 chip->card_ejected &= ~XD_CARD; 587 chip->card_ejected &= ~XD_CARD;
586 chip->card_fail &= ~XD_CARD; 588 chip->card_fail &= ~XD_CARD;
@@ -590,13 +592,13 @@ void rtsx_init_cards(struct rtsx_chip *chip)
590 release_xd_card(chip); 592 release_xd_card(chip);
591 593
592 if (CHECK_PID(chip, 0x5288) && 594 if (CHECK_PID(chip, 0x5288) &&
593 CHECK_BARO_PKG(chip, QFN)) 595 CHECK_BARO_PKG(chip, QFN))
594 rtsx_write_register(chip, HOST_SLEEP_STATE, 596 rtsx_write_register(chip, HOST_SLEEP_STATE,
595 0xC0, 0xC0); 597 0xC0, 0xC0);
596 } 598 }
597 599
598 if (chip->need_release & MS_CARD) { 600 if (chip->need_release & MS_CARD) {
599 clear_bit(MS_NR, &(chip->need_release)); 601 clear_bit(MS_NR, &chip->need_release);
600 chip->card_exist &= ~MS_CARD; 602 chip->card_exist &= ~MS_CARD;
601 chip->card_ejected &= ~MS_CARD; 603 chip->card_ejected &= ~MS_CARD;
602 chip->card_fail &= ~MS_CARD; 604 chip->card_fail &= ~MS_CARD;
@@ -650,7 +652,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
650 return STATUS_FAIL; 652 return STATUS_FAIL;
651 } 653 }
652 654
653 mcu_cnt = (u8)(125/clk + 3); 655 mcu_cnt = (u8)(125 / clk + 3);
654 if (mcu_cnt > 7) 656 if (mcu_cnt > 7)
655 mcu_cnt = 7; 657 mcu_cnt = 7;
656 658
@@ -681,9 +683,9 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
681 rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); 683 rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
682 if (sd_vpclk_phase_reset) { 684 if (sd_vpclk_phase_reset) {
683 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, 685 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
684 PHASE_NOT_RESET, 0); 686 PHASE_NOT_RESET, 0);
685 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, 687 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
686 PHASE_NOT_RESET, PHASE_NOT_RESET); 688 PHASE_NOT_RESET, PHASE_NOT_RESET);
687 } 689 }
688 690
689 retval = rtsx_send_cmd(chip, 0, WAIT_TIME); 691 retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -850,7 +852,7 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
850} 852}
851 853
852void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, 854void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
853 u32 byte_cnt, u8 pack_size) 855 u32 byte_cnt, u8 pack_size)
854{ 856{
855 if (pack_size > DMA_1024) 857 if (pack_size > DMA_1024)
856 pack_size = DMA_512; 858 pack_size = DMA_512;
@@ -864,11 +866,11 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
864 866
865 if (dir == DMA_FROM_DEVICE) { 867 if (dir == DMA_FROM_DEVICE) {
866 rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 868 rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
867 0x03 | DMA_PACK_SIZE_MASK, 869 0x03 | DMA_PACK_SIZE_MASK,
868 DMA_DIR_FROM_CARD | DMA_EN | pack_size); 870 DMA_DIR_FROM_CARD | DMA_EN | pack_size);
869 } else { 871 } else {
870 rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 872 rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
871 0x03 | DMA_PACK_SIZE_MASK, 873 0x03 | DMA_PACK_SIZE_MASK,
872 DMA_DIR_TO_CARD | DMA_EN | pack_size); 874 DMA_DIR_TO_CARD | DMA_EN | pack_size);
873 } 875 }
874 876
@@ -978,13 +980,13 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
978} 980}
979 981
980int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 982int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
981 u32 sec_addr, u16 sec_cnt) 983 u32 sec_addr, u16 sec_cnt)
982{ 984{
983 int retval; 985 int retval;
984 unsigned int lun = SCSI_LUN(srb); 986 unsigned int lun = SCSI_LUN(srb);
985 int i; 987 int i;
986 988
987 if (chip->rw_card[lun] == NULL) { 989 if (!chip->rw_card[lun]) {
988 rtsx_trace(chip); 990 rtsx_trace(chip);
989 return STATUS_FAIL; 991 return STATUS_FAIL;
990 } 992 }
@@ -1115,7 +1117,7 @@ void turn_on_led(struct rtsx_chip *chip, u8 gpio)
1115{ 1117{
1116 if (CHECK_PID(chip, 0x5288)) 1118 if (CHECK_PID(chip, 0x5288))
1117 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 1119 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
1118 (u8)(1 << gpio)); 1120 (u8)(1 << gpio));
1119 else 1121 else
1120 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0); 1122 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
1121} 1123}
@@ -1126,7 +1128,7 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
1126 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0); 1128 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
1127 else 1129 else
1128 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 1130 rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
1129 (u8)(1 << gpio)); 1131 (u8)(1 << gpio));
1130} 1132}
1131 1133
1132int detect_card_cd(struct rtsx_chip *chip, int card) 1134int detect_card_cd(struct rtsx_chip *chip, int card)
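Beyond the re-wraps, the rtsx_card.c hunks above apply two small idioms: &(chip->need_reset) loses its redundant parentheses, and "chip->rw_card[lun] == NULL" becomes "!chip->rw_card[lun]". The standalone sketch below shows both; the chip structure and helper are invented for illustration.

/*
 * Standalone sketch (not part of the patch): plain &x for address-of and
 * !ptr for NULL tests.
 */
#include <stdio.h>

struct chip { unsigned long need_reset; };

static void request_reset(unsigned long *mask, int bit)
{
	if (!mask)		/* preferred over: if (mask == NULL) */
		return;

	*mask |= 1UL << bit;
}

int main(void)
{
	struct chip chip = { 0 };

	request_reset(&chip.need_reset, 2);	/* &x, no extra parentheses */
	printf("need_reset=0x%lx\n", chip.need_reset);
	return 0;
}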
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 56df9a431d6d..aa37705bae39 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1011,9 +1011,9 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk);
1011int enable_card_clock(struct rtsx_chip *chip, u8 card); 1011int enable_card_clock(struct rtsx_chip *chip, u8 card);
1012int disable_card_clock(struct rtsx_chip *chip, u8 card); 1012int disable_card_clock(struct rtsx_chip *chip, u8 card);
1013int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 1013int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
1014 u32 sec_addr, u16 sec_cnt); 1014 u32 sec_addr, u16 sec_cnt);
1015void trans_dma_enable(enum dma_data_direction dir, 1015void trans_dma_enable(enum dma_data_direction dir,
1016 struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size); 1016 struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
1017void toggle_gpio(struct rtsx_chip *chip, u8 gpio); 1017void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
1018void turn_on_led(struct rtsx_chip *chip, u8 gpio); 1018void turn_on_led(struct rtsx_chip *chip, u8 gpio);
1019void turn_off_led(struct rtsx_chip *chip, u8 gpio); 1019void turn_off_led(struct rtsx_chip *chip, u8 gpio);
@@ -1030,10 +1030,10 @@ u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
1030static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun) 1030static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
1031{ 1031{
1032#ifdef SUPPORT_SD_LOCK 1032#ifdef SUPPORT_SD_LOCK
1033 struct sd_info *sd_card = &(chip->sd_card); 1033 struct sd_info *sd_card = &chip->sd_card;
1034 1034
1035 if ((get_lun_card(chip, lun) == SD_CARD) && 1035 if ((get_lun_card(chip, lun) == SD_CARD) &&
1036 (sd_card->sd_lock_status & SD_LOCKED)) 1036 (sd_card->sd_lock_status & SD_LOCKED))
1037 return 0; 1037 return 0;
1038 1038
1039 return chip->capacity[lun]; 1039 return chip->capacity[lun];
@@ -1073,25 +1073,25 @@ static inline int card_power_off_all(struct rtsx_chip *chip)
1073static inline void rtsx_clear_xd_error(struct rtsx_chip *chip) 1073static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
1074{ 1074{
1075 rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR, 1075 rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
1076 XD_STOP | XD_CLR_ERR); 1076 XD_STOP | XD_CLR_ERR);
1077} 1077}
1078 1078
1079static inline void rtsx_clear_sd_error(struct rtsx_chip *chip) 1079static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
1080{ 1080{
1081 rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR, 1081 rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
1082 SD_STOP | SD_CLR_ERR); 1082 SD_STOP | SD_CLR_ERR);
1083} 1083}
1084 1084
1085static inline void rtsx_clear_ms_error(struct rtsx_chip *chip) 1085static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
1086{ 1086{
1087 rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR, 1087 rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
1088 MS_STOP | MS_CLR_ERR); 1088 MS_STOP | MS_CLR_ERR);
1089} 1089}
1090 1090
1091static inline void rtsx_clear_spi_error(struct rtsx_chip *chip) 1091static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
1092{ 1092{
1093 rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR, 1093 rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
1094 SPI_STOP | SPI_CLR_ERR); 1094 SPI_STOP | SPI_CLR_ERR);
1095} 1095}
1096 1096
1097#ifdef SUPPORT_SDIO_ASPM 1097#ifdef SUPPORT_SDIO_ASPM
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a10dd6220a7b..3511157a2c78 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -114,7 +114,8 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
114 if (chip->asic_code) { 114 if (chip->asic_code) {
115 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 115 retval = rtsx_write_register(chip, CARD_PULL_CTL5,
116 0xFF, 116 0xFF,
117 MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU); 117 MS_INS_PU | SD_WP_PU |
118 SD_CD_PU | SD_CMD_PU);
118 if (retval) { 119 if (retval) {
119 rtsx_trace(chip); 120 rtsx_trace(chip);
120 return retval; 121 return retval;
@@ -240,10 +241,10 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
240 return STATUS_FAIL; 241 return STATUS_FAIL;
241 } 242 }
242 } else { 243 } else {
243 retval = rtsx_write_register(chip, 244 retval = rtsx_write_register
244 FPGA_PULL_CTL, 245 (chip, FPGA_PULL_CTL,
245 FPGA_SD_PULL_CTL_BIT | 0x20, 246 FPGA_SD_PULL_CTL_BIT | 0x20,
246 0); 247 0);
247 if (retval) { 248 if (retval) {
248 rtsx_trace(chip); 249 rtsx_trace(chip);
249 return retval; 250 return retval;
@@ -713,7 +714,8 @@ nextcard:
713 714
714 if (chip->ft2_fast_mode) { 715 if (chip->ft2_fast_mode) {
715 retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF, 716 retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
716 MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON); 717 MS_PARTIAL_POWER_ON |
718 SD_PARTIAL_POWER_ON);
717 if (retval) { 719 if (retval) {
718 rtsx_trace(chip); 720 rtsx_trace(chip);
719 return retval; 721 return retval;
@@ -1567,7 +1569,8 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
1567 } 1569 }
1568 1570
1569 retval = rtsx_write_register(chip, CFGRWCTL, 0xFF, 1571 retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
1570 0x80 | mode | ((func_no & 0x03) << 4)); 1572 0x80 | mode |
1573 ((func_no & 0x03) << 4));
1571 if (retval) { 1574 if (retval) {
1572 rtsx_trace(chip); 1575 rtsx_trace(chip);
1573 return retval; 1576 return retval;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index f36642817c6e..4f6e3c1c4621 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -130,16 +130,20 @@
130#define PRDCT_REV_LEN 4 /* Product LOT Length */ 130#define PRDCT_REV_LEN 4 /* Product LOT Length */
131 131
132/* Dynamic flag definitions: used in set_bit() etc. */ 132/* Dynamic flag definitions: used in set_bit() etc. */
133#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */ 133/* 0x00040000 transfer is active */
134#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */ 134#define RTSX_FLIDX_TRANS_ACTIVE 18
135#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */ 135/* 0x00100000 abort is in progress */
136#define RTSX_FLIDX_ABORTING 20
137/* 0x00200000 disconnect in progress */
138#define RTSX_FLIDX_DISCONNECTING 21
136 139
137#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \ 140#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \
138 (1UL << US_FLIDX_DISCONNECTING)) 141 (1UL << US_FLIDX_DISCONNECTING))
139 142
140#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */ 143/* 0x00400000 device reset in progress */
141#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI midlayer timed out */ 144#define RTSX_FLIDX_RESETTING 22
142 145/* 0x00800000 SCSI midlayer timed out */
146#define RTSX_FLIDX_TIMED_OUT 23
143#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */ 147#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
144#define RMB_DISC 0x80 /* The Device is Removable */ 148#define RMB_DISC 0x80 /* The Device is Removable */
145#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */ 149#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
@@ -285,23 +289,24 @@ struct sense_data_t {
285 289
286#define CARD_INT (XD_INT | MS_INT | SD_INT) 290#define CARD_INT (XD_INT | MS_INT | SD_INT)
287#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT) 291#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
288#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT) 292#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | \
293 GPIO0_INT | OC_INT)
289 294
290#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST) 295#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
291 296
292/* Bus interrupt enable register */ 297/* Bus interrupt enable register */
293#define CMD_DONE_INT_EN (1 << 31) 298#define CMD_DONE_INT_EN BIT(31)
294#define DATA_DONE_INT_EN (1 << 30) 299#define DATA_DONE_INT_EN BIT(30)
295#define TRANS_OK_INT_EN (1 << 29) 300#define TRANS_OK_INT_EN BIT(29)
296#define TRANS_FAIL_INT_EN (1 << 28) 301#define TRANS_FAIL_INT_EN BIT(28)
297#define XD_INT_EN (1 << 27) 302#define XD_INT_EN BIT(27)
298#define MS_INT_EN (1 << 26) 303#define MS_INT_EN BIT(26)
299#define SD_INT_EN (1 << 25) 304#define SD_INT_EN BIT(25)
300#define GPIO0_INT_EN (1 << 24) 305#define GPIO0_INT_EN BIT(24)
301#define OC_INT_EN (1 << 23) 306#define OC_INT_EN BIT(23)
302#define DELINK_INT_EN GPIO0_INT_EN 307#define DELINK_INT_EN GPIO0_INT_EN
303#define MS_OC_INT_EN (1 << 23) 308#define MS_OC_INT_EN BIT(23)
304#define SD_OC_INT_EN (1 << 22) 309#define SD_OC_INT_EN BIT(22)
305 310
306#define READ_REG_CMD 0 311#define READ_REG_CMD 0
307#define WRITE_REG_CMD 1 312#define WRITE_REG_CMD 1
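The interrupt-enable hunk above converts the (1 << n) constants to the kernel's BIT(n) helper. BIT() expands to an unsigned-long shift, so BIT(31) never shifts into the sign bit of a 32-bit int the way (1 << 31) does. The standalone sketch below defines BIT() locally so it compiles outside the kernel; the mask names are taken from the hunk but the program around them is illustrative.

/*
 * Standalone sketch (not part of the patch): BIT(n) as an unsigned shift,
 * combined into an interrupt-enable style mask.
 */
#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define CMD_DONE_INT_EN		BIT(31)
#define DATA_DONE_INT_EN	BIT(30)
#define TRANS_OK_INT_EN		BIT(29)

int main(void)
{
	unsigned long enable = CMD_DONE_INT_EN | TRANS_OK_INT_EN;

	printf("enable mask = 0x%lx\n", enable);
	printf("data-done set: %d\n", (enable & DATA_DONE_INT_EN) != 0);
	return 0;
}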
@@ -318,10 +323,10 @@ struct sense_data_t {
318#define MS_NR 3 323#define MS_NR 3
319#define XD_NR 4 324#define XD_NR 4
320#define SPI_NR 7 325#define SPI_NR 7
321#define SD_CARD (1 << SD_NR) 326#define SD_CARD BIT(SD_NR)
322#define MS_CARD (1 << MS_NR) 327#define MS_CARD BIT(MS_NR)
323#define XD_CARD (1 << XD_NR) 328#define XD_CARD BIT(XD_NR)
324#define SPI_CARD (1 << SPI_NR) 329#define SPI_CARD BIT(SPI_NR)
325 330
326#define MAX_ALLOWED_LUN_CNT 8 331#define MAX_ALLOWED_LUN_CNT 8
327 332
@@ -393,14 +398,23 @@ struct zone_entry {
393 398
394/* SD card */ 399/* SD card */
395#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD) 400#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
396#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS)) 401#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && \
397#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50)) 402 ((sd_card)->sd_type & SD_HS))
398#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50)) 403#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && \
399#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104)) 404 ((sd_card)->sd_type & SD_SDR50))
400#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC)) 405#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && \
401#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000)) 406 ((sd_card)->sd_type & SD_DDR50))
402#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000)) 407#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && \
403#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card)) 408 ((sd_card)->sd_type & SD_SDR104))
409#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && \
410 ((sd_card)->sd_type & SD_HCXC))
411#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && \
412 ((sd_card)->capacity <= 0x4000000))
413#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && \
414 ((sd_card)->capacity > 0x4000000))
415#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || \
416 CHK_SD_DDR50(sd_card) || \
417 CHK_SD_SDR104(sd_card))
404 418
405#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD) 419#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
406#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS) 420#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
@@ -416,13 +430,20 @@ struct zone_entry {
416#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC) 430#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
417 431
418/* MMC card */ 432/* MMC card */
419#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC) 433#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == \
420#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M)) 434 TYPE_MMC)
421#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M)) 435#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && \
422#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT)) 436 ((sd_card)->sd_type & MMC_26M))
423#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT)) 437#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && \
424#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE)) 438 ((sd_card)->sd_type & MMC_52M))
425#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52)) 439#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && \
440 ((sd_card)->sd_type & MMC_4BIT))
441#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && \
442 ((sd_card)->sd_type & MMC_8BIT))
443#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && \
444 ((sd_card)->sd_type & MMC_SECTOR_MODE))
445#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && \
446 ((sd_card)->sd_type & MMC_DDR52))
426 447
427#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC) 448#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
428#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M) 449#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
@@ -439,7 +460,8 @@ struct zone_entry {
439#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE) 460#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
440#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52) 461#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
441 462
442#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card)) 463#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && \
464 CHK_MMC_26M(sd_card))
443#define CLR_MMC_HS(sd_card) \ 465#define CLR_MMC_HS(sd_card) \
444do { \ 466do { \
445 CLR_MMC_DDR52(sd_card); \ 467 CLR_MMC_DDR52(sd_card); \
@@ -450,12 +472,18 @@ do { \
450#define SD_SUPPORT_CLASS_TEN 0x01 472#define SD_SUPPORT_CLASS_TEN 0x01
451#define SD_SUPPORT_1V8 0x02 473#define SD_SUPPORT_1V8 0x02
452 474
453#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN) 475#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= \
454#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN) 476 SD_SUPPORT_CLASS_TEN)
455#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN) 477#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & \
456#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8) 478 SD_SUPPORT_CLASS_TEN)
457#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8) 479#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= \
458#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8) 480 ~SD_SUPPORT_CLASS_TEN)
481#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= \
482 SD_SUPPORT_1V8)
483#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & \
484 SD_SUPPORT_1V8)
485#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= \
486 ~SD_SUPPORT_1V8)
459 487
460struct sd_info { 488struct sd_info {
461 u16 sd_type; 489 u16 sd_type;
@@ -544,9 +572,12 @@ struct xd_info {
544#define HG8BIT (MS_HG | MS_8BIT) 572#define HG8BIT (MS_HG | MS_8BIT)
545 573
546#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO) 574#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
547#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT)) 575#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && \
548#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC)) 576 (((ms_card)->ms_type & HG8BIT) == HG8BIT))
549#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG)) 577#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && \
578 ((ms_card)->ms_type & MS_XC))
579#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && \
580 ((ms_card)->ms_type & MS_HG))
550 581
551#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT)) 582#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
552#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT)) 583#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
@@ -679,8 +710,10 @@ struct trace_msg_t {
679#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST) 710#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST)
680 711
681#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED) 712#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED)
682#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED) 713#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= \
683#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED) 714 SDIO_IGNORED)
715#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= \
716 ~SDIO_IGNORED)
684 717
685struct rtsx_chip { 718struct rtsx_chip {
686 struct rtsx_dev *rtsx; 719 struct rtsx_dev *rtsx;
@@ -957,12 +990,12 @@ void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
957int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data); 990int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
958int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data); 991int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
959int rtsx_write_cfg_dw(struct rtsx_chip *chip, 992int rtsx_write_cfg_dw(struct rtsx_chip *chip,
960 u8 func_no, u16 addr, u32 mask, u32 val); 993 u8 func_no, u16 addr, u32 mask, u32 val);
961int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val); 994int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
962int rtsx_write_cfg_seq(struct rtsx_chip *chip, 995int rtsx_write_cfg_seq(struct rtsx_chip *chip,
963 u8 func, u16 addr, u8 *buf, int len); 996 u8 func, u16 addr, u8 *buf, int len);
964int rtsx_read_cfg_seq(struct rtsx_chip *chip, 997int rtsx_read_cfg_seq(struct rtsx_chip *chip,
965 u8 func, u16 addr, u8 *buf, int len); 998 u8 func, u16 addr, u8 *buf, int len);
966int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val); 999int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
967int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val); 1000int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
968int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val); 1001int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
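The rtsx_chip.h hunks above break the long CHK_SD_*/CHK_MMC_* function-like macros across lines with '\' continuations while keeping every macro argument parenthesized. A standalone sketch of that macro style follows; the flag values and names are simplified stand-ins, not the header's real definitions.

/*
 * Standalone sketch (not part of the patch): a multi-line function-like
 * macro with parenthesized arguments and '\' continuations.
 */
#include <stdio.h>

#define TYPE_SD		0x01
#define SD_HS		0x0100

#define CHK_TYPE(card)		(((card)->type & 0xFF) == TYPE_SD)
#define CHK_HIGH_SPEED(card)	(CHK_TYPE(card) && \
				 ((card)->type & SD_HS))

struct card { unsigned int type; };

int main(void)
{
	struct card card = { .type = TYPE_SD | SD_HS };

	printf("high speed: %d\n", CHK_HIGH_SPEED(&card) != 0);
	return 0;
}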
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index becb4bba166c..a95c5de1aa00 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -354,7 +354,7 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
354 354
355 case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD: 355 case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
356 set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0, 356 set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
357 ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1); 357 ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
358 break; 358 break;
359 359
360 case SENSE_TYPE_FORMAT_IN_PROGRESS: 360 case SENSE_TYPE_FORMAT_IN_PROGRESS:
@@ -397,10 +397,10 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
397} 397}
398 398
399void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code, 399void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
400 u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0, 400 u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
401 u16 sns_key_info1) 401 u16 sns_key_info1)
402{ 402{
403 struct sense_data_t *sense = &(chip->sense_buffer[lun]); 403 struct sense_data_t *sense = &chip->sense_buffer[lun];
404 404
405 sense->err_code = err_code; 405 sense->err_code = err_code;
406 sense->sense_key = sense_key; 406 sense->sense_key = sense_key;
@@ -436,7 +436,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
436 436
437#ifdef SUPPORT_SD_LOCK 437#ifdef SUPPORT_SD_LOCK
438 if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) { 438 if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
439 struct sd_info *sd_card = &(chip->sd_card); 439 struct sd_info *sd_card = &chip->sd_card;
440 440
441 if (sd_card->sd_lock_notify) { 441 if (sd_card->sd_lock_notify) {
442 sd_card->sd_lock_notify = 0; 442 sd_card->sd_lock_notify = 0;
@@ -444,7 +444,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
444 return TRANSPORT_FAILED; 444 return TRANSPORT_FAILED;
445 } else if (sd_card->sd_lock_status & SD_LOCKED) { 445 } else if (sd_card->sd_lock_status & SD_LOCKED) {
446 set_sense_type(chip, lun, 446 set_sense_type(chip, lun,
447 SENSE_TYPE_MEDIA_READ_FORBIDDEN); 447 SENSE_TYPE_MEDIA_READ_FORBIDDEN);
448 return TRANSPORT_FAILED; 448 return TRANSPORT_FAILED;
449 } 449 }
450 } 450 }
@@ -514,7 +514,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
514 514
515#ifdef SUPPORT_MAGIC_GATE 515#ifdef SUPPORT_MAGIC_GATE
516 if ((chip->mspro_formatter_enable) && 516 if ((chip->mspro_formatter_enable) &&
517 (chip->lun2card[lun] & MS_CARD)) 517 (chip->lun2card[lun] & MS_CARD))
518#else 518#else
519 if (chip->mspro_formatter_enable) 519 if (chip->mspro_formatter_enable)
520#endif 520#endif
@@ -603,7 +603,7 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
603 603
604 if (prevent) { 604 if (prevent) {
605 set_sense_type(chip, SCSI_LUN(srb), 605 set_sense_type(chip, SCSI_LUN(srb),
606 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 606 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
607 rtsx_trace(chip); 607 rtsx_trace(chip);
608 return TRANSPORT_FAILED; 608 return TRANSPORT_FAILED;
609 } 609 }
@@ -615,13 +615,13 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
615{ 615{
616 struct sense_data_t *sense; 616 struct sense_data_t *sense;
617 unsigned int lun = SCSI_LUN(srb); 617 unsigned int lun = SCSI_LUN(srb);
618 struct ms_info *ms_card = &(chip->ms_card); 618 struct ms_info *ms_card = &chip->ms_card;
619 unsigned char *tmp, *buf; 619 unsigned char *tmp, *buf;
620 620
621 sense = &(chip->sense_buffer[lun]); 621 sense = &chip->sense_buffer[lun];
622 622
623 if ((get_lun_card(chip, lun) == MS_CARD) && 623 if ((get_lun_card(chip, lun) == MS_CARD) &&
624 ms_card->pro_under_formatting) { 624 ms_card->pro_under_formatting) {
625 if (ms_card->format_status == FORMAT_SUCCESS) { 625 if (ms_card->format_status == FORMAT_SUCCESS) {
626 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); 626 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
627 ms_card->pro_under_formatting = 0; 627 ms_card->pro_under_formatting = 0;
@@ -629,7 +629,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
629 } else if (ms_card->format_status == FORMAT_IN_PROGRESS) { 629 } else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
630 /* Logical Unit Not Ready Format in Progress */ 630 /* Logical Unit Not Ready Format in Progress */
631 set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 631 set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
632 0, (u16)(ms_card->progress)); 632 0, (u16)(ms_card->progress));
633 } else { 633 } else {
634 /* Format Command Failed */ 634 /* Format Command Failed */
635 set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED); 635 set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
@@ -659,9 +659,9 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
659} 659}
660 660
661static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd, 661static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
662 int lun, u8 *buf, int buf_len) 662 int lun, u8 *buf, int buf_len)
663{ 663{
664 struct ms_info *ms_card = &(chip->ms_card); 664 struct ms_info *ms_card = &chip->ms_card;
665 int sys_info_offset; 665 int sys_info_offset;
666 int data_size = buf_len; 666 int data_size = buf_len;
667 bool support_format = false; 667 bool support_format = false;
@@ -754,10 +754,10 @@ static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
754static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) 754static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
755{ 755{
756 unsigned int lun = SCSI_LUN(srb); 756 unsigned int lun = SCSI_LUN(srb);
757 unsigned int dataSize; 757 unsigned int data_size;
758 int status; 758 int status;
759 bool pro_formatter_flag; 759 bool pro_formatter_flag;
760 unsigned char pageCode, *buf; 760 unsigned char page_code, *buf;
761 u8 card = get_lun_card(chip, lun); 761 u8 card = get_lun_card(chip, lun);
762 762
763#ifndef SUPPORT_MAGIC_GATE 763#ifndef SUPPORT_MAGIC_GATE
@@ -770,11 +770,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
770#endif 770#endif
771 771
772 pro_formatter_flag = false; 772 pro_formatter_flag = false;
773 dataSize = 8; 773 data_size = 8;
774#ifdef SUPPORT_MAGIC_GATE 774#ifdef SUPPORT_MAGIC_GATE
775 if ((chip->lun2card[lun] & MS_CARD)) { 775 if ((chip->lun2card[lun] & MS_CARD)) {
776 if (!card || (card == MS_CARD)) { 776 if (!card || (card == MS_CARD)) {
777 dataSize = 108; 777 data_size = 108;
778 if (chip->mspro_formatter_enable) 778 if (chip->mspro_formatter_enable)
779 pro_formatter_flag = true; 779 pro_formatter_flag = true;
780 } 780 }
@@ -783,28 +783,28 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
783 if (card == MS_CARD) { 783 if (card == MS_CARD) {
784 if (chip->mspro_formatter_enable) { 784 if (chip->mspro_formatter_enable) {
785 pro_formatter_flag = true; 785 pro_formatter_flag = true;
786 dataSize = 108; 786 data_size = 108;
787 } 787 }
788 } 788 }
789#endif 789#endif
790 790
791 buf = kmalloc(dataSize, GFP_KERNEL); 791 buf = kmalloc(data_size, GFP_KERNEL);
792 if (!buf) { 792 if (!buf) {
793 rtsx_trace(chip); 793 rtsx_trace(chip);
794 return TRANSPORT_ERROR; 794 return TRANSPORT_ERROR;
795 } 795 }
796 796
797 pageCode = srb->cmnd[2] & 0x3f; 797 page_code = srb->cmnd[2] & 0x3f;
798 798
799 if ((pageCode == 0x3F) || (pageCode == 0x1C) || 799 if ((page_code == 0x3F) || (page_code == 0x1C) ||
800 (pageCode == 0x00) || 800 (page_code == 0x00) ||
801 (pro_formatter_flag && (pageCode == 0x20))) { 801 (pro_formatter_flag && (page_code == 0x20))) {
802 if (srb->cmnd[0] == MODE_SENSE) { 802 if (srb->cmnd[0] == MODE_SENSE) {
803 if ((pageCode == 0x3F) || (pageCode == 0x20)) { 803 if ((page_code == 0x3F) || (page_code == 0x20)) {
804 ms_mode_sense(chip, srb->cmnd[0], 804 ms_mode_sense(chip, srb->cmnd[0],
805 lun, buf, dataSize); 805 lun, buf, data_size);
806 } else { 806 } else {
807 dataSize = 4; 807 data_size = 4;
808 buf[0] = 0x03; 808 buf[0] = 0x03;
809 buf[1] = 0x00; 809 buf[1] = 0x00;
810 if (check_card_wp(chip, lun)) 810 if (check_card_wp(chip, lun))
@@ -815,11 +815,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
815 buf[3] = 0x00; 815 buf[3] = 0x00;
816 } 816 }
817 } else { 817 } else {
818 if ((pageCode == 0x3F) || (pageCode == 0x20)) { 818 if ((page_code == 0x3F) || (page_code == 0x20)) {
819 ms_mode_sense(chip, srb->cmnd[0], 819 ms_mode_sense(chip, srb->cmnd[0],
820 lun, buf, dataSize); 820 lun, buf, data_size);
821 } else { 821 } else {
822 dataSize = 8; 822 data_size = 8;
823 buf[0] = 0x00; 823 buf[0] = 0x00;
824 buf[1] = 0x06; 824 buf[1] = 0x06;
825 buf[2] = 0x00; 825 buf[2] = 0x00;
@@ -842,7 +842,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
842 842
843 if (status == TRANSPORT_GOOD) { 843 if (status == TRANSPORT_GOOD) {
844 unsigned int len = min_t(unsigned int, scsi_bufflen(srb), 844 unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
845 dataSize); 845 data_size);
846 rtsx_stor_set_xfer_buf(buf, len, srb); 846 rtsx_stor_set_xfer_buf(buf, len, srb);
847 scsi_set_resid(srb, scsi_bufflen(srb) - len); 847 scsi_set_resid(srb, scsi_bufflen(srb) - len);
848 } 848 }
@@ -854,7 +854,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
854static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) 854static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
855{ 855{
856#ifdef SUPPORT_SD_LOCK 856#ifdef SUPPORT_SD_LOCK
857 struct sd_info *sd_card = &(chip->sd_card); 857 struct sd_info *sd_card = &chip->sd_card;
858#endif 858#endif
859 unsigned int lun = SCSI_LUN(srb); 859 unsigned int lun = SCSI_LUN(srb);
860 int retval; 860 int retval;
@@ -896,7 +896,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
896 if (sd_card->sd_lock_status & SD_LOCKED) { 896 if (sd_card->sd_lock_status & SD_LOCKED) {
897 dev_dbg(rtsx_dev(chip), "SD card locked!\n"); 897 dev_dbg(rtsx_dev(chip), "SD card locked!\n");
898 set_sense_type(chip, lun, 898 set_sense_type(chip, lun,
899 SENSE_TYPE_MEDIA_READ_FORBIDDEN); 899 SENSE_TYPE_MEDIA_READ_FORBIDDEN);
900 rtsx_trace(chip); 900 rtsx_trace(chip);
901 return TRANSPORT_FAILED; 901 return TRANSPORT_FAILED;
902 } 902 }
@@ -932,7 +932,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
932 * need to judge start_sec at first 932 * need to judge start_sec at first
933 */ 933 */
934 if ((start_sec > get_card_size(chip, lun)) || 934 if ((start_sec > get_card_size(chip, lun)) ||
935 ((start_sec + sec_cnt) > get_card_size(chip, lun))) { 935 ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
936 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE); 936 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
937 rtsx_trace(chip); 937 rtsx_trace(chip);
938 return TRANSPORT_FAILED; 938 return TRANSPORT_FAILED;
@@ -947,7 +947,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
947 dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n"); 947 dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n");
948 if (srb->sc_data_direction == DMA_FROM_DEVICE) 948 if (srb->sc_data_direction == DMA_FROM_DEVICE)
949 set_sense_type(chip, lun, 949 set_sense_type(chip, lun,
950 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 950 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
951 else 951 else
952 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); 952 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
953 953
@@ -959,7 +959,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
959 if (check_card_wp(chip, lun)) { 959 if (check_card_wp(chip, lun)) {
960 dev_dbg(rtsx_dev(chip), "Write protected card!\n"); 960 dev_dbg(rtsx_dev(chip), "Write protected card!\n");
961 set_sense_type(chip, lun, 961 set_sense_type(chip, lun,
962 SENSE_TYPE_MEDIA_WRITE_PROTECT); 962 SENSE_TYPE_MEDIA_WRITE_PROTECT);
963 rtsx_trace(chip); 963 rtsx_trace(chip);
964 return TRANSPORT_FAILED; 964 return TRANSPORT_FAILED;
965 } 965 }
@@ -973,15 +973,16 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
973 } else { 973 } else {
974 chip->rw_fail_cnt[lun]++; 974 chip->rw_fail_cnt[lun]++;
975 if (srb->sc_data_direction == DMA_FROM_DEVICE) 975 if (srb->sc_data_direction == DMA_FROM_DEVICE)
976 set_sense_type(chip, lun, 976 set_sense_type
977 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 977 (chip, lun,
978 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
978 else 979 else
979 set_sense_type(chip, lun, 980 set_sense_type(chip, lun,
980 SENSE_TYPE_MEDIA_WRITE_ERR); 981 SENSE_TYPE_MEDIA_WRITE_ERR);
981 } 982 }
982 retval = TRANSPORT_FAILED; 983 retval = TRANSPORT_FAILED;
983 rtsx_trace(chip); 984 rtsx_trace(chip);
984 goto Exit; 985 goto exit;
985 } else { 986 } else {
986 chip->rw_fail_cnt[lun] = 0; 987 chip->rw_fail_cnt[lun] = 0;
987 retval = TRANSPORT_GOOD; 988 retval = TRANSPORT_GOOD;
@@ -989,7 +990,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
989 990
990 scsi_set_resid(srb, 0); 991 scsi_set_resid(srb, 0);
991 992
992Exit: 993exit:
993 return retval; 994 return retval;
994} 995}
995 996
@@ -1025,8 +1026,8 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1025 1026
1026 /* Capacity List Length */ 1027 /* Capacity List Length */
1027 if ((buf_len > 12) && chip->mspro_formatter_enable && 1028 if ((buf_len > 12) && chip->mspro_formatter_enable &&
1028 (chip->lun2card[lun] & MS_CARD) && 1029 (chip->lun2card[lun] & MS_CARD) &&
1029 (!card || (card == MS_CARD))) { 1030 (!card || (card == MS_CARD))) {
1030 buf[i++] = 0x10; 1031 buf[i++] = 0x10;
1031 desc_cnt = 2; 1032 desc_cnt = 2;
1032 } else { 1033 } else {
@@ -1143,7 +1144,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1143 if (retval != STATUS_SUCCESS) { 1144 if (retval != STATUS_SUCCESS) {
1144 vfree(buf); 1145 vfree(buf);
1145 set_sense_type(chip, SCSI_LUN(srb), 1146 set_sense_type(chip, SCSI_LUN(srb),
1146 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1147 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1147 rtsx_trace(chip); 1148 rtsx_trace(chip);
1148 return TRANSPORT_FAILED; 1149 return TRANSPORT_FAILED;
1149 } 1150 }
@@ -1153,7 +1154,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1153 if (retval != STATUS_SUCCESS) { 1154 if (retval != STATUS_SUCCESS) {
1154 vfree(buf); 1155 vfree(buf);
1155 set_sense_type(chip, SCSI_LUN(srb), 1156 set_sense_type(chip, SCSI_LUN(srb),
1156 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1157 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1157 rtsx_trace(chip); 1158 rtsx_trace(chip);
1158 return TRANSPORT_FAILED; 1159 return TRANSPORT_FAILED;
1159 } 1160 }
@@ -1195,7 +1196,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1195 retval = spi_erase_eeprom_chip(chip); 1196 retval = spi_erase_eeprom_chip(chip);
1196 if (retval != STATUS_SUCCESS) { 1197 if (retval != STATUS_SUCCESS) {
1197 set_sense_type(chip, SCSI_LUN(srb), 1198 set_sense_type(chip, SCSI_LUN(srb),
1198 SENSE_TYPE_MEDIA_WRITE_ERR); 1199 SENSE_TYPE_MEDIA_WRITE_ERR);
1199 rtsx_trace(chip); 1200 rtsx_trace(chip);
1200 return TRANSPORT_FAILED; 1201 return TRANSPORT_FAILED;
1201 } 1202 }
@@ -1216,7 +1217,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1216 if (retval != STATUS_SUCCESS) { 1217 if (retval != STATUS_SUCCESS) {
1217 vfree(buf); 1218 vfree(buf);
1218 set_sense_type(chip, SCSI_LUN(srb), 1219 set_sense_type(chip, SCSI_LUN(srb),
1219 SENSE_TYPE_MEDIA_WRITE_ERR); 1220 SENSE_TYPE_MEDIA_WRITE_ERR);
1220 rtsx_trace(chip); 1221 rtsx_trace(chip);
1221 return TRANSPORT_FAILED; 1222 return TRANSPORT_FAILED;
1222 } 1223 }
@@ -1247,7 +1248,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1247 1248
1248 if (addr < 0xFC00) { 1249 if (addr < 0xFC00) {
1249 set_sense_type(chip, SCSI_LUN(srb), 1250 set_sense_type(chip, SCSI_LUN(srb),
1250 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1251 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1251 rtsx_trace(chip); 1252 rtsx_trace(chip);
1252 return TRANSPORT_FAILED; 1253 return TRANSPORT_FAILED;
1253 } 1254 }
@@ -1271,7 +1272,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1271 if (retval != STATUS_SUCCESS) { 1272 if (retval != STATUS_SUCCESS) {
1272 vfree(buf); 1273 vfree(buf);
1273 set_sense_type(chip, SCSI_LUN(srb), 1274 set_sense_type(chip, SCSI_LUN(srb),
1274 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1275 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1275 rtsx_trace(chip); 1276 rtsx_trace(chip);
1276 return TRANSPORT_FAILED; 1277 return TRANSPORT_FAILED;
1277 } 1278 }
@@ -1305,7 +1306,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1305 1306
1306 if (addr < 0xFC00) { 1307 if (addr < 0xFC00) {
1307 set_sense_type(chip, SCSI_LUN(srb), 1308 set_sense_type(chip, SCSI_LUN(srb),
1308 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1309 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1309 rtsx_trace(chip); 1310 rtsx_trace(chip);
1310 return TRANSPORT_FAILED; 1311 return TRANSPORT_FAILED;
1311 } 1312 }
@@ -1333,7 +1334,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1333 if (retval != STATUS_SUCCESS) { 1334 if (retval != STATUS_SUCCESS) {
1334 vfree(buf); 1335 vfree(buf);
1335 set_sense_type(chip, SCSI_LUN(srb), 1336 set_sense_type(chip, SCSI_LUN(srb),
1336 SENSE_TYPE_MEDIA_WRITE_ERR); 1337 SENSE_TYPE_MEDIA_WRITE_ERR);
1337 rtsx_trace(chip); 1338 rtsx_trace(chip);
1338 return TRANSPORT_FAILED; 1339 return TRANSPORT_FAILED;
1339 } 1340 }
@@ -1346,7 +1347,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1346 1347
1347static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip) 1348static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1348{ 1349{
1349 struct sd_info *sd_card = &(chip->sd_card); 1350 struct sd_info *sd_card = &chip->sd_card;
1350 unsigned int lun = SCSI_LUN(srb); 1351 unsigned int lun = SCSI_LUN(srb);
1351 1352
1352 if (!check_card_ready(chip, lun)) { 1353 if (!check_card_ready(chip, lun)) {
@@ -1399,7 +1400,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1399 1400
1400 if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) { 1401 if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
1401 set_sense_type(chip, SCSI_LUN(srb), 1402 set_sense_type(chip, SCSI_LUN(srb),
1402 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1403 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1403 rtsx_trace(chip); 1404 rtsx_trace(chip);
1404 return TRANSPORT_FAILED; 1405 return TRANSPORT_FAILED;
1405 } 1406 }
@@ -1522,9 +1523,9 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1522 1523
1523 if (srb->cmnd[3] == 1) { 1524 if (srb->cmnd[3] == 1) {
1524 /* Variable Clock */ 1525 /* Variable Clock */
1525 struct xd_info *xd_card = &(chip->xd_card); 1526 struct xd_info *xd_card = &chip->xd_card;
1526 struct sd_info *sd_card = &(chip->sd_card); 1527 struct sd_info *sd_card = &chip->sd_card;
1527 struct ms_info *ms_card = &(chip->ms_card); 1528 struct ms_info *ms_card = &chip->ms_card;
1528 1529
1529 switch (srb->cmnd[4]) { 1530 switch (srb->cmnd[4]) {
1530 case XD_CARD: 1531 case XD_CARD:
@@ -1541,7 +1542,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1541 1542
1542 default: 1543 default:
1543 set_sense_type(chip, lun, 1544 set_sense_type(chip, lun,
1544 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1545 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1545 rtsx_trace(chip); 1546 rtsx_trace(chip);
1546 return TRANSPORT_FAILED; 1547 return TRANSPORT_FAILED;
1547 } 1548 }
@@ -1556,7 +1557,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1556 rtsx_disable_aspm(chip); 1557 rtsx_disable_aspm(chip);
1557 1558
1558 if (chip->ss_en && 1559 if (chip->ss_en &&
1559 (rtsx_get_stat(chip) == RTSX_STAT_SS)) { 1560 (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
1560 rtsx_exit_ss(chip); 1561 rtsx_exit_ss(chip);
1561 wait_timeout(100); 1562 wait_timeout(100);
1562 } 1563 }
@@ -1565,7 +1566,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1565 retval = rtsx_force_power_on(chip, SSC_PDCTL); 1566 retval = rtsx_force_power_on(chip, SSC_PDCTL);
1566 if (retval != STATUS_SUCCESS) { 1567 if (retval != STATUS_SUCCESS) {
1567 set_sense_type(chip, SCSI_LUN(srb), 1568 set_sense_type(chip, SCSI_LUN(srb),
1568 SENSE_TYPE_MEDIA_WRITE_ERR); 1569 SENSE_TYPE_MEDIA_WRITE_ERR);
1569 rtsx_trace(chip); 1570 rtsx_trace(chip);
1570 return TRANSPORT_FAILED; 1571 return TRANSPORT_FAILED;
1571 } 1572 }
@@ -1586,9 +1587,9 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1586 unsigned int lun = SCSI_LUN(srb); 1587 unsigned int lun = SCSI_LUN(srb);
1587 1588
1588 if (srb->cmnd[3] == 1) { 1589 if (srb->cmnd[3] == 1) {
1589 struct xd_info *xd_card = &(chip->xd_card); 1590 struct xd_info *xd_card = &chip->xd_card;
1590 struct sd_info *sd_card = &(chip->sd_card); 1591 struct sd_info *sd_card = &chip->sd_card;
1591 struct ms_info *ms_card = &(chip->ms_card); 1592 struct ms_info *ms_card = &chip->ms_card;
1592 u8 tmp; 1593 u8 tmp;
1593 1594
1594 switch (srb->cmnd[4]) { 1595 switch (srb->cmnd[4]) {
@@ -1606,7 +1607,7 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1606 1607
1607 default: 1608 default:
1608 set_sense_type(chip, lun, 1609 set_sense_type(chip, lun,
1609 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1610 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1610 rtsx_trace(chip); 1611 rtsx_trace(chip);
1611 return TRANSPORT_FAILED; 1612 return TRANSPORT_FAILED;
1612 } 1613 }
@@ -1648,14 +1649,15 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1648 dev_dbg(rtsx_dev(chip), "Write to device\n"); 1649 dev_dbg(rtsx_dev(chip), "Write to device\n");
1649 1650
1650 retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len, 1651 retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
1651 scsi_sg_count(srb), srb->sc_data_direction, 1000); 1652 scsi_sg_count(srb), srb->sc_data_direction,
1653 1000);
1652 if (retval < 0) { 1654 if (retval < 0) {
1653 if (srb->sc_data_direction == DMA_FROM_DEVICE) 1655 if (srb->sc_data_direction == DMA_FROM_DEVICE)
1654 set_sense_type(chip, lun, 1656 set_sense_type(chip, lun,
1655 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1657 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1656 else 1658 else
1657 set_sense_type(chip, lun, 1659 set_sense_type(chip, lun,
1658 SENSE_TYPE_MEDIA_WRITE_ERR); 1660 SENSE_TYPE_MEDIA_WRITE_ERR);
1659 1661
1660 rtsx_trace(chip); 1662 rtsx_trace(chip);
1661 return TRANSPORT_FAILED; 1663 return TRANSPORT_FAILED;
@@ -1667,8 +1669,8 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1667 1669
1668static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) 1670static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1669{ 1671{
1670 struct sd_info *sd_card = &(chip->sd_card); 1672 struct sd_info *sd_card = &chip->sd_card;
1671 struct ms_info *ms_card = &(chip->ms_card); 1673 struct ms_info *ms_card = &chip->ms_card;
1672 int buf_len; 1674 int buf_len;
1673 unsigned int lun = SCSI_LUN(srb); 1675 unsigned int lun = SCSI_LUN(srb);
1674 u8 card = get_lun_card(chip, lun); 1676 u8 card = get_lun_card(chip, lun);
@@ -1699,8 +1701,8 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1699 1701
1700#ifdef SUPPORT_OCP 1702#ifdef SUPPORT_OCP
1701 status[8] = 0; 1703 status[8] = 0;
1702 if (CHECK_LUN_MODE(chip, 1704 if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
1703 SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) { 1705 (chip->lun2card[lun] == MS_CARD)) {
1704 oc_now_mask = MS_OC_NOW; 1706 oc_now_mask = MS_OC_NOW;
1705 oc_ever_mask = MS_OC_EVER; 1707 oc_ever_mask = MS_OC_EVER;
1706 } else { 1708 } else {
@@ -1804,7 +1806,7 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1804 1806
1805 if (!CHECK_PID(chip, 0x5208)) { 1807 if (!CHECK_PID(chip, 0x5208)) {
1806 set_sense_type(chip, SCSI_LUN(srb), 1808 set_sense_type(chip, SCSI_LUN(srb),
1807 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1809 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1808 rtsx_trace(chip); 1810 rtsx_trace(chip);
1809 return TRANSPORT_FAILED; 1811 return TRANSPORT_FAILED;
1810 } 1812 }
@@ -1884,7 +1886,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1884 cmd_type = srb->cmnd[4]; 1886 cmd_type = srb->cmnd[4];
1885 if (cmd_type > 2) { 1887 if (cmd_type > 2) {
1886 set_sense_type(chip, lun, 1888 set_sense_type(chip, lun,
1887 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1889 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1888 rtsx_trace(chip); 1890 rtsx_trace(chip);
1889 return TRANSPORT_FAILED; 1891 return TRANSPORT_FAILED;
1890 } 1892 }
@@ -1903,7 +1905,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1903 value = *(rtsx_get_cmd_data(chip) + idx); 1905 value = *(rtsx_get_cmd_data(chip) + idx);
1904 if (scsi_bufflen(srb) < 1) { 1906 if (scsi_bufflen(srb) < 1) {
1905 set_sense_type(chip, lun, 1907 set_sense_type(chip, lun,
1906 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 1908 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
1907 rtsx_trace(chip); 1909 rtsx_trace(chip);
1908 return TRANSPORT_FAILED; 1910 return TRANSPORT_FAILED;
1909 } 1911 }
@@ -1971,7 +1973,7 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1971 if (retval != STATUS_SUCCESS) { 1973 if (retval != STATUS_SUCCESS) {
1972 vfree(buf); 1974 vfree(buf);
1973 set_sense_type(chip, SCSI_LUN(srb), 1975 set_sense_type(chip, SCSI_LUN(srb),
1974 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1976 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1975 rtsx_trace(chip); 1977 rtsx_trace(chip);
1976 return TRANSPORT_FAILED; 1978 return TRANSPORT_FAILED;
1977 } 1979 }
@@ -1980,8 +1982,9 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1980 retval = rtsx_read_phy_register(chip, addr + i, &val); 1982 retval = rtsx_read_phy_register(chip, addr + i, &val);
1981 if (retval != STATUS_SUCCESS) { 1983 if (retval != STATUS_SUCCESS) {
1982 vfree(buf); 1984 vfree(buf);
1983 set_sense_type(chip, SCSI_LUN(srb), 1985 set_sense_type
1984 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 1986 (chip, SCSI_LUN(srb),
1987 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
1985 rtsx_trace(chip); 1988 rtsx_trace(chip);
1986 return TRANSPORT_FAILED; 1989 return TRANSPORT_FAILED;
1987 } 1990 }
@@ -2039,7 +2042,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2039 if (retval != STATUS_SUCCESS) { 2042 if (retval != STATUS_SUCCESS) {
2040 vfree(buf); 2043 vfree(buf);
2041 set_sense_type(chip, SCSI_LUN(srb), 2044 set_sense_type(chip, SCSI_LUN(srb),
2042 SENSE_TYPE_MEDIA_WRITE_ERR); 2045 SENSE_TYPE_MEDIA_WRITE_ERR);
2043 rtsx_trace(chip); 2046 rtsx_trace(chip);
2044 return TRANSPORT_FAILED; 2047 return TRANSPORT_FAILED;
2045 } 2048 }
@@ -2050,7 +2053,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2050 if (retval != STATUS_SUCCESS) { 2053 if (retval != STATUS_SUCCESS) {
2051 vfree(buf); 2054 vfree(buf);
2052 set_sense_type(chip, SCSI_LUN(srb), 2055 set_sense_type(chip, SCSI_LUN(srb),
2053 SENSE_TYPE_MEDIA_WRITE_ERR); 2056 SENSE_TYPE_MEDIA_WRITE_ERR);
2054 rtsx_trace(chip); 2057 rtsx_trace(chip);
2055 return TRANSPORT_FAILED; 2058 return TRANSPORT_FAILED;
2056 } 2059 }
@@ -2090,7 +2093,7 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2090 retval = spi_erase_eeprom_chip(chip); 2093 retval = spi_erase_eeprom_chip(chip);
2091 if (retval != STATUS_SUCCESS) { 2094 if (retval != STATUS_SUCCESS) {
2092 set_sense_type(chip, SCSI_LUN(srb), 2095 set_sense_type(chip, SCSI_LUN(srb),
2093 SENSE_TYPE_MEDIA_WRITE_ERR); 2096 SENSE_TYPE_MEDIA_WRITE_ERR);
2094 rtsx_trace(chip); 2097 rtsx_trace(chip);
2095 return TRANSPORT_FAILED; 2098 return TRANSPORT_FAILED;
2096 } 2099 }
@@ -2098,13 +2101,13 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2098 retval = spi_erase_eeprom_byte(chip, addr); 2101 retval = spi_erase_eeprom_byte(chip, addr);
2099 if (retval != STATUS_SUCCESS) { 2102 if (retval != STATUS_SUCCESS) {
2100 set_sense_type(chip, SCSI_LUN(srb), 2103 set_sense_type(chip, SCSI_LUN(srb),
2101 SENSE_TYPE_MEDIA_WRITE_ERR); 2104 SENSE_TYPE_MEDIA_WRITE_ERR);
2102 rtsx_trace(chip); 2105 rtsx_trace(chip);
2103 return TRANSPORT_FAILED; 2106 return TRANSPORT_FAILED;
2104 } 2107 }
2105 } else { 2108 } else {
2106 set_sense_type(chip, SCSI_LUN(srb), 2109 set_sense_type(chip, SCSI_LUN(srb),
2107 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2110 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2108 rtsx_trace(chip); 2111 rtsx_trace(chip);
2109 return TRANSPORT_FAILED; 2112 return TRANSPORT_FAILED;
2110 } 2113 }
@@ -2139,7 +2142,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2139 if (retval != STATUS_SUCCESS) { 2142 if (retval != STATUS_SUCCESS) {
2140 vfree(buf); 2143 vfree(buf);
2141 set_sense_type(chip, SCSI_LUN(srb), 2144 set_sense_type(chip, SCSI_LUN(srb),
2142 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2145 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2143 rtsx_trace(chip); 2146 rtsx_trace(chip);
2144 return TRANSPORT_FAILED; 2147 return TRANSPORT_FAILED;
2145 } 2148 }
@@ -2149,7 +2152,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2149 if (retval != STATUS_SUCCESS) { 2152 if (retval != STATUS_SUCCESS) {
2150 vfree(buf); 2153 vfree(buf);
2151 set_sense_type(chip, SCSI_LUN(srb), 2154 set_sense_type(chip, SCSI_LUN(srb),
2152 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2155 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2153 rtsx_trace(chip); 2156 rtsx_trace(chip);
2154 return TRANSPORT_FAILED; 2157 return TRANSPORT_FAILED;
2155 } 2158 }
@@ -2204,7 +2207,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2204 if (retval != STATUS_SUCCESS) { 2207 if (retval != STATUS_SUCCESS) {
2205 vfree(buf); 2208 vfree(buf);
2206 set_sense_type(chip, SCSI_LUN(srb), 2209 set_sense_type(chip, SCSI_LUN(srb),
2207 SENSE_TYPE_MEDIA_WRITE_ERR); 2210 SENSE_TYPE_MEDIA_WRITE_ERR);
2208 rtsx_trace(chip); 2211 rtsx_trace(chip);
2209 return TRANSPORT_FAILED; 2212 return TRANSPORT_FAILED;
2210 } 2213 }
@@ -2242,7 +2245,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2242 if (retval != STATUS_SUCCESS) { 2245 if (retval != STATUS_SUCCESS) {
2243 vfree(buf); 2246 vfree(buf);
2244 set_sense_type(chip, SCSI_LUN(srb), 2247 set_sense_type(chip, SCSI_LUN(srb),
2245 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2248 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2246 rtsx_trace(chip); 2249 rtsx_trace(chip);
2247 return TRANSPORT_FAILED; 2250 return TRANSPORT_FAILED;
2248 } 2251 }
@@ -2252,7 +2255,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2252 if (retval != STATUS_SUCCESS) { 2255 if (retval != STATUS_SUCCESS) {
2253 vfree(buf); 2256 vfree(buf);
2254 set_sense_type(chip, SCSI_LUN(srb), 2257 set_sense_type(chip, SCSI_LUN(srb),
2255 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2258 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2256 rtsx_trace(chip); 2259 rtsx_trace(chip);
2257 return TRANSPORT_FAILED; 2260 return TRANSPORT_FAILED;
2258 } 2261 }
@@ -2311,7 +2314,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2311 } 2314 }
2312 2315
2313 retval = rtsx_write_register(chip, PWR_GATE_CTRL, 2316 retval = rtsx_write_register(chip, PWR_GATE_CTRL,
2314 LDO3318_PWR_MASK, LDO_OFF); 2317 LDO3318_PWR_MASK, LDO_OFF);
2315 if (retval != STATUS_SUCCESS) { 2318 if (retval != STATUS_SUCCESS) {
2316 vfree(buf); 2319 vfree(buf);
2317 rtsx_trace(chip); 2320 rtsx_trace(chip);
@@ -2321,7 +2324,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2321 wait_timeout(600); 2324 wait_timeout(600);
2322 2325
2323 retval = rtsx_write_phy_register(chip, 0x08, 2326 retval = rtsx_write_phy_register(chip, 0x08,
2324 0x4C00 | chip->phy_voltage); 2327 0x4C00 | chip->phy_voltage);
2325 if (retval != STATUS_SUCCESS) { 2328 if (retval != STATUS_SUCCESS) {
2326 vfree(buf); 2329 vfree(buf);
2327 rtsx_trace(chip); 2330 rtsx_trace(chip);
@@ -2329,7 +2332,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2329 } 2332 }
2330 2333
2331 retval = rtsx_write_register(chip, PWR_GATE_CTRL, 2334 retval = rtsx_write_register(chip, PWR_GATE_CTRL,
2332 LDO3318_PWR_MASK, LDO_ON); 2335 LDO3318_PWR_MASK, LDO_ON);
2333 if (retval != STATUS_SUCCESS) { 2336 if (retval != STATUS_SUCCESS) {
2334 vfree(buf); 2337 vfree(buf);
2335 rtsx_trace(chip); 2338 rtsx_trace(chip);
@@ -2352,14 +2355,14 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2352 retval = rtsx_write_efuse(chip, addr + i, buf[i]); 2355 retval = rtsx_write_efuse(chip, addr + i, buf[i]);
2353 if (retval != STATUS_SUCCESS) { 2356 if (retval != STATUS_SUCCESS) {
2354 set_sense_type(chip, SCSI_LUN(srb), 2357 set_sense_type(chip, SCSI_LUN(srb),
2355 SENSE_TYPE_MEDIA_WRITE_ERR); 2358 SENSE_TYPE_MEDIA_WRITE_ERR);
2356 result = TRANSPORT_FAILED; 2359 result = TRANSPORT_FAILED;
2357 rtsx_trace(chip); 2360 rtsx_trace(chip);
2358 goto Exit; 2361 goto exit;
2359 } 2362 }
2360 } 2363 }
2361 2364
2362Exit: 2365exit:
2363 vfree(buf); 2366 vfree(buf);
2364 2367
2365 retval = card_power_off(chip, SPI_CARD); 2368 retval = card_power_off(chip, SPI_CARD);
@@ -2370,7 +2373,7 @@ Exit:
2370 2373
2371 if (chip->asic_code) { 2374 if (chip->asic_code) {
2372 retval = rtsx_write_register(chip, PWR_GATE_CTRL, 2375 retval = rtsx_write_register(chip, PWR_GATE_CTRL,
2373 LDO3318_PWR_MASK, LDO_OFF); 2376 LDO3318_PWR_MASK, LDO_OFF);
2374 if (retval != STATUS_SUCCESS) { 2377 if (retval != STATUS_SUCCESS) {
2375 rtsx_trace(chip); 2378 rtsx_trace(chip);
2376 return TRANSPORT_ERROR; 2379 return TRANSPORT_ERROR;
@@ -2385,7 +2388,7 @@ Exit:
2385 } 2388 }
2386 2389
2387 retval = rtsx_write_register(chip, PWR_GATE_CTRL, 2390 retval = rtsx_write_register(chip, PWR_GATE_CTRL,
2388 LDO3318_PWR_MASK, LDO_ON); 2391 LDO3318_PWR_MASK, LDO_ON);
2389 if (retval != STATUS_SUCCESS) { 2392 if (retval != STATUS_SUCCESS) {
2390 rtsx_trace(chip); 2393 rtsx_trace(chip);
2391 return TRANSPORT_ERROR; 2394 return TRANSPORT_ERROR;
@@ -2425,7 +2428,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2425 2428
2426 if (func > func_max) { 2429 if (func > func_max) {
2427 set_sense_type(chip, SCSI_LUN(srb), 2430 set_sense_type(chip, SCSI_LUN(srb),
2428 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2431 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2429 rtsx_trace(chip); 2432 rtsx_trace(chip);
2430 return TRANSPORT_FAILED; 2433 return TRANSPORT_FAILED;
2431 } 2434 }
@@ -2439,7 +2442,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2439 retval = rtsx_read_cfg_seq(chip, func, addr, buf, len); 2442 retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
2440 if (retval != STATUS_SUCCESS) { 2443 if (retval != STATUS_SUCCESS) {
2441 set_sense_type(chip, SCSI_LUN(srb), 2444 set_sense_type(chip, SCSI_LUN(srb),
2442 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2445 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2443 vfree(buf); 2446 vfree(buf);
2444 rtsx_trace(chip); 2447 rtsx_trace(chip);
2445 return TRANSPORT_FAILED; 2448 return TRANSPORT_FAILED;
@@ -2484,7 +2487,7 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2484 2487
2485 if (func > func_max) { 2488 if (func > func_max) {
2486 set_sense_type(chip, SCSI_LUN(srb), 2489 set_sense_type(chip, SCSI_LUN(srb),
2487 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2490 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2488 rtsx_trace(chip); 2491 rtsx_trace(chip);
2489 return TRANSPORT_FAILED; 2492 return TRANSPORT_FAILED;
2490 } 2493 }
@@ -2593,7 +2596,7 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2593 2596
2594 default: 2597 default:
2595 set_sense_type(chip, SCSI_LUN(srb), 2598 set_sense_type(chip, SCSI_LUN(srb),
2596 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2599 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2597 rtsx_trace(chip); 2600 rtsx_trace(chip);
2598 return TRANSPORT_FAILED; 2601 return TRANSPORT_FAILED;
2599 } 2602 }
@@ -2670,7 +2673,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2670 if (get_lun_card(chip, lun) == XD_CARD) { 2673 if (get_lun_card(chip, lun) == XD_CARD) {
2671 rtsx_status[13] = 0x40; 2674 rtsx_status[13] = 0x40;
2672 } else if (get_lun_card(chip, lun) == SD_CARD) { 2675 } else if (get_lun_card(chip, lun) == SD_CARD) {
2673 struct sd_info *sd_card = &(chip->sd_card); 2676 struct sd_info *sd_card = &chip->sd_card;
2674 2677
2675 rtsx_status[13] = 0x20; 2678 rtsx_status[13] = 0x20;
2676 if (CHK_SD(sd_card)) { 2679 if (CHK_SD(sd_card)) {
@@ -2686,7 +2689,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2686 rtsx_status[13] |= 0x04; 2689 rtsx_status[13] |= 0x04;
2687 } 2690 }
2688 } else if (get_lun_card(chip, lun) == MS_CARD) { 2691 } else if (get_lun_card(chip, lun) == MS_CARD) {
2689 struct ms_info *ms_card = &(chip->ms_card); 2692 struct ms_info *ms_card = &chip->ms_card;
2690 2693
2691 if (CHK_MSPRO(ms_card)) { 2694 if (CHK_MSPRO(ms_card)) {
2692 rtsx_status[13] = 0x38; 2695 rtsx_status[13] = 0x38;
@@ -2881,7 +2884,7 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2881 2884
2882 default: 2885 default:
2883 set_sense_type(chip, SCSI_LUN(srb), 2886 set_sense_type(chip, SCSI_LUN(srb),
2884 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2887 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2885 rtsx_trace(chip); 2888 rtsx_trace(chip);
2886 return TRANSPORT_FAILED; 2889 return TRANSPORT_FAILED;
2887 } 2890 }
@@ -2895,14 +2898,15 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2895 unsigned int lun = SCSI_LUN(srb); 2898 unsigned int lun = SCSI_LUN(srb);
2896 u16 sec_cnt; 2899 u16 sec_cnt;
2897 2900
2898 if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) 2901 if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
2899 sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8]; 2902 sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
2900 else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) { 2903 } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
2901 sec_cnt = srb->cmnd[4]; 2904 sec_cnt = srb->cmnd[4];
2902 if (sec_cnt == 0) 2905 if (sec_cnt == 0)
2903 sec_cnt = 256; 2906 sec_cnt = 256;
2904 } else 2907 } else {
2905 return; 2908 return;
2909 }
2906 2910
2907 if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) { 2911 if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
2908 toggle_gpio(chip, LED_GPIO); 2912 toggle_gpio(chip, LED_GPIO);
@@ -2915,7 +2919,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2915 2919
2916static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) 2920static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2917{ 2921{
2918 struct ms_info *ms_card = &(chip->ms_card); 2922 struct ms_info *ms_card = &chip->ms_card;
2919 unsigned int lun = SCSI_LUN(srb); 2923 unsigned int lun = SCSI_LUN(srb);
2920 bool quick_format; 2924 bool quick_format;
2921 int retval; 2925 int retval;
@@ -2927,7 +2931,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2927 } 2931 }
2928 2932
2929 if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) || 2933 if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
2930 (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) || 2934 (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
2931 (srb->cmnd[7] != 0x74)) { 2935 (srb->cmnd[7] != 0x74)) {
2932 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 2936 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
2933 rtsx_trace(chip); 2937 rtsx_trace(chip);
@@ -2941,7 +2945,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2941 wait_timeout(100); 2945 wait_timeout(100);
2942 2946
2943 if (!check_card_ready(chip, lun) || 2947 if (!check_card_ready(chip, lun) ||
2944 (get_card_size(chip, lun) == 0)) { 2948 (get_card_size(chip, lun) == 0)) {
2945 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); 2949 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
2946 rtsx_trace(chip); 2950 rtsx_trace(chip);
2947 return TRANSPORT_FAILED; 2951 return TRANSPORT_FAILED;
@@ -2986,7 +2990,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2986#ifdef SUPPORT_PCGL_1P18 2990#ifdef SUPPORT_PCGL_1P18
2987static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) 2991static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
2988{ 2992{
2989 struct ms_info *ms_card = &(chip->ms_card); 2993 struct ms_info *ms_card = &chip->ms_card;
2990 unsigned int lun = SCSI_LUN(srb); 2994 unsigned int lun = SCSI_LUN(srb);
2991 u8 dev_info_id, data_len; 2995 u8 dev_info_id, data_len;
2992 u8 *buf; 2996 u8 *buf;
@@ -3005,8 +3009,8 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3005 } 3009 }
3006 3010
3007 if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) || 3011 if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
3008 (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) || 3012 (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
3009 (srb->cmnd[7] != 0x44)) { 3013 (srb->cmnd[7] != 0x44)) {
3010 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3014 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3011 rtsx_trace(chip); 3015 rtsx_trace(chip);
3012 return TRANSPORT_FAILED; 3016 return TRANSPORT_FAILED;
@@ -3014,17 +3018,20 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3014 3018
3015 dev_info_id = srb->cmnd[3]; 3019 dev_info_id = srb->cmnd[3];
3016 if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) || 3020 if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
3017 (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) || 3021 (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
3018 !CHK_MSPRO(ms_card)) { 3022 !CHK_MSPRO(ms_card)) {
3019 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3023 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3020 rtsx_trace(chip); 3024 rtsx_trace(chip);
3021 return TRANSPORT_FAILED; 3025 return TRANSPORT_FAILED;
3022 } 3026 }
3023 3027
3024 if (dev_info_id == 0x15) 3028 if (dev_info_id == 0x15) {
3025 buf_len = data_len = 0x3A; 3029 buf_len = 0x3A;
3026 else 3030 data_len = 0x3A;
3027 buf_len = data_len = 0x6A; 3031 } else {
3032 buf_len = 0x6A;
3033 data_len = 0x6A;
3034 }
3028 3035
3029 buf = kmalloc(buf_len, GFP_KERNEL); 3036 buf = kmalloc(buf_len, GFP_KERNEL);
3030 if (!buf) { 3037 if (!buf) {
@@ -3100,7 +3107,7 @@ static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3100} 3107}
3101 3108
3102#ifdef SUPPORT_CPRM 3109#ifdef SUPPORT_CPRM
3103static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) 3110static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3104{ 3111{
3105 unsigned int lun = SCSI_LUN(srb); 3112 unsigned int lun = SCSI_LUN(srb);
3106 int result; 3113 int result;
@@ -3164,7 +3171,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3164#ifdef SUPPORT_MAGIC_GATE 3171#ifdef SUPPORT_MAGIC_GATE
3165static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) 3172static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3166{ 3173{
3167 struct ms_info *ms_card = &(chip->ms_card); 3174 struct ms_info *ms_card = &chip->ms_card;
3168 unsigned int lun = SCSI_LUN(srb); 3175 unsigned int lun = SCSI_LUN(srb);
3169 int retval; 3176 int retval;
3170 u8 key_format; 3177 u8 key_format;
@@ -3208,8 +3215,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3208 switch (key_format) { 3215 switch (key_format) {
3209 case KF_GET_LOC_EKB: 3216 case KF_GET_LOC_EKB:
3210 if ((scsi_bufflen(srb) == 0x41C) && 3217 if ((scsi_bufflen(srb) == 0x41C) &&
3211 (srb->cmnd[8] == 0x04) && 3218 (srb->cmnd[8] == 0x04) &&
3212 (srb->cmnd[9] == 0x1C)) { 3219 (srb->cmnd[9] == 0x1C)) {
3213 retval = mg_get_local_EKB(srb, chip); 3220 retval = mg_get_local_EKB(srb, chip);
3214 if (retval != STATUS_SUCCESS) { 3221 if (retval != STATUS_SUCCESS) {
3215 rtsx_trace(chip); 3222 rtsx_trace(chip);
@@ -3218,7 +3225,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3218 3225
3219 } else { 3226 } else {
3220 set_sense_type(chip, lun, 3227 set_sense_type(chip, lun,
3221 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3228 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3222 rtsx_trace(chip); 3229 rtsx_trace(chip);
3223 return TRANSPORT_FAILED; 3230 return TRANSPORT_FAILED;
3224 } 3231 }
@@ -3226,8 +3233,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3226 3233
3227 case KF_RSP_CHG: 3234 case KF_RSP_CHG:
3228 if ((scsi_bufflen(srb) == 0x24) && 3235 if ((scsi_bufflen(srb) == 0x24) &&
3229 (srb->cmnd[8] == 0x00) && 3236 (srb->cmnd[8] == 0x00) &&
3230 (srb->cmnd[9] == 0x24)) { 3237 (srb->cmnd[9] == 0x24)) {
3231 retval = mg_get_rsp_chg(srb, chip); 3238 retval = mg_get_rsp_chg(srb, chip);
3232 if (retval != STATUS_SUCCESS) { 3239 if (retval != STATUS_SUCCESS) {
3233 rtsx_trace(chip); 3240 rtsx_trace(chip);
@@ -3236,7 +3243,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3236 3243
3237 } else { 3244 } else {
3238 set_sense_type(chip, lun, 3245 set_sense_type(chip, lun,
3239 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3246 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3240 rtsx_trace(chip); 3247 rtsx_trace(chip);
3241 return TRANSPORT_FAILED; 3248 return TRANSPORT_FAILED;
3242 } 3249 }
@@ -3245,12 +3252,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3245 case KF_GET_ICV: 3252 case KF_GET_ICV:
3246 ms_card->mg_entry_num = srb->cmnd[5]; 3253 ms_card->mg_entry_num = srb->cmnd[5];
3247 if ((scsi_bufflen(srb) == 0x404) && 3254 if ((scsi_bufflen(srb) == 0x404) &&
3248 (srb->cmnd[8] == 0x04) && 3255 (srb->cmnd[8] == 0x04) &&
3249 (srb->cmnd[9] == 0x04) && 3256 (srb->cmnd[9] == 0x04) &&
3250 (srb->cmnd[2] == 0x00) && 3257 (srb->cmnd[2] == 0x00) &&
3251 (srb->cmnd[3] == 0x00) && 3258 (srb->cmnd[3] == 0x00) &&
3252 (srb->cmnd[4] == 0x00) && 3259 (srb->cmnd[4] == 0x00) &&
3253 (srb->cmnd[5] < 32)) { 3260 (srb->cmnd[5] < 32)) {
3254 retval = mg_get_ICV(srb, chip); 3261 retval = mg_get_ICV(srb, chip);
3255 if (retval != STATUS_SUCCESS) { 3262 if (retval != STATUS_SUCCESS) {
3256 rtsx_trace(chip); 3263 rtsx_trace(chip);
@@ -3259,7 +3266,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3259 3266
3260 } else { 3267 } else {
3261 set_sense_type(chip, lun, 3268 set_sense_type(chip, lun,
3262 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3269 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3263 rtsx_trace(chip); 3270 rtsx_trace(chip);
3264 return TRANSPORT_FAILED; 3271 return TRANSPORT_FAILED;
3265 } 3272 }
@@ -3277,7 +3284,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3277 3284
3278static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) 3285static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3279{ 3286{
3280 struct ms_info *ms_card = &(chip->ms_card); 3287 struct ms_info *ms_card = &chip->ms_card;
3281 unsigned int lun = SCSI_LUN(srb); 3288 unsigned int lun = SCSI_LUN(srb);
3282 int retval; 3289 int retval;
3283 u8 key_format; 3290 u8 key_format;
@@ -3326,8 +3333,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3326 switch (key_format) { 3333 switch (key_format) {
3327 case KF_SET_LEAF_ID: 3334 case KF_SET_LEAF_ID:
3328 if ((scsi_bufflen(srb) == 0x0C) && 3335 if ((scsi_bufflen(srb) == 0x0C) &&
3329 (srb->cmnd[8] == 0x00) && 3336 (srb->cmnd[8] == 0x00) &&
3330 (srb->cmnd[9] == 0x0C)) { 3337 (srb->cmnd[9] == 0x0C)) {
3331 retval = mg_set_leaf_id(srb, chip); 3338 retval = mg_set_leaf_id(srb, chip);
3332 if (retval != STATUS_SUCCESS) { 3339 if (retval != STATUS_SUCCESS) {
3333 rtsx_trace(chip); 3340 rtsx_trace(chip);
@@ -3336,7 +3343,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3336 3343
3337 } else { 3344 } else {
3338 set_sense_type(chip, lun, 3345 set_sense_type(chip, lun,
3339 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3346 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3340 rtsx_trace(chip); 3347 rtsx_trace(chip);
3341 return TRANSPORT_FAILED; 3348 return TRANSPORT_FAILED;
3342 } 3349 }
@@ -3344,8 +3351,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3344 3351
3345 case KF_CHG_HOST: 3352 case KF_CHG_HOST:
3346 if ((scsi_bufflen(srb) == 0x0C) && 3353 if ((scsi_bufflen(srb) == 0x0C) &&
3347 (srb->cmnd[8] == 0x00) && 3354 (srb->cmnd[8] == 0x00) &&
3348 (srb->cmnd[9] == 0x0C)) { 3355 (srb->cmnd[9] == 0x0C)) {
3349 retval = mg_chg(srb, chip); 3356 retval = mg_chg(srb, chip);
3350 if (retval != STATUS_SUCCESS) { 3357 if (retval != STATUS_SUCCESS) {
3351 rtsx_trace(chip); 3358 rtsx_trace(chip);
@@ -3354,7 +3361,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3354 3361
3355 } else { 3362 } else {
3356 set_sense_type(chip, lun, 3363 set_sense_type(chip, lun,
3357 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3364 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3358 rtsx_trace(chip); 3365 rtsx_trace(chip);
3359 return TRANSPORT_FAILED; 3366 return TRANSPORT_FAILED;
3360 } 3367 }
@@ -3362,8 +3369,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3362 3369
3363 case KF_RSP_HOST: 3370 case KF_RSP_HOST:
3364 if ((scsi_bufflen(srb) == 0x0C) && 3371 if ((scsi_bufflen(srb) == 0x0C) &&
3365 (srb->cmnd[8] == 0x00) && 3372 (srb->cmnd[8] == 0x00) &&
3366 (srb->cmnd[9] == 0x0C)) { 3373 (srb->cmnd[9] == 0x0C)) {
3367 retval = mg_rsp(srb, chip); 3374 retval = mg_rsp(srb, chip);
3368 if (retval != STATUS_SUCCESS) { 3375 if (retval != STATUS_SUCCESS) {
3369 rtsx_trace(chip); 3376 rtsx_trace(chip);
@@ -3372,7 +3379,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3372 3379
3373 } else { 3380 } else {
3374 set_sense_type(chip, lun, 3381 set_sense_type(chip, lun,
3375 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3382 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3376 rtsx_trace(chip); 3383 rtsx_trace(chip);
3377 return TRANSPORT_FAILED; 3384 return TRANSPORT_FAILED;
3378 } 3385 }
@@ -3381,12 +3388,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3381 case KF_SET_ICV: 3388 case KF_SET_ICV:
3382 ms_card->mg_entry_num = srb->cmnd[5]; 3389 ms_card->mg_entry_num = srb->cmnd[5];
3383 if ((scsi_bufflen(srb) == 0x404) && 3390 if ((scsi_bufflen(srb) == 0x404) &&
3384 (srb->cmnd[8] == 0x04) && 3391 (srb->cmnd[8] == 0x04) &&
3385 (srb->cmnd[9] == 0x04) && 3392 (srb->cmnd[9] == 0x04) &&
3386 (srb->cmnd[2] == 0x00) && 3393 (srb->cmnd[2] == 0x00) &&
3387 (srb->cmnd[3] == 0x00) && 3394 (srb->cmnd[3] == 0x00) &&
3388 (srb->cmnd[4] == 0x00) && 3395 (srb->cmnd[4] == 0x00) &&
3389 (srb->cmnd[5] < 32)) { 3396 (srb->cmnd[5] < 32)) {
3390 retval = mg_set_ICV(srb, chip); 3397 retval = mg_set_ICV(srb, chip);
3391 if (retval != STATUS_SUCCESS) { 3398 if (retval != STATUS_SUCCESS) {
3392 rtsx_trace(chip); 3399 rtsx_trace(chip);
@@ -3395,7 +3402,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3395 3402
3396 } else { 3403 } else {
3397 set_sense_type(chip, lun, 3404 set_sense_type(chip, lun,
3398 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 3405 SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
3399 rtsx_trace(chip); 3406 rtsx_trace(chip);
3400 return TRANSPORT_FAILED; 3407 return TRANSPORT_FAILED;
3401 } 3408 }
@@ -3415,9 +3422,9 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3415int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip) 3422int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3416{ 3423{
3417#ifdef SUPPORT_SD_LOCK 3424#ifdef SUPPORT_SD_LOCK
3418 struct sd_info *sd_card = &(chip->sd_card); 3425 struct sd_info *sd_card = &chip->sd_card;
3419#endif 3426#endif
3420 struct ms_info *ms_card = &(chip->ms_card); 3427 struct ms_info *ms_card = &chip->ms_card;
3421 unsigned int lun = SCSI_LUN(srb); 3428 unsigned int lun = SCSI_LUN(srb);
3422 int result; 3429 int result;
3423 3430
@@ -3427,9 +3434,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3427 * REQUEST_SENSE and rs_ppstatus 3434 * REQUEST_SENSE and rs_ppstatus
3428 */ 3435 */
3429 if (!((srb->cmnd[0] == VENDOR_CMND) && 3436 if (!((srb->cmnd[0] == VENDOR_CMND) &&
3430 (srb->cmnd[1] == SCSI_APP_CMD) && 3437 (srb->cmnd[1] == SCSI_APP_CMD) &&
3431 (srb->cmnd[2] == GET_DEV_STATUS)) && 3438 (srb->cmnd[2] == GET_DEV_STATUS)) &&
3432 (srb->cmnd[0] != REQUEST_SENSE)) { 3439 (srb->cmnd[0] != REQUEST_SENSE)) {
3433 /* Logical Unit Not Ready Format in Progress */ 3440 /* Logical Unit Not Ready Format in Progress */
3434 set_sense_data(chip, lun, CUR_ERR, 3441 set_sense_data(chip, lun, CUR_ERR,
3435 0x02, 0, 0x04, 0x04, 0, 0); 3442 0x02, 0, 0x04, 0x04, 0, 0);
@@ -3440,12 +3447,12 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3440#endif 3447#endif
3441 3448
3442 if ((get_lun_card(chip, lun) == MS_CARD) && 3449 if ((get_lun_card(chip, lun) == MS_CARD) &&
3443 (ms_card->format_status == FORMAT_IN_PROGRESS)) { 3450 (ms_card->format_status == FORMAT_IN_PROGRESS)) {
3444 if ((srb->cmnd[0] != REQUEST_SENSE) && 3451 if ((srb->cmnd[0] != REQUEST_SENSE) &&
3445 (srb->cmnd[0] != INQUIRY)) { 3452 (srb->cmnd[0] != INQUIRY)) {
3446 /* Logical Unit Not Ready Format in Progress */ 3453 /* Logical Unit Not Ready Format in Progress */
3447 set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 3454 set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
3448 0, (u16)(ms_card->progress)); 3455 0, (u16)(ms_card->progress));
3449 rtsx_trace(chip); 3456 rtsx_trace(chip);
3450 return TRANSPORT_FAILED; 3457 return TRANSPORT_FAILED;
3451 } 3458 }
@@ -3510,7 +3517,7 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
3510 case SD_EXECUTE_WRITE: 3517 case SD_EXECUTE_WRITE:
3511 case SD_GET_RSP: 3518 case SD_GET_RSP:
3512 case SD_HW_RST: 3519 case SD_HW_RST:
3513 result = sd_extention_cmnd(srb, chip); 3520 result = sd_extension_cmnd(srb, chip);
3514 break; 3521 break;
3515#endif 3522#endif
3516 3523
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
index 03dd76d6c859..30f3724848fe 100644
--- a/drivers/staging/rts5208/rtsx_scsi.h
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -136,8 +136,8 @@
136void scsi_show_command(struct rtsx_chip *chip); 136void scsi_show_command(struct rtsx_chip *chip);
137void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type); 137void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
138void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code, 138void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
139 u8 sense_key, u32 info, u8 asc, u8 ascq, 139 u8 sense_key, u32 info, u8 asc, u8 ascq,
140 u8 sns_key_info0, u16 sns_key_info1); 140 u8 sns_key_info0, u16 sns_key_info1);
141int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip); 141int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
142 142
143#endif /* __REALTEK_RTSX_SCSI_H */ 143#endif /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index f49bed9ec76a..817700c0d794 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -32,9 +32,9 @@ static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
32{ 32{
33 struct rtsx_dev *dev = chip->rtsx; 33 struct rtsx_dev *dev = chip->rtsx;
34 34
35 spin_lock(&(dev->reg_lock)); 35 spin_lock(&dev->reg_lock);
36 rtsx_enter_ss(chip); 36 rtsx_enter_ss(chip);
37 spin_unlock(&(dev->reg_lock)); 37 spin_unlock(&dev->reg_lock);
38} 38}
39 39
40static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag) 40static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 479137398c3d..99740c33f2fb 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -30,18 +30,21 @@
30#define WAIT_TIME 2000 30#define WAIT_TIME 2000
31 31
32unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer, 32unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
33 unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index, 33 unsigned int buflen,
34 unsigned int *offset, enum xfer_buf_dir dir); 34 struct scsi_cmnd *srb,
35void rtsx_stor_set_xfer_buf(unsigned char *buffer, 35 unsigned int *index,
36 unsigned int buflen, struct scsi_cmnd *srb); 36 unsigned int *offset,
37void rtsx_stor_get_xfer_buf(unsigned char *buffer, 37 enum xfer_buf_dir dir);
38 unsigned int buflen, struct scsi_cmnd *srb); 38void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
39 struct scsi_cmnd *srb);
40void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
41 struct scsi_cmnd *srb);
39void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip); 42void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
40 43
41#define rtsx_init_cmd(chip) ((chip)->ci = 0) 44#define rtsx_init_cmd(chip) ((chip)->ci = 0)
42 45
43void rtsx_add_cmd(struct rtsx_chip *chip, 46void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask,
44 u8 cmd_type, u16 reg_addr, u8 mask, u8 data); 47 u8 data);
45void rtsx_send_cmd_no_wait(struct rtsx_chip *chip); 48void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
46int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout); 49int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
47 50
@@ -55,11 +58,12 @@ static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
55} 58}
56 59
57int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len, 60int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
58 int use_sg, enum dma_data_direction dma_dir, int timeout); 61 int use_sg, enum dma_data_direction dma_dir,
62 int timeout);
59 63
60int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, 64int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf,
61 void *buf, size_t len, 65 size_t len, int use_sg, unsigned int *index,
62 int use_sg, unsigned int *index, unsigned int *offset, 66 unsigned int *offset,
63 enum dma_data_direction dma_dir, int timeout); 67 enum dma_data_direction dma_dir, int timeout);
64 68
65#endif /* __REALTEK_RTSX_TRANSPORT_H */ 69#endif /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36f8988..bdd35b611f27 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -56,21 +56,21 @@ static u16 REG_SD_DCMPS1_CTL;
56 56
57static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code) 57static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
58{ 58{
59 struct sd_info *sd_card = &(chip->sd_card); 59 struct sd_info *sd_card = &chip->sd_card;
60 60
61 sd_card->err_code |= err_code; 61 sd_card->err_code |= err_code;
62} 62}
63 63
64static inline void sd_clr_err_code(struct rtsx_chip *chip) 64static inline void sd_clr_err_code(struct rtsx_chip *chip)
65{ 65{
66 struct sd_info *sd_card = &(chip->sd_card); 66 struct sd_info *sd_card = &chip->sd_card;
67 67
68 sd_card->err_code = 0; 68 sd_card->err_code = 0;
69} 69}
70 70
71static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code) 71static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
72{ 72{
73 struct sd_info *sd_card = &(chip->sd_card); 73 struct sd_info *sd_card = &chip->sd_card;
74 74
75 return sd_card->err_code & err_code; 75 return sd_card->err_code & err_code;
76} 76}
@@ -124,9 +124,9 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
124} 124}
125 125
126static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, 126static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
127 u32 arg, u8 rsp_type, u8 *rsp, int rsp_len) 127 u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
128{ 128{
129 struct sd_info *sd_card = &(chip->sd_card); 129 struct sd_info *sd_card = &chip->sd_card;
130 int retval; 130 int retval;
131 int timeout = 100; 131 int timeout = 100;
132 u16 reg_addr; 132 u16 reg_addr;
@@ -153,11 +153,12 @@ RTY_SEND_CMD:
153 153
154 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); 154 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
155 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 155 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
156 0x01, PINGPONG_BUFFER); 156 0x01, PINGPONG_BUFFER);
157 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 157 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
158 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); 158 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
159 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, 159 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
160 SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE); 160 SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
161 SD_STAT_IDLE);
161 162
162 if (rsp_type == SD_RSP_TYPE_R2) { 163 if (rsp_type == SD_RSP_TYPE_R2) {
163 for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; 164 for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -238,7 +239,7 @@ RTY_SEND_CMD:
238 239
239 if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) { 240 if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
240 if ((cmd_idx != SEND_RELATIVE_ADDR) && 241 if ((cmd_idx != SEND_RELATIVE_ADDR) &&
241 (cmd_idx != SEND_IF_COND)) { 242 (cmd_idx != SEND_IF_COND)) {
242 if (cmd_idx != STOP_TRANSMISSION) { 243 if (cmd_idx != STOP_TRANSMISSION) {
243 if (ptr[1] & 0x80) { 244 if (ptr[1] & 0x80) {
244 rtsx_trace(chip); 245 rtsx_trace(chip);
@@ -285,7 +286,7 @@ static int sd_read_data(struct rtsx_chip *chip,
285 u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len, 286 u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
286 int timeout) 287 int timeout)
287{ 288{
288 struct sd_info *sd_card = &(chip->sd_card); 289 struct sd_info *sd_card = &chip->sd_card;
289 int retval; 290 int retval;
290 int i; 291 int i;
291 292
@@ -308,27 +309,27 @@ static int sd_read_data(struct rtsx_chip *chip,
308 0xFF, cmd[i]); 309 0xFF, cmd[i]);
309 } 310 }
310 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 311 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
311 (u8)byte_cnt); 312 (u8)byte_cnt);
312 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 313 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
313 (u8)(byte_cnt >> 8)); 314 (u8)(byte_cnt >> 8));
314 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 315 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
315 (u8)blk_cnt); 316 (u8)blk_cnt);
316 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 317 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
317 (u8)(blk_cnt >> 8)); 318 (u8)(blk_cnt >> 8));
318 319
319 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); 320 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
320 321
321 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 322 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
322 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| 323 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
323 SD_CHECK_CRC7 | SD_RSP_LEN_6); 324 SD_CHECK_CRC7 | SD_RSP_LEN_6);
324 if (trans_mode != SD_TM_AUTO_TUNING) 325 if (trans_mode != SD_TM_AUTO_TUNING)
325 rtsx_add_cmd(chip, WRITE_REG_CMD, 326 rtsx_add_cmd(chip, WRITE_REG_CMD,
326 CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); 327 CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
327 328
328 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 329 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
329 trans_mode | SD_TRANSFER_START); 330 trans_mode | SD_TRANSFER_START);
330 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, 331 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
331 SD_TRANSFER_END); 332 SD_TRANSFER_END);
332 333
333 retval = rtsx_send_cmd(chip, SD_CARD, timeout); 334 retval = rtsx_send_cmd(chip, SD_CARD, timeout);
334 if (retval < 0) { 335 if (retval < 0) {
@@ -353,10 +354,10 @@ static int sd_read_data(struct rtsx_chip *chip,
353} 354}
354 355
355static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, 356static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
356 u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width, 357 u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
357 u8 *buf, int buf_len, int timeout) 358 u8 bus_width, u8 *buf, int buf_len, int timeout)
358{ 359{
359 struct sd_info *sd_card = &(chip->sd_card); 360 struct sd_info *sd_card = &chip->sd_card;
360 int retval; 361 int retval;
361 int i; 362 int i;
362 363
@@ -389,30 +390,30 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
389 } 390 }
390 } 391 }
391 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 392 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
392 (u8)byte_cnt); 393 (u8)byte_cnt);
393 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 394 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
394 (u8)(byte_cnt >> 8)); 395 (u8)(byte_cnt >> 8));
395 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 396 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
396 (u8)blk_cnt); 397 (u8)blk_cnt);
397 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 398 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
398 (u8)(blk_cnt >> 8)); 399 (u8)(blk_cnt >> 8));
399 400
400 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); 401 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
401 402
402 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 403 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
403 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | 404 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
404 SD_CHECK_CRC7 | SD_RSP_LEN_6); 405 SD_CHECK_CRC7 | SD_RSP_LEN_6);
405 406
406 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 407 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
407 trans_mode | SD_TRANSFER_START); 408 trans_mode | SD_TRANSFER_START);
408 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, 409 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
409 SD_TRANSFER_END); 410 SD_TRANSFER_END);
410 411
411 retval = rtsx_send_cmd(chip, SD_CARD, timeout); 412 retval = rtsx_send_cmd(chip, SD_CARD, timeout);
412 if (retval < 0) { 413 if (retval < 0) {
413 if (retval == -ETIMEDOUT) { 414 if (retval == -ETIMEDOUT) {
414 sd_send_cmd_get_rsp(chip, SEND_STATUS, 415 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
415 sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); 416 SD_RSP_TYPE_R1, NULL, 0);
416 } 417 }
417 418
418 rtsx_trace(chip); 419 rtsx_trace(chip);
@@ -424,7 +425,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
424 425
425static int sd_check_csd(struct rtsx_chip *chip, char check_wp) 426static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
426{ 427{
427 struct sd_info *sd_card = &(chip->sd_card); 428 struct sd_info *sd_card = &chip->sd_card;
428 int retval; 429 int retval;
429 int i; 430 int i;
430 u8 csd_ver, trans_speed; 431 u8 csd_ver, trans_speed;
@@ -438,7 +439,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
438 } 439 }
439 440
440 retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr, 441 retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
441 SD_RSP_TYPE_R2, rsp, 16); 442 SD_RSP_TYPE_R2, rsp, 16);
442 if (retval == STATUS_SUCCESS) 443 if (retval == STATUS_SUCCESS)
443 break; 444 break;
444 } 445 }
@@ -534,7 +535,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
534static int sd_set_sample_push_timing(struct rtsx_chip *chip) 535static int sd_set_sample_push_timing(struct rtsx_chip *chip)
535{ 536{
536 int retval; 537 int retval;
537 struct sd_info *sd_card = &(chip->sd_card); 538 struct sd_info *sd_card = &chip->sd_card;
538 u8 val = 0; 539 u8 val = 0;
539 540
540 if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY) 541 if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
@@ -573,7 +574,7 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip)
573 574
574static void sd_choose_proper_clock(struct rtsx_chip *chip) 575static void sd_choose_proper_clock(struct rtsx_chip *chip)
575{ 576{
576 struct sd_info *sd_card = &(chip->sd_card); 577 struct sd_info *sd_card = &chip->sd_card;
577 578
578 if (CHK_SD_SDR104(sd_card)) { 579 if (CHK_SD_SDR104(sd_card)) {
579 if (chip->asic_code) 580 if (chip->asic_code)
@@ -637,7 +638,7 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
637 638
638static int sd_set_init_para(struct rtsx_chip *chip) 639static int sd_set_init_para(struct rtsx_chip *chip)
639{ 640{
640 struct sd_info *sd_card = &(chip->sd_card); 641 struct sd_info *sd_card = &chip->sd_card;
641 int retval; 642 int retval;
642 643
643 retval = sd_set_sample_push_timing(chip); 644 retval = sd_set_sample_push_timing(chip);
@@ -659,7 +660,7 @@ static int sd_set_init_para(struct rtsx_chip *chip)
659 660
660int sd_select_card(struct rtsx_chip *chip, int select) 661int sd_select_card(struct rtsx_chip *chip, int select)
661{ 662{
662 struct sd_info *sd_card = &(chip->sd_card); 663 struct sd_info *sd_card = &chip->sd_card;
663 int retval; 664 int retval;
664 u8 cmd_idx, cmd_type; 665 u8 cmd_idx, cmd_type;
665 u32 addr; 666 u32 addr;
@@ -686,12 +687,12 @@ int sd_select_card(struct rtsx_chip *chip, int select)
686#ifdef SUPPORT_SD_LOCK 687#ifdef SUPPORT_SD_LOCK
687static int sd_update_lock_status(struct rtsx_chip *chip) 688static int sd_update_lock_status(struct rtsx_chip *chip)
688{ 689{
689 struct sd_info *sd_card = &(chip->sd_card); 690 struct sd_info *sd_card = &chip->sd_card;
690 int retval; 691 int retval;
691 u8 rsp[5]; 692 u8 rsp[5];
692 693
693 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 694 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
694 SD_RSP_TYPE_R1, rsp, 5); 695 SD_RSP_TYPE_R1, rsp, 5);
695 if (retval != STATUS_SUCCESS) { 696 if (retval != STATUS_SUCCESS) {
696 rtsx_trace(chip); 697 rtsx_trace(chip);
697 return STATUS_FAIL; 698 return STATUS_FAIL;
@@ -715,23 +716,23 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
715#endif 716#endif
716 717
717static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state, 718static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
718 u8 data_ready, int polling_cnt) 719 u8 data_ready, int polling_cnt)
719{ 720{
720 struct sd_info *sd_card = &(chip->sd_card); 721 struct sd_info *sd_card = &chip->sd_card;
721 int retval, i; 722 int retval, i;
722 u8 rsp[5]; 723 u8 rsp[5];
723 724
724 for (i = 0; i < polling_cnt; i++) { 725 for (i = 0; i < polling_cnt; i++) {
725 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, 726 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
726 sd_card->sd_addr, SD_RSP_TYPE_R1, rsp, 727 sd_card->sd_addr, SD_RSP_TYPE_R1,
727 5); 728 rsp, 5);
728 if (retval != STATUS_SUCCESS) { 729 if (retval != STATUS_SUCCESS) {
729 rtsx_trace(chip); 730 rtsx_trace(chip);
730 return STATUS_FAIL; 731 return STATUS_FAIL;
731 } 732 }
732 733
733 if (((rsp[3] & 0x1E) == state) && 734 if (((rsp[3] & 0x1E) == state) &&
734 ((rsp[3] & 0x01) == data_ready)) 735 ((rsp[3] & 0x01) == data_ready))
735 return STATUS_SUCCESS; 736 return STATUS_SUCCESS;
736 } 737 }
737 738
@@ -746,8 +747,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
746 if (voltage == SD_IO_3V3) { 747 if (voltage == SD_IO_3V3) {
747 if (chip->asic_code) { 748 if (chip->asic_code) {
748 retval = rtsx_write_phy_register(chip, 0x08, 749 retval = rtsx_write_phy_register(chip, 0x08,
749 0x4FC0 | 750 0x4FC0 |
750 chip->phy_voltage); 751 chip->phy_voltage);
751 if (retval != STATUS_SUCCESS) { 752 if (retval != STATUS_SUCCESS) {
752 rtsx_trace(chip); 753 rtsx_trace(chip);
753 return STATUS_FAIL; 754 return STATUS_FAIL;
@@ -763,8 +764,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
763 } else if (voltage == SD_IO_1V8) { 764 } else if (voltage == SD_IO_1V8) {
764 if (chip->asic_code) { 765 if (chip->asic_code) {
765 retval = rtsx_write_phy_register(chip, 0x08, 766 retval = rtsx_write_phy_register(chip, 0x08,
766 0x4C40 | 767 0x4C40 |
767 chip->phy_voltage); 768 chip->phy_voltage);
768 if (retval != STATUS_SUCCESS) { 769 if (retval != STATUS_SUCCESS) {
769 rtsx_trace(chip); 770 rtsx_trace(chip);
770 return STATUS_FAIL; 771 return STATUS_FAIL;
@@ -800,7 +801,7 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
800 } 801 }
801 802
802 retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1, 803 retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
803 NULL, 0); 804 NULL, 0);
804 if (retval != STATUS_SUCCESS) { 805 if (retval != STATUS_SUCCESS) {
805 rtsx_trace(chip); 806 rtsx_trace(chip);
806 return STATUS_FAIL; 807 return STATUS_FAIL;
@@ -851,8 +852,8 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
851 (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | 852 (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
852 SD_DAT1_STATUS | SD_DAT0_STATUS)) { 853 SD_DAT1_STATUS | SD_DAT0_STATUS)) {
853 dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat); 854 dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat);
854 rtsx_write_register(chip, SD_BUS_STAT, 855 rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
855 SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); 856 SD_CLK_FORCE_STOP, 0);
856 rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0); 857 rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
857 rtsx_trace(chip); 858 rtsx_trace(chip);
858 return STATUS_FAIL; 859 return STATUS_FAIL;
@@ -903,7 +904,7 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
903 904
904static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) 905static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
905{ 906{
906 struct sd_info *sd_card = &(chip->sd_card); 907 struct sd_info *sd_card = &chip->sd_card;
907 u16 SD_VP_CTL, SD_DCMPS_CTL; 908 u16 SD_VP_CTL, SD_DCMPS_CTL;
908 u8 val; 909 u8 val;
909 int retval; 910 int retval;
@@ -968,7 +969,9 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
968 } 969 }
969 udelay(50); 970 udelay(50);
970 retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF, 971 retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
971 PHASE_CHANGE | PHASE_NOT_RESET | sample_point); 972 PHASE_CHANGE |
973 PHASE_NOT_RESET |
974 sample_point);
972 if (retval) { 975 if (retval) {
973 rtsx_trace(chip); 976 rtsx_trace(chip);
974 return retval; 977 return retval;
@@ -982,7 +985,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
982 } 985 }
983 udelay(50); 986 udelay(50);
984 retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF, 987 retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
985 PHASE_NOT_RESET | sample_point); 988 PHASE_NOT_RESET |
989 sample_point);
986 if (retval) { 990 if (retval) {
987 rtsx_trace(chip); 991 rtsx_trace(chip);
988 return retval; 992 return retval;
@@ -992,24 +996,24 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
992 996
993 rtsx_init_cmd(chip); 997 rtsx_init_cmd(chip);
994 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE, 998 rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
995 DCMPS_CHANGE); 999 DCMPS_CHANGE);
996 rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL, 1000 rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
997 DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE); 1001 DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
998 retval = rtsx_send_cmd(chip, SD_CARD, 100); 1002 retval = rtsx_send_cmd(chip, SD_CARD, 100);
999 if (retval != STATUS_SUCCESS) { 1003 if (retval != STATUS_SUCCESS) {
1000 rtsx_trace(chip); 1004 rtsx_trace(chip);
1001 goto Fail; 1005 goto fail;
1002 } 1006 }
1003 1007
1004 val = *rtsx_get_cmd_data(chip); 1008 val = *rtsx_get_cmd_data(chip);
1005 if (val & DCMPS_ERROR) { 1009 if (val & DCMPS_ERROR) {
1006 rtsx_trace(chip); 1010 rtsx_trace(chip);
1007 goto Fail; 1011 goto fail;
1008 } 1012 }
1009 1013
1010 if ((val & DCMPS_CURRENT_PHASE) != sample_point) { 1014 if ((val & DCMPS_CURRENT_PHASE) != sample_point) {
1011 rtsx_trace(chip); 1015 rtsx_trace(chip);
1012 goto Fail; 1016 goto fail;
1013 } 1017 }
1014 1018
1015 retval = rtsx_write_register(chip, SD_DCMPS_CTL, 1019 retval = rtsx_write_register(chip, SD_DCMPS_CTL,
@@ -1045,7 +1049,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
1045 1049
1046 return STATUS_SUCCESS; 1050 return STATUS_SUCCESS;
1047 1051
1048Fail: 1052fail:
1049 rtsx_read_register(chip, SD_VP_CTL, &val); 1053 rtsx_read_register(chip, SD_VP_CTL, &val);
1050 dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val); 1054 dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
1051 rtsx_read_register(chip, SD_DCMPS_CTL, &val); 1055 rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -1060,12 +1064,12 @@ Fail:
1060 1064
1061static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width) 1065static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
1062{ 1066{
1063 struct sd_info *sd_card = &(chip->sd_card); 1067 struct sd_info *sd_card = &chip->sd_card;
1064 int retval; 1068 int retval;
1065 u8 cmd[5], buf[8]; 1069 u8 cmd[5], buf[8];
1066 1070
1067 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, 1071 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
1068 SD_RSP_TYPE_R1, NULL, 0); 1072 SD_RSP_TYPE_R1, NULL, 0);
1069 if (retval != STATUS_SUCCESS) { 1073 if (retval != STATUS_SUCCESS) {
1070 rtsx_trace(chip); 1074 rtsx_trace(chip);
1071 return STATUS_FAIL; 1075 return STATUS_FAIL;
@@ -1078,7 +1082,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
1078 cmd[4] = 0; 1082 cmd[4] = 0;
1079 1083
1080 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width, 1084 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
1081 buf, 8, 250); 1085 buf, 8, 250);
1082 if (retval != STATUS_SUCCESS) { 1086 if (retval != STATUS_SUCCESS) {
1083 rtsx_clear_sd_error(chip); 1087 rtsx_clear_sd_error(chip);
1084 rtsx_trace(chip); 1088 rtsx_trace(chip);
@@ -1096,7 +1100,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
1096} 1100}
1097 1101
1098static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, 1102static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
1099 u8 func_to_switch, u8 *buf, int buf_len) 1103 u8 func_to_switch, u8 *buf, int buf_len)
1100{ 1104{
1101 u8 support_mask = 0, query_switch = 0, switch_busy = 0; 1105 u8 support_mask = 0, query_switch = 0, switch_busy = 0;
1102 int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0; 1106 int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
@@ -1198,7 +1202,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
1198 1202
1199 if (func_group == SD_FUNC_GROUP_1) { 1203 if (func_group == SD_FUNC_GROUP_1) {
1200 if (!(buf[support_offset] & support_mask) || 1204 if (!(buf[support_offset] & support_mask) ||
1201 ((buf[query_switch_offset] & 0x0F) != query_switch)) { 1205 ((buf[query_switch_offset] & 0x0F) != query_switch)) {
1202 rtsx_trace(chip); 1206 rtsx_trace(chip);
1203 return STATUS_FAIL; 1207 return STATUS_FAIL;
1204 } 1208 }
@@ -1206,7 +1210,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
1206 1210
1207 /* Check 'Busy Status' */ 1211 /* Check 'Busy Status' */
1208 if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) && 1212 if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
1209 ((buf[check_busy_offset] & switch_busy) == switch_busy)) { 1213 ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
1210 rtsx_trace(chip); 1214 rtsx_trace(chip);
1211 return STATUS_FAIL; 1215 return STATUS_FAIL;
1212 } 1216 }
@@ -1214,10 +1218,10 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
1214 return STATUS_SUCCESS; 1218 return STATUS_SUCCESS;
1215} 1219}
1216 1220
1217static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, 1221static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
1218 u8 func_group, u8 func_to_switch, u8 bus_width) 1222 u8 func_to_switch, u8 bus_width)
1219{ 1223{
1220 struct sd_info *sd_card = &(chip->sd_card); 1224 struct sd_info *sd_card = &chip->sd_card;
1221 int retval; 1225 int retval;
1222 u8 cmd[5], buf[64]; 1226 u8 cmd[5], buf[64];
1223 1227
@@ -1247,7 +1251,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
1247 } 1251 }
1248 1252
1249 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width, 1253 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
1250 buf, 64, 250); 1254 buf, 64, 250);
1251 if (retval != STATUS_SUCCESS) { 1255 if (retval != STATUS_SUCCESS) {
1252 rtsx_clear_sd_error(chip); 1256 rtsx_clear_sd_error(chip);
1253 rtsx_trace(chip); 1257 rtsx_trace(chip);
@@ -1326,7 +1330,7 @@ static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
1326} 1330}
1327 1331
1328static int sd_check_switch(struct rtsx_chip *chip, 1332static int sd_check_switch(struct rtsx_chip *chip,
1329 u8 func_group, u8 func_to_switch, u8 bus_width) 1333 u8 func_group, u8 func_to_switch, u8 bus_width)
1330{ 1334{
1331 int retval; 1335 int retval;
1332 int i; 1336 int i;
@@ -1340,12 +1344,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
1340 } 1344 }
1341 1345
1342 retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group, 1346 retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
1343 func_to_switch, bus_width); 1347 func_to_switch, bus_width);
1344 if (retval == STATUS_SUCCESS) { 1348 if (retval == STATUS_SUCCESS) {
1345 u8 stat; 1349 u8 stat;
1346 1350
1347 retval = sd_check_switch_mode(chip, SD_SWITCH_MODE, 1351 retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
1348 func_group, func_to_switch, bus_width); 1352 func_group,
1353 func_to_switch,
1354 bus_width);
1349 if (retval == STATUS_SUCCESS) { 1355 if (retval == STATUS_SUCCESS) {
1350 switch_good = true; 1356 switch_good = true;
1351 break; 1357 break;
@@ -1364,7 +1370,7 @@ static int sd_check_switch(struct rtsx_chip *chip,
1364 } 1370 }
1365 1371
1366 func_to_switch = downgrade_switch_mode(func_group, 1372 func_to_switch = downgrade_switch_mode(func_group,
1367 func_to_switch); 1373 func_to_switch);
1368 1374
1369 wait_timeout(20); 1375 wait_timeout(20);
1370 } 1376 }
@@ -1379,14 +1385,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
1379 1385
1380static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) 1386static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1381{ 1387{
1382 struct sd_info *sd_card = &(chip->sd_card); 1388 struct sd_info *sd_card = &chip->sd_card;
1383 int retval; 1389 int retval;
1384 int i; 1390 int i;
1385 u8 func_to_switch = 0; 1391 u8 func_to_switch = 0;
1386 1392
1387 /* Get supported functions */ 1393 /* Get supported functions */
1388 retval = sd_check_switch_mode(chip, SD_CHECK_MODE, 1394 retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
1389 NO_ARGUMENT, NO_ARGUMENT, bus_width); 1395 NO_ARGUMENT, bus_width);
1390 if (retval != STATUS_SUCCESS) { 1396 if (retval != STATUS_SUCCESS) {
1391 rtsx_trace(chip); 1397 rtsx_trace(chip);
1392 return STATUS_FAIL; 1398 return STATUS_FAIL;
@@ -1396,24 +1402,24 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1396 1402
1397 /* Function Group 1: Access Mode */ 1403 /* Function Group 1: Access Mode */
1398 for (i = 0; i < 4; i++) { 1404 for (i = 0; i < 4; i++) {
1399 switch ((u8)(chip->sd_speed_prior >> (i*8))) { 1405 switch ((u8)(chip->sd_speed_prior >> (i * 8))) {
1400 case SDR104_SUPPORT: 1406 case SDR104_SUPPORT:
1401 if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) 1407 if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) &&
1402 && chip->sdr104_en) { 1408 chip->sdr104_en) {
1403 func_to_switch = SDR104_SUPPORT; 1409 func_to_switch = SDR104_SUPPORT;
1404 } 1410 }
1405 break; 1411 break;
1406 1412
1407 case DDR50_SUPPORT: 1413 case DDR50_SUPPORT:
1408 if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) 1414 if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) &&
1409 && chip->ddr50_en) { 1415 chip->ddr50_en) {
1410 func_to_switch = DDR50_SUPPORT; 1416 func_to_switch = DDR50_SUPPORT;
1411 } 1417 }
1412 break; 1418 break;
1413 1419
1414 case SDR50_SUPPORT: 1420 case SDR50_SUPPORT:
1415 if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) 1421 if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) &&
1416 && chip->sdr50_en) { 1422 chip->sdr50_en) {
1417 func_to_switch = SDR50_SUPPORT; 1423 func_to_switch = SDR50_SUPPORT;
1418 } 1424 }
1419 break; 1425 break;
@@ -1430,7 +1436,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1430 1436
1431 if (func_to_switch) 1437 if (func_to_switch)
1432 break; 1438 break;
1433
1434 } 1439 }
1435 dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x", 1440 dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
1436 func_to_switch); 1441 func_to_switch);
@@ -1446,7 +1451,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1446 1451
1447 if (func_to_switch) { 1452 if (func_to_switch) {
1448 retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch, 1453 retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
1449 bus_width); 1454 bus_width);
1450 if (retval != STATUS_SUCCESS) { 1455 if (retval != STATUS_SUCCESS) {
1451 if (func_to_switch == SDR104_SUPPORT) { 1456 if (func_to_switch == SDR104_SUPPORT) {
1452 sd_card->sd_switch_fail = SDR104_SUPPORT_MASK; 1457 sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
@@ -1496,7 +1501,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1496 func_to_switch = 0xFF; 1501 func_to_switch = 0xFF;
1497 1502
1498 for (i = 0; i < 4; i++) { 1503 for (i = 0; i < 4; i++) {
1499 switch ((u8)(chip->sd_current_prior >> (i*8))) { 1504 switch ((u8)(chip->sd_current_prior >> (i * 8))) {
1500 case CURRENT_LIMIT_800: 1505 case CURRENT_LIMIT_800:
1501 if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK) 1506 if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
1502 func_to_switch = CURRENT_LIMIT_800; 1507 func_to_switch = CURRENT_LIMIT_800;
@@ -1534,7 +1539,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
1534 1539
1535 if (func_to_switch <= CURRENT_LIMIT_800) { 1540 if (func_to_switch <= CURRENT_LIMIT_800) {
1536 retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch, 1541 retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
1537 bus_width); 1542 bus_width);
1538 if (retval != STATUS_SUCCESS) { 1543 if (retval != STATUS_SUCCESS) {
1539 if (sd_check_err_code(chip, SD_NO_CARD)) { 1544 if (sd_check_err_code(chip, SD_NO_CARD)) {
1540 rtsx_trace(chip); 1545 rtsx_trace(chip);
@@ -1596,8 +1601,8 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1596 cmd[3] = 0; 1601 cmd[3] = 0;
1597 cmd[4] = 0; 1602 cmd[4] = 0;
1598 1603
1599 retval = sd_read_data(chip, SD_TM_AUTO_TUNING, 1604 retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1,
1600 cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100); 1605 SD_BUS_WIDTH_4, NULL, 0, 100);
1601 if (retval != STATUS_SUCCESS) { 1606 if (retval != STATUS_SUCCESS) {
1602 (void)sd_wait_data_idle(chip); 1607 (void)sd_wait_data_idle(chip);
1603 1608
@@ -1611,7 +1616,7 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1611 1616
1612static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) 1617static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1613{ 1618{
1614 struct sd_info *sd_card = &(chip->sd_card); 1619 struct sd_info *sd_card = &chip->sd_card;
1615 int retval; 1620 int retval;
1616 u8 cmd[5]; 1621 u8 cmd[5];
1617 1622
@@ -1624,7 +1629,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1624 dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n"); 1629 dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n");
1625 1630
1626 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, 1631 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
1627 SD_RSP_TYPE_R1, NULL, 0); 1632 SD_RSP_TYPE_R1, NULL, 0);
1628 if (retval != STATUS_SUCCESS) { 1633 if (retval != STATUS_SUCCESS) {
1629 rtsx_trace(chip); 1634 rtsx_trace(chip);
1630 return STATUS_FAIL; 1635 return STATUS_FAIL;
@@ -1636,8 +1641,8 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1636 cmd[3] = 0; 1641 cmd[3] = 0;
1637 cmd[4] = 0; 1642 cmd[4] = 0;
1638 1643
1639 retval = sd_read_data(chip, SD_TM_NORMAL_READ, 1644 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
1640 cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100); 1645 SD_BUS_WIDTH_4, NULL, 0, 100);
1641 if (retval != STATUS_SUCCESS) { 1646 if (retval != STATUS_SUCCESS) {
1642 (void)sd_wait_data_idle(chip); 1647 (void)sd_wait_data_idle(chip);
1643 1648
@@ -1651,7 +1656,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1651 1656
1652static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) 1657static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1653{ 1658{
1654 struct sd_info *sd_card = &(chip->sd_card); 1659 struct sd_info *sd_card = &chip->sd_card;
1655 int retval; 1660 int retval;
1656 u8 cmd[5], bus_width; 1661 u8 cmd[5], bus_width;
1657 1662
@@ -1676,8 +1681,8 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1676 cmd[3] = 0; 1681 cmd[3] = 0;
1677 cmd[4] = 0; 1682 cmd[4] = 0;
1678 1683
1679 retval = sd_read_data(chip, SD_TM_NORMAL_READ, 1684 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1,
1680 cmd, 5, 0x200, 1, bus_width, NULL, 0, 100); 1685 bus_width, NULL, 0, 100);
1681 if (retval != STATUS_SUCCESS) { 1686 if (retval != STATUS_SUCCESS) {
1682 (void)sd_wait_data_idle(chip); 1687 (void)sd_wait_data_idle(chip);
1683 1688
@@ -1691,7 +1696,7 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
1691 1696
1692static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) 1697static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1693{ 1698{
1694 struct sd_info *sd_card = &(chip->sd_card); 1699 struct sd_info *sd_card = &chip->sd_card;
1695 int retval; 1700 int retval;
1696 1701
1697 retval = sd_change_phase(chip, sample_point, TUNE_TX); 1702 retval = sd_change_phase(chip, sample_point, TUNE_TX);
@@ -1708,11 +1713,11 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1708 } 1713 }
1709 1714
1710 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 1715 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
1711 SD_RSP_TYPE_R1, NULL, 0); 1716 SD_RSP_TYPE_R1, NULL, 0);
1712 if (retval != STATUS_SUCCESS) { 1717 if (retval != STATUS_SUCCESS) {
1713 if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) { 1718 if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
1714 rtsx_write_register(chip, SD_CFG3, 1719 rtsx_write_register(chip, SD_CFG3,
1715 SD_RSP_80CLK_TIMEOUT_EN, 0); 1720 SD_RSP_80CLK_TIMEOUT_EN, 0);
1716 rtsx_trace(chip); 1721 rtsx_trace(chip);
1717 return STATUS_FAIL; 1722 return STATUS_FAIL;
1718 } 1723 }
@@ -1730,7 +1735,7 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1730 1735
1731static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) 1736static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1732{ 1737{
1733 struct sd_info *sd_card = &(chip->sd_card); 1738 struct sd_info *sd_card = &chip->sd_card;
1734 int retval; 1739 int retval;
1735 u8 cmd[5], bus_width; 1740 u8 cmd[5], bus_width;
1736 1741
@@ -1770,8 +1775,8 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1770 cmd[3] = 0; 1775 cmd[3] = 0;
1771 cmd[4] = 0; 1776 cmd[4] = 0;
1772 1777
1773 retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, 1778 retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1,
1774 cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100); 1779 bus_width, sd_card->raw_csd, 16, 100);
1775 if (retval != STATUS_SUCCESS) { 1780 if (retval != STATUS_SUCCESS) {
1776 rtsx_clear_sd_error(chip); 1781 rtsx_clear_sd_error(chip);
1777 rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); 1782 rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
@@ -1787,7 +1792,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1787 } 1792 }
1788 1793
1789 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, 1794 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
1790 NULL, 0); 1795 NULL, 0);
1791 1796
1792 return STATUS_SUCCESS; 1797 return STATUS_SUCCESS;
1793} 1798}
@@ -1795,7 +1800,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
1795static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, 1800static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
1796 u8 tune_dir) 1801 u8 tune_dir)
1797{ 1802{
1798 struct sd_info *sd_card = &(chip->sd_card); 1803 struct sd_info *sd_card = &chip->sd_card;
1799 struct timing_phase_path path[MAX_PHASE + 1]; 1804 struct timing_phase_path path[MAX_PHASE + 1];
1800 int i, j, cont_path_cnt; 1805 int i, j, cont_path_cnt;
1801 bool new_block; 1806 bool new_block;
@@ -1808,7 +1813,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
1808 else 1813 else
1809 final_phase = (u8)chip->sd_default_tx_phase; 1814 final_phase = (u8)chip->sd_default_tx_phase;
1810 1815
1811 goto Search_Finish; 1816 goto search_finish;
1812 } 1817 }
1813 1818
1814 cont_path_cnt = 0; 1819 cont_path_cnt = 0;
@@ -1839,7 +1844,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
1839 1844
1840 if (cont_path_cnt == 0) { 1845 if (cont_path_cnt == 0) {
1841 dev_dbg(rtsx_dev(chip), "No continuous phase path\n"); 1846 dev_dbg(rtsx_dev(chip), "No continuous phase path\n");
1842 goto Search_Finish; 1847 goto search_finish;
1843 } else { 1848 } else {
1844 int idx = cont_path_cnt - 1; 1849 int idx = cont_path_cnt - 1;
1845 1850
@@ -1848,7 +1853,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
1848 } 1853 }
1849 1854
1850 if ((path[0].start == 0) && 1855 if ((path[0].start == 0) &&
1851 (path[cont_path_cnt - 1].end == MAX_PHASE)) { 1856 (path[cont_path_cnt - 1].end == MAX_PHASE)) {
1852 path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1; 1857 path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
1853 path[0].len += path[cont_path_cnt - 1].len; 1858 path[0].len += path[cont_path_cnt - 1].len;
1854 path[0].mid = path[0].start + path[0].len / 2; 1859 path[0].mid = path[0].start + path[0].len / 2;
@@ -1906,14 +1911,14 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
1906 } 1911 }
1907 } 1912 }
1908 1913
1909Search_Finish: 1914search_finish:
1910 dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase); 1915 dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase);
1911 return final_phase; 1916 return final_phase;
1912} 1917}
1913 1918
1914static int sd_tuning_rx(struct rtsx_chip *chip) 1919static int sd_tuning_rx(struct rtsx_chip *chip)
1915{ 1920{
1916 struct sd_info *sd_card = &(chip->sd_card); 1921 struct sd_info *sd_card = &chip->sd_card;
1917 int retval; 1922 int retval;
1918 int i, j; 1923 int i, j;
1919 u32 raw_phase_map[3], phase_map; 1924 u32 raw_phase_map[3], phase_map;
@@ -1974,7 +1979,7 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
1974 1979
1975static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip) 1980static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
1976{ 1981{
1977 struct sd_info *sd_card = &(chip->sd_card); 1982 struct sd_info *sd_card = &chip->sd_card;
1978 int retval; 1983 int retval;
1979 int i; 1984 int i;
1980 u32 phase_map; 1985 u32 phase_map;
@@ -1992,7 +1997,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
1992 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { 1997 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
1993 sd_set_err_code(chip, SD_NO_CARD); 1998 sd_set_err_code(chip, SD_NO_CARD);
1994 rtsx_write_register(chip, SD_CFG3, 1999 rtsx_write_register(chip, SD_CFG3,
1995 SD_RSP_80CLK_TIMEOUT_EN, 0); 2000 SD_RSP_80CLK_TIMEOUT_EN, 0);
1996 rtsx_trace(chip); 2001 rtsx_trace(chip);
1997 return STATUS_FAIL; 2002 return STATUS_FAIL;
1998 } 2003 }
@@ -2002,10 +2007,10 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
2002 continue; 2007 continue;
2003 2008
2004 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, 2009 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
2005 sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 2010 sd_card->sd_addr, SD_RSP_TYPE_R1,
2006 0); 2011 NULL, 0);
2007 if ((retval == STATUS_SUCCESS) || 2012 if ((retval == STATUS_SUCCESS) ||
2008 !sd_check_err_code(chip, SD_RSP_TIMEOUT)) 2013 !sd_check_err_code(chip, SD_RSP_TIMEOUT))
2009 phase_map |= 1 << i; 2014 phase_map |= 1 << i;
2010 } 2015 }
2011 2016
@@ -2039,7 +2044,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
2039 2044
2040static int sd_tuning_tx(struct rtsx_chip *chip) 2045static int sd_tuning_tx(struct rtsx_chip *chip)
2041{ 2046{
2042 struct sd_info *sd_card = &(chip->sd_card); 2047 struct sd_info *sd_card = &chip->sd_card;
2043 int retval; 2048 int retval;
2044 int i, j; 2049 int i, j;
2045 u32 raw_phase_map[3], phase_map; 2050 u32 raw_phase_map[3], phase_map;
@@ -2131,7 +2136,7 @@ static int sd_ddr_tuning(struct rtsx_chip *chip)
2131 } 2136 }
2132 } else { 2137 } else {
2133 retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase, 2138 retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
2134 TUNE_TX); 2139 TUNE_TX);
2135 if (retval != STATUS_SUCCESS) { 2140 if (retval != STATUS_SUCCESS) {
2136 rtsx_trace(chip); 2141 rtsx_trace(chip);
2137 return STATUS_FAIL; 2142 return STATUS_FAIL;
@@ -2167,7 +2172,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
2167 } 2172 }
2168 } else { 2173 } else {
2169 retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase, 2174 retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
2170 TUNE_TX); 2175 TUNE_TX);
2171 if (retval != STATUS_SUCCESS) { 2176 if (retval != STATUS_SUCCESS) {
2172 rtsx_trace(chip); 2177 rtsx_trace(chip);
2173 return STATUS_FAIL; 2178 return STATUS_FAIL;
@@ -2193,7 +2198,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
2193 2198
2194int sd_switch_clock(struct rtsx_chip *chip) 2199int sd_switch_clock(struct rtsx_chip *chip)
2195{ 2200{
2196 struct sd_info *sd_card = &(chip->sd_card); 2201 struct sd_info *sd_card = &chip->sd_card;
2197 int retval; 2202 int retval;
2198 int re_tuning = 0; 2203 int re_tuning = 0;
2199 2204
@@ -2231,7 +2236,7 @@ int sd_switch_clock(struct rtsx_chip *chip)
2231 2236
2232static int sd_prepare_reset(struct rtsx_chip *chip) 2237static int sd_prepare_reset(struct rtsx_chip *chip)
2233{ 2238{
2234 struct sd_info *sd_card = &(chip->sd_card); 2239 struct sd_info *sd_card = &chip->sd_card;
2235 int retval; 2240 int retval;
2236 2241
2237 if (chip->asic_code) 2242 if (chip->asic_code)
@@ -2286,31 +2291,36 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
2286 2291
2287 if (CHECK_PID(chip, 0x5208)) { 2292 if (CHECK_PID(chip, 0x5208)) {
2288 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, 2293 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
2289 XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD); 2294 XD_D3_PD | SD_D7_PD | SD_CLK_PD |
2295 SD_D5_PD);
2290 if (retval) { 2296 if (retval) {
2291 rtsx_trace(chip); 2297 rtsx_trace(chip);
2292 return retval; 2298 return retval;
2293 } 2299 }
2294 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, 2300 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
2295 SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD); 2301 SD_D6_PD | SD_D0_PD | SD_D1_PD |
2302 XD_D5_PD);
2296 if (retval) { 2303 if (retval) {
2297 rtsx_trace(chip); 2304 rtsx_trace(chip);
2298 return retval; 2305 return retval;
2299 } 2306 }
2300 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, 2307 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
2301 SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 2308 SD_D4_PD | XD_CE_PD | XD_CLE_PD |
2309 XD_CD_PU);
2302 if (retval) { 2310 if (retval) {
2303 rtsx_trace(chip); 2311 rtsx_trace(chip);
2304 return retval; 2312 return retval;
2305 } 2313 }
2306 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, 2314 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
2307 XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); 2315 XD_RDY_PD | SD_D3_PD | SD_D2_PD |
2316 XD_ALE_PD);
2308 if (retval) { 2317 if (retval) {
2309 rtsx_trace(chip); 2318 rtsx_trace(chip);
2310 return retval; 2319 return retval;
2311 } 2320 }
2312 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, 2321 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
2313 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 2322 MS_INS_PU | SD_WP_PD | SD_CD_PU |
2323 SD_CMD_PD);
2314 if (retval) { 2324 if (retval) {
2315 rtsx_trace(chip); 2325 rtsx_trace(chip);
2316 return retval; 2326 return retval;
@@ -2361,27 +2371,27 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip)
2361 2371
2362 if (CHECK_PID(chip, 0x5208)) { 2372 if (CHECK_PID(chip, 0x5208)) {
2363 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 2373 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
2364 XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU); 2374 XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
2365 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 2375 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
2366 SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD); 2376 SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
2367 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 2377 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
2368 SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 2378 SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
2369 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 2379 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
2370 XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD); 2380 XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
2371 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 2381 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
2372 MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU); 2382 MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
2373 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 2383 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
2374 MS_D5_PD | MS_D4_PD); 2384 MS_D5_PD | MS_D4_PD);
2375 } else if (CHECK_PID(chip, 0x5288)) { 2385 } else if (CHECK_PID(chip, 0x5288)) {
2376 if (CHECK_BARO_PKG(chip, QFN)) { 2386 if (CHECK_BARO_PKG(chip, QFN)) {
2377 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 2387 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
2378 0xA8); 2388 0xA8);
2379 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 2389 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
2380 0x5A); 2390 0x5A);
2381 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 2391 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
2382 0x95); 2392 0x95);
2383 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 2393 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
2384 0xAA); 2394 0xAA);
2385 } 2395 }
2386 } 2396 }
2387 2397
@@ -2478,7 +2488,7 @@ static int sd_dummy_clock(struct rtsx_chip *chip)
2478 2488
2479static int sd_read_lba0(struct rtsx_chip *chip) 2489static int sd_read_lba0(struct rtsx_chip *chip)
2480{ 2490{
2481 struct sd_info *sd_card = &(chip->sd_card); 2491 struct sd_info *sd_card = &chip->sd_card;
2482 int retval; 2492 int retval;
2483 u8 cmd[5], bus_width; 2493 u8 cmd[5], bus_width;
2484 2494
@@ -2499,8 +2509,8 @@ static int sd_read_lba0(struct rtsx_chip *chip)
2499 bus_width = SD_BUS_WIDTH_1; 2509 bus_width = SD_BUS_WIDTH_1;
2500 } 2510 }
2501 2511
2502 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 2512 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1,
2503 5, 512, 1, bus_width, NULL, 0, 100); 2513 bus_width, NULL, 0, 100);
2504 if (retval != STATUS_SUCCESS) { 2514 if (retval != STATUS_SUCCESS) {
2505 rtsx_clear_sd_error(chip); 2515 rtsx_clear_sd_error(chip);
2506 rtsx_trace(chip); 2516 rtsx_trace(chip);
@@ -2512,14 +2522,14 @@ static int sd_read_lba0(struct rtsx_chip *chip)
2512 2522
2513static int sd_check_wp_state(struct rtsx_chip *chip) 2523static int sd_check_wp_state(struct rtsx_chip *chip)
2514{ 2524{
2515 struct sd_info *sd_card = &(chip->sd_card); 2525 struct sd_info *sd_card = &chip->sd_card;
2516 int retval; 2526 int retval;
2517 u32 val; 2527 u32 val;
2518 u16 sd_card_type; 2528 u16 sd_card_type;
2519 u8 cmd[5], buf[64]; 2529 u8 cmd[5], buf[64];
2520 2530
2521 retval = sd_send_cmd_get_rsp(chip, APP_CMD, 2531 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
2522 sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); 2532 SD_RSP_TYPE_R1, NULL, 0);
2523 if (retval != STATUS_SUCCESS) { 2533 if (retval != STATUS_SUCCESS) {
2524 rtsx_trace(chip); 2534 rtsx_trace(chip);
2525 return STATUS_FAIL; 2535 return STATUS_FAIL;
@@ -2532,12 +2542,12 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
2532 cmd[4] = 0; 2542 cmd[4] = 0;
2533 2543
2534 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, 2544 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
2535 SD_BUS_WIDTH_4, buf, 64, 250); 2545 SD_BUS_WIDTH_4, buf, 64, 250);
2536 if (retval != STATUS_SUCCESS) { 2546 if (retval != STATUS_SUCCESS) {
2537 rtsx_clear_sd_error(chip); 2547 rtsx_clear_sd_error(chip);
2538 2548
2539 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 2549 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
2540 SD_RSP_TYPE_R1, NULL, 0); 2550 SD_RSP_TYPE_R1, NULL, 0);
2541 rtsx_trace(chip); 2551 rtsx_trace(chip);
2542 return STATUS_FAIL; 2552 return STATUS_FAIL;
2543 } 2553 }
@@ -2562,7 +2572,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
2562 2572
2563static int reset_sd(struct rtsx_chip *chip) 2573static int reset_sd(struct rtsx_chip *chip)
2564{ 2574{
2565 struct sd_info *sd_card = &(chip->sd_card); 2575 struct sd_info *sd_card = &chip->sd_card;
2566 bool hi_cap_flow = false; 2576 bool hi_cap_flow = false;
2567 int retval, i = 0, j = 0, k = 0; 2577 int retval, i = 0, j = 0, k = 0;
2568 bool sd_dont_switch = false; 2578 bool sd_dont_switch = false;
@@ -2575,7 +2585,7 @@ static int reset_sd(struct rtsx_chip *chip)
2575 2585
2576 SET_SD(sd_card); 2586 SET_SD(sd_card);
2577 2587
2578Switch_Fail: 2588switch_fail:
2579 2589
2580 i = 0; 2590 i = 0;
2581 j = 0; 2591 j = 0;
@@ -2589,11 +2599,11 @@ Switch_Fail:
2589 2599
2590 retval = sd_prepare_reset(chip); 2600 retval = sd_prepare_reset(chip);
2591 if (retval != STATUS_SUCCESS) 2601 if (retval != STATUS_SUCCESS)
2592 goto Status_Fail; 2602 goto status_fail;
2593 2603
2594 retval = sd_dummy_clock(chip); 2604 retval = sd_dummy_clock(chip);
2595 if (retval != STATUS_SUCCESS) 2605 if (retval != STATUS_SUCCESS)
2596 goto Status_Fail; 2606 goto status_fail;
2597 2607
2598 if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) { 2608 if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
2599 int rty_cnt = 0; 2609 int rty_cnt = 0;
@@ -2601,11 +2611,11 @@ Switch_Fail:
2601 for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) { 2611 for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
2602 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { 2612 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
2603 sd_set_err_code(chip, SD_NO_CARD); 2613 sd_set_err_code(chip, SD_NO_CARD);
2604 goto Status_Fail; 2614 goto status_fail;
2605 } 2615 }
2606 2616
2607 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, 2617 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
2608 SD_RSP_TYPE_R4, rsp, 5); 2618 SD_RSP_TYPE_R4, rsp, 5);
2609 if (retval == STATUS_SUCCESS) { 2619 if (retval == STATUS_SUCCESS) {
2610 int func_num = (rsp[1] >> 4) & 0x07; 2620 int func_num = (rsp[1] >> 4) & 0x07;
2611 2621
@@ -2613,7 +2623,7 @@ Switch_Fail:
2613 dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n", 2623 dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
2614 func_num); 2624 func_num);
2615 chip->sd_io = 1; 2625 chip->sd_io = 1;
2616 goto Status_Fail; 2626 goto status_fail;
2617 } 2627 }
2618 2628
2619 break; 2629 break;
@@ -2630,14 +2640,14 @@ Switch_Fail:
2630 /* Start Initialization Process of SD Card */ 2640 /* Start Initialization Process of SD Card */
2631RTY_SD_RST: 2641RTY_SD_RST:
2632 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, 2642 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
2633 NULL, 0); 2643 NULL, 0);
2634 if (retval != STATUS_SUCCESS) 2644 if (retval != STATUS_SUCCESS)
2635 goto Status_Fail; 2645 goto status_fail;
2636 2646
2637 wait_timeout(20); 2647 wait_timeout(20);
2638 2648
2639 retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA, 2649 retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
2640 SD_RSP_TYPE_R7, rsp, 5); 2650 SD_RSP_TYPE_R7, rsp, 5);
2641 if (retval == STATUS_SUCCESS) { 2651 if (retval == STATUS_SUCCESS) {
2642 if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) { 2652 if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
2643 hi_cap_flow = true; 2653 hi_cap_flow = true;
@@ -2649,37 +2659,37 @@ RTY_SD_RST:
2649 voltage = SUPPORT_VOLTAGE; 2659 voltage = SUPPORT_VOLTAGE;
2650 2660
2651 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, 2661 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
2652 SD_RSP_TYPE_R0, NULL, 0); 2662 SD_RSP_TYPE_R0, NULL, 0);
2653 if (retval != STATUS_SUCCESS) 2663 if (retval != STATUS_SUCCESS)
2654 goto Status_Fail; 2664 goto status_fail;
2655 2665
2656 wait_timeout(20); 2666 wait_timeout(20);
2657 } 2667 }
2658 2668
2659 do { 2669 do {
2660 retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1, 2670 retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
2661 NULL, 0); 2671 NULL, 0);
2662 if (retval != STATUS_SUCCESS) { 2672 if (retval != STATUS_SUCCESS) {
2663 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { 2673 if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
2664 sd_set_err_code(chip, SD_NO_CARD); 2674 sd_set_err_code(chip, SD_NO_CARD);
2665 goto Status_Fail; 2675 goto status_fail;
2666 } 2676 }
2667 2677
2668 j++; 2678 j++;
2669 if (j < 3) 2679 if (j < 3)
2670 goto RTY_SD_RST; 2680 goto RTY_SD_RST;
2671 else 2681 else
2672 goto Status_Fail; 2682 goto status_fail;
2673 } 2683 }
2674 2684
2675 retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage, 2685 retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
2676 SD_RSP_TYPE_R3, rsp, 5); 2686 SD_RSP_TYPE_R3, rsp, 5);
2677 if (retval != STATUS_SUCCESS) { 2687 if (retval != STATUS_SUCCESS) {
2678 k++; 2688 k++;
2679 if (k < 3) 2689 if (k < 3)
2680 goto RTY_SD_RST; 2690 goto RTY_SD_RST;
2681 else 2691 else
2682 goto Status_Fail; 2692 goto status_fail;
2683 } 2693 }
2684 2694
2685 i++; 2695 i++;
@@ -2687,7 +2697,7 @@ RTY_SD_RST:
2687 } while (!(rsp[1] & 0x80) && (i < 255)); 2697 } while (!(rsp[1] & 0x80) && (i < 255));
2688 2698
2689 if (i == 255) 2699 if (i == 255)
2690 goto Status_Fail; 2700 goto status_fail;
2691 2701
2692 if (hi_cap_flow) { 2702 if (hi_cap_flow) {
2693 if (rsp[1] & 0x40) 2703 if (rsp[1] & 0x40)
@@ -2705,19 +2715,19 @@ RTY_SD_RST:
2705 if (support_1v8) { 2715 if (support_1v8) {
2706 retval = sd_voltage_switch(chip); 2716 retval = sd_voltage_switch(chip);
2707 if (retval != STATUS_SUCCESS) 2717 if (retval != STATUS_SUCCESS)
2708 goto Status_Fail; 2718 goto status_fail;
2709 } 2719 }
2710 2720
2711 retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, 2721 retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
2712 NULL, 0); 2722 NULL, 0);
2713 if (retval != STATUS_SUCCESS) 2723 if (retval != STATUS_SUCCESS)
2714 goto Status_Fail; 2724 goto status_fail;
2715 2725
2716 for (i = 0; i < 3; i++) { 2726 for (i = 0; i < 3; i++) {
2717 retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0, 2727 retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
2718 SD_RSP_TYPE_R6, rsp, 5); 2728 SD_RSP_TYPE_R6, rsp, 5);
2719 if (retval != STATUS_SUCCESS) 2729 if (retval != STATUS_SUCCESS)
2720 goto Status_Fail; 2730 goto status_fail;
2721 2731
2722 sd_card->sd_addr = (u32)rsp[1] << 24; 2732 sd_card->sd_addr = (u32)rsp[1] << 24;
2723 sd_card->sd_addr += (u32)rsp[2] << 16; 2733 sd_card->sd_addr += (u32)rsp[2] << 16;
@@ -2728,17 +2738,17 @@ RTY_SD_RST:
2728 2738
2729 retval = sd_check_csd(chip, 1); 2739 retval = sd_check_csd(chip, 1);
2730 if (retval != STATUS_SUCCESS) 2740 if (retval != STATUS_SUCCESS)
2731 goto Status_Fail; 2741 goto status_fail;
2732 2742
2733 retval = sd_select_card(chip, 1); 2743 retval = sd_select_card(chip, 1);
2734 if (retval != STATUS_SUCCESS) 2744 if (retval != STATUS_SUCCESS)
2735 goto Status_Fail; 2745 goto status_fail;
2736 2746
2737#ifdef SUPPORT_SD_LOCK 2747#ifdef SUPPORT_SD_LOCK
2738SD_UNLOCK_ENTRY: 2748SD_UNLOCK_ENTRY:
2739 retval = sd_update_lock_status(chip); 2749 retval = sd_update_lock_status(chip);
2740 if (retval != STATUS_SUCCESS) 2750 if (retval != STATUS_SUCCESS)
2741 goto Status_Fail; 2751 goto status_fail;
2742 2752
2743 if (sd_card->sd_lock_status & SD_LOCKED) { 2753 if (sd_card->sd_lock_status & SD_LOCKED) {
2744 sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST); 2754 sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
@@ -2749,25 +2759,25 @@ SD_UNLOCK_ENTRY:
2749#endif 2759#endif
2750 2760
2751 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, 2761 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
2752 SD_RSP_TYPE_R1, NULL, 0); 2762 SD_RSP_TYPE_R1, NULL, 0);
2753 if (retval != STATUS_SUCCESS) 2763 if (retval != STATUS_SUCCESS)
2754 goto Status_Fail; 2764 goto status_fail;
2755 2765
2756 retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0, 2766 retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
2757 SD_RSP_TYPE_R1, NULL, 0); 2767 SD_RSP_TYPE_R1, NULL, 0);
2758 if (retval != STATUS_SUCCESS) 2768 if (retval != STATUS_SUCCESS)
2759 goto Status_Fail; 2769 goto status_fail;
2760 2770
2761 if (support_1v8) { 2771 if (support_1v8) {
2762 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, 2772 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
2763 SD_RSP_TYPE_R1, NULL, 0); 2773 SD_RSP_TYPE_R1, NULL, 0);
2764 if (retval != STATUS_SUCCESS) 2774 if (retval != STATUS_SUCCESS)
2765 goto Status_Fail; 2775 goto status_fail;
2766 2776
2767 retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, 2777 retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
2768 SD_RSP_TYPE_R1, NULL, 0); 2778 SD_RSP_TYPE_R1, NULL, 0);
2769 if (retval != STATUS_SUCCESS) 2779 if (retval != STATUS_SUCCESS)
2770 goto Status_Fail; 2780 goto status_fail;
2771 2781
2772 switch_bus_width = SD_BUS_WIDTH_4; 2782 switch_bus_width = SD_BUS_WIDTH_4;
2773 } else { 2783 } else {
@@ -2775,13 +2785,13 @@ SD_UNLOCK_ENTRY:
2775 } 2785 }
2776 2786
2777 retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, 2787 retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
2778 NULL, 0); 2788 NULL, 0);
2779 if (retval != STATUS_SUCCESS) 2789 if (retval != STATUS_SUCCESS)
2780 goto Status_Fail; 2790 goto status_fail;
2781 2791
2782 retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0); 2792 retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
2783 if (retval != STATUS_SUCCESS) 2793 if (retval != STATUS_SUCCESS)
2784 goto Status_Fail; 2794 goto status_fail;
2785 2795
2786 if (!(sd_card->raw_csd[4] & 0x40)) 2796 if (!(sd_card->raw_csd[4] & 0x40))
2787 sd_dont_switch = true; 2797 sd_dont_switch = true;
@@ -2804,7 +2814,7 @@ SD_UNLOCK_ENTRY:
2804 sd_dont_switch = true; 2814 sd_dont_switch = true;
2805 try_sdio = false; 2815 try_sdio = false;
2806 2816
2807 goto Switch_Fail; 2817 goto switch_fail;
2808 } 2818 }
2809 } else { 2819 } else {
2810 if (support_1v8) { 2820 if (support_1v8) {
@@ -2812,21 +2822,21 @@ SD_UNLOCK_ENTRY:
2812 sd_dont_switch = true; 2822 sd_dont_switch = true;
2813 try_sdio = false; 2823 try_sdio = false;
2814 2824
2815 goto Switch_Fail; 2825 goto switch_fail;
2816 } 2826 }
2817 } 2827 }
2818 } 2828 }
2819 2829
2820 if (!support_1v8) { 2830 if (!support_1v8) {
2821 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, 2831 retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
2822 SD_RSP_TYPE_R1, NULL, 0); 2832 SD_RSP_TYPE_R1, NULL, 0);
2823 if (retval != STATUS_SUCCESS) 2833 if (retval != STATUS_SUCCESS)
2824 goto Status_Fail; 2834 goto status_fail;
2825 2835
2826 retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, 2836 retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
2827 SD_RSP_TYPE_R1, NULL, 0); 2837 SD_RSP_TYPE_R1, NULL, 0);
2828 if (retval != STATUS_SUCCESS) 2838 if (retval != STATUS_SUCCESS)
2829 goto Status_Fail; 2839 goto status_fail;
2830 } 2840 }
2831 2841
2832#ifdef SUPPORT_SD_LOCK 2842#ifdef SUPPORT_SD_LOCK
@@ -2845,7 +2855,7 @@ SD_UNLOCK_ENTRY:
2845 2855
2846 retval = sd_set_init_para(chip); 2856 retval = sd_set_init_para(chip);
2847 if (retval != STATUS_SUCCESS) 2857 if (retval != STATUS_SUCCESS)
2848 goto Status_Fail; 2858 goto status_fail;
2849 2859
2850 if (CHK_SD_DDR50(sd_card)) 2860 if (CHK_SD_DDR50(sd_card))
2851 retval = sd_ddr_tuning(chip); 2861 retval = sd_ddr_tuning(chip);
@@ -2854,20 +2864,20 @@ SD_UNLOCK_ENTRY:
2854 2864
2855 if (retval != STATUS_SUCCESS) { 2865 if (retval != STATUS_SUCCESS) {
2856 if (sd20_mode) { 2866 if (sd20_mode) {
2857 goto Status_Fail; 2867 goto status_fail;
2858 } else { 2868 } else {
2859 retval = sd_init_power(chip); 2869 retval = sd_init_power(chip);
2860 if (retval != STATUS_SUCCESS) 2870 if (retval != STATUS_SUCCESS)
2861 goto Status_Fail; 2871 goto status_fail;
2862 2872
2863 try_sdio = false; 2873 try_sdio = false;
2864 sd20_mode = true; 2874 sd20_mode = true;
2865 goto Switch_Fail; 2875 goto switch_fail;
2866 } 2876 }
2867 } 2877 }
2868 2878
2869 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 2879 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
2870 SD_RSP_TYPE_R1, NULL, 0); 2880 SD_RSP_TYPE_R1, NULL, 0);
2871 2881
2872 if (CHK_SD_DDR50(sd_card)) { 2882 if (CHK_SD_DDR50(sd_card)) {
2873 retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000); 2883 retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -2879,15 +2889,15 @@ SD_UNLOCK_ENTRY:
2879 retval = sd_read_lba0(chip); 2889 retval = sd_read_lba0(chip);
2880 if (retval != STATUS_SUCCESS) { 2890 if (retval != STATUS_SUCCESS) {
2881 if (sd20_mode) { 2891 if (sd20_mode) {
2882 goto Status_Fail; 2892 goto status_fail;
2883 } else { 2893 } else {
2884 retval = sd_init_power(chip); 2894 retval = sd_init_power(chip);
2885 if (retval != STATUS_SUCCESS) 2895 if (retval != STATUS_SUCCESS)
2886 goto Status_Fail; 2896 goto status_fail;
2887 2897
2888 try_sdio = false; 2898 try_sdio = false;
2889 sd20_mode = true; 2899 sd20_mode = true;
2890 goto Switch_Fail; 2900 goto switch_fail;
2891 } 2901 }
2892 } 2902 }
2893 } 2903 }
@@ -2895,7 +2905,7 @@ SD_UNLOCK_ENTRY:
2895 2905
2896 retval = sd_check_wp_state(chip); 2906 retval = sd_check_wp_state(chip);
2897 if (retval != STATUS_SUCCESS) 2907 if (retval != STATUS_SUCCESS)
2898 goto Status_Fail; 2908 goto status_fail;
2899 2909
2900 chip->card_bus_width[chip->card2lun[SD_CARD]] = 4; 2910 chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
2901 2911
@@ -2918,21 +2928,21 @@ SD_UNLOCK_ENTRY:
2918 2928
2919 return STATUS_SUCCESS; 2929 return STATUS_SUCCESS;
2920 2930
2921Status_Fail: 2931status_fail:
2922 rtsx_trace(chip); 2932 rtsx_trace(chip);
2923 return STATUS_FAIL; 2933 return STATUS_FAIL;
2924} 2934}
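
The reset_sd() hunks above only rename the CamelCase error labels (Status_Fail, Switch_Fail) to the lowercase spelling preferred by the kernel coding style; the single-exit error path itself is unchanged, with every failure still branching to one label that records a trace and returns STATUS_FAIL. A minimal userspace sketch of that pattern, using stand-in STATUS_* values and helper functions rather than the driver's real symbols:

    /*
     * Sketch only: STATUS_* values, do_step() and trace() are
     * placeholders for illustration, not the rts5208 driver's symbols.
     */
    #include <stdio.h>

    #define STATUS_SUCCESS 0
    #define STATUS_FAIL    1

    static int do_step(int n) { return n < 3 ? STATUS_SUCCESS : STATUS_FAIL; }
    static void trace(void) { fprintf(stderr, "failure traced\n"); }

    static int reset_example(void)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    if (do_step(i) != STATUS_SUCCESS)
                            goto status_fail;  /* lowercase label, per coding style */
            }
            return STATUS_SUCCESS;

    status_fail:
            trace();
            return STATUS_FAIL;
    }

    int main(void)
    {
            return reset_example();
    }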
2925 2935
2926static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) 2936static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
2927{ 2937{
2928 struct sd_info *sd_card = &(chip->sd_card); 2938 struct sd_info *sd_card = &chip->sd_card;
2929 int retval; 2939 int retval;
2930 u8 buf[8] = {0}, bus_width, *ptr; 2940 u8 buf[8] = {0}, bus_width, *ptr;
2931 u16 byte_cnt; 2941 u16 byte_cnt;
2932 int len; 2942 int len;
2933 2943
2934 retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL, 2944 retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
2935 0); 2945 0);
2936 if (retval != STATUS_SUCCESS) { 2946 if (retval != STATUS_SUCCESS) {
2937 rtsx_trace(chip); 2947 rtsx_trace(chip);
2938 return SWITCH_FAIL; 2948 return SWITCH_FAIL;
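
The sd_card initialisation lines in these hunks drop the redundant parentheses in &(chip->sd_card): the -> operator binds tighter than unary &, so &chip->sd_card already takes the address of the member. A small self-contained check of that precedence, using toy structs rather than the driver's struct rtsx_chip:

    /* Sketch only: toy structs, not the driver's types. */
    #include <assert.h>

    struct inner { int x; };
    struct outer { struct inner member; };

    int main(void)
    {
            struct outer o = { { 42 } };
            struct outer *p = &o;

            /* -> binds tighter than unary &, so the parentheses add nothing. */
            assert(&(p->member) == &p->member);
            return 0;
    }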
@@ -2957,8 +2967,8 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
2957 return SWITCH_ERR; 2967 return SWITCH_ERR;
2958 } 2968 }
2959 2969
2960 retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, 2970 retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1,
2961 NULL, 0, byte_cnt, 1, bus_width, buf, len, 100); 2971 bus_width, buf, len, 100);
2962 if (retval != STATUS_SUCCESS) { 2972 if (retval != STATUS_SUCCESS) {
2963 rtsx_clear_sd_error(chip); 2973 rtsx_clear_sd_error(chip);
2964 rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0); 2974 rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
@@ -2980,23 +2990,23 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
2980 2990
2981 if (width == MMC_8BIT_BUS) 2991 if (width == MMC_8BIT_BUS)
2982 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 2992 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
2983 0xFF, 0x08); 2993 0xFF, 0x08);
2984 else 2994 else
2985 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 2995 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
2986 0xFF, 0x04); 2996 0xFF, 0x04);
2987 2997
2988 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1); 2998 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
2989 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0); 2999 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
2990 3000
2991 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 3001 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 |
2992 SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| 3002 SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
2993 SD_CHECK_CRC7 | SD_RSP_LEN_6); 3003 SD_CHECK_CRC7 | SD_RSP_LEN_6);
2994 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, 3004 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
2995 PINGPONG_BUFFER); 3005 PINGPONG_BUFFER);
2996 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 3006 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
2997 SD_TM_NORMAL_READ | SD_TRANSFER_START); 3007 SD_TM_NORMAL_READ | SD_TRANSFER_START);
2998 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, 3008 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
2999 SD_TRANSFER_END); 3009 SD_TRANSFER_END);
3000 3010
3001 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0); 3011 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
3002 if (width == MMC_8BIT_BUS) 3012 if (width == MMC_8BIT_BUS)
@@ -3024,9 +3034,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
3024 arg = 0x03B70200; 3034 arg = 0x03B70200;
3025 3035
3026 retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, 3036 retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
3027 SD_RSP_TYPE_R1b, rsp, 5); 3037 SD_RSP_TYPE_R1b, rsp, 5);
3028 if ((retval == STATUS_SUCCESS) && 3038 if ((retval == STATUS_SUCCESS) &&
3029 !(rsp[4] & MMC_SWITCH_ERR)) 3039 !(rsp[4] & MMC_SWITCH_ERR))
3030 return SWITCH_SUCCESS; 3040 return SWITCH_SUCCESS;
3031 } 3041 }
3032 } else { 3042 } else {
@@ -3041,9 +3051,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
3041 arg = 0x03B70100; 3051 arg = 0x03B70100;
3042 3052
3043 retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, 3053 retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
3044 SD_RSP_TYPE_R1b, rsp, 5); 3054 SD_RSP_TYPE_R1b, rsp, 5);
3045 if ((retval == STATUS_SUCCESS) && 3055 if ((retval == STATUS_SUCCESS) &&
3046 !(rsp[4] & MMC_SWITCH_ERR)) 3056 !(rsp[4] & MMC_SWITCH_ERR))
3047 return SWITCH_SUCCESS; 3057 return SWITCH_SUCCESS;
3048 } 3058 }
3049 } 3059 }
@@ -3054,7 +3064,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
3054 3064
3055static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) 3065static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3056{ 3066{
3057 struct sd_info *sd_card = &(chip->sd_card); 3067 struct sd_info *sd_card = &chip->sd_card;
3058 int retval; 3068 int retval;
3059 u8 *ptr, card_type, card_type_mask = 0; 3069 u8 *ptr, card_type, card_type_mask = 0;
3060 3070
@@ -3065,7 +3075,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3065 rtsx_init_cmd(chip); 3075 rtsx_init_cmd(chip);
3066 3076
3067 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 3077 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
3068 0x40 | SEND_EXT_CSD); 3078 0x40 | SEND_EXT_CSD);
3069 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0); 3079 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
3070 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0); 3080 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
3071 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0); 3081 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
@@ -3077,14 +3087,14 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3077 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0); 3087 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
3078 3088
3079 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 3089 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
3080 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| 3090 SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
3081 SD_CHECK_CRC7 | SD_RSP_LEN_6); 3091 SD_CHECK_CRC7 | SD_RSP_LEN_6);
3082 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, 3092 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
3083 PINGPONG_BUFFER); 3093 PINGPONG_BUFFER);
3084 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 3094 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
3085 SD_TM_NORMAL_READ | SD_TRANSFER_START); 3095 SD_TM_NORMAL_READ | SD_TRANSFER_START);
3086 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, 3096 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
3087 SD_TRANSFER_END); 3097 SD_TRANSFER_END);
3088 3098
3089 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0); 3099 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
3090 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0); 3100 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
@@ -3097,7 +3107,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3097 if (retval == -ETIMEDOUT) { 3107 if (retval == -ETIMEDOUT) {
3098 rtsx_clear_sd_error(chip); 3108 rtsx_clear_sd_error(chip);
3099 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 3109 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
3100 SD_RSP_TYPE_R1, NULL, 0); 3110 SD_RSP_TYPE_R1, NULL, 0);
3101 } 3111 }
3102 rtsx_trace(chip); 3112 rtsx_trace(chip);
3103 return STATUS_FAIL; 3113 return STATUS_FAIL;
@@ -3106,7 +3116,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3106 ptr = rtsx_get_cmd_data(chip); 3116 ptr = rtsx_get_cmd_data(chip);
3107 if (ptr[0] & SD_TRANSFER_ERR) { 3117 if (ptr[0] & SD_TRANSFER_ERR) {
3108 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 3118 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
3109 SD_RSP_TYPE_R1, NULL, 0); 3119 SD_RSP_TYPE_R1, NULL, 0);
3110 rtsx_trace(chip); 3120 rtsx_trace(chip);
3111 return STATUS_FAIL; 3121 return STATUS_FAIL;
3112 } 3122 }
@@ -3132,8 +3142,8 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3132 SET_MMC_26M(sd_card); 3142 SET_MMC_26M(sd_card);
3133 } 3143 }
3134 3144
3135 retval = sd_send_cmd_get_rsp(chip, SWITCH, 3145 retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
3136 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5); 3146 SD_RSP_TYPE_R1b, rsp, 5);
3137 if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR)) 3147 if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
3138 CLR_MMC_HS(sd_card); 3148 CLR_MMC_HS(sd_card);
3139 } 3149 }
@@ -3178,7 +3188,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
3178 3188
3179static int reset_mmc(struct rtsx_chip *chip) 3189static int reset_mmc(struct rtsx_chip *chip)
3180{ 3190{
3181 struct sd_info *sd_card = &(chip->sd_card); 3191 struct sd_info *sd_card = &chip->sd_card;
3182 int retval, i = 0, j = 0, k = 0; 3192 int retval, i = 0, j = 0, k = 0;
3183 bool switch_ddr = true; 3193 bool switch_ddr = true;
3184 u8 rsp[16]; 3194 u8 rsp[16];
@@ -3190,7 +3200,7 @@ static int reset_mmc(struct rtsx_chip *chip)
3190 goto MMC_UNLOCK_ENTRY; 3200 goto MMC_UNLOCK_ENTRY;
3191#endif 3201#endif
3192 3202
3193Switch_Fail: 3203switch_fail:
3194 retval = sd_prepare_reset(chip); 3204 retval = sd_prepare_reset(chip);
3195 if (retval != STATUS_SUCCESS) { 3205 if (retval != STATUS_SUCCESS) {
3196 rtsx_trace(chip); 3206 rtsx_trace(chip);
@@ -3201,7 +3211,7 @@ Switch_Fail:
3201 3211
3202RTY_MMC_RST: 3212RTY_MMC_RST:
3203 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, 3213 retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
3204 NULL, 0); 3214 NULL, 0);
3205 if (retval != STATUS_SUCCESS) { 3215 if (retval != STATUS_SUCCESS) {
3206 rtsx_trace(chip); 3216 rtsx_trace(chip);
3207 return STATUS_FAIL; 3217 return STATUS_FAIL;
@@ -3215,11 +3225,11 @@ RTY_MMC_RST:
3215 } 3225 }
3216 3226
3217 retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND, 3227 retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
3218 (SUPPORT_VOLTAGE | 0x40000000), 3228 (SUPPORT_VOLTAGE | 0x40000000),
3219 SD_RSP_TYPE_R3, rsp, 5); 3229 SD_RSP_TYPE_R3, rsp, 5);
3220 if (retval != STATUS_SUCCESS) { 3230 if (retval != STATUS_SUCCESS) {
3221 if (sd_check_err_code(chip, SD_BUSY) || 3231 if (sd_check_err_code(chip, SD_BUSY) ||
3222 sd_check_err_code(chip, SD_TO_ERR)) { 3232 sd_check_err_code(chip, SD_TO_ERR)) {
3223 k++; 3233 k++;
3224 if (k < 20) { 3234 if (k < 20) {
3225 sd_clr_err_code(chip); 3235 sd_clr_err_code(chip);
@@ -3255,7 +3265,7 @@ RTY_MMC_RST:
3255 CLR_MMC_SECTOR_MODE(sd_card); 3265 CLR_MMC_SECTOR_MODE(sd_card);
3256 3266
3257 retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, 3267 retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
3258 NULL, 0); 3268 NULL, 0);
3259 if (retval != STATUS_SUCCESS) { 3269 if (retval != STATUS_SUCCESS) {
3260 rtsx_trace(chip); 3270 rtsx_trace(chip);
3261 return STATUS_FAIL; 3271 return STATUS_FAIL;
@@ -3263,7 +3273,7 @@ RTY_MMC_RST:
3263 3273
3264 sd_card->sd_addr = 0x00100000; 3274 sd_card->sd_addr = 0x00100000;
3265 retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr, 3275 retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
3266 SD_RSP_TYPE_R6, rsp, 5); 3276 SD_RSP_TYPE_R6, rsp, 5);
3267 if (retval != STATUS_SUCCESS) { 3277 if (retval != STATUS_SUCCESS) {
3268 rtsx_trace(chip); 3278 rtsx_trace(chip);
3269 return STATUS_FAIL; 3279 return STATUS_FAIL;
@@ -3284,7 +3294,7 @@ RTY_MMC_RST:
3284 } 3294 }
3285 3295
3286 retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, 3296 retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
3287 NULL, 0); 3297 NULL, 0);
3288 if (retval != STATUS_SUCCESS) { 3298 if (retval != STATUS_SUCCESS) {
3289 rtsx_trace(chip); 3299 rtsx_trace(chip);
3290 return STATUS_FAIL; 3300 return STATUS_FAIL;
@@ -3319,7 +3329,7 @@ MMC_UNLOCK_ENTRY:
3319 } 3329 }
3320 sd_card->mmc_dont_switch_bus = 1; 3330 sd_card->mmc_dont_switch_bus = 1;
3321 rtsx_trace(chip); 3331 rtsx_trace(chip);
3322 goto Switch_Fail; 3332 goto switch_fail;
3323 } 3333 }
3324 } 3334 }
3325 3335
@@ -3345,7 +3355,7 @@ MMC_UNLOCK_ENTRY:
3345 3355
3346 switch_ddr = false; 3356 switch_ddr = false;
3347 rtsx_trace(chip); 3357 rtsx_trace(chip);
3348 goto Switch_Fail; 3358 goto switch_fail;
3349 } 3359 }
3350 3360
3351 retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000); 3361 retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -3360,7 +3370,7 @@ MMC_UNLOCK_ENTRY:
3360 3370
3361 switch_ddr = false; 3371 switch_ddr = false;
3362 rtsx_trace(chip); 3372 rtsx_trace(chip);
3363 goto Switch_Fail; 3373 goto switch_fail;
3364 } 3374 }
3365 } 3375 }
3366 } 3376 }
@@ -3392,7 +3402,7 @@ MMC_UNLOCK_ENTRY:
3392 3402
3393int reset_sd_card(struct rtsx_chip *chip) 3403int reset_sd_card(struct rtsx_chip *chip)
3394{ 3404{
3395 struct sd_info *sd_card = &(chip->sd_card); 3405 struct sd_info *sd_card = &chip->sd_card;
3396 int retval; 3406 int retval;
3397 3407
3398 sd_init_reg_addr(chip); 3408 sd_init_reg_addr(chip);
@@ -3407,7 +3417,7 @@ int reset_sd_card(struct rtsx_chip *chip)
3407 } 3417 }
3408 3418
3409 if (chip->ignore_sd && CHK_SDIO_EXIST(chip) && 3419 if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
3410 !CHK_SDIO_IGNORED(chip)) { 3420 !CHK_SDIO_IGNORED(chip)) {
3411 if (chip->asic_code) { 3421 if (chip->asic_code) {
3412 retval = sd_pull_ctl_enable(chip); 3422 retval = sd_pull_ctl_enable(chip);
3413 if (retval != STATUS_SUCCESS) { 3423 if (retval != STATUS_SUCCESS) {
@@ -3416,7 +3426,8 @@ int reset_sd_card(struct rtsx_chip *chip)
3416 } 3426 }
3417 } else { 3427 } else {
3418 retval = rtsx_write_register(chip, FPGA_PULL_CTL, 3428 retval = rtsx_write_register(chip, FPGA_PULL_CTL,
3419 FPGA_SD_PULL_CTL_BIT | 0x20, 0); 3429 FPGA_SD_PULL_CTL_BIT |
3430 0x20, 0);
3420 if (retval != STATUS_SUCCESS) { 3431 if (retval != STATUS_SUCCESS) {
3421 rtsx_trace(chip); 3432 rtsx_trace(chip);
3422 return STATUS_FAIL; 3433 return STATUS_FAIL;
@@ -3505,7 +3516,7 @@ int reset_sd_card(struct rtsx_chip *chip)
3505 3516
3506static int reset_mmc_only(struct rtsx_chip *chip) 3517static int reset_mmc_only(struct rtsx_chip *chip)
3507{ 3518{
3508 struct sd_info *sd_card = &(chip->sd_card); 3519 struct sd_info *sd_card = &chip->sd_card;
3509 int retval; 3520 int retval;
3510 3521
3511 sd_card->sd_type = 0; 3522 sd_card->sd_type = 0;
@@ -3574,7 +3585,7 @@ static int reset_mmc_only(struct rtsx_chip *chip)
3574 3585
3575static int wait_data_buf_ready(struct rtsx_chip *chip) 3586static int wait_data_buf_ready(struct rtsx_chip *chip)
3576{ 3587{
3577 struct sd_info *sd_card = &(chip->sd_card); 3588 struct sd_info *sd_card = &chip->sd_card;
3578 int i, retval; 3589 int i, retval;
3579 3590
3580 for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) { 3591 for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
@@ -3587,7 +3598,8 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
3587 sd_card->sd_data_buf_ready = 0; 3598 sd_card->sd_data_buf_ready = 0;
3588 3599
3589 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, 3600 retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
3590 sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); 3601 sd_card->sd_addr, SD_RSP_TYPE_R1,
3602 NULL, 0);
3591 if (retval != STATUS_SUCCESS) { 3603 if (retval != STATUS_SUCCESS) {
3592 rtsx_trace(chip); 3604 rtsx_trace(chip);
3593 return STATUS_FAIL; 3605 return STATUS_FAIL;
@@ -3607,7 +3619,7 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
3607 3619
3608void sd_stop_seq_mode(struct rtsx_chip *chip) 3620void sd_stop_seq_mode(struct rtsx_chip *chip)
3609{ 3621{
3610 struct sd_info *sd_card = &(chip->sd_card); 3622 struct sd_info *sd_card = &chip->sd_card;
3611 int retval; 3623 int retval;
3612 3624
3613 if (sd_card->seq_mode) { 3625 if (sd_card->seq_mode) {
@@ -3616,7 +3628,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
3616 return; 3628 return;
3617 3629
3618 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, 3630 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
3619 SD_RSP_TYPE_R1b, NULL, 0); 3631 SD_RSP_TYPE_R1b, NULL, 0);
3620 if (retval != STATUS_SUCCESS) 3632 if (retval != STATUS_SUCCESS)
3621 sd_set_err_code(chip, SD_STS_ERR); 3633 sd_set_err_code(chip, SD_STS_ERR);
3622 3634
@@ -3632,7 +3644,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
3632 3644
3633static inline int sd_auto_tune_clock(struct rtsx_chip *chip) 3645static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
3634{ 3646{
3635 struct sd_info *sd_card = &(chip->sd_card); 3647 struct sd_info *sd_card = &chip->sd_card;
3636 int retval; 3648 int retval;
3637 3649
3638 if (chip->asic_code) { 3650 if (chip->asic_code) {
@@ -3679,9 +3691,9 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
3679} 3691}
3680 3692
3681int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, 3693int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3682 u16 sector_cnt) 3694 u16 sector_cnt)
3683{ 3695{
3684 struct sd_info *sd_card = &(chip->sd_card); 3696 struct sd_info *sd_card = &chip->sd_card;
3685 u32 data_addr; 3697 u32 data_addr;
3686 u8 cfg2; 3698 u8 cfg2;
3687 int retval; 3699 int retval;
@@ -3730,20 +3742,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3730 } 3742 }
3731 3743
3732 if (sd_card->seq_mode && 3744 if (sd_card->seq_mode &&
3733 ((sd_card->pre_dir != srb->sc_data_direction) || 3745 ((sd_card->pre_dir != srb->sc_data_direction) ||
3734 ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) != 3746 ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
3735 start_sector))) { 3747 start_sector))) {
3736 if ((sd_card->pre_sec_cnt < 0x80) 3748 if ((sd_card->pre_sec_cnt < 0x80) &&
3737 && (sd_card->pre_dir == DMA_FROM_DEVICE) 3749 (sd_card->pre_dir == DMA_FROM_DEVICE) &&
3738 && !CHK_SD30_SPEED(sd_card) 3750 !CHK_SD30_SPEED(sd_card) &&
3739 && !CHK_SD_HS(sd_card) 3751 !CHK_SD_HS(sd_card) &&
3740 && !CHK_MMC_HS(sd_card)) { 3752 !CHK_MMC_HS(sd_card)) {
3741 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 3753 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
3742 SD_RSP_TYPE_R1, NULL, 0); 3754 SD_RSP_TYPE_R1, NULL, 0);
3743 } 3755 }
3744 3756
3745 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 3757 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
3746 0, SD_RSP_TYPE_R1b, NULL, 0); 3758 SD_RSP_TYPE_R1b, NULL, 0);
3747 if (retval != STATUS_SUCCESS) { 3759 if (retval != STATUS_SUCCESS) {
3748 chip->rw_need_retry = 1; 3760 chip->rw_need_retry = 1;
3749 sd_set_err_code(chip, SD_STS_ERR); 3761 sd_set_err_code(chip, SD_STS_ERR);
@@ -3760,12 +3772,12 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3760 goto RW_FAIL; 3772 goto RW_FAIL;
3761 } 3773 }
3762 3774
3763 if ((sd_card->pre_sec_cnt < 0x80) 3775 if ((sd_card->pre_sec_cnt < 0x80) &&
3764 && !CHK_SD30_SPEED(sd_card) 3776 !CHK_SD30_SPEED(sd_card) &&
3765 && !CHK_SD_HS(sd_card) 3777 !CHK_SD_HS(sd_card) &&
3766 && !CHK_MMC_HS(sd_card)) { 3778 !CHK_MMC_HS(sd_card)) {
3767 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, 3779 sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
3768 SD_RSP_TYPE_R1, NULL, 0); 3780 SD_RSP_TYPE_R1, NULL, 0);
3769 } 3781 }
3770 } 3782 }
3771 3783
@@ -3774,30 +3786,30 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3774 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00); 3786 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
3775 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02); 3787 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
3776 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 3788 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
3777 (u8)sector_cnt); 3789 (u8)sector_cnt);
3778 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 3790 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
3779 (u8)(sector_cnt >> 8)); 3791 (u8)(sector_cnt >> 8));
3780 3792
3781 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); 3793 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
3782 3794
3783 if (CHK_MMC_8BIT(sd_card)) 3795 if (CHK_MMC_8BIT(sd_card))
3784 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 3796 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
3785 0x03, SD_BUS_WIDTH_8); 3797 0x03, SD_BUS_WIDTH_8);
3786 else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card)) 3798 else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
3787 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 3799 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
3788 0x03, SD_BUS_WIDTH_4); 3800 0x03, SD_BUS_WIDTH_4);
3789 else 3801 else
3790 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 3802 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
3791 0x03, SD_BUS_WIDTH_1); 3803 0x03, SD_BUS_WIDTH_1);
3792 3804
3793 if (sd_card->seq_mode) { 3805 if (sd_card->seq_mode) {
3794 cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16| 3806 cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
3795 SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | 3807 SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
3796 SD_RSP_LEN_0; 3808 SD_RSP_LEN_0;
3797 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2); 3809 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
3798 3810
3799 trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512, 3811 trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
3800 DMA_512); 3812 DMA_512);
3801 3813
3802 if (srb->sc_data_direction == DMA_FROM_DEVICE) { 3814 if (srb->sc_data_direction == DMA_FROM_DEVICE) {
3803 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 3815 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
@@ -3808,7 +3820,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3808 } 3820 }
3809 3821
3810 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, 3822 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
3811 SD_TRANSFER_END, SD_TRANSFER_END); 3823 SD_TRANSFER_END, SD_TRANSFER_END);
3812 3824
3813 rtsx_send_cmd_no_wait(chip); 3825 rtsx_send_cmd_no_wait(chip);
3814 } else { 3826 } else {
@@ -3818,22 +3830,22 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3818 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 3830 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
3819 0x40 | READ_MULTIPLE_BLOCK); 3831 0x40 | READ_MULTIPLE_BLOCK);
3820 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 3832 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
3821 (u8)(data_addr >> 24)); 3833 (u8)(data_addr >> 24));
3822 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 3834 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
3823 (u8)(data_addr >> 16)); 3835 (u8)(data_addr >> 16));
3824 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 3836 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
3825 (u8)(data_addr >> 8)); 3837 (u8)(data_addr >> 8));
3826 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 3838 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
3827 (u8)data_addr); 3839 (u8)data_addr);
3828 3840
3829 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | 3841 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
3830 SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | 3842 SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
3831 SD_RSP_LEN_6; 3843 SD_RSP_LEN_6;
3832 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 3844 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
3833 cfg2); 3845 cfg2);
3834 3846
3835 trans_dma_enable(srb->sc_data_direction, chip, 3847 trans_dma_enable(srb->sc_data_direction, chip,
3836 sector_cnt * 512, DMA_512); 3848 sector_cnt * 512, DMA_512);
3837 3849
3838 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 3850 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
3839 SD_TM_AUTO_READ_2 | SD_TRANSFER_START); 3851 SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
@@ -3861,7 +3873,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3861 } 3873 }
3862 3874
3863 retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK, 3875 retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
3864 data_addr, SD_RSP_TYPE_R1, NULL, 0); 3876 data_addr, SD_RSP_TYPE_R1,
3877 NULL, 0);
3865 if (retval != STATUS_SUCCESS) { 3878 if (retval != STATUS_SUCCESS) {
3866 chip->rw_need_retry = 1; 3879 chip->rw_need_retry = 1;
3867 rtsx_trace(chip); 3880 rtsx_trace(chip);
@@ -3874,10 +3887,10 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3874 SD_NO_WAIT_BUSY_END | 3887 SD_NO_WAIT_BUSY_END |
3875 SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; 3888 SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
3876 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, 3889 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
3877 cfg2); 3890 cfg2);
3878 3891
3879 trans_dma_enable(srb->sc_data_direction, chip, 3892 trans_dma_enable(srb->sc_data_direction, chip,
3880 sector_cnt * 512, DMA_512); 3893 sector_cnt * 512, DMA_512);
3881 3894
3882 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 3895 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
3883 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); 3896 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
@@ -3891,7 +3904,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3891 } 3904 }
3892 3905
3893 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), 3906 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
3894 scsi_bufflen(srb), scsi_sg_count(srb), 3907 scsi_bufflen(srb), scsi_sg_count(srb),
3895 srb->sc_data_direction, chip->sd_timeout); 3908 srb->sc_data_direction, chip->sd_timeout);
3896 if (retval < 0) { 3909 if (retval < 0) {
3897 u8 stat = 0; 3910 u8 stat = 0;
@@ -3916,7 +3929,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
3916 chip->rw_need_retry = 1; 3929 chip->rw_need_retry = 1;
3917 3930
3918 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, 3931 retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
3919 SD_RSP_TYPE_R1b, NULL, 0); 3932 SD_RSP_TYPE_R1b, NULL, 0);
3920 if (retval != STATUS_SUCCESS) { 3933 if (retval != STATUS_SUCCESS) {
3921 sd_set_err_code(chip, SD_STS_ERR); 3934 sd_set_err_code(chip, SD_STS_ERR);
3922 rtsx_trace(chip); 3935 rtsx_trace(chip);
@@ -3984,8 +3997,9 @@ int soft_reset_sd_card(struct rtsx_chip *chip)
3984 return reset_sd(chip); 3997 return reset_sd(chip);
3985} 3998}
3986 3999
3987int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, 4000int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
3988 u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check) 4001 u8 rsp_type, u8 *rsp, int rsp_len,
4002 bool special_check)
3989{ 4003{
3990 int retval; 4004 int retval;
3991 int timeout = 100; 4005 int timeout = 100;
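
The ext_sd_send_cmd_get_rsp() prototype and the wrapped calls in the surrounding hunks are reflowed so that continuation arguments line up under the opening parenthesis, another checkpatch preference; parameter order and types are unchanged. A sketch of the same wrapping on a stand-in declaration (the u8/u32 typedefs and the opaque struct are added only so the fragment stands alone):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint8_t  u8;   /* stand-ins for the kernel's fixed-width types */
    typedef uint32_t u32;

    struct rtsx_chip;      /* opaque here; the real definition is in the driver */

    /* Continuation lines align with the column after the opening '('. */
    int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
                                u8 rsp_type, u8 *rsp, int rsp_len,
                                bool special_check);

    int main(void) { return 0; }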
@@ -4011,11 +4025,11 @@ RTY_SEND_CMD:
4011 4025
4012 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); 4026 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
4013 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 4027 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
4014 0x01, PINGPONG_BUFFER); 4028 0x01, PINGPONG_BUFFER);
4015 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 4029 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
4016 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); 4030 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
4017 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, 4031 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
4018 SD_TRANSFER_END); 4032 SD_TRANSFER_END);
4019 4033
4020 if (rsp_type == SD_RSP_TYPE_R2) { 4034 if (rsp_type == SD_RSP_TYPE_R2) {
4021 for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; 4035 for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -4084,7 +4098,7 @@ RTY_SEND_CMD:
4084 } 4098 }
4085 4099
4086 if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) || 4100 if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
4087 (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) { 4101 (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
4088 if ((cmd_idx != STOP_TRANSMISSION) && !special_check) { 4102 if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
4089 if (ptr[1] & 0x80) { 4103 if (ptr[1] & 0x80) {
4090 rtsx_trace(chip); 4104 rtsx_trace(chip);
@@ -4172,7 +4186,7 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
4172 4186
4173int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) 4187int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4174{ 4188{
4175 struct sd_info *sd_card = &(chip->sd_card); 4189 struct sd_info *sd_card = &chip->sd_card;
4176 unsigned int lun = SCSI_LUN(srb); 4190 unsigned int lun = SCSI_LUN(srb);
4177 int len; 4191 int len;
4178 u8 buf[18] = { 4192 u8 buf[18] = {
@@ -4206,9 +4220,9 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4206 } 4220 }
4207 4221
4208 if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || 4222 if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
4209 (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || 4223 (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
4210 (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || 4224 (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
4211 (srb->cmnd[8] != 0x64)) { 4225 (srb->cmnd[8] != 0x64)) {
4212 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 4226 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
4213 rtsx_trace(chip); 4227 rtsx_trace(chip);
4214 return TRANSPORT_FAILED; 4228 return TRANSPORT_FAILED;
@@ -4245,7 +4259,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4245} 4259}
4246 4260
4247static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type, 4261static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
4248 int *rsp_len) 4262 int *rsp_len)
4249{ 4263{
4250 if (!rsp_type || !rsp_len) 4264 if (!rsp_type || !rsp_len)
4251 return STATUS_FAIL; 4265 return STATUS_FAIL;
@@ -4285,7 +4299,7 @@ static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
4285 4299
4286int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) 4300int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4287{ 4301{
4288 struct sd_info *sd_card = &(chip->sd_card); 4302 struct sd_info *sd_card = &chip->sd_card;
4289 unsigned int lun = SCSI_LUN(srb); 4303 unsigned int lun = SCSI_LUN(srb);
4290 int retval, rsp_len; 4304 int retval, rsp_len;
4291 u8 cmd_idx, rsp_type; 4305 u8 cmd_idx, rsp_type;
@@ -4339,7 +4353,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4339 if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { 4353 if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
4340 if (CHK_MMC_8BIT(sd_card)) { 4354 if (CHK_MMC_8BIT(sd_card)) {
4341 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, 4355 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
4342 SD_BUS_WIDTH_8); 4356 SD_BUS_WIDTH_8);
4343 if (retval != STATUS_SUCCESS) { 4357 if (retval != STATUS_SUCCESS) {
4344 rtsx_trace(chip); 4358 rtsx_trace(chip);
4345 return TRANSPORT_FAILED; 4359 return TRANSPORT_FAILED;
@@ -4347,7 +4361,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4347 4361
4348 } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { 4362 } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
4349 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, 4363 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
4350 SD_BUS_WIDTH_4); 4364 SD_BUS_WIDTH_4);
4351 if (retval != STATUS_SUCCESS) { 4365 if (retval != STATUS_SUCCESS) {
4352 rtsx_trace(chip); 4366 rtsx_trace(chip);
4353 return TRANSPORT_FAILED; 4367 return TRANSPORT_FAILED;
@@ -4366,32 +4380,33 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4366 retval = sd_select_card(chip, 0); 4380 retval = sd_select_card(chip, 0);
4367 if (retval != STATUS_SUCCESS) { 4381 if (retval != STATUS_SUCCESS) {
4368 rtsx_trace(chip); 4382 rtsx_trace(chip);
4369 goto SD_Execute_Cmd_Failed; 4383 goto sd_execute_cmd_failed;
4370 } 4384 }
4371 } 4385 }
4372 4386
4373 if (acmd) { 4387 if (acmd) {
4374 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, 4388 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
4375 sd_card->sd_addr, 4389 sd_card->sd_addr,
4376 SD_RSP_TYPE_R1, NULL, 0, false); 4390 SD_RSP_TYPE_R1, NULL, 0,
4391 false);
4377 if (retval != STATUS_SUCCESS) { 4392 if (retval != STATUS_SUCCESS) {
4378 rtsx_trace(chip); 4393 rtsx_trace(chip);
4379 goto SD_Execute_Cmd_Failed; 4394 goto sd_execute_cmd_failed;
4380 } 4395 }
4381 } 4396 }
4382 4397
4383 retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, 4398 retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
4384 sd_card->rsp, rsp_len, false); 4399 sd_card->rsp, rsp_len, false);
4385 if (retval != STATUS_SUCCESS) { 4400 if (retval != STATUS_SUCCESS) {
4386 rtsx_trace(chip); 4401 rtsx_trace(chip);
4387 goto SD_Execute_Cmd_Failed; 4402 goto sd_execute_cmd_failed;
4388 } 4403 }
4389 4404
4390 if (standby) { 4405 if (standby) {
4391 retval = sd_select_card(chip, 1); 4406 retval = sd_select_card(chip, 1);
4392 if (retval != STATUS_SUCCESS) { 4407 if (retval != STATUS_SUCCESS) {
4393 rtsx_trace(chip); 4408 rtsx_trace(chip);
4394 goto SD_Execute_Cmd_Failed; 4409 goto sd_execute_cmd_failed;
4395 } 4410 }
4396 } 4411 }
4397 4412
@@ -4399,14 +4414,14 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4399 retval = sd_update_lock_status(chip); 4414 retval = sd_update_lock_status(chip);
4400 if (retval != STATUS_SUCCESS) { 4415 if (retval != STATUS_SUCCESS) {
4401 rtsx_trace(chip); 4416 rtsx_trace(chip);
4402 goto SD_Execute_Cmd_Failed; 4417 goto sd_execute_cmd_failed;
4403 } 4418 }
4404#endif 4419#endif
4405 4420
4406 scsi_set_resid(srb, 0); 4421 scsi_set_resid(srb, 0);
4407 return TRANSPORT_GOOD; 4422 return TRANSPORT_GOOD;
4408 4423
4409SD_Execute_Cmd_Failed: 4424sd_execute_cmd_failed:
4410 sd_card->pre_cmd_err = 1; 4425 sd_card->pre_cmd_err = 1;
4411 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); 4426 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
4412 release_sd_card(chip); 4427 release_sd_card(chip);
@@ -4420,7 +4435,7 @@ SD_Execute_Cmd_Failed:
4420 4435
4421int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) 4436int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4422{ 4437{
4423 struct sd_info *sd_card = &(chip->sd_card); 4438 struct sd_info *sd_card = &chip->sd_card;
4424 unsigned int lun = SCSI_LUN(srb); 4439 unsigned int lun = SCSI_LUN(srb);
4425 int retval, rsp_len, i; 4440 int retval, rsp_len, i;
4426 bool read_err = false, cmd13_checkbit = false; 4441 bool read_err = false, cmd13_checkbit = false;
@@ -4492,10 +4507,11 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4492 4507
4493 if (data_len < 512) { 4508 if (data_len < 512) {
4494 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, 4509 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
4495 SD_RSP_TYPE_R1, NULL, 0, false); 4510 SD_RSP_TYPE_R1, NULL, 0,
4511 false);
4496 if (retval != STATUS_SUCCESS) { 4512 if (retval != STATUS_SUCCESS) {
4497 rtsx_trace(chip); 4513 rtsx_trace(chip);
4498 goto SD_Execute_Read_Cmd_Failed; 4514 goto sd_execute_read_cmd_failed;
4499 } 4515 }
4500 } 4516 }
4501 4517
@@ -4503,17 +4519,18 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4503 retval = sd_select_card(chip, 0); 4519 retval = sd_select_card(chip, 0);
4504 if (retval != STATUS_SUCCESS) { 4520 if (retval != STATUS_SUCCESS) {
4505 rtsx_trace(chip); 4521 rtsx_trace(chip);
4506 goto SD_Execute_Read_Cmd_Failed; 4522 goto sd_execute_read_cmd_failed;
4507 } 4523 }
4508 } 4524 }
4509 4525
4510 if (acmd) { 4526 if (acmd) {
4511 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, 4527 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
4512 sd_card->sd_addr, 4528 sd_card->sd_addr,
4513 SD_RSP_TYPE_R1, NULL, 0, false); 4529 SD_RSP_TYPE_R1, NULL, 0,
4530 false);
4514 if (retval != STATUS_SUCCESS) { 4531 if (retval != STATUS_SUCCESS) {
4515 rtsx_trace(chip); 4532 rtsx_trace(chip);
4516 goto SD_Execute_Read_Cmd_Failed; 4533 goto sd_execute_read_cmd_failed;
4517 } 4534 }
4518 } 4535 }
4519 4536
@@ -4539,13 +4556,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4539 } 4556 }
4540 4557
4541 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt, 4558 retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
4542 blk_cnt, bus_width, buf, data_len, 2000); 4559 blk_cnt, bus_width, buf, data_len, 2000);
4543 if (retval != STATUS_SUCCESS) { 4560 if (retval != STATUS_SUCCESS) {
4544 read_err = true; 4561 read_err = true;
4545 kfree(buf); 4562 kfree(buf);
4546 rtsx_clear_sd_error(chip); 4563 rtsx_clear_sd_error(chip);
4547 rtsx_trace(chip); 4564 rtsx_trace(chip);
4548 goto SD_Execute_Read_Cmd_Failed; 4565 goto sd_execute_read_cmd_failed;
4549 } 4566 }
4550 4567
4551 min_len = min(data_len, scsi_bufflen(srb)); 4568 min_len = min(data_len, scsi_bufflen(srb));
@@ -4558,24 +4575,24 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4558 trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512); 4575 trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
4559 4576
4560 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 4577 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
4561 0x02); 4578 0x02);
4562 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 4579 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
4563 0x00); 4580 0x00);
4564 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 4581 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
4565 0xFF, (srb->cmnd[7] & 0xFE) >> 1); 4582 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
4566 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 4583 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
4567 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); 4584 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
4568 4585
4569 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 4586 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
4570 0x40 | cmd_idx); 4587 0x40 | cmd_idx);
4571 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 4588 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
4572 srb->cmnd[3]); 4589 srb->cmnd[3]);
4573 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 4590 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
4574 srb->cmnd[4]); 4591 srb->cmnd[4]);
4575 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 4592 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
4576 srb->cmnd[5]); 4593 srb->cmnd[5]);
4577 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 4594 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
4578 srb->cmnd[6]); 4595 srb->cmnd[6]);
4579 4596
4580 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); 4597 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
4581 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); 4598 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
@@ -4583,66 +4600,69 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4583 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 4600 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
4584 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START); 4601 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
4585 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, 4602 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
4586 SD_TRANSFER_END, SD_TRANSFER_END); 4603 SD_TRANSFER_END, SD_TRANSFER_END);
4587 4604
4588 rtsx_send_cmd_no_wait(chip); 4605 rtsx_send_cmd_no_wait(chip);
4589 4606
4590 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), 4607 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
4591 scsi_bufflen(srb), scsi_sg_count(srb), 4608 scsi_bufflen(srb),
4592 DMA_FROM_DEVICE, 10000); 4609 scsi_sg_count(srb),
4610 DMA_FROM_DEVICE, 10000);
4593 if (retval < 0) { 4611 if (retval < 0) {
4594 read_err = true; 4612 read_err = true;
4595 rtsx_clear_sd_error(chip); 4613 rtsx_clear_sd_error(chip);
4596 rtsx_trace(chip); 4614 rtsx_trace(chip);
4597 goto SD_Execute_Read_Cmd_Failed; 4615 goto sd_execute_read_cmd_failed;
4598 } 4616 }
4599 4617
4600 } else { 4618 } else {
4601 rtsx_trace(chip); 4619 rtsx_trace(chip);
4602 goto SD_Execute_Read_Cmd_Failed; 4620 goto sd_execute_read_cmd_failed;
4603 } 4621 }
4604 4622
4605 retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type); 4623 retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
4606 if (retval != STATUS_SUCCESS) { 4624 if (retval != STATUS_SUCCESS) {
4607 rtsx_trace(chip); 4625 rtsx_trace(chip);
4608 goto SD_Execute_Read_Cmd_Failed; 4626 goto sd_execute_read_cmd_failed;
4609 } 4627 }
4610 4628
4611 if (standby) { 4629 if (standby) {
4612 retval = sd_select_card(chip, 1); 4630 retval = sd_select_card(chip, 1);
4613 if (retval != STATUS_SUCCESS) { 4631 if (retval != STATUS_SUCCESS) {
4614 rtsx_trace(chip); 4632 rtsx_trace(chip);
4615 goto SD_Execute_Read_Cmd_Failed; 4633 goto sd_execute_read_cmd_failed;
4616 } 4634 }
4617 } 4635 }
4618 4636
4619 if (send_cmd12) { 4637 if (send_cmd12) {
4620 retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 4638 retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
4621 0, SD_RSP_TYPE_R1b, NULL, 0, false); 4639 SD_RSP_TYPE_R1b, NULL, 0,
4640 false);
4622 if (retval != STATUS_SUCCESS) { 4641 if (retval != STATUS_SUCCESS) {
4623 rtsx_trace(chip); 4642 rtsx_trace(chip);
4624 goto SD_Execute_Read_Cmd_Failed; 4643 goto sd_execute_read_cmd_failed;
4625 } 4644 }
4626 } 4645 }
4627 4646
4628 if (data_len < 512) { 4647 if (data_len < 512) {
4629 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, 4648 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
4630 SD_RSP_TYPE_R1, NULL, 0, false); 4649 SD_RSP_TYPE_R1, NULL, 0,
4650 false);
4631 if (retval != STATUS_SUCCESS) { 4651 if (retval != STATUS_SUCCESS) {
4632 rtsx_trace(chip); 4652 rtsx_trace(chip);
4633 goto SD_Execute_Read_Cmd_Failed; 4653 goto sd_execute_read_cmd_failed;
4634 } 4654 }
4635 4655
4636 retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); 4656 retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
4637 if (retval != STATUS_SUCCESS) { 4657 if (retval != STATUS_SUCCESS) {
4638 rtsx_trace(chip); 4658 rtsx_trace(chip);
4639 goto SD_Execute_Read_Cmd_Failed; 4659 goto sd_execute_read_cmd_failed;
4640 } 4660 }
4641 4661
4642 retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); 4662 retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
4643 if (retval != STATUS_SUCCESS) { 4663 if (retval != STATUS_SUCCESS) {
4644 rtsx_trace(chip); 4664 rtsx_trace(chip);
4645 goto SD_Execute_Read_Cmd_Failed; 4665 goto sd_execute_read_cmd_failed;
4646 } 4666 }
4647 } 4667 }
4648 4668
@@ -4651,7 +4671,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4651 4671
4652 for (i = 0; i < 3; i++) { 4672 for (i = 0; i < 3; i++) {
4653 retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, 4673 retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
4654 sd_card->sd_addr, 4674 sd_card->sd_addr,
4655 SD_RSP_TYPE_R1, NULL, 0, 4675 SD_RSP_TYPE_R1, NULL, 0,
4656 cmd13_checkbit); 4676 cmd13_checkbit);
4657 if (retval == STATUS_SUCCESS) 4677 if (retval == STATUS_SUCCESS)
@@ -4659,13 +4679,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4659 } 4679 }
4660 if (retval != STATUS_SUCCESS) { 4680 if (retval != STATUS_SUCCESS) {
4661 rtsx_trace(chip); 4681 rtsx_trace(chip);
4662 goto SD_Execute_Read_Cmd_Failed; 4682 goto sd_execute_read_cmd_failed;
4663 } 4683 }
4664 4684
4665 scsi_set_resid(srb, 0); 4685 scsi_set_resid(srb, 0);
4666 return TRANSPORT_GOOD; 4686 return TRANSPORT_GOOD;
4667 4687
4668SD_Execute_Read_Cmd_Failed: 4688sd_execute_read_cmd_failed:
4669 sd_card->pre_cmd_err = 1; 4689 sd_card->pre_cmd_err = 1;
4670 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); 4690 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
4671 if (read_err) 4691 if (read_err)
@@ -4682,7 +4702,7 @@ SD_Execute_Read_Cmd_Failed:
4682 4702
4683int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) 4703int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4684{ 4704{
4685 struct sd_info *sd_card = &(chip->sd_card); 4705 struct sd_info *sd_card = &chip->sd_card;
4686 unsigned int lun = SCSI_LUN(srb); 4706 unsigned int lun = SCSI_LUN(srb);
4687 int retval, rsp_len, i; 4707 int retval, rsp_len, i;
4688 bool write_err = false, cmd13_checkbit = false; 4708 bool write_err = false, cmd13_checkbit = false;
@@ -4754,7 +4774,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4754 if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { 4774 if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
4755 if (CHK_MMC_8BIT(sd_card)) { 4775 if (CHK_MMC_8BIT(sd_card)) {
4756 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, 4776 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
4757 SD_BUS_WIDTH_8); 4777 SD_BUS_WIDTH_8);
4758 if (retval != STATUS_SUCCESS) { 4778 if (retval != STATUS_SUCCESS) {
4759 rtsx_trace(chip); 4779 rtsx_trace(chip);
4760 return TRANSPORT_FAILED; 4780 return TRANSPORT_FAILED;
@@ -4762,7 +4782,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4762 4782
4763 } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { 4783 } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
4764 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, 4784 retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
4765 SD_BUS_WIDTH_4); 4785 SD_BUS_WIDTH_4);
4766 if (retval != STATUS_SUCCESS) { 4786 if (retval != STATUS_SUCCESS) {
4767 rtsx_trace(chip); 4787 rtsx_trace(chip);
4768 return TRANSPORT_FAILED; 4788 return TRANSPORT_FAILED;
@@ -4779,10 +4799,11 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4779 4799
4780 if (data_len < 512) { 4800 if (data_len < 512) {
4781 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, 4801 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
4782 SD_RSP_TYPE_R1, NULL, 0, false); 4802 SD_RSP_TYPE_R1, NULL, 0,
4803 false);
4783 if (retval != STATUS_SUCCESS) { 4804 if (retval != STATUS_SUCCESS) {
4784 rtsx_trace(chip); 4805 rtsx_trace(chip);
4785 goto SD_Execute_Write_Cmd_Failed; 4806 goto sd_execute_write_cmd_failed;
4786 } 4807 }
4787 } 4808 }
4788 4809
@@ -4790,25 +4811,26 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4790 retval = sd_select_card(chip, 0); 4811 retval = sd_select_card(chip, 0);
4791 if (retval != STATUS_SUCCESS) { 4812 if (retval != STATUS_SUCCESS) {
4792 rtsx_trace(chip); 4813 rtsx_trace(chip);
4793 goto SD_Execute_Write_Cmd_Failed; 4814 goto sd_execute_write_cmd_failed;
4794 } 4815 }
4795 } 4816 }
4796 4817
4797 if (acmd) { 4818 if (acmd) {
4798 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, 4819 retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
4799 sd_card->sd_addr, 4820 sd_card->sd_addr,
4800 SD_RSP_TYPE_R1, NULL, 0, false); 4821 SD_RSP_TYPE_R1, NULL, 0,
4822 false);
4801 if (retval != STATUS_SUCCESS) { 4823 if (retval != STATUS_SUCCESS) {
4802 rtsx_trace(chip); 4824 rtsx_trace(chip);
4803 goto SD_Execute_Write_Cmd_Failed; 4825 goto sd_execute_write_cmd_failed;
4804 } 4826 }
4805 } 4827 }
4806 4828
4807 retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, 4829 retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
4808 sd_card->rsp, rsp_len, false); 4830 sd_card->rsp, rsp_len, false);
4809 if (retval != STATUS_SUCCESS) { 4831 if (retval != STATUS_SUCCESS) {
4810 rtsx_trace(chip); 4832 rtsx_trace(chip);
4811 goto SD_Execute_Write_Cmd_Failed; 4833 goto sd_execute_write_cmd_failed;
4812 } 4834 }
4813 4835
4814 if (data_len <= 512) { 4836 if (data_len <= 512) {
@@ -4832,37 +4854,37 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4832 rtsx_init_cmd(chip); 4854 rtsx_init_cmd(chip);
4833 for (i = 0; i < 256; i++) { 4855 for (i = 0; i < 256; i++) {
4834 rtsx_add_cmd(chip, WRITE_REG_CMD, 4856 rtsx_add_cmd(chip, WRITE_REG_CMD,
4835 PPBUF_BASE2 + i, 0xFF, buf[i]); 4857 PPBUF_BASE2 + i, 0xFF, buf[i]);
4836 } 4858 }
4837 retval = rtsx_send_cmd(chip, 0, 250); 4859 retval = rtsx_send_cmd(chip, 0, 250);
4838 if (retval != STATUS_SUCCESS) { 4860 if (retval != STATUS_SUCCESS) {
4839 kfree(buf); 4861 kfree(buf);
4840 rtsx_trace(chip); 4862 rtsx_trace(chip);
4841 goto SD_Execute_Write_Cmd_Failed; 4863 goto sd_execute_write_cmd_failed;
4842 } 4864 }
4843 4865
4844 rtsx_init_cmd(chip); 4866 rtsx_init_cmd(chip);
4845 for (i = 256; i < data_len; i++) { 4867 for (i = 256; i < data_len; i++) {
4846 rtsx_add_cmd(chip, WRITE_REG_CMD, 4868 rtsx_add_cmd(chip, WRITE_REG_CMD,
4847 PPBUF_BASE2 + i, 0xFF, buf[i]); 4869 PPBUF_BASE2 + i, 0xFF, buf[i]);
4848 } 4870 }
4849 retval = rtsx_send_cmd(chip, 0, 250); 4871 retval = rtsx_send_cmd(chip, 0, 250);
4850 if (retval != STATUS_SUCCESS) { 4872 if (retval != STATUS_SUCCESS) {
4851 kfree(buf); 4873 kfree(buf);
4852 rtsx_trace(chip); 4874 rtsx_trace(chip);
4853 goto SD_Execute_Write_Cmd_Failed; 4875 goto sd_execute_write_cmd_failed;
4854 } 4876 }
4855 } else { 4877 } else {
4856 rtsx_init_cmd(chip); 4878 rtsx_init_cmd(chip);
4857 for (i = 0; i < data_len; i++) { 4879 for (i = 0; i < data_len; i++) {
4858 rtsx_add_cmd(chip, WRITE_REG_CMD, 4880 rtsx_add_cmd(chip, WRITE_REG_CMD,
4859 PPBUF_BASE2 + i, 0xFF, buf[i]); 4881 PPBUF_BASE2 + i, 0xFF, buf[i]);
4860 } 4882 }
4861 retval = rtsx_send_cmd(chip, 0, 250); 4883 retval = rtsx_send_cmd(chip, 0, 250);
4862 if (retval != STATUS_SUCCESS) { 4884 if (retval != STATUS_SUCCESS) {
4863 kfree(buf); 4885 kfree(buf);
4864 rtsx_trace(chip); 4886 rtsx_trace(chip);
4865 goto SD_Execute_Write_Cmd_Failed; 4887 goto sd_execute_write_cmd_failed;
4866 } 4888 }
4867 } 4889 }
4868 4890
@@ -4871,20 +4893,20 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4871 rtsx_init_cmd(chip); 4893 rtsx_init_cmd(chip);
4872 4894
4873 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 4895 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
4874 srb->cmnd[8] & 0x03); 4896 srb->cmnd[8] & 0x03);
4875 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 4897 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
4876 srb->cmnd[9]); 4898 srb->cmnd[9]);
4877 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 4899 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
4878 0x00); 4900 0x00);
4879 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 4901 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
4880 0x01); 4902 0x01);
4881 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, 4903 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
4882 PINGPONG_BUFFER); 4904 PINGPONG_BUFFER);
4883 4905
4884 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 4906 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
4885 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); 4907 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
4886 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, 4908 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
4887 SD_TRANSFER_END, SD_TRANSFER_END); 4909 SD_TRANSFER_END, SD_TRANSFER_END);
4888 4910
4889 retval = rtsx_send_cmd(chip, SD_CARD, 250); 4911 retval = rtsx_send_cmd(chip, SD_CARD, 250);
4890 } else if (!(data_len & 0x1FF)) { 4912 } else if (!(data_len & 0x1FF)) {
@@ -4893,35 +4915,36 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4893 trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512); 4915 trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
4894 4916
4895 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 4917 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
4896 0x02); 4918 0x02);
4897 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 4919 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
4898 0x00); 4920 0x00);
4899 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 4921 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
4900 0xFF, (srb->cmnd[7] & 0xFE) >> 1); 4922 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
4901 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 4923 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
4902 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); 4924 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
4903 4925
4904 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, 4926 rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
4905 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); 4927 SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
4906 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, 4928 rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
4907 SD_TRANSFER_END, SD_TRANSFER_END); 4929 SD_TRANSFER_END, SD_TRANSFER_END);
4908 4930
4909 rtsx_send_cmd_no_wait(chip); 4931 rtsx_send_cmd_no_wait(chip);
4910 4932
4911 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), 4933 retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
4912 scsi_bufflen(srb), scsi_sg_count(srb), 4934 scsi_bufflen(srb),
4913 DMA_TO_DEVICE, 10000); 4935 scsi_sg_count(srb),
4936 DMA_TO_DEVICE, 10000);
4914 4937
4915 } else { 4938 } else {
4916 rtsx_trace(chip); 4939 rtsx_trace(chip);
4917 goto SD_Execute_Write_Cmd_Failed; 4940 goto sd_execute_write_cmd_failed;
4918 } 4941 }
4919 4942
4920 if (retval < 0) { 4943 if (retval < 0) {
4921 write_err = true; 4944 write_err = true;
4922 rtsx_clear_sd_error(chip); 4945 rtsx_clear_sd_error(chip);
4923 rtsx_trace(chip); 4946 rtsx_trace(chip);
4924 goto SD_Execute_Write_Cmd_Failed; 4947 goto sd_execute_write_cmd_failed;
4925 } 4948 }
4926 4949
4927#ifdef SUPPORT_SD_LOCK 4950#ifdef SUPPORT_SD_LOCK
@@ -4949,37 +4972,39 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4949 retval = sd_select_card(chip, 1); 4972 retval = sd_select_card(chip, 1);
4950 if (retval != STATUS_SUCCESS) { 4973 if (retval != STATUS_SUCCESS) {
4951 rtsx_trace(chip); 4974 rtsx_trace(chip);
4952 goto SD_Execute_Write_Cmd_Failed; 4975 goto sd_execute_write_cmd_failed;
4953 } 4976 }
4954 } 4977 }
4955 4978
4956 if (send_cmd12) { 4979 if (send_cmd12) {
4957 retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 4980 retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
4958 0, SD_RSP_TYPE_R1b, NULL, 0, false); 4981 SD_RSP_TYPE_R1b, NULL, 0,
4982 false);
4959 if (retval != STATUS_SUCCESS) { 4983 if (retval != STATUS_SUCCESS) {
4960 rtsx_trace(chip); 4984 rtsx_trace(chip);
4961 goto SD_Execute_Write_Cmd_Failed; 4985 goto sd_execute_write_cmd_failed;
4962 } 4986 }
4963 } 4987 }
4964 4988
4965 if (data_len < 512) { 4989 if (data_len < 512) {
4966 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, 4990 retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
4967 SD_RSP_TYPE_R1, NULL, 0, false); 4991 SD_RSP_TYPE_R1, NULL, 0,
4992 false);
4968 if (retval != STATUS_SUCCESS) { 4993 if (retval != STATUS_SUCCESS) {
4969 rtsx_trace(chip); 4994 rtsx_trace(chip);
4970 goto SD_Execute_Write_Cmd_Failed; 4995 goto sd_execute_write_cmd_failed;
4971 } 4996 }
4972 4997
4973 retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); 4998 retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
4974 if (retval != STATUS_SUCCESS) { 4999 if (retval != STATUS_SUCCESS) {
4975 rtsx_trace(chip); 5000 rtsx_trace(chip);
4976 goto SD_Execute_Write_Cmd_Failed; 5001 goto sd_execute_write_cmd_failed;
4977 } 5002 }
4978 5003
4979 rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); 5004 rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
4980 if (retval != STATUS_SUCCESS) { 5005 if (retval != STATUS_SUCCESS) {
4981 rtsx_trace(chip); 5006 rtsx_trace(chip);
4982 goto SD_Execute_Write_Cmd_Failed; 5007 goto sd_execute_write_cmd_failed;
4983 } 5008 }
4984 } 5009 }
4985 5010
@@ -4988,15 +5013,15 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4988 5013
4989 for (i = 0; i < 3; i++) { 5014 for (i = 0; i < 3; i++) {
4990 retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, 5015 retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
4991 sd_card->sd_addr, 5016 sd_card->sd_addr,
4992 SD_RSP_TYPE_R1, NULL, 0, 5017 SD_RSP_TYPE_R1, NULL, 0,
4993 cmd13_checkbit); 5018 cmd13_checkbit);
4994 if (retval == STATUS_SUCCESS) 5019 if (retval == STATUS_SUCCESS)
4995 break; 5020 break;
4996 } 5021 }
4997 if (retval != STATUS_SUCCESS) { 5022 if (retval != STATUS_SUCCESS) {
4998 rtsx_trace(chip); 5023 rtsx_trace(chip);
4999 goto SD_Execute_Write_Cmd_Failed; 5024 goto sd_execute_write_cmd_failed;
5000 } 5025 }
5001 5026
5002#ifdef SUPPORT_SD_LOCK 5027#ifdef SUPPORT_SD_LOCK
@@ -5024,7 +5049,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5024 if (retval != STATUS_SUCCESS) { 5049 if (retval != STATUS_SUCCESS) {
5025 sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST); 5050 sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
5026 rtsx_trace(chip); 5051 rtsx_trace(chip);
5027 goto SD_Execute_Write_Cmd_Failed; 5052 goto sd_execute_write_cmd_failed;
5028 } 5053 }
5029 } 5054 }
5030 5055
@@ -5045,7 +5070,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5045 scsi_set_resid(srb, 0); 5070 scsi_set_resid(srb, 0);
5046 return TRANSPORT_GOOD; 5071 return TRANSPORT_GOOD;
5047 5072
5048SD_Execute_Write_Cmd_Failed: 5073sd_execute_write_cmd_failed:
5049 sd_card->pre_cmd_err = 1; 5074 sd_card->pre_cmd_err = 1;
5050 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); 5075 set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
5051 if (write_err) 5076 if (write_err)
@@ -5062,7 +5087,7 @@ SD_Execute_Write_Cmd_Failed:
5062 5087
5063int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) 5088int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5064{ 5089{
5065 struct sd_info *sd_card = &(chip->sd_card); 5090 struct sd_info *sd_card = &chip->sd_card;
5066 unsigned int lun = SCSI_LUN(srb); 5091 unsigned int lun = SCSI_LUN(srb);
5067 int count; 5092 int count;
5068 u16 data_len; 5093 u16 data_len;
@@ -5104,7 +5129,7 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5104 5129
5105int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) 5130int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5106{ 5131{
5107 struct sd_info *sd_card = &(chip->sd_card); 5132 struct sd_info *sd_card = &chip->sd_card;
5108 unsigned int lun = SCSI_LUN(srb); 5133 unsigned int lun = SCSI_LUN(srb);
5109 int retval; 5134 int retval;
5110 5135
@@ -5122,9 +5147,9 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5122 } 5147 }
5123 5148
5124 if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || 5149 if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
5125 (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || 5150 (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
5126 (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || 5151 (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
5127 (srb->cmnd[8] != 0x64)) { 5152 (srb->cmnd[8] != 0x64)) {
5128 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 5153 set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
5129 rtsx_trace(chip); 5154 rtsx_trace(chip);
5130 return TRANSPORT_FAILED; 5155 return TRANSPORT_FAILED;
@@ -5174,7 +5199,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
5174 5199
5175void sd_cleanup_work(struct rtsx_chip *chip) 5200void sd_cleanup_work(struct rtsx_chip *chip)
5176{ 5201{
5177 struct sd_info *sd_card = &(chip->sd_card); 5202 struct sd_info *sd_card = &chip->sd_card;
5178 5203
5179 if (sd_card->seq_mode) { 5204 if (sd_card->seq_mode) {
5180 dev_dbg(rtsx_dev(chip), "SD: stop transmission\n"); 5205 dev_dbg(rtsx_dev(chip), "SD: stop transmission\n");
@@ -5230,7 +5255,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
5230 5255
5231int release_sd_card(struct rtsx_chip *chip) 5256int release_sd_card(struct rtsx_chip *chip)
5232{ 5257{
5233 struct sd_info *sd_card = &(chip->sd_card); 5258 struct sd_info *sd_card = &chip->sd_card;
5234 int retval; 5259 int retval;
5235 5260
5236 chip->card_ready &= ~SD_CARD; 5261 chip->card_ready &= ~SD_CARD;
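The sd.c hunks above are style cleanups rather than behavioural changes: the mixed-case goto label SD_Execute_Write_Cmd_Failed becomes the lower-case sd_execute_write_cmd_failed, redundant parentheses in &(chip->sd_card) are dropped, and over-long argument lists are re-wrapped to fit 80 columns. A minimal, self-contained sketch of the error-path style the hunks converge on (hypothetical names, not code from the driver):

#include <stdio.h>

struct chip { int err_code; };

/* pretend the third step of some command sequence fails */
static int do_step(struct chip *chip, int step)
{
	return step < 3 ? 0 : -1;
}

static int run_sequence(struct chip *chip)
{
	int retval, i;

	for (i = 0; i < 5; i++) {
		retval = do_step(chip, i);
		if (retval)
			goto run_sequence_failed;	/* lower_case label */
	}
	return 0;

run_sequence_failed:
	chip->err_code = retval;	/* single unwind point, as in sd.c */
	return retval;
}

int main(void)
{
	struct chip chip = { 0 };
	int ret = run_sequence(&chip);

	printf("run_sequence() = %d, err_code = %d\n", ret, chip.err_code);
	return 0;
}
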
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index 60b79280fb5f..55764e16b93a 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -280,14 +280,15 @@ int reset_sd_card(struct rtsx_chip *chip);
280int sd_switch_clock(struct rtsx_chip *chip); 280int sd_switch_clock(struct rtsx_chip *chip);
281void sd_stop_seq_mode(struct rtsx_chip *chip); 281void sd_stop_seq_mode(struct rtsx_chip *chip);
282int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 282int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
283 u32 start_sector, u16 sector_cnt); 283 u32 start_sector, u16 sector_cnt);
284void sd_cleanup_work(struct rtsx_chip *chip); 284void sd_cleanup_work(struct rtsx_chip *chip);
285int sd_power_off_card3v3(struct rtsx_chip *chip); 285int sd_power_off_card3v3(struct rtsx_chip *chip);
286int release_sd_card(struct rtsx_chip *chip); 286int release_sd_card(struct rtsx_chip *chip);
287#ifdef SUPPORT_CPRM 287#ifdef SUPPORT_CPRM
288int soft_reset_sd_card(struct rtsx_chip *chip); 288int soft_reset_sd_card(struct rtsx_chip *chip);
289int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, 289int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
290 u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check); 290 u32 arg, u8 rsp_type, u8 *rsp, int rsp_len,
291 bool special_check);
291int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type); 292int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
292 293
293int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip); 294int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
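The sd.h change above only re-wraps the ext_sd_send_cmd_get_rsp() prototype so the continuation lines fit within 80 columns, aligned under the opening parenthesis. A standalone sketch of that declaration style, with the kernel's u8/u32/bool types swapped for their standard C equivalents so the fragment compiles on its own:

#include <stdbool.h>
#include <stdint.h>

struct rtsx_chip;	/* opaque here; defined by the driver's own headers */

int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, uint8_t cmd_idx,
			    uint32_t arg, uint8_t rsp_type, uint8_t *rsp,
			    int rsp_len, bool special_check);
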
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index 13c539c83838..8b8cd955dfeb 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -29,7 +29,7 @@
29 29
30static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code) 30static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
31{ 31{
32 struct spi_info *spi = &(chip->spi); 32 struct spi_info *spi = &chip->spi;
33 33
34 spi->err_code = err_code; 34 spi->err_code = err_code;
35} 35}
@@ -57,7 +57,7 @@ static int spi_init(struct rtsx_chip *chip)
57 57
58static int spi_set_init_para(struct rtsx_chip *chip) 58static int spi_set_init_para(struct rtsx_chip *chip)
59{ 59{
60 struct spi_info *spi = &(chip->spi); 60 struct spi_info *spi = &chip->spi;
61 int retval; 61 int retval;
62 62
63 retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF, 63 retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
@@ -117,9 +117,9 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
117 117
118 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR); 118 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
119 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 119 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
120 SPI_TRANSFER0_START | SPI_POLLING_MODE0); 120 SPI_TRANSFER0_START | SPI_POLLING_MODE0);
121 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 121 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
122 SPI_TRANSFER0_END); 122 SPI_TRANSFER0_END);
123 123
124 retval = rtsx_send_cmd(chip, 0, msec); 124 retval = rtsx_send_cmd(chip, 0, msec);
125 if (retval < 0) { 125 if (retval < 0) {
@@ -134,7 +134,7 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
134 134
135static int sf_enable_write(struct rtsx_chip *chip, u8 ins) 135static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
136{ 136{
137 struct spi_info *spi = &(chip->spi); 137 struct spi_info *spi = &chip->spi;
138 int retval; 138 int retval;
139 139
140 if (!spi->write_en) 140 if (!spi->write_en)
@@ -144,11 +144,11 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
144 144
145 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); 145 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
146 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 146 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
147 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 147 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
148 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 148 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
149 SPI_TRANSFER0_START | SPI_C_MODE0); 149 SPI_TRANSFER0_START | SPI_C_MODE0);
150 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 150 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
151 SPI_TRANSFER0_END); 151 SPI_TRANSFER0_END);
152 152
153 retval = rtsx_send_cmd(chip, 0, 100); 153 retval = rtsx_send_cmd(chip, 0, 100);
154 if (retval < 0) { 154 if (retval < 0) {
@@ -163,7 +163,7 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
163 163
164static int sf_disable_write(struct rtsx_chip *chip, u8 ins) 164static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
165{ 165{
166 struct spi_info *spi = &(chip->spi); 166 struct spi_info *spi = &chip->spi;
167 int retval; 167 int retval;
168 168
169 if (!spi->write_en) 169 if (!spi->write_en)
@@ -173,11 +173,11 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
173 173
174 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); 174 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
175 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 175 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
176 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 176 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
177 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 177 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
178 SPI_TRANSFER0_START | SPI_C_MODE0); 178 SPI_TRANSFER0_START | SPI_C_MODE0);
179 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 179 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
180 SPI_TRANSFER0_END); 180 SPI_TRANSFER0_END);
181 181
182 retval = rtsx_send_cmd(chip, 0, 100); 182 retval = rtsx_send_cmd(chip, 0, 100);
183 if (retval < 0) { 183 if (retval < 0) {
@@ -191,27 +191,27 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
191} 191}
192 192
193static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr, 193static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
194 u16 len) 194 u16 len)
195{ 195{
196 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); 196 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
197 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 197 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
198 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 198 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
199 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len); 199 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
200 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8)); 200 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
201 if (addr_mode) { 201 if (addr_mode) {
202 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr); 202 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
203 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, 203 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
204 (u8)(addr >> 8)); 204 (u8)(addr >> 8));
205 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, 205 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
206 (u8)(addr >> 16)); 206 (u8)(addr >> 16));
207 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 207 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
208 SPI_TRANSFER0_START | SPI_CADO_MODE0); 208 SPI_TRANSFER0_START | SPI_CADO_MODE0);
209 } else { 209 } else {
210 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 210 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
211 SPI_TRANSFER0_START | SPI_CDO_MODE0); 211 SPI_TRANSFER0_START | SPI_CDO_MODE0);
212 } 212 }
213 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 213 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
214 SPI_TRANSFER0_END); 214 SPI_TRANSFER0_END);
215} 215}
216 216
217static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr) 217static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
@@ -222,21 +222,21 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
222 222
223 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); 223 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
224 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 224 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
225 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 225 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
226 if (addr_mode) { 226 if (addr_mode) {
227 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr); 227 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
228 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, 228 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
229 (u8)(addr >> 8)); 229 (u8)(addr >> 8));
230 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, 230 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
231 (u8)(addr >> 16)); 231 (u8)(addr >> 16));
232 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 232 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
233 SPI_TRANSFER0_START | SPI_CA_MODE0); 233 SPI_TRANSFER0_START | SPI_CA_MODE0);
234 } else { 234 } else {
235 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 235 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
236 SPI_TRANSFER0_START | SPI_C_MODE0); 236 SPI_TRANSFER0_START | SPI_C_MODE0);
237 } 237 }
238 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 238 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
239 SPI_TRANSFER0_END); 239 SPI_TRANSFER0_END);
240 240
241 retval = rtsx_send_cmd(chip, 0, 100); 241 retval = rtsx_send_cmd(chip, 0, 100);
242 if (retval < 0) { 242 if (retval < 0) {
@@ -322,9 +322,9 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip)
322 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86); 322 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
323 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13); 323 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
324 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 324 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
325 SPI_TRANSFER0_START | SPI_CA_MODE0); 325 SPI_TRANSFER0_START | SPI_CA_MODE0);
326 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 326 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
327 SPI_TRANSFER0_END); 327 SPI_TRANSFER0_END);
328 328
329 retval = rtsx_send_cmd(chip, 0, 100); 329 retval = rtsx_send_cmd(chip, 0, 100);
330 if (retval < 0) { 330 if (retval < 0) {
@@ -358,9 +358,9 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
358 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12); 358 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
359 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84); 359 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
360 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 360 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
361 SPI_TRANSFER0_START | SPI_CA_MODE0); 361 SPI_TRANSFER0_START | SPI_CA_MODE0);
362 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 362 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
363 SPI_TRANSFER0_END); 363 SPI_TRANSFER0_END);
364 364
365 retval = rtsx_send_cmd(chip, 0, 100); 365 retval = rtsx_send_cmd(chip, 0, 100);
366 if (retval < 0) { 366 if (retval < 0) {
@@ -402,9 +402,9 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
402 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8)); 402 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
403 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46); 403 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
404 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 404 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
405 SPI_TRANSFER0_START | SPI_CA_MODE0); 405 SPI_TRANSFER0_START | SPI_CA_MODE0);
406 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 406 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
407 SPI_TRANSFER0_END); 407 SPI_TRANSFER0_END);
408 408
409 retval = rtsx_send_cmd(chip, 0, 100); 409 retval = rtsx_send_cmd(chip, 0, 100);
410 if (retval < 0) { 410 if (retval < 0) {
@@ -442,9 +442,9 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
442 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46); 442 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
443 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1); 443 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
444 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 444 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
445 SPI_TRANSFER0_START | SPI_CADI_MODE0); 445 SPI_TRANSFER0_START | SPI_CADI_MODE0);
446 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 446 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
447 SPI_TRANSFER0_END); 447 SPI_TRANSFER0_END);
448 448
449 retval = rtsx_send_cmd(chip, 0, 100); 449 retval = rtsx_send_cmd(chip, 0, 100);
450 if (retval < 0) { 450 if (retval < 0) {
@@ -497,9 +497,9 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
497 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8)); 497 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
498 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E); 498 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
499 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 499 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
500 SPI_TRANSFER0_START | SPI_CA_MODE0); 500 SPI_TRANSFER0_START | SPI_CA_MODE0);
501 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 501 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
502 SPI_TRANSFER0_END); 502 SPI_TRANSFER0_END);
503 503
504 retval = rtsx_send_cmd(chip, 0, 100); 504 retval = rtsx_send_cmd(chip, 0, 100);
505 if (retval < 0) { 505 if (retval < 0) {
@@ -518,12 +518,12 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
518 518
519int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) 519int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
520{ 520{
521 struct spi_info *spi = &(chip->spi); 521 struct spi_info *spi = &chip->spi;
522 522
523 dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n", 523 dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n",
524 spi->err_code); 524 spi->err_code);
525 rtsx_stor_set_xfer_buf(&(spi->err_code), 525 rtsx_stor_set_xfer_buf(&spi->err_code,
526 min_t(int, scsi_bufflen(srb), 1), srb); 526 min_t(int, scsi_bufflen(srb), 1), srb);
527 scsi_set_resid(srb, scsi_bufflen(srb) - 1); 527 scsi_set_resid(srb, scsi_bufflen(srb) - 1);
528 528
529 return STATUS_SUCCESS; 529 return STATUS_SUCCESS;
@@ -531,7 +531,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
531 531
532int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip) 532int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
533{ 533{
534 struct spi_info *spi = &(chip->spi); 534 struct spi_info *spi = &chip->spi;
535 535
536 spi_set_err_code(chip, SPI_NO_ERR); 536 spi_set_err_code(chip, SPI_NO_ERR);
537 537
@@ -574,37 +574,37 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
574 rtsx_init_cmd(chip); 574 rtsx_init_cmd(chip);
575 575
576 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, 576 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
577 PINGPONG_BUFFER); 577 PINGPONG_BUFFER);
578 578
579 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]); 579 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
580 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]); 580 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
581 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]); 581 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
582 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]); 582 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
583 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 583 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
584 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 584 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
585 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]); 585 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
586 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]); 586 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
587 587
588 if (len == 0) { 588 if (len == 0) {
589 if (srb->cmnd[9]) { 589 if (srb->cmnd[9]) {
590 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 590 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
591 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0); 591 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
592 } else { 592 } else {
593 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 593 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
594 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0); 594 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
595 } 595 }
596 } else { 596 } else {
597 if (srb->cmnd[9]) { 597 if (srb->cmnd[9]) {
598 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 598 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
599 SPI_TRANSFER0_START | SPI_CADI_MODE0); 599 SPI_TRANSFER0_START | SPI_CADI_MODE0);
600 } else { 600 } else {
601 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 601 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
602 SPI_TRANSFER0_START | SPI_CDI_MODE0); 602 SPI_TRANSFER0_START | SPI_CDI_MODE0);
603 } 603 }
604 } 604 }
605 605
606 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 606 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
607 SPI_TRANSFER0_END); 607 SPI_TRANSFER0_END);
608 608
609 retval = rtsx_send_cmd(chip, 0, 100); 609 retval = rtsx_send_cmd(chip, 0, 100);
610 if (retval < 0) { 610 if (retval < 0) {
@@ -682,38 +682,38 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
682 682
683 if (slow_read) { 683 if (slow_read) {
684 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, 684 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
685 (u8)addr); 685 (u8)addr);
686 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, 686 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
687 (u8)(addr >> 8)); 687 (u8)(addr >> 8));
688 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, 688 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
689 (u8)(addr >> 16)); 689 (u8)(addr >> 16));
690 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 690 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
691 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 691 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
692 } else { 692 } else {
693 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, 693 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
694 (u8)addr); 694 (u8)addr);
695 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, 695 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
696 (u8)(addr >> 8)); 696 (u8)(addr >> 8));
697 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF, 697 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
698 (u8)(addr >> 16)); 698 (u8)(addr >> 16));
699 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 699 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
700 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32); 700 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
701 } 701 }
702 702
703 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 703 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
704 (u8)(pagelen >> 8)); 704 (u8)(pagelen >> 8));
705 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 705 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
706 (u8)pagelen); 706 (u8)pagelen);
707 707
708 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 708 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
709 SPI_TRANSFER0_START | SPI_CADI_MODE0); 709 SPI_TRANSFER0_START | SPI_CADI_MODE0);
710 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, 710 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
711 SPI_TRANSFER0_END, SPI_TRANSFER0_END); 711 SPI_TRANSFER0_END, SPI_TRANSFER0_END);
712 712
713 rtsx_send_cmd_no_wait(chip); 713 rtsx_send_cmd_no_wait(chip);
714 714
715 retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0, 715 retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
716 DMA_FROM_DEVICE, 10000); 716 DMA_FROM_DEVICE, 10000);
717 if (retval < 0) { 717 if (retval < 0) {
718 kfree(buf); 718 kfree(buf);
719 rtsx_clear_spi_error(chip); 719 rtsx_clear_spi_error(chip);
@@ -723,7 +723,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
723 } 723 }
724 724
725 rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset, 725 rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
726 TO_XFER_BUF); 726 TO_XFER_BUF);
727 727
728 addr += pagelen; 728 addr += pagelen;
729 len -= pagelen; 729 len -= pagelen;
@@ -775,14 +775,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
775 } 775 }
776 776
777 rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset, 777 rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
778 FROM_XFER_BUF); 778 FROM_XFER_BUF);
779 779
780 rtsx_init_cmd(chip); 780 rtsx_init_cmd(chip);
781 781
782 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 782 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
783 0x01, PINGPONG_BUFFER); 783 0x01, PINGPONG_BUFFER);
784 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, 784 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
785 buf[0]); 785 buf[0]);
786 sf_program(chip, ins, 1, addr, 1); 786 sf_program(chip, ins, 1, addr, 1);
787 787
788 retval = rtsx_send_cmd(chip, 0, 100); 788 retval = rtsx_send_cmd(chip, 0, 100);
@@ -824,14 +824,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
824 824
825 while (len) { 825 while (len) {
826 rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset, 826 rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
827 FROM_XFER_BUF); 827 FROM_XFER_BUF);
828 828
829 rtsx_init_cmd(chip); 829 rtsx_init_cmd(chip);
830 830
831 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 831 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
832 0x01, PINGPONG_BUFFER); 832 0x01, PINGPONG_BUFFER);
833 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, 833 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
834 buf[0]); 834 buf[0]);
835 if (first_byte) { 835 if (first_byte) {
836 sf_program(chip, ins, 1, addr, 1); 836 sf_program(chip, ins, 1, addr, 1);
837 first_byte = 0; 837 first_byte = 0;
@@ -899,10 +899,10 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
899 rtsx_send_cmd_no_wait(chip); 899 rtsx_send_cmd_no_wait(chip);
900 900
901 rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, 901 rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
902 &offset, FROM_XFER_BUF); 902 &offset, FROM_XFER_BUF);
903 903
904 retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0, 904 retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
905 DMA_TO_DEVICE, 100); 905 DMA_TO_DEVICE, 100);
906 if (retval < 0) { 906 if (retval < 0) {
907 kfree(buf); 907 kfree(buf);
908 rtsx_clear_spi_error(chip); 908 rtsx_clear_spi_error(chip);
@@ -1010,18 +1010,18 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
1010 rtsx_init_cmd(chip); 1010 rtsx_init_cmd(chip);
1011 1011
1012 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, 1012 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
1013 PINGPONG_BUFFER); 1013 PINGPONG_BUFFER);
1014 1014
1015 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); 1015 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
1016 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 1016 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
1017 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); 1017 SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
1018 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0); 1018 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
1019 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1); 1019 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
1020 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status); 1020 rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
1021 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, 1021 rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
1022 SPI_TRANSFER0_START | SPI_CDO_MODE0); 1022 SPI_TRANSFER0_START | SPI_CDO_MODE0);
1023 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, 1023 rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
1024 SPI_TRANSFER0_END); 1024 SPI_TRANSFER0_END);
1025 1025
1026 retval = rtsx_send_cmd(chip, 0, 100); 1026 retval = rtsx_send_cmd(chip, 0, 100);
1027 if (retval != STATUS_SUCCESS) { 1027 if (retval != STATUS_SUCCESS) {
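The spi.c hunks above drop the redundant parentheses in expressions like &(chip->spi) and re-align wrapped arguments; no register writes change. A tiny compilable sketch (hypothetical structs, not driver code) of why the parenthesis removal is purely cosmetic: -> binds tighter than unary &, so both spellings take the address of the same member.

#include <assert.h>
#include <stdio.h>

struct spi_info { unsigned char err_code; };
struct chip { struct spi_info spi; };

int main(void)
{
	struct chip c = { { 0x5a } };
	struct chip *chip = &c;
	struct spi_info *old_style = &(chip->spi);	/* redundant parentheses */
	struct spi_info *new_style = &chip->spi;	/* same address, same meaning */

	assert(old_style == new_style);
	printf("err_code = 0x%x\n", new_style->err_code);
	return 0;
}
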
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 1de02bb98839..85aba05acbc1 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -37,21 +37,21 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
37 37
38static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code) 38static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
39{ 39{
40 struct xd_info *xd_card = &(chip->xd_card); 40 struct xd_info *xd_card = &chip->xd_card;
41 41
42 xd_card->err_code = err_code; 42 xd_card->err_code = err_code;
43} 43}
44 44
45static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code) 45static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
46{ 46{
47 struct xd_info *xd_card = &(chip->xd_card); 47 struct xd_info *xd_card = &chip->xd_card;
48 48
49 return (xd_card->err_code == err_code); 49 return (xd_card->err_code == err_code);
50} 50}
51 51
52static int xd_set_init_para(struct rtsx_chip *chip) 52static int xd_set_init_para(struct rtsx_chip *chip)
53{ 53{
54 struct xd_info *xd_card = &(chip->xd_card); 54 struct xd_info *xd_card = &chip->xd_card;
55 int retval; 55 int retval;
56 56
57 if (chip->asic_code) 57 if (chip->asic_code)
@@ -70,7 +70,7 @@ static int xd_set_init_para(struct rtsx_chip *chip)
70 70
71static int xd_switch_clock(struct rtsx_chip *chip) 71static int xd_switch_clock(struct rtsx_chip *chip)
72{ 72{
73 struct xd_info *xd_card = &(chip->xd_card); 73 struct xd_info *xd_card = &chip->xd_card;
74 int retval; 74 int retval;
75 75
76 retval = select_card(chip, XD_CARD); 76 retval = select_card(chip, XD_CARD);
@@ -97,9 +97,9 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
97 97
98 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd); 98 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
99 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 99 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
100 XD_TRANSFER_START | XD_READ_ID); 100 XD_TRANSFER_START | XD_READ_ID);
101 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END, 101 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
102 XD_TRANSFER_END); 102 XD_TRANSFER_END);
103 103
104 for (i = 0; i < 4; i++) 104 for (i = 0; i < 4; i++)
105 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0); 105 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
@@ -122,28 +122,30 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
122 122
123static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode) 123static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
124{ 124{
125 struct xd_info *xd_card = &(chip->xd_card); 125 struct xd_info *xd_card = &chip->xd_card;
126 126
127 switch (mode) { 127 switch (mode) {
128 case XD_RW_ADDR: 128 case XD_RW_ADDR:
129 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0); 129 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
130 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr); 130 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
131 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 131 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
132 0xFF, (u8)(addr >> 8)); 132 0xFF, (u8)(addr >> 8));
133 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3, 133 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
134 0xFF, (u8)(addr >> 16)); 134 0xFF, (u8)(addr >> 16));
135 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF, 135 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
136 xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM); 136 xd_card->addr_cycle |
137 XD_CALC_ECC |
138 XD_BA_NO_TRANSFORM);
137 break; 139 break;
138 140
139 case XD_ERASE_ADDR: 141 case XD_ERASE_ADDR:
140 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr); 142 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
141 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 143 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
142 0xFF, (u8)(addr >> 8)); 144 0xFF, (u8)(addr >> 8));
143 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 145 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
144 0xFF, (u8)(addr >> 16)); 146 0xFF, (u8)(addr >> 16));
145 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF, 147 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
146 (xd_card->addr_cycle - 1) | XD_CALC_ECC | 148 (xd_card->addr_cycle - 1) | XD_CALC_ECC |
147 XD_BA_NO_TRANSFORM); 149 XD_BA_NO_TRANSFORM);
148 break; 150 break;
149 151
@@ -153,7 +155,7 @@ static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
153} 155}
154 156
155static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr, 157static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
156 u8 *buf, int buf_len) 158 u8 *buf, int buf_len)
157{ 159{
158 int retval, i; 160 int retval, i;
159 161
@@ -162,16 +164,16 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
162 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); 164 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
163 165
164 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 166 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
165 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT); 167 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
166 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 168 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
167 XD_TRANSFER_END, XD_TRANSFER_END); 169 XD_TRANSFER_END, XD_TRANSFER_END);
168 170
169 for (i = 0; i < 6; i++) 171 for (i = 0; i < 6; i++)
170 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i), 172 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
171 0, 0); 173 0, 0);
172 for (i = 0; i < 4; i++) 174 for (i = 0; i < 4; i++)
173 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i), 175 rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
174 0, 0); 176 0, 0);
175 rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0); 177 rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
176 178
177 retval = rtsx_send_cmd(chip, XD_CARD, 500); 179 retval = rtsx_send_cmd(chip, XD_CARD, 500);
@@ -192,7 +194,7 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
192} 194}
193 195
194static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset, 196static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
195 u8 *buf, int buf_len) 197 u8 *buf, int buf_len)
196{ 198{
197 int retval, i; 199 int retval, i;
198 200
@@ -205,7 +207,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
205 207
206 for (i = 0; i < buf_len; i++) 208 for (i = 0; i < buf_len; i++)
207 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i, 209 rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
208 0, 0); 210 0, 0);
209 211
210 retval = rtsx_send_cmd(chip, 0, 250); 212 retval = rtsx_send_cmd(chip, 0, 250);
211 if (retval < 0) { 213 if (retval < 0) {
@@ -220,7 +222,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
220} 222}
221 223
222static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf, 224static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
223 int buf_len) 225 int buf_len)
224{ 226{
225 int retval; 227 int retval;
226 u8 reg; 228 u8 reg;
@@ -235,15 +237,15 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
235 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); 237 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
236 238
237 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 239 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
238 0x01, PINGPONG_BUFFER); 240 0x01, PINGPONG_BUFFER);
239 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1); 241 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
240 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 242 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
241 XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); 243 XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
242 244
243 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 245 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
244 XD_TRANSFER_START | XD_READ_PAGES); 246 XD_TRANSFER_START | XD_READ_PAGES);
245 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END, 247 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
246 XD_TRANSFER_END); 248 XD_TRANSFER_END);
247 249
248 retval = rtsx_send_cmd(chip, XD_CARD, 250); 250 retval = rtsx_send_cmd(chip, XD_CARD, 250);
249 if (retval == -ETIMEDOUT) { 251 if (retval == -ETIMEDOUT) {
@@ -347,27 +349,27 @@ static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
347{ 349{
348 if (CHECK_PID(chip, 0x5208)) { 350 if (CHECK_PID(chip, 0x5208)) {
349 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 351 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
350 XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); 352 XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
351 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 353 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
352 XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); 354 XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
353 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 355 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
354 XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 356 XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
355 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 357 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
356 XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD); 358 XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
357 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 359 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
358 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 360 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
359 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 361 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
360 MS_D5_PD | MS_D4_PD); 362 MS_D5_PD | MS_D4_PD);
361 } else if (CHECK_PID(chip, 0x5288)) { 363 } else if (CHECK_PID(chip, 0x5288)) {
362 if (CHECK_BARO_PKG(chip, QFN)) { 364 if (CHECK_BARO_PKG(chip, QFN)) {
363 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 365 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
364 0xFF, 0x55); 366 0xFF, 0x55);
365 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 367 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
366 0xFF, 0x55); 368 0xFF, 0x55);
367 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 369 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
368 0xFF, 0x4B); 370 0xFF, 0x4B);
369 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 371 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
370 0xFF, 0x69); 372 0xFF, 0x69);
371 } 373 }
372 } 374 }
373} 375}
@@ -386,27 +388,27 @@ static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
386{ 388{
387 if (CHECK_PID(chip, 0x5208)) { 389 if (CHECK_PID(chip, 0x5208)) {
388 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 390 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
389 XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); 391 XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
390 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 392 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
391 XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); 393 XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
392 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 394 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
393 XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU); 395 XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
394 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 396 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
395 XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD); 397 XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
396 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 398 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
397 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 399 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
398 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 400 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
399 MS_D5_PD | MS_D4_PD); 401 MS_D5_PD | MS_D4_PD);
400 } else if (CHECK_PID(chip, 0x5288)) { 402 } else if (CHECK_PID(chip, 0x5288)) {
401 if (CHECK_BARO_PKG(chip, QFN)) { 403 if (CHECK_BARO_PKG(chip, QFN)) {
402 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 404 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
403 0xFF, 0x55); 405 0xFF, 0x55);
404 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 406 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
405 0xFF, 0x55); 407 0xFF, 0x55);
406 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 408 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
407 0xFF, 0x53); 409 0xFF, 0x53);
408 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 410 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
409 0xFF, 0xA9); 411 0xFF, 0xA9);
410 } 412 }
411 } 413 }
412} 414}
@@ -417,31 +419,46 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
417 419
418 if (CHECK_PID(chip, 0x5208)) { 420 if (CHECK_PID(chip, 0x5208)) {
419 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, 421 retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
420 XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); 422 XD_D3_PD |
423 XD_D2_PD |
424 XD_D1_PD |
425 XD_D0_PD);
421 if (retval) { 426 if (retval) {
422 rtsx_trace(chip); 427 rtsx_trace(chip);
423 return retval; 428 return retval;
424 } 429 }
425 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, 430 retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
426 XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); 431 XD_D7_PD |
432 XD_D6_PD |
433 XD_D5_PD |
434 XD_D4_PD);
427 if (retval) { 435 if (retval) {
428 rtsx_trace(chip); 436 rtsx_trace(chip);
429 return retval; 437 return retval;
430 } 438 }
431 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, 439 retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
432 XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); 440 XD_WP_PD |
441 XD_CE_PD |
442 XD_CLE_PD |
443 XD_CD_PU);
433 if (retval) { 444 if (retval) {
434 rtsx_trace(chip); 445 rtsx_trace(chip);
435 return retval; 446 return retval;
436 } 447 }
437 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, 448 retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
438 XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD); 449 XD_RDY_PD |
450 XD_WE_PD |
451 XD_RE_PD |
452 XD_ALE_PD);
439 if (retval) { 453 if (retval) {
440 rtsx_trace(chip); 454 rtsx_trace(chip);
441 return retval; 455 return retval;
442 } 456 }
443 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, 457 retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
444 MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); 458 MS_INS_PU |
459 SD_WP_PD |
460 SD_CD_PU |
461 SD_CMD_PD);
445 if (retval) { 462 if (retval) {
446 rtsx_trace(chip); 463 rtsx_trace(chip);
447 return retval; 464 return retval;
@@ -486,7 +503,7 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
486 503
487static int reset_xd(struct rtsx_chip *chip) 504static int reset_xd(struct rtsx_chip *chip)
488{ 505{
489 struct xd_info *xd_card = &(chip->xd_card); 506 struct xd_info *xd_card = &chip->xd_card;
490 int retval, i, j; 507 int retval, i, j;
491 u8 *ptr, id_buf[4], redunt[11]; 508 u8 *ptr, id_buf[4], redunt[11];
492 509
@@ -499,7 +516,7 @@ static int reset_xd(struct rtsx_chip *chip)
499 rtsx_init_cmd(chip); 516 rtsx_init_cmd(chip);
500 517
501 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF, 518 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
502 XD_PGSTS_NOT_FF); 519 XD_PGSTS_NOT_FF);
503 if (chip->asic_code) { 520 if (chip->asic_code) {
504 if (!CHECK_PID(chip, 0x5288)) 521 if (!CHECK_PID(chip, 0x5288))
505 xd_fill_pull_ctl_disable(chip); 522 xd_fill_pull_ctl_disable(chip);
@@ -507,12 +524,13 @@ static int reset_xd(struct rtsx_chip *chip)
507 xd_fill_pull_ctl_stage1_barossa(chip); 524 xd_fill_pull_ctl_stage1_barossa(chip);
508 } else { 525 } else {
509 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, 526 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
510 (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20); 527 (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
528 0x20);
511 } 529 }
512 530
513 if (!chip->ft2_fast_mode) 531 if (!chip->ft2_fast_mode)
514 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT, 532 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
515 XD_NO_AUTO_PWR_OFF, 0); 533 XD_NO_AUTO_PWR_OFF, 0);
516 534
517 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0); 535 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
518 536
@@ -537,8 +555,9 @@ static int reset_xd(struct rtsx_chip *chip)
537 xd_fill_pull_ctl_enable(chip); 555 xd_fill_pull_ctl_enable(chip);
538 } else { 556 } else {
539 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, 557 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
540 (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) | 558 (FPGA_XD_PULL_CTL_EN1 &
541 0x20); 559 FPGA_XD_PULL_CTL_EN2) |
560 0x20);
542 } 561 }
543 562
544 retval = rtsx_send_cmd(chip, XD_CARD, 100); 563 retval = rtsx_send_cmd(chip, XD_CARD, 100);
@@ -571,8 +590,9 @@ static int reset_xd(struct rtsx_chip *chip)
571 xd_fill_pull_ctl_enable(chip); 590 xd_fill_pull_ctl_enable(chip);
572 } else { 591 } else {
573 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, 592 rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
574 (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) | 593 (FPGA_XD_PULL_CTL_EN1 &
575 0x20); 594 FPGA_XD_PULL_CTL_EN2) |
595 0x20);
576 } 596 }
577 } 597 }
578 598
@@ -599,16 +619,17 @@ static int reset_xd(struct rtsx_chip *chip)
599 rtsx_init_cmd(chip); 619 rtsx_init_cmd(chip);
600 620
601 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF, 621 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
602 XD_TIME_SETUP_STEP * 3 + 622 XD_TIME_SETUP_STEP * 3 +
603 XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i); 623 XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
604 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF, 624 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
605 XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) + 625 XD_TIME_SETUP_STEP * 3 +
606 XD_TIME_RWN_STEP * (3 + i)); 626 XD_TIME_RW_STEP * (4 + i) +
627 XD_TIME_RWN_STEP * (3 + i));
607 628
608 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 629 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
609 XD_TRANSFER_START | XD_RESET); 630 XD_TRANSFER_START | XD_RESET);
610 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 631 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
611 XD_TRANSFER_END, XD_TRANSFER_END); 632 XD_TRANSFER_END, XD_TRANSFER_END);
612 633
613 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); 634 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
614 rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0); 635 rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -625,7 +646,7 @@ static int reset_xd(struct rtsx_chip *chip)
625 ptr[0], ptr[1]); 646 ptr[0], ptr[1]);
626 647
627 if (((ptr[0] & READY_FLAG) != READY_STATE) || 648 if (((ptr[0] & READY_FLAG) != READY_STATE) ||
628 !(ptr[1] & XD_RDY)) 649 !(ptr[1] & XD_RDY))
629 continue; 650 continue;
630 651
631 retval = xd_read_id(chip, READ_ID, id_buf, 4); 652 retval = xd_read_id(chip, READ_ID, id_buf, 4);
@@ -773,7 +794,7 @@ static int reset_xd(struct rtsx_chip *chip)
773 if (redunt[PAGE_STATUS] != XD_GPG) { 794 if (redunt[PAGE_STATUS] != XD_GPG) {
774 for (j = 1; j <= 8; j++) { 795 for (j = 1; j <= 8; j++) {
775 retval = xd_read_redundant(chip, page_addr + j, 796 retval = xd_read_redundant(chip, page_addr + j,
776 redunt, 11); 797 redunt, 11);
777 if (retval == STATUS_SUCCESS) { 798 if (retval == STATUS_SUCCESS) {
778 if (redunt[PAGE_STATUS] == XD_GPG) 799 if (redunt[PAGE_STATUS] == XD_GPG)
779 break; 800 break;
@@ -786,7 +807,7 @@ static int reset_xd(struct rtsx_chip *chip)
786 807
787 /* Check CIS data */ 808 /* Check CIS data */
788 if ((redunt[BLOCK_STATUS] == XD_GBLK) && 809 if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
789 (redunt[PARITY] & XD_BA1_ALL0)) { 810 (redunt[PARITY] & XD_BA1_ALL0)) {
790 u8 buf[10]; 811 u8 buf[10];
791 812
792 page_addr += j; 813 page_addr += j;
@@ -798,11 +819,11 @@ static int reset_xd(struct rtsx_chip *chip)
798 } 819 }
799 820
800 if ((buf[0] == 0x01) && (buf[1] == 0x03) && 821 if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
801 (buf[2] == 0xD9) 822 (buf[2] == 0xD9) &&
802 && (buf[3] == 0x01) && (buf[4] == 0xFF) 823 (buf[3] == 0x01) && (buf[4] == 0xFF) &&
803 && (buf[5] == 0x18) && (buf[6] == 0x02) 824 (buf[5] == 0x18) && (buf[6] == 0x02) &&
804 && (buf[7] == 0xDF) && (buf[8] == 0x01) 825 (buf[7] == 0xDF) && (buf[8] == 0x01) &&
805 && (buf[9] == 0x20)) { 826 (buf[9] == 0x20)) {
806 xd_card->cis_block = (u16)i; 827 xd_card->cis_block = (u16)i;
807 } 828 }
808 } 829 }
@@ -861,7 +882,7 @@ static u16 xd_load_log_block_addr(u8 *redunt)
861 882
862static int xd_init_l2p_tbl(struct rtsx_chip *chip) 883static int xd_init_l2p_tbl(struct rtsx_chip *chip)
863{ 884{
864 struct xd_info *xd_card = &(chip->xd_card); 885 struct xd_info *xd_card = &chip->xd_card;
865 int size, i; 886 int size, i;
866 887
867 dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n", 888 dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n",
@@ -910,7 +931,7 @@ static inline void free_zone(struct zone_entry *zone)
910 931
911static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk) 932static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
912{ 933{
913 struct xd_info *xd_card = &(chip->xd_card); 934 struct xd_info *xd_card = &chip->xd_card;
914 struct zone_entry *zone; 935 struct zone_entry *zone;
915 int zone_no; 936 int zone_no;
916 937
@@ -920,15 +941,15 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
920 zone_no, xd_card->zone_cnt); 941 zone_no, xd_card->zone_cnt);
921 return; 942 return;
922 } 943 }
923 zone = &(xd_card->zone[zone_no]); 944 zone = &xd_card->zone[zone_no];
924 945
925 if (zone->free_table == NULL) { 946 if (!zone->free_table) {
926 if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS) 947 if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
927 return; 948 return;
928 } 949 }
929 950
930 if ((zone->set_index >= XD_FREE_TABLE_CNT) 951 if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
931 || (zone->set_index < 0)) { 952 (zone->set_index < 0)) {
932 free_zone(zone); 953 free_zone(zone);
933 dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n"); 954 dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
934 return; 955 return;
@@ -945,7 +966,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
945 966
946static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no) 967static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
947{ 968{
948 struct xd_info *xd_card = &(chip->xd_card); 969 struct xd_info *xd_card = &chip->xd_card;
949 struct zone_entry *zone; 970 struct zone_entry *zone;
950 u32 phy_blk; 971 u32 phy_blk;
951 972
@@ -954,10 +975,10 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
954 zone_no, xd_card->zone_cnt); 975 zone_no, xd_card->zone_cnt);
955 return BLK_NOT_FOUND; 976 return BLK_NOT_FOUND;
956 } 977 }
957 zone = &(xd_card->zone[zone_no]); 978 zone = &xd_card->zone[zone_no];
958 979
959 if ((zone->unused_blk_cnt == 0) || 980 if ((zone->unused_blk_cnt == 0) ||
960 (zone->set_index == zone->get_index)) { 981 (zone->set_index == zone->get_index)) {
961 free_zone(zone); 982 free_zone(zone);
962 dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n"); 983 dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
963 return BLK_NOT_FOUND; 984 return BLK_NOT_FOUND;
@@ -982,22 +1003,22 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
982} 1003}
983 1004
984static void xd_set_l2p_tbl(struct rtsx_chip *chip, 1005static void xd_set_l2p_tbl(struct rtsx_chip *chip,
985 int zone_no, u16 log_off, u16 phy_off) 1006 int zone_no, u16 log_off, u16 phy_off)
986{ 1007{
987 struct xd_info *xd_card = &(chip->xd_card); 1008 struct xd_info *xd_card = &chip->xd_card;
988 struct zone_entry *zone; 1009 struct zone_entry *zone;
989 1010
990 zone = &(xd_card->zone[zone_no]); 1011 zone = &xd_card->zone[zone_no];
991 zone->l2p_table[log_off] = phy_off; 1012 zone->l2p_table[log_off] = phy_off;
992} 1013}
993 1014
994static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off) 1015static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
995{ 1016{
996 struct xd_info *xd_card = &(chip->xd_card); 1017 struct xd_info *xd_card = &chip->xd_card;
997 struct zone_entry *zone; 1018 struct zone_entry *zone;
998 int retval; 1019 int retval;
999 1020
1000 zone = &(xd_card->zone[zone_no]); 1021 zone = &xd_card->zone[zone_no];
1001 if (zone->l2p_table[log_off] == 0xFFFF) { 1022 if (zone->l2p_table[log_off] == 0xFFFF) {
1002 u32 phy_blk = 0; 1023 u32 phy_blk = 0;
1003 int i; 1024 int i;
@@ -1023,7 +1044,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
1023 } 1044 }
1024 1045
1025 retval = xd_init_page(chip, phy_blk, log_off, 1046 retval = xd_init_page(chip, phy_blk, log_off,
1026 0, xd_card->page_off + 1); 1047 0, xd_card->page_off + 1);
1027 if (retval == STATUS_SUCCESS) 1048 if (retval == STATUS_SUCCESS)
1028 break; 1049 break;
1029 } 1050 }
@@ -1041,7 +1062,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
1041 1062
1042int reset_xd_card(struct rtsx_chip *chip) 1063int reset_xd_card(struct rtsx_chip *chip)
1043{ 1064{
1044 struct xd_info *xd_card = &(chip->xd_card); 1065 struct xd_info *xd_card = &chip->xd_card;
1045 int retval; 1066 int retval;
1046 1067
1047 memset(xd_card, 0, sizeof(struct xd_info)); 1068 memset(xd_card, 0, sizeof(struct xd_info));
@@ -1077,7 +1098,7 @@ int reset_xd_card(struct rtsx_chip *chip)
1077 1098
1078static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk) 1099static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
1079{ 1100{
1080 struct xd_info *xd_card = &(chip->xd_card); 1101 struct xd_info *xd_card = &chip->xd_card;
1081 int retval; 1102 int retval;
1082 u32 page_addr; 1103 u32 page_addr;
1083 u8 reg = 0; 1104 u8 reg = 0;
@@ -1107,12 +1128,12 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
1107 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); 1128 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
1108 1129
1109 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1130 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
1110 xd_card->page_off + 1); 1131 xd_card->page_off + 1);
1111 1132
1112 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1133 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1113 XD_TRANSFER_START | XD_WRITE_REDUNDANT); 1134 XD_TRANSFER_START | XD_WRITE_REDUNDANT);
1114 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1135 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1115 XD_TRANSFER_END, XD_TRANSFER_END); 1136 XD_TRANSFER_END, XD_TRANSFER_END);
1116 1137
1117 retval = rtsx_send_cmd(chip, XD_CARD, 500); 1138 retval = rtsx_send_cmd(chip, XD_CARD, 500);
1118 if (retval < 0) { 1139 if (retval < 0) {
@@ -1132,7 +1153,7 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
1132static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, 1153static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
1133 u16 logoff, u8 start_page, u8 end_page) 1154 u16 logoff, u8 start_page, u8 end_page)
1134{ 1155{
1135 struct xd_info *xd_card = &(chip->xd_card); 1156 struct xd_info *xd_card = &chip->xd_card;
1136 int retval; 1157 int retval;
1137 u32 page_addr; 1158 u32 page_addr;
1138 u8 reg = 0; 1159 u8 reg = 0;
@@ -1153,7 +1174,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
1153 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF); 1174 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
1154 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF); 1175 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
1155 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 1176 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
1156 0xFF, (u8)(logoff >> 8)); 1177 0xFF, (u8)(logoff >> 8));
1157 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff); 1178 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
1158 1179
1159 page_addr = (phy_blk << xd_card->block_shift) + start_page; 1180 page_addr = (phy_blk << xd_card->block_shift) + start_page;
@@ -1161,15 +1182,15 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
1161 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); 1182 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
1162 1183
1163 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 1184 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
1164 XD_BA_TRANSFORM, XD_BA_TRANSFORM); 1185 XD_BA_TRANSFORM, XD_BA_TRANSFORM);
1165 1186
1166 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 1187 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
1167 0xFF, (end_page - start_page)); 1188 0xFF, (end_page - start_page));
1168 1189
1169 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 1190 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
1170 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT); 1191 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
1171 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1192 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1172 XD_TRANSFER_END, XD_TRANSFER_END); 1193 XD_TRANSFER_END, XD_TRANSFER_END);
1173 1194
1174 retval = rtsx_send_cmd(chip, XD_CARD, 500); 1195 retval = rtsx_send_cmd(chip, XD_CARD, 500);
1175 if (retval < 0) { 1196 if (retval < 0) {
@@ -1191,7 +1212,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
1191static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, 1212static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
1192 u8 start_page, u8 end_page) 1213 u8 start_page, u8 end_page)
1193{ 1214{
1194 struct xd_info *xd_card = &(chip->xd_card); 1215 struct xd_info *xd_card = &chip->xd_card;
1195 u32 old_page, new_page; 1216 u32 old_page, new_page;
1196 u8 i, reg = 0; 1217 u8 i, reg = 0;
1197 int retval; 1218 int retval;
@@ -1235,11 +1256,11 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
1235 1256
1236 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1); 1257 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
1237 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 1258 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
1238 XD_AUTO_CHK_DATA_STATUS, 0); 1259 XD_AUTO_CHK_DATA_STATUS, 0);
1239 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1260 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1240 XD_TRANSFER_START | XD_READ_PAGES); 1261 XD_TRANSFER_START | XD_READ_PAGES);
1241 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1262 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1242 XD_TRANSFER_END, XD_TRANSFER_END); 1263 XD_TRANSFER_END, XD_TRANSFER_END);
1243 1264
1244 retval = rtsx_send_cmd(chip, XD_CARD, 500); 1265 retval = rtsx_send_cmd(chip, XD_CARD, 500);
1245 if (retval < 0) { 1266 if (retval < 0) {
@@ -1250,22 +1271,24 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
1250 wait_timeout(100); 1271 wait_timeout(100);
1251 1272
1252 if (detect_card_cd(chip, 1273 if (detect_card_cd(chip,
1253 XD_CARD) != STATUS_SUCCESS) { 1274 XD_CARD) != STATUS_SUCCESS) {
1254 xd_set_err_code(chip, XD_NO_CARD); 1275 xd_set_err_code(chip, XD_NO_CARD);
1255 rtsx_trace(chip); 1276 rtsx_trace(chip);
1256 return STATUS_FAIL; 1277 return STATUS_FAIL;
1257 } 1278 }
1258 1279
1259 if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) == 1280 if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
1260 (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) 1281 (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
1261 || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) == 1282 ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
1262 (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) { 1283 (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
1263 rtsx_write_register(chip, 1284 rtsx_write_register(chip,
1264 XD_PAGE_STATUS, 0xFF, 1285 XD_PAGE_STATUS,
1265 XD_BPG); 1286 0xFF,
1287 XD_BPG);
1266 rtsx_write_register(chip, 1288 rtsx_write_register(chip,
1267 XD_BLOCK_STATUS, 0xFF, 1289 XD_BLOCK_STATUS,
1268 XD_GBLK); 1290 0xFF,
1291 XD_GBLK);
1269 XD_SET_BAD_OLDBLK(xd_card); 1292 XD_SET_BAD_OLDBLK(xd_card);
1270 dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n", 1293 dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
1271 old_blk); 1294 old_blk);
@@ -1287,7 +1310,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
1287 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1310 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1288 XD_TRANSFER_START | XD_WRITE_PAGES); 1311 XD_TRANSFER_START | XD_WRITE_PAGES);
1289 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1312 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1290 XD_TRANSFER_END, XD_TRANSFER_END); 1313 XD_TRANSFER_END, XD_TRANSFER_END);
1291 1314
1292 retval = rtsx_send_cmd(chip, XD_CARD, 300); 1315 retval = rtsx_send_cmd(chip, XD_CARD, 300);
1293 if (retval < 0) { 1316 if (retval < 0) {
@@ -1320,9 +1343,9 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
1320 rtsx_init_cmd(chip); 1343 rtsx_init_cmd(chip);
1321 1344
1322 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 1345 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
1323 0xFF, XD_TRANSFER_START | XD_RESET); 1346 0xFF, XD_TRANSFER_START | XD_RESET);
1324 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1347 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1325 XD_TRANSFER_END, XD_TRANSFER_END); 1348 XD_TRANSFER_END, XD_TRANSFER_END);
1326 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); 1349 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
1327 rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0); 1350 rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
1328 1351
@@ -1342,7 +1365,7 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
1342 1365
1343static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk) 1366static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
1344{ 1367{
1345 struct xd_info *xd_card = &(chip->xd_card); 1368 struct xd_info *xd_card = &chip->xd_card;
1346 u32 page_addr; 1369 u32 page_addr;
1347 u8 reg = 0, *ptr; 1370 u8 reg = 0, *ptr;
1348 int i, retval; 1371 int i, retval;
@@ -1360,9 +1383,9 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
1360 xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR); 1383 xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
1361 1384
1362 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1385 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1363 XD_TRANSFER_START | XD_ERASE); 1386 XD_TRANSFER_START | XD_ERASE);
1364 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1387 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1365 XD_TRANSFER_END, XD_TRANSFER_END); 1388 XD_TRANSFER_END, XD_TRANSFER_END);
1366 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); 1389 rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
1367 1390
1368 retval = rtsx_send_cmd(chip, XD_CARD, 250); 1391 retval = rtsx_send_cmd(chip, XD_CARD, 250);
@@ -1403,7 +1426,7 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
1403 1426
1404static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) 1427static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1405{ 1428{
1406 struct xd_info *xd_card = &(chip->xd_card); 1429 struct xd_info *xd_card = &chip->xd_card;
1407 struct zone_entry *zone; 1430 struct zone_entry *zone;
1408 int retval; 1431 int retval;
1409 u32 start, end, i; 1432 u32 start, end, i;
@@ -1413,7 +1436,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1413 1436
1414 dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no); 1437 dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no);
1415 1438
1416 if (xd_card->zone == NULL) { 1439 if (!xd_card->zone) {
1417 retval = xd_init_l2p_tbl(chip); 1440 retval = xd_init_l2p_tbl(chip);
1418 if (retval != STATUS_SUCCESS) 1441 if (retval != STATUS_SUCCESS)
1419 return retval; 1442 return retval;
@@ -1425,22 +1448,22 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1425 return STATUS_SUCCESS; 1448 return STATUS_SUCCESS;
1426 } 1449 }
1427 1450
1428 zone = &(xd_card->zone[zone_no]); 1451 zone = &xd_card->zone[zone_no];
1429 1452
1430 if (zone->l2p_table == NULL) { 1453 if (!zone->l2p_table) {
1431 zone->l2p_table = vmalloc(2000); 1454 zone->l2p_table = vmalloc(2000);
1432 if (!zone->l2p_table) { 1455 if (!zone->l2p_table) {
1433 rtsx_trace(chip); 1456 rtsx_trace(chip);
1434 goto Build_Fail; 1457 goto build_fail;
1435 } 1458 }
1436 } 1459 }
1437 memset((u8 *)(zone->l2p_table), 0xff, 2000); 1460 memset((u8 *)(zone->l2p_table), 0xff, 2000);
1438 1461
1439 if (zone->free_table == NULL) { 1462 if (!zone->free_table) {
1440 zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2); 1463 zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
1441 if (!zone->free_table) { 1464 if (!zone->free_table) {
1442 rtsx_trace(chip); 1465 rtsx_trace(chip);
1443 goto Build_Fail; 1466 goto build_fail;
1444 } 1467 }
1445 } 1468 }
1446 memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2); 1469 memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
@@ -1466,7 +1489,8 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1466 dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n", 1489 dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
1467 start, end); 1490 start, end);
1468 1491
1469 zone->set_index = zone->get_index = 0; 1492 zone->set_index = 0;
1493 zone->get_index = 0;
1470 zone->unused_blk_cnt = 0; 1494 zone->unused_blk_cnt = 0;
1471 1495
1472 for (i = start; i < end; i++) { 1496 for (i = start; i < end; i++) {
@@ -1490,7 +1514,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1490 1514
1491 cur_fst_page_logoff = xd_load_log_block_addr(redunt); 1515 cur_fst_page_logoff = xd_load_log_block_addr(redunt);
1492 if ((cur_fst_page_logoff == 0xFFFF) || 1516 if ((cur_fst_page_logoff == 0xFFFF) ||
1493 (cur_fst_page_logoff > max_logoff)) { 1517 (cur_fst_page_logoff > max_logoff)) {
1494 retval = xd_erase_block(chip, i); 1518 retval = xd_erase_block(chip, i);
1495 if (retval == STATUS_SUCCESS) 1519 if (retval == STATUS_SUCCESS)
1496 xd_set_unused_block(chip, i); 1520 xd_set_unused_block(chip, i);
@@ -1498,7 +1522,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1498 } 1522 }
1499 1523
1500 if ((zone_no == 0) && (cur_fst_page_logoff == 0) && 1524 if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
1501 (redunt[PAGE_STATUS] != XD_GPG)) 1525 (redunt[PAGE_STATUS] != XD_GPG))
1502 XD_SET_MBR_FAIL(xd_card); 1526 XD_SET_MBR_FAIL(xd_card);
1503 1527
1504 if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) { 1528 if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
@@ -1524,7 +1548,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1524 1548
1525 for (m = 0; m < 3; m++) { 1549 for (m = 0; m < 3; m++) {
1526 retval = xd_read_redundant(chip, page_addr, 1550 retval = xd_read_redundant(chip, page_addr,
1527 redunt, 11); 1551 redunt, 11);
1528 if (retval == STATUS_SUCCESS) 1552 if (retval == STATUS_SUCCESS)
1529 break; 1553 break;
1530 } 1554 }
@@ -1581,7 +1605,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
1581 1605
1582 return STATUS_SUCCESS; 1606 return STATUS_SUCCESS;
1583 1607
1584Build_Fail: 1608build_fail:
1585 vfree(zone->l2p_table); 1609 vfree(zone->l2p_table);
1586 zone->l2p_table = NULL; 1610 zone->l2p_table = NULL;
1587 vfree(zone->free_table); 1611 vfree(zone->free_table);
@@ -1598,9 +1622,9 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
1598 1622
1599 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd); 1623 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
1600 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1624 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1601 XD_TRANSFER_START | XD_SET_CMD); 1625 XD_TRANSFER_START | XD_SET_CMD);
1602 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1626 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1603 XD_TRANSFER_END, XD_TRANSFER_END); 1627 XD_TRANSFER_END, XD_TRANSFER_END);
1604 1628
1605 retval = rtsx_send_cmd(chip, XD_CARD, 200); 1629 retval = rtsx_send_cmd(chip, XD_CARD, 200);
1606 if (retval < 0) { 1630 if (retval < 0) {
@@ -1612,18 +1636,18 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
1612} 1636}
1613 1637
1614static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, 1638static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
1615 u32 log_blk, u8 start_page, u8 end_page, 1639 u32 log_blk, u8 start_page, u8 end_page,
1616 u8 *buf, unsigned int *index, 1640 u8 *buf, unsigned int *index,
1617 unsigned int *offset) 1641 unsigned int *offset)
1618{ 1642{
1619 struct xd_info *xd_card = &(chip->xd_card); 1643 struct xd_info *xd_card = &chip->xd_card;
1620 u32 page_addr, new_blk; 1644 u32 page_addr, new_blk;
1621 u16 log_off; 1645 u16 log_off;
1622 u8 reg_val, page_cnt; 1646 u8 reg_val, page_cnt;
1623 int zone_no, retval, i; 1647 int zone_no, retval, i;
1624 1648
1625 if (start_page > end_page) 1649 if (start_page > end_page)
1626 goto Status_Fail; 1650 goto status_fail;
1627 1651
1628 page_cnt = end_page - start_page; 1652 page_cnt = end_page - start_page;
1629 zone_no = (int)(log_blk / 1000); 1653 zone_no = (int)(log_blk / 1000);
@@ -1639,7 +1663,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
1639 1663
1640 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { 1664 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
1641 xd_set_err_code(chip, XD_NO_CARD); 1665 xd_set_err_code(chip, XD_NO_CARD);
1642 goto Status_Fail; 1666 goto status_fail;
1643 } 1667 }
1644 } 1668 }
1645 } 1669 }
@@ -1653,37 +1677,38 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
1653 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); 1677 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
1654 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt); 1678 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
1655 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 1679 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
1656 XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); 1680 XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
1657 1681
1658 trans_dma_enable(chip->srb->sc_data_direction, chip, 1682 trans_dma_enable(chip->srb->sc_data_direction, chip,
1659 page_cnt * 512, DMA_512); 1683 page_cnt * 512, DMA_512);
1660 1684
1661 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, 1685 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
1662 XD_TRANSFER_START | XD_READ_PAGES); 1686 XD_TRANSFER_START | XD_READ_PAGES);
1663 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1687 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1664 XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY); 1688 XD_TRANSFER_END | XD_PPB_EMPTY,
1689 XD_TRANSFER_END | XD_PPB_EMPTY);
1665 1690
1666 rtsx_send_cmd_no_wait(chip); 1691 rtsx_send_cmd_no_wait(chip);
1667 1692
1668 retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512, 1693 retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
1669 scsi_sg_count(chip->srb), 1694 scsi_sg_count(chip->srb),
1670 index, offset, DMA_FROM_DEVICE, 1695 index, offset, DMA_FROM_DEVICE,
1671 chip->xd_timeout); 1696 chip->xd_timeout);
1672 if (retval < 0) { 1697 if (retval < 0) {
1673 rtsx_clear_xd_error(chip); 1698 rtsx_clear_xd_error(chip);
1674 1699
1675 if (retval == -ETIMEDOUT) { 1700 if (retval == -ETIMEDOUT) {
1676 xd_set_err_code(chip, XD_TO_ERROR); 1701 xd_set_err_code(chip, XD_TO_ERROR);
1677 goto Status_Fail; 1702 goto status_fail;
1678 } else { 1703 } else {
1679 rtsx_trace(chip); 1704 rtsx_trace(chip);
1680 goto Fail; 1705 goto fail;
1681 } 1706 }
1682 } 1707 }
1683 1708
1684 return STATUS_SUCCESS; 1709 return STATUS_SUCCESS;
1685 1710
1686Fail: 1711fail:
1687 retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val); 1712 retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
1688 if (retval) { 1713 if (retval) {
1689 rtsx_trace(chip); 1714 rtsx_trace(chip);
@@ -1699,15 +1724,15 @@ Fail:
1699 return retval; 1724 return retval;
1700 } 1725 }
1701 1726
1702 if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) 1727 if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
1703 == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) 1728 (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
1704 || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) 1729 ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
1705 == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) { 1730 (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
1706 wait_timeout(100); 1731 wait_timeout(100);
1707 1732
1708 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { 1733 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
1709 xd_set_err_code(chip, XD_NO_CARD); 1734 xd_set_err_code(chip, XD_NO_CARD);
1710 goto Status_Fail; 1735 goto status_fail;
1711 } 1736 }
1712 1737
1713 xd_set_err_code(chip, XD_ECC_ERROR); 1738 xd_set_err_code(chip, XD_ECC_ERROR);
@@ -1715,11 +1740,11 @@ Fail:
1715 new_blk = xd_get_unused_block(chip, zone_no); 1740 new_blk = xd_get_unused_block(chip, zone_no);
1716 if (new_blk == NO_NEW_BLK) { 1741 if (new_blk == NO_NEW_BLK) {
1717 XD_CLR_BAD_OLDBLK(xd_card); 1742 XD_CLR_BAD_OLDBLK(xd_card);
1718 goto Status_Fail; 1743 goto status_fail;
1719 } 1744 }
1720 1745
1721 retval = xd_copy_page(chip, phy_blk, new_blk, 0, 1746 retval = xd_copy_page(chip, phy_blk, new_blk, 0,
1722 xd_card->page_off + 1); 1747 xd_card->page_off + 1);
1723 if (retval != STATUS_SUCCESS) { 1748 if (retval != STATUS_SUCCESS) {
1724 if (!XD_CHK_BAD_NEWBLK(xd_card)) { 1749 if (!XD_CHK_BAD_NEWBLK(xd_card)) {
1725 retval = xd_erase_block(chip, new_blk); 1750 retval = xd_erase_block(chip, new_blk);
@@ -1729,7 +1754,7 @@ Fail:
1729 XD_CLR_BAD_NEWBLK(xd_card); 1754 XD_CLR_BAD_NEWBLK(xd_card);
1730 } 1755 }
1731 XD_CLR_BAD_OLDBLK(xd_card); 1756 XD_CLR_BAD_OLDBLK(xd_card);
1732 goto Status_Fail; 1757 goto status_fail;
1733 } 1758 }
1734 xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF)); 1759 xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
1735 xd_erase_block(chip, phy_blk); 1760 xd_erase_block(chip, phy_blk);
@@ -1737,15 +1762,15 @@ Fail:
1737 XD_CLR_BAD_OLDBLK(xd_card); 1762 XD_CLR_BAD_OLDBLK(xd_card);
1738 } 1763 }
1739 1764
1740Status_Fail: 1765status_fail:
1741 rtsx_trace(chip); 1766 rtsx_trace(chip);
1742 return STATUS_FAIL; 1767 return STATUS_FAIL;
1743} 1768}
1744 1769
1745static int xd_finish_write(struct rtsx_chip *chip, 1770static int xd_finish_write(struct rtsx_chip *chip,
1746 u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) 1771 u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
1747{ 1772{
1748 struct xd_info *xd_card = &(chip->xd_card); 1773 struct xd_info *xd_card = &chip->xd_card;
1749 int retval, zone_no; 1774 int retval, zone_no;
1750 u16 log_off; 1775 u16 log_off;
1751 1776
@@ -1762,7 +1787,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
1762 1787
1763 if (old_blk == BLK_NOT_FOUND) { 1788 if (old_blk == BLK_NOT_FOUND) {
1764 retval = xd_init_page(chip, new_blk, log_off, 1789 retval = xd_init_page(chip, new_blk, log_off,
1765 page_off, xd_card->page_off + 1); 1790 page_off, xd_card->page_off + 1);
1766 if (retval != STATUS_SUCCESS) { 1791 if (retval != STATUS_SUCCESS) {
1767 retval = xd_erase_block(chip, new_blk); 1792 retval = xd_erase_block(chip, new_blk);
1768 if (retval == STATUS_SUCCESS) 1793 if (retval == STATUS_SUCCESS)
@@ -1772,7 +1797,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
1772 } 1797 }
1773 } else { 1798 } else {
1774 retval = xd_copy_page(chip, old_blk, new_blk, 1799 retval = xd_copy_page(chip, old_blk, new_blk,
1775 page_off, xd_card->page_off + 1); 1800 page_off, xd_card->page_off + 1);
1776 if (retval != STATUS_SUCCESS) { 1801 if (retval != STATUS_SUCCESS) {
1777 if (!XD_CHK_BAD_NEWBLK(xd_card)) { 1802 if (!XD_CHK_BAD_NEWBLK(xd_card)) {
1778 retval = xd_erase_block(chip, new_blk); 1803 retval = xd_erase_block(chip, new_blk);
@@ -1804,7 +1829,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
1804} 1829}
1805 1830
1806static int xd_prepare_write(struct rtsx_chip *chip, 1831static int xd_prepare_write(struct rtsx_chip *chip,
1807 u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) 1832 u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
1808{ 1833{
1809 int retval; 1834 int retval;
1810 1835
@@ -1823,11 +1848,11 @@ static int xd_prepare_write(struct rtsx_chip *chip,
1823} 1848}
1824 1849
1825static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, 1850static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
1826 u32 new_blk, u32 log_blk, u8 start_page, 1851 u32 new_blk, u32 log_blk, u8 start_page,
1827 u8 end_page, u8 *buf, unsigned int *index, 1852 u8 end_page, u8 *buf, unsigned int *index,
1828 unsigned int *offset) 1853 unsigned int *offset)
1829{ 1854{
1830 struct xd_info *xd_card = &(chip->xd_card); 1855 struct xd_info *xd_card = &chip->xd_card;
1831 u32 page_addr; 1856 u32 page_addr;
1832 int zone_no, retval; 1857 int zone_no, retval;
1833 u16 log_off; 1858 u16 log_off;
@@ -1837,7 +1862,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
1837 __func__, old_blk, new_blk, log_blk); 1862 __func__, old_blk, new_blk, log_blk);
1838 1863
1839 if (start_page > end_page) 1864 if (start_page > end_page)
1840 goto Status_Fail; 1865 goto status_fail;
1841 1866
1842 page_cnt = end_page - start_page; 1867 page_cnt = end_page - start_page;
1843 zone_no = (int)(log_blk / 1000); 1868 zone_no = (int)(log_blk / 1000);
@@ -1847,12 +1872,12 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
1847 1872
1848 retval = xd_send_cmd(chip, READ1_1); 1873 retval = xd_send_cmd(chip, READ1_1);
1849 if (retval != STATUS_SUCCESS) 1874 if (retval != STATUS_SUCCESS)
1850 goto Status_Fail; 1875 goto status_fail;
1851 1876
1852 rtsx_init_cmd(chip); 1877 rtsx_init_cmd(chip);
1853 1878
1854 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 1879 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
1855 0xFF, (u8)(log_off >> 8)); 1880 0xFF, (u8)(log_off >> 8));
1856 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off); 1881 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
1857 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK); 1882 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
1858 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG); 1883 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
@@ -1860,32 +1885,32 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
1860 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); 1885 xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
1861 1886
1862 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM, 1887 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
1863 XD_BA_TRANSFORM); 1888 XD_BA_TRANSFORM);
1864 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt); 1889 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
1865 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); 1890 rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
1866 1891
1867 trans_dma_enable(chip->srb->sc_data_direction, chip, 1892 trans_dma_enable(chip->srb->sc_data_direction, chip,
1868 page_cnt * 512, DMA_512); 1893 page_cnt * 512, DMA_512);
1869 1894
1870 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 1895 rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
1871 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES); 1896 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
1872 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, 1897 rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
1873 XD_TRANSFER_END, XD_TRANSFER_END); 1898 XD_TRANSFER_END, XD_TRANSFER_END);
1874 1899
1875 rtsx_send_cmd_no_wait(chip); 1900 rtsx_send_cmd_no_wait(chip);
1876 1901
1877 retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512, 1902 retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
1878 scsi_sg_count(chip->srb), 1903 scsi_sg_count(chip->srb),
1879 index, offset, DMA_TO_DEVICE, chip->xd_timeout); 1904 index, offset, DMA_TO_DEVICE, chip->xd_timeout);
1880 if (retval < 0) { 1905 if (retval < 0) {
1881 rtsx_clear_xd_error(chip); 1906 rtsx_clear_xd_error(chip);
1882 1907
1883 if (retval == -ETIMEDOUT) { 1908 if (retval == -ETIMEDOUT) {
1884 xd_set_err_code(chip, XD_TO_ERROR); 1909 xd_set_err_code(chip, XD_TO_ERROR);
1885 goto Status_Fail; 1910 goto status_fail;
1886 } else { 1911 } else {
1887 rtsx_trace(chip); 1912 rtsx_trace(chip);
1888 goto Fail; 1913 goto fail;
1889 } 1914 }
1890 } 1915 }
1891 1916
@@ -1911,7 +1936,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
1911 1936
1912 return STATUS_SUCCESS; 1937 return STATUS_SUCCESS;
1913 1938
1914Fail: 1939fail:
1915 retval = rtsx_read_register(chip, XD_DAT, &reg_val); 1940 retval = rtsx_read_register(chip, XD_DAT, &reg_val);
1916 if (retval) { 1941 if (retval) {
1917 rtsx_trace(chip); 1942 rtsx_trace(chip);
@@ -1922,7 +1947,7 @@ Fail:
1922 xd_mark_bad_block(chip, new_blk); 1947 xd_mark_bad_block(chip, new_blk);
1923 } 1948 }
1924 1949
1925Status_Fail: 1950status_fail:
1926 rtsx_trace(chip); 1951 rtsx_trace(chip);
1927 return STATUS_FAIL; 1952 return STATUS_FAIL;
1928} 1953}
@@ -1930,8 +1955,8 @@ Status_Fail:
1930#ifdef XD_DELAY_WRITE 1955#ifdef XD_DELAY_WRITE
1931int xd_delay_write(struct rtsx_chip *chip) 1956int xd_delay_write(struct rtsx_chip *chip)
1932{ 1957{
1933 struct xd_info *xd_card = &(chip->xd_card); 1958 struct xd_info *xd_card = &chip->xd_card;
1934 struct xd_delay_write_tag *delay_write = &(xd_card->delay_write); 1959 struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
1935 int retval; 1960 int retval;
1936 1961
1937 if (delay_write->delay_write_flag) { 1962 if (delay_write->delay_write_flag) {
@@ -1944,9 +1969,10 @@ int xd_delay_write(struct rtsx_chip *chip)
1944 1969
1945 delay_write->delay_write_flag = 0; 1970 delay_write->delay_write_flag = 0;
1946 retval = xd_finish_write(chip, 1971 retval = xd_finish_write(chip,
1947 delay_write->old_phyblock, 1972 delay_write->old_phyblock,
1948 delay_write->new_phyblock, 1973 delay_write->new_phyblock,
1949 delay_write->logblock, delay_write->pageoff); 1974 delay_write->logblock,
1975 delay_write->pageoff);
1950 if (retval != STATUS_SUCCESS) { 1976 if (retval != STATUS_SUCCESS) {
1951 rtsx_trace(chip); 1977 rtsx_trace(chip);
1952 return STATUS_FAIL; 1978 return STATUS_FAIL;
@@ -1958,12 +1984,12 @@ int xd_delay_write(struct rtsx_chip *chip)
1958#endif 1984#endif
1959 1985
1960int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 1986int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
1961 u32 start_sector, u16 sector_cnt) 1987 u32 start_sector, u16 sector_cnt)
1962{ 1988{
1963 struct xd_info *xd_card = &(chip->xd_card); 1989 struct xd_info *xd_card = &chip->xd_card;
1964 unsigned int lun = SCSI_LUN(srb); 1990 unsigned int lun = SCSI_LUN(srb);
1965#ifdef XD_DELAY_WRITE 1991#ifdef XD_DELAY_WRITE
1966 struct xd_delay_write_tag *delay_write = &(xd_card->delay_write); 1992 struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
1967#endif 1993#endif
1968 int retval, zone_no; 1994 int retval, zone_no;
1969 unsigned int index = 0, offset = 0; 1995 unsigned int index = 0, offset = 0;
@@ -2012,17 +2038,18 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2012 if (srb->sc_data_direction == DMA_TO_DEVICE) { 2038 if (srb->sc_data_direction == DMA_TO_DEVICE) {
2013#ifdef XD_DELAY_WRITE 2039#ifdef XD_DELAY_WRITE
2014 if (delay_write->delay_write_flag && 2040 if (delay_write->delay_write_flag &&
2015 (delay_write->logblock == log_blk) && 2041 (delay_write->logblock == log_blk) &&
2016 (start_page > delay_write->pageoff)) { 2042 (start_page > delay_write->pageoff)) {
2017 delay_write->delay_write_flag = 0; 2043 delay_write->delay_write_flag = 0;
2018 if (delay_write->old_phyblock != BLK_NOT_FOUND) { 2044 if (delay_write->old_phyblock != BLK_NOT_FOUND) {
2019 retval = xd_copy_page(chip, 2045 retval = xd_copy_page(chip,
2020 delay_write->old_phyblock, 2046 delay_write->old_phyblock,
2021 delay_write->new_phyblock, 2047 delay_write->new_phyblock,
2022 delay_write->pageoff, start_page); 2048 delay_write->pageoff,
2049 start_page);
2023 if (retval != STATUS_SUCCESS) { 2050 if (retval != STATUS_SUCCESS) {
2024 set_sense_type(chip, lun, 2051 set_sense_type(chip, lun,
2025 SENSE_TYPE_MEDIA_WRITE_ERR); 2052 SENSE_TYPE_MEDIA_WRITE_ERR);
2026 rtsx_trace(chip); 2053 rtsx_trace(chip);
2027 return STATUS_FAIL; 2054 return STATUS_FAIL;
2028 } 2055 }
@@ -2039,7 +2066,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2039 retval = xd_delay_write(chip); 2066 retval = xd_delay_write(chip);
2040 if (retval != STATUS_SUCCESS) { 2067 if (retval != STATUS_SUCCESS) {
2041 set_sense_type(chip, lun, 2068 set_sense_type(chip, lun,
2042 SENSE_TYPE_MEDIA_WRITE_ERR); 2069 SENSE_TYPE_MEDIA_WRITE_ERR);
2043 rtsx_trace(chip); 2070 rtsx_trace(chip);
2044 return STATUS_FAIL; 2071 return STATUS_FAIL;
2045 } 2072 }
@@ -2047,25 +2074,25 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2047 old_blk = xd_get_l2p_tbl(chip, zone_no, log_off); 2074 old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
2048 new_blk = xd_get_unused_block(chip, zone_no); 2075 new_blk = xd_get_unused_block(chip, zone_no);
2049 if ((old_blk == BLK_NOT_FOUND) || 2076 if ((old_blk == BLK_NOT_FOUND) ||
2050 (new_blk == BLK_NOT_FOUND)) { 2077 (new_blk == BLK_NOT_FOUND)) {
2051 set_sense_type(chip, lun, 2078 set_sense_type(chip, lun,
2052 SENSE_TYPE_MEDIA_WRITE_ERR); 2079 SENSE_TYPE_MEDIA_WRITE_ERR);
2053 rtsx_trace(chip); 2080 rtsx_trace(chip);
2054 return STATUS_FAIL; 2081 return STATUS_FAIL;
2055 } 2082 }
2056 2083
2057 retval = xd_prepare_write(chip, old_blk, new_blk, 2084 retval = xd_prepare_write(chip, old_blk, new_blk,
2058 log_blk, start_page); 2085 log_blk, start_page);
2059 if (retval != STATUS_SUCCESS) { 2086 if (retval != STATUS_SUCCESS) {
2060 if (detect_card_cd(chip, XD_CARD) != 2087 if (detect_card_cd(chip, XD_CARD) !=
2061 STATUS_SUCCESS) { 2088 STATUS_SUCCESS) {
2062 set_sense_type(chip, lun, 2089 set_sense_type(chip, lun,
2063 SENSE_TYPE_MEDIA_NOT_PRESENT); 2090 SENSE_TYPE_MEDIA_NOT_PRESENT);
2064 rtsx_trace(chip); 2091 rtsx_trace(chip);
2065 return STATUS_FAIL; 2092 return STATUS_FAIL;
2066 } 2093 }
2067 set_sense_type(chip, lun, 2094 set_sense_type(chip, lun,
2068 SENSE_TYPE_MEDIA_WRITE_ERR); 2095 SENSE_TYPE_MEDIA_WRITE_ERR);
2069 rtsx_trace(chip); 2096 rtsx_trace(chip);
2070 return STATUS_FAIL; 2097 return STATUS_FAIL;
2071 } 2098 }
@@ -2078,12 +2105,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2078 if (retval != STATUS_SUCCESS) { 2105 if (retval != STATUS_SUCCESS) {
2079 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { 2106 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
2080 set_sense_type(chip, lun, 2107 set_sense_type(chip, lun,
2081 SENSE_TYPE_MEDIA_NOT_PRESENT); 2108 SENSE_TYPE_MEDIA_NOT_PRESENT);
2082 rtsx_trace(chip); 2109 rtsx_trace(chip);
2083 return STATUS_FAIL; 2110 return STATUS_FAIL;
2084 } 2111 }
2085 set_sense_type(chip, lun, 2112 set_sense_type(chip, lun,
2086 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2113 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2087 rtsx_trace(chip); 2114 rtsx_trace(chip);
2088 return STATUS_FAIL; 2115 return STATUS_FAIL;
2089 } 2116 }
@@ -2092,7 +2119,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2092 old_blk = xd_get_l2p_tbl(chip, zone_no, log_off); 2119 old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
2093 if (old_blk == BLK_NOT_FOUND) { 2120 if (old_blk == BLK_NOT_FOUND) {
2094 set_sense_type(chip, lun, 2121 set_sense_type(chip, lun,
2095 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2122 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2096 rtsx_trace(chip); 2123 rtsx_trace(chip);
2097 return STATUS_FAIL; 2124 return STATUS_FAIL;
2098 } 2125 }
@@ -2116,22 +2143,22 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2116 page_cnt = end_page - start_page; 2143 page_cnt = end_page - start_page;
2117 if (srb->sc_data_direction == DMA_FROM_DEVICE) { 2144 if (srb->sc_data_direction == DMA_FROM_DEVICE) {
2118 retval = xd_read_multiple_pages(chip, old_blk, log_blk, 2145 retval = xd_read_multiple_pages(chip, old_blk, log_blk,
2119 start_page, end_page, ptr, 2146 start_page, end_page,
2120 &index, &offset); 2147 ptr, &index, &offset);
2121 if (retval != STATUS_SUCCESS) { 2148 if (retval != STATUS_SUCCESS) {
2122 set_sense_type(chip, lun, 2149 set_sense_type(chip, lun,
2123 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2150 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2124 rtsx_trace(chip); 2151 rtsx_trace(chip);
2125 return STATUS_FAIL; 2152 return STATUS_FAIL;
2126 } 2153 }
2127 } else { 2154 } else {
2128 retval = xd_write_multiple_pages(chip, old_blk, 2155 retval = xd_write_multiple_pages(chip, old_blk,
2129 new_blk, log_blk, 2156 new_blk, log_blk,
2130 start_page, end_page, ptr, 2157 start_page, end_page,
2131 &index, &offset); 2158 ptr, &index, &offset);
2132 if (retval != STATUS_SUCCESS) { 2159 if (retval != STATUS_SUCCESS) {
2133 set_sense_type(chip, lun, 2160 set_sense_type(chip, lun,
2134 SENSE_TYPE_MEDIA_WRITE_ERR); 2161 SENSE_TYPE_MEDIA_WRITE_ERR);
2135 rtsx_trace(chip); 2162 rtsx_trace(chip);
2136 return STATUS_FAIL; 2163 return STATUS_FAIL;
2137 } 2164 }
@@ -2153,7 +2180,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2153 if (retval != STATUS_SUCCESS) { 2180 if (retval != STATUS_SUCCESS) {
2154 chip->card_fail |= XD_CARD; 2181 chip->card_fail |= XD_CARD;
2155 set_sense_type(chip, lun, 2182 set_sense_type(chip, lun,
2156 SENSE_TYPE_MEDIA_NOT_PRESENT); 2183 SENSE_TYPE_MEDIA_NOT_PRESENT);
2157 rtsx_trace(chip); 2184 rtsx_trace(chip);
2158 return STATUS_FAIL; 2185 return STATUS_FAIL;
2159 } 2186 }
@@ -2163,10 +2190,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2163 if (old_blk == BLK_NOT_FOUND) { 2190 if (old_blk == BLK_NOT_FOUND) {
2164 if (srb->sc_data_direction == DMA_FROM_DEVICE) 2191 if (srb->sc_data_direction == DMA_FROM_DEVICE)
2165 set_sense_type(chip, lun, 2192 set_sense_type(chip, lun,
2166 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); 2193 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
2167 else 2194 else
2168 set_sense_type(chip, lun, 2195 set_sense_type(chip, lun,
2169 SENSE_TYPE_MEDIA_WRITE_ERR); 2196 SENSE_TYPE_MEDIA_WRITE_ERR);
2170 2197
2171 rtsx_trace(chip); 2198 rtsx_trace(chip);
2172 return STATUS_FAIL; 2199 return STATUS_FAIL;
@@ -2176,7 +2203,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2176 new_blk = xd_get_unused_block(chip, zone_no); 2203 new_blk = xd_get_unused_block(chip, zone_no);
2177 if (new_blk == BLK_NOT_FOUND) { 2204 if (new_blk == BLK_NOT_FOUND) {
2178 set_sense_type(chip, lun, 2205 set_sense_type(chip, lun,
2179 SENSE_TYPE_MEDIA_WRITE_ERR); 2206 SENSE_TYPE_MEDIA_WRITE_ERR);
2180 rtsx_trace(chip); 2207 rtsx_trace(chip);
2181 return STATUS_FAIL; 2208 return STATUS_FAIL;
2182 } 2209 }
@@ -2186,7 +2213,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2186 } 2213 }
2187 2214
2188 if ((srb->sc_data_direction == DMA_TO_DEVICE) && 2215 if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
2189 (end_page != (xd_card->page_off + 1))) { 2216 (end_page != (xd_card->page_off + 1))) {
2190#ifdef XD_DELAY_WRITE 2217#ifdef XD_DELAY_WRITE
2191 delay_write->delay_write_flag = 1; 2218 delay_write->delay_write_flag = 1;
2192 delay_write->old_phyblock = old_blk; 2219 delay_write->old_phyblock = old_blk;
@@ -2202,11 +2229,11 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2202 } 2229 }
2203 2230
2204 retval = xd_finish_write(chip, old_blk, new_blk, 2231 retval = xd_finish_write(chip, old_blk, new_blk,
2205 log_blk, end_page); 2232 log_blk, end_page);
2206 if (retval != STATUS_SUCCESS) { 2233 if (retval != STATUS_SUCCESS) {
2207 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { 2234 if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
2208 set_sense_type(chip, lun, 2235 set_sense_type(chip, lun,
2209 SENSE_TYPE_MEDIA_NOT_PRESENT); 2236 SENSE_TYPE_MEDIA_NOT_PRESENT);
2210 rtsx_trace(chip); 2237 rtsx_trace(chip);
2211 return STATUS_FAIL; 2238 return STATUS_FAIL;
2212 } 2239 }
@@ -2224,10 +2251,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
2224 2251
2225void xd_free_l2p_tbl(struct rtsx_chip *chip) 2252void xd_free_l2p_tbl(struct rtsx_chip *chip)
2226{ 2253{
2227 struct xd_info *xd_card = &(chip->xd_card); 2254 struct xd_info *xd_card = &chip->xd_card;
2228 int i = 0; 2255 int i = 0;
2229 2256
2230 if (xd_card->zone != NULL) { 2257 if (xd_card->zone) {
2231 for (i = 0; i < xd_card->zone_cnt; i++) { 2258 for (i = 0; i < xd_card->zone_cnt; i++) {
2232 vfree(xd_card->zone[i].l2p_table); 2259 vfree(xd_card->zone[i].l2p_table);
2233 xd_card->zone[i].l2p_table = NULL; 2260 xd_card->zone[i].l2p_table = NULL;
@@ -2242,7 +2269,7 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip)
2242void xd_cleanup_work(struct rtsx_chip *chip) 2269void xd_cleanup_work(struct rtsx_chip *chip)
2243{ 2270{
2244#ifdef XD_DELAY_WRITE 2271#ifdef XD_DELAY_WRITE
2245 struct xd_info *xd_card = &(chip->xd_card); 2272 struct xd_info *xd_card = &chip->xd_card;
2246 2273
2247 if (xd_card->delay_write.delay_write_flag) { 2274 if (xd_card->delay_write.delay_write_flag) {
2248 dev_dbg(rtsx_dev(chip), "xD: delay write\n"); 2275 dev_dbg(rtsx_dev(chip), "xD: delay write\n");
@@ -2297,7 +2324,7 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
2297 2324
2298int release_xd_card(struct rtsx_chip *chip) 2325int release_xd_card(struct rtsx_chip *chip)
2299{ 2326{
2300 struct xd_info *xd_card = &(chip->xd_card); 2327 struct xd_info *xd_card = &chip->xd_card;
2301 int retval; 2328 int retval;
2302 2329
2303 chip->card_ready &= ~XD_CARD; 2330 chip->card_ready &= ~XD_CARD;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 938138c50bb5..d5f10880efb7 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -179,7 +179,7 @@ int reset_xd_card(struct rtsx_chip *chip);
179int xd_delay_write(struct rtsx_chip *chip); 179int xd_delay_write(struct rtsx_chip *chip);
180#endif 180#endif
181int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, 181int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
182 u32 start_sector, u16 sector_cnt); 182 u32 start_sector, u16 sector_cnt);
183void xd_free_l2p_tbl(struct rtsx_chip *chip); 183void xd_free_l2p_tbl(struct rtsx_chip *chip);
184void xd_cleanup_work(struct rtsx_chip *chip); 184void xd_cleanup_work(struct rtsx_chip *chip);
185int xd_power_off_card3v3(struct rtsx_chip *chip); 185int xd_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index cab26e736111..c6526b6fbfb4 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -98,19 +98,16 @@ int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
98 switch (ctx->skein_size) { 98 switch (ctx->skein_size) {
99 case SKEIN_256: 99 case SKEIN_256:
100 ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len, 100 ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
101 tree_info, 101 tree_info, key, key_len);
102 (const u8 *)key, key_len);
103 102
104 break; 103 break;
105 case SKEIN_512: 104 case SKEIN_512:
106 ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len, 105 ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
107 tree_info, 106 tree_info, key, key_len);
108 (const u8 *)key, key_len);
109 break; 107 break;
110 case SKEIN_1024: 108 case SKEIN_1024:
111 ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len, 109 ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
112 tree_info, 110 tree_info, key, key_len);
113 (const u8 *)key, key_len);
114 111
115 break; 112 break;
116 } 113 }
@@ -152,16 +149,13 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
152 149
153 switch (ctx->skein_size) { 150 switch (ctx->skein_size) {
154 case SKEIN_256: 151 case SKEIN_256:
155 ret = skein_256_update(&ctx->m.s256, (const u8 *)msg, 152 ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt);
156 msg_byte_cnt);
157 break; 153 break;
158 case SKEIN_512: 154 case SKEIN_512:
159 ret = skein_512_update(&ctx->m.s512, (const u8 *)msg, 155 ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt);
160 msg_byte_cnt);
161 break; 156 break;
162 case SKEIN_1024: 157 case SKEIN_1024:
163 ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg, 158 ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt);
164 msg_byte_cnt);
165 break; 159 break;
166 } 160 }
167 return ret; 161 return ret;
@@ -211,7 +205,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
211 /* partial byte bit mask */ 205 /* partial byte bit mask */
212 mask = (u8)(1u << (7 - (msg_bit_cnt & 7))); 206 mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
213 /* apply bit padding on final byte (in the buffer) */ 207 /* apply bit padding on final byte (in the buffer) */
214 up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask); 208 up[length - 1] = (up[length - 1] & (0 - mask)) | mask;
215 209
216 return SKEIN_SUCCESS; 210 return SKEIN_SUCCESS;
217} 211}
@@ -224,13 +218,13 @@ int skein_final(struct skein_ctx *ctx, u8 *hash)
224 218
225 switch (ctx->skein_size) { 219 switch (ctx->skein_size) {
226 case SKEIN_256: 220 case SKEIN_256:
227 ret = skein_256_final(&ctx->m.s256, (u8 *)hash); 221 ret = skein_256_final(&ctx->m.s256, hash);
228 break; 222 break;
229 case SKEIN_512: 223 case SKEIN_512:
230 ret = skein_512_final(&ctx->m.s512, (u8 *)hash); 224 ret = skein_512_final(&ctx->m.s512, hash);
231 break; 225 break;
232 case SKEIN_1024: 226 case SKEIN_1024:
233 ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash); 227 ret = skein_1024_final(&ctx->m.s1024, hash);
234 break; 228 break;
235 } 229 }
236 return ret; 230 return ret;
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index a95563fad071..50640656c10d 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -64,7 +64,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
64 b2 += b1; 64 b2 += b1;
65 b1 = rol64(b1, 32) ^ b2; 65 b1 = rol64(b1, 32) ^ b2;
66 66
67
68 b1 += k3 + t2; 67 b1 += k3 + t2;
69 b0 += b1 + k2; 68 b0 += b1 + k2;
70 b1 = rol64(b1, 14) ^ b0; 69 b1 = rol64(b1, 14) ^ b0;
@@ -117,7 +116,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
117 b2 += b1; 116 b2 += b1;
118 b1 = rol64(b1, 32) ^ b2; 117 b1 = rol64(b1, 32) ^ b2;
119 118
120
121 b1 += k0 + t1; 119 b1 += k0 + t1;
122 b0 += b1 + k4; 120 b0 += b1 + k4;
123 b1 = rol64(b1, 14) ^ b0; 121 b1 = rol64(b1, 14) ^ b0;
@@ -170,7 +168,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
170 b2 += b1; 168 b2 += b1;
171 b1 = rol64(b1, 32) ^ b2; 169 b1 = rol64(b1, 32) ^ b2;
172 170
173
174 b1 += k2 + t0; 171 b1 += k2 + t0;
175 b0 += b1 + k1; 172 b0 += b1 + k1;
176 b1 = rol64(b1, 14) ^ b0; 173 b1 = rol64(b1, 14) ^ b0;
@@ -223,7 +220,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
223 b2 += b1; 220 b2 += b1;
224 b1 = rol64(b1, 32) ^ b2; 221 b1 = rol64(b1, 32) ^ b2;
225 222
226
227 b1 += k4 + t2; 223 b1 += k4 + t2;
228 b0 += b1 + k3; 224 b0 += b1 + k3;
229 b1 = rol64(b1, 14) ^ b0; 225 b1 = rol64(b1, 14) ^ b0;
@@ -276,7 +272,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
276 b2 += b1; 272 b2 += b1;
277 b1 = rol64(b1, 32) ^ b2; 273 b1 = rol64(b1, 32) ^ b2;
278 274
279
280 b1 += k1 + t1; 275 b1 += k1 + t1;
281 b0 += b1 + k0; 276 b0 += b1 + k0;
282 b1 = rol64(b1, 14) ^ b0; 277 b1 = rol64(b1, 14) ^ b0;
@@ -329,7 +324,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
329 b2 += b1; 324 b2 += b1;
330 b1 = rol64(b1, 32) ^ b2; 325 b1 = rol64(b1, 32) ^ b2;
331 326
332
333 b1 += k3 + t0; 327 b1 += k3 + t0;
334 b0 += b1 + k2; 328 b0 += b1 + k2;
335 b1 = rol64(b1, 14) ^ b0; 329 b1 = rol64(b1, 14) ^ b0;
@@ -382,7 +376,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
382 b2 += b1; 376 b2 += b1;
383 b1 = rol64(b1, 32) ^ b2; 377 b1 = rol64(b1, 32) ^ b2;
384 378
385
386 b1 += k0 + t2; 379 b1 += k0 + t2;
387 b0 += b1 + k4; 380 b0 += b1 + k4;
388 b1 = rol64(b1, 14) ^ b0; 381 b1 = rol64(b1, 14) ^ b0;
@@ -435,7 +428,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
435 b2 += b1; 428 b2 += b1;
436 b1 = rol64(b1, 32) ^ b2; 429 b1 = rol64(b1, 32) ^ b2;
437 430
438
439 b1 += k2 + t1; 431 b1 += k2 + t1;
440 b0 += b1 + k1; 432 b0 += b1 + k1;
441 b1 = rol64(b1, 14) ^ b0; 433 b1 = rol64(b1, 14) ^ b0;
@@ -579,7 +571,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
579 b2 -= b3 + k3 + t2; 571 b2 -= b3 + k3 + t2;
580 b3 -= k4 + 16; 572 b3 -= k4 + 16;
581 573
582
583 tmp = b3 ^ b0; 574 tmp = b3 ^ b0;
584 b3 = ror64(tmp, 32); 575 b3 = ror64(tmp, 32);
585 b0 -= b3; 576 b0 -= b3;
@@ -648,7 +639,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
648 b2 -= b3 + k1 + t0; 639 b2 -= b3 + k1 + t0;
649 b3 -= k2 + 14; 640 b3 -= k2 + 14;
650 641
651
652 tmp = b3 ^ b0; 642 tmp = b3 ^ b0;
653 b3 = ror64(tmp, 32); 643 b3 = ror64(tmp, 32);
654 b0 -= b3; 644 b0 -= b3;
@@ -717,7 +707,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
717 b2 -= b3 + k4 + t1; 707 b2 -= b3 + k4 + t1;
718 b3 -= k0 + 12; 708 b3 -= k0 + 12;
719 709
720
721 tmp = b3 ^ b0; 710 tmp = b3 ^ b0;
722 b3 = ror64(tmp, 32); 711 b3 = ror64(tmp, 32);
723 b0 -= b3; 712 b0 -= b3;
@@ -786,7 +775,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
786 b2 -= b3 + k2 + t2; 775 b2 -= b3 + k2 + t2;
787 b3 -= k3 + 10; 776 b3 -= k3 + 10;
788 777
789
790 tmp = b3 ^ b0; 778 tmp = b3 ^ b0;
791 b3 = ror64(tmp, 32); 779 b3 = ror64(tmp, 32);
792 b0 -= b3; 780 b0 -= b3;
@@ -855,7 +843,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
855 b2 -= b3 + k0 + t0; 843 b2 -= b3 + k0 + t0;
856 b3 -= k1 + 8; 844 b3 -= k1 + 8;
857 845
858
859 tmp = b3 ^ b0; 846 tmp = b3 ^ b0;
860 b3 = ror64(tmp, 32); 847 b3 = ror64(tmp, 32);
861 b0 -= b3; 848 b0 -= b3;
@@ -924,7 +911,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
924 b2 -= b3 + k3 + t1; 911 b2 -= b3 + k3 + t1;
925 b3 -= k4 + 6; 912 b3 -= k4 + 6;
926 913
927
928 tmp = b3 ^ b0; 914 tmp = b3 ^ b0;
929 b3 = ror64(tmp, 32); 915 b3 = ror64(tmp, 32);
930 b0 -= b3; 916 b0 -= b3;
@@ -993,7 +979,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
993 b2 -= b3 + k1 + t2; 979 b2 -= b3 + k1 + t2;
994 b3 -= k2 + 4; 980 b3 -= k2 + 4;
995 981
996
997 tmp = b3 ^ b0; 982 tmp = b3 ^ b0;
998 b3 = ror64(tmp, 32); 983 b3 = ror64(tmp, 32);
999 b0 -= b3; 984 b0 -= b3;
@@ -1062,7 +1047,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
1062 b2 -= b3 + k4 + t0; 1047 b2 -= b3 + k4 + t0;
1063 b3 -= k0 + 2; 1048 b3 -= k0 + 2;
1064 1049
1065
1066 tmp = b3 ^ b0; 1050 tmp = b3 ^ b0;
1067 b3 = ror64(tmp, 32); 1051 b3 = ror64(tmp, 32);
1068 b0 -= b3; 1052 b0 -= b3;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
deleted file mode 100644
index 5c2a15b42dfe..000000000000
--- a/drivers/staging/slicoss/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
1config SLICOSS
2 tristate "Alacritech Gigabit IS-NIC support"
3 depends on PCI && X86 && NET
4 default n
5 help
6 This driver supports Alacritech's IS-NIC gigabit ethernet cards.
7
8 This includes the following devices:
9 Mojave cards (single port PCI Gigabit) both copper and fiber
10 Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
11 Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
12
13 To compile this driver as a module, choose M here: the module
14 will be called slicoss.
diff --git a/drivers/staging/slicoss/Makefile b/drivers/staging/slicoss/Makefile
deleted file mode 100644
index 7bc9e9b9d3ab..000000000000
--- a/drivers/staging/slicoss/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
deleted file mode 100644
index 4fa50e73ce86..000000000000
--- a/drivers/staging/slicoss/README
+++ /dev/null
@@ -1,7 +0,0 @@
1This driver is supposed to support:
2
3 Mojave cards (single port PCI Gigabit) both copper and fiber
4 Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
5 Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
6
7The driver was actually tested on Oasis and Kalahari cards.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
deleted file mode 100644
index 9019729b7be6..000000000000
--- a/drivers/staging/slicoss/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
1TODO:
2 - move firmware loading to request_firmware()
3 - remove direct memory access of structures
4 - any remaining sparse and checkpatch.pl warnings
5
6 - use net_device_ops
7 - use dev->stats rather than adapter->stats
8 - don't cast netdev_priv it is already void
9 - GET RID OF MACROS
10 - work on all architectures
11 - without CONFIG_X86_64 confusion
12 - do 64 bit correctly
13 - don't depend on order of union
14 - get rid of ASSERT(), use BUG() instead but only where necessary
15 looks like most aren't really useful
16 - no new SIOCDEVPRIVATE ioctl allowed
17 - don't use module_param for configuring interrupt mitigation
18 use ethtool instead
 19 - reorder code to eliminate use of forward declarations
20 - don't keep private linked list of drivers.
21 - use PCI_DEVICE()
22 - do ethtool correctly using ethtool_ops
23 - NAPI?
24 - wasted overhead of extra stats
25 - state variables for things that are
26 easily available and shouldn't be kept in card structure, cardnum, ...
27 slotnumber, events, ...
28 - volatile == bad design => bad code
29 - locking too fine grained, not designed just throw more locks
30 at problem
31
32Please send patches to:
33 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
34and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
35<charrer@alacritech.com> as well as they are also able to test out any
36changes.
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
deleted file mode 100644
index 420546d43002..000000000000
--- a/drivers/staging/slicoss/slic.h
+++ /dev/null
@@ -1,573 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials provided
15 * with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * The views and conclusions contained in the software and documentation
31 * are those of the authors and should not be interpreted as representing
32 * official policies, either expressed or implied, of Alacritech, Inc.
33 *
34 **************************************************************************/
35
36/*
37 * FILENAME: slic.h
38 *
39 * This is the base set of header definitions for the SLICOSS driver.
40 */
41#ifndef __SLIC_DRIVER_H__
42#define __SLIC_DRIVER_H__
43
44/* firmware stuff */
45#define OASIS_UCODE_VERS_STRING "1.2"
46#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37"
47#define OASIS_UCODE_HOSTIF_ID 3
48
49#define MOJAVE_UCODE_VERS_STRING "1.2"
50#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22"
51#define MOJAVE_UCODE_HOSTIF_ID 3
52
53#define GB_RCVUCODE_VERS_STRING "1.2"
54#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15"
55static u32 OasisRcvUCodeLen = 512;
56static u32 GBRcvUCodeLen = 512;
57#define SECTION_SIZE 65536
58
59#define SLIC_RSPQ_PAGES_GB 10
60#define SLIC_RSPQ_BUFSINPAGE (PAGE_SIZE / SLIC_RSPBUF_SIZE)
61
62struct slic_rspqueue {
63 u32 offset;
64 u32 pageindex;
65 u32 num_pages;
66 struct slic_rspbuf *rspbuf;
67 u32 *vaddr[SLIC_RSPQ_PAGES_GB];
68 dma_addr_t paddr[SLIC_RSPQ_PAGES_GB];
69};
70
71#define SLIC_RCVQ_EXPANSION 1
72#define SLIC_RCVQ_ENTRIES (256 * SLIC_RCVQ_EXPANSION)
73#define SLIC_RCVQ_MINENTRIES (SLIC_RCVQ_ENTRIES / 2)
74#define SLIC_RCVQ_MAX_PROCESS_ISR ((SLIC_RCVQ_ENTRIES * 4))
75#define SLIC_RCVQ_RCVBUFSIZE 2048
76#define SLIC_RCVQ_FILLENTRIES (16 * SLIC_RCVQ_EXPANSION)
77#define SLIC_RCVQ_FILLTHRESH (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES)
78
79struct slic_rcvqueue {
80 struct sk_buff *head;
81 struct sk_buff *tail;
82 u32 count;
83 u32 size;
84 u32 errors;
85};
86
87struct slic_rcvbuf_info {
88 u32 id;
89 u32 starttime;
90 u32 stoptime;
91 u32 slicworld;
92 u32 lasttime;
93 u32 lastid;
94};
95
96/*
97 * SLIC Handle structure. Used to restrict handle values to
98 * 32 bits by using an index rather than an address.
99 * Simplifies ucode in 64-bit systems
100 */
101struct slic_handle_word {
102 union {
103 struct {
104 ushort index;
105 ushort bottombits; /* to denote num bufs to card */
106 } parts;
107 u32 whole;
108 } handle;
109};
110
111struct slic_handle {
112 struct slic_handle_word token; /* token passed between host and card*/
113 ushort type;
114 void *address; /* actual address of the object*/
115 ushort offset;
116 struct slic_handle *other_handle;
117 struct slic_handle *next;
118};
119
120#define SLIC_HANDLE_FREE 0x0000
121#define SLIC_HANDLE_DATA 0x0001
122#define SLIC_HANDLE_CMD 0x0002
123#define SLIC_HANDLE_CONTEXT 0x0003
124#define SLIC_HANDLE_TEAM 0x0004
125
126#define handle_index handle.parts.index
127#define handle_bottom handle.parts.bottombits
128#define handle_token handle.whole
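/*
 * Illustrative sketch (not part of the original driver): a host command
 * keeps the real pointer inside its struct slic_handle, while only the
 * 32-bit token (index plus bottom bits) is handed to the card, so the
 * card ucode never has to track 64-bit host addresses. The adapter
 * fields used below (pfree_slic_handles, handle_lock) are declared
 * further down in this header; "hostcmd" is a hypothetical
 * struct slic_hostcmd pointer.
 *
 *	struct slic_handle *h = adapter->pfree_slic_handles;   // under handle_lock
 *	adapter->pfree_slic_handles = h->next;
 *	h->type = SLIC_HANDLE_CMD;
 *	h->address = hostcmd;                 // full host pointer stays on the host
 *	hostcmd->pslic_handle = h;
 *	token = h->token.handle_token;        // 32-bit value passed to the card
 */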
129
130#define SLIC_HOSTCMD_SIZE 512
131
132struct slic_hostcmd {
133 struct slic_host64_cmd cmd64;
134 u32 type;
135 struct sk_buff *skb;
136 u32 paddrl;
137 u32 paddrh;
138 u32 busy;
139 u32 cmdsize;
140 ushort numbufs;
141 struct slic_handle *pslic_handle;/* handle associated with command */
142 struct slic_hostcmd *next;
143 struct slic_hostcmd *next_all;
144};
145
146#define SLIC_CMDQ_CMDSINPAGE (PAGE_SIZE / SLIC_HOSTCMD_SIZE)
147#define SLIC_CMD_DUMB 3
148#define SLIC_CMDQ_INITCMDS 256
149#define SLIC_CMDQ_MAXCMDS 256
150#define SLIC_CMDQ_MAXOUTSTAND SLIC_CMDQ_MAXCMDS
151#define SLIC_CMDQ_MAXPAGES (SLIC_CMDQ_MAXCMDS / SLIC_CMDQ_CMDSINPAGE)
152#define SLIC_CMDQ_INITPAGES (SLIC_CMDQ_INITCMDS / SLIC_CMDQ_CMDSINPAGE)
153
154struct slic_cmdqmem {
155 int pagecnt;
156 u32 *pages[SLIC_CMDQ_MAXPAGES];
157 dma_addr_t dma_pages[SLIC_CMDQ_MAXPAGES];
158};
159
160struct slic_cmdqueue {
161 struct slic_hostcmd *head;
162 struct slic_hostcmd *tail;
163 int count;
164 spinlock_t lock;
165};
166
167#define SLIC_MAX_CARDS 32
168#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
169
170struct mcast_address {
171 unsigned char address[6];
172 struct mcast_address *next;
173};
174
175#define CARD_DOWN 0x00000000
176#define CARD_UP 0x00000001
177#define CARD_FAIL 0x00000002
178#define CARD_DIAG 0x00000003
179#define CARD_SLEEP 0x00000004
180
181#define ADAPT_DOWN 0x00
182#define ADAPT_UP 0x01
183#define ADAPT_FAIL 0x02
184#define ADAPT_RESET 0x03
185#define ADAPT_SLEEP 0x04
186
187#define ADAPT_FLAGS_BOOTTIME 0x0001
188#define ADAPT_FLAGS_IS64BIT 0x0002
189#define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004
190#define ADAPT_FLAGS_FIBERMEDIA 0x0008
191#define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010
192#define ADAPT_FLAGS_INT_REGISTERED 0x0020
193#define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040
194#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080
195#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100
196
197#define LINK_DOWN 0x00
198#define LINK_CONFIG 0x01
199#define LINK_UP 0x02
200
201#define LINK_10MB 0x00
202#define LINK_100MB 0x01
203#define LINK_AUTOSPEED 0x02
204#define LINK_1000MB 0x03
205#define LINK_10000MB 0x04
206
207#define LINK_HALFD 0x00
208#define LINK_FULLD 0x01
209#define LINK_AUTOD 0x02
210
211#define MAC_DIRECTED 0x00000001
212#define MAC_BCAST 0x00000002
213#define MAC_MCAST 0x00000004
214#define MAC_PROMISC 0x00000008
215#define MAC_LOOPBACK 0x00000010
216#define MAC_ALLMCAST 0x00000020
217
218#define SLIC_DUPLEX(x) ((x == LINK_FULLD) ? "FDX" : "HDX")
219#define SLIC_SPEED(x) ((x == LINK_100MB) ? "100Mb" : ((x == LINK_1000MB) ?\
220 "1000Mb" : " 10Mb"))
221#define SLIC_LINKSTATE(x) ((x == LINK_DOWN) ? "Down" : "Up ")
222#define SLIC_ADAPTER_STATE(x) ((x == ADAPT_UP) ? "UP" : "Down")
223#define SLIC_CARD_STATE(x) ((x == CARD_UP) ? "UP" : "Down")
224
225struct slic_iface_stats {
226 /*
227 * Stats
228 */
229 u64 xmt_bytes;
230 u64 xmt_ucast;
231 u64 xmt_mcast;
232 u64 xmt_bcast;
233 u64 xmt_errors;
234 u64 xmt_discards;
235 u64 xmit_collisions;
236 u64 xmit_excess_xmit_collisions;
237 u64 rcv_bytes;
238 u64 rcv_ucast;
239 u64 rcv_mcast;
240 u64 rcv_bcast;
241 u64 rcv_errors;
242 u64 rcv_discards;
243};
244
245struct sliccp_stats {
246 u64 xmit_tcp_segs;
247 u64 xmit_tcp_bytes;
248 u64 rcv_tcp_segs;
249 u64 rcv_tcp_bytes;
250};
251
252struct slicnet_stats {
253 struct sliccp_stats tcp;
254 struct slic_iface_stats iface;
255};
256
257#define SLIC_LOADTIMER_PERIOD 1
258#define SLIC_INTAGG_DEFAULT 200
259#define SLIC_LOAD_0 0
260#define SLIC_INTAGG_0 0
261#define SLIC_LOAD_1 8000
262#define SLIC_LOAD_2 10000
263#define SLIC_LOAD_3 12000
264#define SLIC_LOAD_4 14000
265#define SLIC_LOAD_5 16000
266#define SLIC_INTAGG_1 50
267#define SLIC_INTAGG_2 100
268#define SLIC_INTAGG_3 150
269#define SLIC_INTAGG_4 200
270#define SLIC_INTAGG_5 250
271#define SLIC_LOAD_1GB 3000
272#define SLIC_LOAD_2GB 6000
273#define SLIC_LOAD_3GB 12000
274#define SLIC_LOAD_4GB 24000
275#define SLIC_LOAD_5GB 48000
276#define SLIC_INTAGG_1GB 50
277#define SLIC_INTAGG_2GB 75
278#define SLIC_INTAGG_3GB 100
279#define SLIC_INTAGG_4GB 100
280#define SLIC_INTAGG_5GB 100
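/*
 * Editorial note (not in the original source): the SLIC_LOAD_* /
 * SLIC_INTAGG_* pairs form the dynamic interrupt-aggregation table.
 * The load timer compares the event count accumulated over the last
 * period against the SLIC_LOAD_* thresholds and programs the matching
 * SLIC_INTAGG_* delay into SLIC_REG_INTAGG (see slic_timer_load_check()
 * in slicoss.c).
 */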
281
282struct ether_header {
283 unsigned char ether_dhost[6];
284 unsigned char ether_shost[6];
285 ushort ether_type;
286};
287
288struct sliccard {
289 uint busnumber;
290 uint slotnumber;
291 uint state;
292 uint cardnum;
293 uint card_size;
294 uint adapters_activated;
295 uint adapters_allocated;
296 uint adapters_sleeping;
297 uint gennumber;
298 u32 events;
299 u32 loadlevel_current;
300 u32 load;
301 uint reset_in_progress;
302 u32 pingstatus;
303 u32 bad_pingstatus;
304 struct timer_list loadtimer;
305 u32 loadtimerset;
306 uint config_set;
307 struct slic_config config;
308 struct adapter *master;
309 struct adapter *adapter[SLIC_MAX_PORTS];
310 struct sliccard *next;
311 u32 error_interrupts;
312 u32 error_rmiss_interrupts;
313 u32 rcv_interrupts;
314 u32 xmit_interrupts;
315 u32 num_isrs;
316 u32 false_interrupts;
317 u32 max_isr_rcvs;
318 u32 max_isr_xmits;
319 u32 rcv_interrupt_yields;
320 u32 tx_packets;
321 u32 debug_ix;
322 ushort reg_type[32];
323 ushort reg_offset[32];
324 u32 reg_value[32];
325 u32 reg_valueh[32];
326};
327
328#define NUM_CFG_SPACES 2
329#define NUM_CFG_REGS 64
330#define NUM_CFG_REG_ULONGS (NUM_CFG_REGS / sizeof(u32))
331
332struct physcard {
333 struct adapter *adapter[SLIC_MAX_PORTS];
334 struct physcard *next;
335 uint adapters_allocd;
336
337/*
338 * the following is not currently needed
339 * u32 bridge_busnum;
340 * u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
341 */
342};
343
344struct base_driver {
345 spinlock_t driver_lock;
346 u32 num_slic_cards;
347 u32 num_slic_ports;
348 u32 num_slic_ports_active;
349 u32 dynamic_intagg;
350 struct sliccard *slic_card;
351 struct physcard *phys_card;
352 uint cardnuminuse[SLIC_MAX_CARDS];
353};
354
355struct slic_stats {
356 /* xmit stats */
357 u64 xmit_tcp_bytes;
358 u64 xmit_tcp_segs;
359 u64 xmit_bytes;
360 u64 xmit_collisions;
361 u64 xmit_unicasts;
362 u64 xmit_other_error;
363 u64 xmit_excess_collisions;
364 /* rcv stats */
365 u64 rcv_tcp_bytes;
366 u64 rcv_tcp_segs;
367 u64 rcv_bytes;
368 u64 rcv_unicasts;
369 u64 rcv_other_error;
370 u64 rcv_drops;
371};
372
373struct slic_shmem_data {
374 u32 isr;
375 u32 lnkstatus;
376 struct slic_stats stats;
377};
378
379struct slic_shmemory {
380 dma_addr_t isr_phaddr;
381 dma_addr_t lnkstatus_phaddr;
382 dma_addr_t stats_phaddr;
383 struct slic_shmem_data __iomem *shmem_data;
384};
385
386struct slic_upr {
387 uint adapter;
388 u32 upr_request;
389 u32 upr_data;
390 u32 upr_data_h;
391 u32 upr_buffer;
392 u32 upr_buffer_h;
393 struct slic_upr *next;
394};
395
396struct slic_ifevents {
397 uint oflow802;
398 uint uflow802;
399 uint Tprtoflow;
400 uint rcvearly;
401 uint Bufov;
402 uint Carre;
403 uint Longe;
404 uint Invp;
405 uint Crc;
406 uint Drbl;
407 uint Code;
408 uint IpHlen;
409 uint IpLen;
410 uint IpCsum;
411 uint TpCsum;
412 uint TpHlen;
413};
414
415struct adapter {
416 void *ifp;
417 struct sliccard *card;
418 uint port;
419 struct physcard *physcard;
420 uint physport;
421 uint cardindex;
422 uint card_size;
423 uint chipid;
424 struct net_device *netdev;
425 spinlock_t adapter_lock;
426 spinlock_t reset_lock;
427 struct pci_dev *pcidev;
428 uint busnumber;
429 uint slotnumber;
430 uint functionnumber;
431 ushort vendid;
432 ushort devid;
433 ushort subsysid;
434 u32 irq;
435 u32 drambase;
436 u32 dramlength;
437 uint queues_initialized;
438 uint allocated;
439 uint activated;
440 u32 intrregistered;
441 uint isp_initialized;
442 uint gennumber;
443 struct slic_shmemory shmem;
444 dma_addr_t phys_shmem;
445 void __iomem *regs;
446 unsigned char state;
447 unsigned char linkstate;
448 unsigned char linkspeed;
449 unsigned char linkduplex;
450 uint flags;
451 unsigned char macaddr[6];
452 unsigned char currmacaddr[6];
453 u32 macopts;
454 ushort devflags_prev;
455 u64 mcastmask;
456 struct mcast_address *mcastaddrs;
457 struct slic_upr *upr_list;
458 uint upr_busy;
459 struct timer_list pingtimer;
460 u32 pingtimerset;
461 struct timer_list loadtimer;
462 u32 loadtimerset;
463 spinlock_t upr_lock;
464 spinlock_t bit64reglock;
465 struct slic_rspqueue rspqueue;
466 struct slic_rcvqueue rcvqueue;
467 struct slic_cmdqueue cmdq_free;
468 struct slic_cmdqueue cmdq_done;
469 struct slic_cmdqueue cmdq_all;
470 struct slic_cmdqmem cmdqmem;
471 /*
472 * SLIC Handles
473 */
474 /* Object handles*/
475 struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1];
476 /* Free object handles*/
477 struct slic_handle *pfree_slic_handles;
478 /* Object handle list lock*/
479 spinlock_t handle_lock;
480 ushort slic_handle_ix;
481
482 u32 xmitq_full;
483 u32 all_reg_writes;
484 u32 icr_reg_writes;
485 u32 isr_reg_writes;
486 u32 error_interrupts;
487 u32 error_rmiss_interrupts;
488 u32 rx_errors;
489 u32 rcv_drops;
490 u32 rcv_interrupts;
491 u32 xmit_interrupts;
492 u32 linkevent_interrupts;
493 u32 upr_interrupts;
494 u32 num_isrs;
495 u32 false_interrupts;
496 u32 tx_packets;
497 u32 xmit_completes;
498 u32 tx_drops;
499 u32 rcv_broadcasts;
500 u32 rcv_multicasts;
501 u32 rcv_unicasts;
502 u32 max_isr_rcvs;
503 u32 max_isr_xmits;
504 u32 rcv_interrupt_yields;
505 u32 intagg_period;
506 u32 intagg_delay;
507 u32 dynamic_intagg;
508 struct inicpm_state *inicpm_info;
509 void *pinicpm_info;
510 struct slic_ifevents if_events;
511 struct slic_stats inicstats_prev;
512 struct slicnet_stats slic_stats;
513};
514
515static inline u32 slic_read32(struct adapter *adapter, unsigned int reg)
516{
517 return ioread32(adapter->regs + reg);
518}
519
520static inline void slic_write32(struct adapter *adapter, unsigned int reg,
521 u32 val)
522{
523 iowrite32(val, adapter->regs + reg);
524}
525
526static inline void slic_write64(struct adapter *adapter, unsigned int reg,
527 u32 val, u32 hiaddr)
528{
529 unsigned long flags;
530
531 spin_lock_irqsave(&adapter->bit64reglock, flags);
532 slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr);
533 slic_write32(adapter, reg, val);
534 mmiowb();
535 spin_unlock_irqrestore(&adapter->bit64reglock, flags);
536}
537
538static inline void slic_flush_write(struct adapter *adapter)
539{
540 ioread32(adapter->regs + SLIC_REG_HOSTID);
541}
542
543#define UPDATE_STATS(largestat, newstat, oldstat) \
544{ \
545 if ((newstat) < (oldstat)) \
546		(largestat) += ((newstat) + (0xFFFFFFFF - (oldstat) + 1)); \
547 else \
548 (largestat) += ((newstat) - (oldstat)); \
549}
550
551#define UPDATE_STATS_GB(largestat, newstat, oldstat) \
552{ \
553 (largestat) += ((newstat) - (oldstat)); \
554}
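/*
 * Usage sketch (illustrative only, assumed names): fold a 32-bit
 * hardware counter that may have wrapped past zero into the 64-bit
 * running total kept in struct slic_stats.
 */
static inline void slic_example_fold_rcv_bytes(struct slic_stats *stats,
					       u32 newval, u32 *oldval)
{
	/* UPDATE_STATS adds the wrapped-around delta when newval < *oldval */
	UPDATE_STATS(stats->rcv_bytes, newval, *oldval);
	*oldval = newval;
}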
555
556#if BITS_PER_LONG == 64
557#define SLIC_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & \
558 0x00000000FFFFFFFF)
559#define SLIC_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & \
560 0x00000000FFFFFFFF)
561#elif BITS_PER_LONG == 32
562#define SLIC_GET_ADDR_LOW(_addr) (u32)(_addr)
563#define SLIC_GET_ADDR_HIGH(_addr) (u32)0
564#else
565#error BITS_PER_LONG must be 32 or 64
566#endif
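/*
 * Illustrative only (variable names are hypothetical): splitting a DMA
 * address into the low/high halves expected by slic_write64() above.
 * Per the SLIC_REG_RBAR description in slichw.h, bits 7-0 of the low
 * word carry the number of buffers passed, so the count is OR'd into
 * the aligned address.
 *
 *	slic_write64(adapter, SLIC_REG_RBAR64,
 *		     SLIC_GET_ADDR_LOW(paddr) | num_bufs,
 *		     SLIC_GET_ADDR_HIGH(paddr));
 */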
567
568#define FLUSH true
569#define DONT_FLUSH false
570
571#define SIOCSLICSETINTAGG (SIOCDEVPRIVATE + 10)
572
573#endif /* __SLIC_DRIVER_H__ */
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
deleted file mode 100644
index 49cb91aa02bb..000000000000
--- a/drivers/staging/slicoss/slichw.h
+++ /dev/null
@@ -1,652 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials provided
15 * with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * The views and conclusions contained in the software and documentation
31 * are those of the authors and should not be interpreted as representing
32 * official policies, either expressed or implied, of Alacritech, Inc.
33 *
34 **************************************************************************/
35
36/*
37 * FILENAME: slichw.h
38 *
39 * This header file contains definitions that are common to our hardware.
40 */
41#ifndef __SLICHW_H__
42#define __SLICHW_H__
43
44#define PCI_VENDOR_ID_ALACRITECH 0x139A
45#define SLIC_1GB_DEVICE_ID 0x0005
46#define SLIC_2GB_DEVICE_ID 0x0007 /* Oasis Device ID */
47
48#define SLIC_1GB_CICADA_SUBSYS_ID 0x0008
49
50#define SLIC_NBR_MACS 4
51
52#define SLIC_RCVBUF_SIZE 2048
53#define SLIC_RCVBUF_HEADSIZE 34
54#define SLIC_RCVBUF_TAILSIZE 0
55#define SLIC_RCVBUF_DATASIZE (SLIC_RCVBUF_SIZE - \
56 (SLIC_RCVBUF_HEADSIZE + \
57 SLIC_RCVBUF_TAILSIZE))
58
59#define VGBSTAT_XPERR 0x40000000
60#define VGBSTAT_XERRSHFT 25
61#define VGBSTAT_XCSERR 0x23
62#define VGBSTAT_XUFLOW 0x22
63#define VGBSTAT_XHLEN 0x20
64#define VGBSTAT_NETERR 0x01000000
65#define VGBSTAT_NERRSHFT 16
66#define VGBSTAT_NERRMSK 0x1ff
67#define VGBSTAT_NCSERR 0x103
68#define VGBSTAT_NUFLOW 0x102
69#define VGBSTAT_NHLEN 0x100
70#define VGBSTAT_LNKERR 0x00000080
71#define VGBSTAT_LERRMSK 0xff
72#define VGBSTAT_LDEARLY 0x86
73#define VGBSTAT_LBOFLO 0x85
74#define VGBSTAT_LCODERR 0x84
75#define VGBSTAT_LDBLNBL 0x83
76#define VGBSTAT_LCRCERR 0x82
77#define VGBSTAT_LOFLO 0x81
78#define VGBSTAT_LUFLO 0x80
79#define IRHDDR_FLEN_MSK 0x0000ffff
80#define IRHDDR_SVALID 0x80000000
81#define IRHDDR_ERR 0x10000000
82#define VRHSTAT_802OE 0x80000000
83#define VRHSTAT_TPOFLO 0x10000000
84#define VRHSTATB_802UE 0x80000000
85#define VRHSTATB_RCVE 0x40000000
86#define VRHSTATB_BUFF 0x20000000
87#define VRHSTATB_CARRE 0x08000000
88#define VRHSTATB_LONGE 0x02000000
89#define VRHSTATB_PREA 0x01000000
90#define VRHSTATB_CRC 0x00800000
91#define VRHSTATB_DRBL 0x00400000
92#define VRHSTATB_CODE 0x00200000
93#define VRHSTATB_TPCSUM 0x00100000
94#define VRHSTATB_TPHLEN 0x00080000
95#define VRHSTATB_IPCSUM 0x00040000
96#define VRHSTATB_IPLERR 0x00020000
97#define VRHSTATB_IPHERR 0x00010000
98#define SLIC_MAX64_BCNT 23
99#define SLIC_MAX32_BCNT 26
100#define IHCMD_XMT_REQ 0x01
101#define IHFLG_IFSHFT 2
102#define SLIC_RSPBUF_SIZE 32
103
104#define SLIC_RESET_MAGIC 0xDEAD
105#define ICR_INT_OFF 0
106#define ICR_INT_ON 1
107#define ICR_INT_MASK 2
108
109#define ISR_ERR 0x80000000
110#define ISR_RCV 0x40000000
111#define ISR_CMD 0x20000000
112#define ISR_IO 0x60000000
113#define ISR_UPC 0x10000000
114#define ISR_LEVENT 0x08000000
115#define ISR_RMISS 0x02000000
116#define ISR_UPCERR 0x01000000
117#define ISR_XDROP 0x00800000
118#define ISR_UPCBSY 0x00020000
119#define ISR_EVMSK 0xffff0000
120#define ISR_PINGMASK 0x00700000
121#define ISR_PINGDSMASK 0x00710000
122#define ISR_UPCMASK 0x11000000
123#define SLIC_WCS_START 0x80000000
124#define SLIC_WCS_COMPARE 0x40000000
125#define SLIC_RCVWCS_BEGIN 0x40000000
126#define SLIC_RCVWCS_FINISH 0x80000000
127#define SLIC_PM_MAXPATTERNS 6
128#define SLIC_PM_PATTERNSIZE 128
129#define SLIC_PMCAPS_WAKEONLAN 0x00000001
130#define MIICR_REG_PCR 0x00000000
131#define MIICR_REG_4 0x00040000
132#define MIICR_REG_9 0x00090000
133#define MIICR_REG_16 0x00100000
134#define PCR_RESET 0x8000
135#define PCR_POWERDOWN 0x0800
136#define PCR_SPEED_100 0x2000
137#define PCR_SPEED_1000 0x0040
138#define PCR_AUTONEG 0x1000
139#define PCR_AUTONEG_RST 0x0200
140#define PCR_DUPLEX_FULL 0x0100
141#define PSR_LINKUP 0x0004
142
143#define PAR_ADV100FD 0x0100
144#define PAR_ADV100HD 0x0080
145#define PAR_ADV10FD 0x0040
146#define PAR_ADV10HD 0x0020
147#define PAR_ASYMPAUSE 0x0C00
148#define PAR_802_3 0x0001
149
150#define PAR_ADV1000XFD 0x0020
151#define PAR_ADV1000XHD 0x0040
152#define PAR_ASYMPAUSE_FIBER 0x0180
153
154#define PGC_ADV1000FD 0x0200
155#define PGC_ADV1000HD 0x0100
156#define SEEQ_LINKFAIL 0x4000
157#define SEEQ_SPEED 0x0080
158#define SEEQ_DUPLEX 0x0040
159#define TDK_DUPLEX 0x0800
160#define TDK_SPEED 0x0400
161#define MRV_REG16_XOVERON 0x0068
162#define MRV_REG16_XOVEROFF 0x0008
163#define MRV_SPEED_1000 0x8000
164#define MRV_SPEED_100 0x4000
165#define MRV_SPEED_10 0x0000
166#define MRV_FULLDUPLEX 0x2000
167#define MRV_LINKUP 0x0400
168
169#define GIG_LINKUP 0x0001
170#define GIG_FULLDUPLEX 0x0002
171#define GIG_SPEED_MASK 0x000C
172#define GIG_SPEED_1000 0x0008
173#define GIG_SPEED_100 0x0004
174#define GIG_SPEED_10 0x0000
175
176#define MCR_RESET 0x80000000
177#define MCR_CRCEN 0x40000000
178#define MCR_FULLD 0x10000000
179#define MCR_PAD 0x02000000
180#define MCR_RETRYLATE 0x01000000
181#define MCR_BOL_SHIFT 21
182#define MCR_IPG1_SHIFT 14
183#define MCR_IPG2_SHIFT 7
184#define MCR_IPG3_SHIFT 0
185#define GMCR_RESET 0x80000000
186#define GMCR_GBIT 0x20000000
187#define GMCR_FULLD 0x10000000
188#define GMCR_GAPBB_SHIFT 14
189#define GMCR_GAPR1_SHIFT 7
190#define GMCR_GAPR2_SHIFT 0
191#define GMCR_GAPBB_1000 0x60
192#define GMCR_GAPR1_1000 0x2C
193#define GMCR_GAPR2_1000 0x40
194#define GMCR_GAPBB_100 0x70
195#define GMCR_GAPR1_100 0x2C
196#define GMCR_GAPR2_100 0x40
197#define XCR_RESET 0x80000000
198#define XCR_XMTEN 0x40000000
199#define XCR_PAUSEEN 0x20000000
200#define XCR_LOADRNG 0x10000000
201#define RCR_RESET 0x80000000
202#define RCR_RCVEN 0x40000000
203#define RCR_RCVALL 0x20000000
204#define RCR_RCVBAD 0x10000000
205#define RCR_CTLEN 0x08000000
206#define RCR_ADDRAEN 0x02000000
207#define GXCR_RESET 0x80000000
208#define GXCR_XMTEN 0x40000000
209#define GXCR_PAUSEEN 0x20000000
210#define GRCR_RESET 0x80000000
211#define GRCR_RCVEN 0x40000000
212#define GRCR_RCVALL 0x20000000
213#define GRCR_RCVBAD 0x10000000
214#define GRCR_CTLEN 0x08000000
215#define GRCR_ADDRAEN 0x02000000
216#define GRCR_HASHSIZE_SHIFT 17
217#define GRCR_HASHSIZE 14
218
219#define SLIC_EEPROM_ID 0xA5A5
220#define SLIC_SRAM_SIZE2GB (64 * 1024)
221#define SLIC_SRAM_SIZE1GB (32 * 1024)
222#define SLIC_HOSTID_DEFAULT 0xFFFF /* uninitialized hostid */
223#define SLIC_NBR_MACS 4
224
225struct slic_rcvbuf {
226 u8 pad1[6];
227 u16 pad2;
228 u32 pad3;
229 u32 pad4;
230 u32 buffer;
231 u32 length;
232 u32 status;
233 u32 pad5;
234 u16 pad6;
235 u8 data[SLIC_RCVBUF_DATASIZE];
236};
237
238struct slic_hddr_wds {
239 union {
240 struct {
241 u32 frame_status;
242 u32 frame_status_b;
243 u32 time_stamp;
244 u32 checksum;
245 } hdrs_14port;
246 struct {
247 u32 frame_status;
248 u16 ByteCnt;
249 u16 TpChksum;
250 u16 CtxHash;
251 u16 MacHash;
252 u32 BufLnk;
253 } hdrs_gbit;
254 } u0;
255};
256
257#define frame_status14 u0.hdrs_14port.frame_status
258#define frame_status_b14 u0.hdrs_14port.frame_status_b
259#define frame_statusGB u0.hdrs_gbit.frame_status
260
261struct slic_host64sg {
262 u32 paddrl;
263 u32 paddrh;
264 u32 length;
265};
266
267struct slic_host64_cmd {
268 u32 hosthandle;
269 u32 RSVD;
270 u8 command;
271 u8 flags;
272 union {
273 u16 rsv1;
274 u16 rsv2;
275 } u0;
276 union {
277 struct {
278 u32 totlen;
279 struct slic_host64sg bufs[SLIC_MAX64_BCNT];
280 } slic_buffers;
281 } u;
282};
283
284struct slic_rspbuf {
285 u32 hosthandle;
286 u32 pad0;
287 u32 pad1;
288 u32 status;
289 u32 pad2[4];
290};
291
292/* Reset Register */
293#define SLIC_REG_RESET 0x0000
294/* Interrupt Control Register */
295#define SLIC_REG_ICR 0x0008
296/* Interrupt status pointer */
297#define SLIC_REG_ISP 0x0010
298/* Interrupt status */
299#define SLIC_REG_ISR 0x0018
300/*
301 * Header buffer address reg
302 * 31-8 - phy addr of set of contiguous hdr buffers
303 * 7-0 - number of buffers passed
304 * Buffers are 256 bytes long on 256-byte boundaries.
305 */
306#define SLIC_REG_HBAR 0x0020
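/*
 * Illustrative example (hypothetical values): handing the card
 * hdrbuf_count 256-byte header buffers whose 256-byte-aligned physical
 * base address is hdrbuf_phys, following the bit layout described above.
 *
 *	slic_write32(adapter, SLIC_REG_HBAR,
 *		     (u32)hdrbuf_phys | hdrbuf_count);
 */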
307/*
308 * Data buffer handle & address reg
309 * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page.
310 */
311#define SLIC_REG_DBAR 0x0028
312/*
313 * Xmt Cmd buf addr regs.
314 * 1 per XMT interface
315 * 31-5 - phy addr of host command buffer
316 * 4-0 - length of cmd in multiples of 32 bytes
317 * Buffers are 32 bytes up to 512 bytes long
318 */
319#define SLIC_REG_CBAR 0x0030
320/* Write control store */
321#define SLIC_REG_WCS 0x0034
322/*
323 * Response buffer address reg.
324 * 31-8 - phy addr of set of contiguous response buffers
325 * 7-0 - number of buffers passed
326 * Buffers are 32 bytes long on 32-byte boundaries.
327 */
328#define SLIC_REG_RBAR 0x0038
329/* Read statistics (UPR) */
330#define SLIC_REG_RSTAT 0x0040
331/* Read link status */
332#define SLIC_REG_LSTAT 0x0048
333/* Write Mac Config */
334#define SLIC_REG_WMCFG 0x0050
335/* Write phy register */
336#define SLIC_REG_WPHY 0x0058
337/* Rcv Cmd buf addr reg */
338#define SLIC_REG_RCBAR 0x0060
339/* Read SLIC Config*/
340#define SLIC_REG_RCONFIG 0x0068
341/* Interrupt aggregation time */
342#define SLIC_REG_INTAGG 0x0070
343/* Write XMIT config reg */
344#define SLIC_REG_WXCFG 0x0078
345/* Write RCV config reg */
346#define SLIC_REG_WRCFG 0x0080
347/* Write rcv addr a low */
348#define SLIC_REG_WRADDRAL 0x0088
349/* Write rcv addr a high */
350#define SLIC_REG_WRADDRAH 0x0090
351/* Write rcv addr b low */
352#define SLIC_REG_WRADDRBL 0x0098
353/* Write rcv addr b high */
354#define SLIC_REG_WRADDRBH 0x00a0
355/* Low bits of mcast mask */
356#define SLIC_REG_MCASTLOW 0x00a8
357/* High bits of mcast mask */
358#define SLIC_REG_MCASTHIGH 0x00b0
359/* Ping the card */
360#define SLIC_REG_PING 0x00b8
361/* Dump command */
362#define SLIC_REG_DUMP_CMD 0x00c0
363/* Dump data pointer */
364#define SLIC_REG_DUMP_DATA 0x00c8
365/* Read card's pci_status register */
366#define SLIC_REG_PCISTATUS 0x00d0
367/* Write hostid field */
368#define SLIC_REG_WRHOSTID 0x00d8
369/* Put card in a low power state */
370#define SLIC_REG_LOW_POWER 0x00e0
371/* Force slic into quiescent state before soft reset */
372#define SLIC_REG_QUIESCE 0x00e8
373/* Reset interface queues */
374#define SLIC_REG_RESET_IFACE 0x00f0
375/*
376 * Register is only written when it has changed.
377 * Bits 63-32 for host i/f addrs.
378 */
379#define SLIC_REG_ADDR_UPPER 0x00f8
380/* 64 bit Header buffer address reg */
381#define SLIC_REG_HBAR64 0x0100
382/* 64 bit Data buffer handle & address reg */
383#define SLIC_REG_DBAR64 0x0108
384/* 64 bit Xmt Cmd buf addr regs. */
385#define SLIC_REG_CBAR64 0x0110
386/* 64 bit Response buffer address reg.*/
387#define SLIC_REG_RBAR64 0x0118
388/* 64 bit Rcv Cmd buf addr reg*/
389#define SLIC_REG_RCBAR64 0x0120
390/* Read statistics (64 bit UPR) */
391#define SLIC_REG_RSTAT64 0x0128
392/* Download Gigabit RCV sequencer ucode */
393#define SLIC_REG_RCV_WCS 0x0130
394/* Write VlanId field */
395#define SLIC_REG_WRVLANID 0x0138
396/* Read Transformer info */
397#define SLIC_REG_READ_XF_INFO 0x0140
398/* Write Transformer info */
399#define SLIC_REG_WRITE_XF_INFO 0x0148
400/* Write card ticks per second */
401#define SLIC_REG_TICKS_PER_SEC 0x0170
402
403#define SLIC_REG_HOSTID 0x1554
404
405enum UPR_REQUEST {
406 SLIC_UPR_STATS,
407 SLIC_UPR_RLSR,
408 SLIC_UPR_WCFG,
409 SLIC_UPR_RCONFIG,
410 SLIC_UPR_RPHY,
411 SLIC_UPR_ENLB,
412 SLIC_UPR_ENCT,
413 SLIC_UPR_PDWN,
414 SLIC_UPR_PING,
415 SLIC_UPR_DUMP,
416};
417
418struct inicpm_wakepattern {
419 u32 patternlength;
420 u8 pattern[SLIC_PM_PATTERNSIZE];
421 u8 mask[SLIC_PM_PATTERNSIZE];
422};
423
424struct inicpm_state {
425 u32 powercaps;
426 u32 powerstate;
427 u32 wake_linkstatus;
428 u32 wake_magicpacket;
429 u32 wake_framepattern;
430 struct inicpm_wakepattern wakepattern[SLIC_PM_MAXPATTERNS];
431};
432
433struct slicpm_packet_pattern {
434 u32 priority;
435 u32 reserved;
436 u32 masksize;
437 u32 patternoffset;
438 u32 patternsize;
439 u32 patternflags;
440};
441
442enum slicpm_power_state {
443 slicpm_state_unspecified = 0,
444 slicpm_state_d0,
445 slicpm_state_d1,
446 slicpm_state_d2,
447 slicpm_state_d3,
448 slicpm_state_maximum
449};
450
451struct slicpm_wakeup_capabilities {
452 enum slicpm_power_state min_magic_packet_wakeup;
453 enum slicpm_power_state min_pattern_wakeup;
454 enum slicpm_power_state min_link_change_wakeup;
455};
456
457struct slic_pnp_capabilities {
458 u32 flags;
459 struct slicpm_wakeup_capabilities wakeup_capabilities;
460};
461
462struct slic_config_mac {
463 u8 macaddrA[6];
464};
465
466#define ATK_FRU_FORMAT 0x00
467#define VENDOR1_FRU_FORMAT 0x01
468#define VENDOR2_FRU_FORMAT 0x02
469#define VENDOR3_FRU_FORMAT 0x03
470#define VENDOR4_FRU_FORMAT 0x04
471#define NO_FRU_FORMAT 0xFF
472
473struct atk_fru {
474 u8 assembly[6];
475 u8 revision[2];
476 u8 serial[14];
477 u8 pad[3];
478};
479
480struct vendor1_fru {
481 u8 commodity;
482 u8 assembly[4];
483 u8 revision[2];
484 u8 supplier[2];
485 u8 date[2];
486 u8 sequence[3];
487 u8 pad[13];
488};
489
490struct vendor2_fru {
491 u8 part[8];
492 u8 supplier[5];
493 u8 date[3];
494 u8 sequence[4];
495 u8 pad[7];
496};
497
498struct vendor3_fru {
499 u8 assembly[6];
500 u8 revision[2];
501 u8 serial[14];
502 u8 pad[3];
503};
504
505struct vendor4_fru {
506 u8 number[8];
507 u8 part[8];
508 u8 version[8];
509 u8 pad[3];
510};
511
512union oemfru {
513 struct vendor1_fru vendor1_fru;
514 struct vendor2_fru vendor2_fru;
515 struct vendor3_fru vendor3_fru;
516 struct vendor4_fru vendor4_fru;
517};
518
519/*
520 * SLIC EEPROM structure for Mojave
521 */
522struct slic_eeprom {
523 u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/
524 u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
525 u16 FlashSize; /* 02 Flash size */
526 u16 EepromSize; /* 03 EEPROM Size */
527 u16 VendorId; /* 04 Vendor ID */
528 u16 DeviceId; /* 05 Device ID */
529 u8 RevisionId; /* 06 Revision ID */
530 u8 ClassCode[3]; /* 07 Class Code */
531 u8 DbgIntPin; /* 08 Debug Interrupt pin */
532 u8 NetIntPin0; /* Network Interrupt Pin */
533 u8 MinGrant; /* 09 Minimum grant */
534 u8 MaxLat; /* Maximum Latency */
535 u16 PciStatus; /* 10 PCI Status */
536 u16 SubSysVId; /* 11 Subsystem Vendor Id */
537 u16 SubSysId; /* 12 Subsystem ID */
538 u16 DbgDevId; /* 13 Debug Device Id */
539 u16 DramRomFn; /* 14 Dram/Rom function */
540 u16 DSize2Pci; /* 15 DRAM size to PCI (bytes * 64K) */
541 u16 RSize2Pci; /* 16 ROM extension size to PCI (bytes * 4k) */
542 u8 NetIntPin1; /* 17 Network Interface Pin 1
543 * (simba/leone only)
544 */
545 u8 NetIntPin2; /* Network Interface Pin 2 (simba/leone only)*/
546 union {
547 u8 NetIntPin3; /* 18 Network Interface Pin 3 (simba only) */
548 u8 FreeTime; /* FreeTime setting (leone/mojave only) */
549 } u1;
550 u8 TBIctl; /* 10-bit interface control (Mojave only) */
551 u16 DramSize; /* 19 DRAM size (bytes * 64k) */
552 union {
553 struct {
554 /* Mac Interface Specific portions */
555 struct slic_config_mac MacInfo[SLIC_NBR_MACS];
556 } mac; /* MAC access for all boards */
557 struct {
558 /* use above struct for MAC access */
559 struct slic_config_mac pad[SLIC_NBR_MACS - 1];
560 u16 DeviceId2; /* Device ID for 2nd PCI function */
561 u8 IntPin2; /* Interrupt pin for 2nd PCI function */
562 u8 ClassCode2[3]; /* Class Code for 2nd PCI function */
563 } mojave; /* 2nd function access for gigabit board */
564 } u2;
565 u16 CfgByte6; /* Config Byte 6 */
566 u16 PMECapab; /* Power Mgment capabilities */
567 u16 NwClkCtrls; /* NetworkClockControls */
568 u8 FruFormat; /* Alacritech FRU format type */
569 struct atk_fru AtkFru; /* Alacritech FRU information */
570 u8 OemFruFormat; /* optional OEM FRU format type */
571 union oemfru OemFru; /* optional OEM FRU information */
572 u8 Pad[4]; /* Pad to 128 bytes - includes 2 cksum bytes
573 * (if OEM FRU info exists) and two unusable
574 * bytes at the end
575 */
576};
577
578/* SLIC EEPROM structure for Oasis */
579struct oslic_eeprom {
580 u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5' */
581 u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
582 u16 FlashConfig0; /* 02 Flash Config for SPI device 0 */
583 u16 FlashConfig1; /* 03 Flash Config for SPI device 1 */
584 u16 VendorId; /* 04 Vendor ID */
585 u16 DeviceId; /* 05 Device ID (function 0) */
586 u8 RevisionId; /* 06 Revision ID */
587 u8 ClassCode[3]; /* 07 Class Code for PCI function 0 */
588 u8 IntPin1; /* 08 Interrupt pin for PCI function 1*/
589 u8 ClassCode2[3]; /* 09 Class Code for PCI function 1 */
590 u8 IntPin2; /* 10 Interrupt pin for PCI function 2*/
591 u8 IntPin0; /* Interrupt pin for PCI function 0*/
592 u8 MinGrant; /* 11 Minimum grant */
593 u8 MaxLat; /* Maximum Latency */
594 u16 SubSysVId; /* 12 Subsystem Vendor Id */
595 u16 SubSysId; /* 13 Subsystem ID */
596 u16 FlashSize; /* 14 Flash size (bytes / 4K) */
597 u16 DSize2Pci; /* 15 DRAM size to PCI (bytes / 64K) */
598 u16 RSize2Pci; /* 16 Flash (ROM extension) size to PCI
599 * (bytes / 4K)
600 */
601 u16 DeviceId1; /* 17 Device Id (function 1) */
602 u16 DeviceId2; /* 18 Device Id (function 2) */
603 u16 CfgByte6; /* 19 Device Status Config Bytes 6-7 */
604 u16 PMECapab; /* 20 Power Mgment capabilities */
605 u8 MSICapab; /* 21 MSI capabilities */
606 u8 ClockDivider; /* Clock divider */
607 u16 PciStatusLow; /* 22 PCI Status bits 15:0 */
608 u16 PciStatusHigh; /* 23 PCI Status bits 31:16 */
609 u16 DramConfigLow; /* 24 DRAM Configuration bits 15:0 */
610 u16 DramConfigHigh; /* 25 DRAM Configuration bits 31:16 */
611 u16 DramSize; /* 26 DRAM size (bytes / 64K) */
612 u16 GpioTbiCtl; /* 27 GPIO/TBI controls for functions 1/0 */
613 u16 EepromSize; /* 28 EEPROM Size */
614 struct slic_config_mac MacInfo[2]; /* 29 MAC addresses (2 ports) */
615 u8 FruFormat; /* 35 Alacritech FRU format type */
616 struct atk_fru AtkFru; /* Alacritech FRU information */
617 u8 OemFruFormat; /* optional OEM FRU format type */
618 union oemfru OemFru; /* optional OEM FRU information */
619 u8 Pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes
620 * (if OEM FRU info exists) and two unusable
621 * bytes at the end
622 */
623};
624
625#define MAX_EECODE_SIZE sizeof(struct slic_eeprom)
626#define MIN_EECODE_SIZE 0x62 /* code size without optional OEM FRU stuff */
627
628/*
629 * SLIC CONFIG structure
630 *
631 * This structure lives in the CARD structure and is valid for all board types.
632 * It is filled in from the appropriate EEPROM structure by
633 * SlicGetConfigData()
634 */
635struct slic_config {
636 bool EepromValid; /* Valid EEPROM flag (checksum good?) */
637 u16 DramSize; /* DRAM size (bytes / 64K) */
638 struct slic_config_mac MacInfo[SLIC_NBR_MACS]; /* MAC addresses */
639 u8 FruFormat; /* Alacritech FRU format type */
640 struct atk_fru AtkFru; /* Alacritech FRU information */
641 u8 OemFruFormat; /* optional OEM FRU format type */
642 union {
643 struct vendor1_fru vendor1_fru;
644 struct vendor2_fru vendor2_fru;
645 struct vendor3_fru vendor3_fru;
646 struct vendor4_fru vendor4_fru;
647 } OemFru;
648};
649
650#pragma pack()
651
652#endif
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
deleted file mode 100644
index 2802b900f8ee..000000000000
--- a/drivers/staging/slicoss/slicoss.c
+++ /dev/null
@@ -1,3131 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2000-2006 Alacritech, Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") version 2 as published by the Free
18 * Software Foundation.
19 *
20 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
27 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * The views and conclusions contained in the software and documentation
34 * are those of the authors and should not be interpreted as representing
35 * official policies, either expressed or implied, of Alacritech, Inc.
36 *
37 **************************************************************************/
38
39/*
40 * FILENAME: slicoss.c
41 *
42 * The SLICOSS driver for Alacritech's IS-NIC products.
43 *
44 * This driver is supposed to support:
45 *
46 * Mojave cards (single port PCI Gigabit) both copper and fiber
47 * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
48 * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
49 *
50 * The driver was actually tested on Oasis and Kalahari cards.
51 *
52 *
53 * NOTE: This is the standard, non-accelerated version of Alacritech's
54 * IS-NIC driver.
55 */
56
57#define KLUDGE_FOR_4GB_BOUNDARY 1
58#define DEBUG_MICROCODE 1
59#define DBG 1
60#define SLIC_INTERRUPT_PROCESS_LIMIT 1
61#define SLIC_OFFLOAD_IP_CHECKSUM 1
62#define STATS_TIMER_INTERVAL 2
63#define PING_TIMER_INTERVAL 1
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
66#include <linux/kernel.h>
67#include <linux/string.h>
68#include <linux/errno.h>
69#include <linux/ioport.h>
70#include <linux/slab.h>
71#include <linux/interrupt.h>
72#include <linux/timer.h>
73#include <linux/pci.h>
74#include <linux/spinlock.h>
75#include <linux/init.h>
76#include <linux/bitops.h>
77#include <linux/io.h>
78#include <linux/netdevice.h>
79#include <linux/crc32.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/delay.h>
83#include <linux/seq_file.h>
84#include <linux/kthread.h>
85#include <linux/module.h>
86
87#include <linux/firmware.h>
88#include <linux/types.h>
89#include <linux/dma-mapping.h>
90#include <linux/mii.h>
91#include <linux/if_vlan.h>
92#include <asm/unaligned.h>
93
94#include <linux/ethtool.h>
95#include <linux/uaccess.h>
96#include "slichw.h"
97#include "slic.h"
98
99static uint slic_first_init = 1;
100static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)";
101
102static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
103
104static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
105#define DEFAULT_INTAGG_DELAY 100
106static unsigned int rcv_count;
107
108#define DRV_NAME "slicoss"
109#define DRV_VERSION "2.0.1"
110#define DRV_AUTHOR "Alacritech, Inc. Engineering"
111#define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) "\
112 "Non-Accelerated Driver"
113#define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\
114 "All rights reserved."
115#define PFX DRV_NAME " "
116
117MODULE_AUTHOR(DRV_AUTHOR);
118MODULE_DESCRIPTION(DRV_DESCRIPTION);
119MODULE_LICENSE("Dual BSD/GPL");
120
121static const struct pci_device_id slic_pci_tbl[] = {
122 { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
123 { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
124 { 0 }
125};
126
127static const struct ethtool_ops slic_ethtool_ops;
128
129MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
130
131static void slic_mcast_set_bit(struct adapter *adapter, char *address)
132{
133 unsigned char crcpoly;
134
135 /* Get the CRC polynomial for the mac address */
136 /*
137 * we use bits 1-8 (lsb), bitwise reversed,
138 * msb (= lsb bit 0 before bitrev) is automatically discarded
139 */
140 crcpoly = ether_crc(ETH_ALEN, address) >> 23;
141
142 /*
143 * We only have space on the SLIC for 64 entries. Lop
144 * off the top two bits. (2^6 = 64)
145 */
146 crcpoly &= 0x3F;
147
148 /* OR in the new bit into our 64 bit mask. */
149 adapter->mcastmask |= (u64)1 << crcpoly;
150}
151
152static void slic_mcast_set_mask(struct adapter *adapter)
153{
154 if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
155 /*
156 * Turn on all multicast addresses. We have to do this for
157 * promiscuous mode as well as ALLMCAST mode. It saves the
158 * Microcode from having to keep state about the MAC
159 * configuration.
160 */
161 slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF);
162 slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF);
163 } else {
164 /*
165		 * Commit our multicast mask to the SLIC by writing to the
166 * multicast address mask registers
167 */
168 slic_write32(adapter, SLIC_REG_MCASTLOW,
169 (u32)(adapter->mcastmask & 0xFFFFFFFF));
170 slic_write32(adapter, SLIC_REG_MCASTHIGH,
171 (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF));
172 }
173}
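/*
 * Usage sketch (illustrative, not from the original source): add one
 * multicast address to the software mask and push the updated mask
 * (or the all-multicast setting) to the card.
 */
static inline void slic_example_add_mcast(struct adapter *adapter, char *addr)
{
	slic_mcast_set_bit(adapter, addr);
	slic_mcast_set_mask(adapter);
}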
174
175static void slic_timer_ping(ulong dev)
176{
177 struct adapter *adapter;
178 struct sliccard *card;
179
180 adapter = netdev_priv((struct net_device *)dev);
181 card = adapter->card;
182
183 adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
184 add_timer(&adapter->pingtimer);
185}
186
187/*
188 * slic_link_config
189 *
190 * Write phy control to configure link duplex/speed
191 *
192 */
193static void slic_link_config(struct adapter *adapter,
194 u32 linkspeed, u32 linkduplex)
195{
196 u32 speed;
197 u32 duplex;
198 u32 phy_config;
199 u32 phy_advreg;
200 u32 phy_gctlreg;
201
202 if (adapter->state != ADAPT_UP)
203 return;
204
205 if (linkspeed > LINK_1000MB)
206 linkspeed = LINK_AUTOSPEED;
207 if (linkduplex > LINK_AUTOD)
208 linkduplex = LINK_AUTOD;
209
210 if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
211 if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
212 /*
213 * We've got a fiber gigabit interface, and register
214 * 4 is different in fiber mode than in copper mode
215 */
216
217 /* advertise FD only @1000 Mb */
218 phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
219 /* enable PAUSE frames */
220 phy_advreg |= PAR_ASYMPAUSE_FIBER;
221 slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
222
223 if (linkspeed == LINK_AUTOSPEED) {
224 /* reset phy, enable auto-neg */
225 phy_config =
226 (MIICR_REG_PCR |
227 (PCR_RESET | PCR_AUTONEG |
228 PCR_AUTONEG_RST));
229 slic_write32(adapter, SLIC_REG_WPHY,
230 phy_config);
231 } else { /* forced 1000 Mb FD*/
232 /*
233 * power down phy to break link
234				 * (this may not work)
235 */
236 phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
237 slic_write32(adapter, SLIC_REG_WPHY,
238 phy_config);
239 slic_flush_write(adapter);
240 /*
241 * wait, Marvell says 1 sec,
242 * try to get away with 10 ms
243 */
244 mdelay(10);
245
246 /*
247 * disable auto-neg, set speed/duplex,
248 * soft reset phy, powerup
249 */
250 phy_config =
251 (MIICR_REG_PCR |
252 (PCR_RESET | PCR_SPEED_1000 |
253 PCR_DUPLEX_FULL));
254 slic_write32(adapter, SLIC_REG_WPHY,
255 phy_config);
256 }
257 } else { /* copper gigabit */
258
259 /*
260 * Auto-Negotiate or 1000 Mb must be auto negotiated
261 * We've got a copper gigabit interface, and
262 * register 4 is different in copper mode than
263 * in fiber mode
264 */
265 if (linkspeed == LINK_AUTOSPEED) {
266 /* advertise 10/100 Mb modes */
267 phy_advreg =
268 (MIICR_REG_4 |
269 (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
270 | PAR_ADV10HD));
271 } else {
272 /*
273 * linkspeed == LINK_1000MB -
274 * don't advertise 10/100 Mb modes
275 */
276 phy_advreg = MIICR_REG_4;
277 }
278 /* enable PAUSE frames */
279 phy_advreg |= PAR_ASYMPAUSE;
280 /* required by the Cicada PHY */
281 phy_advreg |= PAR_802_3;
282 slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
283 /* advertise FD only @1000 Mb */
284 phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
285 slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg);
286
287 if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
288 /*
289 * if a Marvell PHY
290 * enable auto crossover
291 */
292 phy_config =
293 (MIICR_REG_16 | (MRV_REG16_XOVERON));
294 slic_write32(adapter, SLIC_REG_WPHY,
295 phy_config);
296
297 /* reset phy, enable auto-neg */
298 phy_config =
299 (MIICR_REG_PCR |
300 (PCR_RESET | PCR_AUTONEG |
301 PCR_AUTONEG_RST));
302 slic_write32(adapter, SLIC_REG_WPHY,
303 phy_config);
304 } else { /* it's a Cicada PHY */
305 /* enable and restart auto-neg (don't reset) */
306 phy_config =
307 (MIICR_REG_PCR |
308 (PCR_AUTONEG | PCR_AUTONEG_RST));
309 slic_write32(adapter, SLIC_REG_WPHY,
310 phy_config);
311 }
312 }
313 } else {
314 /* Forced 10/100 */
315 if (linkspeed == LINK_10MB)
316 speed = 0;
317 else
318 speed = PCR_SPEED_100;
319 if (linkduplex == LINK_HALFD)
320 duplex = 0;
321 else
322 duplex = PCR_DUPLEX_FULL;
323
324 if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
325 /*
326 * if a Marvell PHY
327 * disable auto crossover
328 */
329 phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
330 slic_write32(adapter, SLIC_REG_WPHY, phy_config);
331 }
332
333 /* power down phy to break link (this may not work) */
334 phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
335 slic_write32(adapter, SLIC_REG_WPHY, phy_config);
336 slic_flush_write(adapter);
337 /* wait, Marvell says 1 sec, try to get away with 10 ms */
338 mdelay(10);
339
340 if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
341 /*
342 * if a Marvell PHY
343 * disable auto-neg, set speed,
344 * soft reset phy, powerup
345 */
346 phy_config =
347 (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
348 slic_write32(adapter, SLIC_REG_WPHY, phy_config);
349 } else { /* it's a Cicada PHY */
350 /* disable auto-neg, set speed, powerup */
351 phy_config = (MIICR_REG_PCR | (speed | duplex));
352 slic_write32(adapter, SLIC_REG_WPHY, phy_config);
353 }
354 }
355}
356
357static int slic_card_download_gbrcv(struct adapter *adapter)
358{
359 const struct firmware *fw;
360 const char *file = "";
361 int ret;
362 u32 codeaddr;
363 u32 instruction;
364 int index = 0;
365 u32 rcvucodelen = 0;
366
367 switch (adapter->devid) {
368 case SLIC_2GB_DEVICE_ID:
369 file = "slicoss/oasisrcvucode.sys";
370 break;
371 case SLIC_1GB_DEVICE_ID:
372 file = "slicoss/gbrcvucode.sys";
373 break;
374 default:
375 return -ENOENT;
376 }
377
378 ret = request_firmware(&fw, file, &adapter->pcidev->dev);
379 if (ret) {
380 dev_err(&adapter->pcidev->dev,
381 "Failed to load firmware %s\n", file);
382 return ret;
383 }
384
385 rcvucodelen = *(u32 *)(fw->data + index);
386 index += 4;
387 switch (adapter->devid) {
388 case SLIC_2GB_DEVICE_ID:
389 if (rcvucodelen != OasisRcvUCodeLen) {
390 release_firmware(fw);
391 return -EINVAL;
392 }
393 break;
394 case SLIC_1GB_DEVICE_ID:
395 if (rcvucodelen != GBRcvUCodeLen) {
396 release_firmware(fw);
397 return -EINVAL;
398 }
399 break;
400 }
401 /* start download */
402 slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
403 /* download the rcv sequencer ucode */
404 for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
405 /* write out instruction address */
406 slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr);
407
408 instruction = *(u32 *)(fw->data + index);
409 index += 4;
410 /* write out the instruction data low addr */
411 slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
412
413 instruction = *(u8 *)(fw->data + index);
414 index++;
415 /* write out the instruction data high addr */
416 slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
417 }
418
419 /* download finished */
420 release_firmware(fw);
421 slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
422 slic_flush_write(adapter);
423
424 return 0;
425}
426
427MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
428MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
429
430static int slic_card_download(struct adapter *adapter)
431{
432 const struct firmware *fw;
433 const char *file = "";
434 int ret;
435 u32 section;
436 int thissectionsize;
437 int codeaddr;
438 u32 instruction;
439 u32 baseaddress;
440 u32 i;
441 u32 numsects = 0;
442 u32 sectsize[3];
443 u32 sectstart[3];
444 int ucode_start, index = 0;
445
446 switch (adapter->devid) {
447 case SLIC_2GB_DEVICE_ID:
448 file = "slicoss/oasisdownload.sys";
449 break;
450 case SLIC_1GB_DEVICE_ID:
451 file = "slicoss/gbdownload.sys";
452 break;
453 default:
454 return -ENOENT;
455 }
456 ret = request_firmware(&fw, file, &adapter->pcidev->dev);
457 if (ret) {
458 dev_err(&adapter->pcidev->dev,
459 "Failed to load firmware %s\n", file);
460 return ret;
461 }
462 numsects = *(u32 *)(fw->data + index);
463 index += 4;
464 for (i = 0; i < numsects; i++) {
465 sectsize[i] = *(u32 *)(fw->data + index);
466 index += 4;
467 }
468 for (i = 0; i < numsects; i++) {
469 sectstart[i] = *(u32 *)(fw->data + index);
470 index += 4;
471 }
472 ucode_start = index;
473 instruction = *(u32 *)(fw->data + index);
474 index += 4;
475 for (section = 0; section < numsects; section++) {
476 baseaddress = sectstart[section];
477 thissectionsize = sectsize[section] >> 3;
478
479 for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
480 /* Write out instruction address */
481 slic_write32(adapter, SLIC_REG_WCS,
482 baseaddress + codeaddr);
483 /* Write out instruction to low addr */
484 slic_write32(adapter, SLIC_REG_WCS,
485 instruction);
486 instruction = *(u32 *)(fw->data + index);
487 index += 4;
488
489 /* Write out instruction to high addr */
490 slic_write32(adapter, SLIC_REG_WCS,
491 instruction);
492 instruction = *(u32 *)(fw->data + index);
493 index += 4;
494 }
495 }
496 index = ucode_start;
497 for (section = 0; section < numsects; section++) {
498 instruction = *(u32 *)(fw->data + index);
499 baseaddress = sectstart[section];
500 if (baseaddress < 0x8000)
501 continue;
502 thissectionsize = sectsize[section] >> 3;
503
504 for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
505 /* Write out instruction address */
506 slic_write32(adapter, SLIC_REG_WCS,
507 SLIC_WCS_COMPARE | (baseaddress +
508 codeaddr));
509 /* Write out instruction to low addr */
510 slic_write32(adapter, SLIC_REG_WCS, instruction);
511 instruction = *(u32 *)(fw->data + index);
512 index += 4;
513 /* Write out instruction to high addr */
514 slic_write32(adapter, SLIC_REG_WCS, instruction);
515 instruction = *(u32 *)(fw->data + index);
516 index += 4;
517 }
518 }
519 release_firmware(fw);
520 /* Everything OK, kick off the card */
521 mdelay(10);
522
523 slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START);
524 slic_flush_write(adapter);
525 /*
526 * stall for 20 ms, long enough for ucode to init card
527 * and reach mainloop
528 */
529 mdelay(20);
530
531 return 0;
532}
533
534MODULE_FIRMWARE("slicoss/oasisdownload.sys");
535MODULE_FIRMWARE("slicoss/gbdownload.sys");
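/*
 * Assumed layout of the *download.sys images, as implied by the parsing
 * in slic_card_download() above (at most 3 sections, since sectsize[]
 * and sectstart[] are sized 3):
 *
 *	u32 numsects;			// number of code sections
 *	u32 sectsize[numsects];		// section sizes in bytes
 *	u32 sectstart[numsects];	// WCS base address of each section
 *	...				// 8-byte instruction records:
 *					// low u32 then high u32 per address
 */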
536
537static void slic_adapter_set_hwaddr(struct adapter *adapter)
538{
539 struct sliccard *card = adapter->card;
540
541 if ((adapter->card) && (card->config_set)) {
542 memcpy(adapter->macaddr,
543 card->config.MacInfo[adapter->functionnumber].macaddrA,
544 sizeof(struct slic_config_mac));
545 if (is_zero_ether_addr(adapter->currmacaddr))
546 memcpy(adapter->currmacaddr, adapter->macaddr,
547 ETH_ALEN);
548 if (adapter->netdev)
549 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
550 ETH_ALEN);
551 }
552}
553
554static void slic_intagg_set(struct adapter *adapter, u32 value)
555{
556 slic_write32(adapter, SLIC_REG_INTAGG, value);
557 adapter->card->loadlevel_current = value;
558}
559
560static void slic_soft_reset(struct adapter *adapter)
561{
562 if (adapter->card->state == CARD_UP) {
563 slic_write32(adapter, SLIC_REG_QUIESCE, 0);
564 slic_flush_write(adapter);
565 mdelay(1);
566 }
567
568 slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC);
569 slic_flush_write(adapter);
570
571 mdelay(1);
572}
573
574static void slic_mac_address_config(struct adapter *adapter)
575{
576 u32 value;
577 u32 value2;
578
579 value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
580 slic_write32(adapter, SLIC_REG_WRADDRAL, value);
581 slic_write32(adapter, SLIC_REG_WRADDRBL, value);
582
583 value2 = (u32)((adapter->currmacaddr[0] << 8 |
584 adapter->currmacaddr[1]) & 0xFFFF);
585
586 slic_write32(adapter, SLIC_REG_WRADDRAH, value2);
587 slic_write32(adapter, SLIC_REG_WRADDRBH, value2);
588
589 /*
590 * Write our multicast mask out to the card. This is done
591 * here in addition to the slic_mcast_addr_set routine
592 * because ALL_MCAST may have been enabled or disabled
593 */
594 slic_mcast_set_mask(adapter);
595}
596
597static void slic_mac_config(struct adapter *adapter)
598{
599 u32 value;
600
601 /* Setup GMAC gaps */
602 if (adapter->linkspeed == LINK_1000MB) {
603 value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
604 (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
605 (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
606 } else {
607 value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
608 (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
609 (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
610 }
611
612 /* enable GMII */
613 if (adapter->linkspeed == LINK_1000MB)
614 value |= GMCR_GBIT;
615
616 /* enable fullduplex */
617 if ((adapter->linkduplex == LINK_FULLD)
618 || (adapter->macopts & MAC_LOOPBACK)) {
619 value |= GMCR_FULLD;
620 }
621
622 /* write mac config */
623 slic_write32(adapter, SLIC_REG_WMCFG, value);
624
625 /* setup mac addresses */
626 slic_mac_address_config(adapter);
627}
628
629static void slic_config_set(struct adapter *adapter, bool linkchange)
630{
631 u32 value;
632 u32 RcrReset;
633
634 if (linkchange) {
635 /* Setup MAC */
636 slic_mac_config(adapter);
637 RcrReset = GRCR_RESET;
638 } else {
639 slic_mac_address_config(adapter);
640 RcrReset = 0;
641 }
642
643 if (adapter->linkduplex == LINK_FULLD) {
644 /* setup xmtcfg */
645 value = (GXCR_RESET | /* Always reset */
646 GXCR_XMTEN | /* Enable transmit */
647 GXCR_PAUSEEN); /* Enable pause */
648
649 slic_write32(adapter, SLIC_REG_WXCFG, value);
650
651 /* Setup rcvcfg last */
652 value = (RcrReset | /* Reset, if linkchange */
653 GRCR_CTLEN | /* Enable CTL frames */
654 GRCR_ADDRAEN | /* Address A enable */
655 GRCR_RCVBAD | /* Rcv bad frames */
656 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
657 } else {
658 /* setup xmtcfg */
659 value = (GXCR_RESET | /* Always reset */
660 GXCR_XMTEN); /* Enable transmit */
661
662 slic_write32(adapter, SLIC_REG_WXCFG, value);
663
664 /* Setup rcvcfg last */
665 value = (RcrReset | /* Reset, if linkchange */
666 GRCR_ADDRAEN | /* Address A enable */
667 GRCR_RCVBAD | /* Rcv bad frames */
668 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
669 }
670
671 if (adapter->state != ADAPT_DOWN) {
672 /* Only enable receive if we are restarting or running */
673 value |= GRCR_RCVEN;
674 }
675
676 if (adapter->macopts & MAC_PROMISC)
677 value |= GRCR_RCVALL;
678
679 slic_write32(adapter, SLIC_REG_WRCFG, value);
680}
681
682/*
683 * Turn off RCV and XMT, power down PHY
684 */
685static void slic_config_clear(struct adapter *adapter)
686{
687 u32 value;
688 u32 phy_config;
689
690 /* Setup xmtcfg */
691 value = (GXCR_RESET | /* Always reset */
692 GXCR_PAUSEEN); /* Enable pause */
693
694 slic_write32(adapter, SLIC_REG_WXCFG, value);
695
696 value = (GRCR_RESET | /* Always reset */
697 GRCR_CTLEN | /* Enable CTL frames */
698 GRCR_ADDRAEN | /* Address A enable */
699 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
700
701 slic_write32(adapter, SLIC_REG_WRCFG, value);
702
703 /* power down phy */
704 phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
705 slic_write32(adapter, SLIC_REG_WPHY, phy_config);
706}
707
708static bool slic_mac_filter(struct adapter *adapter,
709 struct ether_header *ether_frame)
710{
711 struct net_device *netdev = adapter->netdev;
712 u32 opts = adapter->macopts;
713
714 if (opts & MAC_PROMISC)
715 return true;
716
717 if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
718 if (opts & MAC_BCAST) {
719 adapter->rcv_broadcasts++;
720 return true;
721 }
722
723 return false;
724 }
725
726 if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
727 if (opts & MAC_ALLMCAST) {
728 adapter->rcv_multicasts++;
729 netdev->stats.multicast++;
730 return true;
731 }
732 if (opts & MAC_MCAST) {
733 struct mcast_address *mcaddr = adapter->mcastaddrs;
734
735 while (mcaddr) {
736 if (ether_addr_equal(mcaddr->address,
737 ether_frame->ether_dhost)) {
738 adapter->rcv_multicasts++;
739 netdev->stats.multicast++;
740 return true;
741 }
742 mcaddr = mcaddr->next;
743 }
744
745 return false;
746 }
747
748 return false;
749 }
750 if (opts & MAC_DIRECTED) {
751 adapter->rcv_unicasts++;
752 return true;
753 }
754 return false;
755}
756
757static int slic_mac_set_address(struct net_device *dev, void *ptr)
758{
759 struct adapter *adapter = netdev_priv(dev);
760 struct sockaddr *addr = ptr;
761
762 if (netif_running(dev))
763 return -EBUSY;
764 if (!adapter)
765 return -EBUSY;
766
767 if (!is_valid_ether_addr(addr->sa_data))
768 return -EINVAL;
769
770 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
771 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
772
773 slic_config_set(adapter, true);
774 return 0;
775}
776
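/*
 * Periodic load check used for dynamic interrupt aggregation: map the
 * number of events seen during the last SLIC_LOADTIMER_PERIOD to an
 * aggregation level and, when the level changes, program it via
 * SLIC_REG_INTAGG.  The timer re-arms itself on every run.
 */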
777static void slic_timer_load_check(ulong cardaddr)
778{
779 struct sliccard *card = (struct sliccard *)cardaddr;
780 struct adapter *adapter = card->master;
781 u32 load = card->events;
782 u32 level = 0;
783
784 if ((adapter) && (adapter->state == ADAPT_UP) &&
785 (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
786 if (adapter->devid == SLIC_1GB_DEVICE_ID) {
787 if (adapter->linkspeed == LINK_1000MB)
788 level = 100;
789 else {
790 if (load > SLIC_LOAD_5)
791 level = SLIC_INTAGG_5;
792 else if (load > SLIC_LOAD_4)
793 level = SLIC_INTAGG_4;
794 else if (load > SLIC_LOAD_3)
795 level = SLIC_INTAGG_3;
796 else if (load > SLIC_LOAD_2)
797 level = SLIC_INTAGG_2;
798 else if (load > SLIC_LOAD_1)
799 level = SLIC_INTAGG_1;
800 else
801 level = SLIC_INTAGG_0;
802 }
803 if (card->loadlevel_current != level) {
804 card->loadlevel_current = level;
805 slic_write32(adapter, SLIC_REG_INTAGG, level);
806 }
807 } else {
808 if (load > SLIC_LOAD_5)
809 level = SLIC_INTAGG_5;
810 else if (load > SLIC_LOAD_4)
811 level = SLIC_INTAGG_4;
812 else if (load > SLIC_LOAD_3)
813 level = SLIC_INTAGG_3;
814 else if (load > SLIC_LOAD_2)
815 level = SLIC_INTAGG_2;
816 else if (load > SLIC_LOAD_1)
817 level = SLIC_INTAGG_1;
818 else
819 level = SLIC_INTAGG_0;
820 if (card->loadlevel_current != level) {
821 card->loadlevel_current = level;
822 slic_write32(adapter, SLIC_REG_INTAGG, level);
823 }
824 }
825 }
826 card->events = 0;
827 card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
828 add_timer(&card->loadtimer);
829}
830
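/*
 * Utility Processor Request (UPR) handling: requests to the card's
 * utility processor are kept on adapter->upr_list and issued one at a
 * time (upr_busy).  Completion is signalled asynchronously through the
 * ISR_UPC/ISR_UPCERR/ISR_UPCBSY bits and handled in
 * slic_upr_request_complete().
 */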
831static int slic_upr_queue_request(struct adapter *adapter,
832 u32 upr_request,
833 u32 upr_data,
834 u32 upr_data_h,
835 u32 upr_buffer, u32 upr_buffer_h)
836{
837 struct slic_upr *upr;
838 struct slic_upr *uprqueue;
839
840 upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
841 if (!upr)
842 return -ENOMEM;
843
844 upr->adapter = adapter->port;
845 upr->upr_request = upr_request;
846 upr->upr_data = upr_data;
847 upr->upr_buffer = upr_buffer;
848 upr->upr_data_h = upr_data_h;
849 upr->upr_buffer_h = upr_buffer_h;
850 upr->next = NULL;
851 if (adapter->upr_list) {
852 uprqueue = adapter->upr_list;
853
854 while (uprqueue->next)
855 uprqueue = uprqueue->next;
856 uprqueue->next = upr;
857 } else {
858 adapter->upr_list = upr;
859 }
860 return 0;
861}
862
863static void slic_upr_start(struct adapter *adapter)
864{
865 struct slic_upr *upr;
866
867 upr = adapter->upr_list;
868 if (!upr)
869 return;
870 if (adapter->upr_busy)
871 return;
872 adapter->upr_busy = 1;
873
874 switch (upr->upr_request) {
875 case SLIC_UPR_STATS:
876 if (upr->upr_data_h == 0) {
877 slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data);
878 } else {
879 slic_write64(adapter, SLIC_REG_RSTAT64, upr->upr_data,
880 upr->upr_data_h);
881 }
882 break;
883
884 case SLIC_UPR_RLSR:
885 slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data,
886 upr->upr_data_h);
887 break;
888
889 case SLIC_UPR_RCONFIG:
890 slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data,
891 upr->upr_data_h);
892 break;
893 case SLIC_UPR_PING:
894 slic_write32(adapter, SLIC_REG_PING, 1);
895 break;
896 }
897 slic_flush_write(adapter);
898}
899
900static int slic_upr_request(struct adapter *adapter,
901 u32 upr_request,
902 u32 upr_data,
903 u32 upr_data_h,
904 u32 upr_buffer, u32 upr_buffer_h)
905{
906 unsigned long flags;
907 int rc;
908
909 spin_lock_irqsave(&adapter->upr_lock, flags);
910 rc = slic_upr_queue_request(adapter,
911 upr_request,
912 upr_data,
913 upr_data_h, upr_buffer, upr_buffer_h);
914 if (rc)
915 goto err_unlock_irq;
916
917 slic_upr_start(adapter);
918err_unlock_irq:
919 spin_unlock_irqrestore(&adapter->upr_lock, flags);
920 return rc;
921}
922
923static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
924{
925 struct slic_shmemory *sm = &adapter->shmem;
926 struct slic_shmem_data *sm_data = sm->shmem_data;
927 u32 lst = sm_data->lnkstatus;
928 uint linkup;
929 unsigned char linkspeed;
930 unsigned char linkduplex;
931
932 if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
933 dma_addr_t phaddr = sm->lnkstatus_phaddr;
934
935 slic_upr_queue_request(adapter, SLIC_UPR_RLSR,
936 cpu_to_le32(lower_32_bits(phaddr)),
937 cpu_to_le32(upper_32_bits(phaddr)),
938 0, 0);
939 return;
940 }
941 if (adapter->state != ADAPT_UP)
942 return;
943
944 linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN;
945 if (lst & GIG_SPEED_1000)
946 linkspeed = LINK_1000MB;
947 else if (lst & GIG_SPEED_100)
948 linkspeed = LINK_100MB;
949 else
950 linkspeed = LINK_10MB;
951
952 if (lst & GIG_FULLDUPLEX)
953 linkduplex = LINK_FULLD;
954 else
955 linkduplex = LINK_HALFD;
956
957 if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
958 return;
959
960 /* link up event, but nothing has changed */
961 if ((adapter->linkstate == LINK_UP) &&
962 (linkup == LINK_UP) &&
963 (adapter->linkspeed == linkspeed) &&
964 (adapter->linkduplex == linkduplex))
965 return;
966
967 /* link has changed at this point */
968
969 /* link has gone from up to down */
970 if (linkup == LINK_DOWN) {
971 adapter->linkstate = LINK_DOWN;
972 netif_carrier_off(adapter->netdev);
973 return;
974 }
975
976 /* link has gone from down to up */
977 adapter->linkspeed = linkspeed;
978 adapter->linkduplex = linkduplex;
979
980 if (adapter->linkstate != LINK_UP) {
981 /* setup the mac */
982 slic_config_set(adapter, true);
983 adapter->linkstate = LINK_UP;
984 netif_carrier_on(adapter->netdev);
985 }
986}
987
988static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
989{
990 struct sliccard *card = adapter->card;
991 struct slic_upr *upr;
992 unsigned long flags;
993
994 spin_lock_irqsave(&adapter->upr_lock, flags);
995 upr = adapter->upr_list;
996 if (!upr) {
997 spin_unlock_irqrestore(&adapter->upr_lock, flags);
998 return;
999 }
1000 adapter->upr_list = upr->next;
1001 upr->next = NULL;
1002 adapter->upr_busy = 0;
1003 switch (upr->upr_request) {
1004 case SLIC_UPR_STATS: {
1005 struct slic_shmemory *sm = &adapter->shmem;
1006 struct slic_shmem_data *sm_data = sm->shmem_data;
1007 struct slic_stats *stats = &sm_data->stats;
1008 struct slic_stats *old = &adapter->inicstats_prev;
1009 struct slicnet_stats *stst = &adapter->slic_stats;
1010
1011 if (isr & ISR_UPCERR) {
1012 dev_err(&adapter->netdev->dev,
1013 "SLIC_UPR_STATS command failed isr[%x]\n", isr);
1014 break;
1015 }
1016
1017 UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs,
1018 old->xmit_tcp_segs);
1019
1020 UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes,
1021 old->xmit_tcp_bytes);
1022
1023 UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs,
1024 old->rcv_tcp_segs);
1025
1026 UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes,
1027 old->rcv_tcp_bytes);
1028
1029 UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes,
1030 old->xmit_bytes);
1031
1032 UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts,
1033 old->xmit_unicasts);
1034
1035 UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes,
1036 old->rcv_bytes);
1037
1038 UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts,
1039 old->rcv_unicasts);
1040
1041 UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions,
1042 old->xmit_collisions);
1043
1044 UPDATE_STATS_GB(stst->iface.xmt_errors,
1045 stats->xmit_excess_collisions,
1046 old->xmit_excess_collisions);
1047
1048 UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error,
1049 old->xmit_other_error);
1050
1051 UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error,
1052 old->rcv_other_error);
1053
1054 UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops,
1055 old->rcv_drops);
1056
1057 if (stats->rcv_drops > old->rcv_drops)
1058 adapter->rcv_drops += (stats->rcv_drops -
1059 old->rcv_drops);
1060 memcpy_fromio(old, stats, sizeof(*stats));
1061 break;
1062 }
1063 case SLIC_UPR_RLSR:
1064 slic_link_upr_complete(adapter, isr);
1065 break;
1066 case SLIC_UPR_RCONFIG:
1067 break;
1068 case SLIC_UPR_PING:
1069 card->pingstatus |= (isr & ISR_PINGDSMASK);
1070 break;
1071 }
1072 kfree(upr);
1073 slic_upr_start(adapter);
1074 spin_unlock_irqrestore(&adapter->upr_lock, flags);
1075}
1076
1077static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
1078{
1079 return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h,
1080 0, 0);
1081}
1082
1083/*
1084 * Compute a checksum of the EEPROM according to RFC 1071.
1085 */
1086static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
1087{
1088 u16 *wp = eeprom;
1089 u32 checksum = 0;
1090
1091 while (len > 1) {
1092 checksum += *(wp++);
1093 len -= 2;
1094 }
1095
1096 if (len > 0)
1097 checksum += *(u8 *)wp;
1098
1099 while (checksum >> 16)
1100 checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF);
1101
1102 return ~checksum;
1103}
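/*
 * Worked example with hypothetical values: summing the 16-bit words
 * 0xF0F0 and 0xF0F0 gives 0x1E1E0; folding the carry back in yields
 * 0xE1E0 + 0x1 = 0xE1E1, and the returned checksum is the one's
 * complement, ~0xE1E1 = 0x1E1E.  slic_card_init() recomputes this over
 * the first (eecodesize - 2) bytes and compares it with the checksum
 * word stored at the end of the EEPROM image.
 */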
1104
1105static void slic_rspqueue_free(struct adapter *adapter)
1106{
1107 int i;
1108 struct slic_rspqueue *rspq = &adapter->rspqueue;
1109
1110 for (i = 0; i < rspq->num_pages; i++) {
1111 if (rspq->vaddr[i]) {
1112 pci_free_consistent(adapter->pcidev, PAGE_SIZE,
1113 rspq->vaddr[i], rspq->paddr[i]);
1114 }
1115 rspq->vaddr[i] = NULL;
1116 rspq->paddr[i] = 0;
1117 }
1118 rspq->offset = 0;
1119 rspq->pageindex = 0;
1120 rspq->rspbuf = NULL;
1121}
1122
1123static int slic_rspqueue_init(struct adapter *adapter)
1124{
1125 int i;
1126 struct slic_rspqueue *rspq = &adapter->rspqueue;
1127 u32 paddrh = 0;
1128
1129 memset(rspq, 0, sizeof(struct slic_rspqueue));
1130
1131 rspq->num_pages = SLIC_RSPQ_PAGES_GB;
1132
1133 for (i = 0; i < rspq->num_pages; i++) {
1134 rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
1135 PAGE_SIZE,
1136 &rspq->paddr[i]);
1137 if (!rspq->vaddr[i]) {
1138 dev_err(&adapter->pcidev->dev,
1139 "pci_alloc_consistent failed\n");
1140 slic_rspqueue_free(adapter);
1141 return -ENOMEM;
1142 }
1143
1144 if (paddrh == 0) {
1145 slic_write32(adapter, SLIC_REG_RBAR,
1146 rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE);
1147 } else {
1148 slic_write64(adapter, SLIC_REG_RBAR64,
1149 rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE,
1150 paddrh);
1151 }
1152 }
1153 rspq->offset = 0;
1154 rspq->pageindex = 0;
1155 rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
1156 return 0;
1157}
1158
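/*
 * Return the next completed response buffer, or NULL if none is ready.
 * When the last buffer in a page has been consumed, hand the page back
 * to the card via SLIC_REG_RBAR64 and advance to the next page of the
 * ring.
 */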
1159static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
1160{
1161 struct slic_rspqueue *rspq = &adapter->rspqueue;
1162 struct slic_rspbuf *buf;
1163
1164 if (!(rspq->rspbuf->status))
1165 return NULL;
1166
1167 buf = rspq->rspbuf;
1168 if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
1169 rspq->rspbuf++;
1170 } else {
1171 slic_write64(adapter, SLIC_REG_RBAR64,
1172 rspq->paddr[rspq->pageindex] |
1173 SLIC_RSPQ_BUFSINPAGE, 0);
1174 rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
1175 rspq->offset = 0;
1176 rspq->rspbuf = (struct slic_rspbuf *)
1177 rspq->vaddr[rspq->pageindex];
1178 }
1179
1180 return buf;
1181}
1182
1183static void slic_cmdqmem_free(struct adapter *adapter)
1184{
1185 struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
1186 int i;
1187
1188 for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
1189 if (cmdqmem->pages[i]) {
1190 pci_free_consistent(adapter->pcidev,
1191 PAGE_SIZE,
1192 (void *)cmdqmem->pages[i],
1193 cmdqmem->dma_pages[i]);
1194 }
1195 }
1196 memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
1197}
1198
1199static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
1200{
1201 struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
1202 u32 *pageaddr;
1203
1204 if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
1205 return NULL;
1206 pageaddr = pci_alloc_consistent(adapter->pcidev,
1207 PAGE_SIZE,
1208 &cmdqmem->dma_pages[cmdqmem->pagecnt]);
1209 if (!pageaddr)
1210 return NULL;
1211
1212 cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
1213 cmdqmem->pagecnt++;
1214 return pageaddr;
1215}
1216
1217static void slic_cmdq_free(struct adapter *adapter)
1218{
1219 struct slic_hostcmd *cmd;
1220
1221 cmd = adapter->cmdq_all.head;
1222 while (cmd) {
1223 if (cmd->busy) {
1224 struct sk_buff *tempskb;
1225
1226 tempskb = cmd->skb;
1227 if (tempskb) {
1228 cmd->skb = NULL;
1229 dev_kfree_skb_irq(tempskb);
1230 }
1231 }
1232 cmd = cmd->next_all;
1233 }
1234 memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
1235 memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
1236 memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
1237 slic_cmdqmem_free(adapter);
1238}
1239
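/*
 * Carve a freshly allocated DMA page into SLIC_CMDQ_CMDSINPAGE host
 * command slots.  Each slot gets a slic_handle (the card refers to
 * commands by handle token rather than by pointer) and is linked onto
 * both the "all" and "free" command queues.
 */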
1240static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
1241{
1242 struct slic_hostcmd *cmd;
1243 struct slic_hostcmd *prev;
1244 struct slic_hostcmd *tail;
1245 struct slic_cmdqueue *cmdq;
1246 int cmdcnt;
1247 void *cmdaddr;
1248 ulong phys_addr;
1249 u32 phys_addrl;
1250 u32 phys_addrh;
1251 struct slic_handle *pslic_handle;
1252 unsigned long flags;
1253
1254 cmdaddr = page;
1255 cmd = cmdaddr;
1256 cmdcnt = 0;
1257
1258 phys_addr = virt_to_bus((void *)page);
1259 phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
1260 phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
1261
1262 prev = NULL;
1263 tail = cmd;
1264 while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
1265 (adapter->slic_handle_ix < 256)) {
1266 /* Allocate and initialize a SLIC_HANDLE for this command */
1267 spin_lock_irqsave(&adapter->handle_lock, flags);
1268 pslic_handle = adapter->pfree_slic_handles;
1269 adapter->pfree_slic_handles = pslic_handle->next;
1270 spin_unlock_irqrestore(&adapter->handle_lock, flags);
1271 pslic_handle->type = SLIC_HANDLE_CMD;
1272 pslic_handle->address = (void *)cmd;
1273 pslic_handle->offset = (ushort)adapter->slic_handle_ix++;
1274 pslic_handle->other_handle = NULL;
1275 pslic_handle->next = NULL;
1276
1277 cmd->pslic_handle = pslic_handle;
1278 cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
1279 cmd->busy = false;
1280 cmd->paddrl = phys_addrl;
1281 cmd->paddrh = phys_addrh;
1282 cmd->next_all = prev;
1283 cmd->next = prev;
1284 prev = cmd;
1285 phys_addrl += SLIC_HOSTCMD_SIZE;
1286 cmdaddr += SLIC_HOSTCMD_SIZE;
1287
1288 cmd = cmdaddr;
1289 cmdcnt++;
1290 }
1291
1292 cmdq = &adapter->cmdq_all;
1293 cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
1294 tail->next_all = cmdq->head;
1295 cmdq->head = prev;
1296 cmdq = &adapter->cmdq_free;
1297 spin_lock_irqsave(&cmdq->lock, flags);
1298 cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
1299 tail->next = cmdq->head;
1300 cmdq->head = prev;
1301 spin_unlock_irqrestore(&cmdq->lock, flags);
1302}
1303
1304static int slic_cmdq_init(struct adapter *adapter)
1305{
1306 int i;
1307 u32 *pageaddr;
1308
1309 memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
1310 memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
1311 memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
1312 spin_lock_init(&adapter->cmdq_all.lock);
1313 spin_lock_init(&adapter->cmdq_free.lock);
1314 spin_lock_init(&adapter->cmdq_done.lock);
1315 memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem));
1316 adapter->slic_handle_ix = 1;
1317 for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
1318 pageaddr = slic_cmdqmem_addpage(adapter);
1319 if (!pageaddr) {
1320 slic_cmdq_free(adapter);
1321 return -ENOMEM;
1322 }
1323 slic_cmdq_addcmdpage(adapter, pageaddr);
1324 }
1325 adapter->slic_handle_ix = 1;
1326
1327 return 0;
1328}
1329
1330static void slic_cmdq_reset(struct adapter *adapter)
1331{
1332 struct slic_hostcmd *hcmd;
1333 struct sk_buff *skb;
1334 u32 outstanding;
1335 unsigned long flags;
1336
1337 spin_lock_irqsave(&adapter->cmdq_free.lock, flags);
1338 spin_lock(&adapter->cmdq_done.lock);
1339 outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
1340 outstanding -= adapter->cmdq_free.count;
1341 hcmd = adapter->cmdq_all.head;
1342 while (hcmd) {
1343 if (hcmd->busy) {
1344 skb = hcmd->skb;
1345 hcmd->busy = 0;
1346 hcmd->skb = NULL;
1347 dev_kfree_skb_irq(skb);
1348 }
1349 hcmd = hcmd->next_all;
1350 }
1351 adapter->cmdq_free.count = 0;
1352 adapter->cmdq_free.head = NULL;
1353 adapter->cmdq_free.tail = NULL;
1354 adapter->cmdq_done.count = 0;
1355 adapter->cmdq_done.head = NULL;
1356 adapter->cmdq_done.tail = NULL;
1357 adapter->cmdq_free.head = adapter->cmdq_all.head;
1358 hcmd = adapter->cmdq_all.head;
1359 while (hcmd) {
1360 adapter->cmdq_free.count++;
1361 hcmd->next = hcmd->next_all;
1362 hcmd = hcmd->next_all;
1363 }
1364 if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
1365 dev_err(&adapter->netdev->dev,
1366 "free_count %d != all count %d\n",
1367 adapter->cmdq_free.count, adapter->cmdq_all.count);
1368 }
1369 spin_unlock(&adapter->cmdq_done.lock);
1370 spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags);
1371}
1372
1373static void slic_cmdq_getdone(struct adapter *adapter)
1374{
1375 struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
1376 struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
1377 unsigned long flags;
1378
1379 spin_lock_irqsave(&done_cmdq->lock, flags);
1380
1381 free_cmdq->head = done_cmdq->head;
1382 free_cmdq->count = done_cmdq->count;
1383 done_cmdq->head = NULL;
1384 done_cmdq->tail = NULL;
1385 done_cmdq->count = 0;
1386 spin_unlock_irqrestore(&done_cmdq->lock, flags);
1387}
1388
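/*
 * Get a free host command: pop one from the free queue, refill the free
 * queue from the done queue when it runs dry, and as a last resort grow
 * the pool by another command page.
 */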
1389static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
1390{
1391 struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
1392 struct slic_hostcmd *cmd = NULL;
1393 unsigned long flags;
1394
1395lock_and_retry:
1396 spin_lock_irqsave(&cmdq->lock, flags);
1397retry:
1398 cmd = cmdq->head;
1399 if (cmd) {
1400 cmdq->head = cmd->next;
1401 cmdq->count--;
1402 spin_unlock_irqrestore(&cmdq->lock, flags);
1403 } else {
1404 slic_cmdq_getdone(adapter);
1405 cmd = cmdq->head;
1406 if (cmd) {
1407 goto retry;
1408 } else {
1409 u32 *pageaddr;
1410
1411 spin_unlock_irqrestore(&cmdq->lock, flags);
1412 pageaddr = slic_cmdqmem_addpage(adapter);
1413 if (pageaddr) {
1414 slic_cmdq_addcmdpage(adapter, pageaddr);
1415 goto lock_and_retry;
1416 }
1417 }
1418 }
1419 return cmd;
1420}
1421
1422static void slic_cmdq_putdone_irq(struct adapter *adapter,
1423 struct slic_hostcmd *cmd)
1424{
1425 struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
1426
1427 spin_lock(&cmdq->lock);
1428 cmd->busy = 0;
1429 cmd->next = cmdq->head;
1430 cmdq->head = cmd;
1431 cmdq->count++;
1432 if ((adapter->xmitq_full) && (cmdq->count > 10))
1433 netif_wake_queue(adapter->netdev);
1434 spin_unlock(&cmdq->lock);
1435}
1436
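/*
 * Post up to SLIC_RCVQ_FILLENTRIES receive buffers to the card: each
 * skb is DMA-mapped and its bus address written to SLIC_REG_HBAR (or
 * SLIC_REG_HBAR64 when the upper 32 bits are non-zero), then tracked on
 * the software rcvq list until the card hands it back.
 */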
1437static int slic_rcvqueue_fill(struct adapter *adapter)
1438{
1439 void *paddr;
1440 u32 paddrl;
1441 u32 paddrh;
1442 struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
1443 int i = 0;
1444 struct device *dev = &adapter->netdev->dev;
1445
1446 while (i < SLIC_RCVQ_FILLENTRIES) {
1447 struct slic_rcvbuf *rcvbuf;
1448 struct sk_buff *skb;
1449#ifdef KLUDGE_FOR_4GB_BOUNDARY
1450retry_rcvqfill:
1451#endif
1452 skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
1453 if (skb) {
1454 paddr = (void *)(unsigned long)
1455 pci_map_single(adapter->pcidev,
1456 skb->data,
1457 SLIC_RCVQ_RCVBUFSIZE,
1458 PCI_DMA_FROMDEVICE);
1459 paddrl = SLIC_GET_ADDR_LOW(paddr);
1460 paddrh = SLIC_GET_ADDR_HIGH(paddr);
1461
1462 skb->len = SLIC_RCVBUF_HEADSIZE;
1463 rcvbuf = (struct slic_rcvbuf *)skb->head;
1464 rcvbuf->status = 0;
1465 skb->next = NULL;
1466#ifdef KLUDGE_FOR_4GB_BOUNDARY
1467 if (paddrl == 0) {
1468 dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
1469 __func__);
1470 dev_err(dev, "skb[%p] PROBLEM\n", skb);
1471 dev_err(dev, " skbdata[%p]\n",
1472 skb->data);
1473 dev_err(dev, " skblen[%x]\n", skb->len);
1474 dev_err(dev, " paddr[%p]\n", paddr);
1475 dev_err(dev, " paddrl[%x]\n", paddrl);
1476 dev_err(dev, " paddrh[%x]\n", paddrh);
1477 dev_err(dev, " rcvq->head[%p]\n",
1478 rcvq->head);
1479 dev_err(dev, " rcvq->tail[%p]\n",
1480 rcvq->tail);
1481 dev_err(dev, " rcvq->count[%x]\n",
1482 rcvq->count);
1483 dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
1484 goto retry_rcvqfill;
1485 }
1486#else
1487 if (paddrl == 0) {
1488 dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
1489 __func__);
1490 dev_err(dev, "skb[%p] PROBLEM\n", skb);
1491 dev_err(dev, " skbdata[%p]\n",
1492 skb->data);
1493 dev_err(dev, " skblen[%x]\n", skb->len);
1494 dev_err(dev, " paddr[%p]\n", paddr);
1495 dev_err(dev, " paddrl[%x]\n", paddrl);
1496 dev_err(dev, " paddrh[%x]\n", paddrh);
1497 dev_err(dev, " rcvq->head[%p]\n",
1498 rcvq->head);
1499 dev_err(dev, " rcvq->tail[%p]\n",
1500 rcvq->tail);
1501 dev_err(dev, " rcvq->count[%x]\n",
1502 rcvq->count);
1503 dev_err(dev, "GIVE TO CARD ANYWAY\n");
1504 }
1505#endif
1506 if (paddrh == 0) {
1507 slic_write32(adapter, SLIC_REG_HBAR,
1508 (u32)paddrl);
1509 } else {
1510 slic_write64(adapter, SLIC_REG_HBAR64, paddrl,
1511 paddrh);
1512 }
1513 if (rcvq->head)
1514 rcvq->tail->next = skb;
1515 else
1516 rcvq->head = skb;
1517 rcvq->tail = skb;
1518 rcvq->count++;
1519 i++;
1520 } else {
1521 dev_err(&adapter->netdev->dev,
1522 "slic_rcvqueue_fill could only get [%d] skbuffs\n",
1523 i);
1524 break;
1525 }
1526 }
1527 return i;
1528}
1529
1530static void slic_rcvqueue_free(struct adapter *adapter)
1531{
1532 struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
1533 struct sk_buff *skb;
1534
1535 while (rcvq->head) {
1536 skb = rcvq->head;
1537 rcvq->head = rcvq->head->next;
1538 dev_kfree_skb(skb);
1539 }
1540 rcvq->tail = NULL;
1541 rcvq->head = NULL;
1542 rcvq->count = 0;
1543}
1544
1545static int slic_rcvqueue_init(struct adapter *adapter)
1546{
1547 int i, count;
1548 struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
1549
1550 rcvq->tail = NULL;
1551 rcvq->head = NULL;
1552 rcvq->size = SLIC_RCVQ_ENTRIES;
1553 rcvq->errors = 0;
1554 rcvq->count = 0;
1555 i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
1556 count = 0;
1557 while (i) {
1558 count += slic_rcvqueue_fill(adapter);
1559 i--;
1560 }
1561 if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
1562 slic_rcvqueue_free(adapter);
1563 return -ENOMEM;
1564 }
1565 return 0;
1566}
1567
1568static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
1569{
1570 struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
1571 struct sk_buff *skb;
1572 struct slic_rcvbuf *rcvbuf;
1573 int count;
1574
1575 if (rcvq->count) {
1576 skb = rcvq->head;
1577 rcvbuf = (struct slic_rcvbuf *)skb->head;
1578
1579 if (rcvbuf->status & IRHDDR_SVALID) {
1580 rcvq->head = rcvq->head->next;
1581 skb->next = NULL;
1582 rcvq->count--;
1583 } else {
1584 skb = NULL;
1585 }
1586 } else {
1587 dev_err(&adapter->netdev->dev,
1588 "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
1589 skb = NULL;
1590 }
1591 while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
1592 count = slic_rcvqueue_fill(adapter);
1593 if (!count)
1594 break;
1595 }
1596 if (skb)
1597 rcvq->errors = 0;
1598 return skb;
1599}
1600
1601static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
1602{
1603 struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
1604 void *paddr;
1605 u32 paddrl;
1606 u32 paddrh;
1607 struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
1608 struct device *dev;
1609
1610 paddr = (void *)(unsigned long)
1611 pci_map_single(adapter->pcidev, skb->head,
1612 SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
1613 rcvbuf->status = 0;
1614 skb->next = NULL;
1615
1616 paddrl = SLIC_GET_ADDR_LOW(paddr);
1617 paddrh = SLIC_GET_ADDR_HIGH(paddr);
1618
1619 if (paddrl == 0) {
1620 dev = &adapter->netdev->dev;
1621 dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
1622 __func__);
1623 dev_err(dev, "skb[%p] PROBLEM\n", skb);
1624 dev_err(dev, " skbdata[%p]\n", skb->data);
1625 dev_err(dev, " skblen[%x]\n", skb->len);
1626 dev_err(dev, " paddr[%p]\n", paddr);
1627 dev_err(dev, " paddrl[%x]\n", paddrl);
1628 dev_err(dev, " paddrh[%x]\n", paddrh);
1629 dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
1630 dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
1631 dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
1632 }
1633 if (paddrh == 0)
1634 slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl);
1635 else
1636 slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh);
1637 if (rcvq->head)
1638 rcvq->tail->next = skb;
1639 else
1640 rcvq->head = skb;
1641 rcvq->tail = skb;
1642 rcvq->count++;
1643 return rcvq->count;
1644}
1645
1646/*
1647 * slic_link_event_handler -
1648 *
1649 * Initiate a link configuration sequence. The link configuration begins
1650 * by issuing a READ_LINK_STATUS command to the Utility Processor on the
 1651 * SLIC. Since the command finishes asynchronously, the slic_upr_complete
 1652 * routine will follow it up with a UP configuration write command, which
1653 * will also complete asynchronously.
1654 *
1655 */
1656static int slic_link_event_handler(struct adapter *adapter)
1657{
1658 int status;
1659 struct slic_shmemory *sm = &adapter->shmem;
1660 dma_addr_t phaddr = sm->lnkstatus_phaddr;
1661
1662 if (adapter->state != ADAPT_UP) {
1663 /* Adapter is not operational. Ignore. */
1664 return -ENODEV;
1665 }
1666 /* no 4GB wrap guaranteed */
1667 status = slic_upr_request(adapter, SLIC_UPR_RLSR,
1668 cpu_to_le32(lower_32_bits(phaddr)),
1669 cpu_to_le32(upper_32_bits(phaddr)), 0, 0);
1670 return status;
1671}
1672
1673static void slic_init_cleanup(struct adapter *adapter)
1674{
1675 if (adapter->intrregistered) {
1676 adapter->intrregistered = 0;
1677 free_irq(adapter->netdev->irq, adapter->netdev);
1678 }
1679
1680 if (adapter->shmem.shmem_data) {
1681 struct slic_shmemory *sm = &adapter->shmem;
1682 struct slic_shmem_data *sm_data = sm->shmem_data;
1683
1684 pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data,
1685 sm->isr_phaddr);
1686 }
1687
1688 if (adapter->pingtimerset) {
1689 adapter->pingtimerset = 0;
1690 del_timer(&adapter->pingtimer);
1691 }
1692
1693 slic_rspqueue_free(adapter);
1694 slic_cmdq_free(adapter);
1695 slic_rcvqueue_free(adapter);
1696}
1697
1698/*
1699 * Allocate a mcast_address structure to hold the multicast address.
1700 * Link it in.
1701 */
1702static int slic_mcast_add_list(struct adapter *adapter, char *address)
1703{
1704 struct mcast_address *mcaddr, *mlist;
1705
1706 /* Check to see if it already exists */
1707 mlist = adapter->mcastaddrs;
1708 while (mlist) {
1709 if (ether_addr_equal(mlist->address, address))
1710 return 0;
1711 mlist = mlist->next;
1712 }
1713
1714 /* Doesn't already exist. Allocate a structure to hold it */
1715 mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);
1716 if (!mcaddr)
1717 return 1;
1718
1719 ether_addr_copy(mcaddr->address, address);
1720
1721 mcaddr->next = adapter->mcastaddrs;
1722 adapter->mcastaddrs = mcaddr;
1723
1724 return 0;
1725}
1726
1727static void slic_mcast_set_list(struct net_device *dev)
1728{
1729 struct adapter *adapter = netdev_priv(dev);
1730 int status = 0;
1731 char *addresses;
1732 struct netdev_hw_addr *ha;
1733
1734 netdev_for_each_mc_addr(ha, dev) {
1735 addresses = (char *)&ha->addr;
1736 status = slic_mcast_add_list(adapter, addresses);
1737 if (status != 0)
1738 break;
1739 slic_mcast_set_bit(adapter, addresses);
1740 }
1741
1742 if (adapter->devflags_prev != dev->flags) {
1743 adapter->macopts = MAC_DIRECTED;
1744 if (dev->flags) {
1745 if (dev->flags & IFF_BROADCAST)
1746 adapter->macopts |= MAC_BCAST;
1747 if (dev->flags & IFF_PROMISC)
1748 adapter->macopts |= MAC_PROMISC;
1749 if (dev->flags & IFF_ALLMULTI)
1750 adapter->macopts |= MAC_ALLMCAST;
1751 if (dev->flags & IFF_MULTICAST)
1752 adapter->macopts |= MAC_MCAST;
1753 }
1754 adapter->devflags_prev = dev->flags;
1755 slic_config_set(adapter, true);
1756 } else {
1757 if (status == 0)
1758 slic_mcast_set_mask(adapter);
1759 }
1760}
1761
1762#define XMIT_FAIL_LINK_STATE 1
1763#define XMIT_FAIL_ZERO_LENGTH 2
1764#define XMIT_FAIL_HOSTCMD_FAIL 3
1765
1766static void slic_xmit_build_request(struct adapter *adapter,
1767 struct slic_hostcmd *hcmd, struct sk_buff *skb)
1768{
1769 struct slic_host64_cmd *ihcmd;
1770 ulong phys_addr;
1771
1772 ihcmd = &hcmd->cmd64;
1773
1774 ihcmd->flags = adapter->port << IHFLG_IFSHFT;
1775 ihcmd->command = IHCMD_XMT_REQ;
1776 ihcmd->u.slic_buffers.totlen = skb->len;
1777 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
1778 PCI_DMA_TODEVICE);
1779 if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
1780 kfree_skb(skb);
1781 dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
1782 return;
1783 }
1784 ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
1785 ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
1786 ihcmd->u.slic_buffers.bufs[0].length = skb->len;
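	/*
	 * cmdsize is the command length in 32-byte units, rounded up:
	 * the byte offset of bufs[1] within the command, plus 31, shifted
	 * right by 5.
	 */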
1787#if BITS_PER_LONG == 64
1788 hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] -
1789 (u64)hcmd) + 31) >> 5);
1790#else
1791 hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] -
1792 (u32)hcmd) + 31) >> 5;
1793#endif
1794}
1795
1796static void slic_xmit_fail(struct adapter *adapter,
1797 struct sk_buff *skb,
1798 void *cmd, u32 skbtype, u32 status)
1799{
1800 if (adapter->xmitq_full)
1801 netif_stop_queue(adapter->netdev);
1802 if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
1803 switch (status) {
1804 case XMIT_FAIL_LINK_STATE:
1805 dev_err(&adapter->netdev->dev,
1806 "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
1807 skb, skb->pkt_type,
1808 SLIC_LINKSTATE(adapter->linkstate),
1809 SLIC_ADAPTER_STATE(adapter->state),
1810 adapter->state,
1811 SLIC_CARD_STATE(adapter->card->state),
1812 adapter->card->state);
1813 break;
1814 case XMIT_FAIL_ZERO_LENGTH:
1815 dev_err(&adapter->netdev->dev,
1816 "xmit_start skb->len == 0 skb[%p] type[%x]\n",
1817 skb, skb->pkt_type);
1818 break;
1819 case XMIT_FAIL_HOSTCMD_FAIL:
1820 dev_err(&adapter->netdev->dev,
1821 "xmit_start skb[%p] type[%x] No host commands available\n",
1822 skb, skb->pkt_type);
1823 break;
1824 }
1825 }
1826 dev_kfree_skb(skb);
1827 adapter->netdev->stats.tx_dropped++;
1828}
1829
1830static void slic_rcv_handle_error(struct adapter *adapter,
1831 struct slic_rcvbuf *rcvbuf)
1832{
1833 struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
1834 struct net_device *netdev = adapter->netdev;
1835
1836 if (adapter->devid != SLIC_1GB_DEVICE_ID) {
1837 if (hdr->frame_status14 & VRHSTAT_802OE)
1838 adapter->if_events.oflow802++;
1839 if (hdr->frame_status14 & VRHSTAT_TPOFLO)
1840 adapter->if_events.Tprtoflow++;
1841 if (hdr->frame_status_b14 & VRHSTATB_802UE)
1842 adapter->if_events.uflow802++;
1843 if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
1844 adapter->if_events.rcvearly++;
1845 netdev->stats.rx_fifo_errors++;
1846 }
1847 if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
1848 adapter->if_events.Bufov++;
1849 netdev->stats.rx_over_errors++;
1850 }
1851 if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
1852 adapter->if_events.Carre++;
1853 netdev->stats.tx_carrier_errors++;
1854 }
1855 if (hdr->frame_status_b14 & VRHSTATB_LONGE)
1856 adapter->if_events.Longe++;
1857 if (hdr->frame_status_b14 & VRHSTATB_PREA)
1858 adapter->if_events.Invp++;
1859 if (hdr->frame_status_b14 & VRHSTATB_CRC) {
1860 adapter->if_events.Crc++;
1861 netdev->stats.rx_crc_errors++;
1862 }
1863 if (hdr->frame_status_b14 & VRHSTATB_DRBL)
1864 adapter->if_events.Drbl++;
1865 if (hdr->frame_status_b14 & VRHSTATB_CODE)
1866 adapter->if_events.Code++;
1867 if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
1868 adapter->if_events.TpCsum++;
1869 if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
1870 adapter->if_events.TpHlen++;
1871 if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
1872 adapter->if_events.IpCsum++;
1873 if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
1874 adapter->if_events.IpLen++;
1875 if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
1876 adapter->if_events.IpHlen++;
1877 } else {
1878 if (hdr->frame_statusGB & VGBSTAT_XPERR) {
1879 u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
1880
1881 if (xerr == VGBSTAT_XCSERR)
1882 adapter->if_events.TpCsum++;
1883 if (xerr == VGBSTAT_XUFLOW)
1884 adapter->if_events.Tprtoflow++;
1885 if (xerr == VGBSTAT_XHLEN)
1886 adapter->if_events.TpHlen++;
1887 }
1888 if (hdr->frame_statusGB & VGBSTAT_NETERR) {
 1889 u32 nerr = (hdr->frame_statusGB >>
 1890     VGBSTAT_NERRSHFT) &
 1891     VGBSTAT_NERRMSK;
 1892 
1893 if (nerr == VGBSTAT_NCSERR)
1894 adapter->if_events.IpCsum++;
1895 if (nerr == VGBSTAT_NUFLOW)
1896 adapter->if_events.IpLen++;
1897 if (nerr == VGBSTAT_NHLEN)
1898 adapter->if_events.IpHlen++;
1899 }
1900 if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
1901 u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
1902
1903 if (lerr == VGBSTAT_LDEARLY)
1904 adapter->if_events.rcvearly++;
1905 if (lerr == VGBSTAT_LBOFLO)
1906 adapter->if_events.Bufov++;
1907 if (lerr == VGBSTAT_LCODERR)
1908 adapter->if_events.Code++;
1909 if (lerr == VGBSTAT_LDBLNBL)
1910 adapter->if_events.Drbl++;
1911 if (lerr == VGBSTAT_LCRCERR)
1912 adapter->if_events.Crc++;
1913 if (lerr == VGBSTAT_LOFLO)
1914 adapter->if_events.oflow802++;
1915 if (lerr == VGBSTAT_LUFLO)
1916 adapter->if_events.uflow802++;
1917 }
1918 }
1919}
1920
1921#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
1922#define M_FAST_PATH 0x0040
1923
1924static void slic_rcv_handler(struct adapter *adapter)
1925{
1926 struct net_device *netdev = adapter->netdev;
1927 struct sk_buff *skb;
1928 struct slic_rcvbuf *rcvbuf;
1929 u32 frames = 0;
1930
1931 while ((skb = slic_rcvqueue_getnext(adapter))) {
1932 u32 rx_bytes;
1933
1934 rcvbuf = (struct slic_rcvbuf *)skb->head;
1935 adapter->card->events++;
1936 if (rcvbuf->status & IRHDDR_ERR) {
1937 adapter->rx_errors++;
1938 slic_rcv_handle_error(adapter, rcvbuf);
1939 slic_rcvqueue_reinsert(adapter, skb);
1940 continue;
1941 }
1942
1943 if (!slic_mac_filter(adapter, (struct ether_header *)
1944 rcvbuf->data)) {
1945 slic_rcvqueue_reinsert(adapter, skb);
1946 continue;
1947 }
1948 skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
1949 rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
1950 skb_put(skb, rx_bytes);
1951 netdev->stats.rx_packets++;
1952 netdev->stats.rx_bytes += rx_bytes;
1953#if SLIC_OFFLOAD_IP_CHECKSUM
1954 skb->ip_summed = CHECKSUM_UNNECESSARY;
1955#endif
1956
1957 skb->dev = adapter->netdev;
1958 skb->protocol = eth_type_trans(skb, skb->dev);
1959 netif_rx(skb);
1960
1961 ++frames;
1962#if SLIC_INTERRUPT_PROCESS_LIMIT
1963 if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
1964 adapter->rcv_interrupt_yields++;
1965 break;
1966 }
1967#endif
1968 }
1969 adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
1970}
1971
1972static void slic_xmit_complete(struct adapter *adapter)
1973{
1974 struct slic_hostcmd *hcmd;
1975 struct slic_rspbuf *rspbuf;
1976 u32 frames = 0;
1977 struct slic_handle_word slic_handle_word;
1978
1979 do {
1980 rspbuf = slic_rspqueue_getnext(adapter);
1981 if (!rspbuf)
1982 break;
1983 adapter->xmit_completes++;
1984 adapter->card->events++;
1985 /*
1986 * Get the complete host command buffer
1987 */
1988 slic_handle_word.handle_token = rspbuf->hosthandle;
1989 hcmd =
1990 adapter->slic_handles[slic_handle_word.handle_index].
1991 address;
1992/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
1993 if (hcmd->type == SLIC_CMD_DUMB) {
1994 if (hcmd->skb)
1995 dev_kfree_skb_irq(hcmd->skb);
1996 slic_cmdq_putdone_irq(adapter, hcmd);
1997 }
1998 rspbuf->status = 0;
1999 rspbuf->hosthandle = 0;
2000 frames++;
2001 } while (1);
2002 adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
2003}
2004
2005static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
2006 struct net_device *dev)
2007{
2008 if (isr & ~ISR_IO) {
2009 if (isr & ISR_ERR) {
2010 adapter->error_interrupts++;
2011 if (isr & ISR_RMISS) {
2012 int count;
2013 int pre_count;
2014 int errors;
2015
2016 struct slic_rcvqueue *rcvq =
2017 &adapter->rcvqueue;
2018
2019 adapter->error_rmiss_interrupts++;
2020
2021 if (!rcvq->errors)
2022 rcv_count = rcvq->count;
2023 pre_count = rcvq->count;
2024 errors = rcvq->errors;
2025
2026 while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
2027 count = slic_rcvqueue_fill(adapter);
2028 if (!count)
2029 break;
2030 }
2031 } else if (isr & ISR_XDROP) {
2032 dev_err(&dev->dev,
2033 "isr & ISR_ERR [%x] ISR_XDROP\n",
2034 isr);
2035 } else {
2036 dev_err(&dev->dev,
2037 "isr & ISR_ERR [%x]\n",
2038 isr);
2039 }
2040 }
2041
2042 if (isr & ISR_LEVENT) {
2043 adapter->linkevent_interrupts++;
2044 if (slic_link_event_handler(adapter))
2045 adapter->linkevent_interrupts--;
2046 }
2047
2048 if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
2049 (isr & ISR_UPCBSY)) {
2050 adapter->upr_interrupts++;
2051 slic_upr_request_complete(adapter, isr);
2052 }
2053 }
2054
2055 if (isr & ISR_RCV) {
2056 adapter->rcv_interrupts++;
2057 slic_rcv_handler(adapter);
2058 }
2059
2060 if (isr & ISR_CMD) {
2061 adapter->xmit_interrupts++;
2062 slic_xmit_complete(adapter);
2063 }
2064}
2065
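/*
 * Interrupt handler.  The card deposits its interrupt status in host
 * shared memory (sm_data->isr) before raising the IRQ; further
 * interrupts are masked via SLIC_REG_ICR, the saved status is
 * dispatched according to card state, and SLIC_REG_ISR is cleared to
 * re-arm.
 */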
2066static irqreturn_t slic_interrupt(int irq, void *dev_id)
2067{
2068 struct net_device *dev = dev_id;
2069 struct adapter *adapter = netdev_priv(dev);
2070 struct slic_shmemory *sm = &adapter->shmem;
2071 struct slic_shmem_data *sm_data = sm->shmem_data;
2072 u32 isr;
2073
2074 if (sm_data->isr) {
2075 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK);
2076 slic_flush_write(adapter);
2077
2078 isr = sm_data->isr;
2079 sm_data->isr = 0;
2080 adapter->num_isrs++;
2081 switch (adapter->card->state) {
2082 case CARD_UP:
2083 slic_interrupt_card_up(isr, adapter, dev);
2084 break;
2085
2086 case CARD_DOWN:
2087 if ((isr & ISR_UPC) ||
2088 (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
2089 adapter->upr_interrupts++;
2090 slic_upr_request_complete(adapter, isr);
2091 }
2092 break;
2093 }
2094
2095 adapter->all_reg_writes += 2;
2096 adapter->isr_reg_writes++;
2097 slic_write32(adapter, SLIC_REG_ISR, 0);
2098 } else {
2099 adapter->false_interrupts++;
2100 }
2101 return IRQ_HANDLED;
2102}
2103
2104#define NORMAL_ETHFRAME 0
2105
2106static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
2107{
2108 struct sliccard *card;
2109 struct adapter *adapter = netdev_priv(dev);
2110 struct slic_hostcmd *hcmd = NULL;
2111 u32 status = 0;
2112 void *offloadcmd = NULL;
2113
2114 card = adapter->card;
2115 if ((adapter->linkstate != LINK_UP) ||
2116 (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
2117 status = XMIT_FAIL_LINK_STATE;
2118 goto xmit_fail;
2119
2120 } else if (skb->len == 0) {
2121 status = XMIT_FAIL_ZERO_LENGTH;
2122 goto xmit_fail;
2123 }
2124
2125 hcmd = slic_cmdq_getfree(adapter);
2126 if (!hcmd) {
2127 adapter->xmitq_full = 1;
2128 status = XMIT_FAIL_HOSTCMD_FAIL;
2129 goto xmit_fail;
2130 }
2131 hcmd->skb = skb;
2132 hcmd->busy = 1;
2133 hcmd->type = SLIC_CMD_DUMB;
2134 slic_xmit_build_request(adapter, hcmd, skb);
2135 dev->stats.tx_packets++;
2136 dev->stats.tx_bytes += skb->len;
2137
2138#ifdef DEBUG_DUMP
2139 if (adapter->kill_card) {
 2140 struct slic_host64_cmd *ihcmd;
2141
2142 ihcmd = &hcmd->cmd64;
2143
2144 ihcmd->flags |= 0x40;
2145 adapter->kill_card = 0; /* only do this once */
2146 }
2147#endif
2148 if (hcmd->paddrh == 0) {
2149 slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl |
2150 hcmd->cmdsize));
2151 } else {
2152 slic_write64(adapter, SLIC_REG_CBAR64,
2153 hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh);
2154 }
2155xmit_done:
2156 return NETDEV_TX_OK;
2157xmit_fail:
2158 slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status);
2159 goto xmit_done;
2160}
2161
2162static void slic_adapter_freeresources(struct adapter *adapter)
2163{
2164 slic_init_cleanup(adapter);
2165 adapter->error_interrupts = 0;
2166 adapter->rcv_interrupts = 0;
2167 adapter->xmit_interrupts = 0;
2168 adapter->linkevent_interrupts = 0;
2169 adapter->upr_interrupts = 0;
2170 adapter->num_isrs = 0;
2171 adapter->xmit_completes = 0;
2172 adapter->rcv_broadcasts = 0;
2173 adapter->rcv_multicasts = 0;
2174 adapter->rcv_unicasts = 0;
2175}
2176
2177static int slic_adapter_allocresources(struct adapter *adapter,
2178 unsigned long *flags)
2179{
2180 if (!adapter->intrregistered) {
2181 int retval;
2182
2183 spin_unlock_irqrestore(&slic_global.driver_lock, *flags);
2184
2185 retval = request_irq(adapter->netdev->irq,
2186 &slic_interrupt,
2187 IRQF_SHARED,
2188 adapter->netdev->name, adapter->netdev);
2189
2190 spin_lock_irqsave(&slic_global.driver_lock, *flags);
2191
2192 if (retval) {
2193 dev_err(&adapter->netdev->dev,
2194 "request_irq (%s) FAILED [%x]\n",
2195 adapter->netdev->name, retval);
2196 return retval;
2197 }
2198 adapter->intrregistered = 1;
2199 }
2200 return 0;
2201}
2202
2203/*
2204 * slic_if_init
2205 *
2206 * Perform initialization of our slic interface.
2207 *
2208 */
2209static int slic_if_init(struct adapter *adapter, unsigned long *flags)
2210{
2211 struct sliccard *card = adapter->card;
2212 struct net_device *dev = adapter->netdev;
2213 struct slic_shmemory *sm = &adapter->shmem;
2214 struct slic_shmem_data *sm_data = sm->shmem_data;
2215 int rc;
2216
2217 /* adapter should be down at this point */
2218 if (adapter->state != ADAPT_DOWN) {
2219 dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
2220 __func__);
2221 rc = -EIO;
2222 goto err;
2223 }
2224
2225 adapter->devflags_prev = dev->flags;
2226 adapter->macopts = MAC_DIRECTED;
2227 if (dev->flags) {
2228 if (dev->flags & IFF_BROADCAST)
2229 adapter->macopts |= MAC_BCAST;
2230 if (dev->flags & IFF_PROMISC)
2231 adapter->macopts |= MAC_PROMISC;
2232 if (dev->flags & IFF_ALLMULTI)
2233 adapter->macopts |= MAC_ALLMCAST;
2234 if (dev->flags & IFF_MULTICAST)
2235 adapter->macopts |= MAC_MCAST;
2236 }
2237 rc = slic_adapter_allocresources(adapter, flags);
2238 if (rc) {
2239 dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n",
2240 rc);
2241 slic_adapter_freeresources(adapter);
2242 goto err;
2243 }
2244
2245 if (!adapter->queues_initialized) {
2246 rc = slic_rspqueue_init(adapter);
2247 if (rc)
2248 goto err;
2249 rc = slic_cmdq_init(adapter);
2250 if (rc)
2251 goto err;
2252 rc = slic_rcvqueue_init(adapter);
2253 if (rc)
2254 goto err;
2255 adapter->queues_initialized = 1;
2256 }
2257
2258 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
2259 slic_flush_write(adapter);
2260 mdelay(1);
2261
2262 if (!adapter->isp_initialized) {
2263 unsigned long flags;
2264
2265 spin_lock_irqsave(&adapter->bit64reglock, flags);
2266 slic_write32(adapter, SLIC_REG_ADDR_UPPER,
2267 cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
2268 slic_write32(adapter, SLIC_REG_ISP,
2269 cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
2270 spin_unlock_irqrestore(&adapter->bit64reglock, flags);
2271
2272 adapter->isp_initialized = 1;
2273 }
2274
2275 adapter->state = ADAPT_UP;
2276 if (!card->loadtimerset) {
2277 setup_timer(&card->loadtimer, &slic_timer_load_check,
2278 (ulong)card);
2279 card->loadtimer.expires =
2280 jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
2281 add_timer(&card->loadtimer);
2282
2283 card->loadtimerset = 1;
2284 }
2285
2286 if (!adapter->pingtimerset) {
2287 setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev);
2288 adapter->pingtimer.expires =
2289 jiffies + (PING_TIMER_INTERVAL * HZ);
2290 add_timer(&adapter->pingtimer);
2291 adapter->pingtimerset = 1;
2292 adapter->card->pingstatus = ISR_PINGMASK;
2293 }
2294
2295 /*
2296 * clear any pending events, then enable interrupts
2297 */
2298 sm_data->isr = 0;
2299 slic_write32(adapter, SLIC_REG_ISR, 0);
2300 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON);
2301
2302 slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
2303 slic_flush_write(adapter);
2304
2305 rc = slic_link_event_handler(adapter);
2306 if (rc) {
2307 /* disable interrupts then clear pending events */
2308 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
2309 slic_write32(adapter, SLIC_REG_ISR, 0);
2310 slic_flush_write(adapter);
2311
2312 if (adapter->pingtimerset) {
2313 del_timer(&adapter->pingtimer);
2314 adapter->pingtimerset = 0;
2315 }
2316 if (card->loadtimerset) {
2317 del_timer(&card->loadtimer);
2318 card->loadtimerset = 0;
2319 }
2320 adapter->state = ADAPT_DOWN;
2321 slic_adapter_freeresources(adapter);
2322 }
2323
2324err:
2325 return rc;
2326}
2327
2328static int slic_entry_open(struct net_device *dev)
2329{
2330 struct adapter *adapter = netdev_priv(dev);
2331 struct sliccard *card = adapter->card;
2332 unsigned long flags;
2333 int status;
2334
2335 netif_carrier_off(dev);
2336
2337 spin_lock_irqsave(&slic_global.driver_lock, flags);
2338 if (!adapter->activated) {
2339 card->adapters_activated++;
2340 slic_global.num_slic_ports_active++;
2341 adapter->activated = 1;
2342 }
2343 status = slic_if_init(adapter, &flags);
2344
2345 if (status != 0) {
2346 if (adapter->activated) {
2347 card->adapters_activated--;
2348 slic_global.num_slic_ports_active--;
2349 adapter->activated = 0;
2350 }
2351 goto spin_unlock;
2352 }
2353 if (!card->master)
2354 card->master = adapter;
2355
2356spin_unlock:
2357 spin_unlock_irqrestore(&slic_global.driver_lock, flags);
2358
2359 netif_start_queue(adapter->netdev);
2360
2361 return status;
2362}
2363
2364static void slic_card_cleanup(struct sliccard *card)
2365{
2366 if (card->loadtimerset) {
2367 card->loadtimerset = 0;
2368 del_timer_sync(&card->loadtimer);
2369 }
2370
2371 kfree(card);
2372}
2373
2374static void slic_entry_remove(struct pci_dev *pcidev)
2375{
2376 struct net_device *dev = pci_get_drvdata(pcidev);
2377 struct adapter *adapter = netdev_priv(dev);
2378 struct sliccard *card;
2379 struct mcast_address *mcaddr, *mlist;
2380
2381 unregister_netdev(dev);
2382
2383 slic_adapter_freeresources(adapter);
2384 iounmap(adapter->regs);
2385
2386 /* free multicast addresses */
2387 mlist = adapter->mcastaddrs;
2388 while (mlist) {
2389 mcaddr = mlist;
2390 mlist = mlist->next;
2391 kfree(mcaddr);
2392 }
2393 card = adapter->card;
2394 card->adapters_allocated--;
2395 adapter->allocated = 0;
2396 if (!card->adapters_allocated) {
2397 struct sliccard *curr_card = slic_global.slic_card;
2398
2399 if (curr_card == card) {
2400 slic_global.slic_card = card->next;
2401 } else {
2402 while (curr_card->next != card)
2403 curr_card = curr_card->next;
2404 curr_card->next = card->next;
2405 }
2406 slic_global.num_slic_cards--;
2407 slic_card_cleanup(card);
2408 }
2409 free_netdev(dev);
2410 pci_release_regions(pcidev);
2411 pci_disable_device(pcidev);
2412}
2413
2414static int slic_entry_halt(struct net_device *dev)
2415{
2416 struct adapter *adapter = netdev_priv(dev);
2417 struct sliccard *card = adapter->card;
2418 unsigned long flags;
2419
2420 spin_lock_irqsave(&slic_global.driver_lock, flags);
2421 netif_stop_queue(adapter->netdev);
2422 adapter->state = ADAPT_DOWN;
2423 adapter->linkstate = LINK_DOWN;
2424 adapter->upr_list = NULL;
2425 adapter->upr_busy = 0;
2426 adapter->devflags_prev = 0;
2427 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
2428 adapter->all_reg_writes++;
2429 adapter->icr_reg_writes++;
2430 slic_config_clear(adapter);
2431 if (adapter->activated) {
2432 card->adapters_activated--;
2433 slic_global.num_slic_ports_active--;
2434 adapter->activated = 0;
2435 }
2436#ifdef AUTOMATIC_RESET
2437 slic_write32(adapter, SLIC_REG_RESET_IFACE, 0);
2438#endif
2439 slic_flush_write(adapter);
2440
2441 /*
2442 * Reset the adapter's cmd queues
2443 */
2444 slic_cmdq_reset(adapter);
2445
2446#ifdef AUTOMATIC_RESET
2447 if (!card->adapters_activated)
2448 slic_card_init(card, adapter);
2449#endif
2450
2451 spin_unlock_irqrestore(&slic_global.driver_lock, flags);
2452
2453 netif_carrier_off(dev);
2454
2455 return 0;
2456}
2457
2458static struct net_device_stats *slic_get_stats(struct net_device *dev)
2459{
2460 struct adapter *adapter = netdev_priv(dev);
2461
2462 dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
2463 dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
2464 dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
2465 dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
2466 dev->stats.tx_heartbeat_errors = 0;
2467 dev->stats.tx_aborted_errors = 0;
2468 dev->stats.tx_window_errors = 0;
2469 dev->stats.tx_fifo_errors = 0;
2470 dev->stats.rx_frame_errors = 0;
2471 dev->stats.rx_length_errors = 0;
2472
2473 return &dev->stats;
2474}
2475
2476static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2477{
2478 struct adapter *adapter = netdev_priv(dev);
2479 struct ethtool_cmd edata;
2480 struct ethtool_cmd ecmd;
2481 u32 data[7];
2482 u32 intagg;
2483
2484 switch (cmd) {
2485 case SIOCSLICSETINTAGG:
2486 if (copy_from_user(data, rq->ifr_data, 28))
2487 return -EFAULT;
2488 intagg = data[0];
2489 dev_err(&dev->dev, "set interrupt aggregation to %d\n",
2490 intagg);
2491 slic_intagg_set(adapter, intagg);
2492 return 0;
2493
2494 case SIOCETHTOOL:
2495 if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
2496 return -EFAULT;
2497
2498 if (ecmd.cmd == ETHTOOL_GSET) {
2499 memset(&edata, 0, sizeof(edata));
2500 edata.supported = (SUPPORTED_10baseT_Half |
2501 SUPPORTED_10baseT_Full |
2502 SUPPORTED_100baseT_Half |
2503 SUPPORTED_100baseT_Full |
2504 SUPPORTED_Autoneg | SUPPORTED_MII);
2505 edata.port = PORT_MII;
2506 edata.transceiver = XCVR_INTERNAL;
2507 edata.phy_address = 0;
2508 if (adapter->linkspeed == LINK_100MB)
2509 edata.speed = SPEED_100;
2510 else if (adapter->linkspeed == LINK_10MB)
2511 edata.speed = SPEED_10;
2512 else
2513 edata.speed = 0;
2514
2515 if (adapter->linkduplex == LINK_FULLD)
2516 edata.duplex = DUPLEX_FULL;
2517 else
2518 edata.duplex = DUPLEX_HALF;
2519
2520 edata.autoneg = AUTONEG_ENABLE;
2521 edata.maxtxpkt = 1;
2522 edata.maxrxpkt = 1;
2523 if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
2524 return -EFAULT;
2525
2526 } else if (ecmd.cmd == ETHTOOL_SSET) {
2527 if (!capable(CAP_NET_ADMIN))
2528 return -EPERM;
2529
2530 if (adapter->linkspeed == LINK_100MB)
2531 edata.speed = SPEED_100;
2532 else if (adapter->linkspeed == LINK_10MB)
2533 edata.speed = SPEED_10;
2534 else
2535 edata.speed = 0;
2536
2537 if (adapter->linkduplex == LINK_FULLD)
2538 edata.duplex = DUPLEX_FULL;
2539 else
2540 edata.duplex = DUPLEX_HALF;
2541
2542 edata.autoneg = AUTONEG_ENABLE;
2543 edata.maxtxpkt = 1;
2544 edata.maxrxpkt = 1;
2545 if ((ecmd.speed != edata.speed) ||
2546 (ecmd.duplex != edata.duplex)) {
2547 u32 speed;
2548 u32 duplex;
2549
2550 if (ecmd.speed == SPEED_10)
2551 speed = 0;
2552 else
2553 speed = PCR_SPEED_100;
2554 if (ecmd.duplex == DUPLEX_FULL)
2555 duplex = PCR_DUPLEX_FULL;
2556 else
2557 duplex = 0;
2558 slic_link_config(adapter, speed, duplex);
2559 if (slic_link_event_handler(adapter))
2560 return -EFAULT;
2561 }
2562 }
2563 return 0;
2564 default:
2565 return -EOPNOTSUPP;
2566 }
2567}
2568
2569static void slic_config_pci(struct pci_dev *pcidev)
2570{
2571 u16 pci_command;
2572 u16 new_command;
2573
2574 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
2575
2576 new_command = pci_command | PCI_COMMAND_MASTER
2577 | PCI_COMMAND_MEMORY
2578 | PCI_COMMAND_INVALIDATE
2579 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
2580 if (pci_command != new_command)
2581 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
2582}
2583
2584static int slic_card_init(struct sliccard *card, struct adapter *adapter)
2585{
2586 struct slic_shmemory *sm = &adapter->shmem;
2587 struct slic_shmem_data *sm_data = sm->shmem_data;
2588 struct slic_eeprom *peeprom;
2589 struct oslic_eeprom *pOeeprom;
2590 dma_addr_t phys_config;
2591 u32 phys_configh;
2592 u32 phys_configl;
2593 u32 i = 0;
2594 int status;
2595 uint macaddrs = card->card_size;
2596 ushort eecodesize;
2597 ushort dramsize;
2598 ushort ee_chksum;
2599 ushort calc_chksum;
2600 struct slic_config_mac *pmac;
2601 unsigned char fruformat;
2602 unsigned char oemfruformat;
2603 struct atk_fru *patkfru;
2604 union oemfru *poemfru;
2605 unsigned long flags;
2606
2607 /* Reset everything except PCI configuration space */
2608 slic_soft_reset(adapter);
2609
2610 /* Download the microcode */
2611 status = slic_card_download(adapter);
2612 if (status)
2613 return status;
2614
2615 if (!card->config_set) {
2616 peeprom = pci_alloc_consistent(adapter->pcidev,
2617 sizeof(struct slic_eeprom),
2618 &phys_config);
2619
2620 if (!peeprom) {
2621 dev_err(&adapter->pcidev->dev,
2622 "Failed to allocate DMA memory for EEPROM.\n");
2623 return -ENOMEM;
2624 }
2625
2626 phys_configl = SLIC_GET_ADDR_LOW(phys_config);
2627 phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
2628
2629 memset(peeprom, 0, sizeof(struct slic_eeprom));
2630
2631 slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
2632 slic_flush_write(adapter);
2633 mdelay(1);
2634
2635 spin_lock_irqsave(&adapter->bit64reglock, flags);
2636 slic_write32(adapter, SLIC_REG_ADDR_UPPER,
2637 cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
2638 slic_write32(adapter, SLIC_REG_ISP,
2639 cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
2640 spin_unlock_irqrestore(&adapter->bit64reglock, flags);
2641
2642 status = slic_config_get(adapter, phys_configl, phys_configh);
2643 if (status) {
2644 dev_err(&adapter->pcidev->dev,
2645 "Failed to fetch config data from device.\n");
2646 goto card_init_err;
2647 }
2648
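		/*
		 * Poll shared memory for the ISR_UPC completion of the
		 * config read, giving up after roughly five seconds.
		 */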
2649 for (;;) {
2650 if (sm_data->isr) {
2651 if (sm_data->isr & ISR_UPC) {
2652 sm_data->isr = 0;
2653 slic_write64(adapter, SLIC_REG_ISP, 0,
2654 0);
2655 slic_write32(adapter, SLIC_REG_ISR, 0);
2656 slic_flush_write(adapter);
2657
2658 slic_upr_request_complete(adapter, 0);
2659 break;
2660 }
2661
2662 sm_data->isr = 0;
2663 slic_write32(adapter, SLIC_REG_ISR, 0);
2664 slic_flush_write(adapter);
2665 } else {
2666 mdelay(1);
2667 i++;
2668 if (i > 5000) {
2669 dev_err(&adapter->pcidev->dev,
2670 "Fetch of config data timed out.\n");
2671 slic_write64(adapter, SLIC_REG_ISP,
2672 0, 0);
2673 slic_flush_write(adapter);
2674
2675 status = -EINVAL;
2676 goto card_init_err;
2677 }
2678 }
2679 }
2680
2681 switch (adapter->devid) {
2682 /* Oasis card */
2683 case SLIC_2GB_DEVICE_ID:
2684 /* extract EEPROM data and pointers to EEPROM data */
2685 pOeeprom = (struct oslic_eeprom *)peeprom;
2686 eecodesize = pOeeprom->EecodeSize;
2687 dramsize = pOeeprom->DramSize;
2688 pmac = pOeeprom->MacInfo;
2689 fruformat = pOeeprom->FruFormat;
2690 patkfru = &pOeeprom->AtkFru;
2691 oemfruformat = pOeeprom->OemFruFormat;
2692 poemfru = &pOeeprom->OemFru;
2693 macaddrs = 2;
2694 /*
2695 * Minor kludge for Oasis card
2696 * get 2 MAC addresses from the
2697 * EEPROM to ensure that function 1
2698 * gets the Port 1 MAC address
2699 */
2700 break;
2701 default:
2702 /* extract EEPROM data and pointers to EEPROM data */
2703 eecodesize = peeprom->EecodeSize;
2704 dramsize = peeprom->DramSize;
2705 pmac = peeprom->u2.mac.MacInfo;
2706 fruformat = peeprom->FruFormat;
2707 patkfru = &peeprom->AtkFru;
2708 oemfruformat = peeprom->OemFruFormat;
2709 poemfru = &peeprom->OemFru;
2710 break;
2711 }
2712
2713 card->config.EepromValid = false;
2714
 2715 /* see if the EEPROM is valid by checking its checksum */
2716 if ((eecodesize <= MAX_EECODE_SIZE) &&
2717 (eecodesize >= MIN_EECODE_SIZE)) {
2718 ee_chksum =
2719 *(u16 *)((char *)peeprom + (eecodesize - 2));
2720 /*
2721 * calculate the EEPROM checksum
2722 */
2723 calc_chksum = slic_eeprom_cksum(peeprom,
2724 eecodesize - 2);
2725 /*
 2726 * if the ucode chksum flag bit worked,
2727 * we wouldn't need this
2728 */
2729 if (ee_chksum == calc_chksum)
2730 card->config.EepromValid = true;
2731 }
2732 /* copy in the DRAM size */
2733 card->config.DramSize = dramsize;
2734
2735 /* copy in the MAC address(es) */
2736 for (i = 0; i < macaddrs; i++) {
2737 memcpy(&card->config.MacInfo[i],
2738 &pmac[i], sizeof(struct slic_config_mac));
2739 }
2740
2741 /* copy the Alacritech FRU information */
2742 card->config.FruFormat = fruformat;
2743 memcpy(&card->config.AtkFru, patkfru,
2744 sizeof(struct atk_fru));
2745
2746 pci_free_consistent(adapter->pcidev,
2747 sizeof(struct slic_eeprom),
2748 peeprom, phys_config);
2749
2750 if (!card->config.EepromValid) {
2751 slic_write64(adapter, SLIC_REG_ISP, 0, 0);
2752 slic_flush_write(adapter);
2753 dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
2754 return -EINVAL;
2755 }
2756
2757 card->config_set = 1;
2758 }
2759
2760 status = slic_card_download_gbrcv(adapter);
2761 if (status)
2762 return status;
2763
2764 if (slic_global.dynamic_intagg)
2765 slic_intagg_set(adapter, 0);
2766 else
2767 slic_intagg_set(adapter, adapter->intagg_delay);
2768
2769 /*
2770 * Initialize ping status to "ok"
2771 */
2772 card->pingstatus = ISR_PINGMASK;
2773
2774 /*
2775 * Lastly, mark our card state as up and return success
2776 */
2777 card->state = CARD_UP;
2778 card->reset_in_progress = 0;
2779
2780 return 0;
2781
2782card_init_err:
2783 pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom),
2784 peeprom, phys_config);
2785 return status;
2786}
2787
2788static int slic_get_coalesce(struct net_device *dev,
2789 struct ethtool_coalesce *coalesce)
2790{
2791 struct adapter *adapter = netdev_priv(dev);
2792
 2793 coalesce->rx_coalesce_usecs = adapter->intagg_delay;
 2794 coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
2795 return 0;
2796}
2797
2798static int slic_set_coalesce(struct net_device *dev,
2799 struct ethtool_coalesce *coalesce)
2800{
2801 struct adapter *adapter = netdev_priv(dev);
2802
2803 coalesce->rx_coalesce_usecs = adapter->intagg_delay;
2804 coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
2805 return 0;
2806}
2807
2808static void slic_init_driver(void)
2809{
2810 if (slic_first_init) {
2811 slic_first_init = 0;
2812 spin_lock_init(&slic_global.driver_lock);
2813 }
2814}
2815
2816static int slic_init_adapter(struct net_device *netdev,
2817 struct pci_dev *pcidev,
2818 const struct pci_device_id *pci_tbl_entry,
2819 void __iomem *memaddr, int chip_idx)
2820{
2821 ushort index;
2822 struct slic_handle *pslic_handle;
2823 struct adapter *adapter = netdev_priv(netdev);
2824 struct slic_shmemory *sm = &adapter->shmem;
2825 struct slic_shmem_data *sm_data;
2826 dma_addr_t phaddr;
2827
2828/* adapter->pcidev = pcidev;*/
2829 adapter->vendid = pci_tbl_entry->vendor;
2830 adapter->devid = pci_tbl_entry->device;
2831 adapter->subsysid = pci_tbl_entry->subdevice;
2832 adapter->busnumber = pcidev->bus->number;
2833 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
2834 adapter->functionnumber = (pcidev->devfn & 0x7);
2835 adapter->regs = memaddr;
2836 adapter->irq = pcidev->irq;
2837 adapter->chipid = chip_idx;
2838 adapter->port = 0;
2839 adapter->cardindex = adapter->port;
2840 spin_lock_init(&adapter->upr_lock);
2841 spin_lock_init(&adapter->bit64reglock);
2842 spin_lock_init(&adapter->adapter_lock);
2843 spin_lock_init(&adapter->reset_lock);
2844 spin_lock_init(&adapter->handle_lock);
2845
2846 adapter->card_size = 1;
2847 /*
2848 * Initialize slic_handle array
2849 */
2850 /*
2851 * Start with 1. 0 is an invalid host handle.
2852 */
2853 for (index = 1, pslic_handle = &adapter->slic_handles[1];
2854 index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
2855 pslic_handle->token.handle_index = index;
2856 pslic_handle->type = SLIC_HANDLE_FREE;
2857 pslic_handle->next = adapter->pfree_slic_handles;
2858 adapter->pfree_slic_handles = pslic_handle;
2859 }
2860 sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data),
2861 &phaddr);
2862 if (!sm_data)
2863 return -ENOMEM;
2864
2865 sm->shmem_data = sm_data;
2866 sm->isr_phaddr = phaddr;
2867 sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data,
2868 lnkstatus);
2869 sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats);
2870
2871 return 0;
2872}
2873
2874static const struct net_device_ops slic_netdev_ops = {
2875 .ndo_open = slic_entry_open,
2876 .ndo_stop = slic_entry_halt,
2877 .ndo_start_xmit = slic_xmit_start,
2878 .ndo_do_ioctl = slic_ioctl,
2879 .ndo_set_mac_address = slic_mac_set_address,
2880 .ndo_get_stats = slic_get_stats,
2881 .ndo_set_rx_mode = slic_mcast_set_list,
2882 .ndo_validate_addr = eth_validate_addr,
2883};
2884
2885static u32 slic_card_locate(struct adapter *adapter)
2886{
2887 struct sliccard *card = slic_global.slic_card;
2888 struct physcard *physcard = slic_global.phys_card;
2889 ushort card_hostid;
2890 uint i;
2891
2892 card_hostid = slic_read32(adapter, SLIC_REG_HOSTID);
2893
2894 /* Initialize a new card structure if need be */
2895 if (card_hostid == SLIC_HOSTID_DEFAULT) {
2896 card = kzalloc(sizeof(*card), GFP_KERNEL);
2897 if (!card)
2898 return -ENOMEM;
2899
2900 card->next = slic_global.slic_card;
2901 slic_global.slic_card = card;
2902 card->busnumber = adapter->busnumber;
2903 card->slotnumber = adapter->slotnumber;
2904
2905 /* Find an available cardnum */
2906 for (i = 0; i < SLIC_MAX_CARDS; i++) {
2907 if (slic_global.cardnuminuse[i] == 0) {
2908 slic_global.cardnuminuse[i] = 1;
2909 card->cardnum = i;
2910 break;
2911 }
2912 }
2913 slic_global.num_slic_cards++;
2914 } else {
2915 /* Card exists, find the card this adapter belongs to */
2916 while (card) {
2917 if (card->cardnum == card_hostid)
2918 break;
2919 card = card->next;
2920 }
2921 }
2922
2923 if (!card)
2924 return -ENXIO;
2925 /* Put the adapter in the card's adapter list */
2926 if (!card->adapter[adapter->port]) {
2927 card->adapter[adapter->port] = adapter;
2928 adapter->card = card;
2929 }
2930
2931 card->card_size = 1; /* one port per *logical* card */
2932
2933 while (physcard) {
2934 for (i = 0; i < SLIC_MAX_PORTS; i++) {
2935 if (physcard->adapter[i])
2936 break;
2937 }
2938 if (i == SLIC_MAX_PORTS)
2939 break;
2940
2941 if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
2942 break;
2943 physcard = physcard->next;
2944 }
2945 if (!physcard) {
2946 /* no structure allocated for this physical card yet */
2947 physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC);
2948 if (!physcard) {
2949 if (card_hostid == SLIC_HOSTID_DEFAULT)
2950 kfree(card);
2951 return -ENOMEM;
2952 }
2953
2954 physcard->next = slic_global.phys_card;
2955 slic_global.phys_card = physcard;
2956 physcard->adapters_allocd = 1;
2957 } else {
2958 physcard->adapters_allocd++;
2959 }
2960 /* Note - this is ZERO relative */
2961 adapter->physport = physcard->adapters_allocd - 1;
2962
2963 physcard->adapter[adapter->physport] = adapter;
2964 adapter->physcard = physcard;
2965
2966 return 0;
2967}
2968
2969static int slic_entry_probe(struct pci_dev *pcidev,
2970 const struct pci_device_id *pci_tbl_entry)
2971{
2972 static int cards_found;
2973 static int did_version;
2974 int err = -ENODEV;
2975 struct net_device *netdev;
2976 struct adapter *adapter;
2977 void __iomem *memmapped_ioaddr = NULL;
2978 ulong mmio_start = 0;
2979 ulong mmio_len = 0;
2980 struct sliccard *card = NULL;
2981 int pci_using_dac = 0;
2982
2983 err = pci_enable_device(pcidev);
2984
2985 if (err)
2986 return err;
2987
2988 if (did_version++ == 0) {
2989 dev_info(&pcidev->dev, "%s\n", slic_banner);
2990 dev_info(&pcidev->dev, "%s\n", slic_proc_version);
2991 }
2992
2993 if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
2994 pci_using_dac = 1;
2995 err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
2996 if (err) {
2997 dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
2998 goto err_out_disable_pci;
2999 }
3000 } else {
3001 err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
3002 if (err) {
3003 dev_err(&pcidev->dev, "no usable DMA configuration\n");
3004 goto err_out_disable_pci;
3005 }
3006 pci_using_dac = 0;
3007 pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
3008 }
3009
3010 err = pci_request_regions(pcidev, DRV_NAME);
3011 if (err) {
3012 dev_err(&pcidev->dev, "can't obtain PCI resources\n");
3013 goto err_out_disable_pci;
3014 }
3015
3016 pci_set_master(pcidev);
3017
3018 netdev = alloc_etherdev(sizeof(struct adapter));
3019 if (!netdev) {
3020 err = -ENOMEM;
3021 goto err_out_exit_slic_probe;
3022 }
3023
3024 netdev->ethtool_ops = &slic_ethtool_ops;
3025 SET_NETDEV_DEV(netdev, &pcidev->dev);
3026
3027 pci_set_drvdata(pcidev, netdev);
3028 adapter = netdev_priv(netdev);
3029 adapter->netdev = netdev;
3030 adapter->pcidev = pcidev;
3031 slic_global.dynamic_intagg = adapter->dynamic_intagg;
3032 if (pci_using_dac)
3033 netdev->features |= NETIF_F_HIGHDMA;
3034
3035 mmio_start = pci_resource_start(pcidev, 0);
3036 mmio_len = pci_resource_len(pcidev, 0);
3037
3038 memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len);
3039 if (!memmapped_ioaddr) {
3040 dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
3041 mmio_len, mmio_start);
3042 err = -ENOMEM;
3043 goto err_out_free_netdev;
3044 }
3045
3046 slic_config_pci(pcidev);
3047
3048 slic_init_driver();
3049
3050 err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr,
3051 cards_found);
3052 if (err) {
3053 dev_err(&pcidev->dev, "failed to init adapter: %i\n", err);
3054 goto err_out_unmap;
3055 }
3056
3057 err = slic_card_locate(adapter);
3058 if (err) {
3059 dev_err(&pcidev->dev, "cannot locate card\n");
3060 goto err_clean_init;
3061 }
3062
3063 card = adapter->card;
3064
3065 if (!adapter->allocated) {
3066 card->adapters_allocated++;
3067 adapter->allocated = 1;
3068 }
3069
3070 err = slic_card_init(card, adapter);
3071 if (err)
3072 goto err_clean_init;
3073
3074 slic_adapter_set_hwaddr(adapter);
3075
3076 netdev->base_addr = (unsigned long)memmapped_ioaddr;
3077 netdev->irq = adapter->irq;
3078 netdev->netdev_ops = &slic_netdev_ops;
3079
3080 netif_carrier_off(netdev);
3081
3082 strcpy(netdev->name, "eth%d");
3083 err = register_netdev(netdev);
3084 if (err) {
3085 dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
3086 goto err_clean_init;
3087 }
3088
3089 cards_found++;
3090
3091 return 0;
3092
3093err_clean_init:
3094 slic_init_cleanup(adapter);
3095err_out_unmap:
3096 iounmap(memmapped_ioaddr);
3097err_out_free_netdev:
3098 free_netdev(netdev);
3099err_out_exit_slic_probe:
3100 pci_release_regions(pcidev);
3101err_out_disable_pci:
3102 pci_disable_device(pcidev);
3103 return err;
3104}
3105
3106static struct pci_driver slic_driver = {
3107 .name = DRV_NAME,
3108 .id_table = slic_pci_tbl,
3109 .probe = slic_entry_probe,
3110 .remove = slic_entry_remove,
3111};
3112
3113static int __init slic_module_init(void)
3114{
3115 slic_init_driver();
3116
3117 return pci_register_driver(&slic_driver);
3118}
3119
3120static void __exit slic_module_cleanup(void)
3121{
3122 pci_unregister_driver(&slic_driver);
3123}
3124
3125static const struct ethtool_ops slic_ethtool_ops = {
3126 .get_coalesce = slic_get_coalesce,
3127 .set_coalesce = slic_set_coalesce
3128};
3129
3130module_init(slic_module_init);
3131module_exit(slic_module_cleanup);
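
A note on the ethtool hooks in the removed slicoss code above: by convention, .get_coalesce reports the device's current settings into struct ethtool_coalesce and .set_coalesce applies the user's values to the device. A minimal sketch of that conventional orientation, reusing the removed driver's field names purely for illustration (not part of any tree):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* 'struct adapter' and its intagg fields are the ones from the removed
 * driver above; this pair only illustrates the usual get/set direction. */
static int example_get_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *coalesce)
{
        struct adapter *adapter = netdev_priv(dev);

        /* report current device state to user space */
        coalesce->rx_coalesce_usecs = adapter->intagg_delay;
        coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
        return 0;
}

static int example_set_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *coalesce)
{
        struct adapter *adapter = netdev_priv(dev);

        /* apply the user-requested values */
        adapter->intagg_delay = coalesce->rx_coalesce_usecs;
        adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
        return 0;
}
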
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index dcce3f487ed5..4d781f78b95c 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_FB_SM750) += sm750fb.o 1obj-$(CONFIG_FB_SM750) += sm750fb.o
2 2
3sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o 3sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
4sm750fb-objs += ddk750_display.o ddk750_help.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o 4sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 2c10a08ed964..734010324a8f 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -1,22 +1,21 @@
1/*
2 * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
3 *
4 * All rights are reserved. Reproduction or in part is prohibited
5 * without the written consent of the copyright owner.
6 *
7 * RegSC.h --- SM718 SDK
8 * This file contains the definitions for the System Configuration registers.
9 */
10
1#ifndef DDK750_H__ 11#ifndef DDK750_H__
2#define DDK750_H__ 12#define DDK750_H__
3/******************************************************************* 13
4*
5* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
6*
7* All rights are reserved. Reproduction or in part is prohibited
8* without the written consent of the copyright owner.
9*
10* RegSC.h --- SM718 SDK
11* This file contains the definitions for the System Configuration registers.
12*
13*******************************************************************/
14#include "ddk750_reg.h" 14#include "ddk750_reg.h"
15#include "ddk750_mode.h" 15#include "ddk750_mode.h"
16#include "ddk750_chip.h" 16#include "ddk750_chip.h"
17#include "ddk750_display.h" 17#include "ddk750_display.h"
18#include "ddk750_power.h" 18#include "ddk750_power.h"
19#include "ddk750_help.h"
20#ifdef USE_HW_I2C 19#ifdef USE_HW_I2C
21#include "ddk750_hwi2c.h" 20#include "ddk750_hwi2c.h"
22#endif 21#endif
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 839d6730bde9..f59ce5c0867d 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,33 +1,32 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/sizes.h> 2#include <linux/sizes.h>
3 3
4#include "ddk750_help.h"
5#include "ddk750_reg.h" 4#include "ddk750_reg.h"
6#include "ddk750_chip.h" 5#include "ddk750_chip.h"
7#include "ddk750_power.h" 6#include "ddk750_power.h"
8 7
9#define MHz(x) ((x) * 1000000) 8#define MHz(x) ((x) * 1000000)
10 9
10static logical_chip_type_t chip;
11
11logical_chip_type_t sm750_get_chip_type(void) 12logical_chip_type_t sm750_get_chip_type(void)
12{ 13{
13 unsigned short physicalID; 14 return chip;
14 char physicalRev; 15}
15 logical_chip_type_t chip;
16
17 physicalID = devId750; /* either 0x718 or 0x750 */
18 physicalRev = revId750;
19 16
20 if (physicalID == 0x718) 17void sm750_set_chip_type(unsigned short devId, u8 revId)
18{
19 if (devId == 0x718)
21 chip = SM718; 20 chip = SM718;
22 else if (physicalID == 0x750) { 21 else if (devId == 0x750) {
23 chip = SM750; 22 chip = SM750;
24 /* SM750 and SM750LE are different in their revision ID only. */ 23 /* SM750 and SM750LE are different in their revision ID only. */
25 if (physicalRev == SM750LE_REVISION_ID) 24 if (revId == SM750LE_REVISION_ID) {
26 chip = SM750LE; 25 chip = SM750LE;
26 pr_info("found sm750le\n");
27 }
27 } else 28 } else
28 chip = SM_UNKNOWN; 29 chip = SM_UNKNOWN;
29
30 return chip;
31} 30}
32 31
33static unsigned int get_mxclk_freq(void) 32static unsigned int get_mxclk_freq(void)
@@ -52,9 +51,9 @@ static unsigned int get_mxclk_freq(void)
52 * 51 *
53 * Input: Frequency to be set. 52 * Input: Frequency to be set.
54 */ 53 */
55static void setChipClock(unsigned int frequency) 54static void set_chip_clock(unsigned int frequency)
56{ 55{
57 pll_value_t pll; 56 struct pll_value pll;
58 unsigned int ulActualMxClk; 57 unsigned int ulActualMxClk;
59 58
60 /* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */ 59 /* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
@@ -63,29 +62,31 @@ static void setChipClock(unsigned int frequency)
63 62
64 if (frequency) { 63 if (frequency) {
65 /* 64 /*
66 * Set up PLL, a structure to hold the value to be set in clocks. 65 * Set up PLL structure to hold the value to be set in clocks.
67 */ 66 */
68 pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */ 67 pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
69 pll.clockType = MXCLK_PLL; 68 pll.clockType = MXCLK_PLL;
70 69
71 /* 70 /*
72 * Call calcPllValue() to fill the other fields of PLL structure. 71 * Call sm750_calc_pll_value() to fill the other fields of the PLL
73 * Sometime, the chip cannot set up the exact clock 72 * structure. Sometimes, the chip cannot set up the exact
74 * required by the User. 73 * clock required by the User.
75 * Return value of calcPllValue gives the actual possible clock. 74 * Return value of sm750_calc_pll_value gives the actual possible
76 */ 75 * clock.
77 ulActualMxClk = calcPllValue(frequency, &pll); 76 */
77 ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
78 78
79 /* Master Clock Control: MXCLK_PLL */ 79 /* Master Clock Control: MXCLK_PLL */
80 POKE32(MXCLK_PLL_CTRL, formatPllReg(&pll)); 80 POKE32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
81 } 81 }
82} 82}
83 83
84static void setMemoryClock(unsigned int frequency) 84static void set_memory_clock(unsigned int frequency)
85{ 85{
86 unsigned int reg, divisor; 86 unsigned int reg, divisor;
87 87
88 /* Cheok_0509: For SM750LE, the memory clock is fixed. 88 /*
89 * Cheok_0509: For SM750LE, the memory clock is fixed.
89 * Nothing to set. 90 * Nothing to set.
90 */ 91 */
91 if (sm750_get_chip_type() == SM750LE) 92 if (sm750_get_chip_type() == SM750LE)
@@ -120,7 +121,7 @@ static void setMemoryClock(unsigned int frequency)
120 break; 121 break;
121 } 122 }
122 123
123 setCurrentGate(reg); 124 sm750_set_current_gate(reg);
124 } 125 }
125} 126}
126 127
@@ -132,18 +133,20 @@ static void setMemoryClock(unsigned int frequency)
132 * NOTE: 133 * NOTE:
133 * The maximum frequency the engine can run is 168MHz. 134 * The maximum frequency the engine can run is 168MHz.
134 */ 135 */
135static void setMasterClock(unsigned int frequency) 136static void set_master_clock(unsigned int frequency)
136{ 137{
137 unsigned int reg, divisor; 138 unsigned int reg, divisor;
138 139
139 /* Cheok_0509: For SM750LE, the memory clock is fixed. 140 /*
141 * Cheok_0509: For SM750LE, the memory clock is fixed.
140 * Nothing to set. 142 * Nothing to set.
141 */ 143 */
142 if (sm750_get_chip_type() == SM750LE) 144 if (sm750_get_chip_type() == SM750LE)
143 return; 145 return;
144 146
145 if (frequency) { 147 if (frequency) {
146 /* Set the frequency to the maximum frequency 148 /*
149 * Set the frequency to the maximum frequency
147 * that the SM750 engine can run, which is about 190 MHz. 150 * that the SM750 engine can run, which is about 190 MHz.
148 */ 151 */
149 if (frequency > MHz(190)) 152 if (frequency > MHz(190))
@@ -170,11 +173,11 @@ static void setMasterClock(unsigned int frequency)
170 break; 173 break;
171 } 174 }
172 175
173 setCurrentGate(reg); 176 sm750_set_current_gate(reg);
174 } 177 }
175} 178}
176 179
177unsigned int ddk750_getVMSize(void) 180unsigned int ddk750_get_vm_size(void)
178{ 181{
179 unsigned int reg; 182 unsigned int reg;
180 unsigned int data; 183 unsigned int data;
@@ -206,18 +209,18 @@ unsigned int ddk750_getVMSize(void)
206 return data; 209 return data;
207} 210}
208 211
209int ddk750_initHw(initchip_param_t *pInitParam) 212int ddk750_init_hw(struct initchip_param *pInitParam)
210{ 213{
211 unsigned int reg; 214 unsigned int reg;
212 215
213 if (pInitParam->powerMode != 0) 216 if (pInitParam->powerMode != 0)
214 pInitParam->powerMode = 0; 217 pInitParam->powerMode = 0;
215 setPowerMode(pInitParam->powerMode); 218 sm750_set_power_mode(pInitParam->powerMode);
216 219
217 /* Enable display power gate & LOCALMEM power gate*/ 220 /* Enable display power gate & LOCALMEM power gate*/
218 reg = PEEK32(CURRENT_GATE); 221 reg = PEEK32(CURRENT_GATE);
219 reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM); 222 reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
220 setCurrentGate(reg); 223 sm750_set_current_gate(reg);
221 224
222 if (sm750_get_chip_type() != SM750LE) { 225 if (sm750_get_chip_type() != SM750LE) {
223 /* set panel pll and graphic mode via mmio_88 */ 226 /* set panel pll and graphic mode via mmio_88 */
@@ -233,16 +236,17 @@ int ddk750_initHw(initchip_param_t *pInitParam)
233 } 236 }
234 237
235 /* Set the Main Chip Clock */ 238 /* Set the Main Chip Clock */
236 setChipClock(MHz((unsigned int)pInitParam->chipClock)); 239 set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
237 240
238 /* Set up memory clock. */ 241 /* Set up memory clock. */
239 setMemoryClock(MHz(pInitParam->memClock)); 242 set_memory_clock(MHz(pInitParam->memClock));
240 243
241 /* Set up master clock */ 244 /* Set up master clock */
242 setMasterClock(MHz(pInitParam->masterClock)); 245 set_master_clock(MHz(pInitParam->masterClock));
243 246
244 247
245 /* Reset the memory controller. 248 /*
249 * Reset the memory controller.
246 * If the memory controller is not reset in SM750, 250 * If the memory controller is not reset in SM750,
247 * the system might hang when sw accesses the memory. 251 * the system might hang when sw accesses the memory.
248 * The memory should be resetted after changing the MXCLK. 252 * The memory should be resetted after changing the MXCLK.
@@ -257,7 +261,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
257 } 261 }
258 262
259 if (pInitParam->setAllEngOff == 1) { 263 if (pInitParam->setAllEngOff == 1) {
260 enable2DEngine(0); 264 sm750_enable_2d_engine(0);
261 265
262 /* Disable Overlay, if a former application left it on */ 266 /* Disable Overlay, if a former application left it on */
263 reg = PEEK32(VIDEO_DISPLAY_CTRL); 267 reg = PEEK32(VIDEO_DISPLAY_CTRL);
@@ -280,7 +284,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
280 POKE32(DMA_ABORT_INTERRUPT, reg); 284 POKE32(DMA_ABORT_INTERRUPT, reg);
281 285
282 /* Disable DMA Power, if a former application left it on */ 286 /* Disable DMA Power, if a former application left it on */
283 enableDMA(0); 287 sm750_enable_dma(0);
284 } 288 }
285 289
286 /* We can add more initialization as needed. */ 290 /* We can add more initialization as needed. */
@@ -305,9 +309,10 @@ int ddk750_initHw(initchip_param_t *pInitParam)
305 * M = {1,...,255} 309 * M = {1,...,255}
306 * N = {2,...,15} 310 * N = {2,...,15}
307 */ 311 */
308unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) 312unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll)
309{ 313{
310 /* as sm750 register definition, 314 /*
315 * as sm750 register definition,
311 * N located in 2,15 and M located in 1,255 316 * N located in 2,15 and M located in 1,255
312 */ 317 */
313 int N, M, X, d; 318 int N, M, X, d;
@@ -319,7 +324,8 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
319 int max_d = 6; 324 int max_d = 6;
320 325
321 if (sm750_get_chip_type() == SM750LE) { 326 if (sm750_get_chip_type() == SM750LE) {
322 /* SM750LE don't have 327 /*
328 * SM750LE don't have
323 * programmable PLL and M/N values to work on. 329 * programmable PLL and M/N values to work on.
324 * Just return the requested clock. 330 * Just return the requested clock.
325 */ 331 */
@@ -331,14 +337,16 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
331 request = request_orig / 1000; 337 request = request_orig / 1000;
332 input = pll->inputFreq / 1000; 338 input = pll->inputFreq / 1000;
333 339
334 /* for MXCLK register, 340 /*
341 * for MXCLK register,
335 * no POD provided, so need be treated differently 342 * no POD provided, so need be treated differently
336 */ 343 */
337 if (pll->clockType == MXCLK_PLL) 344 if (pll->clockType == MXCLK_PLL)
338 max_d = 3; 345 max_d = 3;
339 346
340 for (N = 15; N > 1; N--) { 347 for (N = 15; N > 1; N--) {
341 /* RN will not exceed maximum long 348 /*
349 * RN will not exceed maximum long
342 * if @request <= 285 MHZ (for 32bit cpu) 350 * if @request <= 285 MHZ (for 32bit cpu)
343 */ 351 */
344 RN = N * request; 352 RN = N * request;
@@ -373,7 +381,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
373 return ret; 381 return ret;
374} 382}
375 383
376unsigned int formatPllReg(pll_value_t *pPLL) 384unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
377{ 385{
378#ifndef VALIDATION_CHIP 386#ifndef VALIDATION_CHIP
379 unsigned int POD = pPLL->POD; 387 unsigned int POD = pPLL->POD;
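
The renames above keep the PLL helpers' calling pattern intact: fill a struct pll_value with the reference clock and clock type, let sm750_calc_pll_value() pick M/N/OD for the closest achievable rate, then write the packed value from sm750_format_pll_reg(). A caller sketch modeled on programModeRegisters() in ddk750_mode.c; the function name here is made up and the constants come from the ddk750 headers:

/* Sketch: program the CRT pixel clock with the renamed helpers. */
static void example_set_crt_pixel_clock(unsigned int pixel_clock_hz)
{
        struct pll_value pll;
        unsigned int actual_hz;

        pll.inputFreq = DEFAULT_INPUT_CLOCK;    /* reference clock */
        pll.clockType = SECONDARY_PLL;          /* CRT path PLL */

        /* fills pll.M/N/OD/POD and returns the rate actually produced */
        actual_hz = sm750_calc_pll_value(pixel_clock_hz, &pll);

        POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(&pll));
        pr_debug("requested %u Hz, programmed %u Hz\n",
                 pixel_clock_hz, actual_hz);
}
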
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 14357fd1cc6b..e63b8b293816 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -6,6 +6,14 @@
6#endif 6#endif
7 7
8#include <linux/io.h> 8#include <linux/io.h>
9#include <linux/ioport.h>
10#include <linux/uaccess.h>
11
12/* software control endianness */
13#define PEEK32(addr) readl(addr + mmio750)
14#define POKE32(addr, data) writel(data, addr + mmio750)
15
16extern void __iomem *mmio750;
9 17
10/* This is all the chips recognized by this library */ 18/* This is all the chips recognized by this library */
11typedef enum _logical_chip_type_t { 19typedef enum _logical_chip_type_t {
@@ -25,7 +33,7 @@ typedef enum _clock_type_t {
25} 33}
26clock_type_t; 34clock_type_t;
27 35
28typedef struct _pll_value_t { 36struct pll_value {
29 clock_type_t clockType; 37 clock_type_t clockType;
30 unsigned long inputFreq; /* Input clock frequency to the PLL */ 38 unsigned long inputFreq; /* Input clock frequency to the PLL */
31 39
@@ -34,46 +42,55 @@ typedef struct _pll_value_t {
34 unsigned long N; 42 unsigned long N;
35 unsigned long OD; 43 unsigned long OD;
36 unsigned long POD; 44 unsigned long POD;
37} 45};
38pll_value_t;
39 46
40/* input struct to initChipParam() function */ 47/* input struct to initChipParam() function */
41typedef struct _initchip_param_t { 48struct initchip_param {
42 unsigned short powerMode; /* Use power mode 0 or 1 */ 49 /* Use power mode 0 or 1 */
43 unsigned short chipClock; /** 50 unsigned short powerMode;
44 * Speed of main chip clock in MHz unit 51
45 * 0 = keep the current clock setting 52 /*
46 * Others = the new main chip clock 53 * Speed of main chip clock in MHz unit
47 */ 54 * 0 = keep the current clock setting
48 unsigned short memClock; /** 55 * Others = the new main chip clock
49 * Speed of memory clock in MHz unit 56 */
50 * 0 = keep the current clock setting 57 unsigned short chipClock;
51 * Others = the new memory clock 58
52 */ 59 /*
53 unsigned short masterClock; /** 60 * Speed of memory clock in MHz unit
54 * Speed of master clock in MHz unit 61 * 0 = keep the current clock setting
55 * 0 = keep the current clock setting 62 * Others = the new memory clock
56 * Others = the new master clock 63 */
57 */ 64 unsigned short memClock;
58 unsigned short setAllEngOff; /** 65
59 * 0 = leave all engine state untouched. 66 /*
60 * 1 = make sure they are off: 2D, Overlay, 67 * Speed of master clock in MHz unit
61 * video alpha, alpha, hardware cursors 68 * 0 = keep the current clock setting
62 */ 69 * Others = the new master clock
63 unsigned char resetMemory; /** 70 */
64 * 0 = Do not reset the memory controller 71 unsigned short masterClock;
65 * 1 = Reset the memory controller 72
66 */ 73 /*
74 * 0 = leave all engine state untouched.
75 * 1 = make sure they are off: 2D, Overlay,
76 * video alpha, alpha, hardware cursors
77 */
78 unsigned short setAllEngOff;
79
80 /*
81 * 0 = Do not reset the memory controller
82 * 1 = Reset the memory controller
83 */
84 unsigned char resetMemory;
67 85
68 /* More initialization parameter can be added if needed */ 86 /* More initialization parameter can be added if needed */
69} 87};
70initchip_param_t;
71 88
72logical_chip_type_t sm750_get_chip_type(void); 89logical_chip_type_t sm750_get_chip_type(void);
73unsigned int calcPllValue(unsigned int request, pll_value_t *pll); 90void sm750_set_chip_type(unsigned short devId, u8 revId);
74unsigned int formatPllReg(pll_value_t *pPLL); 91unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
75void ddk750_set_mmio(void __iomem *, unsigned short, char); 92unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
76unsigned int ddk750_getVMSize(void); 93unsigned int ddk750_get_vm_size(void);
77int ddk750_initHw(initchip_param_t *); 94int ddk750_init_hw(struct initchip_param *);
78 95
79#endif 96#endif
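
With the initchip_param_t typedef turned into struct initchip_param, a caller fills the struct and hands it to ddk750_init_hw(); a clock field of 0 keeps the current setting. A minimal sketch with illustrative (not recommended) numbers:

/* Sketch: bring the chip up with explicit clocks and all engines off.
 * Field meanings follow the comments in ddk750_chip.h above. */
static int example_chip_bringup(void)
{
        struct initchip_param param = {
                .powerMode    = 0,      /* power mode 0, the reset default */
                .chipClock    = 290,    /* MHz; 0 would keep the current clock */
                .memClock     = 333,    /* MHz */
                .masterClock  = 112,    /* MHz */
                .setAllEngOff = 1,      /* force 2D/overlay/cursor off */
                .resetMemory  = 1,      /* reset the memory controller */
        };

        return ddk750_init_hw(&param);
}
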
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 4023c476b9e4..c347803f7e19 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -1,11 +1,9 @@
1#include "ddk750_reg.h" 1#include "ddk750_reg.h"
2#include "ddk750_help.h" 2#include "ddk750_chip.h"
3#include "ddk750_display.h" 3#include "ddk750_display.h"
4#include "ddk750_power.h" 4#include "ddk750_power.h"
5#include "ddk750_dvi.h" 5#include "ddk750_dvi.h"
6 6
7#define primaryWaitVerticalSync(delay) waitNextVerticalSync(0, delay)
8
9static void setDisplayControl(int ctrl, int disp_state) 7static void setDisplayControl(int ctrl, int disp_state)
10{ 8{
11 /* state != 0 means turn on both timing & plane en_bit */ 9 /* state != 0 means turn on both timing & plane en_bit */
@@ -61,55 +59,28 @@ static void setDisplayControl(int ctrl, int disp_state)
61 } 59 }
62} 60}
63 61
64static void waitNextVerticalSync(int ctrl, int delay) 62static void primary_wait_vertical_sync(int delay)
65{ 63{
66 unsigned int status; 64 unsigned int status;
67 65
68 if (!ctrl) { 66 /*
69 /* primary controller */ 67 * Do not wait when the Primary PLL is off or display control is
68 * already off. This will prevent the software to wait forever.
69 */
70 if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
71 !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING))
72 return;
70 73
71 /* 74 while (delay-- > 0) {
72 * Do not wait when the Primary PLL is off or display control is 75 /* Wait for end of vsync. */
73 * already off. This will prevent the software to wait forever. 76 do {
74 */ 77 status = PEEK32(SYSTEM_CTRL);
75 if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) || 78 } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
76 !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
77 return;
78 }
79
80 while (delay-- > 0) {
81 /* Wait for end of vsync. */
82 do {
83 status = PEEK32(SYSTEM_CTRL);
84 } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
85
86 /* Wait for start of vsync. */
87 do {
88 status = PEEK32(SYSTEM_CTRL);
89 } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
90 }
91 79
92 } else { 80 /* Wait for start of vsync. */
93 /* 81 do {
94 * Do not wait when the Primary PLL is off or display control is 82 status = PEEK32(SYSTEM_CTRL);
95 * already off. This will prevent the software to wait forever. 83 } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
96 */
97 if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
98 !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
99 return;
100 }
101
102 while (delay-- > 0) {
103 /* Wait for end of vsync. */
104 do {
105 status = PEEK32(SYSTEM_CTRL);
106 } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
107
108 /* Wait for start of vsync. */
109 do {
110 status = PEEK32(SYSTEM_CTRL);
111 } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
112 }
113 } 84 }
114} 85}
115 86
@@ -121,22 +92,22 @@ static void swPanelPowerSequence(int disp, int delay)
121 reg = PEEK32(PANEL_DISPLAY_CTRL); 92 reg = PEEK32(PANEL_DISPLAY_CTRL);
122 reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0); 93 reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
123 POKE32(PANEL_DISPLAY_CTRL, reg); 94 POKE32(PANEL_DISPLAY_CTRL, reg);
124 primaryWaitVerticalSync(delay); 95 primary_wait_vertical_sync(delay);
125 96
126 reg = PEEK32(PANEL_DISPLAY_CTRL); 97 reg = PEEK32(PANEL_DISPLAY_CTRL);
127 reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0); 98 reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
128 POKE32(PANEL_DISPLAY_CTRL, reg); 99 POKE32(PANEL_DISPLAY_CTRL, reg);
129 primaryWaitVerticalSync(delay); 100 primary_wait_vertical_sync(delay);
130 101
131 reg = PEEK32(PANEL_DISPLAY_CTRL); 102 reg = PEEK32(PANEL_DISPLAY_CTRL);
132 reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0); 103 reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
133 POKE32(PANEL_DISPLAY_CTRL, reg); 104 POKE32(PANEL_DISPLAY_CTRL, reg);
134 primaryWaitVerticalSync(delay); 105 primary_wait_vertical_sync(delay);
135 106
136 reg = PEEK32(PANEL_DISPLAY_CTRL); 107 reg = PEEK32(PANEL_DISPLAY_CTRL);
137 reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0); 108 reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
138 POKE32(PANEL_DISPLAY_CTRL, reg); 109 POKE32(PANEL_DISPLAY_CTRL, reg);
139 primaryWaitVerticalSync(delay); 110 primary_wait_vertical_sync(delay);
140} 111}
141 112
142void ddk750_setLogicalDispOut(disp_output_t output) 113void ddk750_setLogicalDispOut(disp_output_t output)
@@ -182,5 +153,5 @@ void ddk750_setLogicalDispOut(disp_output_t output)
182 setDAC((output & DAC_MASK) >> DAC_OFFSET); 153 setDAC((output & DAC_MASK) >> DAC_OFFSET);
183 154
184 if (output & DPMS_USAGE) 155 if (output & DPMS_USAGE)
185 ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET); 156 ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
186} 157}
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index e3fde428c52b..8abca88f089e 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -1,7 +1,8 @@
1#ifndef DDK750_DISPLAY_H__ 1#ifndef DDK750_DISPLAY_H__
2#define DDK750_DISPLAY_H__ 2#define DDK750_DISPLAY_H__
3 3
4/* panel path select 4/*
5 * panel path select
5 * 80000[29:28] 6 * 80000[29:28]
6 */ 7 */
7 8
@@ -12,7 +13,8 @@
12#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE) 13#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE)
13 14
14 15
15/* primary timing & plane enable bit 16/*
17 * primary timing & plane enable bit
16 * 1: 80000[8] & 80000[2] on 18 * 1: 80000[8] & 80000[2] on
17 * 0: both off 19 * 0: both off
18 */ 20 */
@@ -23,7 +25,8 @@
23#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE) 25#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
24 26
25 27
26/* panel sequency status 28/*
29 * panel sequence status
27 * 80000[27:24] 30 * 80000[27:24]
28 */ 31 */
29#define PNL_SEQ_OFFSET 6 32#define PNL_SEQ_OFFSET 6
@@ -32,7 +35,8 @@
32#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE) 35#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
33#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE) 36#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
34 37
35/* dual digital output 38/*
39 * dual digital output
36 * 80000[19] 40 * 80000[19]
37 */ 41 */
38#define DUAL_TFT_OFFSET 8 42#define DUAL_TFT_OFFSET 8
@@ -41,7 +45,8 @@
41#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE) 45#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
42#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE) 46#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
43 47
44/* secondary timing & plane enable bit 48/*
49 * secondary timing & plane enable bit
45 * 1:80200[8] & 80200[2] on 50 * 1:80200[8] & 80200[2] on
46 * 0: both off 51 * 0: both off
47 */ 52 */
@@ -51,7 +56,8 @@
51#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE) 56#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE)
52#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE) 57#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE)
53 58
54/* crt path select 59/*
60 * crt path select
55 * 80200[19:18] 61 * 80200[19:18]
56 */ 62 */
57#define CRT_2_OFFSET 2 63#define CRT_2_OFFSET 2
@@ -61,7 +67,8 @@
61#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE) 67#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
62 68
63 69
64/* DAC affect both DVI and DSUB 70/*
71 * DAC affect both DVI and DSUB
65 * 4[20] 72 * 4[20]
66 */ 73 */
67#define DAC_OFFSET 7 74#define DAC_OFFSET 7
@@ -70,7 +77,8 @@
70#define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE) 77#define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE)
71#define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE) 78#define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE)
72 79
73/* DPMS only affect D-SUB head 80/*
81 * DPMS only affect D-SUB head
74 * 0[31:30] 82 * 0[31:30]
75 */ 83 */
76#define DPMS_OFFSET 9 84#define DPMS_OFFSET 9
@@ -81,7 +89,8 @@
81 89
82 90
83 91
84/* LCD1 means panel path TFT1 & panel path DVI (so enable DAC) 92/*
93 * LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
85 * CRT means crt path DSUB 94 * CRT means crt path DSUB
86 */ 95 */
87typedef enum _disp_output_t { 96typedef enum _disp_output_t {
@@ -89,7 +98,8 @@ typedef enum _disp_output_t {
89 do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON, 98 do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON,
90 do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON, 99 do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON,
91 do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON, 100 do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON,
92 /* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON, 101 /*
102 * do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
93 * do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON, 103 * do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON,
94 */ 104 */
95 do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON, 105 do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON,
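
Each definition in this header packs a field value at its OFFSET together with a USAGE bit that marks the field as present, so a disp_output_t value is just the OR of the fields it wants to touch. A decoding sketch in the style of ddk750_setLogicalDispOut() above (setDAC() is the file-local helper used there, so this only illustrates the pattern):

/* Sketch: act on a field only when its USAGE bit is set.
 * e.g. do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON. */
static void example_decode_output(disp_output_t output)
{
        if (output & DAC_USAGE)         /* DAC field present? */
                setDAC((output & DAC_MASK) >> DAC_OFFSET);

        if (output & DPMS_USAGE)        /* DPMS field present? */
                ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
}
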
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 8252f771ef9e..250c2f478778 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,6 +1,6 @@
1#define USE_DVICHIP 1#define USE_DVICHIP
2#ifdef USE_DVICHIP 2#ifdef USE_DVICHIP
3#include "ddk750_help.h" 3#include "ddk750_chip.h"
4#include "ddk750_reg.h" 4#include "ddk750_reg.h"
5#include "ddk750_dvi.h" 5#include "ddk750_dvi.h"
6#include "ddk750_sii164.h" 6#include "ddk750_sii164.h"
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
deleted file mode 100644
index 9637dd30d037..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ /dev/null
@@ -1,17 +0,0 @@
1#include "ddk750_help.h"
2
3void __iomem *mmio750;
4char revId750;
5unsigned short devId750;
6
7/* after driver mapped io registers, use this function first */
8void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
9{
10 mmio750 = addr;
11 devId750 = devId;
12 revId750 = revId;
13 if (revId == 0xfe)
14 printk("found sm750le\n");
15}
16
17
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
deleted file mode 100644
index 009db9213a73..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef DDK750_HELP_H__
2#define DDK750_HELP_H__
3#include "ddk750_chip.h"
4#ifndef USE_INTERNAL_REGISTER_ACCESS
5
6#include <linux/ioport.h>
7#include <linux/io.h>
8#include <linux/uaccess.h>
9
10/* software control endianness */
11#define PEEK32(addr) readl(addr + mmio750)
12#define POKE32(addr, data) writel(data, addr + mmio750)
13
14extern void __iomem *mmio750;
15extern char revId750;
16extern unsigned short devId750;
17#else
18/* implement if you want use it*/
19#endif
20
21#endif
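
With ddk750_help.[ch] gone, the devId750/revId750 globals are replaced by the chip type cached through sm750_set_chip_type(), while PEEK32/POKE32 and the mmio750 base pointer now live in ddk750_chip.h. A hedged sketch of what the probe-side code is expected to provide after this change; only mmio750, sm750_set_chip_type() and sm750_get_chip_type() come from the hunks above, the rest is illustrative:

/* Sketch: map the BAR elsewhere, then publish the MMIO base used by
 * PEEK32/POKE32 and record the chip type from the PCI IDs. */
static int example_publish_chip(struct pci_dev *pdev, void __iomem *base)
{
        u8 rev;

        mmio750 = base;                         /* PEEK32/POKE32 base */
        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
        sm750_set_chip_type(pdev->device, rev); /* 0x718 / 0x750 / LE */

        return sm750_get_chip_type() == SM_UNKNOWN ? -ENODEV : 0;
}
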
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index d391c127ead7..05d4a73aa1d4 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -1,6 +1,6 @@
1#define USE_HW_I2C 1#define USE_HW_I2C
2#ifdef USE_HW_I2C 2#ifdef USE_HW_I2C
3#include "ddk750_help.h" 3#include "ddk750_chip.h"
4#include "ddk750_reg.h" 4#include "ddk750_reg.h"
5#include "ddk750_hwi2c.h" 5#include "ddk750_hwi2c.h"
6#include "ddk750_power.h" 6#include "ddk750_power.h"
@@ -20,10 +20,11 @@ unsigned char bus_speed_mode
20 value |= (GPIO_MUX_30 | GPIO_MUX_31); 20 value |= (GPIO_MUX_30 | GPIO_MUX_31);
21 POKE32(GPIO_MUX, value); 21 POKE32(GPIO_MUX, value);
22 22
23 /* Enable Hardware I2C power. 23 /*
24 * Enable Hardware I2C power.
24 * TODO: Check if we need to enable GPIO power? 25 * TODO: Check if we need to enable GPIO power?
25 */ 26 */
26 enableI2C(1); 27 sm750_enable_i2c(1);
27 28
28 /* Enable the I2C Controller and set the bus speed mode */ 29 /* Enable the I2C Controller and set the bus speed mode */
29 value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN); 30 value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
@@ -44,7 +45,7 @@ void sm750_hw_i2c_close(void)
44 POKE32(I2C_CTRL, value); 45 POKE32(I2C_CTRL, value);
45 46
46 /* Disable I2C Power */ 47 /* Disable I2C Power */
47 enableI2C(0); 48 sm750_enable_i2c(0);
48 49
49 /* Set GPIO 30 & 31 back as GPIO pins */ 50 /* Set GPIO 30 & 31 back as GPIO pins */
50 value = PEEK32(GPIO_MUX); 51 value = PEEK32(GPIO_MUX);
@@ -92,7 +93,8 @@ static unsigned int hw_i2c_write_data(
92 /* Set the Device Address */ 93 /* Set the Device Address */
93 POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01); 94 POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01);
94 95
95 /* Write data. 96 /*
97 * Write data.
96 * Note: 98 * Note:
97 * Only 16 byte can be accessed per i2c start instruction. 99 * Only 16 byte can be accessed per i2c start instruction.
98 */ 100 */
@@ -158,7 +160,8 @@ static unsigned int hw_i2c_read_data(
158 /* Set the Device Address */ 160 /* Set the Device Address */
159 POKE32(I2C_SLAVE_ADDRESS, addr | 0x01); 161 POKE32(I2C_SLAVE_ADDRESS, addr | 0x01);
160 162
161 /* Read data and save them to the buffer. 163 /*
164 * Read data and save them to the buffer.
162 * Note: 165 * Note:
163 * Only 16 byte can be accessed per i2c start instruction. 166 * Only 16 byte can be accessed per i2c start instruction.
164 */ 167 */
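
The "Only 16 byte can be accessed per i2c start instruction" note above is why callers loop over hw_i2c_write_data()/hw_i2c_read_data(). A chunking sketch, assuming the (addr, length, buffer) parameter order of the static helper above; illustrative only, not an exported API:

/* Sketch: push an arbitrary-length buffer through a controller that can
 * only move up to 16 bytes per START condition. */
static unsigned int example_i2c_write_all(unsigned char addr,
                                          unsigned char *buf,
                                          unsigned int len)
{
        unsigned int total = 0;

        while (len) {
                unsigned int chunk = len > 16 ? 16 : len;
                unsigned int done = hw_i2c_write_data(addr, chunk, buf);

                total += done;
                if (done < chunk)       /* controller reported a short write */
                        break;
                buf += done;
                len -= done;
        }
        return total;
}
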
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 05b83646c2d5..4a4b1de97a87 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -1,10 +1,10 @@
1 1
2#include "ddk750_help.h"
3#include "ddk750_reg.h" 2#include "ddk750_reg.h"
4#include "ddk750_mode.h" 3#include "ddk750_mode.h"
5#include "ddk750_chip.h" 4#include "ddk750_chip.h"
6 5
7/* SM750LE only: 6/*
7 * SM750LE only:
8 * This function takes care extra registers and bit fields required to set 8 * This function takes care extra registers and bit fields required to set
9 * up a mode in SM750LE 9 * up a mode in SM750LE
10 * 10 *
@@ -19,7 +19,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
19 x = pModeParam->horizontal_display_end; 19 x = pModeParam->horizontal_display_end;
20 y = pModeParam->vertical_display_end; 20 y = pModeParam->vertical_display_end;
21 21
22 /* SM750LE has to set up the top-left and bottom-right 22 /*
23 * SM750LE has to set up the top-left and bottom-right
23 * registers as well. 24 * registers as well.
24 * Note that normal SM750/SM718 only use those two register for 25 * Note that normal SM750/SM718 only use those two register for
25 * auto-centering mode. 26 * auto-centering mode.
@@ -31,7 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
31 CRT_AUTO_CENTERING_BR_BOTTOM_MASK) | 32 CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
32 ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK)); 33 ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
33 34
34 /* Assume common fields in dispControl have been properly set before 35 /*
36 * Assume common fields in dispControl have been properly set before
35 * calling this function. 37 * calling this function.
36 * This function only sets the extra fields in dispControl. 38 * This function only sets the extra fields in dispControl.
37 */ 39 */
@@ -72,7 +74,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
72 74
73 75
74/* only timing related registers will be programed */ 76/* only timing related registers will be programed */
75static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) 77static int programModeRegisters(mode_parameter_t *pModeParam,
78 struct pll_value *pll)
76{ 79{
77 int ret = 0; 80 int ret = 0;
78 int cnt = 0; 81 int cnt = 0;
@@ -80,7 +83,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
80 83
81 if (pll->clockType == SECONDARY_PLL) { 84 if (pll->clockType == SECONDARY_PLL) {
82 /* programe secondary pixel clock */ 85 /* programe secondary pixel clock */
83 POKE32(CRT_PLL_CTRL, formatPllReg(pll)); 86 POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
84 POKE32(CRT_HORIZONTAL_TOTAL, 87 POKE32(CRT_HORIZONTAL_TOTAL,
85 (((pModeParam->horizontal_total - 1) << 88 (((pModeParam->horizontal_total - 1) <<
86 CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) & 89 CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -130,7 +133,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
130 } else if (pll->clockType == PRIMARY_PLL) { 133 } else if (pll->clockType == PRIMARY_PLL) {
131 unsigned int reserved; 134 unsigned int reserved;
132 135
133 POKE32(PANEL_PLL_CTRL, formatPllReg(pll)); 136 POKE32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
134 137
135 reg = ((pModeParam->horizontal_total - 1) << 138 reg = ((pModeParam->horizontal_total - 1) <<
136 PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) & 139 PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -176,14 +179,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
176 DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING | 179 DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
177 DISPLAY_CTRL_PLANE); 180 DISPLAY_CTRL_PLANE);
178 181
179 /* May a hardware bug or just my test chip (not confirmed). 182 /*
180 * PANEL_DISPLAY_CTRL register seems requiring few writes 183 * May a hardware bug or just my test chip (not confirmed).
181 * before a value can be successfully written in. 184 * PANEL_DISPLAY_CTRL register seems requiring few writes
182 * Added some masks to mask out the reserved bits. 185 * before a value can be successfully written in.
183 * Note: This problem happens by design. The hardware will wait for the 186 * Added some masks to mask out the reserved bits.
184 * next vertical sync to turn on/off the plane. 187 * Note: This problem happens by design. The hardware will wait
185 */ 188 * for the next vertical sync to turn on/off the plane.
186 189 */
187 POKE32(PANEL_DISPLAY_CTRL, tmp | reg); 190 POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
188 191
189 while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) != 192 while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
@@ -201,13 +204,13 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
201 204
202int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock) 205int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
203{ 206{
204 pll_value_t pll; 207 struct pll_value pll;
205 unsigned int uiActualPixelClk; 208 unsigned int uiActualPixelClk;
206 209
207 pll.inputFreq = DEFAULT_INPUT_CLOCK; 210 pll.inputFreq = DEFAULT_INPUT_CLOCK;
208 pll.clockType = clock; 211 pll.clockType = clock;
209 212
210 uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll); 213 uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
211 if (sm750_get_chip_type() == SM750LE) { 214 if (sm750_get_chip_type() == SM750LE) {
212 /* set graphic mode via IO method */ 215 /* set graphic mode via IO method */
213 outb_p(0x88, 0x3d4); 216 outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 7cc6169f884e..6167e30e8e01 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -1,8 +1,8 @@
1#include "ddk750_help.h" 1#include "ddk750_chip.h"
2#include "ddk750_reg.h" 2#include "ddk750_reg.h"
3#include "ddk750_power.h" 3#include "ddk750_power.h"
4 4
5void ddk750_setDPMS(DPMS_t state) 5void ddk750_set_dpms(DPMS_t state)
6{ 6{
7 unsigned int value; 7 unsigned int value;
8 8
@@ -17,7 +17,7 @@ void ddk750_setDPMS(DPMS_t state)
17 } 17 }
18} 18}
19 19
20static unsigned int getPowerMode(void) 20static unsigned int get_power_mode(void)
21{ 21{
22 if (sm750_get_chip_type() == SM750LE) 22 if (sm750_get_chip_type() == SM750LE)
23 return 0; 23 return 0;
@@ -29,26 +29,26 @@ static unsigned int getPowerMode(void)
29 * SM50x can operate in one of three modes: 0, 1 or Sleep. 29 * SM50x can operate in one of three modes: 0, 1 or Sleep.
30 * On hardware reset, power mode 0 is default. 30 * On hardware reset, power mode 0 is default.
31 */ 31 */
32void setPowerMode(unsigned int powerMode) 32void sm750_set_power_mode(unsigned int mode)
33{ 33{
34 unsigned int control_value = 0; 34 unsigned int ctrl = 0;
35 35
36 control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK; 36 ctrl = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
37 37
38 if (sm750_get_chip_type() == SM750LE) 38 if (sm750_get_chip_type() == SM750LE)
39 return; 39 return;
40 40
41 switch (powerMode) { 41 switch (mode) {
42 case POWER_MODE_CTRL_MODE_MODE0: 42 case POWER_MODE_CTRL_MODE_MODE0:
43 control_value |= POWER_MODE_CTRL_MODE_MODE0; 43 ctrl |= POWER_MODE_CTRL_MODE_MODE0;
44 break; 44 break;
45 45
46 case POWER_MODE_CTRL_MODE_MODE1: 46 case POWER_MODE_CTRL_MODE_MODE1:
47 control_value |= POWER_MODE_CTRL_MODE_MODE1; 47 ctrl |= POWER_MODE_CTRL_MODE_MODE1;
48 break; 48 break;
49 49
50 case POWER_MODE_CTRL_MODE_SLEEP: 50 case POWER_MODE_CTRL_MODE_SLEEP:
51 control_value |= POWER_MODE_CTRL_MODE_SLEEP; 51 ctrl |= POWER_MODE_CTRL_MODE_SLEEP;
52 break; 52 break;
53 53
54 default: 54 default:
@@ -56,44 +56,28 @@ void setPowerMode(unsigned int powerMode)
56 } 56 }
57 57
58 /* Set up other fields in Power Control Register */ 58 /* Set up other fields in Power Control Register */
59 if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) { 59 if (mode == POWER_MODE_CTRL_MODE_SLEEP) {
60 control_value &= ~POWER_MODE_CTRL_OSC_INPUT; 60 ctrl &= ~POWER_MODE_CTRL_OSC_INPUT;
61#ifdef VALIDATION_CHIP 61#ifdef VALIDATION_CHIP
62 control_value &= ~POWER_MODE_CTRL_336CLK; 62 ctrl &= ~POWER_MODE_CTRL_336CLK;
63#endif 63#endif
64 } else { 64 } else {
65 control_value |= POWER_MODE_CTRL_OSC_INPUT; 65 ctrl |= POWER_MODE_CTRL_OSC_INPUT;
66#ifdef VALIDATION_CHIP 66#ifdef VALIDATION_CHIP
67 control_value |= POWER_MODE_CTRL_336CLK; 67 ctrl |= POWER_MODE_CTRL_336CLK;
68#endif 68#endif
69 } 69 }
70 70
71 /* Program new power mode. */ 71 /* Program new power mode. */
72 POKE32(POWER_MODE_CTRL, control_value); 72 POKE32(POWER_MODE_CTRL, ctrl);
73} 73}
74 74
75void setCurrentGate(unsigned int gate) 75void sm750_set_current_gate(unsigned int gate)
76{ 76{
77 unsigned int gate_reg; 77 if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1)
78 unsigned int mode; 78 POKE32(MODE1_GATE, gate);
79 79 else
80 /* Get current power mode. */ 80 POKE32(MODE0_GATE, gate);
81 mode = getPowerMode();
82
83 switch (mode) {
84 case POWER_MODE_CTRL_MODE_MODE0:
85 gate_reg = MODE0_GATE;
86 break;
87
88 case POWER_MODE_CTRL_MODE_MODE1:
89 gate_reg = MODE1_GATE;
90 break;
91
92 default:
93 gate_reg = MODE0_GATE;
94 break;
95 }
96 POKE32(gate_reg, gate);
97} 81}
98 82
99 83
@@ -101,7 +85,7 @@ void setCurrentGate(unsigned int gate)
101/* 85/*
102 * This function enable/disable the 2D engine. 86 * This function enable/disable the 2D engine.
103 */ 87 */
104void enable2DEngine(unsigned int enable) 88void sm750_enable_2d_engine(unsigned int enable)
105{ 89{
106 u32 gate; 90 u32 gate;
107 91
@@ -111,10 +95,10 @@ void enable2DEngine(unsigned int enable)
111 else 95 else
112 gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC); 96 gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
113 97
114 setCurrentGate(gate); 98 sm750_set_current_gate(gate);
115} 99}
116 100
117void enableDMA(unsigned int enable) 101void sm750_enable_dma(unsigned int enable)
118{ 102{
119 u32 gate; 103 u32 gate;
120 104
@@ -125,13 +109,13 @@ void enableDMA(unsigned int enable)
125 else 109 else
126 gate &= ~CURRENT_GATE_DMA; 110 gate &= ~CURRENT_GATE_DMA;
127 111
128 setCurrentGate(gate); 112 sm750_set_current_gate(gate);
129} 113}
130 114
131/* 115/*
132 * This function enable/disable the GPIO Engine 116 * This function enable/disable the GPIO Engine
133 */ 117 */
134void enableGPIO(unsigned int enable) 118void sm750_enable_gpio(unsigned int enable)
135{ 119{
136 u32 gate; 120 u32 gate;
137 121
@@ -142,13 +126,13 @@ void enableGPIO(unsigned int enable)
142 else 126 else
143 gate &= ~CURRENT_GATE_GPIO; 127 gate &= ~CURRENT_GATE_GPIO;
144 128
145 setCurrentGate(gate); 129 sm750_set_current_gate(gate);
146} 130}
147 131
148/* 132/*
149 * This function enable/disable the I2C Engine 133 * This function enable/disable the I2C Engine
150 */ 134 */
151void enableI2C(unsigned int enable) 135void sm750_enable_i2c(unsigned int enable)
152{ 136{
153 u32 gate; 137 u32 gate;
154 138
@@ -159,7 +143,7 @@ void enableI2C(unsigned int enable)
159 else 143 else
160 gate &= ~CURRENT_GATE_I2C; 144 gate &= ~CURRENT_GATE_I2C;
161 145
162 setCurrentGate(gate); 146 sm750_set_current_gate(gate);
163} 147}
164 148
165 149
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 5963691f9a68..eb088b0d805f 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -14,37 +14,29 @@ DPMS_t;
14 (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \ 14 (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
15} 15}
16 16
17void ddk750_setDPMS(DPMS_t); 17void ddk750_set_dpms(DPMS_t);
18 18void sm750_set_power_mode(unsigned int powerMode);
19/* 19void sm750_set_current_gate(unsigned int gate);
20 * This function sets the current power mode
21 */
22void setPowerMode(unsigned int powerMode);
23
24/*
25 * This function sets current gate
26 */
27void setCurrentGate(unsigned int gate);
28 20
29/* 21/*
30 * This function enable/disable the 2D engine. 22 * This function enable/disable the 2D engine.
31 */ 23 */
32void enable2DEngine(unsigned int enable); 24void sm750_enable_2d_engine(unsigned int enable);
33 25
34/* 26/*
35 * This function enable/disable the DMA Engine 27 * This function enable/disable the DMA Engine
36 */ 28 */
37void enableDMA(unsigned int enable); 29void sm750_enable_dma(unsigned int enable);
38 30
39/* 31/*
40 * This function enable/disable the GPIO Engine 32 * This function enable/disable the GPIO Engine
41 */ 33 */
42void enableGPIO(unsigned int enable); 34void sm750_enable_gpio(unsigned int enable);
43 35
44/* 36/*
45 * This function enable/disable the I2C Engine 37 * This function enable/disable the I2C Engine
46 */ 38 */
47void enableI2C(unsigned int enable); 39void sm750_enable_i2c(unsigned int enable);
48 40
49 41
50#endif 42#endif
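
All of the sm750_enable_*() helpers declared above share one read-modify-write pattern on CURRENT_GATE, with sm750_set_current_gate() steering the value to MODE0_GATE or MODE1_GATE according to the current power mode. The common pattern, condensed (the CURRENT_GATE_* masks come from ddk750_reg.h):

/* Sketch: the gate-toggling pattern behind sm750_enable_2d_engine(),
 * sm750_enable_dma(), sm750_enable_gpio() and sm750_enable_i2c(). */
static void example_enable_gate(u32 mask, unsigned int enable)
{
        u32 gate = PEEK32(CURRENT_GATE);

        if (enable)
                gate |= mask;
        else
                gate &= ~mask;

        /* lands in MODE0_GATE or MODE1_GATE depending on the power mode */
        sm750_set_current_gate(gate);
}

/* example_enable_gate(CURRENT_GATE_DMA, 1) would mirror sm750_enable_dma(1) */
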
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 99a8683e6383..259006ace219 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -173,7 +173,8 @@ long sii164InitChip(
173 173
174 i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config); 174 i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
175 175
176 /* De-skew enabled with default 111b value. 176 /*
177 * De-skew enabled with default 111b value.
177 * This fixes some artifacts problem in some mode on board 2.2. 178 * This fixes some artifacts problem in some mode on board 2.2.
178 * Somehow this fix does not affect board 2.1. 179 * Somehow this fix does not affect board 2.1.
179 */ 180 */
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 72a42330e7a1..b8a4e44359af 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -1,21 +1,20 @@
1/******************************************************************* 1/*
2* 2 * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
3* Copyright (c) 2007 by Silicon Motion, Inc. (SMI) 3 *
4* 4 * All rights are reserved. Reproduction or in part is prohibited
5* All rights are reserved. Reproduction or in part is prohibited 5 * without the written consent of the copyright owner.
6* without the written consent of the copyright owner. 6 *
7* 7 * swi2c.c --- SM750/SM718 DDK
8* swi2c.c --- SM750/SM718 DDK 8 * This file contains the source code for I2C using software
9* This file contains the source code for I2C using software 9 * implementation.
10* implementation. 10 */
11* 11
12*******************************************************************/ 12#include "ddk750_chip.h"
13#include "ddk750_help.h"
14#include "ddk750_reg.h" 13#include "ddk750_reg.h"
15#include "ddk750_swi2c.h" 14#include "ddk750_swi2c.h"
16#include "ddk750_power.h" 15#include "ddk750_power.h"
17 16
18/******************************************************************* 17/*
19 * I2C Software Master Driver: 18 * I2C Software Master Driver:
20 * =========================== 19 * ===========================
21 * Each i2c cycle is split into 4 sections. Each of these section marks 20 * Each i2c cycle is split into 4 sections. Each of these section marks
@@ -51,7 +50,7 @@
51 * SCL | L | | H | | 50 * SCL | L | | H | |
52 * ---------------+---+---+---+---+ 51 * ---------------+---+---+---+---+
53 * 52 *
54 ******************************************************************/ 53 */
55 54
56/* GPIO pins used for this I2C. It ranges from 0 to 63. */ 55/* GPIO pins used for this I2C. It ranges from 0 to 63. */
57static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL; 56static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL;
@@ -429,7 +428,7 @@ long sm750_sw_i2c_init(
429 PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio)); 428 PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio));
430 429
431 /* Enable GPIO power */ 430 /* Enable GPIO power */
432 enableGPIO(1); 431 sm750_enable_gpio(1);
433 432
434 /* Clear the i2c lines. */ 433 /* Clear the i2c lines. */
435 for (i = 0; i < 9; i++) 434 for (i = 0; i < 9; i++)
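
The banner comment reflowed above describes the usual software-I2C scheme: drive SCL/SDA from GPIOs and split each bit so SDA only changes while SCL is low. A generic bit-bang sketch under assumed set_scl()/set_sda()/get_sda()/i2c_delay() GPIO helpers (the driver's own static helpers are not part of this hunk):

/* Sketch: clock out one byte MSB-first and sample the ACK, changing SDA
 * only while SCL is low. All four helper functions are assumptions. */
static int example_sw_i2c_write_byte(unsigned char data)
{
        int i, ack;

        for (i = 7; i >= 0; i--) {
                set_scl(0);
                set_sda((data >> i) & 1);       /* data changes with SCL low */
                i2c_delay();
                set_scl(1);                     /* slave samples on SCL high */
                i2c_delay();
        }

        set_scl(0);
        set_sda(1);                             /* release SDA for the ACK */
        i2c_delay();
        set_scl(1);
        ack = get_sda();                        /* 0 = acknowledged */
        set_scl(0);

        return ack ? -1 : 0;
}
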
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index b53629cda095..5a9466efc7bd 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -1,15 +1,15 @@
1/******************************************************************* 1/*
2* 2 * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
3* Copyright (c) 2007 by Silicon Motion, Inc. (SMI) 3 *
4* 4 * All rights are reserved. Reproduction or in part is prohibited
5* All rights are reserved. Reproduction or in part is prohibited 5 * without the written consent of the copyright owner.
6* without the written consent of the copyright owner. 6 *
7* 7 * swi2c.h --- SM750/SM718 DDK
8* swi2c.h --- SM750/SM718 DDK 8 * This file contains the definitions for i2c using software
9* This file contains the definitions for i2c using software 9 * implementation.
10* implementation. 10 *
11* 11 */
12*******************************************************************/ 12
13#ifndef _SWI2C_H_ 13#ifndef _SWI2C_H_
14#define _SWI2C_H_ 14#define _SWI2C_H_
15 15
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e250142c..e9632f162f99 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -118,14 +118,14 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
118 return -ENXIO; 118 return -ENXIO;
119 } 119 }
120 120
121 hw_cursor_disable(cursor); 121 sm750_hw_cursor_disable(cursor);
122 if (fbcursor->set & FB_CUR_SETSIZE) 122 if (fbcursor->set & FB_CUR_SETSIZE)
123 hw_cursor_setSize(cursor, 123 sm750_hw_cursor_setSize(cursor,
124 fbcursor->image.width, 124 fbcursor->image.width,
125 fbcursor->image.height); 125 fbcursor->image.height);
126 126
127 if (fbcursor->set & FB_CUR_SETPOS) 127 if (fbcursor->set & FB_CUR_SETPOS)
128 hw_cursor_setPos(cursor, 128 sm750_hw_cursor_setPos(cursor,
129 fbcursor->image.dx - info->var.xoffset, 129 fbcursor->image.dx - info->var.xoffset,
130 fbcursor->image.dy - info->var.yoffset); 130 fbcursor->image.dy - info->var.yoffset);
131 131
@@ -141,18 +141,18 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
141 ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) | 141 ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
142 ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11); 142 ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
143 143
144 hw_cursor_setColor(cursor, fg, bg); 144 sm750_hw_cursor_setColor(cursor, fg, bg);
145 } 145 }
146 146
147 if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { 147 if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
148 hw_cursor_setData(cursor, 148 sm750_hw_cursor_setData(cursor,
149 fbcursor->rop, 149 fbcursor->rop,
150 fbcursor->image.data, 150 fbcursor->image.data,
151 fbcursor->mask); 151 fbcursor->mask);
152 } 152 }
153 153
154 if (fbcursor->enable) 154 if (fbcursor->enable)
155 hw_cursor_enable(cursor); 155 sm750_hw_cursor_enable(cursor);
156 156
157 return 0; 157 return 0;
158} 158}
@@ -575,11 +575,11 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
575 return hw_sm750_crtc_checkMode(crtc, var); 575 return hw_sm750_crtc_checkMode(crtc, var);
576} 576}
577 577
578static int lynxfb_ops_setcolreg(unsigned regno, 578static int lynxfb_ops_setcolreg(unsigned int regno,
579 unsigned red, 579 unsigned int red,
580 unsigned green, 580 unsigned int green,
581 unsigned blue, 581 unsigned int blue,
582 unsigned transp, 582 unsigned int transp,
583 struct fb_info *info) 583 struct fb_info *info)
584{ 584{
585 struct lynxfb_par *par; 585 struct lynxfb_par *par;
@@ -788,7 +788,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
788 memset_io(crtc->cursor.vstart, 0, crtc->cursor.size); 788 memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
789 if (!g_hwcursor) { 789 if (!g_hwcursor) {
790 lynxfb_ops.fb_cursor = NULL; 790 lynxfb_ops.fb_cursor = NULL;
791 hw_cursor_disable(&crtc->cursor); 791 sm750_hw_cursor_disable(&crtc->cursor);
792 } 792 }
793 793
794 /* set info->fbops, must be set before fb_find_mode */ 794 /* set info->fbops, must be set before fb_find_mode */
@@ -947,13 +947,13 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
947 g_hwcursor = 3; 947 g_hwcursor = 3;
948 948
949 if (!src || !*src) { 949 if (!src || !*src) {
950 pr_warn("no specific g_option.\n"); 950 dev_warn(&sm750_dev->pdev->dev, "no specific g_option.\n");
951 goto NO_PARAM; 951 goto NO_PARAM;
952 } 952 }
953 953
954 while ((opt = strsep(&src, ":")) != NULL && *opt != 0) { 954 while ((opt = strsep(&src, ":")) != NULL && *opt != 0) {
955 pr_info("opt=%s\n", opt); 955 dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt);
956 pr_info("src=%s\n", src); 956 dev_info(&sm750_dev->pdev->dev, "src=%s\n", src);
957 957
958 if (!strncmp(opt, "swap", strlen("swap"))) 958 if (!strncmp(opt, "swap", strlen("swap")))
959 swap = 1; 959 swap = 1;
@@ -974,12 +974,12 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
974 else { 974 else {
975 if (!g_fbmode[0]) { 975 if (!g_fbmode[0]) {
976 g_fbmode[0] = opt; 976 g_fbmode[0] = opt;
977 pr_info("find fbmode0 : %s\n", g_fbmode[0]); 977 dev_info(&sm750_dev->pdev->dev, "find fbmode0 : %s\n", g_fbmode[0]);
978 } else if (!g_fbmode[1]) { 978 } else if (!g_fbmode[1]) {
979 g_fbmode[1] = opt; 979 g_fbmode[1] = opt;
980 pr_info("find fbmode1 : %s\n", g_fbmode[1]); 980 dev_info(&sm750_dev->pdev->dev, "find fbmode1 : %s\n", g_fbmode[1]);
981 } else { 981 } else {
982 pr_warn("How many view you wann set?\n"); 982 dev_warn(&sm750_dev->pdev->dev, "How many view you wann set?\n");
983 } 983 }
984 } 984 }
985 } 985 }
@@ -1083,10 +1083,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
1083 * if some chip need specific function, 1083 * if some chip need specific function,
1084 * please hook it in smXXX_set_drv routine 1084 * please hook it in smXXX_set_drv routine
1085 */ 1085 */
1086 sm750_dev->accel.de_init = hw_de_init; 1086 sm750_dev->accel.de_init = sm750_hw_de_init;
1087 sm750_dev->accel.de_fillrect = hw_fillrect; 1087 sm750_dev->accel.de_fillrect = sm750_hw_fillrect;
1088 sm750_dev->accel.de_copyarea = hw_copyarea; 1088 sm750_dev->accel.de_copyarea = sm750_hw_copyarea;
1089 sm750_dev->accel.de_imageblit = hw_imageblit; 1089 sm750_dev->accel.de_imageblit = sm750_hw_imageblit;
1090 } 1090 }
1091 1091
1092 /* call chip specific setup routine */ 1092 /* call chip specific setup routine */
@@ -1188,7 +1188,7 @@ static int __init lynxfb_setup(char *options)
1188 return 0; 1188 return 0;
1189} 1189}
1190 1190
1191static struct pci_device_id smi_pci_table[] = { 1191static const struct pci_device_id smi_pci_table[] = {
1192 { PCI_DEVICE(0x126f, 0x0750), }, 1192 { PCI_DEVICE(0x126f, 0x0750), },
1193 {0,} 1193 {0,}
1194}; 1194};
@@ -1209,7 +1209,6 @@ static struct pci_driver lynxfb_driver = {
1209static int __init lynxfb_init(void) 1209static int __init lynxfb_init(void)
1210{ 1210{
1211 char *option; 1211 char *option;
1212 int ret;
1213 1212
1214#ifdef MODULE 1213#ifdef MODULE
1215 option = g_option; 1214 option = g_option;
@@ -1219,8 +1218,7 @@ static int __init lynxfb_init(void)
1219#endif 1218#endif
1220 1219
1221 lynxfb_setup(option); 1220 lynxfb_setup(option);
1222 ret = pci_register_driver(&lynxfb_driver); 1221 return pci_register_driver(&lynxfb_driver);
1223 return ret;
1224} 1222}
1225module_init(lynxfb_init); 1223module_init(lynxfb_init);
1226 1224
@@ -1245,4 +1243,4 @@ MODULE_PARM_DESC(g_option,
1245MODULE_AUTHOR("monk liu <monk.liu@siliconmotion.com>"); 1243MODULE_AUTHOR("monk liu <monk.liu@siliconmotion.com>");
1246MODULE_AUTHOR("Sudip Mukherjee <sudip@vectorindia.org>"); 1244MODULE_AUTHOR("Sudip Mukherjee <sudip@vectorindia.org>");
1247MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset"); 1245MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset");
1248MODULE_LICENSE("GPL v2"); 1246MODULE_LICENSE("Dual BSD/GPL");
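
Editorial note: the sm750.c hunks above mostly rename the cursor helpers to the sm750_hw_cursor_* prefix and move messages to dev_* logging; the colour handling itself is unchanged. For reference, this is the RGB565 packing that lynxfb_ops_cursor() performs on the 16-bit cmap components before calling sm750_hw_cursor_setColor(). The function below is a stand-alone restatement of those shifts for illustration, not code from the patch.

#include <stdint.h>
#include <stdio.h>

/* Pack 16-bit-per-component colour values into 5:6:5, keeping only the
 * top bits of each component, matching the shifts used in the driver.
 */
static uint16_t pack_rgb565(uint16_t red, uint16_t green, uint16_t blue)
{
	return (red & 0xf800) |             /* red   -> bits 15..11 */
	       ((green & 0xfc00) >> 5) |    /* green -> bits 10..5  */
	       ((blue & 0xf800) >> 11);     /* blue  -> bits 4..0   */
}

int main(void)
{
	/* full-scale white packs to 0xffff in RGB565 */
	printf("0x%04x\n", pack_rgb565(0xffff, 0xffff, 0xffff));
	return 0;
}
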
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ff31c5c9cc6f..28f4b9b4f95f 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -146,14 +146,16 @@ struct lynxfb_crtc {
146struct lynxfb_output { 146struct lynxfb_output {
147 int dpms; 147 int dpms;
148 int paths; 148 int paths;
149 /* which paths(s) this output stands for,for sm750: 149 /*
150 * which paths(s) this output stands for,for sm750:
150 * paths=1:means output for panel paths 151 * paths=1:means output for panel paths
151 * paths=2:means output for crt paths 152 * paths=2:means output for crt paths
152 * paths=3:means output for both panel and crt paths 153 * paths=3:means output for both panel and crt paths
153 */ 154 */
154 155
155 int *channel; 156 int *channel;
156 /* which channel these outputs linked with,for sm750: 157 /*
158 * which channel these outputs linked with,for sm750:
157 * *channel=0 means primary channel 159 * *channel=0 means primary channel
158 * *channel=1 means secondary channel 160 * *channel=1 means secondary channel
159 * output->channel ==> &crtc->channel 161 * output->channel ==> &crtc->channel
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 38adae6b5d83..af0db5789c53 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -32,7 +32,7 @@ static inline void write_dpPort(struct lynx_accel *accel, u32 data)
32 writel(data, accel->dpPortBase); 32 writel(data, accel->dpPortBase);
33} 33}
34 34
35void hw_de_init(struct lynx_accel *accel) 35void sm750_hw_de_init(struct lynx_accel *accel)
36{ 36{
37 /* setup 2d engine registers */ 37 /* setup 2d engine registers */
38 u32 reg, clr; 38 u32 reg, clr;
@@ -65,12 +65,13 @@ void hw_de_init(struct lynx_accel *accel)
65 write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr); 65 write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
66} 66}
67 67
68/* set2dformat only be called from setmode functions 68/*
69 * set2dformat only be called from setmode functions
69 * but if you need dual framebuffer driver,need call set2dformat 70 * but if you need dual framebuffer driver,need call set2dformat
70 * every time you use 2d function 71 * every time you use 2d function
71 */ 72 */
72 73
73void hw_set2dformat(struct lynx_accel *accel, int fmt) 74void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt)
74{ 75{
75 u32 reg; 76 u32 reg;
76 77
@@ -82,7 +83,7 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt)
82 write_dpr(accel, DE_STRETCH_FORMAT, reg); 83 write_dpr(accel, DE_STRETCH_FORMAT, reg);
83} 84}
84 85
85int hw_fillrect(struct lynx_accel *accel, 86int sm750_hw_fillrect(struct lynx_accel *accel,
86 u32 base, u32 pitch, u32 Bpp, 87 u32 base, u32 pitch, u32 Bpp,
87 u32 x, u32 y, u32 width, u32 height, 88 u32 x, u32 y, u32 width, u32 height,
88 u32 color, u32 rop) 89 u32 color, u32 rop)
@@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel,
90 u32 deCtrl; 91 u32 deCtrl;
91 92
92 if (accel->de_wait() != 0) { 93 if (accel->de_wait() != 0) {
93 /* int time wait and always busy,seems hardware 94 /*
95 * int time wait and always busy,seems hardware
94 * got something error 96 * got something error
95 */ 97 */
96 pr_debug("De engine always busy\n"); 98 pr_debug("De engine always busy\n");
@@ -126,7 +128,7 @@ int hw_fillrect(struct lynx_accel *accel,
126 return 0; 128 return 0;
127} 129}
128 130
129int hw_copyarea( 131int sm750_hw_copyarea(
130struct lynx_accel *accel, 132struct lynx_accel *accel,
131unsigned int sBase, /* Address of source: offset in frame buffer */ 133unsigned int sBase, /* Address of source: offset in frame buffer */
132unsigned int sPitch, /* Pitch value of source surface in BYTE */ 134unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -213,25 +215,29 @@ unsigned int rop2) /* ROP value */
213 opSign = (-1); 215 opSign = (-1);
214 } 216 }
215 217
216 /* Note: 218 /*
219 * Note:
217 * DE_FOREGROUND are DE_BACKGROUND are don't care. 220 * DE_FOREGROUND are DE_BACKGROUND are don't care.
218 * DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS 221 * DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
219 * are set by set deSetTransparency(). 222 * are set by set deSetTransparency().
220 */ 223 */
221 224
222 /* 2D Source Base. 225 /*
226 * 2D Source Base.
223 * It is an address offset (128 bit aligned) 227 * It is an address offset (128 bit aligned)
224 * from the beginning of frame buffer. 228 * from the beginning of frame buffer.
225 */ 229 */
226 write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */ 230 write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
227 231
228 /* 2D Destination Base. 232 /*
233 * 2D Destination Base.
229 * It is an address offset (128 bit aligned) 234 * It is an address offset (128 bit aligned)
230 * from the beginning of frame buffer. 235 * from the beginning of frame buffer.
231 */ 236 */
232 write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */ 237 write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
233 238
234 /* Program pitch (distance between the 1st points of two adjacent lines). 239 /*
240 * Program pitch (distance between the 1st points of two adjacent lines).
235 * Note that input pitch is BYTE value, but the 2D Pitch register uses 241 * Note that input pitch is BYTE value, but the 2D Pitch register uses
236 * pixel values. Need Byte to pixel conversion. 242 * pixel values. Need Byte to pixel conversion.
237 */ 243 */
@@ -240,7 +246,8 @@ unsigned int rop2) /* ROP value */
240 DE_PITCH_DESTINATION_MASK) | 246 DE_PITCH_DESTINATION_MASK) |
241 (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */ 247 (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
242 248
243 /* Screen Window width in Pixels. 249 /*
250 * Screen Window width in Pixels.
244 * 2D engine uses this value to calculate the linear address in frame buffer 251 * 2D engine uses this value to calculate the linear address in frame buffer
245 * for a given point. 252 * for a given point.
246 */ 253 */
@@ -286,7 +293,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
286 return de_ctrl; 293 return de_ctrl;
287} 294}
288 295
289int hw_imageblit(struct lynx_accel *accel, 296int sm750_hw_imageblit(struct lynx_accel *accel,
290 const char *pSrcbuf, /* pointer to start of source buffer in system memory */ 297 const char *pSrcbuf, /* pointer to start of source buffer in system memory */
291 u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */ 298 u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
292 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */ 299 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
@@ -316,7 +323,8 @@ int hw_imageblit(struct lynx_accel *accel,
316 if (accel->de_wait() != 0) 323 if (accel->de_wait() != 0)
317 return -1; 324 return -1;
318 325
319 /* 2D Source Base. 326 /*
327 * 2D Source Base.
320 * Use 0 for HOST Blt. 328 * Use 0 for HOST Blt.
321 */ 329 */
322 write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0); 330 write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
@@ -326,16 +334,19 @@ int hw_imageblit(struct lynx_accel *accel,
326 * from the beginning of frame buffer. 334 * from the beginning of frame buffer.
327 */ 335 */
328 write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); 336 write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
329 /* Program pitch (distance between the 1st points of two adjacent lines). 337
330 * Note that input pitch is BYTE value, but the 2D Pitch register uses 338 /*
331 * pixel values. Need Byte to pixel conversion. 339 * Program pitch (distance between the 1st points of two adjacent
332 */ 340 * lines). Note that input pitch is BYTE value, but the 2D Pitch
341 * register uses pixel values. Need Byte to pixel conversion.
342 */
333 write_dpr(accel, DE_PITCH, 343 write_dpr(accel, DE_PITCH,
334 ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) & 344 ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
335 DE_PITCH_DESTINATION_MASK) | 345 DE_PITCH_DESTINATION_MASK) |
336 (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */ 346 (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
337 347
338 /* Screen Window width in Pixels. 348 /*
349 * Screen Window width in Pixels.
339 * 2D engine uses this value to calculate the linear address 350 * 2D engine uses this value to calculate the linear address
340 * in frame buffer for a given point. 351 * in frame buffer for a given point.
341 */ 352 */
@@ -344,7 +355,8 @@ int hw_imageblit(struct lynx_accel *accel,
344 DE_WINDOW_WIDTH_DST_MASK) | 355 DE_WINDOW_WIDTH_DST_MASK) |
345 (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK)); 356 (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
346 357
347 /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, 358 /*
359 * Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
348 * and Y_K2 field is not used. 360 * and Y_K2 field is not used.
349 * For mono bitmap, use startBit for X_K1. 361 * For mono bitmap, use startBit for X_K1.
350 */ 362 */
@@ -383,6 +395,6 @@ int hw_imageblit(struct lynx_accel *accel,
383 pSrcbuf += srcDelta; 395 pSrcbuf += srcDelta;
384 } 396 }
385 397
386 return 0; 398 return 0;
387} 399}
388 400
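
Editorial note: several of the comments reflowed above in sm750_hw_copyarea() and sm750_hw_imageblit() describe the same detail: the DE_PITCH and DE_WINDOW_WIDTH registers take pixel counts while the driver tracks pitches in bytes. The helper below is a small stand-alone sketch of that byte-to-pixel conversion; the mask and shift values are placeholders, not the driver's DE_PITCH_* definitions.

#include <stdint.h>

#define PITCH_DST_SHIFT 16            /* placeholder field layout */
#define PITCH_DST_MASK  0xffff0000u
#define PITCH_SRC_MASK  0x0000ffffu

/* Convert byte pitches to the pixel values the 2D engine register expects. */
static uint32_t de_pitch_value(uint32_t dpitch_bytes, uint32_t spitch_bytes,
			       uint32_t bytes_per_pixel)
{
	uint32_t dst_pixels = dpitch_bytes / bytes_per_pixel;
	uint32_t src_pixels = spitch_bytes / bytes_per_pixel;

	return ((dst_pixels << PITCH_DST_SHIFT) & PITCH_DST_MASK) |
	       (src_pixels & PITCH_SRC_MASK);
}
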
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d59d005e0add..4b0ff8feb9a0 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -184,16 +184,16 @@
184#define BOTTOM_TO_TOP 1 184#define BOTTOM_TO_TOP 1
185#define RIGHT_TO_LEFT 1 185#define RIGHT_TO_LEFT 1
186 186
187void hw_set2dformat(struct lynx_accel *accel, int fmt); 187void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
188 188
189void hw_de_init(struct lynx_accel *accel); 189void sm750_hw_de_init(struct lynx_accel *accel);
190 190
191int hw_fillrect(struct lynx_accel *accel, 191int sm750_hw_fillrect(struct lynx_accel *accel,
192 u32 base, u32 pitch, u32 Bpp, 192 u32 base, u32 pitch, u32 Bpp,
193 u32 x, u32 y, u32 width, u32 height, 193 u32 x, u32 y, u32 width, u32 height,
194 u32 color, u32 rop); 194 u32 color, u32 rop);
195 195
196int hw_copyarea( 196int sm750_hw_copyarea(
197struct lynx_accel *accel, 197struct lynx_accel *accel,
198unsigned int sBase, /* Address of source: offset in frame buffer */ 198unsigned int sBase, /* Address of source: offset in frame buffer */
199unsigned int sPitch, /* Pitch value of source surface in BYTE */ 199unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -208,7 +208,7 @@ unsigned int width,
208unsigned int height, /* width and height of rectangle in pixel value */ 208unsigned int height, /* width and height of rectangle in pixel value */
209unsigned int rop2); 209unsigned int rop2);
210 210
211int hw_imageblit(struct lynx_accel *accel, 211int sm750_hw_imageblit(struct lynx_accel *accel,
212 const char *pSrcbuf, /* pointer to start of source buffer in system memory */ 212 const char *pSrcbuf, /* pointer to start of source buffer in system memory */
213 u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */ 213 u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
214 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */ 214 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index d622d65b6cee..2a13353fc492 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -47,25 +47,25 @@ writel((data), cursor->mmio + (addr))
47 47
48 48
49/* hw_cursor_xxx works for voyager,718 and 750 */ 49/* hw_cursor_xxx works for voyager,718 and 750 */
50void hw_cursor_enable(struct lynx_cursor *cursor) 50void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
51{ 51{
52 u32 reg; 52 u32 reg;
53 53
54 reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE; 54 reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
55 POKE32(HWC_ADDRESS, reg); 55 POKE32(HWC_ADDRESS, reg);
56} 56}
57void hw_cursor_disable(struct lynx_cursor *cursor) 57void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
58{ 58{
59 POKE32(HWC_ADDRESS, 0); 59 POKE32(HWC_ADDRESS, 0);
60} 60}
61 61
62void hw_cursor_setSize(struct lynx_cursor *cursor, 62void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
63 int w, int h) 63 int w, int h)
64{ 64{
65 cursor->w = w; 65 cursor->w = w;
66 cursor->h = h; 66 cursor->h = h;
67} 67}
68void hw_cursor_setPos(struct lynx_cursor *cursor, 68void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
69 int x, int y) 69 int x, int y)
70{ 70{
71 u32 reg; 71 u32 reg;
@@ -74,7 +74,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
74 (x & HWC_LOCATION_X_MASK)); 74 (x & HWC_LOCATION_X_MASK));
75 POKE32(HWC_LOCATION, reg); 75 POKE32(HWC_LOCATION, reg);
76} 76}
77void hw_cursor_setColor(struct lynx_cursor *cursor, 77void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
78 u32 fg, u32 bg) 78 u32 fg, u32 bg)
79{ 79{
80 u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) & 80 u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
@@ -84,7 +84,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor,
84 POKE32(HWC_COLOR_3, 0xffe0); 84 POKE32(HWC_COLOR_3, 0xffe0);
85} 85}
86 86
87void hw_cursor_setData(struct lynx_cursor *cursor, 87void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
88 u16 rop, const u8 *pcol, const u8 *pmsk) 88 u16 rop, const u8 *pcol, const u8 *pmsk)
89{ 89{
90 int i, j, count, pitch, offset; 90 int i, j, count, pitch, offset;
@@ -138,7 +138,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
138} 138}
139 139
140 140
141void hw_cursor_setData2(struct lynx_cursor *cursor, 141void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
142 u16 rop, const u8 *pcol, const u8 *pmsk) 142 u16 rop, const u8 *pcol, const u8 *pmsk)
143{ 143{
144 int i, j, count, pitch, offset; 144 int i, j, count, pitch, offset;
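
Editorial note: the cursor hunks above are pure renames (hw_cursor_* to sm750_hw_cursor_*); the enable/disable scheme itself is untouched: the cursor's frame-buffer offset and the enable flag share one HWC address register, and disabling simply writes zero. A minimal stand-alone restatement of that scheme, with placeholder mask/bit values rather than the real HWC_ADDRESS_* definitions:

#include <stdint.h>

#define HWC_ADDR_OFFSET_MASK 0x03ffffffu  /* placeholder offset field */
#define HWC_ADDR_ENABLE      0x80000000u  /* placeholder enable bit   */

/* Value written to the cursor address register: offset plus enable bit
 * when enabling, plain zero when disabling.
 */
static uint32_t hwc_address_value(uint32_t fb_offset, int enable)
{
	if (!enable)
		return 0;

	return (fb_offset & HWC_ADDR_OFFSET_MASK) | HWC_ADDR_ENABLE;
}
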
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 6c4fc9b73489..c7b86ae235b4 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -2,16 +2,16 @@
2#define LYNX_CURSOR_H__ 2#define LYNX_CURSOR_H__
3 3
4/* hw_cursor_xxx works for voyager,718 and 750 */ 4/* hw_cursor_xxx works for voyager,718 and 750 */
5void hw_cursor_enable(struct lynx_cursor *cursor); 5void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
6void hw_cursor_disable(struct lynx_cursor *cursor); 6void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
7void hw_cursor_setSize(struct lynx_cursor *cursor, 7void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
8 int w, int h); 8 int w, int h);
9void hw_cursor_setPos(struct lynx_cursor *cursor, 9void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
10 int x, int y); 10 int x, int y);
11void hw_cursor_setColor(struct lynx_cursor *cursor, 11void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
12 u32 fg, u32 bg); 12 u32 fg, u32 bg);
13void hw_cursor_setData(struct lynx_cursor *cursor, 13void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
14 u16 rop, const u8 *data, const u8 *mask); 14 u16 rop, const u8 *data, const u8 *mask);
15void hw_cursor_setData2(struct lynx_cursor *cursor, 15void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
16 u16 rop, const u8 *data, const u8 *mask); 16 u16 rop, const u8 *data, const u8 *mask);
17#endif 17#endif
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 7dd208caa5eb..b6af3b53076b 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -23,6 +23,8 @@
23#include "ddk750.h" 23#include "ddk750.h"
24#include "sm750_accel.h" 24#include "sm750_accel.h"
25 25
26void __iomem *mmio750;
27
26int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev) 28int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
27{ 29{
28 int ret; 30 int ret;
@@ -34,7 +36,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
34 36
35 pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start); 37 pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
36 38
37 /* reserve the vidreg space of smi adaptor 39 /*
40 * reserve the vidreg space of smi adaptor
38 * if you do this, you need to add release region code 41 * if you do this, you need to add release region code
39 * in lynxfb_remove, or memory will not be mapped again 42 * in lynxfb_remove, or memory will not be mapped again
40 * successfully 43 * successfully
@@ -59,15 +62,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
59 sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1; 62 sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
60 sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1; 63 sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
61 64
62 ddk750_set_mmio(sm750_dev->pvReg, sm750_dev->devid, sm750_dev->revid); 65 mmio750 = sm750_dev->pvReg;
66 sm750_set_chip_type(sm750_dev->devid, sm750_dev->revid);
63 67
64 sm750_dev->vidmem_start = pci_resource_start(pdev, 0); 68 sm750_dev->vidmem_start = pci_resource_start(pdev, 0);
65 /* don't use pdev_resource[x].end - resource[x].start to 69 /*
70 * don't use pdev_resource[x].end - resource[x].start to
66 * calculate the resource size, it's only the maximum available 71 * calculate the resource size, it's only the maximum available
67 * size but not the actual size, using 72 * size but not the actual size, using
68 * @ddk750_getVMSize function can be safe. 73 * @ddk750_get_vm_size function can be safe.
69 */ 74 */
70 sm750_dev->vidmem_size = ddk750_getVMSize(); 75 sm750_dev->vidmem_size = ddk750_get_vm_size();
71 pr_info("video memory phyAddr = %lx, size = %u bytes\n", 76 pr_info("video memory phyAddr = %lx, size = %u bytes\n",
72 sm750_dev->vidmem_start, sm750_dev->vidmem_size); 77 sm750_dev->vidmem_start, sm750_dev->vidmem_size);
73 78
@@ -100,7 +105,7 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
100 if (parm->master_clk == 0) 105 if (parm->master_clk == 0)
101 parm->master_clk = parm->chip_clk / 3; 106 parm->master_clk = parm->chip_clk / 3;
102 107
103 ddk750_initHw((initchip_param_t *)&sm750_dev->initParm); 108 ddk750_init_hw((struct initchip_param *)&sm750_dev->initParm);
104 /* for sm718, open pci burst */ 109 /* for sm718, open pci burst */
105 if (sm750_dev->devid == 0x718) { 110 if (sm750_dev->devid == 0x718) {
106 POKE32(SYSTEM_CTRL, 111 POKE32(SYSTEM_CTRL,
@@ -141,7 +146,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
141 } 146 }
142 POKE32(PANEL_DISPLAY_CTRL, val); 147 POKE32(PANEL_DISPLAY_CTRL, val);
143 } else { 148 } else {
144 /* for 750LE, no DVI chip initialization 149 /*
150 * for 750LE, no DVI chip initialization
145 * makes Monitor no signal 151 * makes Monitor no signal
146 * 152 *
147 * Set up GPIO for software I2C to program DVI chip in the 153 * Set up GPIO for software I2C to program DVI chip in the
@@ -149,13 +155,15 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
149 */ 155 */
150 sm750_sw_i2c_init(0, 1); 156 sm750_sw_i2c_init(0, 1);
151 157
152 /* Customer may NOT use CH7301 DVI chip, which has to be 158 /*
159 * Customer may NOT use CH7301 DVI chip, which has to be
153 * initialized differently. 160 * initialized differently.
154 */ 161 */
155 if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) { 162 if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
156 /* The following register values for CH7301 are from 163 /*
157 * Chrontel app note and our experiment. 164 * The following register values for CH7301 are from
158 */ 165 * Chrontel app note and our experiment.
166 */
159 pr_info("yes,CH7301 DVI chip found\n"); 167 pr_info("yes,CH7301 DVI chip found\n");
160 sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16); 168 sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
161 sm750_sw_i2c_write_reg(0xec, 0x21, 0x9); 169 sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
@@ -267,7 +275,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
267 fmt = 2; 275 fmt = 2;
268 break; 276 break;
269 } 277 }
270 hw_set2dformat(&sm750_dev->accel, fmt); 278 sm750_hw_set2dformat(&sm750_dev->accel, fmt);
271 } 279 }
272 280
273 /* set timing */ 281 /* set timing */
@@ -308,7 +316,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
308 crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK); 316 crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
309 317
310 reg = var->xres * (var->bits_per_pixel >> 3); 318 reg = var->xres * (var->bits_per_pixel >> 3);
311 /* crtc->channel is not equal to par->index on numeric, 319 /*
320 * crtc->channel is not equal to par->index on numeric,
312 * be aware of that 321 * be aware of that
313 */ 322 */
314 reg = ALIGN(reg, crtc->line_pad); 323 reg = ALIGN(reg, crtc->line_pad);
@@ -342,7 +351,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
342 /* not implemented now */ 351 /* not implemented now */
343 POKE32(CRT_FB_ADDRESS, crtc->oScreen); 352 POKE32(CRT_FB_ADDRESS, crtc->oScreen);
344 reg = var->xres * (var->bits_per_pixel >> 3); 353 reg = var->xres * (var->bits_per_pixel >> 3);
345 /* crtc->channel is not equal to par->index on numeric, 354 /*
355 * crtc->channel is not equal to par->index on numeric,
346 * be aware of that 356 * be aware of that
347 */ 357 */
348 reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT; 358 reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
@@ -469,7 +479,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
469{ 479{
470 u32 reg; 480 u32 reg;
471 481
472 enable2DEngine(1); 482 sm750_enable_2d_engine(1);
473 483
474 if (sm750_get_chip_type() == SM750LE) { 484 if (sm750_get_chip_type() == SM750LE) {
475 reg = PEEK32(DE_STATE1); 485 reg = PEEK32(DE_STATE1);
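
Editorial note: the hw_sm750_map() change above exports the register base as mmio750 and calls sm750_set_chip_type() instead of the old ddk750_set_mmio(), which is what allows the ddk750_help.h include to go away in swi2c.c. One plausible shape for register accessors built on that exported base is sketched below; it illustrates the pattern only and is not the series' actual PEEK32/POKE32 definitions.

#include <linux/io.h>
#include <linux/types.h>

extern void __iomem *mmio750;	/* register base set up in hw_sm750_map() */

static inline u32 sketch_peek32(u32 addr)
{
	return readl(mmio750 + addr);
}

static inline void sketch_poke32(u32 addr, u32 data)
{
	writel(data, mmio750 + addr);
}
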
diff --git a/drivers/staging/speakup/TODO b/drivers/staging/speakup/TODO
index 3094799cf6a0..993410c3e531 100644
--- a/drivers/staging/speakup/TODO
+++ b/drivers/staging/speakup/TODO
@@ -42,6 +42,6 @@ We prefer that you contact us on the mailing list; however, if you do
42not want to subscribe to a mailing list, send your email to all of the 42not want to subscribe to a mailing list, send your email to all of the
43following: 43following:
44 44
45w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@braille.uwo.ca and 45w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@reisers.ca and
46samuel.thibault@ens-lyon.org. 46samuel.thibault@ens-lyon.org.
47 47
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 97ca4ecca8a9..5c192042eeac 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -351,14 +351,14 @@ static void speakup_cut(struct vc_data *vc)
351 351
352 if (!mark_cut_flag) { 352 if (!mark_cut_flag) {
353 mark_cut_flag = 1; 353 mark_cut_flag = 1;
354 spk_xs = (u_short) spk_x; 354 spk_xs = (u_short)spk_x;
355 spk_ys = (u_short) spk_y; 355 spk_ys = (u_short)spk_y;
356 spk_sel_cons = vc; 356 spk_sel_cons = vc;
357 synth_printf("%s\n", spk_msg_get(MSG_MARK)); 357 synth_printf("%s\n", spk_msg_get(MSG_MARK));
358 return; 358 return;
359 } 359 }
360 spk_xe = (u_short) spk_x; 360 spk_xe = (u_short)spk_x;
361 spk_ye = (u_short) spk_y; 361 spk_ye = (u_short)spk_y;
362 mark_cut_flag = 0; 362 mark_cut_flag = 0;
363 synth_printf("%s\n", spk_msg_get(MSG_CUT)); 363 synth_printf("%s\n", spk_msg_get(MSG_CUT));
364 364
@@ -489,7 +489,7 @@ static void say_char(struct vc_data *vc)
489 u_short ch; 489 u_short ch;
490 490
491 spk_old_attr = spk_attr; 491 spk_old_attr = spk_attr;
492 ch = get_char(vc, (u_short *) spk_pos, &spk_attr); 492 ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
493 if (spk_attr != spk_old_attr) { 493 if (spk_attr != spk_old_attr) {
494 if (spk_attrib_bleep & 1) 494 if (spk_attrib_bleep & 1)
495 bleep(spk_y); 495 bleep(spk_y);
@@ -504,7 +504,7 @@ static void say_phonetic_char(struct vc_data *vc)
504 u_short ch; 504 u_short ch;
505 505
506 spk_old_attr = spk_attr; 506 spk_old_attr = spk_attr;
507 ch = get_char(vc, (u_short *) spk_pos, &spk_attr); 507 ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
508 if (isascii(ch) && isalpha(ch)) { 508 if (isascii(ch) && isalpha(ch)) {
509 ch &= 0x1f; 509 ch &= 0x1f;
510 synth_printf("%s\n", phonetic[--ch]); 510 synth_printf("%s\n", phonetic[--ch]);
@@ -556,7 +556,7 @@ static u_long get_word(struct vc_data *vc)
556 u_char temp; 556 u_char temp;
557 557
558 spk_old_attr = spk_attr; 558 spk_old_attr = spk_attr;
559 ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); 559 ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
560 560
561/* decided to take out the sayword if on a space (mis-information */ 561/* decided to take out the sayword if on a space (mis-information */
562 if (spk_say_word_ctl && ch == SPACE) { 562 if (spk_say_word_ctl && ch == SPACE) {
@@ -565,26 +565,26 @@ static u_long get_word(struct vc_data *vc)
565 return 0; 565 return 0;
566 } else if ((tmpx < vc->vc_cols - 2) 566 } else if ((tmpx < vc->vc_cols - 2)
567 && (ch == SPACE || ch == 0 || IS_WDLM(ch)) 567 && (ch == SPACE || ch == 0 || IS_WDLM(ch))
568 && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) > 568 && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) >
569 SPACE)) { 569 SPACE)) {
570 tmp_pos += 2; 570 tmp_pos += 2;
571 tmpx++; 571 tmpx++;
572 } else 572 } else
573 while (tmpx > 0) { 573 while (tmpx > 0) {
574 ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp); 574 ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp);
575 if ((ch == SPACE || ch == 0 || IS_WDLM(ch)) 575 if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
576 && ((char)get_char(vc, (u_short *) tmp_pos, &temp) > 576 && ((char)get_char(vc, (u_short *)tmp_pos, &temp) >
577 SPACE)) 577 SPACE))
578 break; 578 break;
579 tmp_pos -= 2; 579 tmp_pos -= 2;
580 tmpx--; 580 tmpx--;
581 } 581 }
582 attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr); 582 attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
583 buf[cnt++] = attr_ch & 0xff; 583 buf[cnt++] = attr_ch & 0xff;
584 while (tmpx < vc->vc_cols - 1) { 584 while (tmpx < vc->vc_cols - 1) {
585 tmp_pos += 2; 585 tmp_pos += 2;
586 tmpx++; 586 tmpx++;
587 ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); 587 ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
588 if ((ch == SPACE) || ch == 0 588 if ((ch == SPACE) || ch == 0
589 || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE))) 589 || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
590 break; 590 break;
@@ -639,7 +639,7 @@ static void say_prev_word(struct vc_data *vc)
639 } else 639 } else
640 spk_x--; 640 spk_x--;
641 spk_pos -= 2; 641 spk_pos -= 2;
642 ch = (char)get_char(vc, (u_short *) spk_pos, &temp); 642 ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
643 if (ch == SPACE || ch == 0) 643 if (ch == SPACE || ch == 0)
644 state = 0; 644 state = 0;
645 else if (IS_WDLM(ch)) 645 else if (IS_WDLM(ch))
@@ -672,7 +672,7 @@ static void say_next_word(struct vc_data *vc)
672 return; 672 return;
673 } 673 }
674 while (1) { 674 while (1) {
675 ch = (char)get_char(vc, (u_short *) spk_pos, &temp); 675 ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
676 if (ch == SPACE || ch == 0) 676 if (ch == SPACE || ch == 0)
677 state = 0; 677 state = 0;
678 else if (IS_WDLM(ch)) 678 else if (IS_WDLM(ch))
@@ -709,7 +709,7 @@ static void spell_word(struct vc_data *vc)
709 709
710 if (!get_word(vc)) 710 if (!get_word(vc))
711 return; 711 return;
712 while ((ch = (u_char) *cp)) { 712 while ((ch = (u_char)*cp)) {
713 if (cp != buf) 713 if (cp != buf)
714 synth_printf(" %s ", delay_str[spk_spell_delay]); 714 synth_printf(" %s ", delay_str[spk_spell_delay]);
715 if (IS_CHAR(ch, B_CAP)) { 715 if (IS_CHAR(ch, B_CAP)) {
@@ -751,7 +751,7 @@ static int get_line(struct vc_data *vc)
751 spk_old_attr = spk_attr; 751 spk_old_attr = spk_attr;
752 spk_attr = get_attributes(vc, (u_short *)spk_pos); 752 spk_attr = get_attributes(vc, (u_short *)spk_pos);
753 for (i = 0; i < vc->vc_cols; i++) { 753 for (i = 0; i < vc->vc_cols; i++) {
754 buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2); 754 buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2);
755 tmp += 2; 755 tmp += 2;
756 } 756 }
757 for (--i; i >= 0; i--) 757 for (--i; i >= 0; i--)
@@ -816,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
816 spk_old_attr = spk_attr; 816 spk_old_attr = spk_attr;
817 spk_attr = get_attributes(vc, (u_short *)from); 817 spk_attr = get_attributes(vc, (u_short *)from);
818 while (from < to) { 818 while (from < to) {
819 buf[i++] = (char)get_char(vc, (u_short *) from, &tmp); 819 buf[i++] = (char)get_char(vc, (u_short *)from, &tmp);
820 from += 2; 820 from += 2;
821 if (i >= vc->vc_size_row) 821 if (i >= vc->vc_size_row)
822 break; 822 break;
@@ -892,7 +892,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
892 spk_attr = get_attributes(vc, (u_short *)start); 892 spk_attr = get_attributes(vc, (u_short *)start);
893 893
894 while (start < end) { 894 while (start < end) {
895 sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp); 895 sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp);
896 if (i > 0) { 896 if (i > 0) {
897 if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.' 897 if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
898 && numsentences[bn] < 9) { 898 && numsentences[bn] < 9) {
@@ -1040,7 +1040,7 @@ static void say_position(struct vc_data *vc)
1040static void say_char_num(struct vc_data *vc) 1040static void say_char_num(struct vc_data *vc)
1041{ 1041{
1042 u_char tmp; 1042 u_char tmp;
1043 u_short ch = get_char(vc, (u_short *) spk_pos, &tmp); 1043 u_short ch = get_char(vc, (u_short *)spk_pos, &tmp);
1044 1044
1045 ch &= 0xff; 1045 ch &= 0xff;
1046 synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch); 1046 synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
@@ -1085,7 +1085,7 @@ static void spkup_write(const char *in_buf, int count)
1085 (currsentence <= numsentences[bn])) 1085 (currsentence <= numsentences[bn]))
1086 synth_insert_next_index(currsentence++); 1086 synth_insert_next_index(currsentence++);
1087 } 1087 }
1088 ch = (u_char) *in_buf++; 1088 ch = (u_char)*in_buf++;
1089 char_type = spk_chartab[ch]; 1089 char_type = spk_chartab[ch];
1090 if (ch == old_ch && !(char_type & B_NUM)) { 1090 if (ch == old_ch && !(char_type & B_NUM)) {
1091 if (++rep_count > 2) 1091 if (++rep_count > 2)
@@ -1579,7 +1579,7 @@ static int count_highlight_color(struct vc_data *vc)
1579 int cc; 1579 int cc;
1580 int vc_num = vc->vc_num; 1580 int vc_num = vc->vc_num;
1581 u16 ch; 1581 u16 ch;
1582 u16 *start = (u16 *) vc->vc_origin; 1582 u16 *start = (u16 *)vc->vc_origin;
1583 1583
1584 for (i = 0; i < 8; i++) 1584 for (i = 0; i < 8; i++)
1585 speakup_console[vc_num]->ht.bgcount[i] = 0; 1585 speakup_console[vc_num]->ht.bgcount[i] = 0;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0149edc1e0ae..aeb2b865615a 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -137,7 +137,7 @@ static void __speakup_paste_selection(struct work_struct *work)
137 struct speakup_paste_work *spw = 137 struct speakup_paste_work *spw =
138 container_of(work, struct speakup_paste_work, work); 138 container_of(work, struct speakup_paste_work, work);
139 struct tty_struct *tty = xchg(&spw->tty, NULL); 139 struct tty_struct *tty = xchg(&spw->tty, NULL);
140 struct vc_data *vc = (struct vc_data *) tty->driver_data; 140 struct vc_data *vc = (struct vc_data *)tty->driver_data;
141 int pasted = 0, count; 141 int pasted = 0, count;
142 struct tty_ldisc *ld; 142 struct tty_ldisc *ld;
143 DECLARE_WAITQUEUE(wait, current); 143 DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index c2c435cc3d63..ef89dc1c21c8 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -99,7 +99,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
99 while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) { 99 while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
100 100
101 c = inb_p(speakup_info.port_tts+UART_RX); 101 c = inb_p(speakup_info.port_tts+UART_RX);
102 synth->read_buff_add((u_char) c); 102 synth->read_buff_add((u_char)c);
103 } 103 }
104 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 104 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
105 return IRQ_HANDLED; 105 return IRQ_HANDLED;
@@ -113,7 +113,7 @@ static void start_serial_interrupt(int irq)
113 return; 113 return;
114 114
115 rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED, 115 rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED,
116 "serial", (void *) synth_readbuf_handler); 116 "serial", (void *)synth_readbuf_handler);
117 117
118 if (rv) 118 if (rv)
119 pr_err("Unable to request Speakup serial I R Q\n"); 119 pr_err("Unable to request Speakup serial I R Q\n");
@@ -141,7 +141,7 @@ void spk_stop_serial_interrupt(void)
141 /* Turn off interrupts */ 141 /* Turn off interrupts */
142 outb(0, speakup_info.port_tts+UART_IER); 142 outb(0, speakup_info.port_tts+UART_IER);
143 /* Free IRQ */ 143 /* Free IRQ */
144 free_irq(serstate->irq, (void *) synth_readbuf_handler); 144 free_irq(serstate->irq, (void *)synth_readbuf_handler);
145} 145}
146 146
147int spk_wait_for_xmitr(void) 147int spk_wait_for_xmitr(void)
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 6b1d0f538bbd..ed3e4282f41c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -20,8 +20,8 @@
20 */ 20 */
21 21
22#include <linux/unistd.h> 22#include <linux/unistd.h>
23#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */ 23#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
24#include <linux/poll.h> /* for poll_wait() */ 24#include <linux/poll.h> /* for poll_wait() */
25#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ 25#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
26 26
27#include "spk_priv.h" 27#include "spk_priv.h"
@@ -55,27 +55,26 @@ static struct var_t vars[] = {
55 V_LAST_VAR 55 V_LAST_VAR
56}; 56};
57 57
58/* 58/* These attributes will appear in /sys/accessibility/speakup/soft. */
59 * These attributes will appear in /sys/accessibility/speakup/soft. 59
60 */
61static struct kobj_attribute caps_start_attribute = 60static struct kobj_attribute caps_start_attribute =
62 __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 61 __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
63static struct kobj_attribute caps_stop_attribute = 62static struct kobj_attribute caps_stop_attribute =
64 __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 63 __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
65static struct kobj_attribute freq_attribute = 64static struct kobj_attribute freq_attribute =
66 __ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 65 __ATTR(freq, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
67static struct kobj_attribute pitch_attribute = 66static struct kobj_attribute pitch_attribute =
68 __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 67 __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
69static struct kobj_attribute punct_attribute = 68static struct kobj_attribute punct_attribute =
70 __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 69 __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
71static struct kobj_attribute rate_attribute = 70static struct kobj_attribute rate_attribute =
72 __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 71 __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
73static struct kobj_attribute tone_attribute = 72static struct kobj_attribute tone_attribute =
74 __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 73 __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
75static struct kobj_attribute voice_attribute = 74static struct kobj_attribute voice_attribute =
76 __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 75 __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
77static struct kobj_attribute vol_attribute = 76static struct kobj_attribute vol_attribute =
78 __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 77 __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
79 78
80/* 79/*
81 * We should uncomment the following definition, when we agree on a 80 * We should uncomment the following definition, when we agree on a
@@ -85,15 +84,15 @@ static struct kobj_attribute vol_attribute =
85 */ 84 */
86 85
87static struct kobj_attribute delay_time_attribute = 86static struct kobj_attribute delay_time_attribute =
88 __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 87 __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
89static struct kobj_attribute direct_attribute = 88static struct kobj_attribute direct_attribute =
90 __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 89 __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
91static struct kobj_attribute full_time_attribute = 90static struct kobj_attribute full_time_attribute =
92 __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 91 __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
93static struct kobj_attribute jiffy_delta_attribute = 92static struct kobj_attribute jiffy_delta_attribute =
94 __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 93 __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
95static struct kobj_attribute trigger_time_attribute = 94static struct kobj_attribute trigger_time_attribute =
96 __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 95 __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
97 96
98/* 97/*
99 * Create a group of attributes so that we can create and destroy them all 98 * Create a group of attributes so that we can create and destroy them all
@@ -162,8 +161,8 @@ static char *get_initstring(void)
162 cp = buf; 161 cp = buf;
163 var = synth_soft.vars; 162 var = synth_soft.vars;
164 while (var->var_id != MAXVARS) { 163 while (var->var_id != MAXVARS) {
165 if (var->var_id != CAPS_START && var->var_id != CAPS_STOP 164 if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
166 && var->var_id != DIRECT) 165 var->var_id != DIRECT)
167 cp = cp + sprintf(cp, var->u.n.synth_fmt, 166 cp = cp + sprintf(cp, var->u.n.synth_fmt,
168 var->u.n.value); 167 var->u.n.value);
169 var++; 168 var++;
@@ -277,8 +276,7 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
277 return count; 276 return count;
278} 277}
279 278
280static unsigned int softsynth_poll(struct file *fp, 279static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
281 struct poll_table_struct *wait)
282{ 280{
283 unsigned long flags; 281 unsigned long flags;
284 int ret = 0; 282 int ret = 0;
@@ -310,10 +308,8 @@ static const struct file_operations softsynth_fops = {
310 .release = softsynth_close, 308 .release = softsynth_close,
311}; 309};
312 310
313
314static int softsynth_probe(struct spk_synth *synth) 311static int softsynth_probe(struct spk_synth *synth)
315{ 312{
316
317 if (misc_registered != 0) 313 if (misc_registered != 0)
318 return 0; 314 return 0;
319 memset(&synth_device, 0, sizeof(synth_device)); 315 memset(&synth_device, 0, sizeof(synth_device));
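
Editorial note: the speakup_soft.c hunks above (and the speakup_spkout.c and speakup_txprt.c hunks that follow) all make the same checkpatch-style change: spaces around '|' in the sysfs attribute permission masks. The fragment below restates that declaration pattern in isolation; the stub show/store handlers exist only to keep the snippet self-contained and are not the driver's real spk_var_show()/spk_var_store().

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

/* Stub handlers, standing in for the driver's spk_var_show()/spk_var_store(). */
static ssize_t example_var_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return 0;
}

static ssize_t example_var_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count)
{
	return count;
}

/* The cleaned-up form: spaces around '|' in the mode argument. */
static struct kobj_attribute rate_attribute =
	__ATTR(rate, S_IWUSR | S_IRUGO, example_var_show, example_var_store);
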
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index e449f2770c1f..586890908826 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * originally written by: Kirk Reiser <kirk@braille.uwo.ca> 2 * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
3* this version considerably modified by David Borowski, david575@rogers.com 3 * this version considerably modified by David Borowski, david575@rogers.com
4 * 4 *
5 * Copyright (C) 1998-99 Kirk Reiser. 5 * Copyright (C) 1998-99 Kirk Reiser.
6 * Copyright (C) 2003 David Borowski. 6 * Copyright (C) 2003 David Borowski.
@@ -40,34 +40,33 @@ static struct var_t vars[] = {
40 V_LAST_VAR 40 V_LAST_VAR
41}; 41};
42 42
43/* 43/* These attributes will appear in /sys/accessibility/speakup/spkout. */
44 * These attributes will appear in /sys/accessibility/speakup/spkout. 44
45 */
46static struct kobj_attribute caps_start_attribute = 45static struct kobj_attribute caps_start_attribute =
47 __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 46 __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
48static struct kobj_attribute caps_stop_attribute = 47static struct kobj_attribute caps_stop_attribute =
49 __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 48 __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
50static struct kobj_attribute pitch_attribute = 49static struct kobj_attribute pitch_attribute =
51 __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 50 __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
52static struct kobj_attribute punct_attribute = 51static struct kobj_attribute punct_attribute =
53 __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 52 __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
54static struct kobj_attribute rate_attribute = 53static struct kobj_attribute rate_attribute =
55 __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 54 __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
56static struct kobj_attribute tone_attribute = 55static struct kobj_attribute tone_attribute =
57 __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 56 __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
58static struct kobj_attribute vol_attribute = 57static struct kobj_attribute vol_attribute =
59 __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 58 __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
60 59
61static struct kobj_attribute delay_time_attribute = 60static struct kobj_attribute delay_time_attribute =
62 __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 61 __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
63static struct kobj_attribute direct_attribute = 62static struct kobj_attribute direct_attribute =
64 __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 63 __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
65static struct kobj_attribute full_time_attribute = 64static struct kobj_attribute full_time_attribute =
66 __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 65 __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
67static struct kobj_attribute jiffy_delta_attribute = 66static struct kobj_attribute jiffy_delta_attribute =
68 __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 67 __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
69static struct kobj_attribute trigger_time_attribute = 68static struct kobj_attribute trigger_time_attribute =
70 __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 69 __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
71 70
72/* 71/*
73 * Create a group of attributes so that we can create and destroy them all 72 * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index fd98d4ffcb3e..b3d2cfd20ac8 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * originally written by: Kirk Reiser <kirk@braille.uwo.ca> 2 * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
3* this version considerably modified by David Borowski, david575@rogers.com 3 * this version considerably modified by David Borowski, david575@rogers.com
4 * 4 *
5 * Copyright (C) 1998-99 Kirk Reiser. 5 * Copyright (C) 1998-99 Kirk Reiser.
6 * Copyright (C) 2003 David Borowski. 6 * Copyright (C) 2003 David Borowski.
@@ -36,32 +36,31 @@ static struct var_t vars[] = {
36 V_LAST_VAR 36 V_LAST_VAR
37 }; 37 };
38 38
39/* 39/* These attributes will appear in /sys/accessibility/speakup/txprt. */
40 * These attributes will appear in /sys/accessibility/speakup/txprt. 40
41 */
42static struct kobj_attribute caps_start_attribute = 41static struct kobj_attribute caps_start_attribute =
43 __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 42 __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
44static struct kobj_attribute caps_stop_attribute = 43static struct kobj_attribute caps_stop_attribute =
45 __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 44 __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
46static struct kobj_attribute pitch_attribute = 45static struct kobj_attribute pitch_attribute =
47 __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 46 __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
48static struct kobj_attribute rate_attribute = 47static struct kobj_attribute rate_attribute =
49 __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 48 __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
50static struct kobj_attribute tone_attribute = 49static struct kobj_attribute tone_attribute =
51 __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 50 __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
52static struct kobj_attribute vol_attribute = 51static struct kobj_attribute vol_attribute =
53 __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 52 __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
54 53
55static struct kobj_attribute delay_time_attribute = 54static struct kobj_attribute delay_time_attribute =
56 __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 55 __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
57static struct kobj_attribute direct_attribute = 56static struct kobj_attribute direct_attribute =
58 __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 57 __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
59static struct kobj_attribute full_time_attribute = 58static struct kobj_attribute full_time_attribute =
60 __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 59 __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
61static struct kobj_attribute jiffy_delta_attribute = 60static struct kobj_attribute jiffy_delta_attribute =
62 __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 61 __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
63static struct kobj_attribute trigger_time_attribute = 62static struct kobj_attribute trigger_time_attribute =
64 __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); 63 __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
65 64
66/* 65/*
67 * Create a group of attributes so that we can create and destroy them all 66 * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 130e9cb0118b..c95b68ebd8e7 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -23,84 +23,82 @@
23 23
24#define FIRST_SYNTH_VAR RATE 24#define FIRST_SYNTH_VAR RATE
25/* 0 is reserved for no remap */ 25/* 0 is reserved for no remap */
26#define SPEAKUP_GOTO 0x01 26#define SPEAKUP_GOTO 0x01
27#define SPEECH_KILL 0x02 27#define SPEECH_KILL 0x02
28#define SPEAKUP_QUIET 0x03 28#define SPEAKUP_QUIET 0x03
29#define SPEAKUP_CUT 0x04 29#define SPEAKUP_CUT 0x04
30#define SPEAKUP_PASTE 0x05 30#define SPEAKUP_PASTE 0x05
31#define SAY_FIRST_CHAR 0x06 31#define SAY_FIRST_CHAR 0x06
32#define SAY_LAST_CHAR 0x07 32#define SAY_LAST_CHAR 0x07
33#define SAY_CHAR 0x08 33#define SAY_CHAR 0x08
34#define SAY_PREV_CHAR 0x09 34#define SAY_PREV_CHAR 0x09
35#define SAY_NEXT_CHAR 0x0a 35#define SAY_NEXT_CHAR 0x0a
36#define SAY_WORD 0x0b 36#define SAY_WORD 0x0b
37#define SAY_PREV_WORD 0x0c 37#define SAY_PREV_WORD 0x0c
38#define SAY_NEXT_WORD 0x0d 38#define SAY_NEXT_WORD 0x0d
39#define SAY_LINE 0x0e 39#define SAY_LINE 0x0e
40#define SAY_PREV_LINE 0x0f 40#define SAY_PREV_LINE 0x0f
41#define SAY_NEXT_LINE 0x10 41#define SAY_NEXT_LINE 0x10
42#define TOP_EDGE 0x11 42#define TOP_EDGE 0x11
43#define BOTTOM_EDGE 0x12 43#define BOTTOM_EDGE 0x12
44#define LEFT_EDGE 0x13 44#define LEFT_EDGE 0x13
45#define RIGHT_EDGE 0x14 45#define RIGHT_EDGE 0x14
46#define SPELL_PHONETIC 0x15 46#define SPELL_PHONETIC 0x15
47#define SPELL_WORD 0x16 47#define SPELL_WORD 0x16
48#define SAY_SCREEN 0x17 48#define SAY_SCREEN 0x17
49#define SAY_POSITION 0x18 49#define SAY_POSITION 0x18
50#define SAY_ATTRIBUTES 0x19 50#define SAY_ATTRIBUTES 0x19
51#define SPEAKUP_OFF 0x1a 51#define SPEAKUP_OFF 0x1a
52#define SPEAKUP_PARKED 0x1b 52#define SPEAKUP_PARKED 0x1b
53#define SAY_LINE_INDENT 0x1c 53#define SAY_LINE_INDENT 0x1c
54#define SAY_FROM_TOP 0x1d 54#define SAY_FROM_TOP 0x1d
55#define SAY_TO_BOTTOM 0x1e 55#define SAY_TO_BOTTOM 0x1e
56#define SAY_FROM_LEFT 0x1f 56#define SAY_FROM_LEFT 0x1f
57#define SAY_TO_RIGHT 0x20 57#define SAY_TO_RIGHT 0x20
58#define SAY_CHAR_NUM 0x21 58#define SAY_CHAR_NUM 0x21
59#define EDIT_SOME 0x22 59#define EDIT_SOME 0x22
60#define EDIT_MOST 0x23 60#define EDIT_MOST 0x23
61#define SAY_PHONETIC_CHAR 0x24 61#define SAY_PHONETIC_CHAR 0x24
62#define EDIT_DELIM 0x25 62#define EDIT_DELIM 0x25
63#define EDIT_REPEAT 0x26 63#define EDIT_REPEAT 0x26
64#define EDIT_EXNUM 0x27 64#define EDIT_EXNUM 0x27
65#define SET_WIN 0x28 65#define SET_WIN 0x28
66#define CLEAR_WIN 0x29 66#define CLEAR_WIN 0x29
67#define ENABLE_WIN 0x2a 67#define ENABLE_WIN 0x2a
68#define SAY_WIN 0x2b 68#define SAY_WIN 0x2b
69#define SPK_LOCK 0x2c 69#define SPK_LOCK 0x2c
70#define SPEAKUP_HELP 0x2d 70#define SPEAKUP_HELP 0x2d
71#define TOGGLE_CURSORING 0x2e 71#define TOGGLE_CURSORING 0x2e
72#define READ_ALL_DOC 0x2f 72#define READ_ALL_DOC 0x2f
73#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */ 73#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
74 74#define SPK_KEY 0x80
75#define SPK_KEY 0x80 75#define FIRST_EDIT_BITS 0x22
76#define FIRST_EDIT_BITS 0x22
77
78#define FIRST_SET_VAR SPELL_DELAY 76#define FIRST_SET_VAR SPELL_DELAY
79#define VAR_START 0x40 /* increase if adding more than 0x3f functions */ 77#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
80 78
81/* keys for setting variables, must be ordered same as the enum for var_ids */ 79/* keys for setting variables, must be ordered same as the enum for var_ids */
82/* with dec being even and inc being 1 greater */ 80/* with dec being even and inc being 1 greater */
83#define SPELL_DELAY_DEC (VAR_START+0) 81#define SPELL_DELAY_DEC (VAR_START + 0)
84#define SPELL_DELAY_INC (SPELL_DELAY_DEC+1) 82#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1)
85#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC+2) 83#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2)
86#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC+1) 84#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1)
87#define READING_PUNC_DEC (PUNC_LEVEL_DEC+2) 85#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2)
88#define READING_PUNC_INC (READING_PUNC_DEC+1) 86#define READING_PUNC_INC (READING_PUNC_DEC + 1)
89#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC+2) 87#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2)
90#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC+1) 88#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1)
91#define BLEEPS_DEC (ATTRIB_BLEEP_DEC+2) 89#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2)
92#define BLEEPS_INC (BLEEPS_DEC+1) 90#define BLEEPS_INC (BLEEPS_DEC + 1)
93#define RATE_DEC (BLEEPS_DEC+2) 91#define RATE_DEC (BLEEPS_DEC + 2)
94#define RATE_INC (RATE_DEC+1) 92#define RATE_INC (RATE_DEC + 1)
95#define PITCH_DEC (RATE_DEC+2) 93#define PITCH_DEC (RATE_DEC + 2)
96#define PITCH_INC (PITCH_DEC+1) 94#define PITCH_INC (PITCH_DEC + 1)
97#define VOL_DEC (PITCH_DEC+2) 95#define VOL_DEC (PITCH_DEC + 2)
98#define VOL_INC (VOL_DEC+1) 96#define VOL_INC (VOL_DEC + 1)
99#define TONE_DEC (VOL_DEC+2) 97#define TONE_DEC (VOL_DEC + 2)
100#define TONE_INC (TONE_DEC+1) 98#define TONE_INC (TONE_DEC + 1)
101#define PUNCT_DEC (TONE_DEC+2) 99#define PUNCT_DEC (TONE_DEC + 2)
102#define PUNCT_INC (PUNCT_DEC+1) 100#define PUNCT_INC (PUNCT_DEC + 1)
103#define VOICE_DEC (PUNCT_DEC+2) 101#define VOICE_DEC (PUNCT_DEC + 2)
104#define VOICE_INC (VOICE_DEC+1) 102#define VOICE_INC (VOICE_DEC + 1)
105 103
106#endif 104#endif
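The comment above fixes the key layout: each settable variable gets an even _DEC code and an odd _INC code one greater, all offset from VAR_START. A hypothetical decode helper (not part of the driver) that relies only on that layout:

/* Hypothetical helpers, not in the driver: recover the variable index and
 * direction from a key code laid out as described above (dec even,
 * inc = dec + 1, both offset from VAR_START).
 */
static inline int spk_key_to_var(int key)
{
        return (key - VAR_START) / 2;   /* 0 = spell_delay, 1 = punc_level, ... */
}

static inline int spk_key_is_inc(int key)
{
        return (key - VAR_START) & 1;   /* 0 = decrement, 1 = increment */
}
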
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index e8ff5d7d6419..b07f6cc4f284 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -1,16 +1,14 @@
1#ifndef SPEAKUP_TYPES_H 1#ifndef SPEAKUP_TYPES_H
2#define SPEAKUP_TYPES_H 2#define SPEAKUP_TYPES_H
3 3
4/* 4/* This file includes all of the typedefs and structs used in speakup. */
5 * This file includes all of the typedefs and structs used in speakup.
6 */
7 5
8#include <linux/types.h> 6#include <linux/types.h>
9#include <linux/fs.h> 7#include <linux/fs.h>
10#include <linux/errno.h> 8#include <linux/errno.h>
11#include <linux/delay.h> 9#include <linux/delay.h>
12#include <linux/wait.h> /* for wait_queue */ 10#include <linux/wait.h> /* for wait_queue */
13#include <linux/init.h> /* for __init */ 11#include <linux/init.h> /* for __init */
14#include <linux/module.h> 12#include <linux/module.h>
15#include <linux/vt_kern.h> 13#include <linux/vt_kern.h>
16#include <linux/spinlock.h> 14#include <linux/spinlock.h>
@@ -105,7 +103,7 @@ struct st_var_header {
105 enum var_id_t var_id; 103 enum var_id_t var_id;
106 enum var_type_t var_type; 104 enum var_type_t var_type;
107 void *p_val; /* ptr to programs variable to store value */ 105 void *p_val; /* ptr to programs variable to store value */
108 void *data; /* ptr to the vars data */ 106 void *data; /* ptr to the vars data */
109}; 107};
110 108
111struct num_var_t { 109struct num_var_t {
@@ -114,8 +112,8 @@ struct num_var_t {
114 int low; 112 int low;
115 int high; 113 int high;
116 short offset, multiplier; /* for fiddling rates etc. */ 114 short offset, multiplier; /* for fiddling rates etc. */
117 char *out_str; /* if synth needs char representation of number */ 115 char *out_str; /* if synth needs char representation of number */
118 int value; /* current value */ 116 int value; /* current value */
119}; 117};
120 118
121struct punc_var_t { 119struct punc_var_t {
@@ -169,7 +167,7 @@ struct spk_synth {
169 int (*probe)(struct spk_synth *synth); 167 int (*probe)(struct spk_synth *synth);
170 void (*release)(void); 168 void (*release)(void);
171 const char *(*synth_immediate)(struct spk_synth *synth, 169 const char *(*synth_immediate)(struct spk_synth *synth,
172 const char *buff); 170 const char *buff);
173 void (*catch_up)(struct spk_synth *synth); 171 void (*catch_up)(struct spk_synth *synth);
174 void (*flush)(struct spk_synth *synth); 172 void (*flush)(struct spk_synth *synth);
175 int (*is_alive)(struct spk_synth *synth); 173 int (*is_alive)(struct spk_synth *synth);
@@ -181,7 +179,7 @@ struct spk_synth {
181 struct attribute_group attributes; 179 struct attribute_group attributes;
182}; 180};
183 181
184/** 182/*
185 * module_spk_synth() - Helper macro for registering a speakup driver 183 * module_spk_synth() - Helper macro for registering a speakup driver
186 * @__spk_synth: spk_synth struct 184 * @__spk_synth: spk_synth struct
187 * Helper macro for speakup drivers which do not do anything special in module 185 * Helper macro for speakup drivers which do not do anything special in module
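The comment block above (now a plain comment rather than kernel-doc) describes module_spk_synth() as a boilerplate-saving helper. A hedged usage sketch, assuming a made-up driver named synth_example that needs no extra module-level work:

/* Usage sketch only; synth_example is a made-up driver. A synth that does
 * nothing special at module load/unload registers itself with one line
 * instead of open-coded module_init()/module_exit() wrappers.
 */
static struct spk_synth synth_example = {
        .name   = "example",
        .probe  = spk_serial_synth_probe,
        /* remaining handlers elided for brevity */
};

module_spk_synth(synth_example);
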
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 54b2f3918628..a61c02ba06da 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -8,7 +8,7 @@
8#include <linux/delay.h> /* for loops_per_sec */ 8#include <linux/delay.h> /* for loops_per_sec */
9#include <linux/kmod.h> 9#include <linux/kmod.h>
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/uaccess.h> /* for copy_from_user */ 11#include <linux/uaccess.h> /* for copy_from_user */
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/kthread.h> 14#include <linux/kthread.h>
@@ -67,13 +67,14 @@ int spk_serial_synth_probe(struct spk_synth *synth)
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
69 pr_info("%s: ttyS%i, Driver Version %s\n", 69 pr_info("%s: ttyS%i, Driver Version %s\n",
70 synth->long_name, synth->ser, synth->version); 70 synth->long_name, synth->ser, synth->version);
71 synth->alive = 1; 71 synth->alive = 1;
72 return 0; 72 return 0;
73} 73}
74EXPORT_SYMBOL_GPL(spk_serial_synth_probe); 74EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
75 75
76/* Main loop of the progression thread: keep eating from the buffer 76/*
77 * Main loop of the progression thread: keep eating from the buffer
77 * and push to the serial port, waiting as needed 78 * and push to the serial port, waiting as needed
78 * 79 *
79 * For devices that have a "full" notification mechanism, the driver can 80 * For devices that have a "full" notification mechanism, the driver can
@@ -303,12 +304,11 @@ void spk_get_index_count(int *linecount, int *sentcount)
303 sentence_count = ind % 10; 304 sentence_count = ind % 10;
304 305
305 if ((ind / 10) <= synth->indexing.currindex) 306 if ((ind / 10) <= synth->indexing.currindex)
306 index_count = synth->indexing.currindex-(ind/10); 307 index_count = synth->indexing.currindex - (ind / 10);
307 else 308 else
308 index_count = synth->indexing.currindex 309 index_count = synth->indexing.currindex
309 -synth->indexing.lowindex 310 - synth->indexing.lowindex
310 + synth->indexing.highindex-(ind/10)+1; 311 + synth->indexing.highindex - (ind / 10) + 1;
311
312 } 312 }
313 *sentcount = sentence_count; 313 *sentcount = sentence_count;
314 *linecount = index_count; 314 *linecount = index_count;
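The two branches above compute a modular distance over the index window [lowindex, highindex]; a small illustrative helper (not driver code) that makes the arithmetic explicit:

/* Illustrative only: number of indices from 'sent' forward to 'cur' when
 * indices wrap around inside the window [low, high], matching the two
 * branches in spk_get_index_count() above.
 */
static int index_distance(int cur, int sent, int low, int high)
{
        if (sent <= cur)
                return cur - sent;                      /* no wrap */
        return (cur - low) + (high - sent) + 1;         /* wrapped past high */
}
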
@@ -406,8 +406,8 @@ static int do_synth_init(struct spk_synth *in_synth)
406 speakup_register_var(var); 406 speakup_register_var(var);
407 if (!spk_quiet_boot) 407 if (!spk_quiet_boot)
408 synth_printf("%s found\n", synth->long_name); 408 synth_printf("%s found\n", synth->long_name);
409 if (synth->attributes.name 409 if (synth->attributes.name && sysfs_create_group(speakup_kobj,
410 && sysfs_create_group(speakup_kobj, &synth->attributes) < 0) 410 &synth->attributes) < 0)
411 return -ENOMEM; 411 return -ENOMEM;
412 synth_flags = synth->flags; 412 synth_flags = synth->flags;
413 wake_up_interruptible_all(&speakup_event); 413 wake_up_interruptible_all(&speakup_event);
@@ -476,10 +476,10 @@ void synth_remove(struct spk_synth *in_synth)
476 break; 476 break;
477 } 477 }
478 for ( ; synths[i] != NULL; i++) /* compress table */ 478 for ( ; synths[i] != NULL; i++) /* compress table */
479 synths[i] = synths[i+1]; 479 synths[i] = synths[i + 1];
480 module_status = 0; 480 module_status = 0;
481 mutex_unlock(&spk_mutex); 481 mutex_unlock(&spk_mutex);
482} 482}
483EXPORT_SYMBOL_GPL(synth_remove); 483EXPORT_SYMBOL_GPL(synth_remove);
484 484
485short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM }; 485short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 90c383ee7c3f..8c64f1ada6e0 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -27,7 +27,7 @@ int speakup_thread(void *data)
27 our_sound = spk_unprocessed_sound; 27 our_sound = spk_unprocessed_sound;
28 spk_unprocessed_sound.active = 0; 28 spk_unprocessed_sound.active = 0;
29 prepare_to_wait(&speakup_event, &wait, 29 prepare_to_wait(&speakup_event, &wait,
30 TASK_INTERRUPTIBLE); 30 TASK_INTERRUPTIBLE);
31 should_break = kthread_should_stop() || 31 should_break = kthread_should_stop() ||
32 our_sound.active || 32 our_sound.active ||
33 (synth && synth->catch_up && synth->alive && 33 (synth && synth->catch_up && synth->alive &&
@@ -47,7 +47,8 @@ int speakup_thread(void *data)
47 if (our_sound.active) 47 if (our_sound.active)
48 kd_mksound(our_sound.freq, our_sound.jiffies); 48 kd_mksound(our_sound.freq, our_sound.jiffies);
49 if (synth && synth->catch_up && synth->alive) { 49 if (synth && synth->catch_up && synth->alive) {
50 /* It is up to the callee to take the lock, so that it 50 /*
51 * It is up to the callee to take the lock, so that it
51 * can sleep whenever it likes 52 * can sleep whenever it likes
52 */ 53 */
53 synth->catch_up(synth); 54 synth->catch_up(synth);
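The surrounding loop follows the standard prepare_to_wait()/finish_wait() idiom for an interruptible kthread. A minimal sketch of that idiom; the wait queue ev, work_pending() and do_work() are stand-ins, not symbols from this patch:

/* Minimal sketch of the kthread wait idiom used above; ev, work_pending()
 * and do_work() are stand-ins, not symbols from this patch.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(ev);

static int example_thread(void *data)
{
        DEFINE_WAIT(wait);

        while (!kthread_should_stop()) {
                prepare_to_wait(&ev, &wait, TASK_INTERRUPTIBLE);
                if (!work_pending() && !kthread_should_stop())
                        schedule();             /* sleep until woken */
                finish_wait(&ev, &wait);

                if (work_pending())
                        do_work();              /* stand-in for catch_up() etc. */
        }
        return 0;
}
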
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 21186e3dc7ad..cc984196020f 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -237,8 +237,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
237 if (!var_data->u.n.out_str) 237 if (!var_data->u.n.out_str)
238 l = sprintf(cp, var_data->u.n.synth_fmt, (int)val); 238 l = sprintf(cp, var_data->u.n.synth_fmt, (int)val);
239 else 239 else
240 l = sprintf(cp, 240 l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
241 var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
242 synth_printf("%s", cp); 241 synth_printf("%s", cp);
243 return 0; 242 return 0;
244} 243}
@@ -266,7 +265,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
266 return 0; 265 return 0;
267} 266}
268 267
269/* spk_set_mask_bits sets or clears the punc/delim/repeat bits, 268/*
269 * spk_set_mask_bits sets or clears the punc/delim/repeat bits,
270 * if input is null uses the defaults. 270 * if input is null uses the defaults.
271 * values for how: 0 clears bits of chars supplied, 271 * values for how: 0 clears bits of chars supplied,
272 * 1 clears allk, 2 sets bits for chars 272 * 1 clears allk, 2 sets bits for chars
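The reworded comment keeps the original 'how' encoding: 0 clears bits for the supplied chars, 1 clears them for all chars, 2 sets bits for the supplied chars (by symmetry 3 would set them for all, though that line falls outside the hunk). A hypothetical illustration of that encoding on a plain 256-entry table, not the driver's actual implementation:

/* Hypothetical illustration of the 'how' encoding above, not driver code:
 * the low bit selects "all characters" vs. "only the supplied ones", and
 * set-vs-clear follows from how >= 2.
 */
static void apply_mask_bits(u16 tbl[256], const char *chars, u16 mask, int how)
{
        int i;
        int set = (how >= 2);

        if (how & 1) {                          /* 1 = clear all, 3 = set all */
                for (i = 0; i < 256; i++)
                        tbl[i] = set ? (tbl[i] | mask) : (tbl[i] & ~mask);
                return;
        }
        for (; chars && *chars; chars++)        /* 0 = clear, 2 = set, per char */
                tbl[(unsigned char)*chars] = set ?
                        (tbl[(unsigned char)*chars] | mask) :
                        (tbl[(unsigned char)*chars] & ~mask);
}
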
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 9081b3f8779c..54f490090a59 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,20 +1,20 @@
1/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */ 1/* Copyright (C) 2010 - 2016 UNISYS CORPORATION */
2/* All rights reserved. */ 2/* All rights reserved. */
3#ifndef __IOCHANNEL_H__ 3#ifndef __IOCHANNEL_H__
4#define __IOCHANNEL_H__ 4#define __IOCHANNEL_H__
5 5
6/* 6/*
7 * Everything needed for IOPart-GuestPart communication is define in 7 * Everything needed for IOPart-GuestPart communication is define in
8 * this file. Note: Everything is OS-independent because this file is 8 * this file. Note: Everything is OS-independent because this file is
9 * used by Windows, Linux and possible EFI drivers. 9 * used by Windows, Linux and possible EFI drivers.
10 */ 10 */
11 11
12/* 12/*
13 * Communication flow between the IOPart and GuestPart uses the channel headers 13 * Communication flow between the IOPart and GuestPart uses the channel headers
14 * channel state. The following states are currently being used: 14 * channel state. The following states are currently being used:
15 * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED 15 * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
16 * 16 *
17 * additional states will be used later. No locking is needed to switch between 17 * Additional states will be used later. No locking is needed to switch between
18 * states due to the following rules: 18 * states due to the following rules:
19 * 19 *
20 * 1. IOPart is only the only partition allowed to change from UNIT 20 * 1. IOPart is only the only partition allowed to change from UNIT
@@ -39,10 +39,11 @@
39#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \ 39#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
40 ULTRA_CHANNEL_PROTOCOL_SIGNATURE 40 ULTRA_CHANNEL_PROTOCOL_SIGNATURE
41 41
42/* Must increment these whenever you insert or delete fields within this channel 42/*
43 * struct. Also increment whenever you change the meaning of fields within this 43 * Must increment these whenever you insert or delete fields within this channel
44 * channel struct so as to break pre-existing software. Note that you can 44 * struct. Also increment whenever you change the meaning of fields within this
45 * usually add fields to the END of the channel struct withOUT needing to 45 * channel struct so as to break pre-existing software. Note that you can
46 * usually add fields to the END of the channel struct without needing to
46 * increment this. 47 * increment this.
47 */ 48 */
48#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2 49#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
@@ -70,59 +71,62 @@
70#define MINNUM(a, b) (((a) < (b)) ? (a) : (b)) 71#define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
71#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b)) 72#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
72 73
73/* define the two queues per data channel between iopart and ioguestparts */ 74/* Define the two queues per data channel between iopart and ioguestparts. */
74/* used by ioguestpart to 'insert' signals to iopart */ 75/* Used by ioguestpart to 'insert' signals to iopart. */
75#define IOCHAN_TO_IOPART 0 76#define IOCHAN_TO_IOPART 0
76/* used by ioguestpart to 'remove' signals from iopart, same previous queue */ 77/* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */
77#define IOCHAN_FROM_IOPART 1 78#define IOCHAN_FROM_IOPART 1
78 79
79/* size of cdb - i.e., scsi cmnd */ 80/* Size of cdb - i.e., SCSI cmnd */
80#define MAX_CMND_SIZE 16 81#define MAX_CMND_SIZE 16
81 82
82#define MAX_SENSE_SIZE 64 83#define MAX_SENSE_SIZE 64
83 84
84#define MAX_PHYS_INFO 64 85#define MAX_PHYS_INFO 64
85 86
86/* various types of network packets that can be sent in cmdrsp */ 87/* Various types of network packets that can be sent in cmdrsp. */
87enum net_types { 88enum net_types {
88 NET_RCV_POST = 0, /* submit buffer to hold receiving 89 NET_RCV_POST = 0, /*
90 * Submit buffer to hold receiving
89 * incoming packet 91 * incoming packet
90 */ 92 */
91 /* virtnic -> uisnic */ 93 /* visornic -> uisnic */
92 NET_RCV, /* incoming packet received */ 94 NET_RCV, /* incoming packet received */
93 /* uisnic -> virtpci */ 95 /* uisnic -> virtpci */
94 NET_XMIT, /* for outgoing net packets */ 96 NET_XMIT, /* for outgoing net packets */
95 /* virtnic -> uisnic */ 97 /* visornic -> uisnic */
96 NET_XMIT_DONE, /* outgoing packet xmitted */ 98 NET_XMIT_DONE, /* outgoing packet xmitted */
97 /* uisnic -> virtpci */ 99 /* uisnic -> virtpci */
98 NET_RCV_ENBDIS, /* enable/disable packet reception */ 100 NET_RCV_ENBDIS, /* enable/disable packet reception */
99 /* virtnic -> uisnic */ 101 /* visornic -> uisnic */
100 NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */ 102 NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */
101 /* reception */ 103 /* reception */
102 /* uisnic -> virtnic */ 104 /* uisnic -> visornic */
103 NET_RCV_PROMISC, /* enable/disable promiscuous mode */ 105 NET_RCV_PROMISC, /* enable/disable promiscuous mode */
104 /* virtnic -> uisnic */ 106 /* visornic -> uisnic */
105 NET_CONNECT_STATUS, /* indicate the loss or restoration of a network 107 NET_CONNECT_STATUS, /*
108 * indicate the loss or restoration of a network
106 * connection 109 * connection
107 */ 110 */
108 /* uisnic -> virtnic */ 111 /* uisnic -> visornic */
109 NET_MACADDR, /* indicates the client has requested to update 112 NET_MACADDR, /*
110 * its MAC addr 113 * Indicates the client has requested to update
114 * it's MAC address
111 */ 115 */
112 NET_MACADDR_ACK, /* MAC address */ 116 NET_MACADDR_ACK, /* MAC address acknowledge */
113 117
114}; 118};
115 119
116#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */ 120#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
117#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE) 121#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
118 122
119#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */ 123#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
120 124
121#ifndef MAX_MACADDR_LEN 125#ifndef MAX_MACADDR_LEN
122#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */ 126#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
123#endif /* MAX_MACADDR_LEN */ 127#endif
124 128
125/* various types of scsi task mgmt commands */ 129/* Various types of scsi task mgmt commands. */
126enum task_mgmt_types { 130enum task_mgmt_types {
127 TASK_MGMT_ABORT_TASK = 1, 131 TASK_MGMT_ABORT_TASK = 1,
128 TASK_MGMT_BUS_RESET, 132 TASK_MGMT_BUS_RESET,
@@ -130,7 +134,7 @@ enum task_mgmt_types {
130 TASK_MGMT_TARGET_RESET, 134 TASK_MGMT_TARGET_RESET,
131}; 135};
132 136
133/* various types of vdisk mgmt commands */ 137/* Various types of vdisk mgmt commands. */
134enum vdisk_mgmt_types { 138enum vdisk_mgmt_types {
135 VDISK_MGMT_ACQUIRE = 1, 139 VDISK_MGMT_ACQUIRE = 1,
136 VDISK_MGMT_RELEASE, 140 VDISK_MGMT_RELEASE,
@@ -144,7 +148,7 @@ struct phys_info {
144 148
145#define MIN_NUMSIGNALS 64 149#define MIN_NUMSIGNALS 64
146 150
147/* structs with pragma pack */ 151/* Structs with pragma pack. */
148 152
149struct guest_phys_info { 153struct guest_phys_info {
150 u64 address; 154 u64 address;
@@ -154,9 +158,9 @@ struct guest_phys_info {
154#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info)) 158#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
155 159
156struct uisscsi_dest { 160struct uisscsi_dest {
157 u32 channel; /* channel == bus number */ 161 u32 channel; /* channel == bus number */
158 u32 id; /* id == target number */ 162 u32 id; /* id == target number */
159 u32 lun; /* lun == logical unit number */ 163 u32 lun; /* lun == logical unit number */
160} __packed; 164} __packed;
161 165
162struct vhba_wwnn { 166struct vhba_wwnn {
@@ -164,7 +168,8 @@ struct vhba_wwnn {
164 u32 wwnn2; 168 u32 wwnn2;
165} __packed; 169} __packed;
166 170
167/* WARNING: Values stired in this structure must contain maximum counts (not 171/*
172 * WARNING: Values stired in this structure must contain maximum counts (not
168 * maximum values). 173 * maximum values).
169 */ 174 */
170struct vhba_config_max {/* 20 bytes */ 175struct vhba_config_max {/* 20 bytes */
@@ -187,23 +192,24 @@ struct uiscmdrsp_scsi {
187 * information for each 192 * information for each
188 * fragment 193 * fragment
189 */ 194 */
190 enum dma_data_direction data_dir; /* direction of the data, if any */ 195 enum dma_data_direction data_dir; /* direction of the data, if any */
191 struct uisscsi_dest vdest; /* identifies the virtual hba, id, */ 196 struct uisscsi_dest vdest; /* identifies the virtual hba, id, */
192 /* channel, lun to which cmd was sent */ 197 /* channel, lun to which cmd was sent */
193 198
194 /* Needed to queue the rsp back to cmd originator */ 199 /* Needed to queue the rsp back to cmd originator. */
195 int linuxstat; /* original Linux status used by linux vdisk */ 200 int linuxstat; /* original Linux status used by Linux vdisk */
196 u8 scsistat; /* the scsi status */ 201 u8 scsistat; /* the scsi status */
197 u8 addlstat; /* non-scsi status */ 202 u8 addlstat; /* non-scsi status */
198#define ADDL_SEL_TIMEOUT 4 203#define ADDL_SEL_TIMEOUT 4
199 204
200 /* the following fields are need to determine the result of command */ 205 /* The following fields are need to determine the result of command. */
201 u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */ 206 u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
202 /* it holds the sense_data struct; */ 207 /* sensebuf holds the sense_data struct; */
203 /* see that struct for details. */ 208 /* See sense_data struct for more details. */
204 void *vdisk; /* pointer to the vdisk to clean up when IO completes. */ 209 void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
205 int no_disk_result; 210 int no_disk_result;
206 /* used to return no disk inquiry result 211 /*
212 * Used to return no disk inquiry result
207 * when no_disk_result is set to 1, 213 * when no_disk_result is set to 1,
208 * scsi.scsistat is SAM_STAT_GOOD 214 * scsi.scsistat is SAM_STAT_GOOD
209 * scsi.addlstat is 0 215 * scsi.addlstat is 0
@@ -212,35 +218,44 @@ struct uiscmdrsp_scsi {
212 */ 218 */
213} __packed; 219} __packed;
214 220
215/* Defines to support sending correct inquiry result when no disk is 221/*
222 * Defines to support sending correct inquiry result when no disk is
216 * configured. 223 * configured.
217 */ 224 */
218 225
219/* From SCSI SPC2 - 226/*
227 * From SCSI SPC2 -
220 * 228 *
221 * If the target is not capable of supporting a device on this logical unit, the 229 * If the target is not capable of supporting a device on this logical unit, the
222 * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b 230 * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
223 * and PERIPHERAL DEVICE TYPE set to 1Fh). 231 * and PERIPHERAL DEVICE TYPE set to 1Fh).
224 * 232 *
225 *The device server is capable of supporting the specified peripheral device 233 * The device server is capable of supporting the specified peripheral device
226 *type on this logical unit. However, the physical device is not currently 234 * type on this logical unit. However, the physical device is not currently
227 *connected to this logical unit. 235 * connected to this logical unit.
228 */ 236 */
229 237
230#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */ 238#define DEV_NOT_CAPABLE 0x7f /*
231 /* peripheral type of 0x1f */ 239 * peripheral qualifier of 0x3
232 /* specifies no device but target present */ 240 * peripheral type of 0x1f
241 * specifies no device but target present
242 */
233 243
234#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */ 244#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1
235 /* peripheral type of 0 - disk */ 245 * peripheral type of 0 - disk
236 /* specifies device capable, but not present */ 246 * Specifies device capable, but
247 * not present
248 */
237 249
238#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */ 250#define DEV_HISUPPORT 0x10 /*
239 /* must be returned for lun 0. */ 251 * HiSup = 1; shows support for report luns
252 * must be returned for lun 0.
253 */
240 254
241/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length 255/*
242 * in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product 256 * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
243 * & revision. Yikes! So let us always send back 36 bytes, the minimum for 257 * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
258 * and revision. Yikes! So let us always send back 36 bytes, the minimum for
244 * inquiry result. 259 * inquiry result.
245 */ 260 */
246#define NO_DISK_INQUIRY_RESULT_LEN 36 261#define NO_DISK_INQUIRY_RESULT_LEN 36
@@ -248,11 +263,12 @@ struct uiscmdrsp_scsi {
248#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */ 263#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */
249 264
250/* SCSI device version for no disk inquiry result */ 265/* SCSI device version for no disk inquiry result */
251#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ 266#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
252 267
253/* Struct & Defines to support sense information. */ 268/* Struct and Defines to support sense information. */
254 269
255/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is 270/*
271 * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
256 * initialized in exactly the manner that is recommended in Windows (hence the 272 * initialized in exactly the manner that is recommended in Windows (hence the
257 * odd values). 273 * odd values).
258 * When set, these fields will have the following values: 274 * When set, these fields will have the following values:
@@ -288,7 +304,7 @@ struct net_pkt_xmt {
288 struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */ 304 struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */
289 char ethhdr[ETH_HLEN]; /* the ethernet header */ 305 char ethhdr[ETH_HLEN]; /* the ethernet header */
290 struct { 306 struct {
291 /* these are needed for csum at uisnic end */ 307 /* These are needed for csum at uisnic end */
292 u8 valid; /* 1 = struct is valid - else ignore */ 308 u8 valid; /* 1 = struct is valid - else ignore */
293 u8 hrawoffv; /* 1 = hwrafoff is valid */ 309 u8 hrawoffv; /* 1 = hwrafoff is valid */
294 u8 nhrawoffv; /* 1 = nhwrafoff is valid */ 310 u8 nhrawoffv; /* 1 = nhwrafoff is valid */
@@ -300,7 +316,8 @@ struct net_pkt_xmt {
300 /* nhrawoff points to the start of the NETWORK LAYER HEADER */ 316 /* nhrawoff points to the start of the NETWORK LAYER HEADER */
301 } lincsum; 317 } lincsum;
302 318
303 /* **** NOTE **** 319 /*
320 * NOTE:
304 * The full packet is described in frags but the ethernet header is 321 * The full packet is described in frags but the ethernet header is
305 * separately kept in ethhdr so that uisnic doesn't have "MAP" the 322 * separately kept in ethhdr so that uisnic doesn't have "MAP" the
306 * guest memory to get to the header. uisnic needs ethhdr to 323 * guest memory to get to the header. uisnic needs ethhdr to
@@ -309,14 +326,15 @@ struct net_pkt_xmt {
309} __packed; 326} __packed;
310 327
311struct net_pkt_xmtdone { 328struct net_pkt_xmtdone {
312 u32 xmt_done_result; /* result of NET_XMIT */ 329 u32 xmt_done_result; /* result of NET_XMIT */
313} __packed; 330} __packed;
314 331
315/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The 332/*
333 * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size (64) The
316 * reason is because dev_skb_alloc which is used to generate RCV_POST skbs in 334 * reason is because dev_skb_alloc which is used to generate RCV_POST skbs in
317 * virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I 335 * visornic requires that there is "overhead" in the buffer, and pads 16 bytes.
318 * prefer to use 1 full cache line size for "overhead" so that transfers are 336 * Use 1 full cache line size for "overhead" so that transfers are optimized.
319 * better. IOVM requires that a buffer be represented by 1 phys_info structure 337 * IOVM requires that a buffer be represented by 1 phys_info structure
320 * which can only cover page_size. 338 * which can only cover page_size.
321 */ 339 */
322#define RCVPOST_BUF_SIZE 4032 340#define RCVPOST_BUF_SIZE 4032
@@ -324,26 +342,38 @@ struct net_pkt_xmtdone {
324 ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \ 342 ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
325 / RCVPOST_BUF_SIZE) 343 / RCVPOST_BUF_SIZE)
326 344
345/*
346 * rcv buf size must be large enough to include ethernet data len + ethernet
347 * header len - we are choosing 2K because it is guaranteed to be describable.
348 */
327struct net_pkt_rcvpost { 349struct net_pkt_rcvpost {
328 /* rcv buf size must be large enough to include ethernet data len + 350 /* Physical page information for the single fragment 2K rcv buf */
329 * ethernet header len - we are choosing 2K because it is guaranteed 351 struct phys_info frag;
330 * to be describable 352
331 */ 353 /*
332 struct phys_info frag; /* physical page information for the */ 354 * Ensures that receive posts are returned to the adapter which we sent
333 /* single fragment 2K rcv buf */ 355 * them from originally.
334 u64 unique_num; 356 */
335 /* unique_num ensure that receive posts are returned to */ 357 u64 unique_num;
336 /* the Adapter which we sent them originally. */ 358
337} __packed; 359} __packed;
338 360
361/*
362 * The number of rcvbuf that can be chained is based on max mtu and size of each
363 * rcvbuf.
364 */
339struct net_pkt_rcv { 365struct net_pkt_rcv {
340 /* the number of receive buffers that can be chained */ 366 u32 rcv_done_len; /* length of received data */
341 /* is based on max mtu and size of each rcv buf */ 367
342 u32 rcv_done_len; /* length of received data */ 368 /*
343 u8 numrcvbufs; /* number of receive buffers that contain the */ 369 * numrcvbufs: contain the incoming data; guest side MUST chain these
344 /* incoming data; guest end MUST chain these together. */ 370 * together.
345 void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */ 371 */
346 /* each entry is a receive buffer provided by NET_RCV_POST. */ 372 u8 numrcvbufs;
373
374 void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
375
376 /* Each entry is a receive buffer provided by NET_RCV_POST. */
347 /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */ 377 /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
348 u64 unique_num; 378 u64 unique_num;
349 u32 rcvs_dropped_delta; 379 u32 rcvs_dropped_delta;
@@ -351,12 +381,12 @@ struct net_pkt_rcv {
351 381
352struct net_pkt_enbdis { 382struct net_pkt_enbdis {
353 void *context; 383 void *context;
354 u16 enable; /* 1 = enable, 0 = disable */ 384 u16 enable; /* 1 = enable, 0 = disable */
355} __packed; 385} __packed;
356 386
357struct net_pkt_macaddr { 387struct net_pkt_macaddr {
358 void *context; 388 void *context;
359 u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */ 389 u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
360} __packed; 390} __packed;
361 391
362/* cmd rsp packet used for VNIC network traffic */ 392/* cmd rsp packet used for VNIC network traffic */
@@ -377,41 +407,44 @@ struct uiscmdrsp_net {
377} __packed; 407} __packed;
378 408
379struct uiscmdrsp_scsitaskmgmt { 409struct uiscmdrsp_scsitaskmgmt {
410 /* The type of task. */
380 enum task_mgmt_types tasktype; 411 enum task_mgmt_types tasktype;
381 412
382 /* the type of task */ 413 /* The vdisk for which this task mgmt is generated. */
383 struct uisscsi_dest vdest; 414 struct uisscsi_dest vdest;
384 415
385 /* the vdisk for which this task mgmt is generated */ 416 /*
417 * This is a handle that the guest has saved off for its own use.
418 * The handle value is preserved by iopart and returned as in task
419 * mgmt rsp.
420 */
386 u64 handle; 421 u64 handle;
387 422
388 /* This is a handle that the guest has saved off for its own use. 423 /*
389 * Its value is preserved by iopart & returned as is in the task 424 * For Linux guests, this is a pointer to wait_queue_head that a
390 * mgmt rsp. 425 * thread is waiting on to see if the taskmgmt command has completed.
391 */ 426 * When the rsp is received by guest, the thread receiving the
427 * response uses this to notify the thread waiting for taskmgmt
428 * command completion. It's value is preserved by iopart and returned
429 * as in the task mgmt rsp.
430 */
392 u64 notify_handle; 431 u64 notify_handle;
393 432
394 /* For linux guests, this is a pointer to wait_queue_head that a 433 /*
395 * thread is waiting on to see if the taskmgmt command has completed. 434 * This is a handle to the location in the guest where the result of
396 * When the rsp is received by guest, the thread receiving the 435 * the taskmgmt command (result field) is saved to when the response
397 * response uses this to notify the thread waiting for taskmgmt 436 * is handled. It's value is preserved by iopart and returned as in
398 * command completion. Its value is preserved by iopart & returned 437 * the task mgmt rsp.
399 * as is in the task mgmt rsp. 438 */
400 */
401 u64 notifyresult_handle; 439 u64 notifyresult_handle;
402 440
403 /* this is a handle to location in guest where the result of the 441 /* Result of taskmgmt command - set by IOPart - values are: */
404 * taskmgmt command (result field) is to saved off when the response
405 * is handled. Its value is preserved by iopart & returned as is in
406 * the task mgmt rsp.
407 */
408 char result; 442 char result;
409 443
410 /* result of taskmgmt command - set by IOPart - values are: */
411#define TASK_MGMT_FAILED 0 444#define TASK_MGMT_FAILED 0
412} __packed; 445} __packed;
413 446
414/* Used by uissd to send disk add/remove notifications to Guest */ 447/* Used by uissd to send disk add/remove notifications to Guest. */
415/* Note that the vHba pointer is not used by the Client/Guest side. */ 448/* Note that the vHba pointer is not used by the Client/Guest side. */
416struct uiscmdrsp_disknotify { 449struct uiscmdrsp_disknotify {
417 u8 add; /* 0-remove, 1-add */ 450 u8 add; /* 0-remove, 1-add */
@@ -419,49 +452,50 @@ struct uiscmdrsp_disknotify {
419 u32 channel, id, lun; /* SCSI Path of Disk to added or removed */ 452 u32 channel, id, lun; /* SCSI Path of Disk to added or removed */
420} __packed; 453} __packed;
421 454
422/* The following is used by virthba/vSCSI to send the Acquire/Release commands 455/*
456 * The following is used by virthba/vSCSI to send the Acquire/Release commands
423 * to the IOVM. 457 * to the IOVM.
424 */ 458 */
425struct uiscmdrsp_vdiskmgmt { 459struct uiscmdrsp_vdiskmgmt {
460 /* The type of task */
426 enum vdisk_mgmt_types vdisktype; 461 enum vdisk_mgmt_types vdisktype;
427 462
428 /* the type of task */ 463 /* The vdisk for which this task mgmt is generated */
429 struct uisscsi_dest vdest; 464 struct uisscsi_dest vdest;
430 465
431 /* the vdisk for which this task mgmt is generated */ 466 /*
467 * This is a handle that the guest has saved off for its own use. It's
468 * value is preserved by iopart and returned as in the task mgmt rsp.
469 */
432 u64 handle; 470 u64 handle;
433 471
434 /* This is a handle that the guest has saved off for its own use. 472 /*
435 * Its value is preserved by iopart & returned as is in the task 473 * For Linux guests, this is a pointer to wait_queue_head that a
436 * mgmt rsp. 474 * thread is waiting on to see if the tskmgmt command has completed.
437 */ 475 * When the rsp is received by guest, the thread receiving the
476 * response uses this to notify the thread waiting for taskmgmt
477 * command completion. It's value is preserved by iopart and returned
478 * as in the task mgmt rsp.
479 */
438 u64 notify_handle; 480 u64 notify_handle;
439 481
440 /* For linux guests, this is a pointer to wait_queue_head that a 482 /*
441 * thread is waiting on to see if the tskmgmt command has completed. 483 * Handle to the location in guest where the result of the
442 * When the rsp is received by guest, the thread receiving the 484 * taskmgmt command (result field) is saved to when the response
443 * response uses this to notify the thread waiting for taskmgmt 485 * is handled. It's value is preserved by iopart and returned as in
444 * command completion. Its value is preserved by iopart & returned 486 * the task mgmt rsp.
445 * as is in the task mgmt rsp. 487 */
446 */
447 u64 notifyresult_handle; 488 u64 notifyresult_handle;
448 489
449 /* this is a handle to location in guest where the result of the 490 /* Result of taskmgmt command - set by IOPart - values are: */
450 * taskmgmt command (result field) is to saved off when the response
451 * is handled. Its value is preserved by iopart & returned as is in
452 * the task mgmt rsp.
453 */
454 char result; 491 char result;
455
456 /* result of taskmgmt command - set by IOPart - values are: */
457#define VDISK_MGMT_FAILED 0
458} __packed; 492} __packed;
459 493
460/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */ 494/* Keeping cmd and rsp info in one structure for now cmd rsp packet for SCSI */
461struct uiscmdrsp { 495struct uiscmdrsp {
462 char cmdtype; 496 char cmdtype;
463 497
464/* describes what type of information is in the struct */ 498/* Describes what type of information is in the struct */
465#define CMD_SCSI_TYPE 1 499#define CMD_SCSI_TYPE 1
466#define CMD_NET_TYPE 2 500#define CMD_NET_TYPE 2
467#define CMD_SCSITASKMGMT_TYPE 3 501#define CMD_SCSITASKMGMT_TYPE 3
@@ -474,11 +508,11 @@ struct uiscmdrsp {
474 struct uiscmdrsp_disknotify disknotify; 508 struct uiscmdrsp_disknotify disknotify;
475 struct uiscmdrsp_vdiskmgmt vdiskmgmt; 509 struct uiscmdrsp_vdiskmgmt vdiskmgmt;
476 }; 510 };
477 void *private_data; /* send the response when the cmd is */ 511 /* Send the response when the cmd is done (scsi and scsittaskmgmt). */
478 /* done (scsi & scsittaskmgmt). */ 512 void *private_data;
479 struct uiscmdrsp *next; /* General Purpose Queue Link */ 513 struct uiscmdrsp *next; /* General Purpose Queue Link */
480 struct uiscmdrsp *activeQ_next; /* Used to track active commands */ 514 struct uiscmdrsp *activeQ_next; /* Pointer to the nextactive commands */
481 struct uiscmdrsp *activeQ_prev; /* Used to track active commands */ 515 struct uiscmdrsp *activeQ_prev; /* Pointer to the prevactive commands */
482} __packed; 516} __packed;
483 517
484struct iochannel_vhba { 518struct iochannel_vhba {
@@ -491,7 +525,8 @@ struct iochannel_vnic {
491 u32 mtu; /* 4 bytes */ 525 u32 mtu; /* 4 bytes */
492 uuid_le zone_uuid; /* 16 bytes */ 526 uuid_le zone_uuid; /* 16 bytes */
493} __packed; 527} __packed;
494/* This is just the header of the IO channel. It is assumed that directly after 528/*
529 * This is just the header of the IO channel. It is assumed that directly after
495 * this header there is a large region of memory which contains the command and 530 * this header there is a large region of memory which contains the command and
496 * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS. 531 * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
497 */ 532 */
@@ -505,31 +540,19 @@ struct spar_io_channel_protocol {
505 } __packed; 540 } __packed;
506 541
507#define MAX_CLIENTSTRING_LEN 1024 542#define MAX_CLIENTSTRING_LEN 1024
508 /* client_string is NULL termimated so holds max -1 bytes */ 543 /* client_string is NULL termimated so holds max-1 bytes */
509 u8 client_string[MAX_CLIENTSTRING_LEN]; 544 u8 client_string[MAX_CLIENTSTRING_LEN];
510} __packed; 545} __packed;
511 546
512/* INLINE functions for initializing and accessing I/O data channels */ 547/* INLINE functions for initializing and accessing I/O data channels. */
513#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
514#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64)) 548#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
515 549
516#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \ 550/* Use 4K page sizes when passing page info between Guest and IOPartition. */
517 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
518
519/*
520 * INLINE function for expanding a guest's pfn-off-size into multiple 4K page
521 * pfn-off-size entires.
522 */
523
524/* use 4K page sizes when we it comes to passing page information between */
525/* Guest and IOPartition. */
526#define PI_PAGE_SIZE 0x1000 551#define PI_PAGE_SIZE 0x1000
527#define PI_PAGE_MASK 0x0FFF 552#define PI_PAGE_MASK 0x0FFF
528 553
529/* returns next non-zero index on success or zero on failure (i.e. out of 554/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
530 * room) 555static inline u16
531 */
532static inline u16
533add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index, 556add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
534 u16 max_pi_arr_entries, struct phys_info pi_arr[]) 557 u16 max_pi_arr_entries, struct phys_info pi_arr[])
535{ 558{
@@ -538,7 +561,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
538 561
539 firstlen = PI_PAGE_SIZE - inp_off; 562 firstlen = PI_PAGE_SIZE - inp_off;
540 if (inp_len <= firstlen) { 563 if (inp_len <= firstlen) {
541 /* the input entry spans only one page - add as is */ 564 /* The input entry spans only one page - add as is. */
542 if (index >= max_pi_arr_entries) 565 if (index >= max_pi_arr_entries)
543 return 0; 566 return 0;
544 pi_arr[index].pi_pfn = inp_pfn; 567 pi_arr[index].pi_pfn = inp_pfn;
@@ -547,7 +570,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
547 return index + 1; 570 return index + 1;
548 } 571 }
549 572
550 /* this entry spans multiple pages */ 573 /* This entry spans multiple pages. */
551 for (len = inp_len, i = 0; len; 574 for (len = inp_len, i = 0; len;
552 len -= pi_arr[index + i].pi_len, i++) { 575 len -= pi_arr[index + i].pi_len, i++) {
553 if (index + i >= max_pi_arr_entries) 576 if (index + i >= max_pi_arr_entries)
@@ -565,4 +588,4 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
565 return index + i; 588 return index + i;
566} 589}
567 590
568#endif /* __IOCHANNEL_H__ */ 591#endif /* __IOCHANNEL_H__ */
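The buffer-size reasoning in the RCVPOST_BUF_SIZE comment is easy to sanity-check numerically; a tiny standalone program (not driver code) reproducing the arithmetic behind RCVPOST_BUF_SIZE and MAX_NET_RCV_CHAIN:

/* Standalone arithmetic check, not driver code: 4096-byte pages minus a
 * 64-byte cache line leave 4032-byte receive buffers, and a 16384-byte
 * max MTU plus a 14-byte Ethernet header then needs at most 5 of them.
 */
#include <stdio.h>

int main(void)
{
        const int page = 4096, cacheline = 64, mtu = 16384, hlen = 14;
        const int bufsz = page - cacheline;                     /* 4032 */
        const int chain = (mtu + hlen + bufsz - 1) / bufsz;     /* ceiling */

        printf("RCVPOST_BUF_SIZE = %d, MAX_NET_RCV_CHAIN = %d\n", bufsz, chain);
        return 0;
}
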
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 677627c72c4c..03d56f818a86 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -166,6 +166,8 @@ struct visor_device {
166 struct controlvm_message_header *pending_msg_hdr; 166 struct controlvm_message_header *pending_msg_hdr;
167 void *vbus_hdr_info; 167 void *vbus_hdr_info;
168 uuid_le partition_uuid; 168 uuid_le partition_uuid;
169 struct dentry *debugfs_dir;
170 struct dentry *debugfs_client_bus_info;
169}; 171};
170 172
171#define to_visor_device(x) container_of(x, struct visor_device, device) 173#define to_visor_device(x) container_of(x, struct visor_device, device)
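The two new dentry fields above pair with the debugfs_remove() calls added in visorbus_main.c further down; a minimal sketch of the create/tear-down pattern they imply (directory and file names are placeholders, not the driver's):

/* Sketch of the debugfs lifetime pattern the new fields support; the
 * names "example" and "client_bus_info" here are placeholders.
 */
#include <linux/debugfs.h>

static struct dentry *example_dir;

static void example_debugfs_init(const struct file_operations *fops, void *data)
{
        example_dir = debugfs_create_dir("example", NULL);
        debugfs_create_file("client_bus_info", 0444, example_dir, data, fops);
}

static void example_debugfs_exit(void)
{
        debugfs_remove_recursive(example_dir);  /* removes the file as well */
}
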
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index e97917522f6a..b0df26155d02 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -23,6 +23,7 @@
23 * the client devices and client drivers for the server end to see. 23 * the client devices and client drivers for the server end to see.
24 */ 24 */
25#include <linux/uuid.h> 25#include <linux/uuid.h>
26#include <linux/ctype.h>
26#include "channel.h" 27#include "channel.h"
27 28
28/* {193b331b-c58f-11da-95a9-00e08161165f} */ 29/* {193b331b-c58f-11da-95a9-00e08161165f} */
@@ -50,12 +51,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
50 SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \ 51 SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
51 SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE) 52 SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
52 53
53#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
54 (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
55 "vbus", \
56 sizeof(struct spar_vbus_channel_protocol),\
57 actual_bytes))
58
59#pragma pack(push, 1) /* both GCC and VC now allow this pragma */ 54#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
60 55
61/* 56/*
@@ -72,199 +67,38 @@ struct ultra_vbus_deviceinfo {
72}; 67};
73 68
74/** 69/**
75 * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer 70 * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
76 * @p: destination buffer where chars are written to 71 * and write it to a seq_file
77 * @remain: number of bytes that can be written starting at #p
78 * @src: pointer to source buffer
79 * @srcmax: number of valid characters at #src
80 *
81 * Reads chars from the buffer at @src for @srcmax bytes, and writes to
82 * the buffer at @p, which is @remain bytes long, ensuring never to
83 * overflow the buffer at @p, using the following rules:
84 * - printable characters are simply copied from the buffer at @src to the
85 * buffer at @p
86 * - intervening streaks of non-printable characters in the buffer at @src
87 * are replaced with a single space in the buffer at @p
88 * Note that we pay no attention to '\0'-termination.
89 *
90 * Pass @p == NULL and @remain == 0 for this special behavior -- In this
91 * case, we simply return the number of bytes that WOULD HAVE been written
92 * to a buffer at @p, had it been infinitely big.
93 *
94 * Return: the number of bytes written to @p (or WOULD HAVE been written to
95 * @p, as described in the previous paragraph)
96 */
97static inline int
98vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
99{
100 int chars = 0;
101 int nonprintable_streak = 0;
102
103 while (srcmax > 0) {
104 if ((*src >= ' ') && (*src < 0x7f)) {
105 if (nonprintable_streak) {
106 if (remain > 0) {
107 *p = ' ';
108 p++;
109 remain--;
110 chars++;
111 } else if (!p) {
112 chars++;
113 }
114 nonprintable_streak = 0;
115 }
116 if (remain > 0) {
117 *p = *src;
118 p++;
119 remain--;
120 chars++;
121 } else if (!p) {
122 chars++;
123 }
124 } else {
125 nonprintable_streak = 1;
126 }
127 src++;
128 srcmax--;
129 }
130 return chars;
131}
132
133#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
134 do { \
135 if (remain <= 0) \
136 break; \
137 *p = ch; \
138 p++; chars++; remain--; \
139 } while (0)
140
141/**
142 * vbuschannel_itoa() - convert non-negative int to string
143 * @p: destination string
144 * @remain: max number of bytes that can be written to @p
145 * @num: input int to convert
146 *
147 * Converts the non-negative value at @num to an ascii decimal string
148 * at @p, writing at most @remain bytes. Note there is NO '\0' termination
149 * written to @p.
150 *
151 * Return: number of bytes written to @p
152 *
153 */
154static inline int
155vbuschannel_itoa(char *p, int remain, int num)
156{
157 int digits = 0;
158 char s[32];
159 int i;
160
161 if (num == 0) {
162 /* '0' is a special case */
163 if (remain <= 0)
164 return 0;
165 *p = '0';
166 return 1;
167 }
168 /* form a backwards decimal ascii string in <s> */
169 while (num > 0) {
170 if (digits >= (int)sizeof(s))
171 return 0;
172 s[digits++] = (num % 10) + '0';
173 num = num / 10;
174 }
175 if (remain < digits) {
176 /* not enough room left at <p> to hold number, so fill with
177 * '?'
178 */
179 for (i = 0; i < remain; i++, p++)
180 *p = '?';
181 return remain;
182 }
183 /* plug in the decimal ascii string representing the number, by */
184 /* reversing the string we just built in <s> */
185 i = digits;
186 while (i > 0) {
187 i--;
188 *p = s[i];
189 p++;
190 }
191 return digits;
192}
193
194/**
195 * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo
196 * to a printable string
197 * @devinfo: the struct ultra_vbus_deviceinfo to format 72 * @devinfo: the struct ultra_vbus_deviceinfo to format
198 * @p: destination string area 73 * @seq: seq_file to write to
199 * @remain: size of destination string area in bytes
200 * @devix: the device index to be included in the output data, or -1 if no 74 * @devix: the device index to be included in the output data, or -1 if no
201 * device index is to be included 75 * device index is to be included
202 * 76 *
203 * Reads @devInfo, and converts its contents to a printable string at @p, 77 * Reads @devInfo, and writes it in human-readable notation to @seq.
204 * writing at most @remain bytes. Note there is NO '\0' termination
205 * written to @p.
206 *
207 * Return: number of bytes written to @p
208 */ 78 */
209static inline int 79static inline void
210vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo, 80vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
211 char *p, int remain, int devix) 81 struct seq_file *seq, int devix)
212{ 82{
213 char *psrc; 83 if (!isprint(devinfo->devtype[0]))
214 int nsrc, x, i, pad; 84 return; /* uninitialized vbus device entry */
215 int chars = 0; 85
216 86 if (devix >= 0)
217 psrc = &devinfo->devtype[0]; 87 seq_printf(seq, "[%d]", devix);
218 nsrc = sizeof(devinfo->devtype); 88 else
219 if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0) 89 /* vbus device entry is for bus or chipset */
220 return 0; 90 seq_puts(seq, " ");
221 91
222 /* emit device index */ 92 /*
223 if (devix >= 0) { 93 * Note: because the s-Par back-end is free to scribble in this area,
224 VBUSCHANNEL_ADDACHAR('[', p, remain, chars); 94 * we never assume '\0'-termination.
225 x = vbuschannel_itoa(p, remain, devix); 95 */
226 p += x; 96 seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
227 remain -= x; 97 (int)sizeof(devinfo->devtype), devinfo->devtype);
228 chars += x; 98 seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
229 VBUSCHANNEL_ADDACHAR(']', p, remain, chars); 99 (int)sizeof(devinfo->drvname), devinfo->drvname);
230 } else { 100 seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
231 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); 101 devinfo->infostrs);
232 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
233 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
234 }
235
236 /* emit device type */
237 x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
238 p += x;
239 remain -= x;
240 chars += x;
241 pad = 15 - x; /* pad device type to be exactly 15 chars */
242 for (i = 0; i < pad; i++)
243 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
244 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
245
246 /* emit driver name */
247 psrc = &devinfo->drvname[0];
248 nsrc = sizeof(devinfo->drvname);
249 x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
250 p += x;
251 remain -= x;
252 chars += x;
253 pad = 15 - x; /* pad driver name to be exactly 15 chars */
254 for (i = 0; i < pad; i++)
255 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
256 VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
257
258 /* emit strings */
259 psrc = &devinfo->infostrs[0];
260 nsrc = sizeof(devinfo->infostrs);
261 x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
262 p += x;
263 remain -= x;
264 chars += x;
265 VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
266
267 return chars;
268} 102}
269 103
270struct spar_vbus_headerinfo { 104struct spar_vbus_headerinfo {
@@ -293,11 +127,6 @@ struct spar_vbus_channel_protocol {
293 /* describes client device and driver for each device on the bus */ 127 /* describes client device and driver for each device on the bus */
294}; 128};
295 129
296#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
297 (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
298 sizeof(ULTRA_VBUS_DEVICEINFO)))
299#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
300
301#pragma pack(pop) 130#pragma pack(pop)
302 131
303#endif 132#endif
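The replacement helper above leans on seq_printf()'s "%-*.*s", taking both the pad width and the read length from sizeof() the field. A tiny standalone demonstration (plain printf, not driver code) of why the precision makes a non-'\0'-terminated buffer safe to print:

/* Standalone demonstration, not driver code: the ".*" precision caps how
 * many bytes are read, so a fixed-size field needs no '\0' terminator.
 */
#include <stdio.h>

int main(void)
{
        char devtype[8] = { 'v', 'h', 'b', 'a', 'X', 'Y', 'Z', 'W' };   /* no NUL */

        /* reads at most sizeof(devtype) bytes, pads to the same width */
        printf("[%d]%-*.*s|\n", 3, (int)sizeof(devtype),
               (int)sizeof(devtype), devtype);
        return 0;       /* prints "[3]vhbaXYZW|" */
}
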
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index fec0a54916fe..3457ef338e1e 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -14,6 +14,7 @@
14 * details. 14 * details.
15 */ 15 */
16 16
17#include <linux/debugfs.h>
17#include <linux/uuid.h> 18#include <linux/uuid.h>
18 19
19#include "visorbus.h" 20#include "visorbus.h"
@@ -33,6 +34,7 @@ static int visorbus_forcenomatch;
33#define POLLJIFFIES_NORMALCHANNEL 10 34#define POLLJIFFIES_NORMALCHANNEL 10
34 35
35static int busreg_rc = -ENODEV; /* stores the result from bus registration */ 36static int busreg_rc = -ENODEV; /* stores the result from bus registration */
37static struct dentry *visorbus_debugfs_dir;
36 38
37/* 39/*
38 * DEVICE type attributes 40 * DEVICE type attributes
@@ -151,6 +153,8 @@ visorbus_release_busdevice(struct device *xdev)
151{ 153{
152 struct visor_device *dev = dev_get_drvdata(xdev); 154 struct visor_device *dev = dev_get_drvdata(xdev);
153 155
156 debugfs_remove(dev->debugfs_client_bus_info);
157 debugfs_remove_recursive(dev->debugfs_dir);
154 kfree(dev); 158 kfree(dev);
155} 159}
156 160
@@ -186,6 +190,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
186 return snprintf(buf, PAGE_SIZE, "0x%llx\n", 190 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
187 visorchannel_get_physaddr(vdev->visorchannel)); 191 visorchannel_get_physaddr(vdev->visorchannel));
188} 192}
193static DEVICE_ATTR_RO(physaddr);
189 194
190static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr, 195static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
191 char *buf) 196 char *buf)
@@ -197,6 +202,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
197 return snprintf(buf, PAGE_SIZE, "0x%lx\n", 202 return snprintf(buf, PAGE_SIZE, "0x%lx\n",
198 visorchannel_get_nbytes(vdev->visorchannel)); 203 visorchannel_get_nbytes(vdev->visorchannel));
199} 204}
205static DEVICE_ATTR_RO(nbytes);
200 206
201static ssize_t clientpartition_show(struct device *dev, 207static ssize_t clientpartition_show(struct device *dev,
202 struct device_attribute *attr, char *buf) 208 struct device_attribute *attr, char *buf)
@@ -208,6 +214,7 @@ static ssize_t clientpartition_show(struct device *dev,
208 return snprintf(buf, PAGE_SIZE, "0x%llx\n", 214 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
209 visorchannel_get_clientpartition(vdev->visorchannel)); 215 visorchannel_get_clientpartition(vdev->visorchannel));
210} 216}
217static DEVICE_ATTR_RO(clientpartition);
211 218
212static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr, 219static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
213 char *buf) 220 char *buf)
@@ -220,6 +227,7 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
220 return snprintf(buf, PAGE_SIZE, "%s\n", 227 return snprintf(buf, PAGE_SIZE, "%s\n",
221 visorchannel_id(vdev->visorchannel, typeid)); 228 visorchannel_id(vdev->visorchannel, typeid));
222} 229}
230static DEVICE_ATTR_RO(typeguid);
223 231
224static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr, 232static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
225 char *buf) 233 char *buf)
@@ -232,6 +240,7 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
232 return snprintf(buf, PAGE_SIZE, "%s\n", 240 return snprintf(buf, PAGE_SIZE, "%s\n",
233 visorchannel_zoneid(vdev->visorchannel, zoneid)); 241 visorchannel_zoneid(vdev->visorchannel, zoneid));
234} 242}
243static DEVICE_ATTR_RO(zoneguid);
235 244
236static ssize_t typename_show(struct device *dev, struct device_attribute *attr, 245static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
237 char *buf) 246 char *buf)
@@ -250,12 +259,6 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
250 drv = to_visor_driver(xdrv); 259 drv = to_visor_driver(xdrv);
251 return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name); 260 return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name);
252} 261}
253
254static DEVICE_ATTR_RO(physaddr);
255static DEVICE_ATTR_RO(nbytes);
256static DEVICE_ATTR_RO(clientpartition);
257static DEVICE_ATTR_RO(typeguid);
258static DEVICE_ATTR_RO(zoneguid);
259static DEVICE_ATTR_RO(typename); 262static DEVICE_ATTR_RO(typename);
260 263
261static struct attribute *channel_attrs[] = { 264static struct attribute *channel_attrs[] = {
@@ -295,6 +298,7 @@ static ssize_t partition_handle_show(struct device *dev,
295 298
296 return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle); 299 return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
297} 300}
301static DEVICE_ATTR_RO(partition_handle);
298 302
299static ssize_t partition_guid_show(struct device *dev, 303static ssize_t partition_guid_show(struct device *dev,
300 struct device_attribute *attr, 304 struct device_attribute *attr,
@@ -303,6 +307,7 @@ static ssize_t partition_guid_show(struct device *dev,
303 307
304 return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid); 308 return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid);
305} 309}
310static DEVICE_ATTR_RO(partition_guid);
306 311
307static ssize_t partition_name_show(struct device *dev, 312static ssize_t partition_name_show(struct device *dev,
308 struct device_attribute *attr, 313 struct device_attribute *attr,
@@ -311,6 +316,7 @@ static ssize_t partition_name_show(struct device *dev,
311 316
312 return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name); 317 return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name);
313} 318}
319static DEVICE_ATTR_RO(partition_name);
314 320
315static ssize_t channel_addr_show(struct device *dev, 321static ssize_t channel_addr_show(struct device *dev,
316 struct device_attribute *attr, 322 struct device_attribute *attr,
@@ -320,6 +326,7 @@ static ssize_t channel_addr_show(struct device *dev,
320 326
321 return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr); 327 return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
322} 328}
329static DEVICE_ATTR_RO(channel_addr);
323 330
324static ssize_t channel_bytes_show(struct device *dev, 331static ssize_t channel_bytes_show(struct device *dev,
325 struct device_attribute *attr, 332 struct device_attribute *attr,
@@ -329,6 +336,7 @@ static ssize_t channel_bytes_show(struct device *dev,
329 336
330 return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes); 337 return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
331} 338}
339static DEVICE_ATTR_RO(channel_bytes);
332 340
333static ssize_t channel_id_show(struct device *dev, 341static ssize_t channel_id_show(struct device *dev,
334 struct device_attribute *attr, 342 struct device_attribute *attr,
@@ -343,77 +351,7 @@ static ssize_t channel_id_show(struct device *dev,
343 } 351 }
344 return len; 352 return len;
345} 353}
346
347static ssize_t client_bus_info_show(struct device *dev,
348 struct device_attribute *attr,
349 char *buf) {
350 struct visor_device *vdev = to_visor_device(dev);
351 struct visorchannel *channel = vdev->visorchannel;
352
353 int i, shift, remain = PAGE_SIZE;
354 unsigned long off;
355 char *pos = buf;
356 u8 *partition_name;
357 struct ultra_vbus_deviceinfo dev_info;
358
359 partition_name = "";
360 if (channel) {
361 if (vdev->name)
362 partition_name = vdev->name;
363 shift = snprintf(pos, remain,
364 "Client device / client driver info for %s partition (vbus #%u):\n",
365 partition_name, vdev->chipset_bus_no);
366 pos += shift;
367 remain -= shift;
368 shift = visorchannel_read(channel,
369 offsetof(struct
370 spar_vbus_channel_protocol,
371 chp_info),
372 &dev_info, sizeof(dev_info));
373 if (shift >= 0) {
374 shift = vbuschannel_devinfo_to_string(&dev_info, pos,
375 remain, -1);
376 pos += shift;
377 remain -= shift;
378 }
379 shift = visorchannel_read(channel,
380 offsetof(struct
381 spar_vbus_channel_protocol,
382 bus_info),
383 &dev_info, sizeof(dev_info));
384 if (shift >= 0) {
385 shift = vbuschannel_devinfo_to_string(&dev_info, pos,
386 remain, -1);
387 pos += shift;
388 remain -= shift;
389 }
390 off = offsetof(struct spar_vbus_channel_protocol, dev_info);
391 i = 0;
392 while (off + sizeof(dev_info) <=
393 visorchannel_get_nbytes(channel)) {
394 shift = visorchannel_read(channel,
395 off, &dev_info,
396 sizeof(dev_info));
397 if (shift >= 0) {
398 shift = vbuschannel_devinfo_to_string
399 (&dev_info, pos, remain, i);
400 pos += shift;
401 remain -= shift;
402 }
403 off += sizeof(dev_info);
404 i++;
405 }
406 }
407 return PAGE_SIZE - remain;
408}
409
410static DEVICE_ATTR_RO(partition_handle);
411static DEVICE_ATTR_RO(partition_guid);
412static DEVICE_ATTR_RO(partition_name);
413static DEVICE_ATTR_RO(channel_addr);
414static DEVICE_ATTR_RO(channel_bytes);
415static DEVICE_ATTR_RO(channel_id); 354static DEVICE_ATTR_RO(channel_id);
416static DEVICE_ATTR_RO(client_bus_info);
417 355
418static struct attribute *dev_attrs[] = { 356static struct attribute *dev_attrs[] = {
419 &dev_attr_partition_handle.attr, 357 &dev_attr_partition_handle.attr,
@@ -422,7 +360,6 @@ static struct attribute *dev_attrs[] = {
422 &dev_attr_channel_addr.attr, 360 &dev_attr_channel_addr.attr,
423 &dev_attr_channel_bytes.attr, 361 &dev_attr_channel_bytes.attr,
424 &dev_attr_channel_id.attr, 362 &dev_attr_channel_id.attr,
425 &dev_attr_client_bus_info.attr,
426 NULL 363 NULL
427}; 364};
428 365
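The hunks above fold each DEVICE_ATTR_RO() declaration in next to its _show() handler instead of collecting them at the end of the file. Below is a minimal sketch of that sysfs pattern; the demo_* names are purely hypothetical and not taken from the driver.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch only: a read-only attribute placed directly after its show handler. */
static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(demo_value);	/* generates dev_attr_demo_value */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_value.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);			/* provides demo_groups for dev->groups */

DEVICE_ATTR_RO(name) expects a matching name_show() function, which is why keeping the declaration adjacent to its handler reads more naturally than a block of declarations far below.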
@@ -435,6 +372,66 @@ static const struct attribute_group *visorbus_groups[] = {
435 NULL 372 NULL
436}; 373};
437 374
375/*
376 * BUS debugfs entries
377 *
378 * define & implement display of debugfs attributes under
379 * /sys/kernel/debug/visorbus/visorbus<n>.
380 */
381
382static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
383{
384 struct visor_device *vdev = seq->private;
385 struct visorchannel *channel = vdev->visorchannel;
386
387 int i;
388 unsigned long off;
389 struct ultra_vbus_deviceinfo dev_info;
390
391 if (!channel)
392 return 0;
393
394 seq_printf(seq,
395 "Client device / client driver info for %s partition (vbus #%u):\n",
396 ((vdev->name) ? (char *)(vdev->name) : ""),
397 vdev->chipset_bus_no);
398 if (visorchannel_read(channel,
399 offsetof(struct spar_vbus_channel_protocol,
400 chp_info),
401 &dev_info, sizeof(dev_info)) >= 0)
402 vbuschannel_print_devinfo(&dev_info, seq, -1);
403 if (visorchannel_read(channel,
404 offsetof(struct spar_vbus_channel_protocol,
405 bus_info),
406 &dev_info, sizeof(dev_info)) >= 0)
407 vbuschannel_print_devinfo(&dev_info, seq, -1);
408 off = offsetof(struct spar_vbus_channel_protocol, dev_info);
409 i = 0;
410 while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
411 if (visorchannel_read(channel, off, &dev_info,
412 sizeof(dev_info)) >= 0)
413 vbuschannel_print_devinfo(&dev_info, seq, i);
414 off += sizeof(dev_info);
415 i++;
416 }
417
418 return 0;
419}
420
421static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
422{
423 return single_open(file, client_bus_info_debugfs_show,
424 inode->i_private);
425}
426
427static const struct file_operations client_bus_info_debugfs_fops = {
428 .owner = THIS_MODULE,
429 .open = client_bus_info_debugfs_open,
430 .read = seq_read,
431 .llseek = seq_lseek,
432 .release = single_release,
433};
434
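The client_bus_info attribute moves from sysfs to a debugfs file built on the seq_file single_open() helpers, which removes the manual PAGE_SIZE/remain bookkeeping of the old show function. Below is a self-contained sketch of that pattern as a standalone module; all demo_* names are hypothetical, not the driver code.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_info_show(struct seq_file *seq, void *v)
{
	/* seq_printf() handles buffer growth; no length bookkeeping needed. */
	seq_printf(seq, "hello from debugfs\n");
	return 0;
}

static int demo_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_info_show, inode->i_private);
}

static const struct file_operations demo_info_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;
	if (!debugfs_create_file("info", 0444, demo_dir, NULL,
				 &demo_info_fops)) {
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

inode->i_private carries the pointer passed as the data argument to debugfs_create_file(), which is how client_bus_info_debugfs_show() above recovers its struct visor_device.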
438static void 435static void
439dev_periodic_work(unsigned long __opaque) 436dev_periodic_work(unsigned long __opaque)
440{ 437{
@@ -610,8 +607,8 @@ create_visor_device(struct visor_device *dev)
610 u32 chipset_bus_no = dev->chipset_bus_no; 607 u32 chipset_bus_no = dev->chipset_bus_no;
611 u32 chipset_dev_no = dev->chipset_dev_no; 608 u32 chipset_dev_no = dev->chipset_dev_no;
612 609
613 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no, 610 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
614 POSTCODE_SEVERITY_INFO); 611 DIAG_SEVERITY_PRINT);
615 612
616 mutex_init(&dev->visordriver_callback_lock); 613 mutex_init(&dev->visordriver_callback_lock);
617 dev->device.bus = &visorbus_type; 614 dev->device.bus = &visorbus_type;
@@ -651,8 +648,8 @@ create_visor_device(struct visor_device *dev)
651 */ 648 */
652 err = device_add(&dev->device); 649 err = device_add(&dev->device);
653 if (err < 0) { 650 if (err < 0) {
654 POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no, 651 POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no,
655 DIAG_SEVERITY_ERR); 652 DIAG_SEVERITY_ERR);
656 goto err_put; 653 goto err_put;
657 } 654 }
658 655
@@ -966,9 +963,10 @@ static int
966create_bus_instance(struct visor_device *dev) 963create_bus_instance(struct visor_device *dev)
967{ 964{
968 int id = dev->chipset_bus_no; 965 int id = dev->chipset_bus_no;
966 int err;
969 struct spar_vbus_headerinfo *hdr_info; 967 struct spar_vbus_headerinfo *hdr_info;
970 968
971 POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); 969 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
972 970
973 hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL); 971 hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
974 if (!hdr_info) 972 if (!hdr_info)
@@ -979,11 +977,26 @@ create_bus_instance(struct visor_device *dev)
979 dev->device.groups = visorbus_groups; 977 dev->device.groups = visorbus_groups;
980 dev->device.release = visorbus_release_busdevice; 978 dev->device.release = visorbus_release_busdevice;
981 979
980 dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
981 visorbus_debugfs_dir);
982 if (!dev->debugfs_dir) {
983 err = -ENOMEM;
984 goto err_hdr_info;
985 }
986 dev->debugfs_client_bus_info =
987 debugfs_create_file("client_bus_info", S_IRUSR | S_IRGRP,
988 dev->debugfs_dir, dev,
989 &client_bus_info_debugfs_fops);
990 if (!dev->debugfs_client_bus_info) {
991 err = -ENOMEM;
992 goto err_debugfs_dir;
993 }
994
982 if (device_register(&dev->device) < 0) { 995 if (device_register(&dev->device) < 0) {
983 POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id, 996 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id,
984 POSTCODE_SEVERITY_ERR); 997 DIAG_SEVERITY_ERR);
985 kfree(hdr_info); 998 err = -ENODEV;
986 return -ENODEV; 999 goto err_debugfs_created;
987 } 1000 }
988 1001
989 if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) { 1002 if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -998,6 +1011,16 @@ create_bus_instance(struct visor_device *dev)
998 list_add_tail(&dev->list_all, &list_all_bus_instances); 1011 list_add_tail(&dev->list_all, &list_all_bus_instances);
999 dev_set_drvdata(&dev->device, dev); 1012 dev_set_drvdata(&dev->device, dev);
1000 return 0; 1013 return 0;
1014
1015err_debugfs_created:
1016 debugfs_remove(dev->debugfs_client_bus_info);
1017
1018err_debugfs_dir:
1019 debugfs_remove_recursive(dev->debugfs_dir);
1020
1021err_hdr_info:
1022 kfree(hdr_info);
1023 return err;
1001} 1024}
1002 1025
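create_bus_instance() now has several failure exits that must release different subsets of what was acquired, so the new err_* labels unwind in reverse acquisition order. A generic sketch of that shape follows; struct demo_ctx and the demo_* names are hypothetical.

#include <linux/slab.h>
#include <linux/debugfs.h>

struct demo_ctx {			/* hypothetical container, for illustration */
	void *hdr;
	void *payload;
	struct dentry *dir;
};

static int demo_setup(struct demo_ctx *ctx)
{
	int err;

	ctx->hdr = kzalloc(64, GFP_KERNEL);
	if (!ctx->hdr)
		return -ENOMEM;

	ctx->dir = debugfs_create_dir("demo", NULL);
	if (!ctx->dir) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	ctx->payload = kzalloc(128, GFP_KERNEL);
	if (!ctx->payload) {
		err = -ENOMEM;
		goto err_remove_dir;
	}
	return 0;

	/* Each label releases only what was acquired before its jump site. */
err_remove_dir:
	debugfs_remove_recursive(ctx->dir);
err_free_hdr:
	kfree(ctx->hdr);
	return err;
}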
1003/** 1026/**
@@ -1069,16 +1092,16 @@ chipset_bus_create(struct visor_device *dev)
1069 int rc; 1092 int rc;
1070 u32 bus_no = dev->chipset_bus_no; 1093 u32 bus_no = dev->chipset_bus_no;
1071 1094
1072 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); 1095 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
1073 rc = create_bus_instance(dev); 1096 rc = create_bus_instance(dev);
1074 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); 1097 POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
1075 1098
1076 if (rc < 0) 1099 if (rc < 0)
1077 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 1100 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
1078 POSTCODE_SEVERITY_ERR); 1101 DIAG_SEVERITY_ERR);
1079 else 1102 else
1080 POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no, 1103 POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no,
1081 POSTCODE_SEVERITY_INFO); 1104 DIAG_SEVERITY_PRINT);
1082 1105
1083 bus_create_response(dev, rc); 1106 bus_create_response(dev, rc);
1084} 1107}
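Throughout this file (and visorchipset.c below) the POSTCODE_LINUX_2/_3/_4 variants collapse into a single four-argument POSTCODE_LINUX(event, device, bus, severity), with 0 supplied where a variant used to omit a field, and the severity constants switch to DIAG_SEVERITY_*. The stand-in macro below only illustrates the calling convention; the real macro's body is defined in the driver's headers and is not reproduced here.

#include <linux/printk.h>

/* Hypothetical stand-in, showing the argument order only. */
#define DEMO_POSTCODE(event, dev_no, bus_no, severity)			\
	pr_info("postcode %s: dev=%u bus=%u sev=%d\n", #event,		\
		(unsigned int)(dev_no), (unsigned int)(bus_no),		\
		(int)(severity))

/*
 * old: POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
 * new: POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
 */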
@@ -1097,18 +1120,18 @@ chipset_device_create(struct visor_device *dev_info)
1097 u32 bus_no = dev_info->chipset_bus_no; 1120 u32 bus_no = dev_info->chipset_bus_no;
1098 u32 dev_no = dev_info->chipset_dev_no; 1121 u32 dev_no = dev_info->chipset_dev_no;
1099 1122
1100 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, 1123 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1101 POSTCODE_SEVERITY_INFO); 1124 DIAG_SEVERITY_PRINT);
1102 1125
1103 rc = create_visor_device(dev_info); 1126 rc = create_visor_device(dev_info);
1104 device_create_response(dev_info, rc); 1127 device_create_response(dev_info, rc);
1105 1128
1106 if (rc < 0) 1129 if (rc < 0)
1107 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 1130 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1108 POSTCODE_SEVERITY_ERR); 1131 DIAG_SEVERITY_ERR);
1109 else 1132 else
1110 POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no, 1133 POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
1111 POSTCODE_SEVERITY_INFO); 1134 DIAG_SEVERITY_PRINT);
1112} 1135}
1113 1136
1114void 1137void
@@ -1274,12 +1297,17 @@ visorbus_init(void)
1274{ 1297{
1275 int err; 1298 int err;
1276 1299
1277 POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO); 1300 POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
1301
1302 visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
1303 if (!visorbus_debugfs_dir)
1304 return -ENOMEM;
1305
1278 bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus"); 1306 bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
1279 1307
1280 err = create_bus_type(); 1308 err = create_bus_type();
1281 if (err < 0) { 1309 if (err < 0) {
1282 POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR); 1310 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR);
1283 goto error; 1311 goto error;
1284 } 1312 }
1285 1313
@@ -1288,7 +1316,7 @@ visorbus_init(void)
1288 return 0; 1316 return 0;
1289 1317
1290error: 1318error:
1291 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR); 1319 POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
1292 return err; 1320 return err;
1293} 1321}
1294 1322
@@ -1306,6 +1334,7 @@ visorbus_exit(void)
1306 remove_bus_instance(dev); 1334 remove_bus_instance(dev);
1307 } 1335 }
1308 remove_bus_type(); 1336 remove_bus_type();
1337 debugfs_remove_recursive(visorbus_debugfs_dir);
1309} 1338}
1310 1339
1311module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO); 1340module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
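For completeness, the new per-bus file can be read from userspace once debugfs is mounted (normally at /sys/kernel/debug); it is created with mode S_IRUSR | S_IRGRP, so root access is generally needed. A small reader follows; the default path is assumed from the comment in the code above, with the bus number as a placeholder.

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* Assumed default; pass an explicit path for a different bus. */
	const char *path = argc > 1 ? argv[1]
		: "/sys/kernel/debug/visorbus/visorbus1/client_bus_info";
	FILE *f = fopen(path, "r");
	char line[256];

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}

Usage: ./dump_client_bus_info [/sys/kernel/debug/visorbus/visorbusN/client_bus_info]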
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 15403fb52847..49bec1763e33 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -70,9 +70,9 @@ struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
70 gfp_t gfp, uuid_le guid); 70 gfp_t gfp, uuid_le guid);
71void visorchannel_destroy(struct visorchannel *channel); 71void visorchannel_destroy(struct visorchannel *channel);
72int visorchannel_read(struct visorchannel *channel, ulong offset, 72int visorchannel_read(struct visorchannel *channel, ulong offset,
73 void *local, ulong nbytes); 73 void *dest, ulong nbytes);
74int visorchannel_write(struct visorchannel *channel, ulong offset, 74int visorchannel_write(struct visorchannel *channel, ulong offset,
75 void *local, ulong nbytes); 75 void *dest, ulong nbytes);
76u64 visorchannel_get_physaddr(struct visorchannel *channel); 76u64 visorchannel_get_physaddr(struct visorchannel *channel);
77ulong visorchannel_get_nbytes(struct visorchannel *channel); 77ulong visorchannel_get_nbytes(struct visorchannel *channel);
78char *visorchannel_id(struct visorchannel *channel, char *s); 78char *visorchannel_id(struct visorchannel *channel, char *s);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 300a65dc5c6c..f51a7258bef0 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#include "visorbus.h" 25#include "visorbus.h"
26#include "visorbus_private.h"
26#include "controlvmchannel.h" 27#include "controlvmchannel.h"
27 28
28#define MYDRVNAME "visorchannel" 29#define MYDRVNAME "visorchannel"
@@ -127,19 +128,19 @@ EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
127 128
128int 129int
129visorchannel_read(struct visorchannel *channel, ulong offset, 130visorchannel_read(struct visorchannel *channel, ulong offset,
130 void *local, ulong nbytes) 131 void *dest, ulong nbytes)
131{ 132{
132 if (offset + nbytes > channel->nbytes) 133 if (offset + nbytes > channel->nbytes)
133 return -EIO; 134 return -EIO;
134 135
135 memcpy(local, channel->mapped + offset, nbytes); 136 memcpy(dest, channel->mapped + offset, nbytes);
136 137
137 return 0; 138 return 0;
138} 139}
139 140
140int 141int
141visorchannel_write(struct visorchannel *channel, ulong offset, 142visorchannel_write(struct visorchannel *channel, ulong offset,
142 void *local, ulong nbytes) 143 void *dest, ulong nbytes)
143{ 144{
144 size_t chdr_size = sizeof(struct channel_header); 145 size_t chdr_size = sizeof(struct channel_header);
145 size_t copy_size; 146 size_t copy_size;
@@ -150,10 +151,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
150 if (offset < chdr_size) { 151 if (offset < chdr_size) {
151 copy_size = min(chdr_size - offset, nbytes); 152 copy_size = min(chdr_size - offset, nbytes);
152 memcpy(((char *)(&channel->chan_hdr)) + offset, 153 memcpy(((char *)(&channel->chan_hdr)) + offset,
153 local, copy_size); 154 dest, copy_size);
154 } 155 }
155 156
156 memcpy(channel->mapped + offset, local, nbytes); 157 memcpy(channel->mapped + offset, dest, nbytes);
157 158
158 return 0; 159 return 0;
159} 160}
@@ -236,8 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
236 if (error) 237 if (error)
237 return error; 238 return error;
238 239
240 /* No signals to remove; have caller try again. */
239 if (sig_hdr.head == sig_hdr.tail) 241 if (sig_hdr.head == sig_hdr.tail)
240 return -EIO; /* no signals to remove */ 242 return -EAGAIN;
241 243
242 sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots; 244 sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
243 245
@@ -299,22 +301,30 @@ EXPORT_SYMBOL_GPL(visorchannel_signalremove);
299 * Return: boolean indicating whether any messages in the designated 301 * Return: boolean indicating whether any messages in the designated
300 * channel/queue are present 302 * channel/queue are present
301 */ 303 */
304
305static bool
306queue_empty(struct visorchannel *channel, u32 queue)
307{
308 struct signal_queue_header sig_hdr;
309
310 if (sig_read_header(channel, queue, &sig_hdr))
311 return true;
312
313 return (sig_hdr.head == sig_hdr.tail);
314}
315
302bool 316bool
303visorchannel_signalempty(struct visorchannel *channel, u32 queue) 317visorchannel_signalempty(struct visorchannel *channel, u32 queue)
304{ 318{
305 unsigned long flags = 0; 319 bool rc;
306 struct signal_queue_header sig_hdr; 320 unsigned long flags;
307 bool rc = false;
308 321
309 if (channel->needs_lock) 322 if (!channel->needs_lock)
310 spin_lock_irqsave(&channel->remove_lock, flags); 323 return queue_empty(channel, queue);
311 324
312 if (sig_read_header(channel, queue, &sig_hdr)) 325 spin_lock_irqsave(&channel->remove_lock, flags);
313 rc = true; 326 rc = queue_empty(channel, queue);
314 if (sig_hdr.head == sig_hdr.tail) 327 spin_unlock_irqrestore(&channel->remove_lock, flags);
315 rc = true;
316 if (channel->needs_lock)
317 spin_unlock_irqrestore(&channel->remove_lock, flags);
318 328
319 return rc; 329 return rc;
320} 330}
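visorchannel_signalempty() is restructured around a small queue_empty() helper so the spinlock is taken only when the channel actually needs it, and signalremove_inner() now reports an empty queue as -EAGAIN (caller should retry) instead of -EIO. A sketch of the locking shape, with a hypothetical demo_queue standing in for the channel:

#include <linux/types.h>
#include <linux/spinlock.h>

struct demo_queue {			/* hypothetical stand-in for the channel */
	unsigned int head, tail;
	bool needs_lock;
	spinlock_t lock;
};

static bool demo_queue_empty(struct demo_queue *q)
{
	return q->head == q->tail;
}

static bool demo_signalempty(struct demo_queue *q)
{
	unsigned long flags;
	bool rc;

	if (!q->needs_lock)		/* fast path: no locking required */
		return demo_queue_empty(q);

	spin_lock_irqsave(&q->lock, flags);
	rc = demo_queue_empty(q);
	spin_unlock_irqrestore(&q->lock, flags);
	return rc;
}

Keeping the comparison in one helper means the locked and unlocked paths cannot drift apart, which is the point of the refactor above.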
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 59871495ea85..d7148c351d3f 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -29,7 +29,7 @@
29#include "visorbus_private.h" 29#include "visorbus_private.h"
30#include "vmcallinterface.h" 30#include "vmcallinterface.h"
31 31
32#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c 32#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
33 33
34#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1 34#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
35#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100 35#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -57,7 +57,6 @@ visorchipset_open(struct inode *inode, struct file *file)
57 57
58 if (minor_number) 58 if (minor_number)
59 return -ENODEV; 59 return -ENODEV;
60 file->private_data = NULL;
61 return 0; 60 return 0;
62} 61}
63 62
@@ -499,7 +498,7 @@ controlvm_init_response(struct controlvm_message *msg,
499 } 498 }
500} 499}
501 500
502static void 501static int
503controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr, 502controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
504 int response, 503 int response,
505 enum ultra_chipset_feature features) 504 enum ultra_chipset_feature features)
@@ -508,34 +507,33 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
508 507
509 controlvm_init_response(&outmsg, msg_hdr, response); 508 controlvm_init_response(&outmsg, msg_hdr, response);
510 outmsg.cmd.init_chipset.features = features; 509 outmsg.cmd.init_chipset.features = features;
511 if (visorchannel_signalinsert(controlvm_channel, 510 return visorchannel_signalinsert(controlvm_channel,
512 CONTROLVM_QUEUE_REQUEST, &outmsg)) { 511 CONTROLVM_QUEUE_REQUEST, &outmsg);
513 return;
514 }
515} 512}
516 513
517static void 514static int
518chipset_init(struct controlvm_message *inmsg) 515chipset_init(struct controlvm_message *inmsg)
519{ 516{
520 static int chipset_inited; 517 static int chipset_inited;
521 enum ultra_chipset_feature features = 0; 518 enum ultra_chipset_feature features = 0;
522 int rc = CONTROLVM_RESP_SUCCESS; 519 int rc = CONTROLVM_RESP_SUCCESS;
520 int res = 0;
523 521
524 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO); 522 POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
525 if (chipset_inited) { 523 if (chipset_inited) {
526 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 524 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
525 res = -EIO;
527 goto out_respond; 526 goto out_respond;
528 } 527 }
529 chipset_inited = 1; 528 chipset_inited = 1;
530 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); 529 POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
531 530
532 /* 531 /*
533 * Set features to indicate we support parahotplug (if Command 532 * Set features to indicate we support parahotplug (if Command
534 * also supports it). 533 * also supports it).
535 */ 534 */
536 features = 535 features = inmsg->cmd.init_chipset.features &
537 inmsg->cmd.init_chipset. 536 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
538 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
539 537
540 /* 538 /*
541 * Set the "reply" bit so Command knows this is a 539 * Set the "reply" bit so Command knows this is a
@@ -545,25 +543,25 @@ chipset_init(struct controlvm_message *inmsg)
545 543
546out_respond: 544out_respond:
547 if (inmsg->hdr.flags.response_expected) 545 if (inmsg->hdr.flags.response_expected)
548 controlvm_respond_chipset_init(&inmsg->hdr, rc, features); 546 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
547
548 return res;
549} 549}
550 550
551static void 551static int
552controlvm_respond(struct controlvm_message_header *msg_hdr, int response) 552controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
553{ 553{
554 struct controlvm_message outmsg; 554 struct controlvm_message outmsg;
555 555
556 controlvm_init_response(&outmsg, msg_hdr, response); 556 controlvm_init_response(&outmsg, msg_hdr, response);
557 if (outmsg.hdr.flags.test_message == 1) 557 if (outmsg.hdr.flags.test_message == 1)
558 return; 558 return -EINVAL;
559 559
560 if (visorchannel_signalinsert(controlvm_channel, 560 return visorchannel_signalinsert(controlvm_channel,
561 CONTROLVM_QUEUE_REQUEST, &outmsg)) { 561 CONTROLVM_QUEUE_REQUEST, &outmsg);
562 return;
563 }
564} 562}
565 563
566static void controlvm_respond_physdev_changestate( 564static int controlvm_respond_physdev_changestate(
567 struct controlvm_message_header *msg_hdr, int response, 565 struct controlvm_message_header *msg_hdr, int response,
568 struct spar_segment_state state) 566 struct spar_segment_state state)
569{ 567{
@@ -572,10 +570,8 @@ static void controlvm_respond_physdev_changestate(
572 controlvm_init_response(&outmsg, msg_hdr, response); 570 controlvm_init_response(&outmsg, msg_hdr, response);
573 outmsg.cmd.device_change_state.state = state; 571 outmsg.cmd.device_change_state.state = state;
574 outmsg.cmd.device_change_state.flags.phys_device = 1; 572 outmsg.cmd.device_change_state.flags.phys_device = 1;
575 if (visorchannel_signalinsert(controlvm_channel, 573 return visorchannel_signalinsert(controlvm_channel,
576 CONTROLVM_QUEUE_REQUEST, &outmsg)) { 574 CONTROLVM_QUEUE_REQUEST, &outmsg);
577 return;
578 }
579} 575}
580 576
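The responder helpers in visorchipset.c change from void to int so that the result of the signal insert is propagated instead of silently dropped, letting callers act on the failure. A compact sketch of the converted shape, with demo_* stand-ins for the real channel helpers:

#include <linux/errno.h>

struct demo_hdr {			/* hypothetical pending-request header */
	unsigned int id;
};

/* Stand-in for visorchannel_signalinsert(); returns 0 or a -errno value. */
static int demo_signalinsert(const struct demo_hdr *hdr, int response)
{
	return (hdr && response >= 0) ? 0 : -EIO;
}

static int demo_responder(unsigned int cmd_id, struct demo_hdr *pending,
			  int response)
{
	if (!pending)
		return -EIO;		/* no response needed or possible */
	if (pending->id != cmd_id)
		return -EINVAL;		/* stale or mismatched request */
	return demo_signalinsert(pending, response);
}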
581enum crash_obj_type { 577enum crash_obj_type {
@@ -583,74 +579,80 @@ enum crash_obj_type {
583 CRASH_BUS, 579 CRASH_BUS,
584}; 580};
585 581
586static void 582static int
587save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ) 583save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
588{ 584{
589 u32 local_crash_msg_offset; 585 u32 local_crash_msg_offset;
590 u16 local_crash_msg_count; 586 u16 local_crash_msg_count;
587 int err;
591 588
592 if (visorchannel_read(controlvm_channel, 589 err = visorchannel_read(controlvm_channel,
593 offsetof(struct spar_controlvm_channel_protocol, 590 offsetof(struct spar_controlvm_channel_protocol,
594 saved_crash_message_count), 591 saved_crash_message_count),
595 &local_crash_msg_count, sizeof(u16)) < 0) { 592 &local_crash_msg_count, sizeof(u16));
596 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, 593 if (err) {
597 POSTCODE_SEVERITY_ERR); 594 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
598 return; 595 DIAG_SEVERITY_ERR);
596 return err;
599 } 597 }
600 598
601 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { 599 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
602 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, 600 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
603 local_crash_msg_count, 601 local_crash_msg_count,
604 POSTCODE_SEVERITY_ERR); 602 DIAG_SEVERITY_ERR);
605 return; 603 return -EIO;
606 } 604 }
607 605
608 if (visorchannel_read(controlvm_channel, 606 err = visorchannel_read(controlvm_channel,
609 offsetof(struct spar_controlvm_channel_protocol, 607 offsetof(struct spar_controlvm_channel_protocol,
610 saved_crash_message_offset), 608 saved_crash_message_offset),
611 &local_crash_msg_offset, sizeof(u32)) < 0) { 609 &local_crash_msg_offset, sizeof(u32));
612 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, 610 if (err) {
613 POSTCODE_SEVERITY_ERR); 611 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
614 return; 612 DIAG_SEVERITY_ERR);
613 return err;
615 } 614 }
616 615
617 if (typ == CRASH_BUS) { 616 if (typ == CRASH_BUS) {
618 if (visorchannel_write(controlvm_channel, 617 err = visorchannel_write(controlvm_channel,
619 local_crash_msg_offset, 618 local_crash_msg_offset,
620 msg, 619 msg,
621 sizeof(struct controlvm_message)) < 0) { 620 sizeof(struct controlvm_message));
622 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC, 621 if (err) {
623 POSTCODE_SEVERITY_ERR); 622 POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
624 return; 623 DIAG_SEVERITY_ERR);
624 return err;
625 } 625 }
626 } else { 626 } else {
627 local_crash_msg_offset += sizeof(struct controlvm_message); 627 local_crash_msg_offset += sizeof(struct controlvm_message);
628 if (visorchannel_write(controlvm_channel, 628 err = visorchannel_write(controlvm_channel,
629 local_crash_msg_offset, 629 local_crash_msg_offset,
630 msg, 630 msg,
631 sizeof(struct controlvm_message)) < 0) { 631 sizeof(struct controlvm_message));
632 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC, 632 if (err) {
633 POSTCODE_SEVERITY_ERR); 633 POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
634 return; 634 DIAG_SEVERITY_ERR);
635 return err;
635 } 636 }
636 } 637 }
638 return 0;
637} 639}
638 640
639static void 641static int
640bus_responder(enum controlvm_id cmd_id, 642bus_responder(enum controlvm_id cmd_id,
641 struct controlvm_message_header *pending_msg_hdr, 643 struct controlvm_message_header *pending_msg_hdr,
642 int response) 644 int response)
643{ 645{
644 if (!pending_msg_hdr) 646 if (!pending_msg_hdr)
645 return; /* no controlvm response needed */ 647 return -EIO;
646 648
647 if (pending_msg_hdr->id != (u32)cmd_id) 649 if (pending_msg_hdr->id != (u32)cmd_id)
648 return; 650 return -EINVAL;
649 651
650 controlvm_respond(pending_msg_hdr, response); 652 return controlvm_respond(pending_msg_hdr, response);
651} 653}
652 654
653static void 655static int
654device_changestate_responder(enum controlvm_id cmd_id, 656device_changestate_responder(enum controlvm_id cmd_id,
655 struct visor_device *p, int response, 657 struct visor_device *p, int response,
656 struct spar_segment_state response_state) 658 struct spar_segment_state response_state)
@@ -660,9 +662,9 @@ device_changestate_responder(enum controlvm_id cmd_id,
660 u32 dev_no = p->chipset_dev_no; 662 u32 dev_no = p->chipset_dev_no;
661 663
662 if (!p->pending_msg_hdr) 664 if (!p->pending_msg_hdr)
663 return; /* no controlvm response needed */ 665 return -EIO;
664 if (p->pending_msg_hdr->id != cmd_id) 666 if (p->pending_msg_hdr->id != cmd_id)
665 return; 667 return -EINVAL;
666 668
667 controlvm_init_response(&outmsg, p->pending_msg_hdr, response); 669 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
668 670
@@ -670,175 +672,74 @@ device_changestate_responder(enum controlvm_id cmd_id,
670 outmsg.cmd.device_change_state.dev_no = dev_no; 672 outmsg.cmd.device_change_state.dev_no = dev_no;
671 outmsg.cmd.device_change_state.state = response_state; 673 outmsg.cmd.device_change_state.state = response_state;
672 674
673 if (visorchannel_signalinsert(controlvm_channel, 675 return visorchannel_signalinsert(controlvm_channel,
674 CONTROLVM_QUEUE_REQUEST, &outmsg)) 676 CONTROLVM_QUEUE_REQUEST, &outmsg);
675 return;
676} 677}
677 678
678static void 679static int
679device_responder(enum controlvm_id cmd_id, 680device_responder(enum controlvm_id cmd_id,
680 struct controlvm_message_header *pending_msg_hdr, 681 struct controlvm_message_header *pending_msg_hdr,
681 int response) 682 int response)
682{ 683{
683 if (!pending_msg_hdr) 684 if (!pending_msg_hdr)
684 return; /* no controlvm response needed */ 685 return -EIO;
685 686
686 if (pending_msg_hdr->id != (u32)cmd_id) 687 if (pending_msg_hdr->id != (u32)cmd_id)
687 return; 688 return -EINVAL;
688
689 controlvm_respond(pending_msg_hdr, response);
690}
691
692static void
693bus_epilog(struct visor_device *bus_info,
694 u32 cmd, struct controlvm_message_header *msg_hdr,
695 int response, bool need_response)
696{
697 struct controlvm_message_header *pmsg_hdr = NULL;
698
699 if (!bus_info) {
700 /*
701 * relying on a valid passed in response code
702 * be lazy and re-use msg_hdr for this failure, is this ok??
703 */
704 pmsg_hdr = msg_hdr;
705 goto out_respond;
706 }
707
708 if (bus_info->pending_msg_hdr) {
709 /* only non-NULL if dev is still waiting on a response */
710 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
711 pmsg_hdr = bus_info->pending_msg_hdr;
712 goto out_respond;
713 }
714
715 if (need_response) {
716 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
717 if (!pmsg_hdr) {
718 POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
719 bus_info->chipset_bus_no,
720 POSTCODE_SEVERITY_ERR);
721 return;
722 }
723
724 memcpy(pmsg_hdr, msg_hdr,
725 sizeof(struct controlvm_message_header));
726 bus_info->pending_msg_hdr = pmsg_hdr;
727 }
728
729 if (response == CONTROLVM_RESP_SUCCESS) {
730 switch (cmd) {
731 case CONTROLVM_BUS_CREATE:
732 chipset_bus_create(bus_info);
733 break;
734 case CONTROLVM_BUS_DESTROY:
735 chipset_bus_destroy(bus_info);
736 break;
737 }
738 }
739
740out_respond:
741 bus_responder(cmd, pmsg_hdr, response);
742}
743
744static void
745device_epilog(struct visor_device *dev_info,
746 struct spar_segment_state state, u32 cmd,
747 struct controlvm_message_header *msg_hdr, int response,
748 bool need_response, bool for_visorbus)
749{
750 struct controlvm_message_header *pmsg_hdr = NULL;
751
752 if (!dev_info) {
753 /*
754 * relying on a valid passed in response code
755 * be lazy and re-use msg_hdr for this failure, is this ok??
756 */
757 pmsg_hdr = msg_hdr;
758 goto out_respond;
759 }
760
761 if (dev_info->pending_msg_hdr) {
762 /* only non-NULL if dev is still waiting on a response */
763 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
764 pmsg_hdr = dev_info->pending_msg_hdr;
765 goto out_respond;
766 }
767
768 if (need_response) {
769 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
770 if (!pmsg_hdr) {
771 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
772 goto out_respond;
773 }
774
775 memcpy(pmsg_hdr, msg_hdr,
776 sizeof(struct controlvm_message_header));
777 dev_info->pending_msg_hdr = pmsg_hdr;
778 }
779
780 if (response >= 0) {
781 switch (cmd) {
782 case CONTROLVM_DEVICE_CREATE:
783 chipset_device_create(dev_info);
784 break;
785 case CONTROLVM_DEVICE_CHANGESTATE:
786 /* ServerReady / ServerRunning / SegmentStateRunning */
787 if (state.alive == segment_state_running.alive &&
788 state.operating ==
789 segment_state_running.operating) {
790 chipset_device_resume(dev_info);
791 }
792 /* ServerNotReady / ServerLost / SegmentStateStandby */
793 else if (state.alive == segment_state_standby.alive &&
794 state.operating ==
795 segment_state_standby.operating) {
796 /*
797 * technically this is standby case
798 * where server is lost
799 */
800 chipset_device_pause(dev_info);
801 }
802 break;
803 case CONTROLVM_DEVICE_DESTROY:
804 chipset_device_destroy(dev_info);
805 break;
806 }
807 }
808 689
809out_respond: 690 return controlvm_respond(pending_msg_hdr, response);
810 device_responder(cmd, pmsg_hdr, response);
811} 691}
812 692
813static void 693static int
814bus_create(struct controlvm_message *inmsg) 694bus_create(struct controlvm_message *inmsg)
815{ 695{
816 struct controlvm_message_packet *cmd = &inmsg->cmd; 696 struct controlvm_message_packet *cmd = &inmsg->cmd;
697 struct controlvm_message_header *pmsg_hdr = NULL;
817 u32 bus_no = cmd->create_bus.bus_no; 698 u32 bus_no = cmd->create_bus.bus_no;
818 int rc = CONTROLVM_RESP_SUCCESS;
819 struct visor_device *bus_info; 699 struct visor_device *bus_info;
820 struct visorchannel *visorchannel; 700 struct visorchannel *visorchannel;
701 int err;
821 702
822 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); 703 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
823 if (bus_info && (bus_info->state.created == 1)) { 704 if (bus_info && (bus_info->state.created == 1)) {
824 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 705 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
825 POSTCODE_SEVERITY_ERR); 706 DIAG_SEVERITY_ERR);
826 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 707 err = -EEXIST;
827 goto out_bus_epilog; 708 goto err_respond;
828 } 709 }
710
829 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL); 711 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
830 if (!bus_info) { 712 if (!bus_info) {
831 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 713 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
832 POSTCODE_SEVERITY_ERR); 714 DIAG_SEVERITY_ERR);
833 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 715 err = -ENOMEM;
834 goto out_bus_epilog; 716 goto err_respond;
835 } 717 }
836 718
837 INIT_LIST_HEAD(&bus_info->list_all); 719 INIT_LIST_HEAD(&bus_info->list_all);
838 bus_info->chipset_bus_no = bus_no; 720 bus_info->chipset_bus_no = bus_no;
839 bus_info->chipset_dev_no = BUS_ROOT_DEVICE; 721 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
840 722
841 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); 723 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
724
725 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
726 save_crash_message(inmsg, CRASH_BUS);
727
728 if (inmsg->hdr.flags.response_expected == 1) {
729 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
730 GFP_KERNEL);
731 if (!pmsg_hdr) {
732 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
733 bus_info->chipset_bus_no,
734 DIAG_SEVERITY_ERR);
735 err = -ENOMEM;
736 goto err_free_bus_info;
737 }
738
739 memcpy(pmsg_hdr, &inmsg->hdr,
740 sizeof(struct controlvm_message_header));
741 bus_info->pending_msg_hdr = pmsg_hdr;
742 }
842 743
843 visorchannel = visorchannel_create(cmd->create_bus.channel_addr, 744 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
844 cmd->create_bus.channel_bytes, 745 cmd->create_bus.channel_bytes,
@@ -846,89 +747,138 @@ bus_create(struct controlvm_message *inmsg)
846 cmd->create_bus.bus_data_type_uuid); 747 cmd->create_bus.bus_data_type_uuid);
847 748
848 if (!visorchannel) { 749 if (!visorchannel) {
849 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, 750 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
850 POSTCODE_SEVERITY_ERR); 751 DIAG_SEVERITY_ERR);
851 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 752 err = -ENOMEM;
852 kfree(bus_info); 753 goto err_free_pending_msg;
853 bus_info = NULL;
854 goto out_bus_epilog;
855 } 754 }
856 bus_info->visorchannel = visorchannel; 755 bus_info->visorchannel = visorchannel;
857 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
858 save_crash_message(inmsg, CRASH_BUS);
859 756
860 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); 757 /* Response will be handled by chipset_bus_create */
758 chipset_bus_create(bus_info);
759
760 POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
761 return 0;
762
763err_free_pending_msg:
764 kfree(bus_info->pending_msg_hdr);
765
766err_free_bus_info:
767 kfree(bus_info);
861 768
862out_bus_epilog: 769err_respond:
863 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr, 770 if (inmsg->hdr.flags.response_expected == 1)
864 rc, inmsg->hdr.flags.response_expected == 1); 771 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
772 return err;
865} 773}
866 774
867static void 775static int
868bus_destroy(struct controlvm_message *inmsg) 776bus_destroy(struct controlvm_message *inmsg)
869{ 777{
870 struct controlvm_message_packet *cmd = &inmsg->cmd; 778 struct controlvm_message_packet *cmd = &inmsg->cmd;
779 struct controlvm_message_header *pmsg_hdr = NULL;
871 u32 bus_no = cmd->destroy_bus.bus_no; 780 u32 bus_no = cmd->destroy_bus.bus_no;
872 struct visor_device *bus_info; 781 struct visor_device *bus_info;
873 int rc = CONTROLVM_RESP_SUCCESS; 782 int err;
874 783
875 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); 784 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
876 if (!bus_info) 785 if (!bus_info) {
877 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 786 err = -ENODEV;
878 else if (bus_info->state.created == 0) 787 goto err_respond;
879 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 788 }
789 if (bus_info->state.created == 0) {
790 err = -ENOENT;
791 goto err_respond;
792 }
793 if (bus_info->pending_msg_hdr) {
794 /* only non-NULL if dev is still waiting on a response */
795 err = -EEXIST;
796 goto err_respond;
797 }
798 if (inmsg->hdr.flags.response_expected == 1) {
799 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
800 if (!pmsg_hdr) {
801 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
802 bus_info->chipset_bus_no,
803 DIAG_SEVERITY_ERR);
804 err = -ENOMEM;
805 goto err_respond;
806 }
880 807
881 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr, 808 memcpy(pmsg_hdr, &inmsg->hdr,
882 rc, inmsg->hdr.flags.response_expected == 1); 809 sizeof(struct controlvm_message_header));
810 bus_info->pending_msg_hdr = pmsg_hdr;
811 }
883 812
884 /* bus_info is freed as part of the busdevice_release function */ 813 /* Response will be handled by chipset_bus_destroy */
814 chipset_bus_destroy(bus_info);
815 return 0;
816
817err_respond:
818 if (inmsg->hdr.flags.response_expected == 1)
819 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
820 return err;
885} 821}
886 822
887static void 823static int
888bus_configure(struct controlvm_message *inmsg, 824bus_configure(struct controlvm_message *inmsg,
889 struct parser_context *parser_ctx) 825 struct parser_context *parser_ctx)
890{ 826{
891 struct controlvm_message_packet *cmd = &inmsg->cmd; 827 struct controlvm_message_packet *cmd = &inmsg->cmd;
892 u32 bus_no; 828 u32 bus_no;
893 struct visor_device *bus_info; 829 struct visor_device *bus_info;
894 int rc = CONTROLVM_RESP_SUCCESS; 830 int err = 0;
895 831
896 bus_no = cmd->configure_bus.bus_no; 832 bus_no = cmd->configure_bus.bus_no;
897 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no, 833 POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
898 POSTCODE_SEVERITY_INFO); 834 DIAG_SEVERITY_PRINT);
899 835
900 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); 836 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
901 if (!bus_info) { 837 if (!bus_info) {
902 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, 838 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
903 POSTCODE_SEVERITY_ERR); 839 DIAG_SEVERITY_ERR);
904 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 840 err = -EINVAL;
841 goto err_respond;
905 } else if (bus_info->state.created == 0) { 842 } else if (bus_info->state.created == 0) {
906 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, 843 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
907 POSTCODE_SEVERITY_ERR); 844 DIAG_SEVERITY_ERR);
908 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 845 err = -EINVAL;
846 goto err_respond;
909 } else if (bus_info->pending_msg_hdr) { 847 } else if (bus_info->pending_msg_hdr) {
910 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, 848 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
911 POSTCODE_SEVERITY_ERR); 849 DIAG_SEVERITY_ERR);
912 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 850 err = -EIO;
913 } else { 851 goto err_respond;
914 visorchannel_set_clientpartition
915 (bus_info->visorchannel,
916 cmd->configure_bus.guest_handle);
917 bus_info->partition_uuid = parser_id_get(parser_ctx);
918 parser_param_start(parser_ctx, PARSERSTRING_NAME);
919 bus_info->name = parser_string_get(parser_ctx);
920
921 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
922 POSTCODE_SEVERITY_INFO);
923 } 852 }
924 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr, 853
925 rc, inmsg->hdr.flags.response_expected == 1); 854 err = visorchannel_set_clientpartition
855 (bus_info->visorchannel,
856 cmd->configure_bus.guest_handle);
857 if (err)
858 goto err_respond;
859
860 bus_info->partition_uuid = parser_id_get(parser_ctx);
861 parser_param_start(parser_ctx, PARSERSTRING_NAME);
862 bus_info->name = parser_string_get(parser_ctx);
863
864 POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
865 DIAG_SEVERITY_PRINT);
866
867 if (inmsg->hdr.flags.response_expected == 1)
868 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
869 return 0;
870
871err_respond:
872 if (inmsg->hdr.flags.response_expected == 1)
873 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
874 return err;
926} 875}
927 876
928static void 877static void
929my_device_create(struct controlvm_message *inmsg) 878my_device_create(struct controlvm_message *inmsg)
930{ 879{
931 struct controlvm_message_packet *cmd = &inmsg->cmd; 880 struct controlvm_message_packet *cmd = &inmsg->cmd;
881 struct controlvm_message_header *pmsg_hdr = NULL;
932 u32 bus_no = cmd->create_device.bus_no; 882 u32 bus_no = cmd->create_device.bus_no;
933 u32 dev_no = cmd->create_device.dev_no; 883 u32 dev_no = cmd->create_device.dev_no;
934 struct visor_device *dev_info = NULL; 884 struct visor_device *dev_info = NULL;
@@ -938,31 +888,31 @@ my_device_create(struct controlvm_message *inmsg)
938 888
939 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); 889 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
940 if (!bus_info) { 890 if (!bus_info) {
941 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 891 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
942 POSTCODE_SEVERITY_ERR); 892 DIAG_SEVERITY_ERR);
943 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 893 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
944 goto out_respond; 894 goto out_respond;
945 } 895 }
946 896
947 if (bus_info->state.created == 0) { 897 if (bus_info->state.created == 0) {
948 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 898 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
949 POSTCODE_SEVERITY_ERR); 899 DIAG_SEVERITY_ERR);
950 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; 900 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
951 goto out_respond; 901 goto out_respond;
952 } 902 }
953 903
954 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); 904 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
955 if (dev_info && (dev_info->state.created == 1)) { 905 if (dev_info && (dev_info->state.created == 1)) {
956 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 906 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
957 POSTCODE_SEVERITY_ERR); 907 DIAG_SEVERITY_ERR);
958 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 908 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
959 goto out_respond; 909 goto out_respond;
960 } 910 }
961 911
962 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); 912 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
963 if (!dev_info) { 913 if (!dev_info) {
964 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 914 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
965 POSTCODE_SEVERITY_ERR); 915 DIAG_SEVERITY_ERR);
966 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 916 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
967 goto out_respond; 917 goto out_respond;
968 } 918 }
@@ -974,8 +924,8 @@ my_device_create(struct controlvm_message *inmsg)
974 /* not sure where the best place to set the 'parent' */ 924 /* not sure where the best place to set the 'parent' */
975 dev_info->device.parent = &bus_info->device; 925 dev_info->device.parent = &bus_info->device;
976 926
977 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, 927 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
978 POSTCODE_SEVERITY_INFO); 928 DIAG_SEVERITY_PRINT);
979 929
980 visorchannel = 930 visorchannel =
981 visorchannel_create_with_lock(cmd->create_device.channel_addr, 931 visorchannel_create_with_lock(cmd->create_device.channel_addr,
@@ -984,12 +934,10 @@ my_device_create(struct controlvm_message *inmsg)
984 cmd->create_device.data_type_uuid); 934 cmd->create_device.data_type_uuid);
985 935
986 if (!visorchannel) { 936 if (!visorchannel) {
987 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, 937 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
988 POSTCODE_SEVERITY_ERR); 938 DIAG_SEVERITY_ERR);
989 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 939 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
990 kfree(dev_info); 940 goto out_free_dev_info;
991 dev_info = NULL;
992 goto out_respond;
993 } 941 }
994 dev_info->visorchannel = visorchannel; 942 dev_info->visorchannel = visorchannel;
995 dev_info->channel_type_guid = cmd->create_device.data_type_uuid; 943 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -997,18 +945,36 @@ my_device_create(struct controlvm_message *inmsg)
997 spar_vhba_channel_protocol_uuid) == 0) 945 spar_vhba_channel_protocol_uuid) == 0)
998 save_crash_message(inmsg, CRASH_DEV); 946 save_crash_message(inmsg, CRASH_DEV);
999 947
1000 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, 948 if (inmsg->hdr.flags.response_expected == 1) {
1001 POSTCODE_SEVERITY_INFO); 949 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
950 if (!pmsg_hdr) {
951 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
952 goto out_free_dev_info;
953 }
954
955 memcpy(pmsg_hdr, &inmsg->hdr,
956 sizeof(struct controlvm_message_header));
957 dev_info->pending_msg_hdr = pmsg_hdr;
958 }
959 /* Chipset_device_create will send response */
960 chipset_device_create(dev_info);
961 POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
962 DIAG_SEVERITY_PRINT);
963 return;
964
965out_free_dev_info:
966 kfree(dev_info);
967
1002out_respond: 968out_respond:
1003 device_epilog(dev_info, segment_state_running, 969 if (inmsg->hdr.flags.response_expected == 1)
1004 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc, 970 device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
1005 inmsg->hdr.flags.response_expected == 1, 1);
1006} 971}
1007 972
1008static void 973static void
1009my_device_changestate(struct controlvm_message *inmsg) 974my_device_changestate(struct controlvm_message *inmsg)
1010{ 975{
1011 struct controlvm_message_packet *cmd = &inmsg->cmd; 976 struct controlvm_message_packet *cmd = &inmsg->cmd;
977 struct controlvm_message_header *pmsg_hdr = NULL;
1012 u32 bus_no = cmd->device_change_state.bus_no; 978 u32 bus_no = cmd->device_change_state.bus_no;
1013 u32 dev_no = cmd->device_change_state.dev_no; 979 u32 dev_no = cmd->device_change_state.dev_no;
1014 struct spar_segment_state state = cmd->device_change_state.state; 980 struct spar_segment_state state = cmd->device_change_state.state;
@@ -1017,39 +983,97 @@ my_device_changestate(struct controlvm_message *inmsg)
1017 983
1018 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); 984 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
1019 if (!dev_info) { 985 if (!dev_info) {
1020 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, 986 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1021 POSTCODE_SEVERITY_ERR); 987 DIAG_SEVERITY_ERR);
1022 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; 988 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1023 } else if (dev_info->state.created == 0) { 989 goto err_respond;
1024 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, 990 }
1025 POSTCODE_SEVERITY_ERR); 991 if (dev_info->state.created == 0) {
992 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
993 DIAG_SEVERITY_ERR);
1026 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; 994 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
995 goto err_respond;
996 }
997 if (dev_info->pending_msg_hdr) {
998 /* only non-NULL if dev is still waiting on a response */
999 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1000 goto err_respond;
1027 } 1001 }
1028 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) 1002 if (inmsg->hdr.flags.response_expected == 1) {
1029 device_epilog(dev_info, state, 1003 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1030 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc, 1004 if (!pmsg_hdr) {
1031 inmsg->hdr.flags.response_expected == 1, 1); 1005 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1006 goto err_respond;
1007 }
1008
1009 memcpy(pmsg_hdr, &inmsg->hdr,
1010 sizeof(struct controlvm_message_header));
1011 dev_info->pending_msg_hdr = pmsg_hdr;
1012 }
1013
1014 if (state.alive == segment_state_running.alive &&
1015 state.operating == segment_state_running.operating)
1016 /* Response will be sent from chipset_device_resume */
1017 chipset_device_resume(dev_info);
1018 /* ServerNotReady / ServerLost / SegmentStateStandby */
1019 else if (state.alive == segment_state_standby.alive &&
1020 state.operating == segment_state_standby.operating)
1021 /*
1022 * technically this is standby case where server is lost.
1023 * Response will be sent from chipset_device_pause.
1024 */
1025 chipset_device_pause(dev_info);
1026
1027 return;
1028
1029err_respond:
1030 if (inmsg->hdr.flags.response_expected == 1)
1031 device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
1032} 1032}
1033 1033
1034static void 1034static void
1035my_device_destroy(struct controlvm_message *inmsg) 1035my_device_destroy(struct controlvm_message *inmsg)
1036{ 1036{
1037 struct controlvm_message_packet *cmd = &inmsg->cmd; 1037 struct controlvm_message_packet *cmd = &inmsg->cmd;
1038 struct controlvm_message_header *pmsg_hdr = NULL;
1038 u32 bus_no = cmd->destroy_device.bus_no; 1039 u32 bus_no = cmd->destroy_device.bus_no;
1039 u32 dev_no = cmd->destroy_device.dev_no; 1040 u32 dev_no = cmd->destroy_device.dev_no;
1040 struct visor_device *dev_info; 1041 struct visor_device *dev_info;
1041 int rc = CONTROLVM_RESP_SUCCESS; 1042 int rc = CONTROLVM_RESP_SUCCESS;
1042 1043
1043 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); 1044 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
1044 if (!dev_info) 1045 if (!dev_info) {
1045 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; 1046 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1046 else if (dev_info->state.created == 0) 1047 goto err_respond;
1048 }
1049 if (dev_info->state.created == 0) {
1047 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; 1050 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1051 goto err_respond;
1052 }
1048 1053
1049 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) 1054 if (dev_info->pending_msg_hdr) {
1050 device_epilog(dev_info, segment_state_running, 1055 /* only non-NULL if dev is still waiting on a response */
1051 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc, 1056 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1052 inmsg->hdr.flags.response_expected == 1, 1); 1057 goto err_respond;
1058 }
1059 if (inmsg->hdr.flags.response_expected == 1) {
1060 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1061 if (!pmsg_hdr) {
1062 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1063 goto err_respond;
1064 }
1065
1066 memcpy(pmsg_hdr, &inmsg->hdr,
1067 sizeof(struct controlvm_message_header));
1068 dev_info->pending_msg_hdr = pmsg_hdr;
1069 }
1070
1071 chipset_device_destroy(dev_info);
1072 return;
1073
1074err_respond:
1075 if (inmsg->hdr.flags.response_expected == 1)
1076 device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
1053} 1077}
1054 1078
1055/** 1079/**
@@ -1075,7 +1099,6 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1075 if (!info) 1099 if (!info)
1076 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; 1100 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1077 1101
1078 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1079 if ((offset == 0) || (bytes == 0)) 1102 if ((offset == 0) || (bytes == 0))
1080 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; 1103 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1081 1104
@@ -1083,6 +1106,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1083 if (!payload) 1106 if (!payload)
1084 return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; 1107 return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1085 1108
1109 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1086 info->offset = offset; 1110 info->offset = offset;
1087 info->bytes = bytes; 1111 info->bytes = bytes;
1088 info->ptr = payload; 1112 info->ptr = payload;
@@ -1111,16 +1135,16 @@ initialize_controlvm_payload(void)
1111 offsetof(struct spar_controlvm_channel_protocol, 1135 offsetof(struct spar_controlvm_channel_protocol,
1112 request_payload_offset), 1136 request_payload_offset),
1113 &payload_offset, sizeof(payload_offset)) < 0) { 1137 &payload_offset, sizeof(payload_offset)) < 0) {
1114 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, 1138 POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
1115 POSTCODE_SEVERITY_ERR); 1139 DIAG_SEVERITY_ERR);
1116 return; 1140 return;
1117 } 1141 }
1118 if (visorchannel_read(controlvm_channel, 1142 if (visorchannel_read(controlvm_channel,
1119 offsetof(struct spar_controlvm_channel_protocol, 1143 offsetof(struct spar_controlvm_channel_protocol,
1120 request_payload_bytes), 1144 request_payload_bytes),
1121 &payload_bytes, sizeof(payload_bytes)) < 0) { 1145 &payload_bytes, sizeof(payload_bytes)) < 0) {
1122 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, 1146 POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
1123 POSTCODE_SEVERITY_ERR); 1147 DIAG_SEVERITY_ERR);
1124 return; 1148 return;
1125 } 1149 }
1126 initialize_controlvm_payload_info(phys_addr, 1150 initialize_controlvm_payload_info(phys_addr,
@@ -1317,7 +1341,7 @@ static struct attribute *visorchipset_install_attrs[] = {
1317 NULL 1341 NULL
1318}; 1342};
1319 1343
1320static struct attribute_group visorchipset_install_group = { 1344static const struct attribute_group visorchipset_install_group = {
1321 .name = "install", 1345 .name = "install",
1322 .attrs = visorchipset_install_attrs 1346 .attrs = visorchipset_install_attrs
1323}; 1347};
@@ -1540,7 +1564,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
1540 u32 local_crash_msg_offset; 1564 u32 local_crash_msg_offset;
1541 u16 local_crash_msg_count; 1565 u16 local_crash_msg_count;
1542 1566
1543 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO); 1567 POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
1544 1568
1545 /* send init chipset msg */ 1569 /* send init chipset msg */
1546 msg.hdr.id = CONTROLVM_CHIPSET_INIT; 1570 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
@@ -1554,15 +1578,15 @@ setup_crash_devices_work_queue(struct work_struct *work)
1554 offsetof(struct spar_controlvm_channel_protocol, 1578 offsetof(struct spar_controlvm_channel_protocol,
1555 saved_crash_message_count), 1579 saved_crash_message_count),
1556 &local_crash_msg_count, sizeof(u16)) < 0) { 1580 &local_crash_msg_count, sizeof(u16)) < 0) {
1557 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, 1581 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1558 POSTCODE_SEVERITY_ERR); 1582 DIAG_SEVERITY_ERR);
1559 return; 1583 return;
1560 } 1584 }
1561 1585
1562 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { 1586 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1563 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, 1587 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
1564 local_crash_msg_count, 1588 local_crash_msg_count,
1565 POSTCODE_SEVERITY_ERR); 1589 DIAG_SEVERITY_ERR);
1566 return; 1590 return;
1567 } 1591 }
1568 1592
@@ -1571,8 +1595,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
1571 offsetof(struct spar_controlvm_channel_protocol, 1595 offsetof(struct spar_controlvm_channel_protocol,
1572 saved_crash_message_offset), 1596 saved_crash_message_offset),
1573 &local_crash_msg_offset, sizeof(u32)) < 0) { 1597 &local_crash_msg_offset, sizeof(u32)) < 0) {
1574 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, 1598 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1575 POSTCODE_SEVERITY_ERR); 1599 DIAG_SEVERITY_ERR);
1576 return; 1600 return;
1577 } 1601 }
1578 1602
@@ -1581,8 +1605,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
1581 local_crash_msg_offset, 1605 local_crash_msg_offset,
1582 &local_crash_bus_msg, 1606 &local_crash_bus_msg,
1583 sizeof(struct controlvm_message)) < 0) { 1607 sizeof(struct controlvm_message)) < 0) {
1584 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC, 1608 POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
1585 POSTCODE_SEVERITY_ERR); 1609 DIAG_SEVERITY_ERR);
1586 return; 1610 return;
1587 } 1611 }
1588 1612
@@ -1592,8 +1616,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
1592 sizeof(struct controlvm_message), 1616 sizeof(struct controlvm_message),
1593 &local_crash_dev_msg, 1617 &local_crash_dev_msg,
1594 sizeof(struct controlvm_message)) < 0) { 1618 sizeof(struct controlvm_message)) < 0) {
1595 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC, 1619 POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
1596 POSTCODE_SEVERITY_ERR); 1620 DIAG_SEVERITY_ERR);
1597 return; 1621 return;
1598 } 1622 }
1599 1623
@@ -1601,8 +1625,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
1601 if (local_crash_bus_msg.cmd.create_bus.channel_addr) { 1625 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1602 bus_create(&local_crash_bus_msg); 1626 bus_create(&local_crash_bus_msg);
1603 } else { 1627 } else {
1604 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC, 1628 POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
1605 POSTCODE_SEVERITY_ERR); 1629 DIAG_SEVERITY_ERR);
1606 return; 1630 return;
1607 } 1631 }
1608 1632
@@ -1610,11 +1634,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
1610 if (local_crash_dev_msg.cmd.create_device.channel_addr) { 1634 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
1611 my_device_create(&local_crash_dev_msg); 1635 my_device_create(&local_crash_dev_msg);
1612 } else { 1636 } else {
1613 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC, 1637 POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
1614 POSTCODE_SEVERITY_ERR); 1638 DIAG_SEVERITY_ERR);
1615 return; 1639 return;
1616 } 1640 }
1617 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO); 1641 POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
1618} 1642}
1619 1643
1620void 1644void
@@ -2119,8 +2143,6 @@ visorchipset_init(struct acpi_device *acpi_device)
2119 if (!addr) 2143 if (!addr)
2120 goto error; 2144 goto error;
2121 2145
2122 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2123
2124 controlvm_channel = visorchannel_create_with_lock(addr, 0, 2146 controlvm_channel = visorchannel_create_with_lock(addr, 0,
2125 GFP_KERNEL, uuid); 2147 GFP_KERNEL, uuid);
2126 if (!controlvm_channel) 2148 if (!controlvm_channel)
@@ -2152,11 +2174,12 @@ visorchipset_init(struct acpi_device *acpi_device)
2152 2174
2153 visorchipset_platform_device.dev.devt = major_dev; 2175 visorchipset_platform_device.dev.devt = major_dev;
2154 if (platform_device_register(&visorchipset_platform_device) < 0) { 2176 if (platform_device_register(&visorchipset_platform_device) < 0) {
2155 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); 2177 POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
2178 DIAG_SEVERITY_ERR);
2156 err = -ENODEV; 2179 err = -ENODEV;
2157 goto error_cancel_work; 2180 goto error_cancel_work;
2158 } 2181 }
2159 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); 2182 POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
2160 2183
2161 err = visorbus_init(); 2184 err = visorbus_init();
2162 if (err < 0) 2185 if (err < 0)
@@ -2178,14 +2201,14 @@ error_destroy_channel:
2178 visorchannel_destroy(controlvm_channel); 2201 visorchannel_destroy(controlvm_channel);
2179 2202
2180error: 2203error:
2181 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR); 2204 POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
2182 return err; 2205 return err;
2183} 2206}
2184 2207
2185static int 2208static int
2186visorchipset_exit(struct acpi_device *acpi_device) 2209visorchipset_exit(struct acpi_device *acpi_device)
2187{ 2210{
2188 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); 2211 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
2189 2212
2190 visorbus_exit(); 2213 visorbus_exit();
2191 2214
@@ -2196,7 +2219,7 @@ visorchipset_exit(struct acpi_device *acpi_device)
2196 2219
2197 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt); 2220 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2198 platform_device_unregister(&visorchipset_platform_device); 2221 platform_device_unregister(&visorchipset_platform_device);
2199 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); 2222 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
2200 2223
2201 return 0; 2224 return 0;
2202} 2225}
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 86e695d5a441..674a88b657d3 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -92,15 +92,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
92#define ISSUE_IO_VMCALL(method, param, result) \ 92#define ISSUE_IO_VMCALL(method, param, result) \
93 (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \ 93 (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
94 (param) >> 32)) 94 (param) >> 32))
95#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \
96 unisys_extended_vmcall(method, param1, param2, param3)
97
98 /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
99 * not used much
100 */
101#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
102 ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
103 MDS_APPOS, postcode)
104 95
105/* Structures for IO VMCALLs */ 96/* Structures for IO VMCALLs */
106 97
@@ -117,118 +108,53 @@ struct vmcall_io_controlvm_addr_params {
117 108
118/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/ 109/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
119enum driver_pc { /* POSTCODE driver identifier tuples */ 110enum driver_pc { /* POSTCODE driver identifier tuples */
120 /* visorchipset driver files */
121 VISOR_CHIPSET_PC = 0xA0,
122 VISOR_CHIPSET_PC_controlvm_c = 0xA1,
123 VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
124 VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
125 VISOR_CHIPSET_PC_file_c = 0xA4,
126 VISOR_CHIPSET_PC_parser_c = 0xA5,
127 VISOR_CHIPSET_PC_testing_c = 0xA6,
128 VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
129 VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
130 /* visorbus driver files */ 111 /* visorbus driver files */
131 VISOR_BUS_PC = 0xB0, 112 VISOR_BUS_PC = 0xF0,
132 VISOR_BUS_PC_businst_attr_c = 0xB1, 113 VISOR_BUS_PC_visorbus_main_c = 0xFF,
133 VISOR_BUS_PC_channel_attr_c = 0xB2, 114 VISOR_BUS_PC_visorchipset_c = 0xFE,
134 VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
135 VISOR_BUS_PC_visorbus_main_c = 0xB4,
136 /* visorclientbus driver files */
137 VISOR_CLIENT_BUS_PC = 0xC0,
138 VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
139 /* virt hba driver files */
140 VIRT_HBA_PC = 0xC2,
141 VIRT_HBA_PC_virthba_c = 0xC3,
142 /* virtpci driver files */
143 VIRT_PCI_PC = 0xC4,
144 VIRT_PCI_PC_virtpci_c = 0xC5,
145 /* virtnic driver files */
146 VIRT_NIC_PC = 0xC6,
147 VIRT_NIC_P_virtnic_c = 0xC7,
148 /* uislib driver files */
149 UISLIB_PC = 0xD0,
150 UISLIB_PC_uislib_c = 0xD1,
151 UISLIB_PC_uisqueue_c = 0xD2,
152 /* 0xD3 RESERVED */
153 UISLIB_PC_uisutils_c = 0xD4,
154}; 115};
155 116
156enum event_pc { /* POSTCODE event identifier tuples */ 117enum event_pc { /* POSTCODE event identifier tuples */
157 ATTACH_PORT_ENTRY_PC = 0x001, 118 BUS_CREATE_ENTRY_PC = 0x001,
158 ATTACH_PORT_FAILURE_PC = 0x002, 119 BUS_CREATE_FAILURE_PC = 0x002,
159 ATTACH_PORT_SUCCESS_PC = 0x003, 120 BUS_CREATE_EXIT_PC = 0x003,
160 BUS_FAILURE_PC = 0x004, 121 BUS_CONFIGURE_ENTRY_PC = 0x004,
161 BUS_CREATE_ENTRY_PC = 0x005, 122 BUS_CONFIGURE_FAILURE_PC = 0x005,
162 BUS_CREATE_FAILURE_PC = 0x006, 123 BUS_CONFIGURE_EXIT_PC = 0x006,
163 BUS_CREATE_EXIT_PC = 0x007, 124 CHIPSET_INIT_ENTRY_PC = 0x007,
164 BUS_CONFIGURE_ENTRY_PC = 0x008, 125 CHIPSET_INIT_SUCCESS_PC = 0x008,
165 BUS_CONFIGURE_FAILURE_PC = 0x009, 126 CHIPSET_INIT_FAILURE_PC = 0x009,
166 BUS_CONFIGURE_EXIT_PC = 0x00A, 127 CHIPSET_INIT_EXIT_PC = 0x00A,
167 CHIPSET_INIT_ENTRY_PC = 0x00B, 128 CONTROLVM_INIT_FAILURE_PC = 0x00B,
168 CHIPSET_INIT_SUCCESS_PC = 0x00C, 129 DEVICE_CREATE_ENTRY_PC = 0x00C,
169 CHIPSET_INIT_FAILURE_PC = 0x00D, 130 DEVICE_CREATE_FAILURE_PC = 0x00D,
170 CHIPSET_INIT_EXIT_PC = 0x00E, 131 DEVICE_CREATE_SUCCESS_PC = 0x00E,
171 CREATE_WORKQUEUE_PC = 0x00F, 132 DEVICE_CREATE_EXIT_PC = 0x00F,
172 CREATE_WORKQUEUE_FAILED_PC = 0x0A0, 133 DEVICE_ADD_PC = 0x010,
173 CONTROLVM_INIT_FAILURE_PC = 0x0A1, 134 DEVICE_REGISTER_FAILURE_PC = 0x011,
174 DEVICE_CREATE_ENTRY_PC = 0x0A2, 135 DEVICE_CHANGESTATE_FAILURE_PC = 0x012,
175 DEVICE_CREATE_FAILURE_PC = 0x0A3, 136 DRIVER_ENTRY_PC = 0x013,
176 DEVICE_CREATE_SUCCESS_PC = 0x0A4, 137 DRIVER_EXIT_PC = 0x014,
177 DEVICE_CREATE_EXIT_PC = 0x0A5, 138 MALLOC_FAILURE_PC = 0x015,
178 DEVICE_ADD_PC = 0x0A6, 139 CRASH_DEV_ENTRY_PC = 0x016,
179 DEVICE_REGISTER_FAILURE_PC = 0x0A7, 140 CRASH_DEV_EXIT_PC = 0x017,
180 DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8, 141 CRASH_DEV_RD_BUS_FAILURE_PC = 0x018,
181 DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9, 142 CRASH_DEV_RD_DEV_FAILURE_PC = 0x019,
182 DEVICE_CHANGESTATE_EXIT_PC = 0x0AA, 143 CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A,
183 DRIVER_ENTRY_PC = 0x0AB, 144 CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B,
184 DRIVER_EXIT_PC = 0x0AC, 145 CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C,
185 MALLOC_FAILURE_PC = 0x0AD, 146 CRASH_DEV_COUNT_FAILURE_PC = 0x01D,
186 QUEUE_DELAYED_WORK_PC = 0x0AE, 147 SAVE_MSG_BUS_FAILURE_PC = 0x01E,
187 /* 0x0B7 RESERVED */ 148 SAVE_MSG_DEV_FAILURE_PC = 0x01F,
188 VBUS_CHANNEL_ENTRY_PC = 0x0B8,
189 VBUS_CHANNEL_FAILURE_PC = 0x0B9,
190 VBUS_CHANNEL_EXIT_PC = 0x0BA,
191 VHBA_CREATE_ENTRY_PC = 0x0BB,
192 VHBA_CREATE_FAILURE_PC = 0x0BC,
193 VHBA_CREATE_EXIT_PC = 0x0BD,
194 VHBA_CREATE_SUCCESS_PC = 0x0BE,
195 VHBA_COMMAND_HANDLER_PC = 0x0BF,
196 VHBA_PROBE_ENTRY_PC = 0x0C0,
197 VHBA_PROBE_FAILURE_PC = 0x0C1,
198 VHBA_PROBE_EXIT_PC = 0x0C2,
199 VNIC_CREATE_ENTRY_PC = 0x0C3,
200 VNIC_CREATE_FAILURE_PC = 0x0C4,
201 VNIC_CREATE_SUCCESS_PC = 0x0C5,
202 VNIC_PROBE_ENTRY_PC = 0x0C6,
203 VNIC_PROBE_FAILURE_PC = 0x0C7,
204 VNIC_PROBE_EXIT_PC = 0x0C8,
205 VPCI_CREATE_ENTRY_PC = 0x0C9,
206 VPCI_CREATE_FAILURE_PC = 0x0CA,
207 VPCI_CREATE_EXIT_PC = 0x0CB,
208 VPCI_PROBE_ENTRY_PC = 0x0CC,
209 VPCI_PROBE_FAILURE_PC = 0x0CD,
210 VPCI_PROBE_EXIT_PC = 0x0CE,
211 CRASH_DEV_ENTRY_PC = 0x0CF,
212 CRASH_DEV_EXIT_PC = 0x0D0,
213 CRASH_DEV_HADDR_NULL = 0x0D1,
214 CRASH_DEV_CONTROLVM_NULL = 0x0D2,
215 CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
216 CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
217 CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
218 CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
219 CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
220 CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
221 SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
222 SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
223 CALLHOME_INIT_FAILURE_PC = 0x0DB
224}; 149};
225 150
226#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR 151/* Write a 64-bit value to the hypervisor's log file
227#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING 152 * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where
228/* TODO-> Info currently doesn't show, so we set info=warning */ 153 * A is an identifier for the file logging the postcode
229#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT 154 * B is an identifier for the event logging the postcode
230 155 * C is the line logging the postcode
231/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR); 156 * D is additional information the caller wants to log
157 * E is additional information the caller wants to log
232 * Please also note that the resulting postcode is in hex, so if you are 158 * Please also note that the resulting postcode is in hex, so if you are
233 * searching for the __LINE__ number, convert it first to decimal. The line 159 * searching for the __LINE__ number, convert it first to decimal. The line
234 * number combined with driver and type of call, will allow you to track down 160 * number combined with driver and type of call, will allow you to track down
@@ -236,35 +162,16 @@ enum event_pc { /* POSTCODE event identifier tuples */
236 * entered/exited from. 162 * entered/exited from.
237 */ 163 */
238 164
239/* BASE FUNCTIONS */ 165#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity) \
240#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
241do { \
242 unsigned long long post_code_temp; \
243 post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
244 ((((u64)__LINE__) & 0xFFF) << 32) | \
245 (((u64)pc32bit) & 0xFFFFFFFF); \
246 ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
247} while (0)
248
249#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
250do { \ 166do { \
251 unsigned long long post_code_temp; \ 167 unsigned long long post_code_temp; \
252 post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \ 168 post_code_temp = (((u64)CURRENT_FILE_PC) << 56) | \
169 (((u64)EVENT_PC) << 44) | \
253 ((((u64)__LINE__) & 0xFFF) << 32) | \ 170 ((((u64)__LINE__) & 0xFFF) << 32) | \
254 ((((u64)pc16bit1) & 0xFFFF) << 16) | \ 171 ((((u64)pc16bit1) & 0xFFFF) << 16) | \
255 (((u64)pc16bit2) & 0xFFFF); \ 172 (((u64)pc16bit2) & 0xFFFF); \
256 ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \ 173 unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity, \
174 MDS_APPOS, post_code_temp); \
257} while (0) 175} while (0)
258 176
259/* MOST COMMON */
260#define POSTCODE_LINUX_2(EVENT_PC, severity) \
261 POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
262
263#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
264 POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
265
266#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
267 POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
268 pc16bit2, severity)
269
270#endif /* __IOMONINTF_H__ */ 177#endif /* __IOMONINTF_H__ */
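To make the bit layout behind the new POSTCODE_LINUX() macro concrete, here is a minimal, self-contained C sketch that composes the same 64-bit value in userspace. The field widths mirror the macro above; the file/event/line values used in main() are purely illustrative and not taken from a real log.

#include <stdint.h>
#include <stdio.h>

/* Same field layout as POSTCODE_LINUX() above:
 *   bits 63-56  file identifier (CURRENT_FILE_PC)
 *   bits 55-44  event identifier (EVENT_PC)
 *   bits 43-32  source line (__LINE__ & 0xFFF)
 *   bits 31-16  first 16-bit argument
 *   bits 15-0   second 16-bit argument
 */
static uint64_t build_postcode(uint64_t file_pc, uint64_t event_pc,
                               uint64_t line, uint64_t pc16bit1,
                               uint64_t pc16bit2)
{
        return (file_pc << 56) |
               (event_pc << 44) |
               ((line & 0xFFF) << 32) |
               ((pc16bit1 & 0xFFFF) << 16) |
               (pc16bit2 & 0xFFFF);
}

int main(void)
{
        /* e.g. VISOR_BUS_PC_visorchipset_c (0xFE), CHIPSET_INIT_SUCCESS_PC
         * (0x008), logged from line 2182 with no extra arguments;
         * prints 0xfe00888600000000
         */
        printf("0x%016llx\n",
               (unsigned long long)build_postcode(0xFE, 0x008, 2182, 0, 0));
        return 0;
}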
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 6f94b646f7c5..949cce680b29 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -409,6 +409,9 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
409 if (!devdata->visorinput_dev) 409 if (!devdata->visorinput_dev)
410 goto cleanups_register; 410 goto cleanups_register;
411 break; 411 break;
412 default:
413 /* No other input devices supported */
414 break;
412 } 415 }
413 416
414 dev_set_drvdata(&dev->device, devdata); 417 dev_set_drvdata(&dev->device, devdata);
@@ -653,6 +656,9 @@ visorinput_channel_interrupt(struct visor_device *dev)
653 input_report_rel(visorinput_dev, REL_WHEEL, -1); 656 input_report_rel(visorinput_dev, REL_WHEEL, -1);
654 input_sync(visorinput_dev); 657 input_sync(visorinput_dev);
655 break; 658 break;
659 default:
660 /* Unsupported input action */
661 break;
656 } 662 }
657 } 663 }
658} 664}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index f8a584bf4a77..c1f674f5268c 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1371,7 +1371,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
1371 " num_rcv_bufs = %d\n", 1371 " num_rcv_bufs = %d\n",
1372 devdata->num_rcv_bufs); 1372 devdata->num_rcv_bufs);
1373 str_pos += scnprintf(vbuf + str_pos, len - str_pos, 1373 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1374 " max_oustanding_next_xmits = %lu\n", 1374 " max_outstanding_next_xmits = %lu\n",
1375 devdata->max_outstanding_net_xmits); 1375 devdata->max_outstanding_net_xmits);
1376 str_pos += scnprintf(vbuf + str_pos, len - str_pos, 1376 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1377 " upper_threshold_net_xmits = %lu\n", 1377 " upper_threshold_net_xmits = %lu\n",
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9676fb29075a..e61e4ca064a8 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,9 +1,10 @@
1config BCM2708_VCHIQ 1config BCM2835_VCHIQ
2 tristate "Videocore VCHIQ" 2 tristate "Videocore VCHIQ"
3 depends on RASPBERRYPI_FIRMWARE && BROKEN 3 depends on HAS_DMA
4 depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
4 default y 5 default y
5 help 6 help
6 Kernel to VideoCore communication interface for the 7 Kernel to VideoCore communication interface for the
7 BCM2708 family of products. 8 BCM2835 family of products.
8 Defaults to Y when the Broadcom Videocore services 9 Defaults to Y when the Broadcom Videocore services
9 are included in the build, N otherwise. 10 are included in the build, N otherwise.
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 90ab4781df2c..1a9e742ee40d 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o 1obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
2 2
3vchiq-objs := \ 3vchiq-objs := \
4 interface/vchiq_arm/vchiq_core.o \ 4 interface/vchiq_arm/vchiq_core.o \
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
new file mode 100644
index 000000000000..03aa65183b25
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -0,0 +1,50 @@
11) Port to aarch64
2
3This driver won't be very useful unless we also have it working on
4Raspberry Pi 3. This requires, at least:
5
6 - Figure out an alternative to the dmac_map_area() hack.
7
8 - Decide what to use instead of dsb().
9
10 - Do something about (int) cast of bulk->data in
11 vchiq_bulk_transfer().
12
13 bulk->data is a bus address going across to the firmware. We know
14 our bus addresses are <32bit.
15
162) Write a DT binding doc and get the corresponding DT node merged to
17 bcm2835.
18
19This will let the driver probe when enabled.
20
213) Import drivers using VCHI.
22
23VCHI is just a tool to let drivers talk to the firmware. Here are
24some of the ones we want:
25
26 - vc_mem (https://github.com/raspberrypi/linux/blob/rpi-4.4.y/drivers/char/broadcom/vc_mem.c)
27
28 This driver is what the vcdbg userspace program uses to set up its
29 requests to the firmware, which are transmitted across VCHIQ. vcdbg
30 is really useful for debugging firmware interactions.
31
32 - bcm2835-camera (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/media/platform/bcm2835)
33
34 This driver will let us get images from the camera using the MMAL
35 protocol over VCHI.
36
37 - VCSM (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/char/broadcom/vc_sm)
38
39 This driver is used for talking about regions of VC memory across
40 firmware protocols including VCHI. We'll want to extend this driver
41 to manage these buffers as dmabufs so that we can zero-copy import
42 camera images into vc4 for rendering/display.
43
444) Garbage-collect unused code
45
46One of the reasons this driver wasn't upstreamed previously was that
47there's a lot of code that got built that's probably unnecessary these
48days. Once we have the set of VCHI-using drivers we want in tree, we
49should be able to do a sweep of the code to see what's left that's
50unused.
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 1b17e98f7379..d6937288210c 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -226,25 +226,12 @@ extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
226 int value); 226 int value);
227 227
228// Routine to send a message across a service 228// Routine to send a message across a service
229extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle, 229extern int32_t
230 const void *data, 230 vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
231 uint32_t data_size, 231 ssize_t (*copy_callback)(void *context, void *dest,
232 VCHI_FLAGS_T flags, 232 size_t offset, size_t maxsize),
233 void *msg_handle ); 233 void *context,
234 234 uint32_t data_size);
235// scatter-gather (vector) and send message
236int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
237 VCHI_MSG_VECTOR_EX_T *vector,
238 uint32_t count,
239 VCHI_FLAGS_T flags,
240 void *msg_handle );
241
242// legacy scatter-gather (vector) and send message, only handles pointers
243int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
244 VCHI_MSG_VECTOR_T *vector,
245 uint32_t count,
246 VCHI_FLAGS_T flags,
247 void *msg_handle );
248 235
249// Routine to receive a msg from a service 236// Routine to receive a msg from a service
250// Dequeue is equivalent to hold, copy into client buffer, release 237// Dequeue is equivalent to hold, copy into client buffer, release
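The reworked vchi_msg_queue() above no longer takes a flat (data, size) pair. Instead the caller supplies a copy callback that vchiq invokes with a destination buffer, the current offset into the message and a maximum chunk size, and which returns the number of bytes it produced (0 once the message is complete, negative on error); this is the same contract that vchiq_ioc_copy_element_data() implements later in this series. Below is a minimal sketch of such a callback for a message held in one contiguous kernel buffer; the context structure and helper names are hypothetical and only illustrate the calling convention.

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical caller-side helper for the callback-based vchi_msg_queue().
 * Assumes the whole message already sits in one contiguous kernel buffer.
 */
struct contig_msg_ctx {
        const char *buf;        /* message payload */
        size_t len;             /* total payload length */
};

static ssize_t contig_copy_callback(void *context, void *dest,
                                    size_t offset, size_t maxsize)
{
        struct contig_msg_ctx *ctx = context;
        size_t chunk;

        if (offset >= ctx->len)
                return 0;               /* message fully copied */

        chunk = ctx->len - offset;
        if (chunk > maxsize)
                chunk = maxsize;        /* copy at most maxsize bytes */

        memcpy(dest, ctx->buf + offset, chunk);
        return chunk;                   /* bytes written this call */
}

/* Usage sketch:
 *      struct contig_msg_ctx ctx = { .buf = payload, .len = payload_len };
 *      vchi_msg_queue(handle, contig_copy_callback, &ctx, payload_len);
 */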
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
index ad398bae6ee4..21adf89a9065 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -37,4 +37,15 @@
37#include "vchiq_if.h" 37#include "vchiq_if.h"
38#include "vchiq_util.h" 38#include "vchiq_util.h"
39 39
40/* Do this so that we can test-build the code on non-rpi systems */
41#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
42
43#else
44
45#ifndef dsb
46#define dsb(a)
47#endif
48
49#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
50
40#endif 51#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 1091b9f1dd07..2b500d85cebc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -45,16 +45,8 @@
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <soc/bcm2835/raspberrypi-firmware.h> 46#include <soc/bcm2835/raspberrypi-firmware.h>
47 47
48#define dmac_map_area __glue(_CACHE,_dma_map_area)
49#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
50
51extern void dmac_map_area(const void *, size_t, int);
52extern void dmac_unmap_area(const void *, size_t, int);
53
54#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32) 48#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
55 49
56#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
57
58#include "vchiq_arm.h" 50#include "vchiq_arm.h"
59#include "vchiq_2835.h" 51#include "vchiq_2835.h"
60#include "vchiq_connected.h" 52#include "vchiq_connected.h"
@@ -70,13 +62,25 @@ typedef struct vchiq_2835_state_struct {
70 VCHIQ_ARM_STATE_T arm_state; 62 VCHIQ_ARM_STATE_T arm_state;
71} VCHIQ_2835_ARM_STATE_T; 63} VCHIQ_2835_ARM_STATE_T;
72 64
65struct vchiq_pagelist_info {
66 PAGELIST_T *pagelist;
67 size_t pagelist_buffer_size;
68 dma_addr_t dma_addr;
69 enum dma_data_direction dma_dir;
70 unsigned int num_pages;
71 unsigned int pages_need_release;
72 struct page **pages;
73 struct scatterlist *scatterlist;
74 unsigned int scatterlist_mapped;
75};
76
73static void __iomem *g_regs; 77static void __iomem *g_regs;
74static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE); 78static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
75static unsigned int g_fragments_size; 79static unsigned int g_fragments_size;
76static char *g_fragments_base; 80static char *g_fragments_base;
77static char *g_free_fragments; 81static char *g_free_fragments;
78static struct semaphore g_free_fragments_sema; 82static struct semaphore g_free_fragments_sema;
79static unsigned long g_virt_to_bus_offset; 83static struct device *g_dev;
80 84
81extern int vchiq_arm_log_level; 85extern int vchiq_arm_log_level;
82 86
@@ -85,12 +89,13 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex);
85static irqreturn_t 89static irqreturn_t
86vchiq_doorbell_irq(int irq, void *dev_id); 90vchiq_doorbell_irq(int irq, void *dev_id);
87 91
88static int 92static struct vchiq_pagelist_info *
89create_pagelist(char __user *buf, size_t count, unsigned short type, 93create_pagelist(char __user *buf, size_t count, unsigned short type,
90 struct task_struct *task, PAGELIST_T ** ppagelist); 94 struct task_struct *task);
91 95
92static void 96static void
93free_pagelist(PAGELIST_T *pagelist, int actual); 97free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
98 int actual);
94 99
95int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) 100int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
96{ 101{
@@ -104,7 +109,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
104 int slot_mem_size, frag_mem_size; 109 int slot_mem_size, frag_mem_size;
105 int err, irq, i; 110 int err, irq, i;
106 111
107 g_virt_to_bus_offset = virt_to_dma(dev, (void *)0); 112 /*
113 * VCHI messages between the CPU and firmware use
114 * 32-bit bus addresses.
115 */
116 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
117
118 if (err < 0)
119 return err;
108 120
109 (void)of_property_read_u32(dev->of_node, "cache-line-size", 121 (void)of_property_read_u32(dev->of_node, "cache-line-size",
110 &g_cache_line_size); 122 &g_cache_line_size);
@@ -121,7 +133,7 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
121 return -ENOMEM; 133 return -ENOMEM;
122 } 134 }
123 135
124 WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0); 136 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
125 137
126 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size); 138 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
127 if (!vchiq_slot_zero) 139 if (!vchiq_slot_zero)
@@ -173,9 +185,10 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
173 return err ? : -ENXIO; 185 return err ? : -ENXIO;
174 } 186 }
175 187
188 g_dev = dev;
176 vchiq_log_info(vchiq_arm_log_level, 189 vchiq_log_info(vchiq_arm_log_level,
177 "vchiq_init - done (slots %x, phys %pad)", 190 "vchiq_init - done (slots %pK, phys %pad)",
178 (unsigned int)vchiq_slot_zero, &slot_phys); 191 vchiq_slot_zero, &slot_phys);
179 192
180 vchiq_call_connected_callbacks(); 193 vchiq_call_connected_callbacks();
181 194
@@ -213,47 +226,37 @@ remote_event_signal(REMOTE_EVENT_T *event)
213 226
214 event->fired = 1; 227 event->fired = 1;
215 228
216 dsb(); /* data barrier operation */ 229 dsb(sy); /* data barrier operation */
217 230
218 if (event->armed) 231 if (event->armed)
219 writel(0, g_regs + BELL2); /* trigger vc interrupt */ 232 writel(0, g_regs + BELL2); /* trigger vc interrupt */
220} 233}
221 234
222int
223vchiq_copy_from_user(void *dst, const void *src, int size)
224{
225 if ((uint32_t)src < TASK_SIZE) {
226 return copy_from_user(dst, src, size);
227 } else {
228 memcpy(dst, src, size);
229 return 0;
230 }
231}
232
233VCHIQ_STATUS_T 235VCHIQ_STATUS_T
234vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle, 236vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
235 void *offset, int size, int dir) 237 void *offset, int size, int dir)
236{ 238{
237 PAGELIST_T *pagelist; 239 struct vchiq_pagelist_info *pagelistinfo;
238 int ret;
239 240
240 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID); 241 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
241 242
242 ret = create_pagelist((char __user *)offset, size, 243 pagelistinfo = create_pagelist((char __user *)offset, size,
243 (dir == VCHIQ_BULK_RECEIVE) 244 (dir == VCHIQ_BULK_RECEIVE)
244 ? PAGELIST_READ 245 ? PAGELIST_READ
245 : PAGELIST_WRITE, 246 : PAGELIST_WRITE,
246 current, 247 current);
247 &pagelist); 248
248 if (ret != 0) 249 if (!pagelistinfo)
249 return VCHIQ_ERROR; 250 return VCHIQ_ERROR;
250 251
251 bulk->handle = memhandle; 252 bulk->handle = memhandle;
252 bulk->data = VCHIQ_ARM_ADDRESS(pagelist); 253 bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
253 254
254 /* Store the pagelist address in remote_data, which isn't used by the 255 /*
255 slave. */ 256 * Store the pagelistinfo address in remote_data,
256 bulk->remote_data = pagelist; 257 * which isn't used by the slave.
258 */
259 bulk->remote_data = pagelistinfo;
257 260
258 return VCHIQ_SUCCESS; 261 return VCHIQ_SUCCESS;
259} 262}
@@ -262,7 +265,8 @@ void
262vchiq_complete_bulk(VCHIQ_BULK_T *bulk) 265vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
263{ 266{
264 if (bulk && bulk->remote_data && bulk->actual) 267 if (bulk && bulk->remote_data && bulk->actual)
265 free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual); 268 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
269 bulk->actual);
266} 270}
267 271
268void 272void
@@ -350,57 +354,93 @@ vchiq_doorbell_irq(int irq, void *dev_id)
350 return ret; 354 return ret;
351} 355}
352 356
357static void
358cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
359{
360 if (pagelistinfo->scatterlist_mapped) {
361 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
362 pagelistinfo->num_pages, pagelistinfo->dma_dir);
363 }
364
365 if (pagelistinfo->pages_need_release) {
366 unsigned int i;
367
368 for (i = 0; i < pagelistinfo->num_pages; i++)
369 put_page(pagelistinfo->pages[i]);
370 }
371
372 dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
373 pagelistinfo->pagelist, pagelistinfo->dma_addr);
374}
375
353/* There is a potential problem with partial cache lines (pages?) 376/* There is a potential problem with partial cache lines (pages?)
354** at the ends of the block when reading. If the CPU accessed anything in 377** at the ends of the block when reading. If the CPU accessed anything in
355** the same line (page?) then it may have pulled old data into the cache, 378** the same line (page?) then it may have pulled old data into the cache,
356** obscuring the new data underneath. We can solve this by transferring the 379** obscuring the new data underneath. We can solve this by transferring the
357** partial cache lines separately, and allowing the ARM to copy into the 380** partial cache lines separately, and allowing the ARM to copy into the
358** cached area. 381** cached area.
359
360** N.B. This implementation plays slightly fast and loose with the Linux
361** driver programming rules, e.g. its use of dmac_map_area instead of
362** dma_map_single, but it isn't a multi-platform driver and it benefits
363** from increased speed as a result.
364*/ 382*/
365 383
366static int 384static struct vchiq_pagelist_info *
367create_pagelist(char __user *buf, size_t count, unsigned short type, 385create_pagelist(char __user *buf, size_t count, unsigned short type,
368 struct task_struct *task, PAGELIST_T ** ppagelist) 386 struct task_struct *task)
369{ 387{
370 PAGELIST_T *pagelist; 388 PAGELIST_T *pagelist;
389 struct vchiq_pagelist_info *pagelistinfo;
371 struct page **pages; 390 struct page **pages;
372 unsigned long *addrs; 391 u32 *addrs;
373 unsigned int num_pages, offset, i; 392 unsigned int num_pages, offset, i, k;
374 char *addr, *base_addr, *next_addr; 393 int actual_pages;
375 int run, addridx, actual_pages; 394 size_t pagelist_size;
376 unsigned long *need_release; 395 struct scatterlist *scatterlist, *sg;
377 396 int dma_buffers;
378 offset = (unsigned int)buf & (PAGE_SIZE - 1); 397 dma_addr_t dma_addr;
398
399 offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
379 num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE; 400 num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
380 401
381 *ppagelist = NULL; 402 pagelist_size = sizeof(PAGELIST_T) +
403 (num_pages * sizeof(u32)) +
404 (num_pages * sizeof(pages[0]) +
405 (num_pages * sizeof(struct scatterlist))) +
406 sizeof(struct vchiq_pagelist_info);
382 407
383 /* Allocate enough storage to hold the page pointers and the page 408 /* Allocate enough storage to hold the page pointers and the page
384 ** list 409 ** list
385 */ 410 */
386 pagelist = kmalloc(sizeof(PAGELIST_T) + 411 pagelist = dma_zalloc_coherent(g_dev,
387 (num_pages * sizeof(unsigned long)) + 412 pagelist_size,
388 sizeof(unsigned long) + 413 &dma_addr,
389 (num_pages * sizeof(pages[0])), 414 GFP_KERNEL);
390 GFP_KERNEL); 415
391 416 vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
392 vchiq_log_trace(vchiq_arm_log_level, 417 pagelist);
393 "create_pagelist - %x", (unsigned int)pagelist);
394 if (!pagelist) 418 if (!pagelist)
395 return -ENOMEM; 419 return NULL;
396 420
397 addrs = pagelist->addrs; 421 addrs = pagelist->addrs;
398 need_release = (unsigned long *)(addrs + num_pages); 422 pages = (struct page **)(addrs + num_pages);
399 pages = (struct page **)(addrs + num_pages + 1); 423 scatterlist = (struct scatterlist *)(pages + num_pages);
424 pagelistinfo = (struct vchiq_pagelist_info *)
425 (scatterlist + num_pages);
426
427 pagelist->length = count;
428 pagelist->type = type;
429 pagelist->offset = offset;
430
431 /* Populate the fields of the pagelistinfo structure */
432 pagelistinfo->pagelist = pagelist;
433 pagelistinfo->pagelist_buffer_size = pagelist_size;
434 pagelistinfo->dma_addr = dma_addr;
435 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
436 DMA_TO_DEVICE : DMA_FROM_DEVICE;
437 pagelistinfo->num_pages = num_pages;
438 pagelistinfo->pages_need_release = 0;
439 pagelistinfo->pages = pages;
440 pagelistinfo->scatterlist = scatterlist;
441 pagelistinfo->scatterlist_mapped = 0;
400 442
401 if (is_vmalloc_addr(buf)) { 443 if (is_vmalloc_addr(buf)) {
402 int dir = (type == PAGELIST_WRITE) ?
403 DMA_TO_DEVICE : DMA_FROM_DEVICE;
404 unsigned long length = count; 444 unsigned long length = count;
405 unsigned int off = offset; 445 unsigned int off = offset;
406 446
@@ -413,14 +453,13 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
413 if (bytes > length) 453 if (bytes > length)
414 bytes = length; 454 bytes = length;
415 pages[actual_pages] = pg; 455 pages[actual_pages] = pg;
416 dmac_map_area(page_address(pg) + off, bytes, dir);
417 length -= bytes; 456 length -= bytes;
418 off = 0; 457 off = 0;
419 } 458 }
420 *need_release = 0; /* do not try and release vmalloc pages */ 459 /* do not try and release vmalloc pages */
421 } else { 460 } else {
422 down_read(&task->mm->mmap_sem); 461 down_read(&task->mm->mmap_sem);
423 actual_pages = get_user_pages(task, task->mm, 462 actual_pages = get_user_pages(
424 (unsigned long)buf & ~(PAGE_SIZE - 1), 463 (unsigned long)buf & ~(PAGE_SIZE - 1),
425 num_pages, 464 num_pages,
426 (type == PAGELIST_READ) ? FOLL_WRITE : 0, 465 (type == PAGELIST_READ) ? FOLL_WRITE : 0,
@@ -438,44 +477,59 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
438 while (actual_pages > 0) 477 while (actual_pages > 0)
439 { 478 {
440 actual_pages--; 479 actual_pages--;
441 page_cache_release(pages[actual_pages]); 480 put_page(pages[actual_pages]);
442 } 481 }
443 kfree(pagelist); 482 cleaup_pagelistinfo(pagelistinfo);
444 if (actual_pages == 0) 483 return NULL;
445 actual_pages = -ENOMEM;
446 return actual_pages;
447 } 484 }
448 *need_release = 1; /* release user pages */ 485 /* release user pages */
486 pagelistinfo->pages_need_release = 1;
449 } 487 }
450 488
451 pagelist->length = count; 489 /*
452 pagelist->type = type; 490 * Initialize the scatterlist so that the magic cookie
453 pagelist->offset = offset; 491 * is filled if debugging is enabled
454 492 */
455 /* Group the pages into runs of contiguous pages */ 493 sg_init_table(scatterlist, num_pages);
456 494 /* Now set the pages for each scatterlist */
457 base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0])); 495 for (i = 0; i < num_pages; i++)
458 next_addr = base_addr + PAGE_SIZE; 496 sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
459 addridx = 0; 497
460 run = 0; 498 dma_buffers = dma_map_sg(g_dev,
499 scatterlist,
500 num_pages,
501 pagelistinfo->dma_dir);
502
503 if (dma_buffers == 0) {
504 cleaup_pagelistinfo(pagelistinfo);
505 return NULL;
506 }
461 507
462 for (i = 1; i < num_pages; i++) { 508 pagelistinfo->scatterlist_mapped = 1;
463 addr = VCHIQ_ARM_ADDRESS(page_address(pages[i])); 509
464 if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) { 510 /* Combine adjacent blocks for performance */
465 next_addr += PAGE_SIZE; 511 k = 0;
466 run++; 512 for_each_sg(scatterlist, sg, dma_buffers, i) {
513 u32 len = sg_dma_len(sg);
514 u32 addr = sg_dma_address(sg);
515
516 /* Note: addrs is the address + page_count - 1
517 * The firmware expects the block to be page
518 * aligned and a multiple of the page size
519 */
520 WARN_ON(len == 0);
521 WARN_ON(len & ~PAGE_MASK);
522 WARN_ON(addr & ~PAGE_MASK);
523 if (k > 0 &&
524 ((addrs[k - 1] & PAGE_MASK) |
525 ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
526 == addr) {
527 addrs[k - 1] += (len >> PAGE_SHIFT);
467 } else { 528 } else {
468 addrs[addridx] = (unsigned long)base_addr + run; 529 addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
469 addridx++;
470 base_addr = addr;
471 next_addr = addr + PAGE_SIZE;
472 run = 0;
473 } 530 }
474 } 531 }
475 532
476 addrs[addridx] = (unsigned long)base_addr + run;
477 addridx++;
478
479 /* Partial cache lines (fragments) require special measures */ 533 /* Partial cache lines (fragments) require special measures */
480 if ((type == PAGELIST_READ) && 534 if ((type == PAGELIST_READ) &&
481 ((pagelist->offset & (g_cache_line_size - 1)) || 535 ((pagelist->offset & (g_cache_line_size - 1)) ||
@@ -484,8 +538,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
484 char *fragments; 538 char *fragments;
485 539
486 if (down_interruptible(&g_free_fragments_sema) != 0) { 540 if (down_interruptible(&g_free_fragments_sema) != 0) {
487 kfree(pagelist); 541 cleaup_pagelistinfo(pagelistinfo);
488 return -EINTR; 542 return NULL;
489 } 543 }
490 544
491 WARN_ON(g_free_fragments == NULL); 545 WARN_ON(g_free_fragments == NULL);
@@ -499,29 +553,28 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
499 (fragments - g_fragments_base) / g_fragments_size; 553 (fragments - g_fragments_base) / g_fragments_size;
500 } 554 }
501 555
502 dmac_flush_range(pagelist, addrs + num_pages); 556 return pagelistinfo;
503
504 *ppagelist = pagelist;
505
506 return 0;
507} 557}
508 558
509static void 559static void
510free_pagelist(PAGELIST_T *pagelist, int actual) 560free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
561 int actual)
511{ 562{
512 unsigned long *need_release; 563 unsigned int i;
513 struct page **pages; 564 PAGELIST_T *pagelist = pagelistinfo->pagelist;
514 unsigned int num_pages, i; 565 struct page **pages = pagelistinfo->pages;
515 566 unsigned int num_pages = pagelistinfo->num_pages;
516 vchiq_log_trace(vchiq_arm_log_level,
517 "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
518 567
519 num_pages = 568 vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
520 (pagelist->length + pagelist->offset + PAGE_SIZE - 1) / 569 pagelistinfo->pagelist, actual);
521 PAGE_SIZE;
522 570
523 need_release = (unsigned long *)(pagelist->addrs + num_pages); 571 /*
524 pages = (struct page **)(pagelist->addrs + num_pages + 1); 572 * NOTE: dma_unmap_sg must be called before the
573 * cpu can touch any of the data/pages.
574 */
575 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
576 pagelistinfo->num_pages, pagelistinfo->dma_dir);
577 pagelistinfo->scatterlist_mapped = 0;
525 578
526 /* Deal with any partial cache lines (fragments) */ 579 /* Deal with any partial cache lines (fragments) */
527 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) { 580 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
@@ -559,27 +612,12 @@ free_pagelist(PAGELIST_T *pagelist, int actual)
559 up(&g_free_fragments_sema); 612 up(&g_free_fragments_sema);
560 } 613 }
561 614
562 if (*need_release) { 615 /* Need to mark all the pages dirty. */
563 unsigned int length = pagelist->length; 616 if (pagelist->type != PAGELIST_WRITE &&
564 unsigned int offset = pagelist->offset; 617 pagelistinfo->pages_need_release) {
565 618 for (i = 0; i < num_pages; i++)
566 for (i = 0; i < num_pages; i++) { 619 set_page_dirty(pages[i]);
567 struct page *pg = pages[i];
568
569 if (pagelist->type != PAGELIST_WRITE) {
570 unsigned int bytes = PAGE_SIZE - offset;
571
572 if (bytes > length)
573 bytes = length;
574 dmac_unmap_area(page_address(pg) + offset,
575 bytes, DMA_FROM_DEVICE);
576 length -= bytes;
577 offset = 0;
578 set_page_dirty(pg);
579 }
580 page_cache_release(pg);
581 }
582 } 620 }
583 621
584 kfree(pagelist); 622 cleaup_pagelistinfo(pagelistinfo);
585} 623}
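create_pagelist() now describes the buffer to the firmware as an array of 32-bit words, each holding a page-aligned bus address in the upper bits and (number of pages - 1) in the low bits, and it merges scatterlist entries whose blocks turn out to be physically adjacent. The small userspace sketch below reproduces just that encode-and-merge step with 4 KiB pages; the addresses fed in from main() are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Append one DMA block (page-aligned addr, len in whole pages) to addrs[],
 * merging it with the previous entry when the two blocks are contiguous,
 * exactly as the combining loop in create_pagelist() does.
 */
static unsigned int add_block(uint32_t *addrs, unsigned int k,
                              uint32_t addr, uint32_t len)
{
        if (k > 0 &&
            ((addrs[k - 1] & PAGE_MASK) |
             ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) == addr)
                addrs[k - 1] += (len >> PAGE_SHIFT);    /* extend previous run */
        else
                addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);  /* new run */
        return k;
}

int main(void)
{
        uint32_t addrs[4];
        unsigned int k = 0;

        /* two adjacent pages followed by a separate two-page block
         * (illustrative addresses only)
         */
        k = add_block(addrs, k, 0x10000000, PAGE_SIZE);
        k = add_block(addrs, k, 0x10001000, PAGE_SIZE);
        k = add_block(addrs, k, 0x20000000, 2 * PAGE_SIZE);

        for (unsigned int i = 0; i < k; i++)
                printf("addrs[%u] = 0x%08x\n", i, addrs[i]);
        return 0;       /* expect 0x10000001 and 0x20000001 */
}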
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 7b6cd4d80621..0d987898b4f8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -190,8 +190,8 @@ static const char *const ioctl_names[] = {
190 "CLOSE_DELIVERED" 190 "CLOSE_DELIVERED"
191}; 191};
192 192
193vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) == 193vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
194 (VCHIQ_IOC_MAX + 1)); 194 (VCHIQ_IOC_MAX + 1));
195 195
196static void 196static void
197dump_phys_mem(void *virt_addr, uint32_t num_bytes); 197dump_phys_mem(void *virt_addr, uint32_t num_bytes);
@@ -402,6 +402,107 @@ static void close_delivered(USER_SERVICE_T *user_service)
402 } 402 }
403} 403}
404 404
405struct vchiq_io_copy_callback_context {
406 VCHIQ_ELEMENT_T *current_element;
407 size_t current_element_offset;
408 unsigned long elements_to_go;
409 size_t current_offset;
410};
411
412static ssize_t
413vchiq_ioc_copy_element_data(
414 void *context,
415 void *dest,
416 size_t offset,
417 size_t maxsize)
418{
419 long res;
420 size_t bytes_this_round;
421 struct vchiq_io_copy_callback_context *copy_context =
422 (struct vchiq_io_copy_callback_context *)context;
423
424 if (offset != copy_context->current_offset)
425 return 0;
426
427 if (!copy_context->elements_to_go)
428 return 0;
429
430 /*
431 * Complex logic here to handle the case of 0 size elements
432 * in the middle of the array of elements.
433 *
434 * Need to skip over these 0 size elements.
435 */
436 while (1) {
437 bytes_this_round = min(copy_context->current_element->size -
438 copy_context->current_element_offset,
439 maxsize);
440
441 if (bytes_this_round)
442 break;
443
444 copy_context->elements_to_go--;
445 copy_context->current_element++;
446 copy_context->current_element_offset = 0;
447
448 if (!copy_context->elements_to_go)
449 return 0;
450 }
451
452 res = copy_from_user(dest,
453 copy_context->current_element->data +
454 copy_context->current_element_offset,
455 bytes_this_round);
456
457 if (res != 0)
458 return -EFAULT;
459
460 copy_context->current_element_offset += bytes_this_round;
461 copy_context->current_offset += bytes_this_round;
462
463 /*
464 * Check if done with current element, and if so advance to the next.
465 */
466 if (copy_context->current_element_offset ==
467 copy_context->current_element->size) {
468 copy_context->elements_to_go--;
469 copy_context->current_element++;
470 copy_context->current_element_offset = 0;
471 }
472
473 return bytes_this_round;
474}
475
476/**************************************************************************
477 *
478 * vchiq_ioc_queue_message
479 *
480 **************************************************************************/
481static VCHIQ_STATUS_T
482vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
483 VCHIQ_ELEMENT_T *elements,
484 unsigned long count)
485{
486 struct vchiq_io_copy_callback_context context;
487 unsigned long i;
488 size_t total_size = 0;
489
490 context.current_element = elements;
491 context.current_element_offset = 0;
492 context.elements_to_go = count;
493 context.current_offset = 0;
494
495 for (i = 0; i < count; i++) {
496 if (!elements[i].data && elements[i].size != 0)
497 return -EFAULT;
498
499 total_size += elements[i].size;
500 }
501
502 return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
503 &context, total_size);
504}
505
405/**************************************************************************** 506/****************************************************************************
406* 507*
407* vchiq_ioctl 508* vchiq_ioctl
@@ -418,8 +519,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
418 DEBUG_INITIALISE(g_state.local) 519 DEBUG_INITIALISE(g_state.local)
419 520
420 vchiq_log_trace(vchiq_arm_log_level, 521 vchiq_log_trace(vchiq_arm_log_level,
421 "vchiq_ioctl - instance %x, cmd %s, arg %lx", 522 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
422 (unsigned int)instance, 523 instance,
423 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && 524 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
424 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ? 525 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
425 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg); 526 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -453,7 +554,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
453 ret = -EINVAL; 554 ret = -EINVAL;
454 break; 555 break;
455 } 556 }
456 rc = mutex_lock_interruptible(&instance->state->mutex); 557 rc = mutex_lock_killable(&instance->state->mutex);
457 if (rc != 0) { 558 if (rc != 0) {
458 vchiq_log_error(vchiq_arm_log_level, 559 vchiq_log_error(vchiq_arm_log_level,
459 "vchiq: connect: could not lock mutex for " 560 "vchiq: connect: could not lock mutex for "
@@ -651,7 +752,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
651 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS]; 752 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
652 if (copy_from_user(elements, args.elements, 753 if (copy_from_user(elements, args.elements,
653 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0) 754 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
654 status = vchiq_queue_message 755 status = vchiq_ioc_queue_message
655 (args.handle, 756 (args.handle,
656 elements, args.count); 757 elements, args.count);
657 else 758 else
@@ -713,8 +814,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
713 break; 814 break;
714 } 815 }
715 vchiq_log_info(vchiq_arm_log_level, 816 vchiq_log_info(vchiq_arm_log_level,
716 "found bulk_waiter %x for pid %d", 817 "found bulk_waiter %pK for pid %d", waiter,
717 (unsigned int)waiter, current->pid); 818 current->pid);
718 args.userdata = &waiter->bulk_waiter; 819 args.userdata = &waiter->bulk_waiter;
719 } 820 }
720 status = vchiq_bulk_transfer 821 status = vchiq_bulk_transfer
@@ -743,8 +844,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
743 list_add(&waiter->list, &instance->bulk_waiter_list); 844 list_add(&waiter->list, &instance->bulk_waiter_list);
744 mutex_unlock(&instance->bulk_waiter_list_mutex); 845 mutex_unlock(&instance->bulk_waiter_list_mutex);
745 vchiq_log_info(vchiq_arm_log_level, 846 vchiq_log_info(vchiq_arm_log_level,
746 "saved bulk_waiter %x for pid %d", 847 "saved bulk_waiter %pK for pid %d",
747 (unsigned int)waiter, current->pid); 848 waiter, current->pid);
748 849
749 if (copy_to_user((void __user *) 850 if (copy_to_user((void __user *)
750 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *) 851 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
@@ -826,10 +927,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
826 if (args.msgbufsize < msglen) { 927 if (args.msgbufsize < msglen) {
827 vchiq_log_error( 928 vchiq_log_error(
828 vchiq_arm_log_level, 929 vchiq_arm_log_level,
829 "header %x: msgbufsize" 930 "header %pK: msgbufsize %x < msglen %x",
830 " %x < msglen %x", 931 header, args.msgbufsize,
831 (unsigned int)header,
832 args.msgbufsize,
833 msglen); 932 msglen);
834 WARN(1, "invalid message " 933 WARN(1, "invalid message "
835 "size\n"); 934 "size\n");
@@ -980,9 +1079,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
980 ret = -EFAULT; 1079 ret = -EFAULT;
981 } else { 1080 } else {
982 vchiq_log_error(vchiq_arm_log_level, 1081 vchiq_log_error(vchiq_arm_log_level,
983 "header %x: bufsize %x < size %x", 1082 "header %pK: bufsize %x < size %x",
984 (unsigned int)header, args.bufsize, 1083 header, args.bufsize, header->size);
985 header->size);
986 WARN(1, "invalid size\n"); 1084 WARN(1, "invalid size\n");
987 ret = -EMSGSIZE; 1085 ret = -EMSGSIZE;
988 } 1086 }
@@ -1284,9 +1382,8 @@ vchiq_release(struct inode *inode, struct file *file)
1284 list); 1382 list);
1285 list_del(pos); 1383 list_del(pos);
1286 vchiq_log_info(vchiq_arm_log_level, 1384 vchiq_log_info(vchiq_arm_log_level,
1287 "bulk_waiter - cleaned up %x " 1385 "bulk_waiter - cleaned up %pK for pid %d",
1288 "for pid %d", 1386 waiter, waiter->pid);
1289 (unsigned int)waiter, waiter->pid);
1290 kfree(waiter); 1387 kfree(waiter);
1291 } 1388 }
1292 } 1389 }
@@ -1385,9 +1482,8 @@ vchiq_dump_platform_instances(void *dump_context)
1385 instance = service->instance; 1482 instance = service->instance;
1386 if (instance && !instance->mark) { 1483 if (instance && !instance->mark) {
1387 len = snprintf(buf, sizeof(buf), 1484 len = snprintf(buf, sizeof(buf),
1388 "Instance %x: pid %d,%s completions " 1485 "Instance %pK: pid %d,%s completions %d/%d",
1389 "%d/%d", 1486 instance, instance->pid,
1390 (unsigned int)instance, instance->pid,
1391 instance->connected ? " connected, " : 1487 instance->connected ? " connected, " :
1392 "", 1488 "",
1393 instance->completion_insert - 1489 instance->completion_insert -
@@ -1415,8 +1511,7 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1415 char buf[80]; 1511 char buf[80];
1416 int len; 1512 int len;
1417 1513
1418 len = snprintf(buf, sizeof(buf), " instance %x", 1514 len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
1419 (unsigned int)service->instance);
1420 1515
1421 if ((service->base.callback == service_callback) && 1516 if ((service->base.callback == service_callback) &&
1422 user_service->is_vchi) { 1517 user_service->is_vchi) {
@@ -1473,8 +1568,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1473 } 1568 }
1474 1569
1475 down_read(&current->mm->mmap_sem); 1570 down_read(&current->mm->mmap_sem);
1476 rc = get_user_pages(current, /* task */ 1571 rc = get_user_pages(
1477 current->mm, /* mm */
1478 (unsigned long)virt_addr, /* start */ 1572 (unsigned long)virt_addr, /* start */
1479 num_pages, /* len */ 1573 num_pages, /* len */
1480 0, /* gup_flags */ 1574 0, /* gup_flags */
@@ -1485,6 +1579,12 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1485 prev_idx = -1; 1579 prev_idx = -1;
1486 page = NULL; 1580 page = NULL;
1487 1581
1582 if (rc < 0) {
1583 vchiq_log_error(vchiq_arm_log_level,
1584 "Failed to get user pages: %d\n", rc);
1585 goto out;
1586 }
1587
1488 while (offset < end_offset) { 1588 while (offset < end_offset) {
1489 1589
1490 int page_offset = offset % PAGE_SIZE; 1590 int page_offset = offset % PAGE_SIZE;
@@ -1508,11 +1608,13 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1508 1608
1509 offset += 16; 1609 offset += 16;
1510 } 1610 }
1611
1612out:
1511 if (page != NULL) 1613 if (page != NULL)
1512 kunmap(page); 1614 kunmap(page);
1513 1615
1514 for (page_idx = 0; page_idx < num_pages; page_idx++) 1616 for (page_idx = 0; page_idx < num_pages; page_idx++)
1515 page_cache_release(pages[page_idx]); 1617 put_page(pages[page_idx]);
1516 1618
1517 kfree(pages); 1619 kfree(pages);
1518} 1620}
@@ -1683,8 +1785,6 @@ exit:
1683VCHIQ_STATUS_T 1785VCHIQ_STATUS_T
1684vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) 1786vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1685{ 1787{
1686 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1687
1688 if (arm_state) { 1788 if (arm_state) {
1689 rwlock_init(&arm_state->susp_res_lock); 1789 rwlock_init(&arm_state->susp_res_lock);
1690 1790
@@ -1712,14 +1812,13 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1712 1812
1713 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS; 1813 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
1714 arm_state->suspend_timer_running = 0; 1814 arm_state->suspend_timer_running = 0;
1715 init_timer(&arm_state->suspend_timer); 1815 setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
1716 arm_state->suspend_timer.data = (unsigned long)(state); 1816 (unsigned long)(state));
1717 arm_state->suspend_timer.function = suspend_timer_callback;
1718 1817
1719 arm_state->first_connect = 0; 1818 arm_state->first_connect = 0;
1720 1819
1721 } 1820 }
1722 return status; 1821 return VCHIQ_SUCCESS;
1723} 1822}
1724 1823
1725/* 1824/*
@@ -2032,20 +2131,20 @@ static void
2032output_timeout_error(VCHIQ_STATE_T *state) 2131output_timeout_error(VCHIQ_STATE_T *state)
2033{ 2132{
2034 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2133 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2035 char service_err[50] = ""; 2134 char err[50] = "";
2036 int vc_use_count = arm_state->videocore_use_count; 2135 int vc_use_count = arm_state->videocore_use_count;
2037 int active_services = state->unused_service; 2136 int active_services = state->unused_service;
2038 int i; 2137 int i;
2039 2138
2040 if (!arm_state->videocore_use_count) { 2139 if (!arm_state->videocore_use_count) {
2041 snprintf(service_err, 50, " Videocore usecount is 0"); 2140 snprintf(err, sizeof(err), " Videocore usecount is 0");
2042 goto output_msg; 2141 goto output_msg;
2043 } 2142 }
2044 for (i = 0; i < active_services; i++) { 2143 for (i = 0; i < active_services; i++) {
2045 VCHIQ_SERVICE_T *service_ptr = state->services[i]; 2144 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2046 if (service_ptr && service_ptr->service_use_count && 2145 if (service_ptr && service_ptr->service_use_count &&
2047 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) { 2146 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2048 snprintf(service_err, 50, " %c%c%c%c(%d) service has " 2147 snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2049 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS( 2148 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2050 service_ptr->base.fourcc), 2149 service_ptr->base.fourcc),
2051 service_ptr->client_id, 2150 service_ptr->client_id,
@@ -2059,7 +2158,7 @@ output_timeout_error(VCHIQ_STATE_T *state)
2059output_msg: 2158output_msg:
2060 vchiq_log_error(vchiq_susp_log_level, 2159 vchiq_log_error(vchiq_susp_log_level,
2061 "timed out waiting for vc suspend (%d).%s", 2160 "timed out waiting for vc suspend (%d).%s",
2062 arm_state->autosuspend_override, service_err); 2161 arm_state->autosuspend_override, err);
2063 2162
2064} 2163}
2065 2164
@@ -2780,7 +2879,7 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
2780 &vchiq_keepalive_thread_func, 2879 &vchiq_keepalive_thread_func,
2781 (void *)state, 2880 (void *)state,
2782 threadname); 2881 threadname);
2783 if (arm_state->ka_thread == NULL) { 2882 if (IS_ERR(arm_state->ka_thread)) {
2784 vchiq_log_error(vchiq_susp_log_level, 2883 vchiq_log_error(vchiq_susp_log_level,
2785 "vchiq: FATAL: couldn't create thread %s", 2884 "vchiq: FATAL: couldn't create thread %s",
2786 threadname); 2885 threadname);
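The keep-alive hunk above fixes a common error-handling mistake: kthread_create() never returns NULL on failure, it returns an ERR_PTR()-encoded error, so the result has to be checked with IS_ERR(). A minimal sketch of the corrected pattern follows; the thread function and its name string are hypothetical stand-ins, not code from this driver.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical worker loop standing in for vchiq_keepalive_thread_func(). */
static int example_thread_func(void *data)
{
        while (!kthread_should_stop())
                cond_resched();
        return 0;
}

static int start_example_thread(void *state)
{
        struct task_struct *task;

        task = kthread_create(example_thread_func, state, "example/0");
        if (IS_ERR(task))               /* never NULL on failure */
                return PTR_ERR(task);

        wake_up_process(task);
        return 0;
}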
@@ -2800,28 +2899,27 @@ static int vchiq_probe(struct platform_device *pdev)
2800 void *ptr_err; 2899 void *ptr_err;
2801 2900
2802 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0); 2901 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
2803/* Remove comment when booting without Device Tree is no longer supported
2804 if (!fw_node) { 2902 if (!fw_node) {
2805 dev_err(&pdev->dev, "Missing firmware node\n"); 2903 dev_err(&pdev->dev, "Missing firmware node\n");
2806 return -ENOENT; 2904 return -ENOENT;
2807 } 2905 }
2808*/ 2906
2809 fw = rpi_firmware_get(fw_node); 2907 fw = rpi_firmware_get(fw_node);
2908 of_node_put(fw_node);
2810 if (!fw) 2909 if (!fw)
2811 return -EPROBE_DEFER; 2910 return -EPROBE_DEFER;
2812 2911
2813 platform_set_drvdata(pdev, fw); 2912 platform_set_drvdata(pdev, fw);
2814 2913
2815 /* create debugfs entries */ 2914 err = vchiq_platform_init(pdev, &g_state);
2816 err = vchiq_debugfs_init();
2817 if (err != 0) 2915 if (err != 0)
2818 goto failed_debugfs_init; 2916 goto failed_platform_init;
2819 2917
2820 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME); 2918 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2821 if (err != 0) { 2919 if (err != 0) {
2822 vchiq_log_error(vchiq_arm_log_level, 2920 vchiq_log_error(vchiq_arm_log_level,
2823 "Unable to allocate device number"); 2921 "Unable to allocate device number");
2824 goto failed_alloc_chrdev; 2922 goto failed_platform_init;
2825 } 2923 }
2826 cdev_init(&vchiq_cdev, &vchiq_fops); 2924 cdev_init(&vchiq_cdev, &vchiq_fops);
2827 vchiq_cdev.owner = THIS_MODULE; 2925 vchiq_cdev.owner = THIS_MODULE;
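The added of_node_put() balances the reference taken by of_parse_phandle(), and the previously commented-out NULL check is now live since booting without Device Tree is no longer supported. The resulting probe prologue, in essence:

        fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
        if (!fw_node) {
                dev_err(&pdev->dev, "Missing firmware node\n");
                return -ENOENT;
        }

        fw = rpi_firmware_get(fw_node);
        of_node_put(fw_node);   /* drop the node reference once the firmware handle is held */
        if (!fw)
                return -EPROBE_DEFER;
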
@@ -2844,9 +2942,10 @@ static int vchiq_probe(struct platform_device *pdev)
2844 if (IS_ERR(ptr_err)) 2942 if (IS_ERR(ptr_err))
2845 goto failed_device_create; 2943 goto failed_device_create;
2846 2944
2847 err = vchiq_platform_init(pdev, &g_state); 2945 /* create debugfs entries */
2946 err = vchiq_debugfs_init();
2848 if (err != 0) 2947 if (err != 0)
2849 goto failed_platform_init; 2948 goto failed_debugfs_init;
2850 2949
2851 vchiq_log_info(vchiq_arm_log_level, 2950 vchiq_log_info(vchiq_arm_log_level,
2852 "vchiq: initialised - version %d (min %d), device %d.%d", 2951 "vchiq: initialised - version %d (min %d), device %d.%d",
@@ -2855,7 +2954,7 @@ static int vchiq_probe(struct platform_device *pdev)
2855 2954
2856 return 0; 2955 return 0;
2857 2956
2858failed_platform_init: 2957failed_debugfs_init:
2859 device_destroy(vchiq_class, vchiq_devid); 2958 device_destroy(vchiq_class, vchiq_devid);
2860failed_device_create: 2959failed_device_create:
2861 class_destroy(vchiq_class); 2960 class_destroy(vchiq_class);
@@ -2864,15 +2963,14 @@ failed_class_create:
2864 err = PTR_ERR(ptr_err); 2963 err = PTR_ERR(ptr_err);
2865failed_cdev_add: 2964failed_cdev_add:
2866 unregister_chrdev_region(vchiq_devid, 1); 2965 unregister_chrdev_region(vchiq_devid, 1);
2867failed_alloc_chrdev: 2966failed_platform_init:
2868 vchiq_debugfs_deinit();
2869failed_debugfs_init:
2870 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq"); 2967 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2871 return err; 2968 return err;
2872} 2969}
2873 2970
2874static int vchiq_remove(struct platform_device *pdev) 2971static int vchiq_remove(struct platform_device *pdev)
2875{ 2972{
2973 vchiq_debugfs_deinit();
2876 device_destroy(vchiq_class, vchiq_devid); 2974 device_destroy(vchiq_class, vchiq_devid);
2877 class_destroy(vchiq_class); 2975 class_destroy(vchiq_class);
2878 cdev_del(&vchiq_cdev); 2976 cdev_del(&vchiq_cdev);
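With vchiq_platform_init() moved to the top of probe and the debugfs registration to the bottom, the failure labels now unwind strictly in reverse order of initialisation, and vchiq_remove() gains the matching vchiq_debugfs_deinit(). A compact sketch of that reverse-order goto unwinding, using placeholder init/teardown helpers:

        static int probe_sketch(void)
        {
                int err;

                err = init_platform();          /* first thing set up ... */
                if (err)
                        return err;
                err = init_chardev();
                if (err)
                        goto undo_platform;
                err = init_debugfs();           /* ... last thing set up */
                if (err)
                        goto undo_chardev;
                return 0;

        undo_chardev:
                teardown_chardev();             /* last set up, first torn down */
        undo_platform:
                teardown_platform();
                return err;
        }
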
@@ -2890,7 +2988,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match);
2890static struct platform_driver vchiq_driver = { 2988static struct platform_driver vchiq_driver = {
2891 .driver = { 2989 .driver = {
2892 .name = "bcm2835_vchiq", 2990 .name = "bcm2835_vchiq",
2893 .owner = THIS_MODULE,
2894 .of_match_table = vchiq_of_match, 2991 .of_match_table = vchiq_of_match,
2895 }, 2992 },
2896 .probe = vchiq_probe, 2993 .probe = vchiq_probe,
@@ -2899,4 +2996,5 @@ static struct platform_driver vchiq_driver = {
2899module_platform_driver(vchiq_driver); 2996module_platform_driver(vchiq_driver);
2900 2997
2901MODULE_LICENSE("GPL"); 2998MODULE_LICENSE("GPL");
2999MODULE_DESCRIPTION("Videocore VCHIQ driver");
2902MODULE_AUTHOR("Broadcom Corporation"); 3000MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 5efc62ffb2f5..7ea29665bd0c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -72,7 +72,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
72{ 72{
73 connected_init(); 73 connected_init();
74 74
75 if (mutex_lock_interruptible(&g_connected_mutex) != 0) 75 if (mutex_lock_killable(&g_connected_mutex) != 0)
76 return; 76 return;
77 77
78 if (g_connected) 78 if (g_connected)
@@ -107,7 +107,7 @@ void vchiq_call_connected_callbacks(void)
107 107
108 connected_init(); 108 connected_init();
109 109
110 if (mutex_lock_interruptible(&g_connected_mutex) != 0) 110 if (mutex_lock_killable(&g_connected_mutex) != 0)
111 return; 111 return;
112 112
113 for (i = 0; i < g_num_deferred_callbacks; i++) 113 for (i = 0; i < g_num_deferred_callbacks; i++)
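mutex_lock_killable() sleeps exactly like mutex_lock_interruptible() but is woken only by fatal signals, so an ordinary signal to the caller can no longer abort the lock and skip the deferred-callback handling. Usage is unchanged apart from the helper name:

        if (mutex_lock_killable(&g_connected_mutex) != 0)
                return;         /* only reached if the task is being killed */

        /* ... critical section ... */

        mutex_unlock(&g_connected_mutex);
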
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 2c98da4307df..028e90bc1cdc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -296,12 +296,13 @@ lock_service(VCHIQ_SERVICE_T *service)
296void 296void
297unlock_service(VCHIQ_SERVICE_T *service) 297unlock_service(VCHIQ_SERVICE_T *service)
298{ 298{
299 VCHIQ_STATE_T *state = service->state;
300 spin_lock(&service_spinlock); 299 spin_lock(&service_spinlock);
301 BUG_ON(!service || (service->ref_count == 0)); 300 BUG_ON(!service || (service->ref_count == 0));
302 if (service && service->ref_count) { 301 if (service && service->ref_count) {
303 service->ref_count--; 302 service->ref_count--;
304 if (!service->ref_count) { 303 if (!service->ref_count) {
304 VCHIQ_STATE_T *state = service->state;
305
305 BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); 306 BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
306 state->services[service->localport] = NULL; 307 state->services[service->localport] = NULL;
307 } else 308 } else
@@ -380,9 +381,9 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
380 VCHIQ_HEADER_T *header, void *bulk_userdata) 381 VCHIQ_HEADER_T *header, void *bulk_userdata)
381{ 382{
382 VCHIQ_STATUS_T status; 383 VCHIQ_STATUS_T status;
383 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)", 384 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
384 service->state->id, service->localport, reason_names[reason], 385 service->state->id, service->localport, reason_names[reason],
385 (unsigned int)header, (unsigned int)bulk_userdata); 386 header, bulk_userdata);
386 status = service->base.callback(reason, header, service->handle, 387 status = service->base.callback(reason, header, service->handle,
387 bulk_userdata); 388 bulk_userdata);
388 if (status == VCHIQ_ERROR) { 389 if (status == VCHIQ_ERROR) {
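The %pK conversions throughout this file replace casts of pointers to unsigned int in log messages; that cast truncates addresses on 64-bit builds, while %pK prints the full pointer width and respects kptr_restrict so unprivileged readers do not see raw kernel addresses. A minimal before/after sketch (the log call is illustrative):

        /* old: truncates on 64-bit and exposes the raw address */
        vchiq_log_trace(vchiq_core_log_level, "header %x", (unsigned int)header);

        /* new: full width, subject to kptr_restrict */
        vchiq_log_trace(vchiq_core_log_level, "header %pK", header);
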
@@ -406,28 +407,24 @@ vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
406} 407}
407 408
408static inline void 409static inline void
409remote_event_create(REMOTE_EVENT_T *event) 410remote_event_create(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
410{ 411{
411 event->armed = 0; 412 event->armed = 0;
412 /* Don't clear the 'fired' flag because it may already have been set 413 /* Don't clear the 'fired' flag because it may already have been set
413 ** by the other side. */ 414 ** by the other side. */
414 sema_init(event->event, 0); 415 sema_init((struct semaphore *)((char *)state + event->event), 0);
415}
416
417static inline void
418remote_event_destroy(REMOTE_EVENT_T *event)
419{
420 (void)event;
421} 416}
422 417
423static inline int 418static inline int
424remote_event_wait(REMOTE_EVENT_T *event) 419remote_event_wait(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
425{ 420{
426 if (!event->fired) { 421 if (!event->fired) {
427 event->armed = 1; 422 event->armed = 1;
428 dsb(); 423 dsb(sy);
429 if (!event->fired) { 424 if (!event->fired) {
430 if (down_interruptible(event->event) != 0) { 425 if (down_interruptible(
426 (struct semaphore *)
427 ((char *)state + event->event)) != 0) {
431 event->armed = 0; 428 event->armed = 0;
432 return 0; 429 return 0;
433 } 430 }
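The remote_event_* helpers now take the VCHIQ_STATE_T and interpret event->event as a byte offset into that state rather than a raw semaphore pointer: REMOTE_EVENT_T lives in memory shared with the 32-bit VideoCore, where a 64-bit kernel pointer no longer fits. A sketch of the two halves of the scheme (field names follow later hunks in this patch):

        /* at init time, store an offset rather than an address */
        local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);

        /* at use time, rebuild the semaphore address from state + offset */
        struct semaphore *sem =
                (struct semaphore *)((char *)state + event->event);
        up(sem);
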
@@ -441,34 +438,34 @@ remote_event_wait(REMOTE_EVENT_T *event)
441} 438}
442 439
443static inline void 440static inline void
444remote_event_signal_local(REMOTE_EVENT_T *event) 441remote_event_signal_local(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
445{ 442{
446 event->armed = 0; 443 event->armed = 0;
447 up(event->event); 444 up((struct semaphore *)((char *)state + event->event));
448} 445}
449 446
450static inline void 447static inline void
451remote_event_poll(REMOTE_EVENT_T *event) 448remote_event_poll(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
452{ 449{
453 if (event->fired && event->armed) 450 if (event->fired && event->armed)
454 remote_event_signal_local(event); 451 remote_event_signal_local(state, event);
455} 452}
456 453
457void 454void
458remote_event_pollall(VCHIQ_STATE_T *state) 455remote_event_pollall(VCHIQ_STATE_T *state)
459{ 456{
460 remote_event_poll(&state->local->sync_trigger); 457 remote_event_poll(state, &state->local->sync_trigger);
461 remote_event_poll(&state->local->sync_release); 458 remote_event_poll(state, &state->local->sync_release);
462 remote_event_poll(&state->local->trigger); 459 remote_event_poll(state, &state->local->trigger);
463 remote_event_poll(&state->local->recycle); 460 remote_event_poll(state, &state->local->recycle);
464} 461}
465 462
466/* Round up message sizes so that any space at the end of a slot is always big 463/* Round up message sizes so that any space at the end of a slot is always big
467** enough for a header. This relies on header size being a power of two, which 464** enough for a header. This relies on header size being a power of two, which
468** has been verified earlier by a static assertion. */ 465** has been verified earlier by a static assertion. */
469 466
470static inline unsigned int 467static inline size_t
471calc_stride(unsigned int size) 468calc_stride(size_t size)
472{ 469{
473 /* Allow room for the header */ 470 /* Allow room for the header */
474 size += sizeof(VCHIQ_HEADER_T); 471 size += sizeof(VCHIQ_HEADER_T);
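calc_stride() depends on sizeof(VCHIQ_HEADER_T) being a power of two (checked earlier by a static assertion) so that message sizes can be rounded up with a mask. The rounding itself is outside this hunk; the sketch below shows the standard idiom it relies on, not a verbatim quote of the driver:

        static inline size_t calc_stride_sketch(size_t size)
        {
                size += sizeof(VCHIQ_HEADER_T);         /* room for the header */

                /* round up to the next multiple of the power-of-two header size */
                return (size + sizeof(VCHIQ_HEADER_T) - 1) &
                        ~(sizeof(VCHIQ_HEADER_T) - 1);
        }
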
@@ -541,13 +538,13 @@ request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
541 wmb(); 538 wmb();
542 539
543 /* ... and ensure the slot handler runs. */ 540 /* ... and ensure the slot handler runs. */
544 remote_event_signal_local(&state->local->trigger); 541 remote_event_signal_local(state, &state->local->trigger);
545} 542}
546 543
547/* Called from queue_message, by the slot handler and application threads, 544/* Called from queue_message, by the slot handler and application threads,
548** with slot_mutex held */ 545** with slot_mutex held */
549static VCHIQ_HEADER_T * 546static VCHIQ_HEADER_T *
550reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking) 547reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
551{ 548{
552 VCHIQ_SHARED_STATE_T *local = state->local; 549 VCHIQ_SHARED_STATE_T *local = state->local;
553 int tx_pos = state->local_tx_pos; 550 int tx_pos = state->local_tx_pos;
@@ -626,8 +623,8 @@ process_free_queue(VCHIQ_STATE_T *state)
626 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); 623 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
627 int data_found = 0; 624 int data_found = 0;
628 625
629 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x", 626 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
630 state->id, slot_index, (unsigned int)data, 627 state->id, slot_index, data,
631 local->slot_queue_recycle, slot_queue_available); 628 local->slot_queue_recycle, slot_queue_available);
632 629
633 /* Initialise the bitmask for services which have used this 630 /* Initialise the bitmask for services which have used this
@@ -659,16 +656,10 @@ process_free_queue(VCHIQ_STATE_T *state)
659 up(&service_quota->quota_event); 656 up(&service_quota->quota_event);
660 else if (count == 0) { 657 else if (count == 0) {
661 vchiq_log_error(vchiq_core_log_level, 658 vchiq_log_error(vchiq_core_log_level,
662 "service %d " 659 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
663 "message_use_count=%d "
664 "(header %x, msgid %x, "
665 "header->msgid %x, "
666 "header->size %x)",
667 port, 660 port,
668 service_quota-> 661 service_quota->message_use_count,
669 message_use_count, 662 header, msgid, header->msgid,
670 (unsigned int)header, msgid,
671 header->msgid,
672 header->size); 663 header->size);
673 WARN(1, "invalid message use count\n"); 664 WARN(1, "invalid message use count\n");
674 } 665 }
@@ -690,26 +681,16 @@ process_free_queue(VCHIQ_STATE_T *state)
690 up(&service_quota->quota_event); 681 up(&service_quota->quota_event);
691 vchiq_log_trace( 682 vchiq_log_trace(
692 vchiq_core_log_level, 683 vchiq_core_log_level,
693 "%d: pfq:%d %x@%x - " 684 "%d: pfq:%d %x@%pK - slot_use->%d",
694 "slot_use->%d",
695 state->id, port, 685 state->id, port,
696 header->size, 686 header->size, header,
697 (unsigned int)header,
698 count - 1); 687 count - 1);
699 } else { 688 } else {
700 vchiq_log_error( 689 vchiq_log_error(
701 vchiq_core_log_level, 690 vchiq_core_log_level,
702 "service %d " 691 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
703 "slot_use_count" 692 port, count, header,
704 "=%d (header %x" 693 msgid, header->msgid,
705 ", msgid %x, "
706 "header->msgid"
707 " %x, header->"
708 "size %x)",
709 port, count,
710 (unsigned int)header,
711 msgid,
712 header->msgid,
713 header->size); 694 header->size);
714 WARN(1, "bad slot use count\n"); 695 WARN(1, "bad slot use count\n");
715 } 696 }
@@ -721,10 +702,9 @@ process_free_queue(VCHIQ_STATE_T *state)
721 pos += calc_stride(header->size); 702 pos += calc_stride(header->size);
722 if (pos > VCHIQ_SLOT_SIZE) { 703 if (pos > VCHIQ_SLOT_SIZE) {
723 vchiq_log_error(vchiq_core_log_level, 704 vchiq_log_error(vchiq_core_log_level,
724 "pfq - pos %x: header %x, msgid %x, " 705 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
725 "header->msgid %x, header->size %x", 706 pos, header, msgid, header->msgid,
726 pos, (unsigned int)header, msgid, 707 header->size);
727 header->msgid, header->size);
728 WARN(1, "invalid slot position\n"); 708 WARN(1, "invalid slot position\n");
729 } 709 }
730 } 710 }
@@ -746,18 +726,66 @@ process_free_queue(VCHIQ_STATE_T *state)
746 } 726 }
747} 727}
748 728
729static ssize_t
730memcpy_copy_callback(
731 void *context, void *dest,
732 size_t offset, size_t maxsize)
733{
734 void *src = context;
735
736 memcpy(dest + offset, src + offset, maxsize);
737 return maxsize;
738}
739
740static ssize_t
741copy_message_data(
742 ssize_t (*copy_callback)(void *context, void *dest,
743 size_t offset, size_t maxsize),
744 void *context,
745 void *dest,
746 size_t size)
747{
748 size_t pos = 0;
749
750 while (pos < size) {
751 ssize_t callback_result;
752 size_t max_bytes = size - pos;
753
754 callback_result =
755 copy_callback(context, dest + pos,
756 pos, max_bytes);
757
758 if (callback_result < 0)
759 return callback_result;
760
761 if (!callback_result)
762 return -EIO;
763
764 if (callback_result > max_bytes)
765 return -EIO;
766
767 pos += callback_result;
768 }
769
770 return size;
771}
772
749/* Called by the slot handler and application threads */ 773/* Called by the slot handler and application threads */
750static VCHIQ_STATUS_T 774static VCHIQ_STATUS_T
751queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, 775queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
752 int msgid, const VCHIQ_ELEMENT_T *elements, 776 int msgid,
753 int count, int size, int flags) 777 ssize_t (*copy_callback)(void *context, void *dest,
778 size_t offset, size_t maxsize),
779 void *context,
780 size_t size,
781 int flags)
754{ 782{
755 VCHIQ_SHARED_STATE_T *local; 783 VCHIQ_SHARED_STATE_T *local;
756 VCHIQ_SERVICE_QUOTA_T *service_quota = NULL; 784 VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
757 VCHIQ_HEADER_T *header; 785 VCHIQ_HEADER_T *header;
758 int type = VCHIQ_MSG_TYPE(msgid); 786 int type = VCHIQ_MSG_TYPE(msgid);
759 787
760 unsigned int stride; 788 size_t stride;
761 789
762 local = state->local; 790 local = state->local;
763 791
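queue_message() now takes a copy callback plus a context instead of a VCHIQ_ELEMENT_T array: copy_message_data() keeps asking the callback to fill header->data until size bytes have been produced, treating a zero or oversized return as -EIO, and memcpy_copy_callback() is the trivial in-kernel case. A purely illustrative second callback for a userspace source (not part of this series) might look like:

        /* illustrative only: produce message bytes from a __user buffer */
        static ssize_t user_copy_callback(void *context, void *dest,
                                          size_t offset, size_t maxsize)
        {
                const char __user *src = context;

                if (copy_from_user(dest, src + offset, maxsize))
                        return -EFAULT;
                return maxsize;         /* whole remaining chunk in one call */
        }

        /* queue_message(state, service, msgid,
         *               user_copy_callback, (void *)user_ptr, size, flags); */

Callers inside this file pass memcpy_copy_callback with a kernel buffer as the context, as the notify_bulks(), parse_open() and vchiq_open_service_internal() hunks below show.
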
@@ -766,7 +794,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
766 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE)); 794 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
767 795
768 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && 796 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
769 (mutex_lock_interruptible(&state->slot_mutex) != 0)) 797 (mutex_lock_killable(&state->slot_mutex) != 0))
770 return VCHIQ_RETRY; 798 return VCHIQ_RETRY;
771 799
772 if (type == VCHIQ_MSG_DATA) { 800 if (type == VCHIQ_MSG_DATA) {
@@ -822,7 +850,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
822 service_quota->slot_quota))) { 850 service_quota->slot_quota))) {
823 spin_unlock(&quota_spinlock); 851 spin_unlock(&quota_spinlock);
824 vchiq_log_trace(vchiq_core_log_level, 852 vchiq_log_trace(vchiq_core_log_level,
825 "%d: qm:%d %s,%x - quota stall " 853 "%d: qm:%d %s,%zx - quota stall "
826 "(msg %d, slot %d)", 854 "(msg %d, slot %d)",
827 state->id, service->localport, 855 state->id, service->localport,
828 msg_type_str(type), size, 856 msg_type_str(type), size,
@@ -835,7 +863,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
835 return VCHIQ_RETRY; 863 return VCHIQ_RETRY;
836 if (service->closing) 864 if (service->closing)
837 return VCHIQ_ERROR; 865 return VCHIQ_ERROR;
838 if (mutex_lock_interruptible(&state->slot_mutex) != 0) 866 if (mutex_lock_killable(&state->slot_mutex) != 0)
839 return VCHIQ_RETRY; 867 return VCHIQ_RETRY;
840 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { 868 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
841 /* The service has been closed */ 869 /* The service has been closed */
@@ -863,43 +891,37 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
863 } 891 }
864 892
865 if (type == VCHIQ_MSG_DATA) { 893 if (type == VCHIQ_MSG_DATA) {
866 int i, pos; 894 ssize_t callback_result;
867 int tx_end_index; 895 int tx_end_index;
868 int slot_use_count; 896 int slot_use_count;
869 897
870 vchiq_log_info(vchiq_core_log_level, 898 vchiq_log_info(vchiq_core_log_level,
871 "%d: qm %s@%x,%x (%d->%d)", 899 "%d: qm %s@%pK,%zx (%d->%d)",
872 state->id, 900 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
873 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 901 header, size, VCHIQ_MSG_SRCPORT(msgid),
874 (unsigned int)header, size,
875 VCHIQ_MSG_SRCPORT(msgid),
876 VCHIQ_MSG_DSTPORT(msgid)); 902 VCHIQ_MSG_DSTPORT(msgid));
877 903
878 BUG_ON(!service); 904 BUG_ON(!service);
879 BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK | 905 BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
880 QMFLAGS_NO_MUTEX_UNLOCK)) != 0); 906 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
881 907
882 for (i = 0, pos = 0; i < (unsigned int)count; 908 callback_result =
883 pos += elements[i++].size) 909 copy_message_data(copy_callback, context,
884 if (elements[i].size) { 910 header->data, size);
885 if (vchiq_copy_from_user 911
886 (header->data + pos, elements[i].data, 912 if (callback_result < 0) {
887 (size_t) elements[i].size) != 913 mutex_unlock(&state->slot_mutex);
888 VCHIQ_SUCCESS) { 914 VCHIQ_SERVICE_STATS_INC(service,
889 mutex_unlock(&state->slot_mutex);
890 VCHIQ_SERVICE_STATS_INC(service,
891 error_count); 915 error_count);
892 return VCHIQ_ERROR; 916 return VCHIQ_ERROR;
893 } 917 }
894 if (i == 0) { 918
895 if (SRVTRACE_ENABLED(service, 919 if (SRVTRACE_ENABLED(service,
896 VCHIQ_LOG_INFO)) 920 VCHIQ_LOG_INFO))
897 vchiq_log_dump_mem("Sent", 0, 921 vchiq_log_dump_mem("Sent", 0,
898 header->data + pos, 922 header->data,
899 min(64u, 923 min((size_t)64,
900 elements[0].size)); 924 (size_t)callback_result));
901 }
902 }
903 925
904 spin_lock(&quota_spinlock); 926 spin_lock(&quota_spinlock);
905 service_quota->message_use_count++; 927 service_quota->message_use_count++;
@@ -927,7 +949,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
927 949
928 if (slot_use_count) 950 if (slot_use_count)
929 vchiq_log_trace(vchiq_core_log_level, 951 vchiq_log_trace(vchiq_core_log_level,
930 "%d: qm:%d %s,%x - slot_use->%d (hdr %p)", 952 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
931 state->id, service->localport, 953 state->id, service->localport,
932 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size, 954 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
933 slot_use_count, header); 955 slot_use_count, header);
@@ -936,15 +958,22 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
936 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 958 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
937 } else { 959 } else {
938 vchiq_log_info(vchiq_core_log_level, 960 vchiq_log_info(vchiq_core_log_level,
939 "%d: qm %s@%x,%x (%d->%d)", state->id, 961 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
940 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 962 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
941 (unsigned int)header, size, 963 header, size, VCHIQ_MSG_SRCPORT(msgid),
942 VCHIQ_MSG_SRCPORT(msgid),
943 VCHIQ_MSG_DSTPORT(msgid)); 964 VCHIQ_MSG_DSTPORT(msgid));
944 if (size != 0) { 965 if (size != 0) {
945 WARN_ON(!((count == 1) && (size == elements[0].size))); 966 /* It is assumed for now that this code path
946 memcpy(header->data, elements[0].data, 967 * only happens from calls inside this file.
947 elements[0].size); 968 *
969 * External callers are through the vchiq_queue_message
970 * path which always sets the type to be VCHIQ_MSG_DATA
971 *
972 * At first glance this appears to be correct but
973 * more review is needed.
974 */
975 copy_message_data(copy_callback, context,
976 header->data, size);
948 } 977 }
949 VCHIQ_STATS_INC(state, ctrl_tx_count); 978 VCHIQ_STATS_INC(state, ctrl_tx_count);
950 } 979 }
@@ -960,7 +989,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
960 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); 989 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
961 990
962 vchiq_log_info(SRVTRACE_LEVEL(service), 991 vchiq_log_info(SRVTRACE_LEVEL(service),
963 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d", 992 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
964 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 993 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
965 VCHIQ_MSG_TYPE(msgid), 994 VCHIQ_MSG_TYPE(msgid),
966 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), 995 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
@@ -990,19 +1019,24 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
990/* Called by the slot handler and application threads */ 1019/* Called by the slot handler and application threads */
991static VCHIQ_STATUS_T 1020static VCHIQ_STATUS_T
992queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, 1021queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
993 int msgid, const VCHIQ_ELEMENT_T *elements, 1022 int msgid,
994 int count, int size, int is_blocking) 1023 ssize_t (*copy_callback)(void *context, void *dest,
1024 size_t offset, size_t maxsize),
1025 void *context,
1026 int size,
1027 int is_blocking)
995{ 1028{
996 VCHIQ_SHARED_STATE_T *local; 1029 VCHIQ_SHARED_STATE_T *local;
997 VCHIQ_HEADER_T *header; 1030 VCHIQ_HEADER_T *header;
1031 ssize_t callback_result;
998 1032
999 local = state->local; 1033 local = state->local;
1000 1034
1001 if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) && 1035 if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
1002 (mutex_lock_interruptible(&state->sync_mutex) != 0)) 1036 (mutex_lock_killable(&state->sync_mutex) != 0))
1003 return VCHIQ_RETRY; 1037 return VCHIQ_RETRY;
1004 1038
1005 remote_event_wait(&local->sync_release); 1039 remote_event_wait(state, &local->sync_release);
1006 1040
1007 rmb(); 1041 rmb();
1008 1042
@@ -1017,52 +1051,34 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
1017 state->id, oldmsgid); 1051 state->id, oldmsgid);
1018 } 1052 }
1019 1053
1020 if (service) { 1054 vchiq_log_info(vchiq_sync_log_level,
1021 int i, pos; 1055 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1056 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1057 header, size, VCHIQ_MSG_SRCPORT(msgid),
1058 VCHIQ_MSG_DSTPORT(msgid));
1022 1059
1023 vchiq_log_info(vchiq_sync_log_level, 1060 callback_result =
1024 "%d: qms %s@%x,%x (%d->%d)", state->id, 1061 copy_message_data(copy_callback, context,
1025 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1062 header->data, size);
1026 (unsigned int)header, size,
1027 VCHIQ_MSG_SRCPORT(msgid),
1028 VCHIQ_MSG_DSTPORT(msgid));
1029 1063
1030 for (i = 0, pos = 0; i < (unsigned int)count; 1064 if (callback_result < 0) {
1031 pos += elements[i++].size) 1065 mutex_unlock(&state->slot_mutex);
1032 if (elements[i].size) { 1066 VCHIQ_SERVICE_STATS_INC(service,
1033 if (vchiq_copy_from_user 1067 error_count);
1034 (header->data + pos, elements[i].data, 1068 return VCHIQ_ERROR;
1035 (size_t) elements[i].size) != 1069 }
1036 VCHIQ_SUCCESS) { 1070
1037 mutex_unlock(&state->sync_mutex); 1071 if (service) {
1038 VCHIQ_SERVICE_STATS_INC(service, 1072 if (SRVTRACE_ENABLED(service,
1039 error_count); 1073 VCHIQ_LOG_INFO))
1040 return VCHIQ_ERROR; 1074 vchiq_log_dump_mem("Sent", 0,
1041 } 1075 header->data,
1042 if (i == 0) { 1076 min((size_t)64,
1043 if (vchiq_sync_log_level >= 1077 (size_t)callback_result));
1044 VCHIQ_LOG_TRACE)
1045 vchiq_log_dump_mem("Sent Sync",
1046 0, header->data + pos,
1047 min(64u,
1048 elements[0].size));
1049 }
1050 }
1051 1078
1052 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 1079 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1053 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 1080 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1054 } else { 1081 } else {
1055 vchiq_log_info(vchiq_sync_log_level,
1056 "%d: qms %s@%x,%x (%d->%d)", state->id,
1057 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1058 (unsigned int)header, size,
1059 VCHIQ_MSG_SRCPORT(msgid),
1060 VCHIQ_MSG_DSTPORT(msgid));
1061 if (size != 0) {
1062 WARN_ON(!((count == 1) && (size == elements[0].size)));
1063 memcpy(header->data, elements[0].data,
1064 elements[0].size);
1065 }
1066 VCHIQ_STATS_INC(state, ctrl_tx_count); 1082 VCHIQ_STATS_INC(state, ctrl_tx_count);
1067 } 1083 }
1068 1084
@@ -1175,11 +1191,16 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
1175 VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE; 1191 VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
1176 int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport, 1192 int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
1177 service->remoteport); 1193 service->remoteport);
1178 VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
1179 /* Only reply to non-dummy bulk requests */ 1194 /* Only reply to non-dummy bulk requests */
1180 if (bulk->remote_data) { 1195 if (bulk->remote_data) {
1181 status = queue_message(service->state, NULL, 1196 status = queue_message(
1182 msgid, &element, 1, 4, 0); 1197 service->state,
1198 NULL,
1199 msgid,
1200 memcpy_copy_callback,
1201 &bulk->actual,
1202 4,
1203 0);
1183 if (status != VCHIQ_SUCCESS) 1204 if (status != VCHIQ_SUCCESS)
1184 break; 1205 break;
1185 } 1206 }
@@ -1344,7 +1365,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
1344 WARN_ON(!((int)(queue->local_insert - queue->process) > 0)); 1365 WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
1345 WARN_ON(!((int)(queue->remote_insert - queue->process) > 0)); 1366 WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
1346 1367
1347 rc = mutex_lock_interruptible(&state->bulk_transfer_mutex); 1368 rc = mutex_lock_killable(&state->bulk_transfer_mutex);
1348 if (rc != 0) 1369 if (rc != 0)
1349 break; 1370 break;
1350 1371
@@ -1356,26 +1377,22 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
1356 "Send Bulk to" : "Recv Bulk from"; 1377 "Send Bulk to" : "Recv Bulk from";
1357 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) 1378 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
1358 vchiq_log_info(SRVTRACE_LEVEL(service), 1379 vchiq_log_info(SRVTRACE_LEVEL(service),
1359 "%s %c%c%c%c d:%d len:%d %x<->%x", 1380 "%s %c%c%c%c d:%d len:%d %pK<->%pK",
1360 header, 1381 header,
1361 VCHIQ_FOURCC_AS_4CHARS( 1382 VCHIQ_FOURCC_AS_4CHARS(
1362 service->base.fourcc), 1383 service->base.fourcc),
1363 service->remoteport, 1384 service->remoteport, bulk->size,
1364 bulk->size, 1385 bulk->data, bulk->remote_data);
1365 (unsigned int)bulk->data,
1366 (unsigned int)bulk->remote_data);
1367 else 1386 else
1368 vchiq_log_info(SRVTRACE_LEVEL(service), 1387 vchiq_log_info(SRVTRACE_LEVEL(service),
1369 "%s %c%c%c%c d:%d ABORTED - tx len:%d," 1388 "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
1370 " rx len:%d %x<->%x", 1389 " rx len:%d %pK<->%pK",
1371 header, 1390 header,
1372 VCHIQ_FOURCC_AS_4CHARS( 1391 VCHIQ_FOURCC_AS_4CHARS(
1373 service->base.fourcc), 1392 service->base.fourcc),
1374 service->remoteport, 1393 service->remoteport,
1375 bulk->size, 1394 bulk->size, bulk->remote_size,
1376 bulk->remote_size, 1395 bulk->data, bulk->remote_data);
1377 (unsigned int)bulk->data,
1378 (unsigned int)bulk->remote_data);
1379 } 1396 }
1380 1397
1381 vchiq_complete_bulk(bulk); 1398 vchiq_complete_bulk(bulk);
@@ -1511,9 +1528,8 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
1511 1528
1512 fourcc = payload->fourcc; 1529 fourcc = payload->fourcc;
1513 vchiq_log_info(vchiq_core_log_level, 1530 vchiq_log_info(vchiq_core_log_level,
1514 "%d: prs OPEN@%x (%d->'%c%c%c%c')", 1531 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1515 state->id, (unsigned int)header, 1532 state->id, header, localport,
1516 localport,
1517 VCHIQ_FOURCC_AS_4CHARS(fourcc)); 1533 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1518 1534
1519 service = get_listening_service(state, fourcc); 1535 service = get_listening_service(state, fourcc);
@@ -1544,10 +1560,6 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
1544 struct vchiq_openack_payload ack_payload = { 1560 struct vchiq_openack_payload ack_payload = {
1545 service->version 1561 service->version
1546 }; 1562 };
1547 VCHIQ_ELEMENT_T body = {
1548 &ack_payload,
1549 sizeof(ack_payload)
1550 };
1551 1563
1552 if (state->version_common < 1564 if (state->version_common <
1553 VCHIQ_VERSION_SYNCHRONOUS_MODE) 1565 VCHIQ_VERSION_SYNCHRONOUS_MODE)
@@ -1557,21 +1569,28 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
1557 if (service->sync && 1569 if (service->sync &&
1558 (state->version_common >= 1570 (state->version_common >=
1559 VCHIQ_VERSION_SYNCHRONOUS_MODE)) { 1571 VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
1560 if (queue_message_sync(state, NULL, 1572 if (queue_message_sync(
1573 state,
1574 NULL,
1561 VCHIQ_MAKE_MSG( 1575 VCHIQ_MAKE_MSG(
1562 VCHIQ_MSG_OPENACK, 1576 VCHIQ_MSG_OPENACK,
1563 service->localport, 1577 service->localport,
1564 remoteport), 1578 remoteport),
1565 &body, 1, sizeof(ack_payload), 1579 memcpy_copy_callback,
1580 &ack_payload,
1581 sizeof(ack_payload),
1566 0) == VCHIQ_RETRY) 1582 0) == VCHIQ_RETRY)
1567 goto bail_not_ready; 1583 goto bail_not_ready;
1568 } else { 1584 } else {
1569 if (queue_message(state, NULL, 1585 if (queue_message(state,
1570 VCHIQ_MAKE_MSG( 1586 NULL,
1587 VCHIQ_MAKE_MSG(
1571 VCHIQ_MSG_OPENACK, 1588 VCHIQ_MSG_OPENACK,
1572 service->localport, 1589 service->localport,
1573 remoteport), 1590 remoteport),
1574 &body, 1, sizeof(ack_payload), 1591 memcpy_copy_callback,
1592 &ack_payload,
1593 sizeof(ack_payload),
1575 0) == VCHIQ_RETRY) 1594 0) == VCHIQ_RETRY)
1576 goto bail_not_ready; 1595 goto bail_not_ready;
1577 } 1596 }
@@ -1650,7 +1669,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1650 1669
1651 header = (VCHIQ_HEADER_T *)(state->rx_data + 1670 header = (VCHIQ_HEADER_T *)(state->rx_data +
1652 (state->rx_pos & VCHIQ_SLOT_MASK)); 1671 (state->rx_pos & VCHIQ_SLOT_MASK));
1653 DEBUG_VALUE(PARSE_HEADER, (int)header); 1672 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1654 msgid = header->msgid; 1673 msgid = header->msgid;
1655 DEBUG_VALUE(PARSE_MSGID, msgid); 1674 DEBUG_VALUE(PARSE_MSGID, msgid);
1656 size = header->size; 1675 size = header->size;
@@ -1684,21 +1703,18 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1684 remoteport); 1703 remoteport);
1685 if (service) 1704 if (service)
1686 vchiq_log_warning(vchiq_core_log_level, 1705 vchiq_log_warning(vchiq_core_log_level,
1687 "%d: prs %s@%x (%d->%d) - " 1706 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1688 "found connected service %d",
1689 state->id, msg_type_str(type), 1707 state->id, msg_type_str(type),
1690 (unsigned int)header, 1708 header, remoteport, localport,
1691 remoteport, localport,
1692 service->localport); 1709 service->localport);
1693 } 1710 }
1694 1711
1695 if (!service) { 1712 if (!service) {
1696 vchiq_log_error(vchiq_core_log_level, 1713 vchiq_log_error(vchiq_core_log_level,
1697 "%d: prs %s@%x (%d->%d) - " 1714 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1698 "invalid/closed service %d",
1699 state->id, msg_type_str(type), 1715 state->id, msg_type_str(type),
1700 (unsigned int)header, 1716 header, remoteport, localport,
1701 remoteport, localport, localport); 1717 localport);
1702 goto skip_message; 1718 goto skip_message;
1703 } 1719 }
1704 break; 1720 break;
@@ -1723,12 +1739,11 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1723 min(64, size)); 1739 min(64, size));
1724 } 1740 }
1725 1741
1726 if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size) 1742 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1727 > VCHIQ_SLOT_SIZE) { 1743 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1728 vchiq_log_error(vchiq_core_log_level, 1744 vchiq_log_error(vchiq_core_log_level,
1729 "header %x (msgid %x) - size %x too big for " 1745 "header %pK (msgid %x) - size %x too big for slot",
1730 "slot", 1746 header, (unsigned int)msgid,
1731 (unsigned int)header, (unsigned int)msgid,
1732 (unsigned int)size); 1747 (unsigned int)size);
1733 WARN(1, "oversized for slot\n"); 1748 WARN(1, "oversized for slot\n");
1734 } 1749 }
@@ -1747,9 +1762,9 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1747 service->peer_version = payload->version; 1762 service->peer_version = payload->version;
1748 } 1763 }
1749 vchiq_log_info(vchiq_core_log_level, 1764 vchiq_log_info(vchiq_core_log_level,
1750 "%d: prs OPENACK@%x,%x (%d->%d) v:%d", 1765 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1751 state->id, (unsigned int)header, size, 1766 state->id, header, size, remoteport, localport,
1752 remoteport, localport, service->peer_version); 1767 service->peer_version);
1753 if (service->srvstate == 1768 if (service->srvstate ==
1754 VCHIQ_SRVSTATE_OPENING) { 1769 VCHIQ_SRVSTATE_OPENING) {
1755 service->remoteport = remoteport; 1770 service->remoteport = remoteport;
@@ -1765,9 +1780,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1765 WARN_ON(size != 0); /* There should be no data */ 1780 WARN_ON(size != 0); /* There should be no data */
1766 1781
1767 vchiq_log_info(vchiq_core_log_level, 1782 vchiq_log_info(vchiq_core_log_level,
1768 "%d: prs CLOSE@%x (%d->%d)", 1783 "%d: prs CLOSE@%pK (%d->%d)",
1769 state->id, (unsigned int)header, 1784 state->id, header, remoteport, localport);
1770 remoteport, localport);
1771 1785
1772 mark_service_closing_internal(service, 1); 1786 mark_service_closing_internal(service, 1);
1773 1787
@@ -1783,9 +1797,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1783 break; 1797 break;
1784 case VCHIQ_MSG_DATA: 1798 case VCHIQ_MSG_DATA:
1785 vchiq_log_info(vchiq_core_log_level, 1799 vchiq_log_info(vchiq_core_log_level,
1786 "%d: prs DATA@%x,%x (%d->%d)", 1800 "%d: prs DATA@%pK,%x (%d->%d)",
1787 state->id, (unsigned int)header, size, 1801 state->id, header, size, remoteport, localport);
1788 remoteport, localport);
1789 1802
1790 if ((service->remoteport == remoteport) 1803 if ((service->remoteport == remoteport)
1791 && (service->srvstate == 1804 && (service->srvstate ==
@@ -1808,8 +1821,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1808 break; 1821 break;
1809 case VCHIQ_MSG_CONNECT: 1822 case VCHIQ_MSG_CONNECT:
1810 vchiq_log_info(vchiq_core_log_level, 1823 vchiq_log_info(vchiq_core_log_level,
1811 "%d: prs CONNECT@%x", 1824 "%d: prs CONNECT@%pK", state->id, header);
1812 state->id, (unsigned int)header);
1813 state->version_common = ((VCHIQ_SLOT_ZERO_T *) 1825 state->version_common = ((VCHIQ_SLOT_ZERO_T *)
1814 state->slot_data)->version; 1826 state->slot_data)->version;
1815 up(&state->connect); 1827 up(&state->connect);
@@ -1827,7 +1839,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1827 int resolved = 0; 1839 int resolved = 0;
1828 1840
1829 DEBUG_TRACE(PARSE_LINE); 1841 DEBUG_TRACE(PARSE_LINE);
1830 if (mutex_lock_interruptible( 1842 if (mutex_lock_killable(
1831 &service->bulk_mutex) != 0) { 1843 &service->bulk_mutex) != 0) {
1832 DEBUG_TRACE(PARSE_LINE); 1844 DEBUG_TRACE(PARSE_LINE);
1833 goto bail_not_ready; 1845 goto bail_not_ready;
@@ -1838,17 +1850,15 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1838 bulk = &queue->bulks[ 1850 bulk = &queue->bulks[
1839 BULK_INDEX(queue->remote_insert)]; 1851 BULK_INDEX(queue->remote_insert)];
1840 bulk->remote_data = 1852 bulk->remote_data =
1841 (void *)((int *)header->data)[0]; 1853 (void *)(long)((int *)header->data)[0];
1842 bulk->remote_size = ((int *)header->data)[1]; 1854 bulk->remote_size = ((int *)header->data)[1];
1843 wmb(); 1855 wmb();
1844 1856
1845 vchiq_log_info(vchiq_core_log_level, 1857 vchiq_log_info(vchiq_core_log_level,
1846 "%d: prs %s@%x (%d->%d) %x@%x", 1858 "%d: prs %s@%pK (%d->%d) %x@%pK",
1847 state->id, msg_type_str(type), 1859 state->id, msg_type_str(type),
1848 (unsigned int)header, 1860 header, remoteport, localport,
1849 remoteport, localport, 1861 bulk->remote_size, bulk->remote_data);
1850 bulk->remote_size,
1851 (unsigned int)bulk->remote_data);
1852 1862
1853 queue->remote_insert++; 1863 queue->remote_insert++;
1854 1864
@@ -1893,7 +1903,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1893 &service->bulk_rx : &service->bulk_tx; 1903 &service->bulk_rx : &service->bulk_tx;
1894 1904
1895 DEBUG_TRACE(PARSE_LINE); 1905 DEBUG_TRACE(PARSE_LINE);
1896 if (mutex_lock_interruptible( 1906 if (mutex_lock_killable(
1897 &service->bulk_mutex) != 0) { 1907 &service->bulk_mutex) != 0) {
1898 DEBUG_TRACE(PARSE_LINE); 1908 DEBUG_TRACE(PARSE_LINE);
1899 goto bail_not_ready; 1909 goto bail_not_ready;
@@ -1901,11 +1911,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1901 if ((int)(queue->remote_insert - 1911 if ((int)(queue->remote_insert -
1902 queue->local_insert) >= 0) { 1912 queue->local_insert) >= 0) {
1903 vchiq_log_error(vchiq_core_log_level, 1913 vchiq_log_error(vchiq_core_log_level,
1904 "%d: prs %s@%x (%d->%d) " 1914 "%d: prs %s@%pK (%d->%d) "
1905 "unexpected (ri=%d,li=%d)", 1915 "unexpected (ri=%d,li=%d)",
1906 state->id, msg_type_str(type), 1916 state->id, msg_type_str(type),
1907 (unsigned int)header, 1917 header, remoteport, localport,
1908 remoteport, localport,
1909 queue->remote_insert, 1918 queue->remote_insert,
1910 queue->local_insert); 1919 queue->local_insert);
1911 mutex_unlock(&service->bulk_mutex); 1920 mutex_unlock(&service->bulk_mutex);
@@ -1921,11 +1930,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1921 queue->remote_insert++; 1930 queue->remote_insert++;
1922 1931
1923 vchiq_log_info(vchiq_core_log_level, 1932 vchiq_log_info(vchiq_core_log_level,
1924 "%d: prs %s@%x (%d->%d) %x@%x", 1933 "%d: prs %s@%pK (%d->%d) %x@%pK",
1925 state->id, msg_type_str(type), 1934 state->id, msg_type_str(type),
1926 (unsigned int)header, 1935 header, remoteport, localport,
1927 remoteport, localport, 1936 bulk->actual, bulk->data);
1928 bulk->actual, (unsigned int)bulk->data);
1929 1937
1930 vchiq_log_trace(vchiq_core_log_level, 1938 vchiq_log_trace(vchiq_core_log_level,
1931 "%d: prs:%d %cx li=%x ri=%x p=%x", 1939 "%d: prs:%d %cx li=%x ri=%x p=%x",
@@ -1947,14 +1955,14 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1947 break; 1955 break;
1948 case VCHIQ_MSG_PADDING: 1956 case VCHIQ_MSG_PADDING:
1949 vchiq_log_trace(vchiq_core_log_level, 1957 vchiq_log_trace(vchiq_core_log_level,
1950 "%d: prs PADDING@%x,%x", 1958 "%d: prs PADDING@%pK,%x",
1951 state->id, (unsigned int)header, size); 1959 state->id, header, size);
1952 break; 1960 break;
1953 case VCHIQ_MSG_PAUSE: 1961 case VCHIQ_MSG_PAUSE:
1954 /* If initiated, signal the application thread */ 1962 /* If initiated, signal the application thread */
1955 vchiq_log_trace(vchiq_core_log_level, 1963 vchiq_log_trace(vchiq_core_log_level,
1956 "%d: prs PAUSE@%x,%x", 1964 "%d: prs PAUSE@%pK,%x",
1957 state->id, (unsigned int)header, size); 1965 state->id, header, size);
1958 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) { 1966 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1959 vchiq_log_error(vchiq_core_log_level, 1967 vchiq_log_error(vchiq_core_log_level,
1960 "%d: PAUSE received in state PAUSED", 1968 "%d: PAUSE received in state PAUSED",
@@ -1977,8 +1985,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1977 break; 1985 break;
1978 case VCHIQ_MSG_RESUME: 1986 case VCHIQ_MSG_RESUME:
1979 vchiq_log_trace(vchiq_core_log_level, 1987 vchiq_log_trace(vchiq_core_log_level,
1980 "%d: prs RESUME@%x,%x", 1988 "%d: prs RESUME@%pK,%x",
1981 state->id, (unsigned int)header, size); 1989 state->id, header, size);
1982 /* Release the slot mutex */ 1990 /* Release the slot mutex */
1983 mutex_unlock(&state->slot_mutex); 1991 mutex_unlock(&state->slot_mutex);
1984 if (state->is_master) 1992 if (state->is_master)
@@ -1999,8 +2007,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
1999 2007
2000 default: 2008 default:
2001 vchiq_log_error(vchiq_core_log_level, 2009 vchiq_log_error(vchiq_core_log_level,
2002 "%d: prs invalid msgid %x@%x,%x", 2010 "%d: prs invalid msgid %x@%pK,%x",
2003 state->id, msgid, (unsigned int)header, size); 2011 state->id, msgid, header, size);
2004 WARN(1, "invalid message\n"); 2012 WARN(1, "invalid message\n");
2005 break; 2013 break;
2006 } 2014 }
@@ -2039,7 +2047,7 @@ slot_handler_func(void *v)
2039 while (1) { 2047 while (1) {
2040 DEBUG_COUNT(SLOT_HANDLER_COUNT); 2048 DEBUG_COUNT(SLOT_HANDLER_COUNT);
2041 DEBUG_TRACE(SLOT_HANDLER_LINE); 2049 DEBUG_TRACE(SLOT_HANDLER_LINE);
2042 remote_event_wait(&local->trigger); 2050 remote_event_wait(state, &local->trigger);
2043 2051
2044 rmb(); 2052 rmb();
2045 2053
@@ -2128,7 +2136,7 @@ recycle_func(void *v)
2128 VCHIQ_SHARED_STATE_T *local = state->local; 2136 VCHIQ_SHARED_STATE_T *local = state->local;
2129 2137
2130 while (1) { 2138 while (1) {
2131 remote_event_wait(&local->recycle); 2139 remote_event_wait(state, &local->recycle);
2132 2140
2133 process_free_queue(state); 2141 process_free_queue(state);
2134 } 2142 }
@@ -2151,7 +2159,7 @@ sync_func(void *v)
2151 int type; 2159 int type;
2152 unsigned int localport, remoteport; 2160 unsigned int localport, remoteport;
2153 2161
2154 remote_event_wait(&local->sync_trigger); 2162 remote_event_wait(state, &local->sync_trigger);
2155 2163
2156 rmb(); 2164 rmb();
2157 2165
@@ -2165,11 +2173,9 @@ sync_func(void *v)
2165 2173
2166 if (!service) { 2174 if (!service) {
2167 vchiq_log_error(vchiq_sync_log_level, 2175 vchiq_log_error(vchiq_sync_log_level,
2168 "%d: sf %s@%x (%d->%d) - " 2176 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2169 "invalid/closed service %d",
2170 state->id, msg_type_str(type), 2177 state->id, msg_type_str(type),
2171 (unsigned int)header, 2178 header, remoteport, localport, localport);
2172 remoteport, localport, localport);
2173 release_message_sync(state, header); 2179 release_message_sync(state, header);
2174 continue; 2180 continue;
2175 } 2181 }
@@ -2199,9 +2205,9 @@ sync_func(void *v)
2199 service->peer_version = payload->version; 2205 service->peer_version = payload->version;
2200 } 2206 }
2201 vchiq_log_info(vchiq_sync_log_level, 2207 vchiq_log_info(vchiq_sync_log_level,
2202 "%d: sf OPENACK@%x,%x (%d->%d) v:%d", 2208 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2203 state->id, (unsigned int)header, size, 2209 state->id, header, size, remoteport, localport,
2204 remoteport, localport, service->peer_version); 2210 service->peer_version);
2205 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { 2211 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2206 service->remoteport = remoteport; 2212 service->remoteport = remoteport;
2207 vchiq_set_service_state(service, 2213 vchiq_set_service_state(service,
@@ -2214,9 +2220,8 @@ sync_func(void *v)
2214 2220
2215 case VCHIQ_MSG_DATA: 2221 case VCHIQ_MSG_DATA:
2216 vchiq_log_trace(vchiq_sync_log_level, 2222 vchiq_log_trace(vchiq_sync_log_level,
2217 "%d: sf DATA@%x,%x (%d->%d)", 2223 "%d: sf DATA@%pK,%x (%d->%d)",
2218 state->id, (unsigned int)header, size, 2224 state->id, header, size, remoteport, localport);
2219 remoteport, localport);
2220 2225
2221 if ((service->remoteport == remoteport) && 2226 if ((service->remoteport == remoteport) &&
2222 (service->srvstate == 2227 (service->srvstate ==
@@ -2234,8 +2239,8 @@ sync_func(void *v)
2234 2239
2235 default: 2240 default:
2236 vchiq_log_error(vchiq_sync_log_level, 2241 vchiq_log_error(vchiq_sync_log_level,
2237 "%d: sf unexpected msgid %x@%x,%x", 2242 "%d: sf unexpected msgid %x@%pK,%x",
2238 state->id, msgid, (unsigned int)header, size); 2243 state->id, msgid, header, size);
2239 release_message_sync(state, header); 2244 release_message_sync(state, header);
2240 break; 2245 break;
2241 } 2246 }
@@ -2268,7 +2273,8 @@ get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
2268VCHIQ_SLOT_ZERO_T * 2273VCHIQ_SLOT_ZERO_T *
2269vchiq_init_slots(void *mem_base, int mem_size) 2274vchiq_init_slots(void *mem_base, int mem_size)
2270{ 2275{
2271 int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK; 2276 int mem_align =
2277 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2272 VCHIQ_SLOT_ZERO_T *slot_zero = 2278 VCHIQ_SLOT_ZERO_T *slot_zero =
2273 (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align); 2279 (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
2274 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE; 2280 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
@@ -2316,16 +2322,16 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2316 int i; 2322 int i;
2317 2323
2318 vchiq_log_warning(vchiq_core_log_level, 2324 vchiq_log_warning(vchiq_core_log_level,
2319 "%s: slot_zero = 0x%08lx, is_master = %d", 2325 "%s: slot_zero = %pK, is_master = %d",
2320 __func__, (unsigned long)slot_zero, is_master); 2326 __func__, slot_zero, is_master);
2321 2327
2322 /* Check the input configuration */ 2328 /* Check the input configuration */
2323 2329
2324 if (slot_zero->magic != VCHIQ_MAGIC) { 2330 if (slot_zero->magic != VCHIQ_MAGIC) {
2325 vchiq_loud_error_header(); 2331 vchiq_loud_error_header();
2326 vchiq_loud_error("Invalid VCHIQ magic value found."); 2332 vchiq_loud_error("Invalid VCHIQ magic value found.");
2327 vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)", 2333 vchiq_loud_error("slot_zero=%pK: magic=%x (expected %x)",
2328 (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC); 2334 slot_zero, slot_zero->magic, VCHIQ_MAGIC);
2329 vchiq_loud_error_footer(); 2335 vchiq_loud_error_footer();
2330 return VCHIQ_ERROR; 2336 return VCHIQ_ERROR;
2331 } 2337 }
@@ -2333,10 +2339,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2333 if (slot_zero->version < VCHIQ_VERSION_MIN) { 2339 if (slot_zero->version < VCHIQ_VERSION_MIN) {
2334 vchiq_loud_error_header(); 2340 vchiq_loud_error_header();
2335 vchiq_loud_error("Incompatible VCHIQ versions found."); 2341 vchiq_loud_error("Incompatible VCHIQ versions found.");
2336 vchiq_loud_error("slot_zero=%x: VideoCore version=%d " 2342 vchiq_loud_error("slot_zero=%pK: VideoCore version=%d (minimum %d)",
2337 "(minimum %d)", 2343 slot_zero, slot_zero->version, VCHIQ_VERSION_MIN);
2338 (unsigned int)slot_zero, slot_zero->version,
2339 VCHIQ_VERSION_MIN);
2340 vchiq_loud_error("Restart with a newer VideoCore image."); 2344 vchiq_loud_error("Restart with a newer VideoCore image.");
2341 vchiq_loud_error_footer(); 2345 vchiq_loud_error_footer();
2342 return VCHIQ_ERROR; 2346 return VCHIQ_ERROR;
@@ -2345,10 +2349,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2345 if (VCHIQ_VERSION < slot_zero->version_min) { 2349 if (VCHIQ_VERSION < slot_zero->version_min) {
2346 vchiq_loud_error_header(); 2350 vchiq_loud_error_header();
2347 vchiq_loud_error("Incompatible VCHIQ versions found."); 2351 vchiq_loud_error("Incompatible VCHIQ versions found.");
2348 vchiq_loud_error("slot_zero=%x: version=%d (VideoCore " 2352 vchiq_loud_error("slot_zero=%pK: version=%d (VideoCore minimum %d)",
2349 "minimum %d)", 2353 slot_zero, VCHIQ_VERSION, slot_zero->version_min);
2350 (unsigned int)slot_zero, VCHIQ_VERSION,
2351 slot_zero->version_min);
2352 vchiq_loud_error("Restart with a newer kernel."); 2354 vchiq_loud_error("Restart with a newer kernel.");
2353 vchiq_loud_error_footer(); 2355 vchiq_loud_error_footer();
2354 return VCHIQ_ERROR; 2356 return VCHIQ_ERROR;
@@ -2360,26 +2362,20 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2360 (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) { 2362 (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
2361 vchiq_loud_error_header(); 2363 vchiq_loud_error_header();
2362 if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) 2364 if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
2363 vchiq_loud_error("slot_zero=%x: slot_zero_size=%x " 2365 vchiq_loud_error("slot_zero=%pK: slot_zero_size=%d (expected %d)",
2364 "(expected %x)", 2366 slot_zero, slot_zero->slot_zero_size,
2365 (unsigned int)slot_zero, 2367 (int)sizeof(VCHIQ_SLOT_ZERO_T));
2366 slot_zero->slot_zero_size,
2367 sizeof(VCHIQ_SLOT_ZERO_T));
2368 if (slot_zero->slot_size != VCHIQ_SLOT_SIZE) 2368 if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
2369 vchiq_loud_error("slot_zero=%x: slot_size=%d " 2369 vchiq_loud_error("slot_zero=%pK: slot_size=%d (expected %d)",
2370 "(expected %d", 2370 slot_zero, slot_zero->slot_size,
2371 (unsigned int)slot_zero, slot_zero->slot_size,
2372 VCHIQ_SLOT_SIZE); 2371 VCHIQ_SLOT_SIZE);
2373 if (slot_zero->max_slots != VCHIQ_MAX_SLOTS) 2372 if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
2374 vchiq_loud_error("slot_zero=%x: max_slots=%d " 2373 vchiq_loud_error("slot_zero=%pK: max_slots=%d (expected %d)",
2375 "(expected %d)", 2374 slot_zero, slot_zero->max_slots,
2376 (unsigned int)slot_zero, slot_zero->max_slots,
2377 VCHIQ_MAX_SLOTS); 2375 VCHIQ_MAX_SLOTS);
2378 if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE) 2376 if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
2379 vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d " 2377 vchiq_loud_error("slot_zero=%pK: max_slots_per_side=%d (expected %d)",
2380 "(expected %d)", 2378 slot_zero, slot_zero->max_slots_per_side,
2381 (unsigned int)slot_zero,
2382 slot_zero->max_slots_per_side,
2383 VCHIQ_MAX_SLOTS_PER_SIDE); 2379 VCHIQ_MAX_SLOTS_PER_SIDE);
2384 vchiq_loud_error_footer(); 2380 vchiq_loud_error_footer();
2385 return VCHIQ_ERROR; 2381 return VCHIQ_ERROR;
@@ -2463,24 +2459,24 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2463 state->data_use_count = 0; 2459 state->data_use_count = 0;
2464 state->data_quota = state->slot_queue_available - 1; 2460 state->data_quota = state->slot_queue_available - 1;
2465 2461
2466 local->trigger.event = &state->trigger_event; 2462 local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);
2467 remote_event_create(&local->trigger); 2463 remote_event_create(state, &local->trigger);
2468 local->tx_pos = 0; 2464 local->tx_pos = 0;
2469 2465
2470 local->recycle.event = &state->recycle_event; 2466 local->recycle.event = offsetof(VCHIQ_STATE_T, recycle_event);
2471 remote_event_create(&local->recycle); 2467 remote_event_create(state, &local->recycle);
2472 local->slot_queue_recycle = state->slot_queue_available; 2468 local->slot_queue_recycle = state->slot_queue_available;
2473 2469
2474 local->sync_trigger.event = &state->sync_trigger_event; 2470 local->sync_trigger.event = offsetof(VCHIQ_STATE_T, sync_trigger_event);
2475 remote_event_create(&local->sync_trigger); 2471 remote_event_create(state, &local->sync_trigger);
2476 2472
2477 local->sync_release.event = &state->sync_release_event; 2473 local->sync_release.event = offsetof(VCHIQ_STATE_T, sync_release_event);
2478 remote_event_create(&local->sync_release); 2474 remote_event_create(state, &local->sync_release);
2479 2475
2480 /* At start-of-day, the slot is empty and available */ 2476 /* At start-of-day, the slot is empty and available */
2481 ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid 2477 ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
2482 = VCHIQ_MSGID_PADDING; 2478 = VCHIQ_MSGID_PADDING;
2483 remote_event_signal_local(&local->sync_release); 2479 remote_event_signal_local(state, &local->sync_release);
2484 2480
2485 local->debug[DEBUG_ENTRIES] = DEBUG_MAX; 2481 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2486 2482
@@ -2494,7 +2490,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2494 (void *)state, 2490 (void *)state,
2495 threadname); 2491 threadname);
2496 2492
2497 if (state->slot_handler_thread == NULL) { 2493 if (IS_ERR(state->slot_handler_thread)) {
2498 vchiq_loud_error_header(); 2494 vchiq_loud_error_header();
2499 vchiq_loud_error("couldn't create thread %s", threadname); 2495 vchiq_loud_error("couldn't create thread %s", threadname);
2500 vchiq_loud_error_footer(); 2496 vchiq_loud_error_footer();
@@ -2507,7 +2503,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2507 state->recycle_thread = kthread_create(&recycle_func, 2503 state->recycle_thread = kthread_create(&recycle_func,
2508 (void *)state, 2504 (void *)state,
2509 threadname); 2505 threadname);
2510 if (state->recycle_thread == NULL) { 2506 if (IS_ERR(state->recycle_thread)) {
2511 vchiq_loud_error_header(); 2507 vchiq_loud_error_header();
2512 vchiq_loud_error("couldn't create thread %s", threadname); 2508 vchiq_loud_error("couldn't create thread %s", threadname);
2513 vchiq_loud_error_footer(); 2509 vchiq_loud_error_footer();
@@ -2520,7 +2516,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
2520 state->sync_thread = kthread_create(&sync_func, 2516 state->sync_thread = kthread_create(&sync_func,
2521 (void *)state, 2517 (void *)state,
2522 threadname); 2518 threadname);
2523 if (state->sync_thread == NULL) { 2519 if (IS_ERR(state->sync_thread)) {
2524 vchiq_loud_error_header(); 2520 vchiq_loud_error_header();
2525 vchiq_loud_error("couldn't create thread %s", threadname); 2521 vchiq_loud_error("couldn't create thread %s", threadname);
2526 vchiq_loud_error_footer(); 2522 vchiq_loud_error_footer();
@@ -2684,14 +2680,19 @@ vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
2684 service->version, 2680 service->version,
2685 service->version_min 2681 service->version_min
2686 }; 2682 };
2687 VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
2688 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 2683 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2689 2684
2690 service->client_id = client_id; 2685 service->client_id = client_id;
2691 vchiq_use_service_internal(service); 2686 vchiq_use_service_internal(service);
2692 status = queue_message(service->state, NULL, 2687 status = queue_message(service->state,
2693 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0), 2688 NULL,
2694 &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING); 2689 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2690 service->localport,
2691 0),
2692 memcpy_copy_callback,
2693 &payload,
2694 sizeof(payload),
2695 QMFLAGS_IS_BLOCKING);
2695 if (status == VCHIQ_SUCCESS) { 2696 if (status == VCHIQ_SUCCESS) {
2696 /* Wait for the ACK/NAK */ 2697 /* Wait for the ACK/NAK */
2697 if (down_interruptible(&service->remove_event) != 0) { 2698 if (down_interruptible(&service->remove_event) != 0) {
@@ -2756,20 +2757,16 @@ release_service_messages(VCHIQ_SERVICE_T *service)
2756 if ((port == service->localport) && 2757 if ((port == service->localport) &&
2757 (msgid & VCHIQ_MSGID_CLAIMED)) { 2758 (msgid & VCHIQ_MSGID_CLAIMED)) {
2758 vchiq_log_info(vchiq_core_log_level, 2759 vchiq_log_info(vchiq_core_log_level,
2759 " fsi - hdr %x", 2760 " fsi - hdr %pK", header);
2760 (unsigned int)header);
2761 release_slot(state, slot_info, header, 2761 release_slot(state, slot_info, header,
2762 NULL); 2762 NULL);
2763 } 2763 }
2764 pos += calc_stride(header->size); 2764 pos += calc_stride(header->size);
2765 if (pos > VCHIQ_SLOT_SIZE) { 2765 if (pos > VCHIQ_SLOT_SIZE) {
2766 vchiq_log_error(vchiq_core_log_level, 2766 vchiq_log_error(vchiq_core_log_level,
2767 "fsi - pos %x: header %x, " 2767 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2768 "msgid %x, header->msgid %x, " 2768 pos, header, msgid,
2769 "header->size %x", 2769 header->msgid, header->size);
2770 pos, (unsigned int)header,
2771 msgid, header->msgid,
2772 header->size);
2773 WARN(1, "invalid slot position\n"); 2770 WARN(1, "invalid slot position\n");
2774 } 2771 }
2775 } 2772 }
@@ -2783,7 +2780,7 @@ do_abort_bulks(VCHIQ_SERVICE_T *service)
2783 VCHIQ_STATUS_T status; 2780 VCHIQ_STATUS_T status;
2784 2781
2785 /* Abort any outstanding bulk transfers */ 2782 /* Abort any outstanding bulk transfers */
2786 if (mutex_lock_interruptible(&service->bulk_mutex) != 0) 2783 if (mutex_lock_killable(&service->bulk_mutex) != 0)
2787 return 0; 2784 return 0;
2788 abort_outstanding_bulks(service, &service->bulk_tx); 2785 abort_outstanding_bulks(service, &service->bulk_tx);
2789 abort_outstanding_bulks(service, &service->bulk_rx); 2786 abort_outstanding_bulks(service, &service->bulk_rx);
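mutex_lock_interruptible() backs out on any signal, which is why the driver previously wrapped it in a local signal-masking helper; mutex_lock_killable() only gives up when a fatal signal is pending, so harmless signals such as SIGALRM no longer abort the lock. Generic calling pattern:

#include <linux/mutex.h>

static int do_work(struct mutex *lock)
{
	if (mutex_lock_killable(lock))
		return -EINTR;	/* only a fatal signal gets us here */

	/* ... critical section ... */

	mutex_unlock(lock);
	return 0;
}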
@@ -3303,7 +3300,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3303 queue = (dir == VCHIQ_BULK_TRANSMIT) ? 3300 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3304 &service->bulk_tx : &service->bulk_rx; 3301 &service->bulk_tx : &service->bulk_rx;
3305 3302
3306 if (mutex_lock_interruptible(&service->bulk_mutex) != 0) { 3303 if (mutex_lock_killable(&service->bulk_mutex) != 0) {
3307 status = VCHIQ_RETRY; 3304 status = VCHIQ_RETRY;
3308 goto error_exit; 3305 goto error_exit;
3309 } 3306 }
@@ -3317,7 +3314,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3317 status = VCHIQ_RETRY; 3314 status = VCHIQ_RETRY;
3318 goto error_exit; 3315 goto error_exit;
3319 } 3316 }
3320 if (mutex_lock_interruptible(&service->bulk_mutex) 3317 if (mutex_lock_killable(&service->bulk_mutex)
3321 != 0) { 3318 != 0) {
3322 status = VCHIQ_RETRY; 3319 status = VCHIQ_RETRY;
3323 goto error_exit; 3320 goto error_exit;
@@ -3341,14 +3338,13 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3341 wmb(); 3338 wmb();
3342 3339
3343 vchiq_log_info(vchiq_core_log_level, 3340 vchiq_log_info(vchiq_core_log_level,
3344 "%d: bt (%d->%d) %cx %x@%x %x", 3341 "%d: bt (%d->%d) %cx %x@%pK %pK",
3345 state->id, 3342 state->id, service->localport, service->remoteport, dir_char,
3346 service->localport, service->remoteport, dir_char, 3343 size, bulk->data, userdata);
3347 size, (unsigned int)bulk->data, (unsigned int)userdata);
3348 3344
3349 /* The slot mutex must be held when the service is being closed, so 3345 /* The slot mutex must be held when the service is being closed, so
3350 claim it here to ensure that isn't happening */ 3346 claim it here to ensure that isn't happening */
3351 if (mutex_lock_interruptible(&state->slot_mutex) != 0) { 3347 if (mutex_lock_killable(&state->slot_mutex) != 0) {
3352 status = VCHIQ_RETRY; 3348 status = VCHIQ_RETRY;
3353 goto cancel_bulk_error_exit; 3349 goto cancel_bulk_error_exit;
3354 } 3350 }
@@ -3363,16 +3359,19 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3363 (dir == VCHIQ_BULK_TRANSMIT) ? 3359 (dir == VCHIQ_BULK_TRANSMIT) ?
3364 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); 3360 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
3365 } else { 3361 } else {
3366 int payload[2] = { (int)bulk->data, bulk->size }; 3362 int payload[2] = { (int)(long)bulk->data, bulk->size };
3367 VCHIQ_ELEMENT_T element = { payload, sizeof(payload) }; 3363
3368 3364 status = queue_message(state,
3369 status = queue_message(state, NULL, 3365 NULL,
3370 VCHIQ_MAKE_MSG(dir_msgtype, 3366 VCHIQ_MAKE_MSG(dir_msgtype,
3371 service->localport, service->remoteport), 3367 service->localport,
3372 &element, 1, sizeof(payload), 3368 service->remoteport),
3373 QMFLAGS_IS_BLOCKING | 3369 memcpy_copy_callback,
3374 QMFLAGS_NO_MUTEX_LOCK | 3370 &payload,
3375 QMFLAGS_NO_MUTEX_UNLOCK); 3371 sizeof(payload),
3372 QMFLAGS_IS_BLOCKING |
3373 QMFLAGS_NO_MUTEX_LOCK |
3374 QMFLAGS_NO_MUTEX_UNLOCK);
3376 if (status != VCHIQ_SUCCESS) { 3375 if (status != VCHIQ_SUCCESS) {
3377 goto unlock_both_error_exit; 3376 goto unlock_both_error_exit;
3378 } 3377 }
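The bulk payload already stored the buffer address in a 32-bit int (the width the message format expects); on a 64-bit kernel a direct (int) cast of a pointer triggers a cast-to-integer-of-different-size warning, so the value is narrowed through long first. A schematic of the two-step cast, with the truncation kept deliberate and visible:

void *data = bulk->data;
int lo32;

/* lo32 = (int)data;		   warns on 64-bit builds */
lo32 = (int)(long)data;		/* explicit narrowing to the 32-bit slot */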
@@ -3418,26 +3417,22 @@ error_exit:
3418 3417
3419VCHIQ_STATUS_T 3418VCHIQ_STATUS_T
3420vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle, 3419vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
3421 const VCHIQ_ELEMENT_T *elements, unsigned int count) 3420 ssize_t (*copy_callback)(void *context, void *dest,
3421 size_t offset, size_t maxsize),
3422 void *context,
3423 size_t size)
3422{ 3424{
3423 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 3425 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3424 VCHIQ_STATUS_T status = VCHIQ_ERROR; 3426 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3425 3427
3426 unsigned int size = 0;
3427 unsigned int i;
3428
3429 if (!service || 3428 if (!service ||
3430 (vchiq_check_service(service) != VCHIQ_SUCCESS)) 3429 (vchiq_check_service(service) != VCHIQ_SUCCESS))
3431 goto error_exit; 3430 goto error_exit;
3432 3431
3433 for (i = 0; i < (unsigned int)count; i++) { 3432 if (!size) {
3434 if (elements[i].size) { 3433 VCHIQ_SERVICE_STATS_INC(service, error_count);
3435 if (elements[i].data == NULL) { 3434 goto error_exit;
3436 VCHIQ_SERVICE_STATS_INC(service, error_count); 3435
3437 goto error_exit;
3438 }
3439 size += elements[i].size;
3440 }
3441 } 3436 }
3442 3437
3443 if (size > VCHIQ_MAX_MSG_SIZE) { 3438 if (size > VCHIQ_MAX_MSG_SIZE) {
@@ -3451,14 +3446,14 @@ vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
3451 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3446 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3452 service->localport, 3447 service->localport,
3453 service->remoteport), 3448 service->remoteport),
3454 elements, count, size, 1); 3449 copy_callback, context, size, 1);
3455 break; 3450 break;
3456 case VCHIQ_SRVSTATE_OPENSYNC: 3451 case VCHIQ_SRVSTATE_OPENSYNC:
3457 status = queue_message_sync(service->state, service, 3452 status = queue_message_sync(service->state, service,
3458 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3453 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3459 service->localport, 3454 service->localport,
3460 service->remoteport), 3455 service->remoteport),
3461 elements, count, size, 1); 3456 copy_callback, context, size, 1);
3462 break; 3457 break;
3463 default: 3458 default:
3464 status = VCHIQ_ERROR; 3459 status = VCHIQ_ERROR;
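The point of the callback interface is that the core no longer needs the payload staged in a kernel buffer before queuing; a caller can supply a callback that pulls straight from its own source, including user memory. A hypothetical caller of the new signature (both function names below are illustrative, not part of the patch):

#include <linux/uaccess.h>

static ssize_t copy_from_user_callback(void *context, void *dest,
				       size_t offset, size_t maxsize)
{
	const char __user *ubuf = context;	/* user pointer passed as context */

	if (copy_from_user(dest, ubuf + offset, maxsize))
		return -EFAULT;
	return maxsize;
}

static VCHIQ_STATUS_T send_user_blob(VCHIQ_SERVICE_HANDLE_T handle,
				     const char __user *ubuf, size_t len)
{
	return vchiq_queue_message(handle, copy_from_user_callback,
				   (void *)ubuf, len);
}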
@@ -3691,13 +3686,11 @@ vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
3691 vchiq_dump(dump_context, buf, len + 1); 3686 vchiq_dump(dump_context, buf, len + 1);
3692 3687
3693 len = snprintf(buf, sizeof(buf), 3688 len = snprintf(buf, sizeof(buf),
3694 " tx_pos=%x(@%x), rx_pos=%x(@%x)", 3689 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3695 state->local->tx_pos, 3690 state->local->tx_pos,
3696 (uint32_t)state->tx_data + 3691 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3697 (state->local_tx_pos & VCHIQ_SLOT_MASK),
3698 state->rx_pos, 3692 state->rx_pos,
3699 (uint32_t)state->rx_data + 3693 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3700 (state->rx_pos & VCHIQ_SLOT_MASK));
3701 vchiq_dump(dump_context, buf, len + 1); 3694 vchiq_dump(dump_context, buf, len + 1);
3702 3695
3703 len = snprintf(buf, sizeof(buf), 3696 len = snprintf(buf, sizeof(buf),
@@ -3747,7 +3740,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3747 char buf[80]; 3740 char buf[80];
3748 int len; 3741 int len;
3749 3742
3750 len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)", 3743 len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3751 service->localport, srvstate_names[service->srvstate], 3744 service->localport, srvstate_names[service->srvstate],
3752 service->ref_count - 1); /*Don't include the lock just taken*/ 3745 service->ref_count - 1); /*Don't include the lock just taken*/
3753 3746
@@ -3759,7 +3752,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3759 int tx_pending, rx_pending; 3752 int tx_pending, rx_pending;
3760 if (service->remoteport != VCHIQ_PORT_FREE) { 3753 if (service->remoteport != VCHIQ_PORT_FREE) {
3761 int len2 = snprintf(remoteport, sizeof(remoteport), 3754 int len2 = snprintf(remoteport, sizeof(remoteport),
3762 "%d", service->remoteport); 3755 "%u", service->remoteport);
3763 if (service->public_fourcc != VCHIQ_FOURCC_INVALID) 3756 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3764 snprintf(remoteport + len2, 3757 snprintf(remoteport + len2,
3765 sizeof(remoteport) - len2, 3758 sizeof(remoteport) - len2,
@@ -3888,26 +3881,26 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
3888 return status; 3881 return status;
3889} 3882}
3890 3883
3891void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem, 3884void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *void_mem,
3892 size_t numBytes) 3885 size_t num_bytes)
3893{ 3886{
3894 const uint8_t *mem = (const uint8_t *)voidMem; 3887 const uint8_t *mem = (const uint8_t *)void_mem;
3895 size_t offset; 3888 size_t offset;
3896 char lineBuf[100]; 3889 char line_buf[100];
3897 char *s; 3890 char *s;
3898 3891
3899 while (numBytes > 0) { 3892 while (num_bytes > 0) {
3900 s = lineBuf; 3893 s = line_buf;
3901 3894
3902 for (offset = 0; offset < 16; offset++) { 3895 for (offset = 0; offset < 16; offset++) {
3903 if (offset < numBytes) 3896 if (offset < num_bytes)
3904 s += snprintf(s, 4, "%02x ", mem[offset]); 3897 s += snprintf(s, 4, "%02x ", mem[offset]);
3905 else 3898 else
3906 s += snprintf(s, 4, " "); 3899 s += snprintf(s, 4, " ");
3907 } 3900 }
3908 3901
3909 for (offset = 0; offset < 16; offset++) { 3902 for (offset = 0; offset < 16; offset++) {
3910 if (offset < numBytes) { 3903 if (offset < num_bytes) {
3911 uint8_t ch = mem[offset]; 3904 uint8_t ch = mem[offset];
3912 3905
3913 if ((ch < ' ') || (ch > '~')) 3906 if ((ch < ' ') || (ch > '~'))
@@ -3919,16 +3912,16 @@ void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
3919 3912
3920 if ((label != NULL) && (*label != '\0')) 3913 if ((label != NULL) && (*label != '\0'))
3921 vchiq_log_trace(VCHIQ_LOG_TRACE, 3914 vchiq_log_trace(VCHIQ_LOG_TRACE,
3922 "%s: %08x: %s", label, addr, lineBuf); 3915 "%s: %08x: %s", label, addr, line_buf);
3923 else 3916 else
3924 vchiq_log_trace(VCHIQ_LOG_TRACE, 3917 vchiq_log_trace(VCHIQ_LOG_TRACE,
3925 "%08x: %s", addr, lineBuf); 3918 "%08x: %s", addr, line_buf);
3926 3919
3927 addr += 16; 3920 addr += 16;
3928 mem += 16; 3921 mem += 16;
3929 if (numBytes > 16) 3922 if (num_bytes > 16)
3930 numBytes -= 16; 3923 num_bytes -= 16;
3931 else 3924 else
3932 numBytes = 0; 3925 num_bytes = 0;
3933 } 3926 }
3934} 3927}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9be484c776d0..9e164652548a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -184,11 +184,11 @@ enum {
184 184
185#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug; 185#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
186#define DEBUG_TRACE(d) \ 186#define DEBUG_TRACE(d) \
187 do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0) 187 do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
188#define DEBUG_VALUE(d, v) \ 188#define DEBUG_VALUE(d, v) \
189 do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0) 189 do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
190#define DEBUG_COUNT(d) \ 190#define DEBUG_COUNT(d) \
191 do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0) 191 do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
192 192
193#else /* VCHIQ_ENABLE_DEBUG */ 193#else /* VCHIQ_ENABLE_DEBUG */
194 194
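On ARM the dsb() macro now takes an option argument; dsb(sy) is the full-system data synchronization barrier and matches what the old bare dsb() provided, so the debug macros keep their ordering guarantee while building against current barrier.h. In isolation:

/* schematic: publish the debug word before whatever follows */
debug_ptr[0] = __LINE__;
dsb(sy);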
@@ -264,7 +264,8 @@ typedef struct vchiq_bulk_queue_struct {
264typedef struct remote_event_struct { 264typedef struct remote_event_struct {
265 int armed; 265 int armed;
266 int fired; 266 int fired;
267 struct semaphore *event; 267 /* Contains offset from the beginning of the VCHIQ_STATE_T structure */
268 u32 event;
268} REMOTE_EVENT_T; 269} REMOTE_EVENT_T;
269 270
270typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T; 271typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
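REMOTE_EVENT_T lives in memory shared with the VideoCore, so its layout must be identical whether the ARM side runs a 32- or 64-bit kernel; a struct semaphore pointer changes size between the two, while a u32 offset into VCHIQ_STATE_T does not. The local semaphore is then recovered by pointer arithmetic, roughly as follows (helper name illustrative):

static inline struct semaphore *
event_to_semaphore(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
	/* event->event is the byte offset of the semaphore inside *state */
	return (struct semaphore *)((char *)state + event->event);
}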
@@ -633,9 +634,6 @@ vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
633extern void 634extern void
634vchiq_complete_bulk(VCHIQ_BULK_T *bulk); 635vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
635 636
636extern VCHIQ_STATUS_T
637vchiq_copy_from_user(void *dst, const void *src, int size);
638
639extern void 637extern void
640remote_event_signal(REMOTE_EVENT_T *event); 638remote_event_signal(REMOTE_EVENT_T *event);
641 639
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 7e032130d967..f07cd4448ddf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -120,7 +120,7 @@ static int debugfs_log_open(struct inode *inode, struct file *file)
120 return single_open(file, debugfs_log_show, inode->i_private); 120 return single_open(file, debugfs_log_show, inode->i_private);
121} 121}
122 122
123static int debugfs_log_write(struct file *file, 123static ssize_t debugfs_log_write(struct file *file,
124 const char __user *buffer, 124 const char __user *buffer,
125 size_t count, loff_t *ppos) 125 size_t count, loff_t *ppos)
126{ 126{
@@ -229,7 +229,7 @@ static int debugfs_trace_open(struct inode *inode, struct file *file)
229 return single_open(file, debugfs_trace_show, inode->i_private); 229 return single_open(file, debugfs_trace_show, inode->i_private);
230} 230}
231 231
232static int debugfs_trace_write(struct file *file, 232static ssize_t debugfs_trace_write(struct file *file,
233 const char __user *buffer, 233 const char __user *buffer,
234 size_t count, loff_t *ppos) 234 size_t count, loff_t *ppos)
235{ 235{
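struct file_operations declares .write as returning ssize_t, so the int-returning prototypes above never matched the contract; the fix simply aligns them. A minimal handler of the expected shape (the 'example_' names are placeholders):

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	/* accept and discard everything that was written */
	return count;
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.write	= example_write,
};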
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 8067bbe7ce8d..377e8e48bb54 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -141,9 +141,12 @@ extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
141extern VCHIQ_STATUS_T vchiq_use_service_no_resume( 141extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
142 VCHIQ_SERVICE_HANDLE_T service); 142 VCHIQ_SERVICE_HANDLE_T service);
143extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service); 143extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
144 144extern VCHIQ_STATUS_T
145extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service, 145vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
146 const VCHIQ_ELEMENT_T *elements, unsigned int count); 146 ssize_t (*copy_callback)(void *context, void *dest,
147 size_t offset, size_t maxsize),
148 void *context,
149 size_t size);
147extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service, 150extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
148 VCHIQ_HEADER_T *header); 151 VCHIQ_HEADER_T *header);
149extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service, 152extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 25e7011edc50..e93922a87263 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -70,7 +70,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
70* 70*
71***************************************************************************/ 71***************************************************************************/
72#define VCHIQ_INIT_RETRIES 10 72#define VCHIQ_INIT_RETRIES 10
73VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut) 73VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
74{ 74{
75 VCHIQ_STATUS_T status = VCHIQ_ERROR; 75 VCHIQ_STATUS_T status = VCHIQ_ERROR;
76 VCHIQ_STATE_T *state; 76 VCHIQ_STATE_T *state;
@@ -108,7 +108,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
108 mutex_init(&instance->bulk_waiter_list_mutex); 108 mutex_init(&instance->bulk_waiter_list_mutex);
109 INIT_LIST_HEAD(&instance->bulk_waiter_list); 109 INIT_LIST_HEAD(&instance->bulk_waiter_list);
110 110
111 *instanceOut = instance; 111 *instance_out = instance;
112 112
113 status = VCHIQ_SUCCESS; 113 status = VCHIQ_SUCCESS;
114 114
@@ -134,7 +134,7 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
134 vchiq_log_trace(vchiq_core_log_level, 134 vchiq_log_trace(vchiq_core_log_level,
135 "%s(%p) called", __func__, instance); 135 "%s(%p) called", __func__, instance);
136 136
137 if (mutex_lock_interruptible(&state->mutex) != 0) 137 if (mutex_lock_killable(&state->mutex) != 0)
138 return VCHIQ_RETRY; 138 return VCHIQ_RETRY;
139 139
140 /* Remove all services */ 140 /* Remove all services */
@@ -155,9 +155,8 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
155 list); 155 list);
156 list_del(pos); 156 list_del(pos);
157 vchiq_log_info(vchiq_arm_log_level, 157 vchiq_log_info(vchiq_arm_log_level,
158 "bulk_waiter - cleaned up %x " 158 "bulk_waiter - cleaned up %pK for pid %d",
159 "for pid %d", 159 waiter, waiter->pid);
160 (unsigned int)waiter, waiter->pid);
161 kfree(waiter); 160 kfree(waiter);
162 } 161 }
163 kfree(instance); 162 kfree(instance);
@@ -192,7 +191,7 @@ VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
192 vchiq_log_trace(vchiq_core_log_level, 191 vchiq_log_trace(vchiq_core_log_level,
193 "%s(%p) called", __func__, instance); 192 "%s(%p) called", __func__, instance);
194 193
195 if (mutex_lock_interruptible(&state->mutex) != 0) { 194 if (mutex_lock_killable(&state->mutex) != 0) {
196 vchiq_log_trace(vchiq_core_log_level, 195 vchiq_log_trace(vchiq_core_log_level,
197 "%s: call to mutex_lock failed", __func__); 196 "%s: call to mutex_lock failed", __func__);
198 status = VCHIQ_RETRY; 197 status = VCHIQ_RETRY;
@@ -450,8 +449,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
450 list_add(&waiter->list, &instance->bulk_waiter_list); 449 list_add(&waiter->list, &instance->bulk_waiter_list);
451 mutex_unlock(&instance->bulk_waiter_list_mutex); 450 mutex_unlock(&instance->bulk_waiter_list_mutex);
452 vchiq_log_info(vchiq_arm_log_level, 451 vchiq_log_info(vchiq_arm_log_level,
453 "saved bulk_waiter %x for pid %d", 452 "saved bulk_waiter %pK for pid %d",
454 (unsigned int)waiter, current->pid); 453 waiter, current->pid);
455 } 454 }
456 455
457 return status; 456 return status;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
index 335446e05476..778063ba312a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
@@ -52,18 +52,4 @@ static inline int __must_check down_interruptible_killable(struct semaphore *sem
52} 52}
53#define down_interruptible down_interruptible_killable 53#define down_interruptible down_interruptible_killable
54 54
55
56static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
57{
58 /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
59 int ret;
60 sigset_t blocked, oldset;
61 siginitsetinv(&blocked, SHUTDOWN_SIGS);
62 sigprocmask(SIG_SETMASK, &blocked, &oldset);
63 ret = mutex_lock_interruptible(lock);
64 sigprocmask(SIG_SETMASK, &oldset, NULL);
65 return ret;
66}
67#define mutex_lock_interruptible mutex_lock_interruptible_killable
68
69#endif 55#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
index d02e7764bd0d..dd43458f306f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -42,13 +42,13 @@
42/* ---- Constants and Types ---------------------------------------------- */ 42/* ---- Constants and Types ---------------------------------------------- */
43 43
44typedef struct { 44typedef struct {
45 void *armSharedMemVirt; 45 void *arm_shared_mem_virt;
46 dma_addr_t armSharedMemPhys; 46 dma_addr_t arm_shared_mem_phys;
47 size_t armSharedMemSize; 47 size_t arm_shared_mem_size;
48 48
49 void *vcSharedMemVirt; 49 void *vc_shared_mem_virt;
50 dma_addr_t vcSharedMemPhys; 50 dma_addr_t vc_shared_mem_phys;
51 size_t vcSharedMemSize; 51 size_t vc_shared_mem_size;
52} VCHIQ_SHARED_MEM_INFO_T; 52} VCHIQ_SHARED_MEM_INFO_T;
53 53
54/* ---- Variable Externs ------------------------------------------------- */ 54/* ---- Variable Externs ------------------------------------------------- */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 54a3ecec69ef..12c304ceb952 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -43,11 +43,13 @@
43#define PAGELIST_READ_WITH_FRAGMENTS 2 43#define PAGELIST_READ_WITH_FRAGMENTS 2
44 44
45typedef struct pagelist_struct { 45typedef struct pagelist_struct {
46 unsigned long length; 46 u32 length;
47 unsigned short type; 47 u16 type;
48 unsigned short offset; 48 u16 offset;
49 unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following 49 u32 addrs[1]; /* N.B. 12 LSBs hold the number
50 pages at consecutive addresses. */ 50 * of following pages at consecutive
51 * addresses.
52 */
51} PAGELIST_T; 53} PAGELIST_T;
52 54
53typedef struct fragments_struct { 55typedef struct fragments_struct {
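The pagelist is handed to the VideoCore, so the field widths are part of the protocol; unsigned long is 4 bytes on a 32-bit kernel but 8 on arm64, whereas u32/u16 pin the layout regardless of the kernel's data model. A schematic of the idea (not the exact structure, which keeps its addrs[1] tail):

#include <linux/types.h>

struct shared_pagelist {
	u32 length;
	u16 type;
	u16 offset;
	u32 addrs[];	/* 12 LSBs of each entry: count of following
			 * pages at consecutive addresses */
};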
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8072ff613636..d9771394a041 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL(vchi_msg_remove);
148 * Name: vchi_msg_queue 148 * Name: vchi_msg_queue
149 * 149 *
150 * Arguments: VCHI_SERVICE_HANDLE_T handle, 150 * Arguments: VCHI_SERVICE_HANDLE_T handle,
151 * const void *data, 151 * ssize_t (*copy_callback)(void *context, void *dest,
152 * uint32_t data_size, 152 * size_t offset, size_t maxsize),
153 * VCHI_FLAGS_T flags, 153 * void *context,
154 * void *msg_handle, 154 * uint32_t data_size
155 * 155 *
156 * Description: Thin wrapper to queue a message onto a connection 156 * Description: Thin wrapper to queue a message onto a connection
157 * 157 *
@@ -159,28 +159,29 @@ EXPORT_SYMBOL(vchi_msg_remove);
159 * 159 *
160 ***********************************************************/ 160 ***********************************************************/
161int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle, 161int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
162 const void *data, 162 ssize_t (*copy_callback)(void *context, void *dest,
163 uint32_t data_size, 163 size_t offset, size_t maxsize),
164 VCHI_FLAGS_T flags, 164 void *context,
165 void *msg_handle) 165 uint32_t data_size)
166{ 166{
167 SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle; 167 SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
168 VCHIQ_ELEMENT_T element = {data, data_size};
169 VCHIQ_STATUS_T status; 168 VCHIQ_STATUS_T status;
170 169
171 (void)msg_handle; 170 while (1) {
172 171 status = vchiq_queue_message(service->handle,
173 WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED); 172 copy_callback,
173 context,
174 data_size);
174 175
175 status = vchiq_queue_message(service->handle, &element, 1); 176 /*
177 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
178 * implement a retry mechanism since this function is supposed
179 * to block until queued
180 */
181 if (status != VCHIQ_RETRY)
182 break;
176 183
177 /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
178 ** implement a retry mechanism since this function is supposed
179 ** to block until queued
180 */
181 while (status == VCHIQ_RETRY) {
182 msleep(1); 184 msleep(1);
183 status = vchiq_queue_message(service->handle, &element, 1);
184 } 185 }
185 186
186 return vchiq_status_to_vchi(status); 187 return vchiq_status_to_vchi(status);
@@ -229,17 +230,18 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
229 return vchiq_status_to_vchi(VCHIQ_ERROR); 230 return vchiq_status_to_vchi(VCHIQ_ERROR);
230 } 231 }
231 232
232 status = vchiq_bulk_receive(service->handle, data_dst, data_size, 233 while (1) {
233 bulk_handle, mode);
234
235 /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
236 ** implement a retry mechanism since this function is supposed
237 ** to block until queued
238 */
239 while (status == VCHIQ_RETRY) {
240 msleep(1);
241 status = vchiq_bulk_receive(service->handle, data_dst, 234 status = vchiq_bulk_receive(service->handle, data_dst,
242 data_size, bulk_handle, mode); 235 data_size, bulk_handle, mode);
236 /*
237 * vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
238 * implement a retry mechanism since this function is supposed
239 * to block until queued
240 */
241 if (status != VCHIQ_RETRY)
242 break;
243
244 msleep(1);
243 } 245 }
244 246
245 return vchiq_status_to_vchi(status); 247 return vchiq_status_to_vchi(status);
@@ -289,17 +291,19 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
289 return vchiq_status_to_vchi(VCHIQ_ERROR); 291 return vchiq_status_to_vchi(VCHIQ_ERROR);
290 } 292 }
291 293
292 status = vchiq_bulk_transmit(service->handle, data_src, data_size, 294 while (1) {
293 bulk_handle, mode);
294
295 /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
296 ** implement a retry mechanism since this function is supposed
297 ** to block until queued
298 */
299 while (status == VCHIQ_RETRY) {
300 msleep(1);
301 status = vchiq_bulk_transmit(service->handle, data_src, 295 status = vchiq_bulk_transmit(service->handle, data_src,
302 data_size, bulk_handle, mode); 296 data_size, bulk_handle, mode);
297
298 /*
299 * vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
300 * implement a retry mechanism since this function is supposed
301 * to block until queued
302 */
303 if (status != VCHIQ_RETRY)
304 break;
305
306 msleep(1);
303 } 307 }
304 308
305 return vchiq_status_to_vchi(status); 309 return vchiq_status_to_vchi(status);
@@ -350,44 +354,6 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
350EXPORT_SYMBOL(vchi_msg_dequeue); 354EXPORT_SYMBOL(vchi_msg_dequeue);
351 355
352/*********************************************************** 356/***********************************************************
353 * Name: vchi_msg_queuev
354 *
355 * Arguments: VCHI_SERVICE_HANDLE_T handle,
356 * VCHI_MSG_VECTOR_T *vector,
357 * uint32_t count,
358 * VCHI_FLAGS_T flags,
359 * void *msg_handle
360 *
361 * Description: Thin wrapper to queue a message onto a connection
362 *
363 * Returns: int32_t - success == 0
364 *
365 ***********************************************************/
366
367vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
368vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
369 offsetof(VCHIQ_ELEMENT_T, data));
370vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
371 offsetof(VCHIQ_ELEMENT_T, size));
372
373int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
374 VCHI_MSG_VECTOR_T *vector,
375 uint32_t count,
376 VCHI_FLAGS_T flags,
377 void *msg_handle)
378{
379 SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
380
381 (void)msg_handle;
382
383 WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
384
385 return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
386 (const VCHIQ_ELEMENT_T *)vector, count));
387}
388EXPORT_SYMBOL(vchi_msg_queuev);
389
390/***********************************************************
391 * Name: vchi_held_msg_release 357 * Name: vchi_held_msg_release
392 * 358 *
393 * Arguments: VCHI_HELD_MSG_T *message 359 * Arguments: VCHI_HELD_MSG_T *message
@@ -400,8 +366,16 @@ EXPORT_SYMBOL(vchi_msg_queuev);
400 ***********************************************************/ 366 ***********************************************************/
401int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message) 367int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
402{ 368{
403 vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service, 369 /*
404 (VCHIQ_HEADER_T *)message->message); 370 * Convert the service field pointer back to an
371 * VCHIQ_SERVICE_HANDLE_T which is an int.
372 * This pointer is opaque to everything except
373 * vchi_msg_hold which simply upcasted the int
374 * to a pointer.
375 */
376
377 vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
378 (VCHIQ_HEADER_T *)message->message);
405 379
406 return 0; 380 return 0;
407} 381}
@@ -445,8 +419,16 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
445 *data = header->data; 419 *data = header->data;
446 *msg_size = header->size; 420 *msg_size = header->size;
447 421
422 /*
423 * upcast the VCHIQ_SERVICE_HANDLE_T which is an int
424 * to a pointer and stuff it in the held message.
425 * This pointer is opaque to everything except
426 * vchi_held_msg_release which simply downcasts it back
427 * to an int.
428 */
429
448 message_handle->service = 430 message_handle->service =
449 (struct opaque_vchi_service_t *)service->handle; 431 (struct opaque_vchi_service_t *)(long)service->handle;
450 message_handle->message = header; 432 message_handle->message = header;
451 433
452 return 0; 434 return 0;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 384acb8d2eae..f76f4d790532 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -61,8 +61,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
61 61
62void vchiu_queue_delete(VCHIU_QUEUE_T *queue) 62void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
63{ 63{
64 if (queue->storage != NULL) 64 kfree(queue->storage);
65 kfree(queue->storage);
66} 65}
67 66
68int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue) 67int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
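kfree() (like vfree() and kvfree()) accepts NULL and does nothing, so the removed guard was redundant:

/* Redundant:
 *	if (queue->storage != NULL)
 *		kfree(queue->storage);
 * kfree() tolerates NULL, so this is equivalent:
 */
kfree(queue->storage);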
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index d5d94c43c074..5577df3199e7 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -48,8 +48,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
48 PIO2_REGS_INT_MASK6, 48 PIO2_REGS_INT_MASK6,
49 PIO2_REGS_INT_MASK7 }; 49 PIO2_REGS_INT_MASK7 };
50 50
51
52
53#define PIO2_REGS_CTRL 0x18 51#define PIO2_REGS_CTRL 0x18
54#define PIO2_REGS_VME_VECTOR 0x19 52#define PIO2_REGS_VME_VECTOR 0x19
55#define PIO2_REGS_CNTR0 0x20 53#define PIO2_REGS_CNTR0 0x20
@@ -63,7 +61,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
63 61
64#define PIO2_REGS_ID 0x30 62#define PIO2_REGS_ID 0x30
65 63
66
67/* PIO2_REGS_DATAx (0x0 - 0x3) */ 64/* PIO2_REGS_DATAx (0x0 - 0x3) */
68 65
69static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 66static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -204,8 +201,6 @@ static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
204 201
205#define PIO2_CNTR_BCD 1 202#define PIO2_CNTR_BCD 1
206 203
207
208
209enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH }; 204enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
210enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 }; 205enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
211 206
@@ -240,10 +235,10 @@ struct pio2_card {
240 struct pio2_cntr cntr[6]; 235 struct pio2_cntr cntr[6];
241}; 236};
242 237
243int pio2_cntr_reset(struct pio2_card *); 238int pio2_cntr_reset(struct pio2_card *card);
244 239
245int pio2_gpio_reset(struct pio2_card *); 240int pio2_gpio_reset(struct pio2_card *card);
246int pio2_gpio_init(struct pio2_card *); 241int pio2_gpio_init(struct pio2_card *card);
247void pio2_gpio_exit(struct pio2_card *); 242void pio2_gpio_exit(struct pio2_card *card);
248 243
249#endif /* _VME_PIO2_H_ */ 244#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 8e66a520266c..20a2d835fdaa 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -365,7 +365,7 @@ static int pio2_probe(struct vme_dev *vdev)
365 vec = card->irq_vector | PIO2_VECTOR_CNTR[i]; 365 vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
366 366
367 retval = vme_irq_request(vdev, card->irq_level, vec, 367 retval = vme_irq_request(vdev, card->irq_level, vec,
368 &pio2_int, card); 368 &pio2_int, card);
369 if (retval < 0) { 369 if (retval < 0) {
370 dev_err(&card->vdev->dev, 370 dev_err(&card->vdev->dev,
371 "Unable to attach VME interrupt vector0x%x, level 0x%x\n", 371 "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index d84dffb894f4..87aa5174df22 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -661,7 +661,7 @@ err_sysfs:
661 } 661 }
662 class_destroy(vme_user_sysfs_class); 662 class_destroy(vme_user_sysfs_class);
663 663
664 /* Ensure counter set correcty to unalloc all master windows */ 664 /* Ensure counter set correctly to unalloc all master windows */
665 i = MASTER_MAX + 1; 665 i = MASTER_MAX + 1;
666err_master: 666err_master:
667 while (i > MASTER_MINOR) { 667 while (i > MASTER_MINOR) {
@@ -671,7 +671,7 @@ err_master:
671 } 671 }
672 672
673 /* 673 /*
674 * Ensure counter set correcty to unalloc all slave windows and buffers 674 * Ensure counter set correctly to unalloc all slave windows and buffers
675 */ 675 */
676 i = SLAVE_MAX + 1; 676 i = SLAVE_MAX + 1;
677err_slave: 677err_slave:
@@ -716,7 +716,7 @@ static int vme_user_remove(struct vme_dev *dev)
716 /* Unregister device driver */ 716 /* Unregister device driver */
717 cdev_del(vme_user_cdev); 717 cdev_del(vme_user_cdev);
718 718
719 /* Unregiser the major and minor device numbers */ 719 /* Unregister the major and minor device numbers */
720 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); 720 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
721 721
722 return 0; 722 return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de503a316e71..44dfa5421374 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: baseband.c 15 * File: baseband.c
21 * 16 *
22 * Purpose: Implement functions to access baseband 17 * Purpose: Implement functions to access baseband
@@ -1916,7 +1911,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
1916 * 1911 *
1917 * Parameters: 1912 * Parameters:
1918 * In: 1913 * In:
1919 * dwIoBase - I/O base address 1914 * iobase - I/O base address
1920 * byBBAddr - address of register in Baseband 1915 * byBBAddr - address of register in Baseband
1921 * Out: 1916 * Out:
1922 * pbyData - data read 1917 * pbyData - data read
@@ -1927,24 +1922,24 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
1927bool BBbReadEmbedded(struct vnt_private *priv, 1922bool BBbReadEmbedded(struct vnt_private *priv,
1928 unsigned char byBBAddr, unsigned char *pbyData) 1923 unsigned char byBBAddr, unsigned char *pbyData)
1929{ 1924{
1930 void __iomem *dwIoBase = priv->PortOffset; 1925 void __iomem *iobase = priv->PortOffset;
1931 unsigned short ww; 1926 unsigned short ww;
1932 unsigned char byValue; 1927 unsigned char byValue;
1933 1928
1934 /* BB reg offset */ 1929 /* BB reg offset */
1935 VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr); 1930 VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
1936 1931
1937 /* turn on REGR */ 1932 /* turn on REGR */
1938 MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGR); 1933 MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
1939 /* W_MAX_TIMEOUT is the timeout period */ 1934 /* W_MAX_TIMEOUT is the timeout period */
1940 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 1935 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
1941 VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue); 1936 VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
1942 if (byValue & BBREGCTL_DONE) 1937 if (byValue & BBREGCTL_DONE)
1943 break; 1938 break;
1944 } 1939 }
1945 1940
1946 /* get BB data */ 1941 /* get BB data */
1947 VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData); 1942 VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
1948 1943
1949 if (ww == W_MAX_TIMEOUT) { 1944 if (ww == W_MAX_TIMEOUT) {
1950 pr_debug(" DBG_PORT80(0x30)\n"); 1945 pr_debug(" DBG_PORT80(0x30)\n");
@@ -1958,7 +1953,7 @@ bool BBbReadEmbedded(struct vnt_private *priv,
1958 * 1953 *
1959 * Parameters: 1954 * Parameters:
1960 * In: 1955 * In:
1961 * dwIoBase - I/O base address 1956 * iobase - I/O base address
1962 * byBBAddr - address of register in Baseband 1957 * byBBAddr - address of register in Baseband
1963 * byData - data to write 1958 * byData - data to write
1964 * Out: 1959 * Out:
@@ -1970,20 +1965,20 @@ bool BBbReadEmbedded(struct vnt_private *priv,
1970bool BBbWriteEmbedded(struct vnt_private *priv, 1965bool BBbWriteEmbedded(struct vnt_private *priv,
1971 unsigned char byBBAddr, unsigned char byData) 1966 unsigned char byBBAddr, unsigned char byData)
1972{ 1967{
1973 void __iomem *dwIoBase = priv->PortOffset; 1968 void __iomem *iobase = priv->PortOffset;
1974 unsigned short ww; 1969 unsigned short ww;
1975 unsigned char byValue; 1970 unsigned char byValue;
1976 1971
1977 /* BB reg offset */ 1972 /* BB reg offset */
1978 VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr); 1973 VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
1979 /* set BB data */ 1974 /* set BB data */
1980 VNSvOutPortB(dwIoBase + MAC_REG_BBREGDATA, byData); 1975 VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
1981 1976
1982 /* turn on BBREGCTL_REGW */ 1977 /* turn on BBREGCTL_REGW */
1983 MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGW); 1978 MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
1984 /* W_MAX_TIMEOUT is the timeout period */ 1979 /* W_MAX_TIMEOUT is the timeout period */
1985 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 1980 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
1986 VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue); 1981 VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
1987 if (byValue & BBREGCTL_DONE) 1982 if (byValue & BBREGCTL_DONE)
1988 break; 1983 break;
1989 } 1984 }
@@ -2000,7 +1995,7 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
2000 * 1995 *
2001 * Parameters: 1996 * Parameters:
2002 * In: 1997 * In:
2003 * dwIoBase - I/O base address 1998 * iobase - I/O base address
2004 * byRevId - Revision ID 1999 * byRevId - Revision ID
2005 * byRFType - RF type 2000 * byRFType - RF type
2006 * Out: 2001 * Out:
@@ -2014,7 +2009,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
2014{ 2009{
2015 bool bResult = true; 2010 bool bResult = true;
2016 int ii; 2011 int ii;
2017 void __iomem *dwIoBase = priv->PortOffset; 2012 void __iomem *iobase = priv->PortOffset;
2018 unsigned char byRFType = priv->byRFType; 2013 unsigned char byRFType = priv->byRFType;
2019 unsigned char byLocalID = priv->byLocalID; 2014 unsigned char byLocalID = priv->byLocalID;
2020 2015
@@ -2036,8 +2031,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
2036 byVT3253B0_AGC4_RFMD2959[ii][0], 2031 byVT3253B0_AGC4_RFMD2959[ii][0],
2037 byVT3253B0_AGC4_RFMD2959[ii][1]); 2032 byVT3253B0_AGC4_RFMD2959[ii][1]);
2038 2033
2039 VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23); 2034 VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23);
2040 MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0)); 2035 MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
2041 } 2036 }
2042 priv->abyBBVGA[0] = 0x18; 2037 priv->abyBBVGA[0] = 0x18;
2043 priv->abyBBVGA[1] = 0x0A; 2038 priv->abyBBVGA[1] = 0x0A;
@@ -2076,8 +2071,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
2076 byVT3253B0_AGC[ii][0], 2071 byVT3253B0_AGC[ii][0],
2077 byVT3253B0_AGC[ii][1]); 2072 byVT3253B0_AGC[ii][1]);
2078 2073
2079 VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23); 2074 VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
2080 MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0)); 2075 MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
2081 2076
2082 priv->abyBBVGA[0] = 0x14; 2077 priv->abyBBVGA[0] = 0x14;
2083 priv->abyBBVGA[1] = 0x0A; 2078 priv->abyBBVGA[1] = 0x0A;
@@ -2098,7 +2093,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
2098 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) 2093 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
2099 */ 2094 */
2100 2095
2101 /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/ 2096 /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
2102 2097
2103 /* Init ANT B select, 2098 /* Init ANT B select,
2104 * RX Config CR10 = 0x28->0x2A, 2099 * RX Config CR10 = 0x28->0x2A,
@@ -2106,7 +2101,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
2106 * make the ANT_A, ANT_B inverted) 2101 * make the ANT_A, ANT_B inverted)
2107 */ 2102 */
2108 2103
2109 /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ 2104 /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
2110 /* Select VC1/VC2, CR215 = 0x02->0x06 */ 2105 /* Select VC1/VC2, CR215 = 0x02->0x06 */
2111 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); 2106 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
2112 2107
@@ -2154,7 +2149,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
2154 priv->ldBmThreshold[2] = 0; 2149 priv->ldBmThreshold[2] = 0;
2155 priv->ldBmThreshold[3] = 0; 2150 priv->ldBmThreshold[3] = 0;
2156 /* Fix VT3226 DFC system timing issue */ 2151 /* Fix VT3226 DFC system timing issue */
2157 MACvSetRFLE_LatchBase(dwIoBase); 2152 MACvSetRFLE_LatchBase(iobase);
2158 /* {{ RobertYu: 20050104 */ 2153 /* {{ RobertYu: 20050104 */
2159 } else if (byRFType == RF_AIROHA7230) { 2154 } else if (byRFType == RF_AIROHA7230) {
2160 for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++) 2155 for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
@@ -2162,16 +2157,15 @@ bool BBbVT3253Init(struct vnt_private *priv)
2162 byVT3253B0_AIROHA2230[ii][0], 2157 byVT3253B0_AIROHA2230[ii][0],
2163 byVT3253B0_AIROHA2230[ii][1]); 2158 byVT3253B0_AIROHA2230[ii][1]);
2164 2159
2165
2166 /* {{ RobertYu:20050223, request by JerryChung */ 2160 /* {{ RobertYu:20050223, request by JerryChung */
2167 /* Init ANT B select,TX Config CR09 = 0x61->0x45, 2161 /* Init ANT B select,TX Config CR09 = 0x61->0x45,
2168 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) 2162 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
2169 */ 2163 */
2170 /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/ 2164 /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
2171 /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 2165 /* Init ANT B select,RX Config CR10 = 0x28->0x2A,
2172 * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) 2166 * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
2173 */ 2167 */
2174 /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ 2168 /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
2175 /* Select VC1/VC2, CR215 = 0x02->0x06 */ 2169 /* Select VC1/VC2, CR215 = 0x02->0x06 */
2176 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); 2170 bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
2177 /* }} */ 2171 /* }} */
@@ -2259,7 +2253,7 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
2259 * 2253 *
2260 * Parameters: 2254 * Parameters:
2261 * In: 2255 * In:
2262 * dwIoBase - I/O base address 2256 * iobase - I/O base address
2263 * Out: 2257 * Out:
2264 * none 2258 * none
2265 * 2259 *
@@ -2280,7 +2274,7 @@ BBvSoftwareReset(struct vnt_private *priv)
2280 * 2274 *
2281 * Parameters: 2275 * Parameters:
2282 * In: 2276 * In:
2283 * dwIoBase - I/O base address 2277 * iobase - I/O base address
2284 * Out: 2278 * Out:
2285 * none 2279 * none
2286 * 2280 *
@@ -2302,7 +2296,7 @@ BBvPowerSaveModeON(struct vnt_private *priv)
2302 * 2296 *
2303 * Parameters: 2297 * Parameters:
2304 * In: 2298 * In:
2305 * dwIoBase - I/O base address 2299 * iobase - I/O base address
2306 * Out: 2300 * Out:
2307 * none 2301 * none
2308 * 2302 *
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index b4e8c43180ec..8a567c9155b4 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: baseband.h 15 * File: baseband.h
21 * 16 *
22 * Purpose: Implement functions to access baseband 17 * Purpose: Implement functions to access baseband
@@ -60,12 +55,6 @@
60#define TOP_RATE_2M 0x00200000 55#define TOP_RATE_2M 0x00200000
61#define TOP_RATE_1M 0x00100000 56#define TOP_RATE_1M 0x00100000
62 57
63#define BBvClearFOE(dwIoBase) \
64 BBbWriteEmbedded(dwIoBase, 0xB1, 0)
65
66#define BBvSetFOE(dwIoBase) \
67 BBbWriteEmbedded(dwIoBase, 0xB1, 0x0C)
68
69unsigned int 58unsigned int
70BBuGetFrameTime( 59BBuGetFrameTime(
71 unsigned char byPreambleType, 60 unsigned char byPreambleType,
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index dbcea4434725..e0c92818ed70 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: card.c 15 * File: card.c
20 * Purpose: Provide functions to setup NIC operation mode 16 * Purpose: Provide functions to setup NIC operation mode
21 * Functions: 17 * Functions:
@@ -36,7 +32,7 @@
36 * 32 *
37 * Revision History: 33 * Revision History:
38 * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. 34 * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
39 * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase. 35 * 08-26-2003 Kyle Hsu: Modify the defination type of iobase.
40 * 09-01-2003 Bryan YC Fan: Add vUpdateIFS(). 36 * 09-01-2003 Bryan YC Fan: Add vUpdateIFS().
41 * 37 *
42 */ 38 */
@@ -261,7 +257,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
261 BBbWriteEmbedded(priv, 0x88, 0x02); 257 BBbWriteEmbedded(priv, 0x88, 0x02);
262 bySlot = C_SLOT_LONG; 258 bySlot = C_SLOT_LONG;
263 bySIFS = C_SIFS_BG; 259 bySIFS = C_SIFS_BG;
264 byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; 260 byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
265 byCWMaxMin = 0xA5; 261 byCWMaxMin = 0xA5;
266 } else { /* PK_TYPE_11GA & PK_TYPE_11GB */ 262 } else { /* PK_TYPE_11GA & PK_TYPE_11GB */
267 MACvSetBBType(priv->PortOffset, BB_TYPE_11G); 263 MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
@@ -289,7 +285,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
289 byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT; 285 byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
290 } else { 286 } else {
291 bySlot = C_SLOT_LONG; 287 bySlot = C_SLOT_LONG;
292 byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; 288 byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
293 } 289 }
294 290
295 byCWMaxMin = 0xa4; 291 byCWMaxMin = 0xa4;
@@ -528,8 +524,11 @@ CARDvSafeResetTx(
528 struct vnt_tx_desc *pCurrTD; 524 struct vnt_tx_desc *pCurrTD;
529 525
530 /* initialize TD index */ 526 /* initialize TD index */
531 priv->apTailTD[0] = priv->apCurrTD[0] = &(priv->apTD0Rings[0]); 527 priv->apTailTD[0] = &(priv->apTD0Rings[0]);
532 priv->apTailTD[1] = priv->apCurrTD[1] = &(priv->apTD1Rings[0]); 528 priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
529
530 priv->apTailTD[1] = &(priv->apTD1Rings[0]);
531 priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
533 532
534 for (uu = 0; uu < TYPE_MAXTD; uu++) 533 for (uu = 0; uu < TYPE_MAXTD; uu++)
535 priv->iTDUsed[uu] = 0; 534 priv->iTDUsed[uu] = 0;
@@ -938,20 +937,20 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
938 */ 937 */
939bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF) 938bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
940{ 939{
941 void __iomem *dwIoBase = priv->PortOffset; 940 void __iomem *iobase = priv->PortOffset;
942 unsigned short ww; 941 unsigned short ww;
943 unsigned char byData; 942 unsigned char byData;
944 943
945 MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD); 944 MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
946 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 945 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
947 VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData); 946 VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
948 if (!(byData & TFTCTL_TSFCNTRRD)) 947 if (!(byData & TFTCTL_TSFCNTRRD))
949 break; 948 break;
950 } 949 }
951 if (ww == W_MAX_TIMEOUT) 950 if (ww == W_MAX_TIMEOUT)
952 return false; 951 return false;
953 VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF); 952 VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
954 VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1); 953 VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
955 954
956 return true; 955 return true;
957} 956}
@@ -989,7 +988,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
989 * 988 *
990 * Parameters: 989 * Parameters:
991 * In: 990 * In:
992 * dwIoBase - IO Base 991 * iobase - IO Base
993 * wBeaconInterval - Beacon Interval 992 * wBeaconInterval - Beacon Interval
994 * Out: 993 * Out:
995 * none 994 * none
@@ -999,16 +998,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
999void CARDvSetFirstNextTBTT(struct vnt_private *priv, 998void CARDvSetFirstNextTBTT(struct vnt_private *priv,
1000 unsigned short wBeaconInterval) 999 unsigned short wBeaconInterval)
1001{ 1000{
1002 void __iomem *dwIoBase = priv->PortOffset; 1001 void __iomem *iobase = priv->PortOffset;
1003 u64 qwNextTBTT = 0; 1002 u64 qwNextTBTT = 0;
1004 1003
1005 CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */ 1004 CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
1006 1005
1007 qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); 1006 qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
1008 /* Set NextTBTT */ 1007 /* Set NextTBTT */
1009 VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); 1008 VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
1010 VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32)); 1009 VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
1011 MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); 1010 MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
1012} 1011}
1013 1012
1014/* 1013/*
@@ -1028,12 +1027,12 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
1028void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, 1027void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
1029 unsigned short wBeaconInterval) 1028 unsigned short wBeaconInterval)
1030{ 1029{
1031 void __iomem *dwIoBase = priv->PortOffset; 1030 void __iomem *iobase = priv->PortOffset;
1032 1031
1033 qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); 1032 qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
1034 /* Set NextTBTT */ 1033 /* Set NextTBTT */
1035 VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF); 1034 VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF);
1036 VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32)); 1035 VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
1037 MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); 1036 MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
1038 pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF); 1037 pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
1039} 1038}
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 0203c7fd91a2..44420b5a445f 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: card.h 15 * File: card.h
20 * 16 *
21 * Purpose: Provide functions to setup NIC operation mode 17 * Purpose: Provide functions to setup NIC operation mode
@@ -50,7 +46,7 @@
50 46
51#define CB_MAX_CHANNEL_24G 14 47#define CB_MAX_CHANNEL_24G 14
52#define CB_MAX_CHANNEL_5G 42 48#define CB_MAX_CHANNEL_5G 42
53#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G) 49#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G + CB_MAX_CHANNEL_5G)
54 50
55typedef enum _CARD_PKT_TYPE { 51typedef enum _CARD_PKT_TYPE {
56 PKT_TYPE_802_11_BCN, 52 PKT_TYPE_802_11_BCN,
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 029a8df4ca1c..ab89956511a0 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: channel.c 15 * File: channel.c
20 * 16 *
21 */ 17 */
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 2d613e7f169c..2621dfabff06 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: channel.h 15 * File: channel.h
20 * 16 *
21 */ 17 */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 2d7f6ae89164..2fee6e759ad8 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: desc.h 15 * File: desc.h
20 * 16 *
21 * Purpose:The header file of descriptor 17 * Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 55405e058196..3ae40d846a09 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: device.h 15 * File: device.h
20 * 16 *
21 * Purpose: MAC Data structure 17 * Purpose: MAC Data structure
@@ -283,12 +279,12 @@ struct vnt_private {
283 unsigned char byOFDMPwrG; 279 unsigned char byOFDMPwrG;
284 unsigned char byCurPwr; 280 unsigned char byCurPwr;
285 char byCurPwrdBm; 281 char byCurPwrdBm;
286 unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1]; 282 unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1];
287 unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1]; 283 unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1];
288 char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1]; 284 char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G + 1];
289 char abyOFDMDefaultPwr[CB_MAX_CHANNEL+1]; 285 char abyOFDMDefaultPwr[CB_MAX_CHANNEL + 1];
290 char abyRegPwr[CB_MAX_CHANNEL+1]; 286 char abyRegPwr[CB_MAX_CHANNEL + 1];
291 char abyLocalPwr[CB_MAX_CHANNEL+1]; 287 char abyLocalPwr[CB_MAX_CHANNEL + 1];
292 288
293 /* BaseBand Loopback Use */ 289 /* BaseBand Loopback Use */
294 unsigned char byBBCR4d; 290 unsigned char byBBCR4d;
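
Aside on the device.h hunk above: the per-channel power tables are declared with one extra slot (CB_MAX_CHANNEL_24G + 1, CB_MAX_CHANNEL + 1) because the driver fills them from index 1, which lets them be indexed directly by the 1-based channel number, as the device_main.c hunk further down shows. A minimal sketch of that convention, using hypothetical names rather than the driver's own, might look like:

#define MAX_CHANNEL_24G 14	/* hypothetical stand-in for CB_MAX_CHANNEL_24G */

/* Slot 0 is unused; slots 1..MAX_CHANNEL_24G map 1-based channel numbers. */
static unsigned char cck_pwr_tbl[MAX_CHANNEL_24G + 1];

static unsigned char cck_pwr_for_channel(unsigned int ch)
{
	return cck_pwr_tbl[ch];		/* ch in 1..MAX_CHANNEL_24G */
}
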
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index b4c9547d3138..0298ea923f97 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: device_cfg.h 15 * File: device_cfg.h
20 * 16 *
21 * Purpose: Driver configuration header 17 * Purpose: Driver configuration header
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eeac358d..da0f71191009 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: device_main.c 15 * File: device_main.c
20 * 16 *
21 * Purpose: driver entry for initial, open, close, tx and rx. 17 * Purpose: driver entry for initial, open, close, tx and rx.
@@ -314,7 +310,7 @@ static void device_init_registers(struct vnt_private *priv)
314 SROMbyReadEmbedded(priv->PortOffset, 310 SROMbyReadEmbedded(priv->PortOffset,
315 (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL)); 311 (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
316 if (priv->abyCCKPwrTbl[ii + 1] == 0) 312 if (priv->abyCCKPwrTbl[ii + 1] == 0)
317 priv->abyCCKPwrTbl[ii+1] = priv->byCCKPwr; 313 priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
318 314
319 priv->abyOFDMPwrTbl[ii + 1] = 315 priv->abyOFDMPwrTbl[ii + 1] =
320 SROMbyReadEmbedded(priv->PortOffset, 316 SROMbyReadEmbedded(priv->PortOffset,
@@ -556,7 +552,7 @@ static void device_init_rd0_ring(struct vnt_private *priv)
556 if (!device_alloc_rx_buf(priv, desc)) 552 if (!device_alloc_rx_buf(priv, desc))
557 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n"); 553 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
558 554
559 desc->next = &(priv->aRD0Ring[(i+1) % priv->opts.rx_descs0]); 555 desc->next = &(priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0]);
560 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc)); 556 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
561 } 557 }
562 558
@@ -1272,7 +1268,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
1272 priv->op_mode = NL80211_IFTYPE_UNSPECIFIED; 1268 priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
1273} 1269}
1274 1270
1275
1276static int vnt_config(struct ieee80211_hw *hw, u32 changed) 1271static int vnt_config(struct ieee80211_hw *hw, u32 changed)
1277{ 1272{
1278 struct vnt_private *priv = hw->priv; 1273 struct vnt_private *priv = hw->priv;
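
The device_init_rd0_ring() change above is whitespace-only, but the line it touches is the one that closes the receive descriptor ring: entry i links to entry (i + 1) % rx_descs0, so the last descriptor wraps back to the first. A minimal standalone sketch of that circular-linking pattern, with hypothetical structure and function names, might be:

#include <stddef.h>

struct desc {
	struct desc *next;	/* software link to the next descriptor */
};

/*
 * Link n descriptors into a ring: entry i points at entry (i + 1) % n,
 * so the final entry wraps back to the first.
 */
static void link_ring(struct desc *ring, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		ring[i].next = &ring[(i + 1) % n];
}
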
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 700032e9c477..9b3fa779258a 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: dpc.c 15 * File: dpc.c
20 * 16 *
21 * Purpose: handle dpc rx functions 17 * Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index e80b30816968..6e75fa9c5618 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: dpc.h 15 * File: dpc.h
20 * 16 *
21 * Purpose: 17 * Purpose:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index e161d5d9aebb..dad9e292d4da 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: key.c 15 * File: key.c
21 * 16 *
22 * Purpose: Implement functions for 802.11i Key management 17 * Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index d72719741a56..a5024611af60 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: key.h 15 * File: key.h
21 * 16 *
22 * Purpose: Implement functions for 802.11i Key management 17 * Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 8e13f7f41415..4aaa99bafcda 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: mac.c 15 * File: mac.c
21 * 16 *
22 * Purpose: MAC routines 17 * Purpose: MAC routines
@@ -147,7 +142,6 @@ void MACvSetShortRetryLimit(struct vnt_private *priv,
147 iowrite8(byRetryLimit, io_base + MAC_REG_SRT); 142 iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
148} 143}
149 144
150
151/* 145/*
152 * Description: 146 * Description:
153 * Set 802.11 Long Retry Limit 147 * Set 802.11 Long Retry Limit
@@ -321,7 +315,7 @@ bool MACbSoftwareReset(struct vnt_private *priv)
321 */ 315 */
322bool MACbSafeSoftwareReset(struct vnt_private *priv) 316bool MACbSafeSoftwareReset(struct vnt_private *priv)
323{ 317{
324 unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1]; 318 unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
325 bool bRetVal; 319 bool bRetVal;
326 320
327 /* PATCH.... 321 /* PATCH....
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 030f529c339b..33b758cb79d4 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: mac.h 15 * File: mac.h
21 * 16 *
22 * Purpose: MAC routines 17 * Purpose: MAC routines
@@ -554,341 +549,341 @@
554 549
555/*--------------------- Export Macros ------------------------------*/ 550/*--------------------- Export Macros ------------------------------*/
556 551
557#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits) \ 552#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
558do { \ 553do { \
559 unsigned char byData; \ 554 unsigned char byData; \
560 VNSvInPortB(dwIoBase + byRegOfs, &byData); \ 555 VNSvInPortB(iobase + byRegOfs, &byData); \
561 VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \ 556 VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
562} while (0) 557} while (0)
563 558
564#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits) \ 559#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
565do { \ 560do { \
566 unsigned short wData; \ 561 unsigned short wData; \
567 VNSvInPortW(dwIoBase + byRegOfs, &wData); \ 562 VNSvInPortW(iobase + byRegOfs, &wData); \
568 VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits)); \ 563 VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
569} while (0) 564} while (0)
570 565
571#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits) \ 566#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \
572do { \ 567do { \
573 unsigned long dwData; \ 568 unsigned long dwData; \
574 VNSvInPortD(dwIoBase + byRegOfs, &dwData); \ 569 VNSvInPortD(iobase + byRegOfs, &dwData); \
575 VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits)); \ 570 VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \
576} while (0) 571} while (0)
577 572
578#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits) \ 573#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \
579do { \ 574do { \
580 unsigned char byData; \ 575 unsigned char byData; \
581 VNSvInPortB(dwIoBase + byRegOfs, &byData); \ 576 VNSvInPortB(iobase + byRegOfs, &byData); \
582 byData &= byMask; \ 577 byData &= byMask; \
583 VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \ 578 VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
584} while (0) 579} while (0)
585 580
586#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits) \ 581#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
587do { \ 582do { \
588 unsigned char byData; \ 583 unsigned char byData; \
589 VNSvInPortB(dwIoBase + byRegOfs, &byData); \ 584 VNSvInPortB(iobase + byRegOfs, &byData); \
590 VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits)); \ 585 VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \
591} while (0) 586} while (0)
592 587
593#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits) \ 588#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
594do { \ 589do { \
595 unsigned short wData; \ 590 unsigned short wData; \
596 VNSvInPortW(dwIoBase + byRegOfs, &wData); \ 591 VNSvInPortW(iobase + byRegOfs, &wData); \
597 VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits)); \ 592 VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
598} while (0) 593} while (0)
599 594
600#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits) \ 595#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \
601do { \ 596do { \
602 unsigned long dwData; \ 597 unsigned long dwData; \
603 VNSvInPortD(dwIoBase + byRegOfs, &dwData); \ 598 VNSvInPortD(iobase + byRegOfs, &dwData); \
604 VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits)); \ 599 VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \
605} while (0) 600} while (0)
606 601
607#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr) \ 602#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \
608 VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, \ 603 VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \
609 (unsigned long *)pdwCurrDescAddr) 604 (unsigned long *)pdwCurrDescAddr)
610 605
611#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr) \ 606#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \
612 VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, \ 607 VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \
613 (unsigned long *)pdwCurrDescAddr) 608 (unsigned long *)pdwCurrDescAddr)
614 609
615#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr) \ 610#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \
616 VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, \ 611 VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \
617 (unsigned long *)pdwCurrDescAddr) 612 (unsigned long *)pdwCurrDescAddr)
618 613
619#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr) \ 614#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \
620 VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, \ 615 VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \
621 (unsigned long *)pdwCurrDescAddr) 616 (unsigned long *)pdwCurrDescAddr)
622 617
623#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr) \ 618#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \
624 VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR, \ 619 VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \
625 (unsigned long *)pdwCurrDescAddr) 620 (unsigned long *)pdwCurrDescAddr)
626 621
627#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr) \ 622#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \
628 VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \ 623 VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \
629 (unsigned long *)pdwCurrDescAddr) 624 (unsigned long *)pdwCurrDescAddr)
630 625
631/* set the chip with current BCN tx descriptor address */ 626/* set the chip with current BCN tx descriptor address */
632#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr) \ 627#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
633 VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, \ 628 VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
634 dwCurrDescAddr) 629 dwCurrDescAddr)
635 630
636/* set the chip with current BCN length */ 631/* set the chip with current BCN length */
637#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength) \ 632#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \
638 VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2, \ 633 VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2, \
639 wCurrBCNLength) 634 wCurrBCNLength)
640 635
641#define MACvReadBSSIDAddress(dwIoBase, pbyEtherAddr) \ 636#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
642do { \ 637do { \
643 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ 638 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
644 VNSvInPortB(dwIoBase + MAC_REG_BSSID0, \ 639 VNSvInPortB(iobase + MAC_REG_BSSID0, \
645 (unsigned char *)pbyEtherAddr); \ 640 (unsigned char *)pbyEtherAddr); \
646 VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1, \ 641 VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \
647 pbyEtherAddr + 1); \ 642 pbyEtherAddr + 1); \
648 VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2, \ 643 VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \
649 pbyEtherAddr + 2); \ 644 pbyEtherAddr + 2); \
650 VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 3, \ 645 VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \
651 pbyEtherAddr + 3); \ 646 pbyEtherAddr + 3); \
652 VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 4, \ 647 VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \
653 pbyEtherAddr + 4); \ 648 pbyEtherAddr + 4); \
654 VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 5, \ 649 VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \
655 pbyEtherAddr + 5); \ 650 pbyEtherAddr + 5); \
656 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ 651 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
657} while (0) 652} while (0)
658 653
659#define MACvWriteBSSIDAddress(dwIoBase, pbyEtherAddr) \ 654#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
660do { \ 655do { \
661 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ 656 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
662 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0, \ 657 VNSvOutPortB(iobase + MAC_REG_BSSID0, \
663 *(pbyEtherAddr)); \ 658 *(pbyEtherAddr)); \
664 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 1, \ 659 VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \
665 *(pbyEtherAddr + 1)); \ 660 *(pbyEtherAddr + 1)); \
666 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 2, \ 661 VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \
667 *(pbyEtherAddr + 2)); \ 662 *(pbyEtherAddr + 2)); \
668 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 3, \ 663 VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \
669 *(pbyEtherAddr + 3)); \ 664 *(pbyEtherAddr + 3)); \
670 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 4, \ 665 VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \
671 *(pbyEtherAddr + 4)); \ 666 *(pbyEtherAddr + 4)); \
672 VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 5, \ 667 VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \
673 *(pbyEtherAddr + 5)); \ 668 *(pbyEtherAddr + 5)); \
674 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ 669 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
675} while (0) 670} while (0)
676 671
677#define MACvReadEtherAddress(dwIoBase, pbyEtherAddr) \ 672#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
678do { \ 673do { \
679 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ 674 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
680 VNSvInPortB(dwIoBase + MAC_REG_PAR0, \ 675 VNSvInPortB(iobase + MAC_REG_PAR0, \
681 (unsigned char *)pbyEtherAddr); \ 676 (unsigned char *)pbyEtherAddr); \
682 VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1, \ 677 VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \
683 pbyEtherAddr + 1); \ 678 pbyEtherAddr + 1); \
684 VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2, \ 679 VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \
685 pbyEtherAddr + 2); \ 680 pbyEtherAddr + 2); \
686 VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 3, \ 681 VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \
687 pbyEtherAddr + 3); \ 682 pbyEtherAddr + 3); \
688 VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 4, \ 683 VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \
689 pbyEtherAddr + 4); \ 684 pbyEtherAddr + 4); \
690 VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 5, \ 685 VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \
691 pbyEtherAddr + 5); \ 686 pbyEtherAddr + 5); \
692 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ 687 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
693} while (0) 688} while (0)
694 689
695#define MACvWriteEtherAddress(dwIoBase, pbyEtherAddr) \ 690#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \
696do { \ 691do { \
697 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ 692 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
698 VNSvOutPortB(dwIoBase + MAC_REG_PAR0, \ 693 VNSvOutPortB(iobase + MAC_REG_PAR0, \
699 *pbyEtherAddr); \ 694 *pbyEtherAddr); \
700 VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 1, \ 695 VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \
701 *(pbyEtherAddr + 1)); \ 696 *(pbyEtherAddr + 1)); \
702 VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 2, \ 697 VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \
703 *(pbyEtherAddr + 2)); \ 698 *(pbyEtherAddr + 2)); \
704 VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 3, \ 699 VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \
705 *(pbyEtherAddr + 3)); \ 700 *(pbyEtherAddr + 3)); \
706 VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 4, \ 701 VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \
707 *(pbyEtherAddr + 4)); \ 702 *(pbyEtherAddr + 4)); \
708 VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 5, \ 703 VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \
709 *(pbyEtherAddr + 5)); \ 704 *(pbyEtherAddr + 5)); \
710 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ 705 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
711} while (0) 706} while (0)
712 707
713#define MACvClearISR(dwIoBase) \ 708#define MACvClearISR(iobase) \
714 VNSvOutPortD(dwIoBase + MAC_REG_ISR, IMR_MASK_VALUE) 709 VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
715 710
716#define MACvStart(dwIoBase) \ 711#define MACvStart(iobase) \
717 VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, \ 712 VNSvOutPortB(iobase + MAC_REG_HOSTCR, \
718 (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON)) 713 (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
719 714
720#define MACvRx0PerPktMode(dwIoBase) \ 715#define MACvRx0PerPktMode(iobase) \
721 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKT) 716 VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
722 717
723#define MACvRx0BufferFillMode(dwIoBase) \ 718#define MACvRx0BufferFillMode(iobase) \
724 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKTCLR) 719 VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
725 720
726#define MACvRx1PerPktMode(dwIoBase) \ 721#define MACvRx1PerPktMode(iobase) \
727 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKT) 722 VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
728 723
729#define MACvRx1BufferFillMode(dwIoBase) \ 724#define MACvRx1BufferFillMode(iobase) \
730 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKTCLR) 725 VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
731 726
732#define MACvRxOn(dwIoBase) \ 727#define MACvRxOn(iobase) \
733 MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON) 728 MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
734 729
735#define MACvReceive0(dwIoBase) \ 730#define MACvReceive0(iobase) \
736do { \ 731do { \
737 unsigned long dwData; \ 732 unsigned long dwData; \
738 VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); \ 733 VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \
739 if (dwData & DMACTL_RUN) \ 734 if (dwData & DMACTL_RUN) \
740 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \ 735 VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
741 else \ 736 else \
742 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); \ 737 VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
743} while (0) 738} while (0)
744 739
745#define MACvReceive1(dwIoBase) \ 740#define MACvReceive1(iobase) \
746do { \ 741do { \
747 unsigned long dwData; \ 742 unsigned long dwData; \
748 VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); \ 743 VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \
749 if (dwData & DMACTL_RUN) \ 744 if (dwData & DMACTL_RUN) \
750 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \ 745 VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
751 else \ 746 else \
752 VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); \ 747 VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
753} while (0) 748} while (0)
754 749
755#define MACvTxOn(dwIoBase) \ 750#define MACvTxOn(iobase) \
756 MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON) 751 MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
757 752
758#define MACvTransmit0(dwIoBase) \ 753#define MACvTransmit0(iobase) \
759do { \ 754do { \
760 unsigned long dwData; \ 755 unsigned long dwData; \
761 VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); \ 756 VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \
762 if (dwData & DMACTL_RUN) \ 757 if (dwData & DMACTL_RUN) \
763 VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \ 758 VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
764 else \ 759 else \
765 VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); \ 760 VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
766} while (0) 761} while (0)
767 762
768#define MACvTransmitAC0(dwIoBase) \ 763#define MACvTransmitAC0(iobase) \
769do { \ 764do { \
770 unsigned long dwData; \ 765 unsigned long dwData; \
771 VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); \ 766 VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \
772 if (dwData & DMACTL_RUN) \ 767 if (dwData & DMACTL_RUN) \
773 VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \ 768 VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
774 else \ 769 else \
775 VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); \ 770 VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
776} while (0) 771} while (0)
777 772
778#define MACvTransmitSYNC(dwIoBase) \ 773#define MACvTransmitSYNC(iobase) \
779do { \ 774do { \
780 unsigned long dwData; \ 775 unsigned long dwData; \
781 VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData); \ 776 VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \
782 if (dwData & DMACTL_RUN) \ 777 if (dwData & DMACTL_RUN) \
783 VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \ 778 VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
784 else \ 779 else \
785 VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \ 780 VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
786} while (0) 781} while (0)
787 782
788#define MACvTransmitATIM(dwIoBase) \ 783#define MACvTransmitATIM(iobase) \
789do { \ 784do { \
790 unsigned long dwData; \ 785 unsigned long dwData; \
791 VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData); \ 786 VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \
792 if (dwData & DMACTL_RUN) \ 787 if (dwData & DMACTL_RUN) \
793 VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \ 788 VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
794 else \ 789 else \
795 VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \ 790 VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
796} while (0) 791} while (0)
797 792
798#define MACvTransmitBCN(dwIoBase) \ 793#define MACvTransmitBCN(iobase) \
799 VNSvOutPortB(dwIoBase + MAC_REG_BCNDMACTL, BEACON_READY) 794 VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
800 795
801#define MACvClearStckDS(dwIoBase) \ 796#define MACvClearStckDS(iobase) \
802do { \ 797do { \
803 unsigned char byOrgValue; \ 798 unsigned char byOrgValue; \
804 VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue); \ 799 VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \
805 byOrgValue = byOrgValue & 0xFC; \ 800 byOrgValue = byOrgValue & 0xFC; \
806 VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue); \ 801 VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \
807} while (0) 802} while (0)
808 803
809#define MACvReadISR(dwIoBase, pdwValue) \ 804#define MACvReadISR(iobase, pdwValue) \
810 VNSvInPortD(dwIoBase + MAC_REG_ISR, pdwValue) 805 VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
811 806
812#define MACvWriteISR(dwIoBase, dwValue) \ 807#define MACvWriteISR(iobase, dwValue) \
813 VNSvOutPortD(dwIoBase + MAC_REG_ISR, dwValue) 808 VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
814 809
815#define MACvIntEnable(dwIoBase, dwMask) \ 810#define MACvIntEnable(iobase, dwMask) \
816 VNSvOutPortD(dwIoBase + MAC_REG_IMR, dwMask) 811 VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
817 812
818#define MACvIntDisable(dwIoBase) \ 813#define MACvIntDisable(iobase) \
819 VNSvOutPortD(dwIoBase + MAC_REG_IMR, 0) 814 VNSvOutPortD(iobase + MAC_REG_IMR, 0)
820 815
821#define MACvSelectPage0(dwIoBase) \ 816#define MACvSelectPage0(iobase) \
822 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0) 817 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
823 818
824#define MACvSelectPage1(dwIoBase) \ 819#define MACvSelectPage1(iobase) \
825 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1) 820 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
826 821
827#define MACvReadMIBCounter(dwIoBase, pdwCounter) \ 822#define MACvReadMIBCounter(iobase, pdwCounter) \
828 VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter) 823 VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
829 824
830#define MACvPwrEvntDisable(dwIoBase) \ 825#define MACvPwrEvntDisable(iobase) \
831 VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000) 826 VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
832 827
833#define MACvEnableProtectMD(dwIoBase) \ 828#define MACvEnableProtectMD(iobase) \
834do { \ 829do { \
835 unsigned long dwOrgValue; \ 830 unsigned long dwOrgValue; \
836 VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ 831 VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
837 dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \ 832 dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
838 VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ 833 VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
839} while (0) 834} while (0)
840 835
841#define MACvDisableProtectMD(dwIoBase) \ 836#define MACvDisableProtectMD(iobase) \
842do { \ 837do { \
843 unsigned long dwOrgValue; \ 838 unsigned long dwOrgValue; \
844 VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ 839 VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
845 dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \ 840 dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
846 VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ 841 VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
847} while (0) 842} while (0)
848 843
849#define MACvEnableBarkerPreambleMd(dwIoBase) \ 844#define MACvEnableBarkerPreambleMd(iobase) \
850do { \ 845do { \
851 unsigned long dwOrgValue; \ 846 unsigned long dwOrgValue; \
852 VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ 847 VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
853 dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \ 848 dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
854 VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ 849 VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
855} while (0) 850} while (0)
856 851
857#define MACvDisableBarkerPreambleMd(dwIoBase) \ 852#define MACvDisableBarkerPreambleMd(iobase) \
858do { \ 853do { \
859 unsigned long dwOrgValue; \ 854 unsigned long dwOrgValue; \
860 VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ 855 VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
861 dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \ 856 dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
862 VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ 857 VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
863} while (0) 858} while (0)
864 859
865#define MACvSetBBType(dwIoBase, byTyp) \ 860#define MACvSetBBType(iobase, byTyp) \
866do { \ 861do { \
867 unsigned long dwOrgValue; \ 862 unsigned long dwOrgValue; \
868 VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ 863 VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
869 dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \ 864 dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
870 dwOrgValue = dwOrgValue | (unsigned long)byTyp; \ 865 dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
871 VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ 866 VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
872} while (0) 867} while (0)
873 868
874#define MACvReadATIMW(dwIoBase, pwCounter) \ 869#define MACvReadATIMW(iobase, pwCounter) \
875 VNSvInPortW(dwIoBase + MAC_REG_AIDATIM, pwCounter) 870 VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
876 871
877#define MACvWriteATIMW(dwIoBase, wCounter) \ 872#define MACvWriteATIMW(iobase, wCounter) \
878 VNSvOutPortW(dwIoBase + MAC_REG_AIDATIM, wCounter) 873 VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
879 874
880#define MACvWriteCRC16_128(dwIoBase, byRegOfs, wCRC) \ 875#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \
881do { \ 876do { \
882 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ 877 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
883 VNSvOutPortW(dwIoBase + byRegOfs, wCRC); \ 878 VNSvOutPortW(iobase + byRegOfs, wCRC); \
884 VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ 879 VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
885} while (0) 880} while (0)
886 881
887#define MACvGPIOIn(dwIoBase, pbyValue) \ 882#define MACvGPIOIn(iobase, pbyValue) \
888 VNSvInPortB(dwIoBase + MAC_REG_GPIOCTL1, pbyValue) 883 VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
889 884
890#define MACvSetRFLE_LatchBase(dwIoBase) \ 885#define MACvSetRFLE_LatchBase(iobase) \
891 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT) 886 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
892 887
893bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs, 888bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
894 unsigned char byTestBits); 889 unsigned char byTestBits);
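
The mac.h hunk above renames the macro parameter dwIoBase to iobase without changing behaviour; each MACvRegBitsOn/Off-style helper remains a read-modify-write on a MAC register, kept as a do { ... } while (0) macro so existing call sites are untouched. As a rough sketch only (not the driver's code, which goes through its VNSvInPortB/VNSvOutPortB wrappers), the same pattern written as a plain function over the ioread8/iowrite8 accessors already used in mac.c would be:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Read-modify-write equivalent of the MACvRegBitsOn() macro above:
 * read the byte register at iobase + reg, OR in the requested bits,
 * write the result back.
 */
static inline void mac_reg_bits_on(void __iomem *iobase, u8 reg, u8 bits)
{
	u8 val = ioread8(iobase + reg);

	iowrite8(val | bits, iobase + reg);
}
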
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 7d6e7464ae51..716d2a80f840 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: power.c 15 * File: power.c
21 * 16 *
22 * Purpose: Handles 802.11 power management functions 17 * Purpose: Handles 802.11 power management functions
@@ -133,7 +128,6 @@ PSvDisablePowerSaving(
133 priv->bPWBitOn = false; 128 priv->bPWBitOn = false;
134} 129}
135 130
136
137/* 131/*
138 * 132 *
139 * Routine Description: 133 * Routine Description:
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index d82dd8d6d68b..dfcb0ca8b448 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: power.h 15 * File: power.h
20 * 16 *
21 * Purpose: Handles 802.11 power management functions 17 * Purpose: Handles 802.11 power management functions
@@ -46,7 +42,6 @@ PSvEnablePowerSaving(
46 unsigned short wListenInterval 42 unsigned short wListenInterval
47); 43);
48 44
49
50bool 45bool
51PSbIsNextTBTTWakeUp( 46PSbIsNextTBTTWakeUp(
52 struct vnt_private * 47 struct vnt_private *
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 447882c7a6be..edf7db9d53b3 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: rf.c 15 * File: rf.c
21 * 16 *
22 * Purpose: rf function code 17 * Purpose: rf function code
@@ -50,359 +45,362 @@
50#define AL7230_PWR_IDX_LEN 64 45#define AL7230_PWR_IDX_LEN 64
51 46
52static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = { 47static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
53 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 48 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
54 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 49 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
55 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 50 0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
56 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 51 0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
57 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 52 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
58 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 53 0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
59 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 54 0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
60 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 55 0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
61 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 56 0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
62 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 57 0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
63 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 58 0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
64 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 59 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
65 0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 60 0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
66 0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 61 0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
67 0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW 62 0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
68}; 63};
69 64
70static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = { 65static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
71 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ 66 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
72 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ 67 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
73 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ 68 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
74 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ 69 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
75 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ 70 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
76 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ 71 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
77 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ 72 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
78 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ 73 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
79 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ 74 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
80 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ 75 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
81 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ 76 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
82 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ 77 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
83 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ 78 0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
84 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */ 79 0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
85}; 80};
86 81
87static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = { 82static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
88 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ 83 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
89 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ 84 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
90 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ 85 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
91 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ 86 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
92 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ 87 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
93 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ 88 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
94 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ 89 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
95 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ 90 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
96 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ 91 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
97 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ 92 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
98 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ 93 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
99 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ 94 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
100 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ 95 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
101 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */ 96 0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
102}; 97};
103 98
104static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = { 99static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
105 0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 100 0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
106 0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 101 0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
107 0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 102 0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
108 0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 103 0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
109 0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 104 0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
110 0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 105 0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
111 0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 106 0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
112 0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 107 0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
113 0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 108 0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
114 0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 109 0x04049900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
115 0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 110 0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
116 0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 111 0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
117 0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 112 0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
118 0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 113 0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
119 0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 114 0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
120 0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 115 0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
121 0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 116 0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
122 0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 117 0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
123 0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 118 0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
124 0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 119 0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
125 0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 120 0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
126 0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 121 0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
127 0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 122 0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
128 0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 123 0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
129 0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 124 0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
130 0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 125 0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
131 0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 126 0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
132 0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 127 0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
133 0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 128 0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
134 0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 129 0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
135 0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 130 0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
136 0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 131 0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
137 0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 132 0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
138 0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 133 0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
139 0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 134 0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
140 0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 135 0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
141 0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 136 0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
142 0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 137 0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
143 0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 138 0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
144 0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 139 0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
145 0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 140 0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
146 0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 141 0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
147 0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 142 0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
148 0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 143 0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
149 0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 144 0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
150 0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 145 0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
151 0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 146 0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
152 0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 147 0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
153 0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 148 0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
154 0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 149 0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
155 0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 150 0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
156 0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 151 0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
157 0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 152 0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
158 0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 153 0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
159 0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 154 0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
160 0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 155 0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
161 0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 156 0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
162 0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 157 0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
163 0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 158 0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
164 0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 159 0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
165 0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 160 0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
166 0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 161 0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
167 0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 162 0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
168 0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW 163 0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
169}; 164};
170 165
171/* 40MHz reference frequency 166/* 40MHz reference frequency
172 * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire. 167 * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
173 */ 168 */
174static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = { 169static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
175 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ 170 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
176 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ 171 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
177 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */ 172 0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
178 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */ 173 0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
179 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g // Need modify for 11a */ 174 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
180 /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */ 175 /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
181 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */ 176 0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
182 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 177 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
183 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */ 178 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */
184 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 179 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
185 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 180 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
186 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */ 181 0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */
187 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ 182 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
188 /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */ 183 /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
189 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */ 184 0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */
190 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 185 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
191 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 186 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
192 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11a: 12BACF */ 187 0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11a: 12BACF */
193}; 188};
194 189
195static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = { 190static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
196 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ 191 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
197 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ 192 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
198 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ 193 0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
199 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ 194 0x5FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
200 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a // Need modify for 11b/g */ 195 0x67F78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
201 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */ 196 0x853F5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
202 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 197 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
203 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ 198 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
204 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 199 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
205 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 200 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
206 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ 201 0xE0600A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
207 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ 202 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
208 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ 203 0x00147C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
209 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 204 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
210 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 205 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
211 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11b/g */ 206 0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11b/g */
212}; 207};
213 208
214static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = { 209static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
215 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ 210 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
216 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ 211 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
217 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ 212 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
218 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ 213 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
219 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ 214 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
220 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ 215 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
221 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ 216 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
222 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */ 217 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
223 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */ 218 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
224 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */ 219 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
225 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */ 220 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
226 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */ 221 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
227 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */ 222 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
228 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ 223 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
229 224
230 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ 225 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
231 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ 226 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
232 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ 227 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
233 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ 228 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
234 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ 229 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
235 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ 230 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
236 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ 231 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
237 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ 232 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
238 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ 233 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
239 234
240 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, 235 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
241 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ 236 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
242 237 */
243 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ 238
244 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ 239 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
245 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ 240 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
246 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ 241 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
247 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ 242 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
248 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ 243 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
249 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ 244 0x0FF55000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
250 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ 245 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
251 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */ 246 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
252 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ 247 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
253 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ 248 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
254 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ 249 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
255 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ 250 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
256 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ 251 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
257 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ 252 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
258 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ 253 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
259 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ 254 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
260 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ 255 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
261 256 0x0FF59000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
262 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ 257
263 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ 258 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
264 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ 259 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
265 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ 260 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
266 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ 261 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
267 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ 262 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
268 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ 263 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
269 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ 264 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
270 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ 265 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
271 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ 266 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
272 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ 267 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
273 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ 268 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
274 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ 269 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
275 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ 270 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
276 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ 271 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
277 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ 272 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
273 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
278}; 274};
279 275
280static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = { 276static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
281 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ 277 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
282 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ 278 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
283 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ 279 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
284 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ 280 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
285 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ 281 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
286 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ 282 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
287 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ 283 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
288 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ 284 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
289 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ 285 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
290 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ 286 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
291 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ 287 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
292 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ 288 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
293 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ 289 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
294 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ 290 0x06666100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
295 291
296 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ 292 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
297 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ 293 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
298 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ 294 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
299 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ 295 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
300 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ 296 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
301 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ 297 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
302 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ 298 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
303 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ 299 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
304 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ 300 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
305 301
306 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, 302 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
307 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ 303 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
308 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ 304 */
309 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ 305 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
310 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ 306 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
311 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ 307 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
312 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ 308 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
313 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ 309 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
314 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ 310 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
315 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ 311 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
316 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ 312 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
317 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ 313 0x10000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
318 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ 314 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
319 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ 315 0x1AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
320 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ 316 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
321 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ 317 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
322 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ 318 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
323 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ 319 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
324 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ 320 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
325 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ 321 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
326 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ 322 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
327 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ 323 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
328 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ 324 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
329 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ 325 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
330 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ 326 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
331 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ 327 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
332 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ 328 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
333 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ 329 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
334 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ 330 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
335 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ 331 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
336 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ 332 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
337 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ 333 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
338 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ 334 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
339 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ 335 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
340 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ 336 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
341 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ 337 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
338 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
342}; 339};
343 340
344static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = { 341static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
345 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ 342 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
346 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ 343 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
347 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ 344 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
348 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ 345 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
349 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ 346 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
350 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ 347 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
351 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ 348 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
352 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ 349 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
353 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ 350 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
354 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ 351 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
355 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ 352 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
356 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ 353 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
357 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ 354 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
358 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ 355 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
359 356
360 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ 357 /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
361 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ 358 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
362 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ 359 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
363 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ 360 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
364 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ 361 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
365 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ 362 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
366 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ 363 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
367 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ 364 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
368 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ 365 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
369 366
370 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, 367 /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
371 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ 368 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
372 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ 369 */
373 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ 370 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
374 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ 371 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
375 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ 372 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
376 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ 373 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
377 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ 374 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
378 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ 375 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
379 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ 376 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
380 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ 377 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
381 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ 378 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
382 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ 379 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
383 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ 380 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
384 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ 381 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
385 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ 382 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
386 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ 383 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
387 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ 384 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
388 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ 385 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
389 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ 386 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
390 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ 387 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
391 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ 388 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
392 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ 389 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
393 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ 390 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
394 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ 391 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
395 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ 392 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
396 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ 393 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
397 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ 394 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
398 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ 395 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
399 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ 396 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
400 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ 397 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
401 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ 398 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
402 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ 399 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
403 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ 400 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
404 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ 401 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
405 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ 402 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
403 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
406}; 404};
407 405
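Editor's note: every entry in the init and channel tables above packs the same three pieces inline: the 24-bit AL7230 register payload in the upper bits, the register length shifted into the command field, and the serial-interface write flag. The standalone C sketch below shows that composition; the two constants are assumptions for illustration only, the driver's real definitions live in its rf.c/mac.h headers.

#include <stdio.h>

/* Assumed placeholder values; the driver defines the real ones. */
#define BY_AL7230_REG_LEN_EX  0x17    /* 24-bit register length field */
#define IFREGCTL_REGW_EX      0x0100  /* "write RF register" command bit */

/* Build one table entry from its 24-bit payload, as the tables above do inline. */
static unsigned long al7230_cmd(unsigned long payload)
{
	return payload + (BY_AL7230_REG_LEN_EX << 3) + IFREGCTL_REGW_EX;
}

int main(void)
{
	/* e.g. the channel-1 entry of dwAL7230ChannelTable0 */
	printf("0x%08lX\n", al7230_cmd(0x00379000UL));
	return 0;
}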
408/* 406/*
@@ -410,7 +408,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
410 * 408 *
411 * Parameters: 409 * Parameters:
412 * In: 410 * In:
413 * dwIoBase - I/O base address 411 * iobase - I/O base address
414 * Out: 412 * Out:
415 * none 413 * none
416 * 414 *
@@ -419,16 +417,16 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
419 */ 417 */
420static bool s_bAL7230Init(struct vnt_private *priv) 418static bool s_bAL7230Init(struct vnt_private *priv)
421{ 419{
422 void __iomem *dwIoBase = priv->PortOffset; 420 void __iomem *iobase = priv->PortOffset;
423 int ii; 421 int ii;
424 bool ret; 422 bool ret;
425 423
426 ret = true; 424 ret = true;
427 425
428 /* 3-wire control for normal mode */ 426 /* 3-wire control for normal mode */
429 VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); 427 VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
430 428
431 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | 429 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
432 SOFTPWRCTL_TXPEINV)); 430 SOFTPWRCTL_TXPEINV));
433 BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */ 431 BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
434 432
@@ -436,20 +434,20 @@ static bool s_bAL7230Init(struct vnt_private *priv)
436 ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]); 434 ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
437 435
438 /* PLL On */ 436 /* PLL On */
439 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 437 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
440 438
441 /* Calibration */ 439 /* Calibration */
442 MACvTimer0MicroSDelay(priv, 150);/* 150us */ 440 MACvTimer0MicroSDelay(priv, 150);/* 150us */
443 /* TXDCOC:active, RCK:disable */ 441 /* TXDCOC:active, RCK:disable */
444 ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); 442 ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
445 MACvTimer0MicroSDelay(priv, 30);/* 30us */ 443 MACvTimer0MicroSDelay(priv, 30);/* 30us */
446 /* TXDCOC:disable, RCK:active */ 444 /* TXDCOC:disable, RCK:active */
447 ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); 445 ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
448 MACvTimer0MicroSDelay(priv, 30);/* 30us */ 446 MACvTimer0MicroSDelay(priv, 30);/* 30us */
449 /* TXDCOC:disable, RCK:disable */ 447 /* TXDCOC:disable, RCK:disable */
450 ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); 448 ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
451 449
452 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | 450 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
453 SOFTPWRCTL_SWPE2 | 451 SOFTPWRCTL_SWPE2 |
454 SOFTPWRCTL_SWPECTI | 452 SOFTPWRCTL_SWPECTI |
455 SOFTPWRCTL_TXPEINV)); 453 SOFTPWRCTL_TXPEINV));
@@ -458,7 +456,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
458 456
459 /* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */ 457 /* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
460 /* 3-wire control for power saving mode */ 458 /* 3-wire control for power saving mode */
461 VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ 459 VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
462 460
463 return ret; 461 return ret;
464} 462}
@@ -468,26 +466,26 @@ static bool s_bAL7230Init(struct vnt_private *priv)
468 */ 466 */
469static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel) 467static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
470{ 468{
471 void __iomem *dwIoBase = priv->PortOffset; 469 void __iomem *iobase = priv->PortOffset;
472 bool ret; 470 bool ret;
473 471
474 ret = true; 472 ret = true;
475 473
476 /* PLLON Off */ 474 /* PLLON Off */
477 MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 475 MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
478 476
479 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]); 477 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
480 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]); 478 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
481 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]); 479 ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
482 480
483 /* PLLOn On */ 481 /* PLLOn On */
484 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 482 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
485 483
486 /* Set Channel[7] = 0 to tell H/W channel is changing now. */ 484 /* Set Channel[7] = 0 to tell H/W channel is changing now. */
487 VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); 485 VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
488 MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230); 486 MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
489 /* Set Channel[7] = 1 to tell H/W channel change is done. */ 487 /* Set Channel[7] = 1 to tell H/W channel change is done. */
490 VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); 488 VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
491 489
492 return ret; 490 return ret;
493} 491}
@@ -497,7 +495,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
497 * 495 *
498 * Parameters: 496 * Parameters:
499 * In: 497 * In:
500 * dwIoBase - I/O base address 498 * iobase - I/O base address
501 * dwData - data to write 499 * dwData - data to write
502 * Out: 500 * Out:
503 * none 501 * none
@@ -507,15 +505,15 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
507 */ 505 */
508bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData) 506bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
509{ 507{
510 void __iomem *dwIoBase = priv->PortOffset; 508 void __iomem *iobase = priv->PortOffset;
511 unsigned short ww; 509 unsigned short ww;
512 unsigned long dwValue; 510 unsigned long dwValue;
513 511
514 VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData); 512 VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
515 513
516 /* W_MAX_TIMEOUT is the timeout period */ 514 /* W_MAX_TIMEOUT is the timeout period */
517 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 515 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
518 VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue); 516 VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
519 if (dwValue & IFREGCTL_DONE) 517 if (dwValue & IFREGCTL_DONE)
520 break; 518 break;
521 } 519 }
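Editor's note: the hunk above shows IFRFbWriteEmbedded() posting the command word to MAC_REG_IFREGCTL and then polling the same register for the DONE bit, bounded by W_MAX_TIMEOUT iterations. Below is a self-contained sketch of that post-then-poll pattern; the register accessors and constant values are stand-ins for the example, not the driver's API.

#include <stdbool.h>

/* Assumed placeholder values for the example. */
#define IFREGCTL_DONE_EX  0x04
#define W_MAX_TIMEOUT_EX  3000

static unsigned long fake_ifregctl;                 /* stands in for the MMIO register */
static void regctl_write(unsigned long v) { fake_ifregctl = v | IFREGCTL_DONE_EX; }
static unsigned long regctl_read(void)    { return fake_ifregctl; }

static bool rf_write_embedded_sketch(unsigned long data)
{
	unsigned int ww;

	regctl_write(data);                         /* start clocking the word out to the RF chip */
	for (ww = 0; ww < W_MAX_TIMEOUT_EX; ww++) {
		if (regctl_read() & IFREGCTL_DONE_EX)
			break;                      /* hardware signalled completion */
	}
	return ww < W_MAX_TIMEOUT_EX;               /* false means the write timed out */
}

int main(void)
{
	return rf_write_embedded_sketch(0x003791B8UL) ? 0 : 1;
}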
@@ -531,7 +529,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
531 * 529 *
532 * Parameters: 530 * Parameters:
533 * In: 531 * In:
534 * dwIoBase - I/O base address 532 * iobase - I/O base address
535 * Out: 533 * Out:
536 * none 534 * none
537 * 535 *
@@ -540,51 +538,51 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
540 */ 538 */
541static bool RFbAL2230Init(struct vnt_private *priv) 539static bool RFbAL2230Init(struct vnt_private *priv)
542{ 540{
543 void __iomem *dwIoBase = priv->PortOffset; 541 void __iomem *iobase = priv->PortOffset;
544 int ii; 542 int ii;
545 bool ret; 543 bool ret;
546 544
547 ret = true; 545 ret = true;
548 546
549 /* 3-wire control for normal mode */ 547 /* 3-wire control for normal mode */
550 VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); 548 VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
551 549
552 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | 550 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
553 SOFTPWRCTL_TXPEINV)); 551 SOFTPWRCTL_TXPEINV));
554 /* PLL Off */ 552 /* PLL Off */
555 MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 553 MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
556 554
557 /* patch abnormal AL2230 frequency output */ 555 /* patch abnormal AL2230 frequency output */
558 IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); 556 IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
559 557
560 for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) 558 for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
561 ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]); 559 ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
562 MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */ 560 MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
563 561
564 /* PLL On */ 562 /* PLL On */
565 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); 563 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
566 564
567 MACvTimer0MicroSDelay(priv, 150);/* 150us */ 565 MACvTimer0MicroSDelay(priv, 150);/* 150us */
568 ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); 566 ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
569 MACvTimer0MicroSDelay(priv, 30);/* 30us */ 567 MACvTimer0MicroSDelay(priv, 30);/* 30us */
570 ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); 568 ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
571 MACvTimer0MicroSDelay(priv, 30);/* 30us */ 569 MACvTimer0MicroSDelay(priv, 30);/* 30us */
572 ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]); 570 ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
573 571
574 MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | 572 MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
575 SOFTPWRCTL_SWPE2 | 573 SOFTPWRCTL_SWPE2 |
576 SOFTPWRCTL_SWPECTI | 574 SOFTPWRCTL_SWPECTI |
577 SOFTPWRCTL_TXPEINV)); 575 SOFTPWRCTL_TXPEINV));
578 576
579 /* 3-wire control for power saving mode */ 577 /* 3-wire control for power saving mode */
580 VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ 578 VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
581 579
582 return ret; 580 return ret;
583} 581}
584 582
585static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel) 583static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
586{ 584{
587 void __iomem *dwIoBase = priv->PortOffset; 585 void __iomem *iobase = priv->PortOffset;
588 bool ret; 586 bool ret;
589 587
590 ret = true; 588 ret = true;
@@ -593,10 +591,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
593 ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]); 591 ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
594 592
595 /* Set Channel[7] = 0 to tell H/W channel is changing now. */ 593 /* Set Channel[7] = 0 to tell H/W channel is changing now. */
596 VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); 594 VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
597 MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230); 595 MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
598 /* Set Channel[7] = 1 to tell H/W channel change is done. */ 596 /* Set Channel[7] = 1 to tell H/W channel change is done. */
599 VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); 597 VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
600 598
601 return ret; 599 return ret;
602} 600}
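Editor's note: both select-channel paths end with the handshake spelled out in the comments above: bit 7 of the CHANNEL register is cleared to tell the hardware a change is in progress, the code waits the per-chip settling delay, then bit 7 is set to mark the change complete. A minimal illustrative sketch follows, with a stub register and delay in place of the driver's VNSvOutPortB/MACvTimer0MicroSDelay and an assumed delay value.

#include <stdio.h>

static unsigned char channel_reg;                  /* stands in for MAC_REG_CHANNEL */
static void channel_reg_write(unsigned char v) { channel_reg = v; }
static void settle_delay(unsigned int us)      { (void)us; /* driver busy-waits here */ }

static void signal_channel_change(unsigned char channel, unsigned int delay_us)
{
	channel_reg_write(channel & 0x7F);   /* bit 7 = 0: channel is changing now */
	settle_delay(delay_us);              /* SWITCH_CHANNEL_DELAY_* in the driver */
	channel_reg_write(channel | 0x80);   /* bit 7 = 1: channel change is done */
}

int main(void)
{
	signal_channel_change(6, 100);       /* e.g. 2.4 GHz channel 6, assumed 100 us delay */
	printf("CHANNEL register: 0x%02X\n", channel_reg);
	return 0;
}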
@@ -681,7 +679,7 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
681 * 679 *
682 * Parameters: 680 * Parameters:
683 * In: 681 * In:
684 * dwIoBase - I/O base address 682 * iobase - I/O base address
685 * uChannel - channel number 683 * uChannel - channel number
686 * bySleepCnt - SleepProgSyn count 684 * bySleepCnt - SleepProgSyn count
687 * 685 *
@@ -691,12 +689,12 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
691bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, 689bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
692 u16 uChannel) 690 u16 uChannel)
693{ 691{
694 void __iomem *dwIoBase = priv->PortOffset; 692 void __iomem *iobase = priv->PortOffset;
695 int ii; 693 int ii;
696 unsigned char byInitCount = 0; 694 unsigned char byInitCount = 0;
697 unsigned char bySleepCount = 0; 695 unsigned char bySleepCount = 0;
698 696
699 VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0); 697 VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
700 switch (byRFType) { 698 switch (byRFType) {
701 case RF_AIROHA: 699 case RF_AIROHA:
702 case RF_AL2230S: 700 case RF_AL2230S:
@@ -758,7 +756,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
758 * 756 *
759 * Parameters: 757 * Parameters:
760 * In: 758 * In:
761 * dwIoBase - I/O base address 759 * iobase - I/O base address
762 * dwRFPowerTable - RF Tx Power Setting 760 * dwRFPowerTable - RF Tx Power Setting
763 * Out: 761 * Out:
764 * none 762 * none
@@ -830,7 +828,7 @@ bool RFbSetPower(
830 * 828 *
831 * Parameters: 829 * Parameters:
832 * In: 830 * In:
833 * dwIoBase - I/O base address 831 * iobase - I/O base address
834 * dwRFPowerTable - RF Tx Power Setting 832 * dwRFPowerTable - RF Tx Power Setting
835 * Out: 833 * Out:
836 * none 834 * none
@@ -855,20 +853,20 @@ bool RFbRawSetPower(
855 case RF_AIROHA: 853 case RF_AIROHA:
856 ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); 854 ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
857 if (rate <= RATE_11M) 855 if (rate <= RATE_11M)
858 ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 856 ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
859 else 857 else
860 ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 858 ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
861 859
862 break; 860 break;
863 861
864 case RF_AL2230S: 862 case RF_AL2230S:
865 ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); 863 ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
866 if (rate <= RATE_11M) { 864 if (rate <= RATE_11M) {
867 ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 865 ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
868 ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 866 ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
869 } else { 867 } else {
870 ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 868 ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
871 ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); 869 ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
872 } 870 }
873 871
874 break; 872 break;
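Editor's note: in the RFbRawSetPower hunk above, each RF type first writes the per-level word from its power table and then one or two fixed words that differ between CCK rates (<= 11M) and OFDM rates. The sketch below shows that rate-dependent selection for the plain AL2230 case, with a stub writer in place of IFRFbWriteEmbedded(), payloads copied from the hunk, control bits omitted, and an assumed rate index.

#include <stdbool.h>
#include <stdio.h>

#define RATE_11M_EX 3    /* assumed rate index; CCK rates are at or below it */

static bool rf_write(unsigned long word)
{
	printf("RF write 0x%08lX\n", word);   /* driver: IFRFbWriteEmbedded(priv, word) */
	return true;
}

/* After the dwAL2230PowerTable[byPwr] write, pick the rate-dependent word. */
static bool al2230_rate_word(unsigned int rate)
{
	return rf_write(rate <= RATE_11M_EX ? 0x0001B400UL : 0x0005A400UL);
}

int main(void)
{
	return (al2230_rate_word(2) && al2230_rate_word(7)) ? 0 : 1;
}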
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index e9c786995506..b6e853784a26 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: rf.h 15 * File: rf.h
21 * 16 *
22 * Purpose: 17 * Purpose:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7e69bc99d60f..3efe19a1b13f 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: rxtx.c 15 * File: rxtx.c
20 * 16 *
21 * Purpose: handle WMAC/802.3/802.11 rx & tx functions 17 * Purpose: handle WMAC/802.3/802.11 rx & tx functions
@@ -1086,8 +1082,8 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
1086 } 1082 }
1087 1083
1088 /* 1084 /*
1089 * Use for AUTO FALL BACK 1085 * Use for AUTO FALL BACK
1090 */ 1086 */
1091 if (fifo_ctl & FIFOCTL_AUTO_FB_0) 1087 if (fifo_ctl & FIFOCTL_AUTO_FB_0)
1092 byFBOption = AUTO_FB_0; 1088 byFBOption = AUTO_FB_0;
1093 else if (fifo_ctl & FIFOCTL_AUTO_FB_1) 1089 else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 1e30ecb5c63c..89de67115826 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: rxtx.h 15 * File: rxtx.h
20 * 16 *
21 * Purpose: 17 * Purpose:
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index ee992772066f..635f271595f6 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: srom.c 15 * File: srom.c
20 * 16 *
21 * Purpose:Implement functions to access eeprom 17 * Purpose:Implement functions to access eeprom
@@ -64,7 +60,7 @@
64 * 60 *
65 * Parameters: 61 * Parameters:
66 * In: 62 * In:
67 * dwIoBase - I/O base address 63 * iobase - I/O base address
68 * byContntOffset - address of EEPROM 64 * byContntOffset - address of EEPROM
69 * Out: 65 * Out:
70 * none 66 * none
@@ -72,7 +68,7 @@
72 * Return Value: data read 68 * Return Value: data read
73 * 69 *
74 */ 70 */
75unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, 71unsigned char SROMbyReadEmbedded(void __iomem *iobase,
76 unsigned char byContntOffset) 72 unsigned char byContntOffset)
77{ 73{
78 unsigned short wDelay, wNoACK; 74 unsigned short wDelay, wNoACK;
@@ -81,18 +77,18 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
81 unsigned char byOrg; 77 unsigned char byOrg;
82 78
83 byData = 0xFF; 79 byData = 0xFF;
84 VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg); 80 VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
85 /* turn off hardware retry for getting NACK */ 81 /* turn off hardware retry for getting NACK */
86 VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY))); 82 VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
87 for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) { 83 for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
88 VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID); 84 VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
89 VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset); 85 VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
90 86
91 /* issue read command */ 87 /* issue read command */
92 VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR); 88 VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
93 /* wait DONE be set */ 89 /* wait DONE be set */
94 for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) { 90 for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
95 VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait); 91 VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
96 if (byWait & (I2MCSR_DONE | I2MCSR_NACK)) 92 if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
97 break; 93 break;
98 PCAvDelayByIO(CB_DELAY_LOOP_WAIT); 94 PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
@@ -102,8 +98,8 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
102 break; 98 break;
103 } 99 }
104 } 100 }
105 VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData); 101 VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
106 VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); 102 VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
107 return byData; 103 return byData;
108} 104}
109 105
@@ -112,20 +108,20 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
112 * 108 *
113 * Parameters: 109 * Parameters:
114 * In: 110 * In:
115 * dwIoBase - I/O base address 111 * iobase - I/O base address
116 * Out: 112 * Out:
117 * pbyEepromRegs - EEPROM content Buffer 113 * pbyEepromRegs - EEPROM content Buffer
118 * 114 *
119 * Return Value: none 115 * Return Value: none
120 * 116 *
121 */ 117 */
122void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs) 118void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs)
123{ 119{
124 int ii; 120 int ii;
125 121
126 /* ii = Rom Address */ 122 /* ii = Rom Address */
127 for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { 123 for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
128 *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, 124 *pbyEepromRegs = SROMbyReadEmbedded(iobase,
129 (unsigned char)ii); 125 (unsigned char)ii);
130 pbyEepromRegs++; 126 pbyEepromRegs++;
131 } 127 }
@@ -136,21 +132,21 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
136 * 132 *
137 * Parameters: 133 * Parameters:
138 * In: 134 * In:
139 * dwIoBase - I/O base address 135 * iobase - I/O base address
140 * Out: 136 * Out:
141 * pbyEtherAddress - Ethernet Address buffer 137 * pbyEtherAddress - Ethernet Address buffer
142 * 138 *
143 * Return Value: none 139 * Return Value: none
144 * 140 *
145 */ 141 */
146void SROMvReadEtherAddress(void __iomem *dwIoBase, 142void SROMvReadEtherAddress(void __iomem *iobase,
147 unsigned char *pbyEtherAddress) 143 unsigned char *pbyEtherAddress)
148{ 144{
149 unsigned char ii; 145 unsigned char ii;
150 146
151 /* ii = Rom Address */ 147 /* ii = Rom Address */
152 for (ii = 0; ii < ETH_ALEN; ii++) { 148 for (ii = 0; ii < ETH_ALEN; ii++) {
153 *pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii); 149 *pbyEtherAddress = SROMbyReadEmbedded(iobase, ii);
154 pbyEtherAddress++; 150 pbyEtherAddress++;
155 } 151 }
156} 152}
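Editor's note: the srom.c hunks above show that both bulk readers are thin loops over the single-byte SROMbyReadEmbedded(), which drives the MAC's I2C master one EEPROM offset at a time; the Ethernet address occupies the first six offsets. A standalone sketch of that layering follows, with a fake byte reader (srom_read_byte() is a stand-in for the example, not driver API).

#include <stdio.h>

#define ETH_ALEN_EX 6   /* standard MAC address length */

static unsigned char srom_read_byte(unsigned char offset)
{
	return (unsigned char)(0xA0 + offset);   /* fake EEPROM content for the demo */
}

/* Mirrors SROMvReadEtherAddress(): EEPROM offsets 0..5 hold the MAC address. */
static void srom_read_ether_address(unsigned char *addr)
{
	unsigned char ii;

	for (ii = 0; ii < ETH_ALEN_EX; ii++)
		addr[ii] = srom_read_byte(ii);
}

int main(void)
{
	unsigned char mac[ETH_ALEN_EX];

	srom_read_ether_address(mac);
	printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}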
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 531bf0069373..6e03ab6dfa9d 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -12,11 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *
20 * File: srom.h 15 * File: srom.h
21 * 16 *
22 * Purpose: Implement functions to access eeprom 17 * Purpose: Implement functions to access eeprom
@@ -90,12 +85,12 @@
90 85
91/*--------------------- Export Functions --------------------------*/ 86/*--------------------- Export Functions --------------------------*/
92 87
93unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, 88unsigned char SROMbyReadEmbedded(void __iomem *iobase,
94 unsigned char byContntOffset); 89 unsigned char byContntOffset);
95 90
96void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs); 91void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs);
97 92
98void SROMvReadEtherAddress(void __iomem *dwIoBase, 93void SROMvReadEtherAddress(void __iomem *iobase,
99 unsigned char *pbyEtherAddress); 94 unsigned char *pbyEtherAddress);
100 95
101#endif /* __EEPROM_H__*/ 96#endif /* __EEPROM_H__*/
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 597efefc017f..d6a0563ad55c 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: tmacro.h 15 * File: tmacro.h
20 * 16 *
21 * Purpose: define basic common types and macros 17 * Purpose: define basic common types and macros
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 85fe0464cfb3..9806b5989014 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * File: upc.h 15 * File: upc.h
20 * 16 *
21 * Purpose: Macros to access device 17 * Purpose: Macros to access device
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 7cc13874f8f1..fe1c25c64cca 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -86,15 +86,15 @@ struct vnt_phy_field {
86unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type, 86unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
87 unsigned int frame_length, u16 tx_rate); 87 unsigned int frame_length, u16 tx_rate);
88 88
89void vnt_get_phy_field(struct vnt_private *, u32 frame_length, 89void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
90 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *); 90 u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
91 91
92void vnt_set_short_slot_time(struct vnt_private *); 92void vnt_set_short_slot_time(struct vnt_private *priv);
93void vnt_set_vga_gain_offset(struct vnt_private *, u8); 93void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
94void vnt_set_antenna_mode(struct vnt_private *, u8); 94void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
95int vnt_vt3184_init(struct vnt_private *); 95int vnt_vt3184_init(struct vnt_private *priv);
96void vnt_set_deep_sleep(struct vnt_private *); 96void vnt_set_deep_sleep(struct vnt_private *priv);
97void vnt_exit_deep_sleep(struct vnt_private *); 97void vnt_exit_deep_sleep(struct vnt_private *priv);
98void vnt_update_pre_ed_threshold(struct vnt_private *, int scanning); 98void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
99 99
100#endif /* __BASEBAND_H__ */ 100#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 53b469c71dc2..0e5a99375099 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -501,16 +501,7 @@ u8 vnt_get_pkt_type(struct vnt_private *priv)
501 */ 501 */
502u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2) 502u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
503{ 503{
504 u64 tsf_offset = 0; 504 return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
505 u16 rx_bcn_offset;
506
507 rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE];
508
509 tsf2 += (u64)rx_bcn_offset;
510
511 tsf_offset = tsf1 - tsf2;
512
513 return tsf_offset;
514} 505}
515 506
516/* 507/*
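The one-line rewrite of vnt_get_tsf_offset() is arithmetically identical to the removed three-step version: tsf1 - (tsf2 + off) equals tsf1 - tsf2 - off, including when the unsigned subtraction wraps. A minimal standalone check, using stand-in values rather than the driver's cw_rxbcntsf_off table:

#include <assert.h>
#include <stdint.h>

/* Old form: add the per-rate offset to tsf2, then subtract. */
static uint64_t tsf_offset_old(uint64_t tsf1, uint64_t tsf2, uint16_t off)
{
        tsf2 += (uint64_t)off;
        return tsf1 - tsf2;
}

/* New form: subtract both terms in one expression. */
static uint64_t tsf_offset_new(uint64_t tsf1, uint64_t tsf2, uint16_t off)
{
        return tsf1 - tsf2 - (uint64_t)off;
}

int main(void)
{
        assert(tsf_offset_old(1000, 400, 24) == tsf_offset_new(1000, 400, 24));
        assert(tsf_offset_old(0, 400, 24) == tsf_offset_new(0, 400, 24)); /* wraps */
        return 0;
}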
@@ -610,8 +601,8 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
610 beacon_int = beacon_interval * 1024; 601 beacon_int = beacon_interval * 1024;
611 602
612 /* Next TBTT = 603 /* Next TBTT =
613 * ((local_current_TSF / beacon_interval) + 1) * beacon_interval 604 * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
614 */ 605 */
615 if (beacon_int) { 606 if (beacon_int) {
616 do_div(tsf, beacon_int); 607 do_div(tsf, beacon_int);
617 tsf += 1; 608 tsf += 1;
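The comment spells out the rounding rule, and do_div() is the kernel's in-place 64-by-32 division helper (quotient left in tsf, remainder returned). The same arithmetic with ordinary 64-bit division, as a self-contained sketch with illustrative numbers:

#include <assert.h>
#include <stdint.h>

/* Next TBTT = ((local_current_TSF / beacon_interval) + 1) * beacon_interval,
 * with the beacon interval converted from TU to microseconds (x 1024).
 */
static uint64_t next_tbtt(uint64_t tsf, uint16_t beacon_interval_tu)
{
        uint64_t beacon_int = (uint64_t)beacon_interval_tu * 1024;

        if (!beacon_int)
                return tsf;

        return (tsf / beacon_int + 1) * beacon_int;
}

int main(void)
{
        /* 100 TU interval = 102400 us; TSF 250000 rounds up to 307200 */
        assert(next_tbtt(250000, 100) == 307200);
        return 0;
}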
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index eeed16e9124e..611da4929ddc 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -121,7 +121,7 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
121 u16 offset; 121 u16 offset;
122 122
123 offset = MISCFIFO_KEYETRY0; 123 offset = MISCFIFO_KEYETRY0;
124 offset += (entry_idx * MISCFIFO_KEYENTRYSIZE); 124 offset += entry_idx * MISCFIFO_KEYENTRYSIZE;
125 125
126 set_key.u.write.key_ctl = cpu_to_le16(key_ctl); 126 set_key.u.write.key_ctl = cpu_to_le16(key_ctl);
127 ether_addr_copy(set_key.u.write.addr, addr); 127 ether_addr_copy(set_key.u.write.addr, addr);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828bdabf..50d02d9aa535 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers");
85 * Static vars definitions 85 * Static vars definitions
86 */ 86 */
87 87
88static struct usb_device_id vt6656_table[] = { 88static const struct usb_device_id vt6656_table[] = {
89 {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, 89 {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
90 {} 90 {}
91}; 91};
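Constifying the USB ID table lets it live in read-only data; the table is still terminated by an empty entry and is normally paired with MODULE_DEVICE_TABLE() so the module autoloads on a match. A short sketch of the pattern with placeholder IDs (not the VT6656 ones):

#include <linux/module.h>
#include <linux/usb.h>

/* Vendor/product IDs below are placeholders for illustration only. */
static const struct usb_device_id example_id_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);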
@@ -326,9 +326,9 @@ static int vnt_init_registers(struct vnt_private *priv)
326 priv->current_net_addr); 326 priv->current_net_addr);
327 327
328 /* 328 /*
329 * set BB and packet type at the same time 329 * set BB and packet type at the same time
330 * set Short Slot Time, xIFS, and RSPINF 330 * set Short Slot Time, xIFS, and RSPINF
331 */ 331 */
332 if (priv->bb_type == BB_TYPE_11A) 332 if (priv->bb_type == BB_TYPE_11A)
333 priv->short_slot_time = true; 333 priv->short_slot_time = true;
334 else 334 else
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 79a3108719a6..6101a35582b6 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -730,9 +730,9 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
730 return false; 730 return false;
731 731
732 /* 732 /*
733 * 0x080F1B00 for 3 wire control TxGain(D10) 733 * 0x080F1B00 for 3 wire control TxGain(D10)
734 * and 0x31 as TX Gain value 734 * and 0x31 as TX Gain value
735 */ 735 */
736 power_setting = 0x080c0b00 | (power << 12); 736 power_setting = 0x080c0b00 | (power << 12);
737 737
738 ret &= vnt_rf_write_embedded(priv, power_setting); 738 ret &= vnt_rf_write_embedded(priv, power_setting);
@@ -800,8 +800,8 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
800/* Convert rssi to dbm */ 800/* Convert rssi to dbm */
801void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm) 801void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
802{ 802{
803 u8 idx = (((rssi & 0xc0) >> 6) & 0x03); 803 u8 idx = ((rssi & 0xc0) >> 6) & 0x03;
804 long b = (rssi & 0x3f); 804 long b = rssi & 0x3f;
805 long a = 0; 805 long a = 0;
806 u8 airoharf[4] = {0, 18, 0, 40}; 806 u8 airoharf[4] = {0, 18, 0, 40};
807 807
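The dropped parentheses are purely cosmetic: the two expressions still split the RSSI byte into a 2-bit table index (bits 7:6) and a 6-bit level (bits 5:0). A standalone check with an arbitrary sample byte:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t rssi = 0x9a;                    /* arbitrary sample */
        uint8_t idx = (rssi & 0xc0) >> 6;       /* bits 7:6 -> 0..3  */
        uint8_t level = rssi & 0x3f;            /* bits 5:0 -> 0..63 */

        assert(idx == 2);
        assert(level == 0x1a);
        return 0;
}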
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 4b51c0ac27ac..622994795222 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -224,9 +224,7 @@ static inline u16 get_asoc_status(u8 *data)
224 u16 asoc_status; 224 u16 asoc_status;
225 225
226 asoc_status = data[3]; 226 asoc_status = data[3];
227 asoc_status = (asoc_status << 8) | data[2]; 227 return (asoc_status << 8) | data[2];
228
229 return asoc_status;
230} 228}
231 229
232static inline u16 get_asoc_id(u8 *data) 230static inline u16 get_asoc_id(u8 *data)
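get_asoc_status() assembles a 16-bit value from two consecutive bytes, low byte at data[2] and high byte at data[3], i.e. a little-endian load; the collapsed return keeps exactly that math. A self-contained equivalent (the kernel also provides helpers such as get_unaligned_le16(), though the patch keeps the open-coded form):

#include <assert.h>
#include <stdint.h>

static uint16_t le16_from_bytes(const uint8_t *p)
{
        return (uint16_t)((p[1] << 8) | p[0]);  /* p[0] low byte, p[1] high byte */
}

int main(void)
{
        uint8_t frame[4] = { 0x00, 0x00, 0x34, 0x12 };

        /* (data[3] << 8) | data[2], as in get_asoc_status() */
        assert(le16_from_bytes(&frame[2]) == 0x1234);
        return 0;
}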
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6ab7443eabde..b00ea75524e4 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1722,10 +1722,8 @@ _WPAPtk_end_case_:
1722 1722
1723 case PMKSA: 1723 case PMKSA:
1724 pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL); 1724 pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
1725 if (!pu8keybuf) { 1725 if (!pu8keybuf)
1726 netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
1727 return -ENOMEM; 1726 return -ENOMEM;
1728 }
1729 1727
1730 pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid; 1728 pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid;
1731 1729
@@ -1932,7 +1930,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
1932 wid.val = kmalloc(wid.size, GFP_KERNEL); 1930 wid.val = kmalloc(wid.size, GFP_KERNEL);
1933 1931
1934 stamac = wid.val; 1932 stamac = wid.val;
1935 memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); 1933 ether_addr_copy(stamac, strHostIfStaInactiveT->mac);
1936 1934
1937 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, 1935 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
1938 wilc_get_vif_idx(vif)); 1936 wilc_get_vif_idx(vif));
@@ -2168,7 +2166,7 @@ static void Handle_DelStation(struct wilc_vif *vif,
2168 2166
2169 pu8CurrByte = wid.val; 2167 pu8CurrByte = wid.val;
2170 2168
2171 memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN); 2169 ether_addr_copy(pu8CurrByte, pstrDelStaParam->mac_addr);
2172 2170
2173 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, 2171 result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
2174 wilc_get_vif_idx(vif)); 2172 wilc_get_vif_idx(vif));
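ether_addr_copy() is the dedicated MAC-address copy helper: it always moves ETH_ALEN (6) bytes and, per its kernel-doc, expects both buffers to be at least 16-bit aligned (which is why some MAC fields in the kernel are declared __aligned(2)). A userspace stand-in showing the effect of the replaced memcpy():

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6      /* same value the kernel header defines */

/* Plain-memcpy stand-in for ether_addr_copy(); the kernel version may
 * use 16/32-bit loads, hence its alignment requirement.
 */
static void mac_copy(uint8_t *dst, const uint8_t *src)
{
        memcpy(dst, src, ETH_ALEN);
}

int main(void)
{
        uint8_t src[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
        uint8_t dst[ETH_ALEN] = { 0 };

        mac_copy(dst, src);
        assert(memcmp(dst, src, ETH_ALEN) == 0);
        return 0;
}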
@@ -2322,10 +2320,8 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
2322 wid.size = 2; 2320 wid.size = 2;
2323 wid.val = kmalloc(wid.size, GFP_KERNEL); 2321 wid.val = kmalloc(wid.size, GFP_KERNEL);
2324 2322
2325 if (!wid.val) { 2323 if (!wid.val)
2326 netdev_err(vif->ndev, "Failed to allocate memory\n");
2327 return -ENOMEM; 2324 return -ENOMEM;
2328 }
2329 2325
2330 wid.val[0] = u8remain_on_chan_flag; 2326 wid.val[0] = u8remain_on_chan_flag;
2331 wid.val[1] = FALSE_FRMWR_CHANNEL; 2327 wid.val[1] = FALSE_FRMWR_CHANNEL;
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index ddfea29df2a7..f36d3b5a0370 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -367,7 +367,6 @@ extern u8 wilc_connected_ssid[6];
367extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN]; 367extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
368 368
369extern int wilc_connecting; 369extern int wilc_connecting;
370extern u8 wilc_initialized;
371extern struct timer_list wilc_during_ip_timer; 370extern struct timer_list wilc_during_ip_timer;
372 371
373#endif 372#endif
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 6370a5efe343..3775706578b2 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -37,6 +37,8 @@ static void linux_wlan_tx_complete(void *priv, int status);
37static int mac_init_fn(struct net_device *ndev); 37static int mac_init_fn(struct net_device *ndev);
38static struct net_device_stats *mac_stats(struct net_device *dev); 38static struct net_device_stats *mac_stats(struct net_device *dev);
39static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd); 39static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
40static int wilc_mac_open(struct net_device *ndev);
41static int wilc_mac_close(struct net_device *ndev);
40static void wilc_set_multicast_list(struct net_device *dev); 42static void wilc_set_multicast_list(struct net_device *dev);
41 43
42bool wilc_enable_ps = true; 44bool wilc_enable_ps = true;
@@ -218,17 +220,6 @@ static void deinit_irq(struct net_device *dev)
218 } 220 }
219} 221}
220 222
221int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
222{
223 /* FIXME: replace with mutex_lock or wait_for_completion */
224 int error = -1;
225
226 if (vp)
227 error = down_timeout(vp,
228 msecs_to_jiffies(timeout));
229 return error;
230}
231
232void wilc_mac_indicate(struct wilc *wilc, int flag) 223void wilc_mac_indicate(struct wilc *wilc, int flag)
233{ 224{
234 int status; 225 int status;
@@ -269,23 +260,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
269 260
270int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode) 261int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
271{ 262{
272 int i = 0; 263 struct wilc_vif *vif = netdev_priv(wilc_netdev);
273 int ret = -1;
274 struct wilc_vif *vif;
275 struct wilc *wilc;
276
277 vif = netdev_priv(wilc_netdev);
278 wilc = vif->wilc;
279 264
280 for (i = 0; i < wilc->vif_num; i++) 265 memcpy(vif->bssid, bssid, 6);
281 if (wilc->vif[i]->ndev == wilc_netdev) { 266 vif->mode = mode;
282 memcpy(wilc->vif[i]->bssid, bssid, 6);
283 wilc->vif[i]->mode = mode;
284 ret = 0;
285 break;
286 }
287 267
288 return ret; 268 return 0;
289} 269}
290 270
291int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) 271int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
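The simplified wilc_wlan_set_bssid() leans on the standard net_device private-data layout: the per-interface structure is allocated together with the net_device, and netdev_priv() returns it directly, so scanning wilc->vif[] for a matching ndev is unnecessary. A hedged sketch of that pattern with placeholder names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct example_vif {            /* placeholder private struct */
        u8 bssid[ETH_ALEN];
        u8 mode;
};

/* alloc_etherdev() reserves sizeof(struct example_vif) behind the
 * net_device; netdev_priv() later returns that same area.
 */
static struct net_device *example_alloc(void)
{
        return alloc_etherdev(sizeof(struct example_vif));
}

static void example_set_bssid(struct net_device *ndev, const u8 *bssid, u8 mode)
{
        struct example_vif *vif = netdev_priv(ndev);

        ether_addr_copy(vif->bssid, bssid);
        vif->mode = mode;
}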
@@ -847,7 +827,7 @@ static int mac_init_fn(struct net_device *ndev)
847 return 0; 827 return 0;
848} 828}
849 829
850int wilc_mac_open(struct net_device *ndev) 830static int wilc_mac_open(struct net_device *ndev)
851{ 831{
852 struct wilc_vif *vif; 832 struct wilc_vif *vif;
853 833
@@ -1038,7 +1018,7 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
1038 return 0; 1018 return 0;
1039} 1019}
1040 1020
1041int wilc_mac_close(struct net_device *ndev) 1021static int wilc_mac_close(struct net_device *ndev)
1042{ 1022{
1043 struct wilc_priv *priv; 1023 struct wilc_priv *priv;
1044 struct wilc_vif *vif; 1024 struct wilc_vif *vif;
@@ -1212,16 +1192,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
1212 1192
1213void wilc_netdev_cleanup(struct wilc *wilc) 1193void wilc_netdev_cleanup(struct wilc *wilc)
1214{ 1194{
1215 int i = 0; 1195 int i;
1216 struct wilc_vif *vif[NUM_CONCURRENT_IFC];
1217 1196
1218 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { 1197 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
1219 unregister_inetaddr_notifier(&g_dev_notifier); 1198 unregister_inetaddr_notifier(&g_dev_notifier);
1220 1199
1221 for (i = 0; i < NUM_CONCURRENT_IFC; i++)
1222 vif[i] = netdev_priv(wilc->vif[i]->ndev);
1223 }
1224
1225 if (wilc && wilc->firmware) { 1200 if (wilc && wilc->firmware) {
1226 release_firmware(wilc->firmware); 1201 release_firmware(wilc->firmware);
1227 wilc->firmware = NULL; 1202 wilc->firmware = NULL;
@@ -1230,7 +1205,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
1230 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { 1205 if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
1231 for (i = 0; i < NUM_CONCURRENT_IFC; i++) 1206 for (i = 0; i < NUM_CONCURRENT_IFC; i++)
1232 if (wilc->vif[i]->ndev) 1207 if (wilc->vif[i]->ndev)
1233 if (vif[i]->mac_opened) 1208 if (wilc->vif[i]->mac_opened)
1234 wilc_mac_close(wilc->vif[i]->ndev); 1209 wilc_mac_close(wilc->vif[i]->ndev);
1235 1210
1236 for (i = 0; i < NUM_CONCURRENT_IFC; i++) { 1211 for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
@@ -1278,9 +1253,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1278 1253
1279 vif->idx = wl->vif_num; 1254 vif->idx = wl->vif_num;
1280 vif->wilc = *wilc; 1255 vif->wilc = *wilc;
1256 vif->ndev = ndev;
1281 wl->vif[i] = vif; 1257 wl->vif[i] = vif;
1282 wl->vif[wl->vif_num]->ndev = ndev; 1258 wl->vif_num = i;
1283 wl->vif_num++;
1284 ndev->netdev_ops = &wilc_netdev_ops; 1259 ndev->netdev_ops = &wilc_netdev_ops;
1285 1260
1286 { 1261 {
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 802bb1d5e207..07260c497db4 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -62,16 +62,16 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
62 return ret; 62 return ret;
63 63
64 if (flag > DBG_LEVEL_ALL) { 64 if (flag > DBG_LEVEL_ALL) {
65 printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL)); 65 pr_info("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
66 return -EINVAL; 66 return -EINVAL;
67 } 67 }
68 68
69 atomic_set(&WILC_DEBUG_LEVEL, (int)flag); 69 atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
70 70
71 if (flag == 0) 71 if (flag == 0)
72 printk(KERN_INFO "Debug-level disabled\n"); 72 pr_info("Debug-level disabled\n");
73 else 73 else
74 printk(KERN_INFO "Debug-level enabled\n"); 74 pr_info("Debug-level enabled\n");
75 75
76 return count; 76 return count;
77} 77}
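pr_info() pins the message at KERN_INFO and, combined with a pr_fmt() macro defined before the includes, gives every line a consistent prefix, which the bare printk() calls lacked. A minimal sketch of the idiom (prefix string chosen for illustration):

#define pr_fmt(fmt) "wilc_debugfs: " fmt        /* illustrative prefix */

#include <linux/printk.h>

static void report_debug_level(unsigned int flag)
{
        if (flag == 0)
                pr_info("Debug-level disabled\n");
        else
                pr_info("Debug-level enabled (0x%08x)\n", flag);
}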
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 39b73fb27398..3ad7cec4662d 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -39,6 +39,7 @@ struct wilc_sdio {
39}; 39};
40 40
41static struct wilc_sdio g_sdio; 41static struct wilc_sdio g_sdio;
42static const struct wilc_hif_func wilc_hif_sdio;
42 43
43static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data); 44static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
44static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data); 45static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
@@ -1100,7 +1101,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
1100 * 1101 *
1101 ********************************************/ 1102 ********************************************/
1102 1103
1103const struct wilc_hif_func wilc_hif_sdio = { 1104static const struct wilc_hif_func wilc_hif_sdio = {
1104 .hif_init = sdio_init, 1105 .hif_init = sdio_init,
1105 .hif_deinit = sdio_deinit, 1106 .hif_deinit = sdio_deinit,
1106 .hif_read_reg = sdio_read_reg, 1107 .hif_read_reg = sdio_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index f08cf6d9e1af..55d53c3a95df 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -30,6 +30,7 @@ struct wilc_spi {
30}; 30};
31 31
32static struct wilc_spi g_spi; 32static struct wilc_spi g_spi;
33static const struct wilc_hif_func wilc_hif_spi;
33 34
34static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32); 35static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
35static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32); 36static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -858,7 +859,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
858 /* the SPI to it's initial value. */ 859 /* the SPI to it's initial value. */
859 if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) { 860 if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
860 /* Read failed. Try with CRC off. This might happen when module 861 /* Read failed. Try with CRC off. This might happen when module
861 * is removed but chip isn't reset*/ 862 * is removed but chip isn't reset
863 */
862 g_spi.crc_off = 1; 864 g_spi.crc_off = 1;
863 dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n"); 865 dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n");
864 if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) { 866 if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
@@ -1133,7 +1135,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
1133 * Global spi HIF function table 1135 * Global spi HIF function table
1134 * 1136 *
1135 ********************************************/ 1137 ********************************************/
1136const struct wilc_hif_func wilc_hif_spi = { 1138static const struct wilc_hif_func wilc_hif_spi = {
1137 .hif_init = wilc_spi_init, 1139 .hif_init = wilc_spi_init,
1138 .hif_deinit = _wilc_spi_deinit, 1140 .hif_deinit = _wilc_spi_deinit,
1139 .hif_read_reg = wilc_spi_read_reg, 1141 .hif_read_reg = wilc_spi_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 60d8b055bb2f..c1a24f7bc85f 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -90,17 +90,12 @@ static const struct wiphy_wowlan_support wowlan_support = {
90#define IS_MGMT_STATUS_SUCCES 0x040 90#define IS_MGMT_STATUS_SUCCES 0x040
91#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff) 91#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
92 92
93extern int wilc_mac_open(struct net_device *ndev);
94extern int wilc_mac_close(struct net_device *ndev);
95
96static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW]; 93static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
97static u32 last_scanned_cnt; 94static u32 last_scanned_cnt;
98struct timer_list wilc_during_ip_timer; 95struct timer_list wilc_during_ip_timer;
99static struct timer_list hAgingTimer; 96static struct timer_list hAgingTimer;
100static u8 op_ifcs; 97static u8 op_ifcs;
101 98
102u8 wilc_initialized = 1;
103
104#define CHAN2G(_channel, _freq, _flags) { \ 99#define CHAN2G(_channel, _freq, _flags) { \
105 .band = NL80211_BAND_2GHZ, \ 100 .band = NL80211_BAND_2GHZ, \
106 .center_freq = (_freq), \ 101 .center_freq = (_freq), \
@@ -1193,6 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
1193 u32 i = 0; 1188 u32 i = 0;
1194 u32 associatedsta = ~0; 1189 u32 associatedsta = ~0;
1195 u32 inactive_time = 0; 1190 u32 inactive_time = 0;
1191
1196 priv = wiphy_priv(wiphy); 1192 priv = wiphy_priv(wiphy);
1197 vif = netdev_priv(dev); 1193 vif = netdev_priv(dev);
1198 1194
@@ -1590,28 +1586,25 @@ static int remain_on_channel(struct wiphy *wiphy,
1590 priv->strRemainOnChanParams.u32ListenDuration = duration; 1586 priv->strRemainOnChanParams.u32ListenDuration = duration;
1591 priv->strRemainOnChanParams.u32ListenSessionID++; 1587 priv->strRemainOnChanParams.u32ListenSessionID++;
1592 1588
1593 s32Error = wilc_remain_on_channel(vif, 1589 return wilc_remain_on_channel(vif,
1594 priv->strRemainOnChanParams.u32ListenSessionID, 1590 priv->strRemainOnChanParams.u32ListenSessionID,
1595 duration, chan->hw_value, 1591 duration, chan->hw_value,
1596 WILC_WFI_RemainOnChannelExpired, 1592 WILC_WFI_RemainOnChannelExpired,
1597 WILC_WFI_RemainOnChannelReady, (void *)priv); 1593 WILC_WFI_RemainOnChannelReady, (void *)priv);
1598
1599 return s32Error;
1600} 1594}
1601 1595
1602static int cancel_remain_on_channel(struct wiphy *wiphy, 1596static int cancel_remain_on_channel(struct wiphy *wiphy,
1603 struct wireless_dev *wdev, 1597 struct wireless_dev *wdev,
1604 u64 cookie) 1598 u64 cookie)
1605{ 1599{
1606 s32 s32Error = 0;
1607 struct wilc_priv *priv; 1600 struct wilc_priv *priv;
1608 struct wilc_vif *vif; 1601 struct wilc_vif *vif;
1609 1602
1610 priv = wiphy_priv(wiphy); 1603 priv = wiphy_priv(wiphy);
1611 vif = netdev_priv(priv->dev); 1604 vif = netdev_priv(priv->dev);
1612 1605
1613 s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID); 1606 return wilc_listen_state_expired(vif,
1614 return s32Error; 1607 priv->strRemainOnChanParams.u32ListenSessionID);
1615} 1608}
1616 1609
1617static int mgmt_tx(struct wiphy *wiphy, 1610static int mgmt_tx(struct wiphy *wiphy,
@@ -1935,12 +1928,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
1935 wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE); 1928 wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
1936 wilc_set_power_mgmt(vif, 0, 0); 1929 wilc_set_power_mgmt(vif, 0, 0);
1937 1930
1938 s32Error = wilc_add_beacon(vif, settings->beacon_interval, 1931 return wilc_add_beacon(vif, settings->beacon_interval,
1939 settings->dtim_period, beacon->head_len, 1932 settings->dtim_period, beacon->head_len,
1940 (u8 *)beacon->head, beacon->tail_len, 1933 (u8 *)beacon->head, beacon->tail_len,
1941 (u8 *)beacon->tail); 1934 (u8 *)beacon->tail);
1942
1943 return s32Error;
1944} 1935}
1945 1936
1946static int change_beacon(struct wiphy *wiphy, struct net_device *dev, 1937static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1948,16 +1939,13 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
1948{ 1939{
1949 struct wilc_priv *priv; 1940 struct wilc_priv *priv;
1950 struct wilc_vif *vif; 1941 struct wilc_vif *vif;
1951 s32 s32Error = 0;
1952 1942
1953 priv = wiphy_priv(wiphy); 1943 priv = wiphy_priv(wiphy);
1954 vif = netdev_priv(priv->dev); 1944 vif = netdev_priv(priv->dev);
1955 1945
1956 s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len, 1946 return wilc_add_beacon(vif, 0, 0, beacon->head_len,
1957 (u8 *)beacon->head, beacon->tail_len, 1947 (u8 *)beacon->head, beacon->tail_len,
1958 (u8 *)beacon->tail); 1948 (u8 *)beacon->tail);
1959
1960 return s32Error;
1961} 1949}
1962 1950
1963static int stop_ap(struct wiphy *wiphy, struct net_device *dev) 1951static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index ec6b1674cf38..d431673bc46c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -225,7 +225,6 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
225 225
226void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset); 226void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
227void wilc_mac_indicate(struct wilc *wilc, int flag); 227void wilc_mac_indicate(struct wilc *wilc, int flag);
228int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
229void wilc_netdev_cleanup(struct wilc *wilc); 228void wilc_netdev_cleanup(struct wilc *wilc);
230int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio, 229int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
231 const struct wilc_hif_func *ops); 230 const struct wilc_hif_func *ops);
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index de6c4ddbf45a..11365efcc5d0 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -248,9 +248,6 @@ struct wilc_hif_func {
248 void (*disable_interrupt)(struct wilc *nic); 248 void (*disable_interrupt)(struct wilc *nic);
249}; 249};
250 250
251extern const struct wilc_hif_func wilc_hif_spi;
252extern const struct wilc_hif_func wilc_hif_sdio;
253
254/******************************************** 251/********************************************
255 * 252 *
256 * Configuration Structure 253 * Configuration Structure
@@ -297,9 +294,6 @@ void wilc_enable_tcp_ack_filter(bool value);
297int wilc_wlan_get_num_conn_ifcs(struct wilc *); 294int wilc_wlan_get_num_conn_ifcs(struct wilc *);
298int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev); 295int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
299 296
300int wilc_mac_open(struct net_device *ndev);
301int wilc_mac_close(struct net_device *ndev);
302
303void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size); 297void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
304void host_wakeup_notify(struct wilc *wilc); 298void host_wakeup_notify(struct wilc *wilc);
305void host_sleep_notify(struct wilc *wilc); 299void host_sleep_notify(struct wilc *wilc);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d564627..aa0e5a3d4a89 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -323,7 +323,7 @@ static int prism2_scan(struct wiphy *wiphy,
323 323
324 priv->scan_request = request; 324 priv->scan_request = request;
325 325
326 memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan)); 326 memset(&msg1, 0x00, sizeof(msg1));
327 msg1.msgcode = DIDmsg_dot11req_scan; 327 msg1.msgcode = DIDmsg_dot11req_scan;
328 msg1.bsstype.data = P80211ENUM_bsstype_any; 328 msg1.bsstype.data = P80211ENUM_bsstype_any;
329 329
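Passing sizeof(msg1) rather than sizeof(struct p80211msg_dot11req_scan) ties the cleared size to the variable itself, so the memset stays correct even if the variable's type is ever changed. The general shape of the idiom, with a made-up struct:

#include <assert.h>
#include <string.h>

struct example_msg {            /* stand-in for the driver's message struct */
        unsigned int msgcode;
        unsigned char payload[32];
};

int main(void)
{
        struct example_msg msg1;

        memset(&msg1, 0, sizeof(msg1)); /* tracks the variable, not the type name */
        assert(msg1.msgcode == 0 && msg1.payload[31] == 0);
        return 0;
}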
@@ -375,13 +375,13 @@ static int prism2_scan(struct wiphy *wiphy,
375 ie_buf[0] = WLAN_EID_SSID; 375 ie_buf[0] = WLAN_EID_SSID;
376 ie_buf[1] = msg2.ssid.data.len; 376 ie_buf[1] = msg2.ssid.data.len;
377 ie_len = ie_buf[1] + 2; 377 ie_len = ie_buf[1] + 2;
378 memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len); 378 memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len);
379 freq = ieee80211_channel_to_frequency(msg2.dschannel.data, 379 freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
380 NL80211_BAND_2GHZ); 380 NL80211_BAND_2GHZ);
381 bss = cfg80211_inform_bss(wiphy, 381 bss = cfg80211_inform_bss(wiphy,
382 ieee80211_get_channel(wiphy, freq), 382 ieee80211_get_channel(wiphy, freq),
383 CFG80211_BSS_FTYPE_UNKNOWN, 383 CFG80211_BSS_FTYPE_UNKNOWN,
384 (const u8 *)&(msg2.bssid.data.data), 384 (const u8 *)&msg2.bssid.data.data,
385 msg2.timestamp.data, msg2.capinfo.data, 385 msg2.timestamp.data, msg2.capinfo.data,
386 msg2.beaconperiod.data, 386 msg2.beaconperiod.data,
387 ie_buf, 387 ie_buf,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 43c299c3b631..60caf9c37727 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -137,21 +137,11 @@
137#define HFA384x_DLSTATE_FLASHENABLED 2 137#define HFA384x_DLSTATE_FLASHENABLED 2
138 138
139/*--- Register Field Masks --------------------------*/ 139/*--- Register Field Masks --------------------------*/
140#define HFA384x_CMD_AINFO ((u16)(BIT(14) | BIT(13) \ 140#define HFA384x_CMD_AINFO ((u16)GENMASK(14, 8))
141 | BIT(12) | BIT(11) \ 141#define HFA384x_CMD_MACPORT ((u16)GENMASK(10, 8))
142 | BIT(10) | BIT(9) \ 142#define HFA384x_CMD_PROGMODE ((u16)GENMASK(9, 8))
143 | BIT(8))) 143#define HFA384x_CMD_CMDCODE ((u16)GENMASK(5, 0))
144#define HFA384x_CMD_MACPORT ((u16)(BIT(10) | BIT(9) | \ 144#define HFA384x_STATUS_RESULT ((u16)GENMASK(14, 8))
145 BIT(8)))
146#define HFA384x_CMD_PROGMODE ((u16)(BIT(9) | BIT(8)))
147#define HFA384x_CMD_CMDCODE ((u16)(BIT(5) | BIT(4) | \
148 BIT(3) | BIT(2) | \
149 BIT(1) | BIT(0)))
150
151#define HFA384x_STATUS_RESULT ((u16)(BIT(14) | BIT(13) \
152 | BIT(12) | BIT(11) \
153 | BIT(10) | BIT(9) \
154 | BIT(8)))
155 145
156/*--- Command Code Constants --------------------------*/ 146/*--- Command Code Constants --------------------------*/
157/*--- Controller Commands --------------------------*/ 147/*--- Controller Commands --------------------------*/
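GENMASK(h, l) produces a mask with bits h down to l set, so each converted definition is bit-for-bit identical to the BIT() chain it replaces; GENMASK(14, 8), for instance, is 0x7f00. A standalone check using a simplified 32-bit variant of the macro:

#include <assert.h>
#include <stdint.h>

/* Simplified 32-bit GENMASK for h < 31; the kernel macro handles the full
 * word width, but the result is the same for these field widths.
 */
#define GENMASK32(h, l) \
        ((uint32_t)(((1u << ((h) + 1)) - 1) & ~((1u << (l)) - 1)))

int main(void)
{
        assert(GENMASK32(14, 8) == 0x7f00);     /* CMD_AINFO / STATUS_RESULT */
        assert(GENMASK32(10, 8) == 0x0700);     /* CMD_MACPORT */
        assert(GENMASK32(9, 8)  == 0x0300);     /* CMD_PROGMODE */
        assert(GENMASK32(5, 0)  == 0x003f);     /* CMD_CMDCODE */
        return 0;
}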
@@ -266,7 +256,7 @@
266#define HFA384x_RID_DBMCOMMSQUALITY_LEN \ 256#define HFA384x_RID_DBMCOMMSQUALITY_LEN \
267 ((u16)sizeof(struct hfa384x_dbmcommsquality)) 257 ((u16)sizeof(struct hfa384x_dbmcommsquality))
268#define HFA384x_RID_JOINREQUEST_LEN \ 258#define HFA384x_RID_JOINREQUEST_LEN \
269 ((u16)sizeof(struct hfa384x_JoinRequest_data)) 259 ((u16)sizeof(struct hfa384x_join_request_data))
270 260
271/*-------------------------------------------------------------------- 261/*--------------------------------------------------------------------
272 * Information RIDs: Modem Information 262 * Information RIDs: Modem Information
@@ -286,7 +276,7 @@
286#define HFA384x_RID_CNFWEPFLAGS ((u16)0xFC28) 276#define HFA384x_RID_CNFWEPFLAGS ((u16)0xFC28)
287#define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A) 277#define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A)
288#define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D) 278#define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D)
289#define HFA384x_RID_CNFAPBCNint ((u16)0xFC33) 279#define HFA384x_RID_CNFAPBCNINT ((u16)0xFC33)
290#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46) 280#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46)
291#define HFA384x_RID_CNFWPADATA ((u16)0xFC48) 281#define HFA384x_RID_CNFWPADATA ((u16)0xFC48)
292#define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3) 282#define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3)
@@ -408,27 +398,27 @@ struct hfa384x_caplevel {
408#define HFA384x_CREATEIBSS_JOINCREATEIBSS 0 398#define HFA384x_CREATEIBSS_JOINCREATEIBSS 0
409 399
410/*-- Configuration Record: HostScanRequest (data portion only) --*/ 400/*-- Configuration Record: HostScanRequest (data portion only) --*/
411struct hfa384x_HostScanRequest_data { 401struct hfa384x_host_scan_request_data {
412 u16 channelList; 402 u16 channel_list;
413 u16 txRate; 403 u16 tx_rate;
414 struct hfa384x_bytestr32 ssid; 404 struct hfa384x_bytestr32 ssid;
415} __packed; 405} __packed;
416 406
417/*-- Configuration Record: JoinRequest (data portion only) --*/ 407/*-- Configuration Record: JoinRequest (data portion only) --*/
418struct hfa384x_JoinRequest_data { 408struct hfa384x_join_request_data {
419 u8 bssid[WLAN_BSSID_LEN]; 409 u8 bssid[WLAN_BSSID_LEN];
420 u16 channel; 410 u16 channel;
421} __packed; 411} __packed;
422 412
423/*-- Configuration Record: authenticateStation (data portion only) --*/ 413/*-- Configuration Record: authenticateStation (data portion only) --*/
424struct hfa384x_authenticateStation_data { 414struct hfa384x_authenticate_station_data {
425 u8 address[ETH_ALEN]; 415 u8 address[ETH_ALEN];
426 u16 status; 416 u16 status;
427 u16 algorithm; 417 u16 algorithm;
428} __packed; 418} __packed;
429 419
430/*-- Configuration Record: WPAData (data portion only) --*/ 420/*-- Configuration Record: WPAData (data portion only) --*/
431struct hfa384x_WPAData { 421struct hfa384x_wpa_data {
432 u16 datalen; 422 u16 datalen;
433 u8 data[0]; /* max 80 */ 423 u8 data[0]; /* max 80 */
434} __packed; 424} __packed;
@@ -455,16 +445,16 @@ struct hfa384x_downloadbuffer {
455 445
456/*-- Information Record: commsquality --*/ 446/*-- Information Record: commsquality --*/
457struct hfa384x_commsquality { 447struct hfa384x_commsquality {
458 u16 CQ_currBSS; 448 u16 cq_curr_bss;
459 u16 ASL_currBSS; 449 u16 asl_curr_bss;
460 u16 ANL_currFC; 450 u16 anl_curr_fc;
461} __packed; 451} __packed;
462 452
463/*-- Information Record: dmbcommsquality --*/ 453/*-- Information Record: dmbcommsquality --*/
464struct hfa384x_dbmcommsquality { 454struct hfa384x_dbmcommsquality {
465 u16 CQdbm_currBSS; 455 u16 cq_dbm_curr_bss;
466 u16 ASLdbm_currBSS; 456 u16 asl_dbm_curr_bss;
467 u16 ANLdbm_currFC; 457 u16 anl_dbm_curr_fc;
468} __packed; 458} __packed;
469 459
470/*-------------------------------------------------------------------- 460/*--------------------------------------------------------------------
@@ -511,9 +501,8 @@ struct hfa384x_tx_frame {
511#define HFA384x_TXSTATUS_AGEDERR ((u16)BIT(1)) 501#define HFA384x_TXSTATUS_AGEDERR ((u16)BIT(1))
512#define HFA384x_TXSTATUS_RETRYERR ((u16)BIT(0)) 502#define HFA384x_TXSTATUS_RETRYERR ((u16)BIT(0))
513/*-- Transmit Control Field --*/ 503/*-- Transmit Control Field --*/
514#define HFA384x_TX_MACPORT ((u16)(BIT(10) | \ 504#define HFA384x_TX_MACPORT ((u16)GENMASK(10, 8))
515 BIT(9) | BIT(8))) 505#define HFA384x_TX_STRUCTYPE ((u16)GENMASK(4, 3))
516#define HFA384x_TX_STRUCTYPE ((u16)(BIT(4) | BIT(3)))
517#define HFA384x_TX_TXEX ((u16)BIT(2)) 506#define HFA384x_TX_TXEX ((u16)BIT(2))
518#define HFA384x_TX_TXOK ((u16)BIT(1)) 507#define HFA384x_TX_TXOK ((u16)BIT(1))
519/*-------------------------------------------------------------------- 508/*--------------------------------------------------------------------
@@ -571,9 +560,7 @@ struct hfa384x_rx_frame {
571 */ 560 */
572 561
573/*-- Status Fields --*/ 562/*-- Status Fields --*/
574#define HFA384x_RXSTATUS_MACPORT ((u16)(BIT(10) | \ 563#define HFA384x_RXSTATUS_MACPORT ((u16)GENMASK(10, 8))
575 BIT(9) | \
576 BIT(8)))
577#define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0)) 564#define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0))
578/*-------------------------------------------------------------------- 565/*--------------------------------------------------------------------
579 * Communication Frames: Test/Get/Set Field Values for Receive Frames 566 * Communication Frames: Test/Get/Set Field Values for Receive Frames
@@ -610,7 +597,7 @@ struct hfa384x_rx_frame {
610 */ 597 */
611 598
612/*-- Inquiry Frame, Diagnose: Communication Tallies --*/ 599/*-- Inquiry Frame, Diagnose: Communication Tallies --*/
613struct hfa384x_CommTallies16 { 600struct hfa384x_comm_tallies_16 {
614 u16 txunicastframes; 601 u16 txunicastframes;
615 u16 txmulticastframes; 602 u16 txmulticastframes;
616 u16 txfragments; 603 u16 txfragments;
@@ -634,7 +621,7 @@ struct hfa384x_CommTallies16 {
634 u16 rxmsginbadmsgfrag; 621 u16 rxmsginbadmsgfrag;
635} __packed; 622} __packed;
636 623
637struct hfa384x_CommTallies32 { 624struct hfa384x_comm_tallies_32 {
638 u32 txunicastframes; 625 u32 txunicastframes;
639 u32 txmulticastframes; 626 u32 txmulticastframes;
640 u32 txfragments; 627 u32 txfragments;
@@ -659,7 +646,7 @@ struct hfa384x_CommTallies32 {
659} __packed; 646} __packed;
660 647
661/*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/ 648/*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/
662struct hfa384x_ScanResultSub { 649struct hfa384x_scan_result_sub {
663 u16 chid; 650 u16 chid;
664 u16 anl; 651 u16 anl;
665 u16 sl; 652 u16 sl;
@@ -671,14 +658,14 @@ struct hfa384x_ScanResultSub {
671 u16 proberesp_rate; 658 u16 proberesp_rate;
672} __packed; 659} __packed;
673 660
674struct hfa384x_ScanResult { 661struct hfa384x_scan_result {
675 u16 rsvd; 662 u16 rsvd;
676 u16 scanreason; 663 u16 scanreason;
677 struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX]; 664 struct hfa384x_scan_result_sub result[HFA384x_SCANRESULT_MAX];
678} __packed; 665} __packed;
679 666
680/*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/ 667/*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/
681struct hfa384x_ChInfoResultSub { 668struct hfa384x_ch_info_result_sub {
682 u16 chid; 669 u16 chid;
683 u16 anl; 670 u16 anl;
684 u16 pnl; 671 u16 pnl;
@@ -688,13 +675,13 @@ struct hfa384x_ChInfoResultSub {
688#define HFA384x_CHINFORESULT_BSSACTIVE BIT(0) 675#define HFA384x_CHINFORESULT_BSSACTIVE BIT(0)
689#define HFA384x_CHINFORESULT_PCFACTIVE BIT(1) 676#define HFA384x_CHINFORESULT_PCFACTIVE BIT(1)
690 677
691struct hfa384x_ChInfoResult { 678struct hfa384x_ch_info_result {
692 u16 scanchannels; 679 u16 scanchannels;
693 struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX]; 680 struct hfa384x_ch_info_result_sub result[HFA384x_CHINFORESULT_MAX];
694} __packed; 681} __packed;
695 682
696/*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/ 683/*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
697struct hfa384x_HScanResultSub { 684struct hfa384x_hscan_result_sub {
698 u16 chid; 685 u16 chid;
699 u16 anl; 686 u16 anl;
700 u16 sl; 687 u16 sl;
@@ -707,10 +694,10 @@ struct hfa384x_HScanResultSub {
707 u16 atim; 694 u16 atim;
708} __packed; 695} __packed;
709 696
710struct hfa384x_HScanResult { 697struct hfa384x_hscan_result {
711 u16 nresult; 698 u16 nresult;
712 u16 rsvd; 699 u16 rsvd;
713 struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX]; 700 struct hfa384x_hscan_result_sub result[HFA384x_HSCANRESULT_MAX];
714} __packed; 701} __packed;
715 702
716/*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/ 703/*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/
@@ -723,7 +710,7 @@ struct hfa384x_HScanResult {
723#define HFA384x_LINK_AP_INRANGE ((u16)5) 710#define HFA384x_LINK_AP_INRANGE ((u16)5)
724#define HFA384x_LINK_ASSOCFAIL ((u16)6) 711#define HFA384x_LINK_ASSOCFAIL ((u16)6)
725 712
726struct hfa384x_LinkStatus { 713struct hfa384x_link_status {
727 u16 linkstatus; 714 u16 linkstatus;
728} __packed; 715} __packed;
729 716
@@ -733,7 +720,7 @@ struct hfa384x_LinkStatus {
733#define HFA384x_ASSOCSTATUS_REASSOC ((u16)2) 720#define HFA384x_ASSOCSTATUS_REASSOC ((u16)2)
734#define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5) 721#define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5)
735 722
736struct hfa384x_AssocStatus { 723struct hfa384x_assoc_status {
737 u16 assocstatus; 724 u16 assocstatus;
738 u8 sta_addr[ETH_ALEN]; 725 u8 sta_addr[ETH_ALEN];
739 /* old_ap_addr is only valid if assocstatus == 2 */ 726 /* old_ap_addr is only valid if assocstatus == 2 */
@@ -744,37 +731,37 @@ struct hfa384x_AssocStatus {
744 731
745/*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/ 732/*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/
746 733
747struct hfa384x_AuthRequest { 734struct hfa384x_auth_request {
748 u8 sta_addr[ETH_ALEN]; 735 u8 sta_addr[ETH_ALEN];
749 u16 algorithm; 736 u16 algorithm;
750} __packed; 737} __packed;
751 738
752/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/ 739/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
753 740
754struct hfa384x_PSUserCount { 741struct hfa384x_ps_user_count {
755 u16 usercnt; 742 u16 usercnt;
756} __packed; 743} __packed;
757 744
758struct hfa384x_KeyIDChanged { 745struct hfa384x_key_id_changed {
759 u8 sta_addr[ETH_ALEN]; 746 u8 sta_addr[ETH_ALEN];
760 u16 keyid; 747 u16 keyid;
761} __packed; 748} __packed;
762 749
763/*-- Collection of all Inf frames ---------------*/ 750/*-- Collection of all Inf frames ---------------*/
764union hfa384x_infodata { 751union hfa384x_infodata {
765 struct hfa384x_CommTallies16 commtallies16; 752 struct hfa384x_comm_tallies_16 commtallies16;
766 struct hfa384x_CommTallies32 commtallies32; 753 struct hfa384x_comm_tallies_32 commtallies32;
767 struct hfa384x_ScanResult scanresult; 754 struct hfa384x_scan_result scanresult;
768 struct hfa384x_ChInfoResult chinforesult; 755 struct hfa384x_ch_info_result chinforesult;
769 struct hfa384x_HScanResult hscanresult; 756 struct hfa384x_hscan_result hscanresult;
770 struct hfa384x_LinkStatus linkstatus; 757 struct hfa384x_link_status linkstatus;
771 struct hfa384x_AssocStatus assocstatus; 758 struct hfa384x_assoc_status assocstatus;
772 struct hfa384x_AuthRequest authreq; 759 struct hfa384x_auth_request authreq;
773 struct hfa384x_PSUserCount psusercnt; 760 struct hfa384x_ps_user_count psusercnt;
774 struct hfa384x_KeyIDChanged keyidchanged; 761 struct hfa384x_key_id_changed keyidchanged;
775} __packed; 762} __packed;
776 763
777struct hfa384x_InfFrame { 764struct hfa384x_inf_frame {
778 u16 framelen; 765 u16 framelen;
779 u16 infotype; 766 u16 infotype;
780 union hfa384x_infodata info; 767 union hfa384x_infodata info;
@@ -862,7 +849,7 @@ struct hfa384x_usb_rxfrm {
862 849
863struct hfa384x_usb_infofrm { 850struct hfa384x_usb_infofrm {
864 u16 type; 851 u16 type;
865 struct hfa384x_InfFrame info; 852 struct hfa384x_inf_frame info;
866} __packed; 853} __packed;
867 854
868struct hfa384x_usb_statusresp { 855struct hfa384x_usb_statusresp {
@@ -1169,7 +1156,6 @@ enum ctlx_state {
1169 CTLX_REQ_COMPLETE, /* OUT URB complete */ 1156 CTLX_REQ_COMPLETE, /* OUT URB complete */
1170 CTLX_RESP_COMPLETE /* IN URB received */ 1157 CTLX_RESP_COMPLETE /* IN URB received */
1171}; 1158};
1172typedef enum ctlx_state CTLX_STATE;
1173 1159
1174struct hfa384x_usbctlx; 1160struct hfa384x_usbctlx;
1175struct hfa384x; 1161struct hfa384x;
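Removing the CTLX_STATE typedef follows the kernel preference for naming enum and struct types directly instead of hiding them behind typedefs; the later hunk that changes the state field to "enum ctlx_state state" is the matching use-site update. A schematic of the two styles (enumerator names trimmed for illustration):

/* Enumerator names here are trimmed for illustration. */
enum ctlx_state { CTLX_START, CTLX_COMPLETE };

/* Removed style: a typedef that hides the enum keyword at use sites. */
typedef enum ctlx_state CTLX_STATE;

/* Style after the patch: the enum type is named directly. */
struct ctlx_example {
        enum ctlx_state state;  /* tracks running state */
};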
@@ -1186,7 +1172,7 @@ struct hfa384x_usbctlx {
1186 union hfa384x_usbout outbuf; /* pkt buf for OUT */ 1172 union hfa384x_usbout outbuf; /* pkt buf for OUT */
1187 union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */ 1173 union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */
1188 1174
1189 CTLX_STATE state; /* Tracks running state */ 1175 enum ctlx_state state; /* Tracks running state */
1190 1176
1191 struct completion done; 1177 struct completion done;
1192 volatile int reapable; /* Food for the reaper task */ 1178 volatile int reapable; /* Food for the reaper task */
@@ -1294,7 +1280,7 @@ struct hfa384x {
1294 int scanflag; /* to signal scan complete */ 1280 int scanflag; /* to signal scan complete */
1295 int join_ap; /* are we joined to a specific ap */ 1281 int join_ap; /* are we joined to a specific ap */
1296 int join_retries; /* number of join retries till we fail */ 1282 int join_retries; /* number of join retries till we fail */
1297 struct hfa384x_JoinRequest_data joinreq; /* join request saved data */ 1283 struct hfa384x_join_request_data joinreq;/* join request saved data */
1298 1284
1299 struct wlandevice *wlandev; 1285 struct wlandevice *wlandev;
1300 /* Timer to allow for the deferred processing of linkstatus messages */ 1286 /* Timer to allow for the deferred processing of linkstatus messages */
@@ -1360,17 +1346,17 @@ struct hfa384x {
1360 struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */ 1346 struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */
1361 1347
1362 u32 psusercount; /* Power save user count. */ 1348 u32 psusercount; /* Power save user count. */
1363 struct hfa384x_CommTallies32 tallies; /* Communication tallies. */ 1349 struct hfa384x_comm_tallies_32 tallies; /* Communication tallies. */
1364 u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */ 1350 u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */
1365 1351
1366 /* Channel Info request results (AP only) */ 1352 /* Channel Info request results (AP only) */
1367 struct { 1353 struct {
1368 atomic_t done; 1354 atomic_t done;
1369 u8 count; 1355 u8 count;
1370 struct hfa384x_ChInfoResult results; 1356 struct hfa384x_ch_info_result results;
1371 } channel_info; 1357 } channel_info;
1372 1358
1373 struct hfa384x_InfFrame *scanresults; 1359 struct hfa384x_inf_frame *scanresults;
1374 1360
1375 struct prism2sta_authlist authlist; /* Authenticated station list. */ 1361 struct prism2sta_authlist authlist; /* Authenticated station list. */
1376 unsigned int accessmode; /* Access mode. */ 1362 unsigned int accessmode; /* Access mode. */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6a107f8a06e2..4fe037aeef12 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,114 +1,114 @@
1/* src/prism2/driver/hfa384x_usb.c 1/* src/prism2/driver/hfa384x_usb.c
2* 2 *
3* Functions that talk to the USB variantof the Intersil hfa384x MAC 3 * Functions that talk to the USB variantof the Intersil hfa384x MAC
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file implements functions that correspond to the prism2/hfa384x 47 * This file implements functions that correspond to the prism2/hfa384x
48* 802.11 MAC hardware and firmware host interface. 48 * 802.11 MAC hardware and firmware host interface.
49* 49 *
50* The functions can be considered to represent several levels of 50 * The functions can be considered to represent several levels of
51* abstraction. The lowest level functions are simply C-callable wrappers 51 * abstraction. The lowest level functions are simply C-callable wrappers
52* around the register accesses. The next higher level represents C-callable 52 * around the register accesses. The next higher level represents C-callable
53* prism2 API functions that match the Intersil documentation as closely 53 * prism2 API functions that match the Intersil documentation as closely
54* as is reasonable. The next higher layer implements common sequences 54 * as is reasonable. The next higher layer implements common sequences
55* of invocations of the API layer (e.g. write to bap, followed by cmd). 55 * of invocations of the API layer (e.g. write to bap, followed by cmd).
56* 56 *
57* Common sequences: 57 * Common sequences:
58* hfa384x_drvr_xxx Highest level abstractions provided by the 58 * hfa384x_drvr_xxx Highest level abstractions provided by the
59* hfa384x code. They are driver defined wrappers 59 * hfa384x code. They are driver defined wrappers
60* for common sequences. These functions generally 60 * for common sequences. These functions generally
61* use the services of the lower levels. 61 * use the services of the lower levels.
62* 62 *
63* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These 63 * hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
64* functions are wrappers for the RID get/set 64 * functions are wrappers for the RID get/set
65* sequence. They call copy_[to|from]_bap() and 65 * sequence. They call copy_[to|from]_bap() and
66* cmd_access(). These functions operate on the 66 * cmd_access(). These functions operate on the
67* RIDs and buffers without validation. The caller 67 * RIDs and buffers without validation. The caller
68* is responsible for that. 68 * is responsible for that.
69* 69 *
70* API wrapper functions: 70 * API wrapper functions:
71* hfa384x_cmd_xxx functions that provide access to the f/w commands. 71 * hfa384x_cmd_xxx functions that provide access to the f/w commands.
72* The function arguments correspond to each command 72 * The function arguments correspond to each command
73* argument, even command arguments that get packed 73 * argument, even command arguments that get packed
74* into single registers. These functions _just_ 74 * into single registers. These functions _just_
75* issue the command by setting the cmd/parm regs 75 * issue the command by setting the cmd/parm regs
76* & reading the status/resp regs. Additional 76 * & reading the status/resp regs. Additional
77* activities required to fully use a command 77 * activities required to fully use a command
78* (read/write from/to bap, get/set int status etc.) 78 * (read/write from/to bap, get/set int status etc.)
79* are implemented separately. Think of these as 79 * are implemented separately. Think of these as
80* C-callable prism2 commands. 80 * C-callable prism2 commands.
81* 81 *
82* Lowest Layer Functions: 82 * Lowest Layer Functions:
83* hfa384x_docmd_xxx These functions implement the sequence required 83 * hfa384x_docmd_xxx These functions implement the sequence required
84* to issue any prism2 command. Primarily used by the 84 * to issue any prism2 command. Primarily used by the
85* hfa384x_cmd_xxx functions. 85 * hfa384x_cmd_xxx functions.
86* 86 *
87* hfa384x_bap_xxx BAP read/write access functions. 87 * hfa384x_bap_xxx BAP read/write access functions.
88* Note: we usually use BAP0 for non-interrupt context 88 * Note: we usually use BAP0 for non-interrupt context
89* and BAP1 for interrupt context. 89 * and BAP1 for interrupt context.
90* 90 *
91* hfa384x_dl_xxx download related functions. 91 * hfa384x_dl_xxx download related functions.
92* 92 *
93* Driver State Issues: 93 * Driver State Issues:
94* Note that there are two pairs of functions that manage the 94 * Note that there are two pairs of functions that manage the
95* 'initialized' and 'running' states of the hw/MAC combo. The four 95 * 'initialized' and 'running' states of the hw/MAC combo. The four
96* functions are create(), destroy(), start(), and stop(). create() 96 * functions are create(), destroy(), start(), and stop(). create()
97* sets up the data structures required to support the hfa384x_* 97 * sets up the data structures required to support the hfa384x_*
98* functions and destroy() cleans them up. The start() function gets 98 * functions and destroy() cleans them up. The start() function gets
99* the actual hardware running and enables the interrupts. The stop() 99 * the actual hardware running and enables the interrupts. The stop()
100* function shuts the hardware down. The sequence should be: 100 * function shuts the hardware down. The sequence should be:
101* create() 101 * create()
102* start() 102 * start()
103* . 103 * .
104* . Do interesting things w/ the hardware 104 * . Do interesting things w/ the hardware
105* . 105 * .
106* stop() 106 * stop()
107* destroy() 107 * destroy()
108* 108 *
109* Note that destroy() can be called without calling stop() first. 109 * Note that destroy() can be called without calling stop() first.
110* -------------------------------------------------------------------- 110 * --------------------------------------------------------------------
111*/ 111 */
112 112
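For illustration only, here is a minimal sketch of the create()/start()/stop()/destroy() pairing described in the "Driver State Issues" note above. The start()/stop() entry points are represented by placeholder stubs because their real prototypes are not shown in these hunks; only hfa384x_create() and hfa384x_destroy() are taken from this file.

/* Placeholder stubs standing in for the real start()/stop() entry
 * points named in the header comment; their actual prototypes are
 * not visible in this diff.
 */
static int example_start(struct hfa384x *hw) { return 0; }
static void example_stop(struct hfa384x *hw) { }

static int example_lifecycle(struct hfa384x *hw, struct usb_device *usb)
{
	int result;

	hfa384x_create(hw, usb);	/* sets up data structures only */

	result = example_start(hw);	/* bring the hardware up */
	if (result == 0) {
		/* ... do interesting things with the hardware ... */
		example_stop(hw);	/* shut the hardware down */
	}

	hfa384x_destroy(hw);	/* legal even if stop() was never called */
	return result;
}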
113#include <linux/module.h> 113#include <linux/module.h>
114#include <linux/kernel.h> 114#include <linux/kernel.h>
@@ -153,8 +153,8 @@ enum cmd_mode {
153static void dbprint_urb(struct urb *urb); 153static void dbprint_urb(struct urb *urb);
154#endif 154#endif
155 155
156static void 156static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
157hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm); 157 struct hfa384x_usb_rxfrm *rxfrm);
158 158
159static void hfa384x_usb_defer(struct work_struct *data); 159static void hfa384x_usb_defer(struct work_struct *data);
160 160
@@ -173,7 +173,8 @@ hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
173 173
174static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb); 174static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
175 175
176static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin); 176static void hfa384x_usbin_info(struct wlandevice *wlandev,
177 union hfa384x_usbin *usbin);
177 178
178static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin, 179static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
179 int urb_status); 180 int urb_status);
@@ -193,9 +194,11 @@ static void hfa384x_usbctlx_completion_task(unsigned long data);
193 194
194static void hfa384x_usbctlx_reaper_task(unsigned long data); 195static void hfa384x_usbctlx_reaper_task(unsigned long data);
195 196
196static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); 197static int hfa384x_usbctlx_submit(struct hfa384x *hw,
198 struct hfa384x_usbctlx *ctlx);
197 199
198static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); 200static void unlocked_usbctlx_complete(struct hfa384x *hw,
201 struct hfa384x_usbctlx *ctlx);
199 202
200struct usbctlx_completor { 203struct usbctlx_completor {
201 int (*complete)(struct usbctlx_completor *); 204 int (*complete)(struct usbctlx_completor *);
@@ -209,7 +212,8 @@ hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
209static int 212static int
210unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); 213unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
211 214
212static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx); 215static void hfa384x_cb_status(struct hfa384x *hw,
216 const struct hfa384x_usbctlx *ctlx);
213 217
214static int 218static int
215usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp, 219usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
@@ -263,7 +267,7 @@ hfa384x_dowmem(struct hfa384x *hw,
263 267
264static int hfa384x_isgood_pdrcode(u16 pdrcode); 268static int hfa384x_isgood_pdrcode(u16 pdrcode);
265 269
266static inline const char *ctlxstr(CTLX_STATE s) 270static inline const char *ctlxstr(enum ctlx_state s)
267{ 271{
268 static const char * const ctlx_str[] = { 272 static const char * const ctlx_str[] = {
269 "Initial state", 273 "Initial state",
@@ -307,21 +311,22 @@ void dbprint_urb(struct urb *urb)
307#endif 311#endif
308 312
309/*---------------------------------------------------------------- 313/*----------------------------------------------------------------
310* submit_rx_urb 314 * submit_rx_urb
311* 315 *
312* Listen for input data on the BULK-IN pipe. If the pipe has 316 * Listen for input data on the BULK-IN pipe. If the pipe has
313* stalled then schedule it to be reset. 317 * stalled then schedule it to be reset.
314* 318 *
315* Arguments: 319 * Arguments:
316* hw device struct 320 * hw device struct
317* memflags memory allocation flags 321 * memflags memory allocation flags
318* 322 *
319* Returns: 323 * Returns:
320* error code from submission 324 * error code from submission
321* 325 *
322* Call context: 326 * Call context:
323* Any 327 * Any
324----------------------------------------------------------------*/ 328 *----------------------------------------------------------------
329 */
325static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags) 330static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
326{ 331{
327 struct sk_buff *skb; 332 struct sk_buff *skb;
@@ -367,23 +372,24 @@ done:
367} 372}
368 373
369/*---------------------------------------------------------------- 374/*----------------------------------------------------------------
370* submit_tx_urb 375 * submit_tx_urb
371* 376 *
372* Prepares and submits the URB of transmitted data. If the 377 * Prepares and submits the URB of transmitted data. If the
373* submission fails then it will schedule the output pipe to 378 * submission fails then it will schedule the output pipe to
374* be reset. 379 * be reset.
375* 380 *
376* Arguments: 381 * Arguments:
377* hw device struct 382 * hw device struct
378* tx_urb URB of data for transmission 383 * tx_urb URB of data for transmission
379* memflags memory allocation flags 384 * memflags memory allocation flags
380* 385 *
381* Returns: 386 * Returns:
382* error code from submission 387 * error code from submission
383* 388 *
384* Call context: 389 * Call context:
385* Any 390 * Any
386----------------------------------------------------------------*/ 391 *----------------------------------------------------------------
392 */
387static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags) 393static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
388{ 394{
389 struct net_device *netdev = hw->wlandev->netdev; 395 struct net_device *netdev = hw->wlandev->netdev;
@@ -412,21 +418,22 @@ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
412} 418}
413 419
414/*---------------------------------------------------------------- 420/*----------------------------------------------------------------
415* hfa384x_usb_defer 421 * hfa384x_usb_defer
416* 422 *
417* There are some things that the USB stack cannot do while 423 * There are some things that the USB stack cannot do while
418* in interrupt context, so we arrange this function to run 424 * in interrupt context, so we arrange this function to run
419* in process context. 425 * in process context.
420* 426 *
421* Arguments: 427 * Arguments:
422* hw device structure 428 * hw device structure
423* 429 *
424* Returns: 430 * Returns:
425* nothing 431 * nothing
426* 432 *
427* Call context: 433 * Call context:
428* process (by design) 434 * process (by design)
429----------------------------------------------------------------*/ 435 *----------------------------------------------------------------
436 */
430static void hfa384x_usb_defer(struct work_struct *data) 437static void hfa384x_usb_defer(struct work_struct *data)
431{ 438{
432 struct hfa384x *hw = container_of(data, struct hfa384x, usb_work); 439 struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
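A minimal sketch of the deferral pattern this handler supports: interrupt-context code only queues the work item, and hfa384x_usb_defer() later runs in process context where blocking USB calls (pipe resets and the like) are allowed. The assumption that hw->usb_work is queued with schedule_work(), and that INIT_WORK() was done at create() time, comes from the container_of() use above, not from anything else shown in this hunk.

static void example_queue_defer(struct hfa384x *hw)
{
	/* Safe from interrupt context; hfa384x_usb_defer() runs later
	 * in process context on the system workqueue.
	 */
	schedule_work(&hw->usb_work);
}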
@@ -501,29 +508,30 @@ static void hfa384x_usb_defer(struct work_struct *data)
501} 508}
502 509
503/*---------------------------------------------------------------- 510/*----------------------------------------------------------------
504* hfa384x_create 511 * hfa384x_create
505* 512 *
506* Sets up the struct hfa384x data structure for use. Note this 513 * Sets up the struct hfa384x data structure for use. Note this
507* does _not_ initialize the actual hardware, just the data structures 514 * does _not_ initialize the actual hardware, just the data structures
508* we use to keep track of its state. 515 * we use to keep track of its state.
509* 516 *
510* Arguments: 517 * Arguments:
511* hw device structure 518 * hw device structure
512* usb usb device structure 519 * usb usb device structure
515* 522 *
516* Returns: 523 * Returns:
517* nothing 524 * nothing
518* 525 *
519* Side effects: 526 * Side effects:
520* 527 *
521* Call context: 528 * Call context:
522* process 529 * process
523----------------------------------------------------------------*/ 530 *----------------------------------------------------------------
531 */
524void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) 532void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
525{ 533{
526 memset(hw, 0, sizeof(struct hfa384x)); 534 memset(hw, 0, sizeof(*hw));
527 hw->usb = usb; 535 hw->usb = usb;
528 536
529 /* set up the endpoints */ 537 /* set up the endpoints */
@@ -571,27 +579,28 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
571} 579}
572 580
573/*---------------------------------------------------------------- 581/*----------------------------------------------------------------
574* hfa384x_destroy 582 * hfa384x_destroy
575* 583 *
576* Partner to hfa384x_create(). This function cleans up the hw 584 * Partner to hfa384x_create(). This function cleans up the hw
577* structure so that it can be freed by the caller using a simple 585 * structure so that it can be freed by the caller using a simple
578* kfree. Currently, this function is just a placeholder. If, at some 586 * kfree. Currently, this function is just a placeholder. If, at some
579* point in the future, an hw in the 'shutdown' state requires a 'deep' 587 * point in the future, an hw in the 'shutdown' state requires a 'deep'
580* kfree, this is where it should be done. Note that if this function 588 * kfree, this is where it should be done. Note that if this function
581* is called on a _running_ hw structure, the drvr_stop() function is 589 * is called on a _running_ hw structure, the drvr_stop() function is
582* called. 590 * called.
583* 591 *
584* Arguments: 592 * Arguments:
585* hw device structure 593 * hw device structure
586* 594 *
587* Returns: 595 * Returns:
588* nothing, this function is not allowed to fail. 596 * nothing, this function is not allowed to fail.
589* 597 *
590* Side effects: 598 * Side effects:
591* 599 *
592* Call context: 600 * Call context:
593* process 601 * process
594----------------------------------------------------------------*/ 602 *----------------------------------------------------------------
603 */
595void hfa384x_destroy(struct hfa384x *hw) 604void hfa384x_destroy(struct hfa384x *hw)
596{ 605{
597 struct sk_buff *skb; 606 struct sk_buff *skb;
@@ -645,10 +654,11 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
645} 654}
646 655
647/*---------------------------------------------------------------- 656/*----------------------------------------------------------------
648* Completor object: 657 * Completor object:
649* This completor must be passed to hfa384x_usbctlx_complete_sync() 658 * This completor must be passed to hfa384x_usbctlx_complete_sync()
650* when processing a CTLX that returns a struct hfa384x_cmdresult structure. 659 * when processing a CTLX that returns a struct hfa384x_cmdresult structure.
651----------------------------------------------------------------*/ 660 *----------------------------------------------------------------
661 */
652struct usbctlx_cmd_completor { 662struct usbctlx_cmd_completor {
653 struct usbctlx_completor head; 663 struct usbctlx_completor head;
654 664
@@ -664,24 +674,23 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
664 return usbctlx_get_status(complete->cmdresp, complete->result); 674 return usbctlx_get_status(complete->cmdresp, complete->result);
665} 675}
666 676
667static inline struct usbctlx_completor *init_cmd_completor( 677static inline struct usbctlx_completor *
668 struct usbctlx_cmd_completor 678init_cmd_completor(struct usbctlx_cmd_completor *completor,
669 *completor, 679 const struct hfa384x_usb_statusresp *cmdresp,
670 const struct hfa384x_usb_statusresp 680 struct hfa384x_cmdresult *result)
671 *cmdresp,
672 struct hfa384x_cmdresult *result)
673{ 681{
674 completor->head.complete = usbctlx_cmd_completor_fn; 682 completor->head.complete = usbctlx_cmd_completor_fn;
675 completor->cmdresp = cmdresp; 683 completor->cmdresp = cmdresp;
676 completor->result = result; 684 completor->result = result;
677 return &(completor->head); 685 return &completor->head;
678} 686}
679 687
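The completor objects in this area follow a small functor pattern: a result-specific wrapper embeds struct usbctlx_completor, points .complete at an interpretation routine, and hands the base pointer to hfa384x_usbctlx_complete_sync(). A caller-side sketch follows; the CTLX and its response buffers are assumed to have been prepared elsewhere (as hfa384x_docmd(), later in this file, actually does), so only the completor wiring is shown.

static int example_wait_for_cmd(struct hfa384x *hw,
				struct hfa384x_usbctlx *ctlx,
				const struct hfa384x_usb_statusresp *cmdresp,
				struct hfa384x_cmdresult *result)
{
	struct usbctlx_cmd_completor completor;

	/* Block until the CTLX completes, then let the completor
	 * translate the firmware status into a return code.
	 */
	return hfa384x_usbctlx_complete_sync(hw, ctlx,
					     init_cmd_completor(&completor,
								cmdresp,
								result));
}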
680/*---------------------------------------------------------------- 688/*----------------------------------------------------------------
681* Completor object: 689 * Completor object:
682* This completor must be passed to hfa384x_usbctlx_complete_sync() 690 * This completor must be passed to hfa384x_usbctlx_complete_sync()
683* when processing a CTLX that reads a RID. 691 * when processing a CTLX that reads a RID.
684----------------------------------------------------------------*/ 692 *----------------------------------------------------------------
693 */
685struct usbctlx_rrid_completor { 694struct usbctlx_rrid_completor {
686 struct usbctlx_completor head; 695 struct usbctlx_completor head;
687 696
@@ -710,37 +719,38 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
710 return 0; 719 return 0;
711} 720}
712 721
713static inline struct usbctlx_completor *init_rrid_completor( 722static inline struct usbctlx_completor *
714 struct usbctlx_rrid_completor 723init_rrid_completor(struct usbctlx_rrid_completor *completor,
715 *completor, 724 const struct hfa384x_usb_rridresp *rridresp,
716 const struct hfa384x_usb_rridresp 725 void *riddata,
717 *rridresp, 726 unsigned int riddatalen)
718 void *riddata,
719 unsigned int riddatalen)
720{ 727{
721 completor->head.complete = usbctlx_rrid_completor_fn; 728 completor->head.complete = usbctlx_rrid_completor_fn;
722 completor->rridresp = rridresp; 729 completor->rridresp = rridresp;
723 completor->riddata = riddata; 730 completor->riddata = riddata;
724 completor->riddatalen = riddatalen; 731 completor->riddatalen = riddatalen;
725 return &(completor->head); 732 return &completor->head;
726} 733}
727 734
728/*---------------------------------------------------------------- 735/*----------------------------------------------------------------
729* Completor object: 736 * Completor object:
730* Interprets the results of a synchronous RID-write 737 * Interprets the results of a synchronous RID-write
731----------------------------------------------------------------*/ 738 *----------------------------------------------------------------
739 */
732#define init_wrid_completor init_cmd_completor 740#define init_wrid_completor init_cmd_completor
733 741
734/*---------------------------------------------------------------- 742/*----------------------------------------------------------------
735* Completor object: 743 * Completor object:
736* Interprets the results of a synchronous memory-write 744 * Interprets the results of a synchronous memory-write
737----------------------------------------------------------------*/ 745 *----------------------------------------------------------------
746 */
738#define init_wmem_completor init_cmd_completor 747#define init_wmem_completor init_cmd_completor
739 748
740/*---------------------------------------------------------------- 749/*----------------------------------------------------------------
741* Completor object: 750 * Completor object:
742* Interprets the results of a synchronous memory-read 751 * Interprets the results of a synchronous memory-read
743----------------------------------------------------------------*/ 752 *----------------------------------------------------------------
753 */
744struct usbctlx_rmem_completor { 754struct usbctlx_rmem_completor {
745 struct usbctlx_completor head; 755 struct usbctlx_completor head;
746 756
@@ -759,43 +769,43 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
759 return 0; 769 return 0;
760} 770}
761 771
762static inline struct usbctlx_completor *init_rmem_completor( 772static inline struct usbctlx_completor *
763 struct usbctlx_rmem_completor 773init_rmem_completor(struct usbctlx_rmem_completor *completor,
764 *completor, 774 struct hfa384x_usb_rmemresp *rmemresp,
765 struct hfa384x_usb_rmemresp 775 void *data,
766 *rmemresp, 776 unsigned int len)
767 void *data,
768 unsigned int len)
769{ 777{
770 completor->head.complete = usbctlx_rmem_completor_fn; 778 completor->head.complete = usbctlx_rmem_completor_fn;
771 completor->rmemresp = rmemresp; 779 completor->rmemresp = rmemresp;
772 completor->data = data; 780 completor->data = data;
773 completor->len = len; 781 completor->len = len;
774 return &(completor->head); 782 return &completor->head;
775} 783}
776 784
777/*---------------------------------------------------------------- 785/*----------------------------------------------------------------
778* hfa384x_cb_status 786 * hfa384x_cb_status
779* 787 *
780* Ctlx_complete handler for async CMD type control exchanges. 788 * Ctlx_complete handler for async CMD type control exchanges.
781* Marks the hw struct accordingly. 789 * Marks the hw struct accordingly.
782* 790 *
783* Note: If the handling is changed here, it should probably be 791 * Note: If the handling is changed here, it should probably be
784* changed in docmd as well. 792 * changed in docmd as well.
785* 793 *
786* Arguments: 794 * Arguments:
787* hw hw struct 795 * hw hw struct
788* ctlx completed CTLX 796 * ctlx completed CTLX
789* 797 *
790* Returns: 798 * Returns:
791* nothing 799 * nothing
792* 800 *
793* Side effects: 801 * Side effects:
794* 802 *
795* Call context: 803 * Call context:
796* interrupt 804 * interrupt
797----------------------------------------------------------------*/ 805 *----------------------------------------------------------------
798static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx) 806 */
807static void hfa384x_cb_status(struct hfa384x *hw,
808 const struct hfa384x_usbctlx *ctlx)
799{ 809{
800 if (ctlx->usercb) { 810 if (ctlx->usercb) {
801 struct hfa384x_cmdresult cmdresult; 811 struct hfa384x_cmdresult cmdresult;
@@ -812,7 +822,8 @@ static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *
812 } 822 }
813} 823}
814 824
815static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd) 825static inline int hfa384x_docmd_wait(struct hfa384x *hw,
826 struct hfa384x_metacmd *cmd)
816{ 827{
817 return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL); 828 return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
818} 829}
@@ -905,24 +916,25 @@ hfa384x_dowmem_async(struct hfa384x *hw,
905} 916}
906 917
907/*---------------------------------------------------------------- 918/*----------------------------------------------------------------
908* hfa384x_cmd_initialize 919 * hfa384x_cmd_initialize
909* 920 *
910* Issues the initialize command and sets the hw->state based 921 * Issues the initialize command and sets the hw->state based
911* on the result. 922 * on the result.
912* 923 *
913* Arguments: 924 * Arguments:
914* hw device structure 925 * hw device structure
915* 926 *
916* Returns: 927 * Returns:
917* 0 success 928 * 0 success
918* >0 f/w reported error - f/w status code 929 * >0 f/w reported error - f/w status code
919* <0 driver reported error 930 * <0 driver reported error
920* 931 *
921* Side effects: 932 * Side effects:
922* 933 *
923* Call context: 934 * Call context:
924* process 935 * process
925----------------------------------------------------------------*/ 936 *----------------------------------------------------------------
937 */
926int hfa384x_cmd_initialize(struct hfa384x *hw) 938int hfa384x_cmd_initialize(struct hfa384x *hw)
927{ 939{
928 int result = 0; 940 int result = 0;
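The tri-state return convention documented above (0 for success, a positive firmware status code, or a negative driver errno) recurs throughout this file. A small sketch of a caller distinguishing the three cases; the pr_debug() messages are purely illustrative.

static int example_check_init(struct hfa384x *hw)
{
	int result = hfa384x_cmd_initialize(hw);

	if (result < 0)
		pr_debug("driver-side error: %d\n", result);	/* -errno */
	else if (result > 0)
		pr_debug("firmware status: 0x%x\n", result);
	return result;	/* 0 means the Initialize command succeeded */
}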
@@ -950,25 +962,26 @@ int hfa384x_cmd_initialize(struct hfa384x *hw)
950} 962}
951 963
952/*---------------------------------------------------------------- 964/*----------------------------------------------------------------
953* hfa384x_cmd_disable 965 * hfa384x_cmd_disable
954* 966 *
955* Issues the disable command to stop communications on one of 967 * Issues the disable command to stop communications on one of
956* the MAC's 'ports'. 968 * the MAC's 'ports'.
957* 969 *
958* Arguments: 970 * Arguments:
959* hw device structure 971 * hw device structure
960* macport MAC port number (host order) 972 * macport MAC port number (host order)
961* 973 *
962* Returns: 974 * Returns:
963* 0 success 975 * 0 success
964* >0 f/w reported failure - f/w status code 976 * >0 f/w reported failure - f/w status code
965* <0 driver reported error (timeout|bad arg) 977 * <0 driver reported error (timeout|bad arg)
966* 978 *
967* Side effects: 979 * Side effects:
968* 980 *
969* Call context: 981 * Call context:
970* process 982 * process
971----------------------------------------------------------------*/ 983 *----------------------------------------------------------------
984 */
972int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport) 985int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
973{ 986{
974 struct hfa384x_metacmd cmd; 987 struct hfa384x_metacmd cmd;
@@ -983,25 +996,26 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
983} 996}
984 997
985/*---------------------------------------------------------------- 998/*----------------------------------------------------------------
986* hfa384x_cmd_enable 999 * hfa384x_cmd_enable
987* 1000 *
988* Issues the enable command to enable communications on one of 1001 * Issues the enable command to enable communications on one of
989* the MAC's 'ports'. 1002 * the MAC's 'ports'.
990* 1003 *
991* Arguments: 1004 * Arguments:
992* hw device structure 1005 * hw device structure
993* macport MAC port number 1006 * macport MAC port number
994* 1007 *
995* Returns: 1008 * Returns:
996* 0 success 1009 * 0 success
997* >0 f/w reported failure - f/w status code 1010 * >0 f/w reported failure - f/w status code
998* <0 driver reported error (timeout|bad arg) 1011 * <0 driver reported error (timeout|bad arg)
999* 1012 *
1000* Side effects: 1013 * Side effects:
1001* 1014 *
1002* Call context: 1015 * Call context:
1003* process 1016 * process
1004----------------------------------------------------------------*/ 1017 *----------------------------------------------------------------
1018 */
1005int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport) 1019int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
1006{ 1020{
1007 struct hfa384x_metacmd cmd; 1021 struct hfa384x_metacmd cmd;
@@ -1016,34 +1030,35 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
1016} 1030}
1017 1031
1018/*---------------------------------------------------------------- 1032/*----------------------------------------------------------------
1019* hfa384x_cmd_monitor 1033 * hfa384x_cmd_monitor
1020* 1034 *
1021* Enables the 'monitor mode' of the MAC. Here's the description of 1035 * Enables the 'monitor mode' of the MAC. Here's the description of
1022* monitor mode that I've received thus far: 1036 * monitor mode that I've received thus far:
1023* 1037 *
1024* "The "monitor mode" of operation is that the MAC passes all 1038 * "The "monitor mode" of operation is that the MAC passes all
1025* frames for which the PLCP checks are correct. All received 1039 * frames for which the PLCP checks are correct. All received
1026* MPDUs are passed to the host with MAC Port = 7, with a 1040 * MPDUs are passed to the host with MAC Port = 7, with a
1027* receive status of good, FCS error, or undecryptable. Passing 1041 * receive status of good, FCS error, or undecryptable. Passing
1028* certain MPDUs is a violation of the 802.11 standard, but useful 1042 * certain MPDUs is a violation of the 802.11 standard, but useful
1029* for a debugging tool." Normal communication is not possible 1043 * for a debugging tool." Normal communication is not possible
1030* while monitor mode is enabled. 1044 * while monitor mode is enabled.
1031* 1045 *
1032* Arguments: 1046 * Arguments:
1033* hw device structure 1047 * hw device structure
1034* enable a code (0x0b|0x0f) that enables/disables 1048 * enable a code (0x0b|0x0f) that enables/disables
1035* monitor mode. (host order) 1049 * monitor mode. (host order)
1036* 1050 *
1037* Returns: 1051 * Returns:
1038* 0 success 1052 * 0 success
1039* >0 f/w reported failure - f/w status code 1053 * >0 f/w reported failure - f/w status code
1040* <0 driver reported error (timeout|bad arg) 1054 * <0 driver reported error (timeout|bad arg)
1041* 1055 *
1042* Side effects: 1056 * Side effects:
1043* 1057 *
1044* Call context: 1058 * Call context:
1045* process 1059 * process
1046----------------------------------------------------------------*/ 1060 *----------------------------------------------------------------
1061 */
1047int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable) 1062int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
1048{ 1063{
1049 struct hfa384x_metacmd cmd; 1064 struct hfa384x_metacmd cmd;
@@ -1058,43 +1073,44 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
1058} 1073}
1059 1074
1060/*---------------------------------------------------------------- 1075/*----------------------------------------------------------------
1061* hfa384x_cmd_download 1076 * hfa384x_cmd_download
1062* 1077 *
1063* Sets the controls for the MAC controller code/data download 1078 * Sets the controls for the MAC controller code/data download
1064* process. The arguments set the mode and address associated 1079 * process. The arguments set the mode and address associated
1065* with a download. Note that the aux registers should be enabled 1080 * with a download. Note that the aux registers should be enabled
1066* prior to setting one of the download enable modes. 1081 * prior to setting one of the download enable modes.
1067* 1082 *
1068* Arguments: 1083 * Arguments:
1069* hw device structure 1084 * hw device structure
1070* mode 0 - Disable programming and begin code exec 1085 * mode 0 - Disable programming and begin code exec
1071* 1 - Enable volatile mem programming 1086 * 1 - Enable volatile mem programming
1072* 2 - Enable non-volatile mem programming 1087 * 2 - Enable non-volatile mem programming
1073* 3 - Program non-volatile section from NV download 1088 * 3 - Program non-volatile section from NV download
1074* buffer. 1089 * buffer.
1075* (host order) 1090 * (host order)
1076* lowaddr 1091 * lowaddr
1077* highaddr For mode 1, sets the high & low order bits of 1092 * highaddr For mode 1, sets the high & low order bits of
1078* the "destination address". This address will be 1093 * the "destination address". This address will be
1079* the execution start address when download is 1094 * the execution start address when download is
1080* subsequently disabled. 1095 * subsequently disabled.
1081* For mode 2, sets the high & low order bits of 1096 * For mode 2, sets the high & low order bits of
1082* the destination in NV ram. 1097 * the destination in NV ram.
1083* For modes 0 & 3, should be zero. (host order) 1098 * For modes 0 & 3, should be zero. (host order)
1084* NOTE: these are CMD format. 1099 * NOTE: these are CMD format.
1085* codelen Length of the data to write in mode 2, 1100 * codelen Length of the data to write in mode 2,
1086* zero otherwise. (host order) 1101 * zero otherwise. (host order)
1087* 1102 *
1088* Returns: 1103 * Returns:
1089* 0 success 1104 * 0 success
1090* >0 f/w reported failure - f/w status code 1105 * >0 f/w reported failure - f/w status code
1091* <0 driver reported error (timeout|bad arg) 1106 * <0 driver reported error (timeout|bad arg)
1092* 1107 *
1093* Side effects: 1108 * Side effects:
1094* 1109 *
1095* Call context: 1110 * Call context:
1096* process 1111 * process
1097----------------------------------------------------------------*/ 1112 *----------------------------------------------------------------
1113 */
1098int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr, 1114int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
1099 u16 highaddr, u16 codelen) 1115 u16 highaddr, u16 codelen)
1100{ 1116{
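A sketch of a mode-1 (volatile memory programming) download setup using the argument layout described above. The comment says lowaddr/highaddr are in "CMD format"; the real callers presumably use the driver's page/offset conversion macros, which are not visible in this hunk, so a plain 16-bit split of the execution address is shown purely for illustration.

static int example_enable_ram_prog(struct hfa384x *hw, u32 exeaddr)
{
	u16 lowaddr = (u16)(exeaddr & 0xffff);	/* low-order bits */
	u16 hiaddr = (u16)(exeaddr >> 16);	/* high-order bits */

	/* mode 1: enable volatile memory programming; codelen is only
	 * meaningful for mode 2 and is therefore zero here.
	 */
	return hfa384x_cmd_download(hw, 1, lowaddr, hiaddr, 0);
}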
@@ -1114,29 +1130,31 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
1114} 1130}
1115 1131
1116/*---------------------------------------------------------------- 1132/*----------------------------------------------------------------
1117* hfa384x_corereset 1133 * hfa384x_corereset
1118* 1134 *
1119* Perform a reset of the hfa38xx MAC core. We assume that the hw 1135 * Perform a reset of the hfa38xx MAC core. We assume that the hw
1120* structure is in its "created" state. That is, it is initialized 1136 * structure is in its "created" state. That is, it is initialized
1121* with proper values. Note that if a reset is done after the 1137 * with proper values. Note that if a reset is done after the
1122* device has been active for a while, the caller might have to clean 1138 * device has been active for a while, the caller might have to clean
1123* up some leftover cruft in the hw structure. 1139 * up some leftover cruft in the hw structure.
1124* 1140 *
1125* Arguments: 1141 * Arguments:
1126* hw device structure 1142 * hw device structure
1127* holdtime how long (in ms) to hold the reset 1143 * holdtime how long (in ms) to hold the reset
1128* settletime how long (in ms) to wait after releasing 1144 * settletime how long (in ms) to wait after releasing
1129* the reset 1145 * the reset
1130* 1146 *
1131* Returns: 1147 * Returns:
1132* nothing 1148 * nothing
1133* 1149 *
1134* Side effects: 1150 * Side effects:
1135* 1151 *
1136* Call context: 1152 * Call context:
1137* process 1153 * process
1138----------------------------------------------------------------*/ 1154 *----------------------------------------------------------------
1139int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis) 1155 */
1156int hfa384x_corereset(struct hfa384x *hw, int holdtime,
1157 int settletime, int genesis)
1140{ 1158{
1141 int result; 1159 int result;
1142 1160
@@ -1150,29 +1168,30 @@ int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int gene
1150} 1168}
1151 1169
1152/*---------------------------------------------------------------- 1170/*----------------------------------------------------------------
1153* hfa384x_usbctlx_complete_sync 1171 * hfa384x_usbctlx_complete_sync
1154* 1172 *
1155* Waits for a synchronous CTLX object to complete, 1173 * Waits for a synchronous CTLX object to complete,
1156* and then handles the response. 1174 * and then handles the response.
1157* 1175 *
1158* Arguments: 1176 * Arguments:
1159* hw device structure 1177 * hw device structure
1160* ctlx CTLX ptr 1178 * ctlx CTLX ptr
1161* completor functor object to decide what to 1179 * completor functor object to decide what to
1162* do with the CTLX's result. 1180 * do with the CTLX's result.
1163* 1181 *
1164* Returns: 1182 * Returns:
1165* 0 Success 1183 * 0 Success
1166* -ERESTARTSYS Interrupted by a signal 1184 * -ERESTARTSYS Interrupted by a signal
1167* -EIO CTLX failed 1185 * -EIO CTLX failed
1168* -ENODEV Adapter was unplugged 1186 * -ENODEV Adapter was unplugged
1169* ??? Result from completor 1187 * ??? Result from completor
1170* 1188 *
1171* Side effects: 1189 * Side effects:
1172* 1190 *
1173* Call context: 1191 * Call context:
1174* process 1192 * process
1175----------------------------------------------------------------*/ 1193 *----------------------------------------------------------------
1194 */
1176static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw, 1195static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
1177 struct hfa384x_usbctlx *ctlx, 1196 struct hfa384x_usbctlx *ctlx,
1178 struct usbctlx_completor *completor) 1197 struct usbctlx_completor *completor)
@@ -1257,37 +1276,38 @@ cleanup:
1257} 1276}
1258 1277
1259/*---------------------------------------------------------------- 1278/*----------------------------------------------------------------
1260* hfa384x_docmd 1279 * hfa384x_docmd
1261* 1280 *
1262* Constructs a command CTLX and submits it. 1281 * Constructs a command CTLX and submits it.
1263* 1282 *
1264* NOTE: Any changes to the 'post-submit' code in this function 1283 * NOTE: Any changes to the 'post-submit' code in this function
1265* need to be carried over to hfa384x_cbcmd() since the handling 1284 * need to be carried over to hfa384x_cbcmd() since the handling
1266* is virtually identical. 1285 * is virtually identical.
1267* 1286 *
1268* Arguments: 1287 * Arguments:
1269* hw device structure 1288 * hw device structure
1270* mode DOWAIT or DOASYNC 1289 * mode DOWAIT or DOASYNC
1271* cmd cmd structure. Includes all arguments and result 1290 * cmd cmd structure. Includes all arguments and result
1272* data points. All in host order. 1291 * data points. All in host order.
1273* cmdcb command-specific callback 1292 * cmdcb command-specific callback
1274* usercb user callback for async calls, NULL for DOWAIT calls 1293 * usercb user callback for async calls, NULL for DOWAIT calls
1275* usercb_data user supplied data pointer for async calls, NULL 1294 * usercb_data user supplied data pointer for async calls, NULL
1276* for DOASYNC calls 1295 * for DOASYNC calls
1277* 1296 *
1278* Returns: 1297 * Returns:
1279* 0 success 1298 * 0 success
1280* -EIO CTLX failure 1299 * -EIO CTLX failure
1281* -ERESTARTSYS Awakened on signal 1300 * -ERESTARTSYS Awakened on signal
1282* >0 command indicated error, Status and Resp0-2 are 1301 * >0 command indicated error, Status and Resp0-2 are
1283* in hw structure. 1302 * in hw structure.
1284* 1303 *
1285* Side effects: 1304 * Side effects:
1286* 1305 *
1287* 1306 *
1288* Call context: 1307 * Call context:
1289* process 1308 * process
1290----------------------------------------------------------------*/ 1309 *----------------------------------------------------------------
1310 */
1291static int 1311static int
1292hfa384x_docmd(struct hfa384x *hw, 1312hfa384x_docmd(struct hfa384x *hw,
1293 enum cmd_mode mode, 1313 enum cmd_mode mode,
@@ -1341,41 +1361,42 @@ done:
1341} 1361}
1342 1362
1343/*---------------------------------------------------------------- 1363/*----------------------------------------------------------------
1344* hfa384x_dorrid 1364 * hfa384x_dorrid
1345* 1365 *
1346* Constructs a read rid CTLX and issues it. 1366 * Constructs a read rid CTLX and issues it.
1347* 1367 *
1348* NOTE: Any changes to the 'post-submit' code in this function 1368 * NOTE: Any changes to the 'post-submit' code in this function
1349* need to be carried over to hfa384x_cbrrid() since the handling 1369 * need to be carried over to hfa384x_cbrrid() since the handling
1350* is virtually identical. 1370 * is virtually identical.
1351* 1371 *
1352* Arguments: 1372 * Arguments:
1353* hw device structure 1373 * hw device structure
1354* mode DOWAIT or DOASYNC 1374 * mode DOWAIT or DOASYNC
1355* rid Read RID number (host order) 1375 * rid Read RID number (host order)
1356* riddata Caller-supplied buffer that the MAC-formatted RID.data 1376 * riddata Caller-supplied buffer that the MAC-formatted RID.data
1357* record will be written to for DOWAIT calls. Should 1377 * record will be written to for DOWAIT calls. Should
1358* be NULL for DOASYNC calls. 1378 * be NULL for DOASYNC calls.
1359* riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls. 1379 * riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
1360* cmdcb command callback for async calls, NULL for DOWAIT calls 1380 * cmdcb command callback for async calls, NULL for DOWAIT calls
1361* usercb user callback for async calls, NULL for DOWAIT calls 1381 * usercb user callback for async calls, NULL for DOWAIT calls
1362* usercb_data user supplied data pointer for async calls, NULL 1382 * usercb_data user supplied data pointer for async calls, NULL
1363* for DOWAIT calls 1383 * for DOWAIT calls
1364* 1384 *
1365* Returns: 1385 * Returns:
1366* 0 success 1386 * 0 success
1367* -EIO CTLX failure 1387 * -EIO CTLX failure
1368* -ERESTARTSYS Awakened on signal 1388 * -ERESTARTSYS Awakened on signal
1369* -ENODATA riddatalen != macdatalen 1389 * -ENODATA riddatalen != macdatalen
1370* >0 command indicated error, Status and Resp0-2 are 1390 * >0 command indicated error, Status and Resp0-2 are
1371* in hw structure. 1391 * in hw structure.
1372* 1392 *
1373* Side effects: 1393 * Side effects:
1374* 1394 *
1375* Call context: 1395 * Call context:
1376* interrupt (DOASYNC) 1396 * interrupt (DOASYNC)
1377* process (DOWAIT or DOASYNC) 1397 * process (DOWAIT or DOASYNC)
1378----------------------------------------------------------------*/ 1398 *----------------------------------------------------------------
1399 */
1379static int 1400static int
1380hfa384x_dorrid(struct hfa384x *hw, 1401hfa384x_dorrid(struct hfa384x *hw,
1381 enum cmd_mode mode, 1402 enum cmd_mode mode,
@@ -1426,37 +1447,38 @@ done:
1426} 1447}
1427 1448
1428/*---------------------------------------------------------------- 1449/*----------------------------------------------------------------
1429* hfa384x_dowrid 1450 * hfa384x_dowrid
1430* 1451 *
1431* Constructs a write rid CTLX and issues it. 1452 * Constructs a write rid CTLX and issues it.
1432* 1453 *
1433* NOTE: Any changes to the 'post-submit' code in this function 1454 * NOTE: Any changes to the 'post-submit' code in this function
1434* need to be carried over to hfa384x_cbwrid() since the handling 1455 * need to be carried over to hfa384x_cbwrid() since the handling
1435* is virtually identical. 1456 * is virtually identical.
1436* 1457 *
1437* Arguments: 1458 * Arguments:
1438* hw device structure 1459 * hw device structure
1439* enum cmd_mode DOWAIT or DOASYNC 1460 * enum cmd_mode DOWAIT or DOASYNC
1440* rid RID code 1461 * rid RID code
1441* riddata Data portion of RID formatted for MAC 1462 * riddata Data portion of RID formatted for MAC
1442* riddatalen Length of the data portion in bytes 1463 * riddatalen Length of the data portion in bytes
1443* cmdcb command callback for async calls, NULL for DOWAIT calls 1464 * cmdcb command callback for async calls, NULL for DOWAIT calls
1444* usercb user callback for async calls, NULL for DOWAIT calls 1465 * usercb user callback for async calls, NULL for DOWAIT calls
1445* usercb_data user supplied data pointer for async calls 1466 * usercb_data user supplied data pointer for async calls
1446* 1467 *
1447* Returns: 1468 * Returns:
1448* 0 success 1469 * 0 success
1449* -ETIMEDOUT timed out waiting for register ready or 1470 * -ETIMEDOUT timed out waiting for register ready or
1450* command completion 1471 * command completion
1451* >0 command indicated error, Status and Resp0-2 are 1472 * >0 command indicated error, Status and Resp0-2 are
1452* in hw structure. 1473 * in hw structure.
1453* 1474 *
1454* Side effects: 1475 * Side effects:
1455* 1476 *
1456* Call context: 1477 * Call context:
1457* interrupt (DOASYNC) 1478 * interrupt (DOASYNC)
1458* process (DOWAIT or DOASYNC) 1479 * process (DOWAIT or DOASYNC)
1459----------------------------------------------------------------*/ 1480 *----------------------------------------------------------------
1481 */
1460static int 1482static int
1461hfa384x_dowrid(struct hfa384x *hw, 1483hfa384x_dowrid(struct hfa384x *hw,
1462 enum cmd_mode mode, 1484 enum cmd_mode mode,
@@ -1512,38 +1534,39 @@ done:
1512} 1534}
1513 1535
1514/*---------------------------------------------------------------- 1536/*----------------------------------------------------------------
1515* hfa384x_dormem 1537 * hfa384x_dormem
1516* 1538 *
1517* Constructs a readmem CTLX and issues it. 1539 * Constructs a readmem CTLX and issues it.
1518* 1540 *
1519* NOTE: Any changes to the 'post-submit' code in this function 1541 * NOTE: Any changes to the 'post-submit' code in this function
1520* need to be carried over to hfa384x_cbrmem() since the handling 1542 * need to be carried over to hfa384x_cbrmem() since the handling
1521* is virtually identical. 1543 * is virtually identical.
1522* 1544 *
1523* Arguments: 1545 * Arguments:
1524* hw device structure 1546 * hw device structure
1525* mode DOWAIT or DOASYNC 1547 * mode DOWAIT or DOASYNC
1526* page MAC address space page (CMD format) 1548 * page MAC address space page (CMD format)
1527* offset MAC address space offset 1549 * offset MAC address space offset
1528* data Ptr to data buffer to receive read 1550 * data Ptr to data buffer to receive read
1529* len Length of the data to read (max == 2048) 1551 * len Length of the data to read (max == 2048)
1530* cmdcb command callback for async calls, NULL for DOWAIT calls 1552 * cmdcb command callback for async calls, NULL for DOWAIT calls
1531* usercb user callback for async calls, NULL for DOWAIT calls 1553 * usercb user callback for async calls, NULL for DOWAIT calls
1532* usercb_data user supplied data pointer for async calls 1554 * usercb_data user supplied data pointer for async calls
1533* 1555 *
1534* Returns: 1556 * Returns:
1535* 0 success 1557 * 0 success
1536* -ETIMEDOUT timed out waiting for register ready or 1558 * -ETIMEDOUT timed out waiting for register ready or
1537* command completion 1559 * command completion
1538* >0 command indicated error, Status and Resp0-2 are 1560 * >0 command indicated error, Status and Resp0-2 are
1539* in hw structure. 1561 * in hw structure.
1540* 1562 *
1541* Side effects: 1563 * Side effects:
1542* 1564 *
1543* Call context: 1565 * Call context:
1544* interrupt (DOASYNC) 1566 * interrupt (DOASYNC)
1545* process (DOWAIT or DOASYNC) 1567 * process (DOWAIT or DOASYNC)
1546----------------------------------------------------------------*/ 1568 *----------------------------------------------------------------
1569 */
1547static int 1570static int
1548hfa384x_dormem(struct hfa384x *hw, 1571hfa384x_dormem(struct hfa384x *hw,
1549 enum cmd_mode mode, 1572 enum cmd_mode mode,
@@ -1603,38 +1626,39 @@ done:
1603} 1626}
1604 1627
1605/*---------------------------------------------------------------- 1628/*----------------------------------------------------------------
1606* hfa384x_dowmem 1629 * hfa384x_dowmem
1607* 1630 *
1608* Constructs a writemem CTLX and issues it. 1631 * Constructs a writemem CTLX and issues it.
1609* 1632 *
1610* NOTE: Any changes to the 'post-submit' code in this function 1633 * NOTE: Any changes to the 'post-submit' code in this function
1611* need to be carried over to hfa384x_cbwmem() since the handling 1634 * need to be carried over to hfa384x_cbwmem() since the handling
1612* is virtually identical. 1635 * is virtually identical.
1613* 1636 *
1614* Arguments: 1637 * Arguments:
1615* hw device structure 1638 * hw device structure
1616* mode DOWAIT or DOASYNC 1639 * mode DOWAIT or DOASYNC
1617* page MAC address space page (CMD format) 1640 * page MAC address space page (CMD format)
1618* offset MAC address space offset 1641 * offset MAC address space offset
1619* data Ptr to data buffer containing write data 1642 * data Ptr to data buffer containing write data
1620* len Length of the data to read (max == 2048) 1643 * len Length of the data to read (max == 2048)
1621* cmdcb command callback for async calls, NULL for DOWAIT calls 1644 * cmdcb command callback for async calls, NULL for DOWAIT calls
1622* usercb user callback for async calls, NULL for DOWAIT calls 1645 * usercb user callback for async calls, NULL for DOWAIT calls
1623* usercb_data user supplied data pointer for async calls. 1646 * usercb_data user supplied data pointer for async calls.
1624* 1647 *
1625* Returns: 1648 * Returns:
1626* 0 success 1649 * 0 success
1627* -ETIMEDOUT timed out waiting for register ready or 1650 * -ETIMEDOUT timed out waiting for register ready or
1628* command completion 1651 * command completion
1629* >0 command indicated error, Status and Resp0-2 are 1652 * >0 command indicated error, Status and Resp0-2 are
1630* in hw structure. 1653 * in hw structure.
1631* 1654 *
1632* Side effects: 1655 * Side effects:
1633* 1656 *
1634* Call context: 1657 * Call context:
1635* interrupt (DOWAIT) 1658 * interrupt (DOWAIT)
1636* process (DOWAIT or DOASYNC) 1659 * process (DOWAIT or DOASYNC)
1637----------------------------------------------------------------*/ 1660 *----------------------------------------------------------------
1661 */
1638static int 1662static int
1639hfa384x_dowmem(struct hfa384x *hw, 1663hfa384x_dowmem(struct hfa384x *hw,
1640 enum cmd_mode mode, 1664 enum cmd_mode mode,
@@ -1694,27 +1718,28 @@ done:
1694} 1718}
1695 1719
1696/*---------------------------------------------------------------- 1720/*----------------------------------------------------------------
1697* hfa384x_drvr_disable 1721 * hfa384x_drvr_disable
1698* 1722 *
1699* Issues the disable command to stop communications on one of 1723 * Issues the disable command to stop communications on one of
1700* the MACs 'ports'. Only macport 0 is valid for stations. 1724 * the MACs 'ports'. Only macport 0 is valid for stations.
1701* APs may also disable macports 1-6. Only ports that have been 1725 * APs may also disable macports 1-6. Only ports that have been
1702* previously enabled may be disabled. 1726 * previously enabled may be disabled.
1703* 1727 *
1704* Arguments: 1728 * Arguments:
1705* hw device structure 1729 * hw device structure
1706* macport MAC port number (host order) 1730 * macport MAC port number (host order)
1707* 1731 *
1708* Returns: 1732 * Returns:
1709* 0 success 1733 * 0 success
1710* >0 f/w reported failure - f/w status code 1734 * >0 f/w reported failure - f/w status code
1711* <0 driver reported error (timeout|bad arg) 1735 * <0 driver reported error (timeout|bad arg)
1712* 1736 *
1713* Side effects: 1737 * Side effects:
1714* 1738 *
1715* Call context: 1739 * Call context:
1716* process 1740 * process
1717----------------------------------------------------------------*/ 1741 *----------------------------------------------------------------
1742 */
1718int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport) 1743int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
1719{ 1744{
1720 int result = 0; 1745 int result = 0;
@@ -1732,27 +1757,28 @@ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
1732} 1757}
1733 1758
1734/*---------------------------------------------------------------- 1759/*----------------------------------------------------------------
1735* hfa384x_drvr_enable 1760 * hfa384x_drvr_enable
1736* 1761 *
1737* Issues the enable command to enable communications on one of 1762 * Issues the enable command to enable communications on one of
1738* the MACs 'ports'. Only macport 0 is valid for stations. 1763 * the MACs 'ports'. Only macport 0 is valid for stations.
1739* APs may also enable macports 1-6. Only ports that are currently 1764 * APs may also enable macports 1-6. Only ports that are currently
1740* disabled may be enabled. 1765 * disabled may be enabled.
1741* 1766 *
1742* Arguments: 1767 * Arguments:
1743* hw device structure 1768 * hw device structure
1744* macport MAC port number 1769 * macport MAC port number
1745* 1770 *
1746* Returns: 1771 * Returns:
1747* 0 success 1772 * 0 success
1748* >0 f/w reported failure - f/w status code 1773 * >0 f/w reported failure - f/w status code
1749* <0 driver reported error (timeout|bad arg) 1774 * <0 driver reported error (timeout|bad arg)
1750* 1775 *
1751* Side effects: 1776 * Side effects:
1752* 1777 *
1753* Call context: 1778 * Call context:
1754* process 1779 * process
1755----------------------------------------------------------------*/ 1780 *----------------------------------------------------------------
1781 */
1756int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport) 1782int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
1757{ 1783{
1758 int result = 0; 1784 int result = 0;
@@ -1770,26 +1796,27 @@ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
1770} 1796}
1771 1797
1772/*---------------------------------------------------------------- 1798/*----------------------------------------------------------------
1773* hfa384x_drvr_flashdl_enable 1799 * hfa384x_drvr_flashdl_enable
1774* 1800 *
1775* Begins the flash download state. Checks to see that we're not 1801 * Begins the flash download state. Checks to see that we're not
1776* already in a download state and that a port isn't enabled. 1802 * already in a download state and that a port isn't enabled.
1777* Sets the download state and retrieves the flash download 1803 * Sets the download state and retrieves the flash download
1778* buffer location, buffer size, and timeout length. 1804 * buffer location, buffer size, and timeout length.
1779* 1805 *
1780* Arguments: 1806 * Arguments:
1781* hw device structure 1807 * hw device structure
1782* 1808 *
1783* Returns: 1809 * Returns:
1784* 0 success 1810 * 0 success
1785* >0 f/w reported error - f/w status code 1811 * >0 f/w reported error - f/w status code
1786* <0 driver reported error 1812 * <0 driver reported error
1787* 1813 *
1788* Side effects: 1814 * Side effects:
1789* 1815 *
1790* Call context: 1816 * Call context:
1791* process 1817 * process
1792----------------------------------------------------------------*/ 1818 *----------------------------------------------------------------
1819 */
1793int hfa384x_drvr_flashdl_enable(struct hfa384x *hw) 1820int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
1794{ 1821{
1795 int result = 0; 1822 int result = 0;
@@ -1809,7 +1836,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
1809 1836
1810 /* Retrieve the buffer loc&size and timeout */ 1837 /* Retrieve the buffer loc&size and timeout */
1811 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER, 1838 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
1812 &(hw->bufinfo), sizeof(hw->bufinfo)); 1839 &hw->bufinfo, sizeof(hw->bufinfo));
1813 if (result) 1840 if (result)
1814 return result; 1841 return result;
1815 1842
@@ -1817,7 +1844,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
1817 hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset); 1844 hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);
1818 hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len); 1845 hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len);
1819 result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME, 1846 result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
1820 &(hw->dltimeout)); 1847 &hw->dltimeout);
1821 if (result) 1848 if (result)
1822 return result; 1849 return result;
1823 1850
@@ -1831,24 +1858,25 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
1831} 1858}
1832 1859
1833/*---------------------------------------------------------------- 1860/*----------------------------------------------------------------
1834* hfa384x_drvr_flashdl_disable 1861 * hfa384x_drvr_flashdl_disable
1835* 1862 *
1836* Ends the flash download state. Note that this will cause the MAC 1863 * Ends the flash download state. Note that this will cause the MAC
1837* firmware to restart. 1864 * firmware to restart.
1838* 1865 *
1839* Arguments: 1866 * Arguments:
1840* hw device structure 1867 * hw device structure
1841* 1868 *
1842* Returns: 1869 * Returns:
1843* 0 success 1870 * 0 success
1844* >0 f/w reported error - f/w status code 1871 * >0 f/w reported error - f/w status code
1845* <0 driver reported error 1872 * <0 driver reported error
1846* 1873 *
1847* Side effects: 1874 * Side effects:
1848* 1875 *
1849* Call context: 1876 * Call context:
1850* process 1877 * process
1851----------------------------------------------------------------*/ 1878 *----------------------------------------------------------------
1879 */
1852int hfa384x_drvr_flashdl_disable(struct hfa384x *hw) 1880int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
1853{ 1881{
1854 /* Check that we're already in the download state */ 1882 /* Check that we're already in the download state */
@@ -1866,35 +1894,37 @@ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
1866} 1894}
1867 1895
1868/*---------------------------------------------------------------- 1896/*----------------------------------------------------------------
1869* hfa384x_drvr_flashdl_write 1897 * hfa384x_drvr_flashdl_write
1870* 1898 *
1871* Performs a FLASH download of a chunk of data. First checks to see 1899 * Performs a FLASH download of a chunk of data. First checks to see
1872* that we're in the FLASH download state, then sets the download 1900 * that we're in the FLASH download state, then sets the download
1873* mode, uses the aux functions to 1) copy the data to the flash 1901 * mode, uses the aux functions to 1) copy the data to the flash
1874* buffer, 2) set the download 'write flash' mode, 3) read back and 1902 * buffer, 2) set the download 'write flash' mode, 3) read back and
1875* compare. Lather, rinse, repeat as many times as necessary to get 1903 * compare. Lather, rinse, repeat as many times as necessary to get
1876* all the given data into flash. 1904 * all the given data into flash.
1877* When all data has been written using this function (possibly 1905 * When all data has been written using this function (possibly
1878* repeatedly), call drvr_flashdl_disable() to end the download state 1906 * repeatedly), call drvr_flashdl_disable() to end the download state
1879* and restart the MAC. 1907 * and restart the MAC.
1880* 1908 *
1881* Arguments: 1909 * Arguments:
1882* hw device structure 1910 * hw device structure
1883* daddr Card address to write to. (host order) 1911 * daddr Card address to write to. (host order)
1884* buf Ptr to data to write. 1912 * buf Ptr to data to write.
1885* len Length of data (host order). 1913 * len Length of data (host order).
1886* 1914 *
1887* Returns: 1915 * Returns:
1888* 0 success 1916 * 0 success
1889* >0 f/w reported error - f/w status code 1917 * >0 f/w reported error - f/w status code
1890* <0 driver reported error 1918 * <0 driver reported error
1891* 1919 *
1892* Side effects: 1920 * Side effects:
1893* 1921 *
1894* Call context: 1922 * Call context:
1895* process 1923 * process
1896----------------------------------------------------------------*/ 1924 *----------------------------------------------------------------
1897int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len) 1925 */
1926int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
1927 void *buf, u32 len)
1898{ 1928{
1899 int result = 0; 1929 int result = 0;
1900 u32 dlbufaddr; 1930 u32 dlbufaddr;
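Putting the three flashdl_* entry points together as the comments above describe: enable the download state once, push the image in one or more chunks, then disable to restart the MAC firmware. This is a sketch only; real images may need several write calls, and calling flashdl_disable() after a failed write is a policy choice assumed here rather than something stated in the comments.

static int example_flash_image(struct hfa384x *hw, u32 daddr,
			       void *image, u32 len)
{
	int result;

	result = hfa384x_drvr_flashdl_enable(hw);
	if (result)
		return result;

	result = hfa384x_drvr_flashdl_write(hw, daddr, image, len);
	if (result) {
		/* Assumed policy: leave the download state even after a
		 * failed write, but report the write error to the caller.
		 */
		hfa384x_drvr_flashdl_disable(hw);
		return result;
	}

	/* Ends the download state and restarts the MAC firmware. */
	return hfa384x_drvr_flashdl_disable(hw);
}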
@@ -2008,30 +2038,31 @@ exit_proc:
2008} 2038}
2009 2039
2010/*---------------------------------------------------------------- 2040/*----------------------------------------------------------------
2011* hfa384x_drvr_getconfig 2041 * hfa384x_drvr_getconfig
2012* 2042 *
2013* Performs the sequence necessary to read a config/info item. 2043 * Performs the sequence necessary to read a config/info item.
2014* 2044 *
2015* Arguments: 2045 * Arguments:
2016* hw device structure 2046 * hw device structure
2017* rid config/info record id (host order) 2047 * rid config/info record id (host order)
2018* buf host side record buffer. Upon return it will 2048 * buf host side record buffer. Upon return it will
2019* contain the body portion of the record (minus the 2049 * contain the body portion of the record (minus the
2020* RID and len). 2050 * RID and len).
2021* len buffer length (in bytes, should match record length) 2051 * len buffer length (in bytes, should match record length)
2022* 2052 *
2023* Returns: 2053 * Returns:
2024* 0 success 2054 * 0 success
2025* >0 f/w reported error - f/w status code 2055 * >0 f/w reported error - f/w status code
2026* <0 driver reported error 2056 * <0 driver reported error
2027* -ENODATA length mismatch between argument and retrieved 2057 * -ENODATA length mismatch between argument and retrieved
2028* record. 2058 * record.
2029* 2059 *
2030* Side effects: 2060 * Side effects:
2031* 2061 *
2032* Call context: 2062 * Call context:
2033* process 2063 * process
2034----------------------------------------------------------------*/ 2064 *----------------------------------------------------------------
2065 */
2035int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len) 2066int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
2036{ 2067{
2037 return hfa384x_dorrid_wait(hw, rid, buf, len); 2068 return hfa384x_dorrid_wait(hw, rid, buf, len);
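A usage sketch mirroring the call already made in hfa384x_drvr_flashdl_enable() earlier in this diff: read a config/info RID straight into the matching hw field, with the buffer length taken from the destination structure so that a length mismatch shows up as -ENODATA.

static int example_read_dlbuffer(struct hfa384x *hw)
{
	return hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
				      &hw->bufinfo, sizeof(hw->bufinfo));
}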
@@ -2059,7 +2090,8 @@ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
2059 * 2090 *
2060 * Call context: 2091 * Call context:
2061 * process 2092 * process
2062 ----------------------------------------------------------------*/ 2093 *----------------------------------------------------------------
2094 */
2063int 2095int
2064hfa384x_drvr_setconfig_async(struct hfa384x *hw, 2096hfa384x_drvr_setconfig_async(struct hfa384x *hw,
2065 u16 rid, 2097 u16 rid,
@@ -2071,23 +2103,24 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
2071} 2103}
2072 2104
2073/*---------------------------------------------------------------- 2105/*----------------------------------------------------------------
2074* hfa384x_drvr_ramdl_disable 2106 * hfa384x_drvr_ramdl_disable
2075* 2107 *
2076* Ends the ram download state. 2108 * Ends the ram download state.
2077* 2109 *
2078* Arguments: 2110 * Arguments:
2079* hw device structure 2111 * hw device structure
2080* 2112 *
2081* Returns: 2113 * Returns:
2082* 0 success 2114 * 0 success
2083* >0 f/w reported error - f/w status code 2115 * >0 f/w reported error - f/w status code
2084* <0 driver reported error 2116 * <0 driver reported error
2085* 2117 *
2086* Side effects: 2118 * Side effects:
2087* 2119 *
2088* Call context: 2120 * Call context:
2089* process 2121 * process
2090----------------------------------------------------------------*/ 2122 *----------------------------------------------------------------
2123 */
2091int hfa384x_drvr_ramdl_disable(struct hfa384x *hw) 2124int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
2092{ 2125{
2093 /* Check that we're already in the download state */ 2126 /* Check that we're already in the download state */
@@ -2105,29 +2138,30 @@ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
2105} 2138}
2106 2139
2107/*---------------------------------------------------------------- 2140/*----------------------------------------------------------------
2108* hfa384x_drvr_ramdl_enable 2141 * hfa384x_drvr_ramdl_enable
2109* 2142 *
2110* Begins the ram download state. Checks to see that we're not 2143 * Begins the ram download state. Checks to see that we're not
2111* already in a download state and that a port isn't enabled. 2144 * already in a download state and that a port isn't enabled.
2112* Sets the download state and calls cmd_download with the 2145 * Sets the download state and calls cmd_download with the
2113* ENABLE_VOLATILE subcommand and the exeaddr argument. 2146 * ENABLE_VOLATILE subcommand and the exeaddr argument.
2114* 2147 *
2115* Arguments: 2148 * Arguments:
2116* hw device structure 2149 * hw device structure
2117* exeaddr the card execution address that will be 2150 * exeaddr the card execution address that will be
2118* jumped to when ramdl_disable() is called 2151 * jumped to when ramdl_disable() is called
2119* (host order). 2152 * (host order).
2120* 2153 *
2121* Returns: 2154 * Returns:
2122* 0 success 2155 * 0 success
2123* >0 f/w reported error - f/w status code 2156 * >0 f/w reported error - f/w status code
2124* <0 driver reported error 2157 * <0 driver reported error
2125* 2158 *
2126* Side effects: 2159 * Side effects:
2127* 2160 *
2128* Call context: 2161 * Call context:
2129* process 2162 * process
2130----------------------------------------------------------------*/ 2163 *----------------------------------------------------------------
2164 */
2131int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr) 2165int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
2132{ 2166{
2133 int result = 0; 2167 int result = 0;
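The ramdl_enable/ramdl_write/ramdl_disable comments describe a three-step RAM download ending with a jump to exeaddr. A sketch of that sequence under the same driver-context assumptions; what to do when the write fails (whether to still call disable) is a guess, not taken from the driver:

/* Sketch of the RAM download sequence described above: enable the
 * volatile download state with the execution address, write the image,
 * then disable to restart the MAC at exeaddr.
 */
static int ram_download(struct hfa384x *hw, u32 exeaddr,
			u32 daddr, void *image, u32 len)
{
	int result = hfa384x_drvr_ramdl_enable(hw, exeaddr);

	if (result)
		return result;

	result = hfa384x_drvr_ramdl_write(hw, daddr, image, len);

	/* End the download state either way; error handling is illustrative. */
	hfa384x_drvr_ramdl_disable(hw);
	return result;
}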
@@ -2146,7 +2180,8 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
2146 2180
2147 /* Check that we're not already in a download state */ 2181 /* Check that we're not already in a download state */
2148 if (hw->dlstate != HFA384x_DLSTATE_DISABLED) { 2182 if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
2149 netdev_err(hw->wlandev->netdev, "Download state not disabled.\n"); 2183 netdev_err(hw->wlandev->netdev,
2184 "Download state not disabled.\n");
2150 return -EINVAL; 2185 return -EINVAL;
2151 } 2186 }
2152 2187
@@ -2171,31 +2206,32 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
2171} 2206}
2172 2207
2173/*---------------------------------------------------------------- 2208/*----------------------------------------------------------------
2174* hfa384x_drvr_ramdl_write 2209 * hfa384x_drvr_ramdl_write
2175* 2210 *
2176* Performs a RAM download of a chunk of data. First checks to see 2211 * Performs a RAM download of a chunk of data. First checks to see
2177* that we're in the RAM download state, then uses the [read|write]mem USB 2212 * that we're in the RAM download state, then uses the [read|write]mem USB
2178* commands to 1) copy the data, 2) readback and compare. The download 2213 * commands to 1) copy the data, 2) readback and compare. The download
2179* state is unaffected. When all data has been written using 2214 * state is unaffected. When all data has been written using
2180* this function, call drvr_ramdl_disable() to end the download state 2215 * this function, call drvr_ramdl_disable() to end the download state
2181* and restart the MAC. 2216 * and restart the MAC.
2182* 2217 *
2183* Arguments: 2218 * Arguments:
2184* hw device structure 2219 * hw device structure
2185* daddr Card address to write to. (host order) 2220 * daddr Card address to write to. (host order)
2186* buf Ptr to data to write. 2221 * buf Ptr to data to write.
2187* len Length of data (host order). 2222 * len Length of data (host order).
2188* 2223 *
2189* Returns: 2224 * Returns:
2190* 0 success 2225 * 0 success
2191* >0 f/w reported error - f/w status code 2226 * >0 f/w reported error - f/w status code
2192* <0 driver reported error 2227 * <0 driver reported error
2193* 2228 *
2194* Side effects: 2229 * Side effects:
2195* 2230 *
2196* Call context: 2231 * Call context:
2197* process 2232 * process
2198----------------------------------------------------------------*/ 2233 *----------------------------------------------------------------
2234 */
2199int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len) 2235int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
2200{ 2236{
2201 int result = 0; 2237 int result = 0;
@@ -2246,36 +2282,37 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
2246} 2282}
2247 2283
2248/*---------------------------------------------------------------- 2284/*----------------------------------------------------------------
2249* hfa384x_drvr_readpda 2285 * hfa384x_drvr_readpda
2250* 2286 *
2251* Performs the sequence to read the PDA space. Note there is no 2287 * Performs the sequence to read the PDA space. Note there is no
2252* drvr_writepda() function. Writing a PDA is 2288 * drvr_writepda() function. Writing a PDA is
2253* generally implemented by a calling component via calls to 2289 * generally implemented by a calling component via calls to
2254* cmd_download and writing to the flash download buffer via the 2290 * cmd_download and writing to the flash download buffer via the
2255* aux regs. 2291 * aux regs.
2256* 2292 *
2257* Arguments: 2293 * Arguments:
2258* hw device structure 2294 * hw device structure
2259* buf buffer to store PDA in 2295 * buf buffer to store PDA in
2260* len buffer length 2296 * len buffer length
2261* 2297 *
2262* Returns: 2298 * Returns:
2263* 0 success 2299 * 0 success
2264* >0 f/w reported error - f/w status code 2300 * >0 f/w reported error - f/w status code
2265* <0 driver reported error 2301 * <0 driver reported error
2266* -ETIMEDOUT timeout waiting for the cmd regs to become 2302 * -ETIMEDOUT timeout waiting for the cmd regs to become
2267* available, or waiting for the control reg 2303 * available, or waiting for the control reg
2268* to indicate the Aux port is enabled. 2304 * to indicate the Aux port is enabled.
2269* -ENODATA the buffer does NOT contain a valid PDA. 2305 * -ENODATA the buffer does NOT contain a valid PDA.
2270* Either the card PDA is bad, or the auxdata 2306 * Either the card PDA is bad, or the auxdata
2271* reads are giving us garbage. 2307 * reads are giving us garbage.
2272 2308 *
2273* 2309 *
2274* Side effects: 2310 * Side effects:
2275* 2311 *
2276* Call context: 2312 * Call context:
2277* process or non-card interrupt. 2313 * process or non-card interrupt.
2278----------------------------------------------------------------*/ 2314 *----------------------------------------------------------------
2315 */
2279int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len) 2316int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
2280{ 2317{
2281 int result = 0; 2318 int result = 0;
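A hedged caller sketch for the readpda contract above; the buffer is caller-supplied and the error messages are illustrative, only the -ENODATA/-ETIMEDOUT meanings come from the comment:

/* Sketch: read the card PDA into a caller-supplied buffer. */
static int fetch_pda(struct hfa384x *hw, void *buf, unsigned int len)
{
	int result = hfa384x_drvr_readpda(hw, buf, len);

	if (result == -ENODATA)
		netdev_err(hw->wlandev->netdev, "no valid PDA found\n");
	else if (result == -ETIMEDOUT)
		netdev_err(hw->wlandev->netdev,
			   "timed out waiting for cmd/aux regs\n");
	return result;
}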
@@ -2306,7 +2343,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
2306 2343
2307 /* units of bytes */ 2344 /* units of bytes */
2308 result = hfa384x_dormem_wait(hw, currpage, curroffset, buf, 2345 result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
2309 len); 2346 len);
2310 2347
2311 if (result) { 2348 if (result) {
2312 netdev_warn(hw->wlandev->netdev, 2349 netdev_warn(hw->wlandev->netdev,
@@ -2366,51 +2403,52 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
2366} 2403}
2367 2404
2368/*---------------------------------------------------------------- 2405/*----------------------------------------------------------------
2369* hfa384x_drvr_setconfig 2406 * hfa384x_drvr_setconfig
2370* 2407 *
2371* Performs the sequence necessary to write a config/info item. 2408 * Performs the sequence necessary to write a config/info item.
2372* 2409 *
2373* Arguments: 2410 * Arguments:
2374* hw device structure 2411 * hw device structure
2375* rid config/info record id (in host order) 2412 * rid config/info record id (in host order)
2376* buf host side record buffer 2413 * buf host side record buffer
2377* len buffer length (in bytes) 2414 * len buffer length (in bytes)
2378* 2415 *
2379* Returns: 2416 * Returns:
2380* 0 success 2417 * 0 success
2381* >0 f/w reported error - f/w status code 2418 * >0 f/w reported error - f/w status code
2382* <0 driver reported error 2419 * <0 driver reported error
2383* 2420 *
2384* Side effects: 2421 * Side effects:
2385* 2422 *
2386* Call context: 2423 * Call context:
2387* process 2424 * process
2388----------------------------------------------------------------*/ 2425 *----------------------------------------------------------------
2426 */
2389int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len) 2427int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
2390{ 2428{
2391 return hfa384x_dowrid_wait(hw, rid, buf, len); 2429 return hfa384x_dowrid_wait(hw, rid, buf, len);
2392} 2430}
2393 2431
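For the setconfig contract above, a small sketch that writes one 16-bit record; the RID and the little-endian packing of the value are assumptions about the record being written, not something the wrapper itself enforces:

/* Sketch: write a single 16-bit configuration word through the wrapper
 * above.  cpu_to_le16() follows the byte-order convention used elsewhere
 * in this file.
 */
static int write_config16(struct hfa384x *hw, u16 rid, u16 val)
{
	__le16 leval = cpu_to_le16(val);

	return hfa384x_drvr_setconfig(hw, rid, &leval, sizeof(leval));
}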
2394/*---------------------------------------------------------------- 2432/*----------------------------------------------------------------
2395* hfa384x_drvr_start 2433 * hfa384x_drvr_start
2396* 2434 *
2397* Issues the MAC initialize command, sets up some data structures, 2435 * Issues the MAC initialize command, sets up some data structures,
2398* and enables the interrupts. After this function completes, the 2436 * and enables the interrupts. After this function completes, the
2399* low-level stuff should be ready for any/all commands. 2437 * low-level stuff should be ready for any/all commands.
2400* 2438 *
2401* Arguments: 2439 * Arguments:
2402* hw device structure 2440 * hw device structure
2403* Returns: 2441 * Returns:
2404* 0 success 2442 * 0 success
2405* >0 f/w reported error - f/w status code 2443 * >0 f/w reported error - f/w status code
2406* <0 driver reported error 2444 * <0 driver reported error
2407* 2445 *
2408* Side effects: 2446 * Side effects:
2409* 2447 *
2410* Call context: 2448 * Call context:
2411* process 2449 * process
2412----------------------------------------------------------------*/ 2450 *----------------------------------------------------------------
2413 2451 */
2414int hfa384x_drvr_start(struct hfa384x *hw) 2452int hfa384x_drvr_start(struct hfa384x *hw)
2415{ 2453{
2416 int result, result1, result2; 2454 int result, result1, result2;
@@ -2494,24 +2532,25 @@ done:
2494} 2532}
2495 2533
2496/*---------------------------------------------------------------- 2534/*----------------------------------------------------------------
2497* hfa384x_drvr_stop 2535 * hfa384x_drvr_stop
2498* 2536 *
2499* Shuts down the MAC to the point where it is safe to unload the 2537 * Shuts down the MAC to the point where it is safe to unload the
2500* driver. Any subsystem that may be holding a data or function 2538 * driver. Any subsystem that may be holding a data or function
2501* ptr into the driver must be cleared/deinitialized. 2539 * ptr into the driver must be cleared/deinitialized.
2502* 2540 *
2503* Arguments: 2541 * Arguments:
2504* hw device structure 2542 * hw device structure
2505* Returns: 2543 * Returns:
2506* 0 success 2544 * 0 success
2507* >0 f/w reported error - f/w status code 2545 * >0 f/w reported error - f/w status code
2508* <0 driver reported error 2546 * <0 driver reported error
2509* 2547 *
2510* Side effects: 2548 * Side effects:
2511* 2549 *
2512* Call context: 2550 * Call context:
2513* process 2551 * process
2514----------------------------------------------------------------*/ 2552 *----------------------------------------------------------------
2553 */
2515int hfa384x_drvr_stop(struct hfa384x *hw) 2554int hfa384x_drvr_stop(struct hfa384x *hw)
2516{ 2555{
2517 int i; 2556 int i;
@@ -2542,26 +2581,27 @@ int hfa384x_drvr_stop(struct hfa384x *hw)
2542} 2581}
2543 2582
2544/*---------------------------------------------------------------- 2583/*----------------------------------------------------------------
2545* hfa384x_drvr_txframe 2584 * hfa384x_drvr_txframe
2546* 2585 *
2547* Takes a frame from prism2sta and queues it for transmission. 2586 * Takes a frame from prism2sta and queues it for transmission.
2548* 2587 *
2549* Arguments: 2588 * Arguments:
2550* hw device structure 2589 * hw device structure
2551* skb packet buffer struct. Contains an 802.11 2590 * skb packet buffer struct. Contains an 802.11
2552* data frame. 2591 * data frame.
2553* p80211_hdr points to the 802.11 header for the packet. 2592 * p80211_hdr points to the 802.11 header for the packet.
2554* Returns: 2593 * Returns:
2555* 0 Success and more buffs available 2594 * 0 Success and more buffs available
2556* 1 Success but no more buffs 2595 * 1 Success but no more buffs
2557* 2 Allocation failure 2596 * 2 Allocation failure
2558* 4 Buffer full or queue busy 2597 * 4 Buffer full or queue busy
2559* 2598 *
2560* Side effects: 2599 * Side effects:
2561* 2600 *
2562* Call context: 2601 * Call context:
2563* interrupt 2602 * interrupt
2564----------------------------------------------------------------*/ 2603 *----------------------------------------------------------------
2604 */
2565int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb, 2605int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
2566 union p80211_hdr *p80211_hdr, 2606 union p80211_hdr *p80211_hdr,
2567 struct p80211_metawep *p80211_wep) 2607 struct p80211_metawep *p80211_wep)
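A sketch of how the 0/1/2/4 txframe return codes documented above might map onto netdev queue handling; the stop-queue and -EBUSY decisions are illustrative, not the driver's actual tx path:

/* Sketch: translate the txframe return convention into queue actions. */
static int queue_txframe(struct hfa384x *hw, struct sk_buff *skb,
			 union p80211_hdr *hdr, struct p80211_metawep *wep)
{
	switch (hfa384x_drvr_txframe(hw, skb, hdr, wep)) {
	case 0:		/* sent, buffers still available */
		return 0;
	case 1:		/* sent, but that was the last buffer */
		netif_stop_queue(hw->wlandev->netdev);
		return 0;
	default:	/* 2 = allocation failure, 4 = buffer full/queue busy */
		return -EBUSY;
	}
}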
@@ -2608,7 +2648,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
2608 cpu_to_le16(hw->txbuff.txfrm.desc.tx_control); 2648 cpu_to_le16(hw->txbuff.txfrm.desc.tx_control);
2609 2649
2610 /* copy the header over to the txdesc */ 2650 /* copy the header over to the txdesc */
2611 memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr, 2651 memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr,
2612 sizeof(union p80211_hdr)); 2652 sizeof(union p80211_hdr));
2613 2653
2614 /* if we're using host WEP, increase size by IV+ICV */ 2654 /* if we're using host WEP, increase size by IV+ICV */
@@ -2638,9 +2678,9 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
2638 memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv)); 2678 memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv));
2639 2679
2640 /* Send the USB packet */ 2680 /* Send the USB packet */
2641 usb_fill_bulk_urb(&(hw->tx_urb), hw->usb, 2681 usb_fill_bulk_urb(&hw->tx_urb, hw->usb,
2642 hw->endp_out, 2682 hw->endp_out,
2643 &(hw->txbuff), ROUNDUP64(usbpktlen), 2683 &hw->txbuff, ROUNDUP64(usbpktlen),
2644 hfa384x_usbout_callback, hw->wlandev); 2684 hfa384x_usbout_callback, hw->wlandev);
2645 hw->tx_urb.transfer_flags |= USB_QUEUE_BULK; 2685 hw->tx_urb.transfer_flags |= USB_QUEUE_BULK;
2646 2686
@@ -2676,18 +2716,19 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
2676} 2716}
2677 2717
2678/*---------------------------------------------------------------- 2718/*----------------------------------------------------------------
2679* hfa384x_usbctlx_reaper_task 2719 * hfa384x_usbctlx_reaper_task
2680* 2720 *
2681* Tasklet to delete dead CTLX objects 2721 * Tasklet to delete dead CTLX objects
2682* 2722 *
2683* Arguments: 2723 * Arguments:
2684* data ptr to a struct hfa384x 2724 * data ptr to a struct hfa384x
2685* 2725 *
2686* Returns: 2726 * Returns:
2687* 2727 *
2688* Call context: 2728 * Call context:
2689* Interrupt 2729 * Interrupt
2690----------------------------------------------------------------*/ 2730 *----------------------------------------------------------------
2731 */
2691static void hfa384x_usbctlx_reaper_task(unsigned long data) 2732static void hfa384x_usbctlx_reaper_task(unsigned long data)
2692{ 2733{
2693 struct hfa384x *hw = (struct hfa384x *)data; 2734 struct hfa384x *hw = (struct hfa384x *)data;
@@ -2708,19 +2749,20 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
2708} 2749}
2709 2750
2710/*---------------------------------------------------------------- 2751/*----------------------------------------------------------------
2711* hfa384x_usbctlx_completion_task 2752 * hfa384x_usbctlx_completion_task
2712* 2753 *
2713* Tasklet to call completion handlers for returned CTLXs 2754 * Tasklet to call completion handlers for returned CTLXs
2714* 2755 *
2715* Arguments: 2756 * Arguments:
2716* data ptr to struct hfa384x 2757 * data ptr to struct hfa384x
2717* 2758 *
2718* Returns: 2759 * Returns:
2719* Nothing 2760 * Nothing
2720* 2761 *
2721* Call context: 2762 * Call context:
2722* Interrupt 2763 * Interrupt
2723----------------------------------------------------------------*/ 2764 *----------------------------------------------------------------
2765 */
2724static void hfa384x_usbctlx_completion_task(unsigned long data) 2766static void hfa384x_usbctlx_completion_task(unsigned long data)
2725{ 2767{
2726 struct hfa384x *hw = (struct hfa384x *)data; 2768 struct hfa384x *hw = (struct hfa384x *)data;
@@ -2781,22 +2823,23 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
2781} 2823}
2782 2824
2783/*---------------------------------------------------------------- 2825/*----------------------------------------------------------------
2784* unlocked_usbctlx_cancel_async 2826 * unlocked_usbctlx_cancel_async
2785* 2827 *
2786* Mark the CTLX dead asynchronously, and ensure that the 2828 * Mark the CTLX dead asynchronously, and ensure that the
2787* next command on the queue is run afterwards. 2829 * next command on the queue is run afterwards.
2788* 2830 *
2789* Arguments: 2831 * Arguments:
2790* hw ptr to the struct hfa384x structure 2832 * hw ptr to the struct hfa384x structure
2791* ctlx ptr to a CTLX structure 2833 * ctlx ptr to a CTLX structure
2792* 2834 *
2793* Returns: 2835 * Returns:
2794* 0 the CTLX's URB is inactive 2836 * 0 the CTLX's URB is inactive
2795* -EINPROGRESS the URB is currently being unlinked 2837 * -EINPROGRESS the URB is currently being unlinked
2796* 2838 *
2797* Call context: 2839 * Call context:
2798* Either process or interrupt, but presumably interrupt 2840 * Either process or interrupt, but presumably interrupt
2799----------------------------------------------------------------*/ 2841 *----------------------------------------------------------------
2842 */
2800static int unlocked_usbctlx_cancel_async(struct hfa384x *hw, 2843static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
2801 struct hfa384x_usbctlx *ctlx) 2844 struct hfa384x_usbctlx *ctlx)
2802{ 2845{
@@ -2826,28 +2869,30 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
2826} 2869}
2827 2870
2828/*---------------------------------------------------------------- 2871/*----------------------------------------------------------------
2829* unlocked_usbctlx_complete 2872 * unlocked_usbctlx_complete
2830* 2873 *
2831* A CTLX has completed. It may have been successful, it may not 2874 * A CTLX has completed. It may have been successful, it may not
2832* have been. At this point, the CTLX should be quiescent. The URBs 2875 * have been. At this point, the CTLX should be quiescent. The URBs
2833* aren't active and the timers should have been stopped. 2876 * aren't active and the timers should have been stopped.
2834* 2877 *
2835* The CTLX is migrated to the "completing" queue, and the completing 2878 * The CTLX is migrated to the "completing" queue, and the completing
2836* tasklet is scheduled. 2879 * tasklet is scheduled.
2837* 2880 *
2838* Arguments: 2881 * Arguments:
2839* hw ptr to a struct hfa384x structure 2882 * hw ptr to a struct hfa384x structure
2840* ctlx ptr to a ctlx structure 2883 * ctlx ptr to a ctlx structure
2841* 2884 *
2842* Returns: 2885 * Returns:
2843* nothing 2886 * nothing
2844* 2887 *
2845* Side effects: 2888 * Side effects:
2846* 2889 *
2847* Call context: 2890 * Call context:
2848* Either, assume interrupt 2891 * Either, assume interrupt
2849----------------------------------------------------------------*/ 2892 *----------------------------------------------------------------
2850static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx) 2893 */
2894static void unlocked_usbctlx_complete(struct hfa384x *hw,
2895 struct hfa384x_usbctlx *ctlx)
2851{ 2896{
2852 /* Timers have been stopped, and ctlx should be in 2897 /* Timers have been stopped, and ctlx should be in
2853 * a terminal state. Retire it from the "active" 2898 * a terminal state. Retire it from the "active"
@@ -2871,21 +2916,22 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx
2871} 2916}
2872 2917
2873/*---------------------------------------------------------------- 2918/*----------------------------------------------------------------
2874* hfa384x_usbctlxq_run 2919 * hfa384x_usbctlxq_run
2875* 2920 *
2876* Checks to see if the head item is running. If not, starts it. 2921 * Checks to see if the head item is running. If not, starts it.
2877* 2922 *
2878* Arguments: 2923 * Arguments:
2879* hw ptr to struct hfa384x 2924 * hw ptr to struct hfa384x
2880* 2925 *
2881* Returns: 2926 * Returns:
2882* nothing 2927 * nothing
2883* 2928 *
2884* Side effects: 2929 * Side effects:
2885* 2930 *
2886* Call context: 2931 * Call context:
2887* any 2932 * any
2888----------------------------------------------------------------*/ 2933 *----------------------------------------------------------------
2934 */
2889static void hfa384x_usbctlxq_run(struct hfa384x *hw) 2935static void hfa384x_usbctlxq_run(struct hfa384x *hw)
2890{ 2936{
2891 unsigned long flags; 2937 unsigned long flags;
@@ -2916,9 +2962,9 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw)
2916 list_move_tail(&head->list, &hw->ctlxq.active); 2962 list_move_tail(&head->list, &hw->ctlxq.active);
2917 2963
2918 /* Fill the out packet */ 2964 /* Fill the out packet */
2919 usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb, 2965 usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb,
2920 hw->endp_out, 2966 hw->endp_out,
2921 &(head->outbuf), ROUNDUP64(head->outbufsize), 2967 &head->outbuf, ROUNDUP64(head->outbufsize),
2922 hfa384x_ctlxout_callback, hw); 2968 hfa384x_ctlxout_callback, hw);
2923 hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK; 2969 hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
2924 2970
@@ -2971,26 +3017,27 @@ unlock:
2971} 3017}
2972 3018
2973/*---------------------------------------------------------------- 3019/*----------------------------------------------------------------
2974* hfa384x_usbin_callback 3020 * hfa384x_usbin_callback
2975* 3021 *
2976* Callback for URBs on the BULKIN endpoint. 3022 * Callback for URBs on the BULKIN endpoint.
2977* 3023 *
2978* Arguments: 3024 * Arguments:
2979* urb ptr to the completed urb 3025 * urb ptr to the completed urb
2980* 3026 *
2981* Returns: 3027 * Returns:
2982* nothing 3028 * nothing
2983* 3029 *
2984* Side effects: 3030 * Side effects:
2985* 3031 *
2986* Call context: 3032 * Call context:
2987* interrupt 3033 * interrupt
2988----------------------------------------------------------------*/ 3034 *----------------------------------------------------------------
3035 */
2989static void hfa384x_usbin_callback(struct urb *urb) 3036static void hfa384x_usbin_callback(struct urb *urb)
2990{ 3037{
2991 struct wlandevice *wlandev = urb->context; 3038 struct wlandevice *wlandev = urb->context;
2992 struct hfa384x *hw; 3039 struct hfa384x *hw;
2993 union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer; 3040 union hfa384x_usbin *usbin;
2994 struct sk_buff *skb = NULL; 3041 struct sk_buff *skb = NULL;
2995 int result; 3042 int result;
2996 int urb_status; 3043 int urb_status;
@@ -3010,7 +3057,10 @@ static void hfa384x_usbin_callback(struct urb *urb)
3010 goto exit; 3057 goto exit;
3011 3058
3012 skb = hw->rx_urb_skb; 3059 skb = hw->rx_urb_skb;
3013 BUG_ON(!skb || (skb->data != urb->transfer_buffer)); 3060 if (!skb || (skb->data != urb->transfer_buffer)) {
3061 WARN_ON(1);
3062 return;
3063 }
3014 3064
3015 hw->rx_urb_skb = NULL; 3065 hw->rx_urb_skb = NULL;
3016 3066
@@ -3089,6 +3139,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
3089 /* Note: the check of the sw_support field, the type field doesn't 3139 /* Note: the check of the sw_support field, the type field doesn't
3090 * have bit 12 set like the docs suggest. 3140 * have bit 12 set like the docs suggest.
3091 */ 3141 */
3142 usbin = (union hfa384x_usbin *)urb->transfer_buffer;
3092 type = le16_to_cpu(usbin->type); 3143 type = le16_to_cpu(usbin->type);
3093 if (HFA384x_USB_ISRXFRM(type)) { 3144 if (HFA384x_USB_ISRXFRM(type)) {
3094 if (action == HANDLE) { 3145 if (action == HANDLE) {
@@ -3147,25 +3198,26 @@ exit:
3147} 3198}
3148 3199
3149/*---------------------------------------------------------------- 3200/*----------------------------------------------------------------
3150* hfa384x_usbin_ctlx 3201 * hfa384x_usbin_ctlx
3151* 3202 *
3152* We've received a URB containing a Prism2 "response" message. 3203 * We've received a URB containing a Prism2 "response" message.
3153* This message needs to be matched up with a CTLX on the active 3204 * This message needs to be matched up with a CTLX on the active
3154* queue and our state updated accordingly. 3205 * queue and our state updated accordingly.
3155* 3206 *
3156* Arguments: 3207 * Arguments:
3157* hw ptr to struct hfa384x 3208 * hw ptr to struct hfa384x
3158* usbin ptr to USB IN packet 3209 * usbin ptr to USB IN packet
3159* urb_status status of this Bulk-In URB 3210 * urb_status status of this Bulk-In URB
3160* 3211 *
3161* Returns: 3212 * Returns:
3162* nothing 3213 * nothing
3163* 3214 *
3164* Side effects: 3215 * Side effects:
3165* 3216 *
3166* Call context: 3217 * Call context:
3167* interrupt 3218 * interrupt
3168----------------------------------------------------------------*/ 3219 *----------------------------------------------------------------
3220 */
3169static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin, 3221static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
3170 int urb_status) 3222 int urb_status)
3171{ 3223{
@@ -3269,22 +3321,23 @@ unlock:
3269} 3321}
3270 3322
3271/*---------------------------------------------------------------- 3323/*----------------------------------------------------------------
3272* hfa384x_usbin_txcompl 3324 * hfa384x_usbin_txcompl
3273* 3325 *
3274* At this point we have the results of a previous transmit. 3326 * At this point we have the results of a previous transmit.
3275* 3327 *
3276* Arguments: 3328 * Arguments:
3277* wlandev wlan device 3329 * wlandev wlan device
3278* usbin ptr to the usb transfer buffer 3330 * usbin ptr to the usb transfer buffer
3279* 3331 *
3280* Returns: 3332 * Returns:
3281* nothing 3333 * nothing
3282* 3334 *
3283* Side effects: 3335 * Side effects:
3284* 3336 *
3285* Call context: 3337 * Call context:
3286* interrupt 3338 * interrupt
3287----------------------------------------------------------------*/ 3339 *----------------------------------------------------------------
3340 */
3288static void hfa384x_usbin_txcompl(struct wlandevice *wlandev, 3341static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
3289 union hfa384x_usbin *usbin) 3342 union hfa384x_usbin *usbin)
3290{ 3343{
@@ -3300,22 +3353,23 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
3300} 3353}
3301 3354
3302/*---------------------------------------------------------------- 3355/*----------------------------------------------------------------
3303* hfa384x_usbin_rx 3356 * hfa384x_usbin_rx
3304* 3357 *
3305* At this point we have a successful received a rx frame packet. 3358 * At this point we have a successful received a rx frame packet.
3306* 3359 *
3307* Arguments: 3360 * Arguments:
3308* wlandev wlan device 3361 * wlandev wlan device
3309* usbin ptr to the usb transfer buffer 3362 * usbin ptr to the usb transfer buffer
3310* 3363 *
3311* Returns: 3364 * Returns:
3312* nothing 3365 * nothing
3313* 3366 *
3314* Side effects: 3367 * Side effects:
3315* 3368 *
3316* Call context: 3369 * Call context:
3317* interrupt 3370 * interrupt
3318----------------------------------------------------------------*/ 3371 *----------------------------------------------------------------
3372 */
3319static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) 3373static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
3320{ 3374{
3321 union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data; 3375 union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
@@ -3396,30 +3450,31 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
3396} 3450}
3397 3451
3398/*---------------------------------------------------------------- 3452/*----------------------------------------------------------------
3399* hfa384x_int_rxmonitor 3453 * hfa384x_int_rxmonitor
3400* 3454 *
3401* Helper function for int_rx. Handles monitor frames. 3455 * Helper function for int_rx. Handles monitor frames.
3402* Note that this function allocates space for the FCS and sets it 3456 * Note that this function allocates space for the FCS and sets it
3403* to 0xffffffff. The hfa384x doesn't give us the FCS value but the 3457 * to 0xffffffff. The hfa384x doesn't give us the FCS value but the
3404* higher layers expect it. 0xffffffff is used as a flag to indicate 3458 * higher layers expect it. 0xffffffff is used as a flag to indicate
3405* the FCS is bogus. 3459 * the FCS is bogus.
3406* 3460 *
3407* Arguments: 3461 * Arguments:
3408* wlandev wlan device structure 3462 * wlandev wlan device structure
3409* rxfrm rx descriptor read from card in int_rx 3463 * rxfrm rx descriptor read from card in int_rx
3410* 3464 *
3411* Returns: 3465 * Returns:
3412* nothing 3466 * nothing
3413* 3467 *
3414* Side effects: 3468 * Side effects:
3415* Allocates an skb and passes it up via the PF_PACKET interface. 3469 * Allocates an skb and passes it up via the PF_PACKET interface.
3416* Call context: 3470 * Call context:
3417* interrupt 3471 * interrupt
3418----------------------------------------------------------------*/ 3472 *----------------------------------------------------------------
3473 */
3419static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, 3474static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
3420 struct hfa384x_usb_rxfrm *rxfrm) 3475 struct hfa384x_usb_rxfrm *rxfrm)
3421{ 3476{
3422 struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc); 3477 struct hfa384x_rx_frame *rxdesc = &rxfrm->desc;
3423 unsigned int hdrlen = 0; 3478 unsigned int hdrlen = 0;
3424 unsigned int datalen = 0; 3479 unsigned int datalen = 0;
3425 unsigned int skblen = 0; 3480 unsigned int skblen = 0;
@@ -3474,9 +3529,10 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
3474 } 3529 }
3475 3530
3476 /* Copy the 802.11 header to the skb 3531 /* Copy the 802.11 header to the skb
3477 (ctl frames may be less than a full header) */ 3532 * (ctl frames may be less than a full header)
3533 */
3478 datap = skb_put(skb, hdrlen); 3534 datap = skb_put(skb, hdrlen);
3479 memcpy(datap, &(rxdesc->frame_control), hdrlen); 3535 memcpy(datap, &rxdesc->frame_control, hdrlen);
3480 3536
3481 /* If any, copy the data from the card to the skb */ 3537 /* If any, copy the data from the card to the skb */
3482 if (datalen > 0) { 3538 if (datalen > 0) {
@@ -3501,22 +3557,23 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
3501} 3557}
3502 3558
3503/*---------------------------------------------------------------- 3559/*----------------------------------------------------------------
3504* hfa384x_usbin_info 3560 * hfa384x_usbin_info
3505* 3561 *
3506* At this point we have a successful received a Prism2 info frame. 3562 * At this point we have a successful received a Prism2 info frame.
3507* 3563 *
3508* Arguments: 3564 * Arguments:
3509* wlandev wlan device 3565 * wlandev wlan device
3510* usbin ptr to the usb transfer buffer 3566 * usbin ptr to the usb transfer buffer
3511* 3567 *
3512* Returns: 3568 * Returns:
3513* nothing 3569 * nothing
3514* 3570 *
3515* Side effects: 3571 * Side effects:
3516* 3572 *
3517* Call context: 3573 * Call context:
3518* interrupt 3574 * interrupt
3519----------------------------------------------------------------*/ 3575 *----------------------------------------------------------------
3576 */
3520static void hfa384x_usbin_info(struct wlandevice *wlandev, 3577static void hfa384x_usbin_info(struct wlandevice *wlandev,
3521 union hfa384x_usbin *usbin) 3578 union hfa384x_usbin *usbin)
3522{ 3579{
@@ -3526,21 +3583,22 @@ static void hfa384x_usbin_info(struct wlandevice *wlandev,
3526} 3583}
3527 3584
3528/*---------------------------------------------------------------- 3585/*----------------------------------------------------------------
3529* hfa384x_usbout_callback 3586 * hfa384x_usbout_callback
3530* 3587 *
3531* Callback for URBs on the BULKOUT endpoint. 3588 * Callback for URBs on the BULKOUT endpoint.
3532* 3589 *
3533* Arguments: 3590 * Arguments:
3534* urb ptr to the completed urb 3591 * urb ptr to the completed urb
3535* 3592 *
3536* Returns: 3593 * Returns:
3537* nothing 3594 * nothing
3538* 3595 *
3539* Side effects: 3596 * Side effects:
3540* 3597 *
3541* Call context: 3598 * Call context:
3542* interrupt 3599 * interrupt
3543----------------------------------------------------------------*/ 3600 *----------------------------------------------------------------
3601 */
3544static void hfa384x_usbout_callback(struct urb *urb) 3602static void hfa384x_usbout_callback(struct urb *urb)
3545{ 3603{
3546 struct wlandevice *wlandev = urb->context; 3604 struct wlandevice *wlandev = urb->context;
@@ -3601,21 +3659,22 @@ static void hfa384x_usbout_callback(struct urb *urb)
3601} 3659}
3602 3660
3603/*---------------------------------------------------------------- 3661/*----------------------------------------------------------------
3604* hfa384x_ctlxout_callback 3662 * hfa384x_ctlxout_callback
3605* 3663 *
3606* Callback for control data on the BULKOUT endpoint. 3664 * Callback for control data on the BULKOUT endpoint.
3607* 3665 *
3608* Arguments: 3666 * Arguments:
3609* urb ptr to the completed urb 3667 * urb ptr to the completed urb
3610* 3668 *
3611* Returns: 3669 * Returns:
3612* nothing 3670 * nothing
3613* 3671 *
3614* Side effects: 3672 * Side effects:
3615* 3673 *
3616* Call context: 3674 * Call context:
3617* interrupt 3675 * interrupt
3618----------------------------------------------------------------*/ 3676 *----------------------------------------------------------------
3677 */
3619static void hfa384x_ctlxout_callback(struct urb *urb) 3678static void hfa384x_ctlxout_callback(struct urb *urb)
3620{ 3679{
3621 struct hfa384x *hw = urb->context; 3680 struct hfa384x *hw = urb->context;
@@ -3730,23 +3789,24 @@ delresp:
3730} 3789}
3731 3790
3732/*---------------------------------------------------------------- 3791/*----------------------------------------------------------------
3733* hfa384x_usbctlx_reqtimerfn 3792 * hfa384x_usbctlx_reqtimerfn
3734* 3793 *
3735* Timer response function for CTLX request timeouts. If this 3794 * Timer response function for CTLX request timeouts. If this
3736* function is called, it means that the callback for the OUT 3795 * function is called, it means that the callback for the OUT
3737* URB containing a Prism2.x XXX_Request was never called. 3796 * URB containing a Prism2.x XXX_Request was never called.
3738* 3797 *
3739* Arguments: 3798 * Arguments:
3740* data a ptr to the struct hfa384x 3799 * data a ptr to the struct hfa384x
3741* 3800 *
3742* Returns: 3801 * Returns:
3743* nothing 3802 * nothing
3744* 3803 *
3745* Side effects: 3804 * Side effects:
3746* 3805 *
3747* Call context: 3806 * Call context:
3748* interrupt 3807 * interrupt
3749----------------------------------------------------------------*/ 3808 *----------------------------------------------------------------
3809 */
3750static void hfa384x_usbctlx_reqtimerfn(unsigned long data) 3810static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
3751{ 3811{
3752 struct hfa384x *hw = (struct hfa384x *)data; 3812 struct hfa384x *hw = (struct hfa384x *)data;
@@ -3788,23 +3848,24 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
3788} 3848}
3789 3849
3790/*---------------------------------------------------------------- 3850/*----------------------------------------------------------------
3791* hfa384x_usbctlx_resptimerfn 3851 * hfa384x_usbctlx_resptimerfn
3792* 3852 *
3793* Timer response function for CTLX response timeouts. If this 3853 * Timer response function for CTLX response timeouts. If this
3794* function is called, it means that the callback for the IN 3854 * function is called, it means that the callback for the IN
3795* URB containing a Prism2.x XXX_Response was never called. 3855 * URB containing a Prism2.x XXX_Response was never called.
3796* 3856 *
3797* Arguments: 3857 * Arguments:
3798* data a ptr to the struct hfa384x 3858 * data a ptr to the struct hfa384x
3799* 3859 *
3800* Returns: 3860 * Returns:
3801* nothing 3861 * nothing
3802* 3862 *
3803* Side effects: 3863 * Side effects:
3804* 3864 *
3805* Call context: 3865 * Call context:
3806* interrupt 3866 * interrupt
3807----------------------------------------------------------------*/ 3867 *----------------------------------------------------------------
3868 */
3808static void hfa384x_usbctlx_resptimerfn(unsigned long data) 3869static void hfa384x_usbctlx_resptimerfn(unsigned long data)
3809{ 3870{
3810 struct hfa384x *hw = (struct hfa384x *)data; 3871 struct hfa384x *hw = (struct hfa384x *)data;
@@ -3830,20 +3891,21 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
3830} 3891}
3831 3892
3832/*---------------------------------------------------------------- 3893/*----------------------------------------------------------------
3833* hfa384x_usb_throttlefn 3894 * hfa384x_usb_throttlefn
3834* 3895 *
3835* 3896 *
3836* Arguments: 3897 * Arguments:
3837* data ptr to hw 3898 * data ptr to hw
3838* 3899 *
3839* Returns: 3900 * Returns:
3840* Nothing 3901 * Nothing
3841* 3902 *
3842* Side effects: 3903 * Side effects:
3843* 3904 *
3844* Call context: 3905 * Call context:
3845* Interrupt 3906 * Interrupt
3846----------------------------------------------------------------*/ 3907 *----------------------------------------------------------------
3908 */
3847static void hfa384x_usb_throttlefn(unsigned long data) 3909static void hfa384x_usb_throttlefn(unsigned long data)
3848{ 3910{
3849 struct hfa384x *hw = (struct hfa384x *)data; 3911 struct hfa384x *hw = (struct hfa384x *)data;
@@ -3869,24 +3931,26 @@ static void hfa384x_usb_throttlefn(unsigned long data)
3869} 3931}
3870 3932
3871/*---------------------------------------------------------------- 3933/*----------------------------------------------------------------
3872* hfa384x_usbctlx_submit 3934 * hfa384x_usbctlx_submit
3873* 3935 *
3874* Called from the doxxx functions to submit a CTLX to the queue 3936 * Called from the doxxx functions to submit a CTLX to the queue
3875* 3937 *
3876* Arguments: 3938 * Arguments:
3877* hw ptr to the hw struct 3939 * hw ptr to the hw struct
3878* ctlx ctlx structure to enqueue 3940 * ctlx ctlx structure to enqueue
3879* 3941 *
3880* Returns: 3942 * Returns:
3881* -ENODEV if the adapter is unplugged 3943 * -ENODEV if the adapter is unplugged
3882* 0 3944 * 0
3883* 3945 *
3884* Side effects: 3946 * Side effects:
3885* 3947 *
3886* Call context: 3948 * Call context:
3887* process or interrupt 3949 * process or interrupt
3888----------------------------------------------------------------*/ 3950 *----------------------------------------------------------------
3889static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx) 3951 */
3952static int hfa384x_usbctlx_submit(struct hfa384x *hw,
3953 struct hfa384x_usbctlx *ctlx)
3890{ 3954{
3891 unsigned long flags; 3955 unsigned long flags;
3892 3956
@@ -3906,21 +3970,22 @@ static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ct
3906} 3970}
3907 3971
3908/*---------------------------------------------------------------- 3972/*----------------------------------------------------------------
3909* hfa384x_isgood_pdrcore 3973 * hfa384x_isgood_pdrcore
3910* 3974 *
3911* Quick check of PDR codes. 3975 * Quick check of PDR codes.
3912* 3976 *
3913* Arguments: 3977 * Arguments:
3914* pdrcode PDR code number (host order) 3978 * pdrcode PDR code number (host order)
3915* 3979 *
3916* Returns: 3980 * Returns:
3917* zero not good. 3981 * zero not good.
3918* one is good. 3982 * one is good.
3919* 3983 *
3920* Side effects: 3984 * Side effects:
3921* 3985 *
3922* Call context: 3986 * Call context:
3923----------------------------------------------------------------*/ 3987 *----------------------------------------------------------------
3988 */
3924static int hfa384x_isgood_pdrcode(u16 pdrcode) 3989static int hfa384x_isgood_pdrcode(u16 pdrcode)
3925{ 3990{
3926 switch (pdrcode) { 3991 switch (pdrcode) {
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0247cbc29145..8387e6a3031a 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,56 +1,56 @@
1/* src/p80211/p80211conv.c 1/* src/p80211/p80211conv.c
2* 2 *
3* Ether/802.11 conversions and packet buffer routines 3 * Ether/802.11 conversions and packet buffer routines
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file defines the functions that perform Ethernet to/from 47 * This file defines the functions that perform Ethernet to/from
48* 802.11 frame conversions. 48 * 802.11 frame conversions.
49* 49 *
50* -------------------------------------------------------------------- 50 * --------------------------------------------------------------------
51* 51 *
52*================================================================ 52 *================================================================
53*/ 53 */
54 54
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/kernel.h> 56#include <linux/kernel.h>
@@ -79,31 +79,31 @@ static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
79static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 }; 79static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
80 80
81/*---------------------------------------------------------------- 81/*----------------------------------------------------------------
82* p80211pb_ether_to_80211 82 * p80211pb_ether_to_80211
83* 83 *
84* Uses the contents of the ether frame and the etherconv setting 84 * Uses the contents of the ether frame and the etherconv setting
85* to build the elements of the 802.11 frame. 85 * to build the elements of the 802.11 frame.
86* 86 *
87* We don't actually set 87 * We don't actually set
88* up the frame header here. That's the MAC's job. We're only handling 88 * up the frame header here. That's the MAC's job. We're only handling
89* conversion of DIXII or 802.3+LLC frames to something that works 89 * conversion of DIXII or 802.3+LLC frames to something that works
90* with 802.11. 90 * with 802.11.
91* 91 *
92* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11 92 * Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
93* FCS is also not present and will need to be added elsewhere. 93 * FCS is also not present and will need to be added elsewhere.
94* 94 *
95* Arguments: 95 * Arguments:
96* ethconv Conversion type to perform 96 * ethconv Conversion type to perform
97* skb skbuff containing the ether frame 97 * skb skbuff containing the ether frame
98* p80211_hdr 802.11 header 98 * p80211_hdr 802.11 header
99* 99 *
100* Returns: 100 * Returns:
101* 0 on success, non-zero otherwise 101 * 0 on success, non-zero otherwise
102* 102 *
103* Call context: 103 * Call context:
104* May be called in interrupt or non-interrupt context 104 * May be called in interrupt or non-interrupt context
105*---------------------------------------------------------------- 105 *----------------------------------------------------------------
106*/ 106 */
107int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv, 107int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
108 struct sk_buff *skb, union p80211_hdr *p80211_hdr, 108 struct sk_buff *skb, union p80211_hdr *p80211_hdr,
109 struct p80211_metawep *p80211_wep) 109 struct p80211_metawep *p80211_wep)
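A hedged sketch of a tx-path caller for skb_ether_to_p80211 as documented above; all of the setup (wlandev, the chosen ethconv mode, the metawep buffer) is assumed to already exist, and the debug message is illustrative:

/* Sketch: convert an ether frame in place before handing it to the MAC. */
static int convert_for_tx(struct wlandevice *wlandev, u32 ethconv,
			  struct sk_buff *skb, union p80211_hdr *hdr,
			  struct p80211_metawep *wep)
{
	int result = skb_ether_to_p80211(wlandev, ethconv, skb, hdr, wep);

	if (result)
		netdev_dbg(wlandev->netdev,
			   "ether->802.11 conversion failed: %d\n", result);
	return result;
}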
@@ -255,25 +255,25 @@ static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
255} 255}
256 256
257/*---------------------------------------------------------------- 257/*----------------------------------------------------------------
258* p80211pb_80211_to_ether 258 * p80211pb_80211_to_ether
259* 259 *
260* Uses the contents of a received 802.11 frame and the etherconv 260 * Uses the contents of a received 802.11 frame and the etherconv
261* setting to build an ether frame. 261 * setting to build an ether frame.
262* 262 *
263* This function extracts the src and dest address from the 802.11 263 * This function extracts the src and dest address from the 802.11
264* frame to use in the construction of the eth frame. 264 * frame to use in the construction of the eth frame.
265* 265 *
266* Arguments: 266 * Arguments:
267* ethconv Conversion type to perform 267 * ethconv Conversion type to perform
268* skb Packet buffer containing the 802.11 frame 268 * skb Packet buffer containing the 802.11 frame
269* 269 *
270* Returns: 270 * Returns:
271* 0 on success, non-zero otherwise 271 * 0 on success, non-zero otherwise
272* 272 *
273* Call context: 273 * Call context:
274* May be called in interrupt or non-interrupt context 274 * May be called in interrupt or non-interrupt context
275*---------------------------------------------------------------- 275 *----------------------------------------------------------------
276*/ 276 */
277int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv, 277int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
278 struct sk_buff *skb) 278 struct sk_buff *skb)
279{ 279{
@@ -508,22 +508,22 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
508} 508}
509 509
510/*---------------------------------------------------------------- 510/*----------------------------------------------------------------
511* p80211_stt_findproto 511 * p80211_stt_findproto
512* 512 *
513* Searches the 802.1h Selective Translation Table for a given 513 * Searches the 802.1h Selective Translation Table for a given
514* protocol. 514 * protocol.
515* 515 *
516* Arguments: 516 * Arguments:
517* proto protocol number (in host order) to search for. 517 * proto protocol number (in host order) to search for.
518* 518 *
519* Returns: 519 * Returns:
520* 1 - if the table is empty or a match is found. 520 * 1 - if the table is empty or a match is found.
521* 0 - if the table is non-empty and a match is not found. 521 * 0 - if the table is non-empty and a match is not found.
522* 522 *
523* Call context: 523 * Call context:
524* May be called in interrupt or non-interrupt context 524 * May be called in interrupt or non-interrupt context
525*---------------------------------------------------------------- 525 *----------------------------------------------------------------
526*/ 526 */
527int p80211_stt_findproto(u16 proto) 527int p80211_stt_findproto(u16 proto)
528{ 528{
529 /* Always return found for now. This is the behavior used by the */ 529 /* Always return found for now. This is the behavior used by the */
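A one-line sketch of how a caller might use the 1/0 return convention documented above; the helper name and its use for choosing 802.1h encapsulation are illustrative, only the lookup call itself comes from the code:

/* Sketch: non-zero means the table is empty or the protocol matched. */
static bool proto_uses_8021h(u16 ethertype)
{
	return p80211_stt_findproto(ethertype) != 0;
}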
@@ -540,21 +540,21 @@ int p80211_stt_findproto(u16 proto)
540} 540}
541 541
542/*---------------------------------------------------------------- 542/*----------------------------------------------------------------
543* p80211skb_rxmeta_detach 543 * p80211skb_rxmeta_detach
544* 544 *
545* Disconnects the frmmeta and rxmeta from an skb. 545 * Disconnects the frmmeta and rxmeta from an skb.
546* 546 *
547* Arguments: 547 * Arguments:
548* wlandev The wlandev this skb belongs to. 548 * wlandev The wlandev this skb belongs to.
549* skb The skb we're attaching to. 549 * skb The skb we're attaching to.
550* 550 *
551* Returns: 551 * Returns:
552* 0 on success, non-zero otherwise 552 * 0 on success, non-zero otherwise
553* 553 *
554* Call context: 554 * Call context:
555* May be called in interrupt or non-interrupt context 555 * May be called in interrupt or non-interrupt context
556*---------------------------------------------------------------- 556 *----------------------------------------------------------------
557*/ 557 */
558void p80211skb_rxmeta_detach(struct sk_buff *skb) 558void p80211skb_rxmeta_detach(struct sk_buff *skb)
559{ 559{
560 struct p80211_rxmeta *rxmeta; 560 struct p80211_rxmeta *rxmeta;
@@ -584,22 +584,22 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
584} 584}
585 585
586/*---------------------------------------------------------------- 586/*----------------------------------------------------------------
587* p80211skb_rxmeta_attach 587 * p80211skb_rxmeta_attach
588* 588 *
589* Allocates a p80211rxmeta structure, initializes it, and attaches 589 * Allocates a p80211rxmeta structure, initializes it, and attaches
590* it to an skb. 590 * it to an skb.
591* 591 *
592* Arguments: 592 * Arguments:
593* wlandev The wlandev this skb belongs to. 593 * wlandev The wlandev this skb belongs to.
594* skb The skb we're attaching to. 594 * skb The skb we're attaching to.
595* 595 *
596* Returns: 596 * Returns:
597* 0 on success, non-zero otherwise 597 * 0 on success, non-zero otherwise
598* 598 *
599* Call context: 599 * Call context:
600* May be called in interrupt or non-interrupt context 600 * May be called in interrupt or non-interrupt context
601*---------------------------------------------------------------- 601 *----------------------------------------------------------------
602*/ 602 */
603int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) 603int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
604{ 604{
605 int result = 0; 605 int result = 0;
@@ -615,11 +615,9 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
615 } 615 }
616 616
617 /* Allocate the rxmeta */ 617 /* Allocate the rxmeta */
618 rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC); 618 rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
619 619
620 if (!rxmeta) { 620 if (!rxmeta) {
621 netdev_err(wlandev->netdev,
622 "%s: Failed to allocate rxmeta.\n", wlandev->name);
623 result = 1; 621 result = 1;
624 goto exit; 622 goto exit;
625 } 623 }
@@ -638,22 +636,22 @@ exit:
638} 636}
639 637
640/*---------------------------------------------------------------- 638/*----------------------------------------------------------------
641* p80211skb_free 639 * p80211skb_free
642* 640 *
643* Frees an entire p80211skb by checking and freeing the meta struct 641 * Frees an entire p80211skb by checking and freeing the meta struct
644* and then freeing the skb. 642 * and then freeing the skb.
645* 643 *
646* Arguments: 644 * Arguments:
647* wlandev The wlandev this skb belongs to. 645 * wlandev The wlandev this skb belongs to.
648* skb The skb we're attaching to. 646 * skb The skb we're attaching to.
649* 647 *
650* Returns: 648 * Returns:
651* 0 on success, non-zero otherwise 649 * 0 on success, non-zero otherwise
652* 650 *
653* Call context: 651 * Call context:
654* May be called in interrupt or non-interrupt context 652 * May be called in interrupt or non-interrupt context
655*---------------------------------------------------------------- 653 *----------------------------------------------------------------
656*/ 654 */
657void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb) 655void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
658{ 656{
659 struct p80211_frmmeta *meta; 657 struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 8c10357bedf0..ed70d98e5cf1 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,54 +1,54 @@
1/* p80211conv.h 1/* p80211conv.h
2* 2 *
3* Ether/802.11 conversions and packet buffer routines 3 * Ether/802.11 conversions and packet buffer routines
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file declares the functions, types and macros that perform 47 * This file declares the functions, types and macros that perform
48* Ethernet to/from 802.11 frame conversions. 48 * Ethernet to/from 802.11 frame conversions.
49* 49 *
50* -------------------------------------------------------------------- 50 * --------------------------------------------------------------------
51*/ 51 */
52 52
53#ifndef _LINUX_P80211CONV_H 53#ifndef _LINUX_P80211CONV_H
54#define _LINUX_P80211CONV_H 54#define _LINUX_P80211CONV_H
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 79d9b20b364d..2c44c613a586 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,61 +1,61 @@
1/* p80211hdr.h 1/* p80211hdr.h
2* 2 *
3* Macros, types, and functions for handling 802.11 MAC headers 3 * Macros, types, and functions for handling 802.11 MAC headers
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file declares the constants and types used in the interface 47 * This file declares the constants and types used in the interface
48* between a wlan driver and the user mode utilities. 48 * between a wlan driver and the user mode utilities.
49* 49 *
50* Note: 50 * Note:
51* - Constant values are always in HOST byte order. To assign 51 * - Constant values are always in HOST byte order. To assign
52* values to multi-byte fields they _must_ be converted to 52 * values to multi-byte fields they _must_ be converted to
53* ieee byte order. To retrieve multi-byte values from incoming 53 * ieee byte order. To retrieve multi-byte values from incoming
54* frames, they must be converted to host order. 54 * frames, they must be converted to host order.
55* 55 *
56* All functions declared here are implemented in p80211.c 56 * All functions declared here are implemented in p80211.c
57* -------------------------------------------------------------------- 57 * --------------------------------------------------------------------
58*/ 58 */
59 59
60#ifndef _P80211HDR_H 60#ifndef _P80211HDR_H
61#define _P80211HDR_H 61#define _P80211HDR_H
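
The byte-order note in the header comment above comes down to the usual endianness helpers: 802.11 multi-byte fields are little-endian on the wire, so values are produced with cpu_to_le16() and converted back with le16_to_cpu() before any of the WLAN_GET_FC_* macros are applied. A minimal illustrative sketch (the hdr->fc field name is assumed, not quoted from the header):

    __le16 fc_wire = hdr->fc;                    /* as stored in the frame  */
    u16 fc = le16_to_cpu(fc_wire);               /* convert to host order   */
    unsigned int ftype = WLAN_GET_FC_FTYPE(fc);  /* macros expect host order */
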
@@ -131,8 +131,8 @@
131/* SET_FC_FSTYPE(WLAN_FSTYPE_RTS) ); */ 131/* SET_FC_FSTYPE(WLAN_FSTYPE_RTS) ); */
132/*------------------------------------------------------------*/ 132/*------------------------------------------------------------*/
133 133
134#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & (BIT(2) | BIT(3))) >> 2) 134#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & GENMASK(3, 2)) >> 2)
135#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4) 135#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & GENMASK(7, 4)) >> 4)
136#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8) 136#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8)
137#define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9) 137#define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9)
138#define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14) 138#define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14)
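
GENMASK(h, l) (available via <linux/bitops.h>) expands to a mask with bits l through h set, so the conversion above is purely cosmetic: GENMASK(3, 2) equals BIT(2) | BIT(3) and GENMASK(7, 4) equals BIT(4) | BIT(5) | BIT(6) | BIT(7). A small sketch of the equivalence, using an arbitrary frame-control value:

    #include <linux/bitops.h>

    u16 fc = (2 << 2) | (5 << 4);              /* ftype = 2, fstype = 5              */

    u16 ftype  = (fc & GENMASK(3, 2)) >> 2;    /* == (fc & (BIT(2)|BIT(3))) >> 2 -> 2 */
    u16 fstype = (fc & GENMASK(7, 4)) >> 4;    /* == 5                               */
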
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 06c5e36649a7..ab6067e65050 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,64 +1,64 @@
1/* p80211ioctl.h 1/* p80211ioctl.h
2* 2 *
3* Declares constants and types for the p80211 ioctls 3 * Declares constants and types for the p80211 ioctls
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* While this file is called 'ioctl' is purpose goes a little beyond 47 * While this file is called 'ioctl' is purpose goes a little beyond
48* that. This file defines the types and contants used to implement 48 * that. This file defines the types and contants used to implement
49* the p80211 request/confirm/indicate interfaces on Linux. The 49 * the p80211 request/confirm/indicate interfaces on Linux. The
50* request/confirm interface is, in fact, normally implemented as an 50 * request/confirm interface is, in fact, normally implemented as an
51* ioctl. The indicate interface on the other hand, is implemented 51 * ioctl. The indicate interface on the other hand, is implemented
52* using the Linux 'netlink' interface. 52 * using the Linux 'netlink' interface.
53* 53 *
54* The reason I say that request/confirm is 'normally' implemented 54 * The reason I say that request/confirm is 'normally' implemented
55* via ioctl is that we're reserving the right to be able to send 55 * via ioctl is that we're reserving the right to be able to send
56* request commands via the netlink interface. This will be necessary 56 * request commands via the netlink interface. This will be necessary
57* if we ever need to send request messages when there aren't any 57 * if we ever need to send request messages when there aren't any
58* wlan network devices present (i.e. sending a message that only p80211 58 * wlan network devices present (i.e. sending a message that only p80211
59* cares about. 59 * cares about.
60* -------------------------------------------------------------------- 60 * --------------------------------------------------------------------
61*/ 61 */
62 62
63#ifndef _P80211IOCTL_H 63#ifndef _P80211IOCTL_H
64#define _P80211IOCTL_H 64#define _P80211IOCTL_H
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index b0d3567ca0ad..ea3d9ce222b9 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,48 +1,48 @@
1/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY. 1/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
2* -------------------------------------------------------------------- 2 * --------------------------------------------------------------------
3* 3 *
4* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 4 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
5* -------------------------------------------------------------------- 5 * --------------------------------------------------------------------
6* 6 *
7* linux-wlan 7 * linux-wlan
8* 8 *
9* The contents of this file are subject to the Mozilla Public 9 * The contents of this file are subject to the Mozilla Public
10* License Version 1.1 (the "License"); you may not use this file 10 * License Version 1.1 (the "License"); you may not use this file
11* except in compliance with the License. You may obtain a copy of 11 * except in compliance with the License. You may obtain a copy of
12* the License at http://www.mozilla.org/MPL/ 12 * the License at http://www.mozilla.org/MPL/
13* 13 *
14* Software distributed under the License is distributed on an "AS 14 * Software distributed under the License is distributed on an "AS
15* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 15 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
16* implied. See the License for the specific language governing 16 * implied. See the License for the specific language governing
17* rights and limitations under the License. 17 * rights and limitations under the License.
18* 18 *
19* Alternatively, the contents of this file may be used under the 19 * Alternatively, the contents of this file may be used under the
20* terms of the GNU Public License version 2 (the "GPL"), in which 20 * terms of the GNU Public License version 2 (the "GPL"), in which
21* case the provisions of the GPL are applicable instead of the 21 * case the provisions of the GPL are applicable instead of the
22* above. If you wish to allow the use of your version of this file 22 * above. If you wish to allow the use of your version of this file
23* only under the terms of the GPL and not to allow others to use 23 * only under the terms of the GPL and not to allow others to use
24* your version of this file under the MPL, indicate your decision 24 * your version of this file under the MPL, indicate your decision
25* by deleting the provisions above and replace them with the notice 25 * by deleting the provisions above and replace them with the notice
26* and other provisions required by the GPL. If you do not delete 26 * and other provisions required by the GPL. If you do not delete
27* the provisions above, a recipient may use your version of this 27 * the provisions above, a recipient may use your version of this
28* file under either the MPL or the GPL. 28 * file under either the MPL or the GPL.
29* 29 *
30* -------------------------------------------------------------------- 30 * --------------------------------------------------------------------
31* 31 *
32* Inquiries regarding the linux-wlan Open Source project can be 32 * Inquiries regarding the linux-wlan Open Source project can be
33* made directly to: 33 * made directly to:
34* 34 *
35* AbsoluteValue Systems Inc. 35 * AbsoluteValue Systems Inc.
36* info@linux-wlan.com 36 * info@linux-wlan.com
37* http://www.linux-wlan.com 37 * http://www.linux-wlan.com
38* 38 *
39* -------------------------------------------------------------------- 39 * --------------------------------------------------------------------
40* 40 *
41* Portions of the development of this software were funded by 41 * Portions of the development of this software were funded by
42* Intersil Corporation as part of PRISM(R) chipset product development. 42 * Intersil Corporation as part of PRISM(R) chipset product development.
43* 43 *
44* -------------------------------------------------------------------- 44 * --------------------------------------------------------------------
45*/ 45 */
46 46
47#ifndef _P80211MKMETADEF_H 47#ifndef _P80211MKMETADEF_H
48#define _P80211MKMETADEF_H 48#define _P80211MKMETADEF_H
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 3dd066ac034e..653950fd9843 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,101 +1,101 @@
1/* p80211mgmt.h 1/* p80211mgmt.h
2* 2 *
3* Macros, types, and functions to handle 802.11 mgmt frames 3 * Macros, types, and functions to handle 802.11 mgmt frames
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file declares the constants and types used in the interface 47 * This file declares the constants and types used in the interface
48* between a wlan driver and the user mode utilities. 48 * between a wlan driver and the user mode utilities.
49* 49 *
50* Notes: 50 * Notes:
51* - Constant values are always in HOST byte order. To assign 51 * - Constant values are always in HOST byte order. To assign
52* values to multi-byte fields they _must_ be converted to 52 * values to multi-byte fields they _must_ be converted to
53* ieee byte order. To retrieve multi-byte values from incoming 53 * ieee byte order. To retrieve multi-byte values from incoming
54* frames, they must be converted to host order. 54 * frames, they must be converted to host order.
55* 55 *
56* - The len member of the frame structure does NOT!!! include 56 * - The len member of the frame structure does NOT!!! include
57* the MAC CRC. Therefore, the len field on rx'd frames should 57 * the MAC CRC. Therefore, the len field on rx'd frames should
58* have 4 subtracted from it. 58 * have 4 subtracted from it.
59* 59 *
60* All functions declared here are implemented in p80211.c 60 * All functions declared here are implemented in p80211.c
61* 61 *
62* The types, macros, and functions defined here are primarily 62 * The types, macros, and functions defined here are primarily
63* used for encoding and decoding management frames. They are 63 * used for encoding and decoding management frames. They are
64* designed to follow these patterns of use: 64 * designed to follow these patterns of use:
65* 65 *
66* DECODE: 66 * DECODE:
67* 1) a frame of length len is received into buffer b 67 * 1) a frame of length len is received into buffer b
68* 2) using the hdr structure and macros, we determine the type 68 * 2) using the hdr structure and macros, we determine the type
69* 3) an appropriate mgmt frame structure, mf, is allocated and zeroed 69 * 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
70* 4) mf.hdr = b 70 * 4) mf.hdr = b
71* mf.buf = b 71 * mf.buf = b
72* mf.len = len 72 * mf.len = len
73* 5) call mgmt_decode( mf ) 73 * 5) call mgmt_decode( mf )
74* 6) the frame field pointers in mf are now set. Note that any 74 * 6) the frame field pointers in mf are now set. Note that any
75* multi-byte frame field values accessed using the frame field 75 * multi-byte frame field values accessed using the frame field
76* pointers are in ieee byte order and will have to be converted 76 * pointers are in ieee byte order and will have to be converted
77* to host order. 77 * to host order.
78* 78 *
79* ENCODE: 79 * ENCODE:
80* 1) Library client allocates buffer space for maximum length 80 * 1) Library client allocates buffer space for maximum length
81* frame of the desired type 81 * frame of the desired type
82* 2) Library client allocates a mgmt frame structure, called mf, 82 * 2) Library client allocates a mgmt frame structure, called mf,
83* of the desired type 83 * of the desired type
84* 3) Set the following: 84 * 3) Set the following:
85* mf.type = <desired type> 85 * mf.type = <desired type>
86* mf.buf = <allocated buffer address> 86 * mf.buf = <allocated buffer address>
87* 4) call mgmt_encode( mf ) 87 * 4) call mgmt_encode( mf )
88* 5) all of the fixed field pointers and fixed length information element 88 * 5) all of the fixed field pointers and fixed length information element
89* pointers in mf are now set to their respective locations in the 89 * pointers in mf are now set to their respective locations in the
90* allocated space (fortunately, all variable length information elements 90 * allocated space (fortunately, all variable length information elements
91* fall at the end of their respective frames). 91 * fall at the end of their respective frames).
92* 5a) The length field is set to include the last of the fixed and fixed 92 * 5a) The length field is set to include the last of the fixed and fixed
93* length fields. It may have to be updated for optional or variable 93 * length fields. It may have to be updated for optional or variable
94* length information elements. 94 * length information elements.
95* 6) Optional and variable length information elements are special cases 95 * 6) Optional and variable length information elements are special cases
96* and must be handled individually by the client code. 96 * and must be handled individually by the client code.
97* -------------------------------------------------------------------- 97 * --------------------------------------------------------------------
98*/ 98 */
99 99
100#ifndef _P80211MGMT_H 100#ifndef _P80211MGMT_H
101#define _P80211MGMT_H 101#define _P80211MGMT_H
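
The DECODE recipe spelled out in the p80211mgmt.h comment above translates into code roughly as follows. The struct and function names used below (wlan_fr_mgmt, mgmt_decode) are placeholders standing in for whichever per-frame-type structure and decode helper the header actually declares; treat this as a sketch of the numbered steps, not the exact API:

    struct wlan_fr_mgmt mf;          /* hypothetical mgmt frame struct (step 3) */

    memset(&mf, 0, sizeof(mf));
    mf.hdr = b;                      /* step 4: hdr, buf and len point at the   */
    mf.buf = b;                      /*         received buffer of length len   */
    mf.len = len;
    mgmt_decode(&mf);                /* step 5: fills the frame field pointers  */
    /* step 6: pointers are now set; values remain in IEEE byte order */
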
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 43d2f971e2cd..40c5cf5997c7 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,49 +1,49 @@
1/* p80211msg.h 1/* p80211msg.h
2* 2 *
3* Macros, constants, types, and funcs for req and ind messages 3 * Macros, constants, types, and funcs for req and ind messages
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46*/ 46 */
47 47
48#ifndef _P80211MSG_H 48#ifndef _P80211MSG_H
49#define _P80211MSG_H 49#define _P80211MSG_H
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 4762d38a720e..73fcf07254fe 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,53 +1,53 @@
1/* src/p80211/p80211knetdev.c 1/* src/p80211/p80211knetdev.c
2* 2 *
3* Linux Kernel net device interface 3 * Linux Kernel net device interface
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* The functions required for a Linux network device are defined here. 47 * The functions required for a Linux network device are defined here.
48* 48 *
49* -------------------------------------------------------------------- 49 * --------------------------------------------------------------------
50*/ 50 */
51 51
52#include <linux/module.h> 52#include <linux/module.h>
53#include <linux/kernel.h> 53#include <linux/kernel.h>
@@ -112,17 +112,18 @@ module_param(wlan_wext_write, int, 0644);
112MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions"); 112MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
113 113
114/*---------------------------------------------------------------- 114/*----------------------------------------------------------------
115* p80211knetdev_init 115 * p80211knetdev_init
116* 116 *
117* Init method for a Linux netdevice. Called in response to 117 * Init method for a Linux netdevice. Called in response to
118* register_netdev. 118 * register_netdev.
119* 119 *
120* Arguments: 120 * Arguments:
121* none 121 * none
122* 122 *
123* Returns: 123 * Returns:
124* nothing 124 * nothing
125----------------------------------------------------------------*/ 125 *----------------------------------------------------------------
126 */
126static int p80211knetdev_init(struct net_device *netdev) 127static int p80211knetdev_init(struct net_device *netdev)
127{ 128{
128 /* Called in response to register_netdev */ 129 /* Called in response to register_netdev */
@@ -133,19 +134,20 @@ static int p80211knetdev_init(struct net_device *netdev)
133} 134}
134 135
135/*---------------------------------------------------------------- 136/*----------------------------------------------------------------
136* p80211knetdev_open 137 * p80211knetdev_open
137* 138 *
138* Linux netdevice open method. Following a successful call here, 139 * Linux netdevice open method. Following a successful call here,
139* the device is supposed to be ready for tx and rx. In our 140 * the device is supposed to be ready for tx and rx. In our
140* situation that may not be entirely true due to the state of the 141 * situation that may not be entirely true due to the state of the
141* MAC below. 142 * MAC below.
142* 143 *
143* Arguments: 144 * Arguments:
144* netdev Linux network device structure 145 * netdev Linux network device structure
145* 146 *
146* Returns: 147 * Returns:
147* zero on success, non-zero otherwise 148 * zero on success, non-zero otherwise
148----------------------------------------------------------------*/ 149 *----------------------------------------------------------------
150 */
149static int p80211knetdev_open(struct net_device *netdev) 151static int p80211knetdev_open(struct net_device *netdev)
150{ 152{
151 int result = 0; /* success */ 153 int result = 0; /* success */
@@ -170,17 +172,18 @@ static int p80211knetdev_open(struct net_device *netdev)
170} 172}
171 173
172/*---------------------------------------------------------------- 174/*----------------------------------------------------------------
173* p80211knetdev_stop 175 * p80211knetdev_stop
174* 176 *
175* Linux netdevice stop (close) method. Following this call, 177 * Linux netdevice stop (close) method. Following this call,
176* no frames should go up or down through this interface. 178 * no frames should go up or down through this interface.
177* 179 *
178* Arguments: 180 * Arguments:
179* netdev Linux network device structure 181 * netdev Linux network device structure
180* 182 *
181* Returns: 183 * Returns:
182* zero on success, non-zero otherwise 184 * zero on success, non-zero otherwise
183----------------------------------------------------------------*/ 185 *----------------------------------------------------------------
186 */
184static int p80211knetdev_stop(struct net_device *netdev) 187static int p80211knetdev_stop(struct net_device *netdev)
185{ 188{
186 int result = 0; 189 int result = 0;
@@ -196,18 +199,19 @@ static int p80211knetdev_stop(struct net_device *netdev)
196} 199}
197 200
198/*---------------------------------------------------------------- 201/*----------------------------------------------------------------
199* p80211netdev_rx 202 * p80211netdev_rx
200* 203 *
201* Frame receive function called by the mac specific driver. 204 * Frame receive function called by the mac specific driver.
202* 205 *
203* Arguments: 206 * Arguments:
204* wlandev WLAN network device structure 207 * wlandev WLAN network device structure
205* skb skbuff containing a full 802.11 frame. 208 * skb skbuff containing a full 802.11 frame.
206* Returns: 209 * Returns:
207* nothing 210 * nothing
208* Side effects: 211 * Side effects:
209* 212 *
210----------------------------------------------------------------*/ 213 *----------------------------------------------------------------
214 */
211void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb) 215void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
212{ 216{
213 /* Enqueue for post-irq processing */ 217 /* Enqueue for post-irq processing */
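
p80211netdev_rx() deliberately does almost nothing in the caller's (possibly interrupt) context: it queues the skb on wlandev->nsd_rxq and defers the 802.11-to-ethernet conversion to p80211netdev_rx_bh(), the bottom half visible in the next hunk. A minimal sketch of that split; the tasklet member name rx_bh is assumed here rather than quoted from the source:

    /* hot path, may run in interrupt context: queue and kick the bottom half */
    skb_queue_tail(&wlandev->nsd_rxq, skb);
    tasklet_schedule(&wlandev->rx_bh);

    /* bottom half, runs later: drain the queue and convert each frame */
    while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
        /* p80211_convert_to_ether() / netif_rx() happen here */
    }
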
@@ -227,7 +231,8 @@ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
227 * CONV_TO_ETHER_FAILED if conversion failed 231 * CONV_TO_ETHER_FAILED if conversion failed
228 * CONV_TO_ETHER_SKIPPED if frame is ignored 232 * CONV_TO_ETHER_SKIPPED if frame is ignored
229 */ 233 */
230static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb) 234static int p80211_convert_to_ether(struct wlandevice *wlandev,
235 struct sk_buff *skb)
231{ 236{
232 struct p80211_hdr_a3 *hdr; 237 struct p80211_hdr_a3 *hdr;
233 238
@@ -272,7 +277,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
272 /* Let's empty our our queue */ 277 /* Let's empty our our queue */
273 while ((skb = skb_dequeue(&wlandev->nsd_rxq))) { 278 while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
274 if (wlandev->state == WLAN_DEVICE_OPEN) { 279 if (wlandev->state == WLAN_DEVICE_OPEN) {
275
276 if (dev->type != ARPHRD_ETHER) { 280 if (dev->type != ARPHRD_ETHER) {
277 /* RAW frame; we shouldn't convert it */ 281 /* RAW frame; we shouldn't convert it */
278 /* XXX Append the Prism Header here instead. */ 282 /* XXX Append the Prism Header here instead. */
@@ -299,24 +303,25 @@ static void p80211netdev_rx_bh(unsigned long arg)
299} 303}
300 304
301/*---------------------------------------------------------------- 305/*----------------------------------------------------------------
302* p80211knetdev_hard_start_xmit 306 * p80211knetdev_hard_start_xmit
303* 307 *
304* Linux netdevice method for transmitting a frame. 308 * Linux netdevice method for transmitting a frame.
305* 309 *
306* Arguments: 310 * Arguments:
307* skb Linux sk_buff containing the frame. 311 * skb Linux sk_buff containing the frame.
308* netdev Linux netdevice. 312 * netdev Linux netdevice.
309* 313 *
310* Side effects: 314 * Side effects:
311* If the lower layers report that buffers are full. netdev->tbusy 315 * If the lower layers report that buffers are full. netdev->tbusy
312* will be set to prevent higher layers from sending more traffic. 316 * will be set to prevent higher layers from sending more traffic.
313* 317 *
314* Note: If this function returns non-zero, higher layers retain 318 * Note: If this function returns non-zero, higher layers retain
315* ownership of the skb. 319 * ownership of the skb.
316* 320 *
317* Returns: 321 * Returns:
318* zero on success, non-zero on failure. 322 * zero on success, non-zero on failure.
319----------------------------------------------------------------*/ 323 *----------------------------------------------------------------
324 */
320static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, 325static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
321 struct net_device *netdev) 326 struct net_device *netdev)
322{ 327{
@@ -336,8 +341,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
336 goto failed; 341 goto failed;
337 } 342 }
338 343
339 memset(&p80211_hdr, 0, sizeof(union p80211_hdr)); 344 memset(&p80211_hdr, 0, sizeof(p80211_hdr));
340 memset(&p80211_wep, 0, sizeof(struct p80211_metawep)); 345 memset(&p80211_wep, 0, sizeof(p80211_wep));
341 346
342 if (netif_queue_stopped(netdev)) { 347 if (netif_queue_stopped(netdev)) {
343 netdev_dbg(netdev, "called when queue stopped.\n"); 348 netdev_dbg(netdev, "called when queue stopped.\n");
@@ -375,8 +380,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
375 goto failed; 380 goto failed;
376 } 381 }
377 /* move the header over */ 382 /* move the header over */
378 memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr)); 383 memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
379 skb_pull(skb, sizeof(union p80211_hdr)); 384 skb_pull(skb, sizeof(p80211_hdr));
380 } else { 385 } else {
381 if (skb_ether_to_p80211 386 if (skb_ether_to_p80211
382 (wlandev, wlandev->ethconv, skb, &p80211_hdr, 387 (wlandev, wlandev->ethconv, skb, &p80211_hdr,
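
The memset()/memcpy()/skb_pull() changes in this hunk are all one cleanup: size the operation by the object rather than by a repeated type name, so the size can never drift from the declaration. The resulting pattern, as in the new lines above:

    union p80211_hdr p80211_hdr;
    struct p80211_metawep p80211_wep;

    memset(&p80211_hdr, 0, sizeof(p80211_hdr));
    memset(&p80211_wep, 0, sizeof(p80211_wep));
    /* raw-frame path: copy the 802.11 header out of the skb, then strip it */
    memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
    skb_pull(skb, sizeof(p80211_hdr));
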
@@ -435,17 +440,18 @@ failed:
435} 440}
436 441
437/*---------------------------------------------------------------- 442/*----------------------------------------------------------------
438* p80211knetdev_set_multicast_list 443 * p80211knetdev_set_multicast_list
439* 444 *
440* Called from higher layers whenever there's a need to set/clear 445 * Called from higher layers whenever there's a need to set/clear
441* promiscuous mode or rewrite the multicast list. 446 * promiscuous mode or rewrite the multicast list.
442* 447 *
443* Arguments: 448 * Arguments:
444* none 449 * none
445* 450 *
446* Returns: 451 * Returns:
447* nothing 452 * nothing
448----------------------------------------------------------------*/ 453 *----------------------------------------------------------------
454 */
449static void p80211knetdev_set_multicast_list(struct net_device *dev) 455static void p80211knetdev_set_multicast_list(struct net_device *dev)
450{ 456{
451 struct wlandevice *wlandev = dev->ml_priv; 457 struct wlandevice *wlandev = dev->ml_priv;
@@ -454,12 +460,12 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
454 460
455 if (wlandev->set_multicast_list) 461 if (wlandev->set_multicast_list)
456 wlandev->set_multicast_list(wlandev, dev); 462 wlandev->set_multicast_list(wlandev, dev);
457
458} 463}
459 464
460#ifdef SIOCETHTOOL 465#ifdef SIOCETHTOOL
461 466
462static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr) 467static int p80211netdev_ethtool(struct wlandevice *wlandev,
468 void __user *useraddr)
463{ 469{
464 u32 ethcmd; 470 u32 ethcmd;
465 struct ethtool_drvinfo info; 471 struct ethtool_drvinfo info;
@@ -505,33 +511,35 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useradd
505#endif 511#endif
506 512
507/*---------------------------------------------------------------- 513/*----------------------------------------------------------------
508* p80211knetdev_do_ioctl 514 * p80211knetdev_do_ioctl
509* 515 *
510* Handle an ioctl call on one of our devices. Everything Linux 516 * Handle an ioctl call on one of our devices. Everything Linux
511* ioctl specific is done here. Then we pass the contents of the 517 * ioctl specific is done here. Then we pass the contents of the
512* ifr->data to the request message handler. 518 * ifr->data to the request message handler.
513* 519 *
514* Arguments: 520 * Arguments:
515* dev Linux kernel netdevice 521 * dev Linux kernel netdevice
516* ifr Our private ioctl request structure, typed for the 522 * ifr Our private ioctl request structure, typed for the
517* generic struct ifreq so we can use ptr to func 523 * generic struct ifreq so we can use ptr to func
518* w/o cast. 524 * w/o cast.
519* 525 *
520* Returns: 526 * Returns:
521* zero on success, a negative errno on failure. Possible values: 527 * zero on success, a negative errno on failure. Possible values:
522* -ENETDOWN Device isn't up. 528 * -ENETDOWN Device isn't up.
523* -EBUSY cmd already in progress 529 * -EBUSY cmd already in progress
524* -ETIME p80211 cmd timed out (MSD may have its own timers) 530 * -ETIME p80211 cmd timed out (MSD may have its own timers)
525* -EFAULT memory fault copying msg from user buffer 531 * -EFAULT memory fault copying msg from user buffer
526* -ENOMEM unable to allocate kernel msg buffer 532 * -ENOMEM unable to allocate kernel msg buffer
527* -ENOSYS bad magic, it the cmd really for us? 533 * -EINVAL bad magic, it the cmd really for us?
528* -EintR sleeping on cmd, awakened by signal, cmd cancelled. 534 * -EintR sleeping on cmd, awakened by signal, cmd cancelled.
529* 535 *
530* Call Context: 536 * Call Context:
531* Process thread (ioctl caller). TODO: SMP support may require 537 * Process thread (ioctl caller). TODO: SMP support may require
532* locks. 538 * locks.
533----------------------------------------------------------------*/ 539 *----------------------------------------------------------------
534static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 540 */
541static int p80211knetdev_do_ioctl(struct net_device *dev,
542 struct ifreq *ifr, int cmd)
535{ 543{
536 int result = 0; 544 int result = 0;
537 struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr; 545 struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
@@ -550,7 +558,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
550 558
551 /* Test the magic, assume ifr is good if it's there */ 559 /* Test the magic, assume ifr is good if it's there */
552 if (req->magic != P80211_IOCTL_MAGIC) { 560 if (req->magic != P80211_IOCTL_MAGIC) {
553 result = -ENOSYS; 561 result = -EINVAL;
554 goto bail; 562 goto bail;
555 } 563 }
556 564
@@ -558,7 +566,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
558 result = 0; 566 result = 0;
559 goto bail; 567 goto bail;
560 } else if (cmd != P80211_IFREQ) { 568 } else if (cmd != P80211_IFREQ) {
561 result = -ENOSYS; 569 result = -EINVAL;
562 goto bail; 570 goto bail;
563 } 571 }
564 572
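
The -ENOSYS to -EINVAL changes above follow the kernel convention that -ENOSYS is reserved for genuinely missing system calls; a device ioctl that receives a request it does not recognize should answer -EINVAL (or -ENOTTY) instead. The resulting check, condensed from the hunk (the real code stores the value in result and jumps to bail, and handles SIOCETHTOOL separately):

    struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;

    if (req->magic != P80211_IOCTL_MAGIC)      /* not one of our requests */
        return -EINVAL;
    if (cmd != P80211_IFREQ)                   /* wrong ioctl number      */
        return -EINVAL;
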
@@ -586,30 +594,31 @@ bail:
586} 594}
587 595
588/*---------------------------------------------------------------- 596/*----------------------------------------------------------------
589* p80211knetdev_set_mac_address 597 * p80211knetdev_set_mac_address
590* 598 *
591* Handles the ioctl for changing the MACAddress of a netdevice 599 * Handles the ioctl for changing the MACAddress of a netdevice
592* 600 *
593* references: linux/netdevice.h and drivers/net/net_init.c 601 * references: linux/netdevice.h and drivers/net/net_init.c
594* 602 *
595* NOTE: [MSM] We only prevent address changes when the netdev is 603 * NOTE: [MSM] We only prevent address changes when the netdev is
596* up. We don't control anything based on dot11 state. If the 604 * up. We don't control anything based on dot11 state. If the
597* address is changed on a STA that's currently associated, you 605 * address is changed on a STA that's currently associated, you
598* will probably lose the ability to send and receive data frames. 606 * will probably lose the ability to send and receive data frames.
599* Just be aware. Therefore, this should usually only be done 607 * Just be aware. Therefore, this should usually only be done
600* prior to scan/join/auth/assoc. 608 * prior to scan/join/auth/assoc.
601* 609 *
602* Arguments: 610 * Arguments:
603* dev netdevice struct 611 * dev netdevice struct
604* addr the new MACAddress (a struct) 612 * addr the new MACAddress (a struct)
605* 613 *
606* Returns: 614 * Returns:
607* zero on success, a negative errno on failure. Possible values: 615 * zero on success, a negative errno on failure. Possible values:
608* -EBUSY device is bussy (cmd not possible) 616 * -EBUSY device is bussy (cmd not possible)
609* -and errors returned by: p80211req_dorequest(..) 617 * -and errors returned by: p80211req_dorequest(..)
610* 618 *
611* by: Collin R. Mulliner <collin@mulliner.org> 619 * by: Collin R. Mulliner <collin@mulliner.org>
612----------------------------------------------------------------*/ 620 *----------------------------------------------------------------
621 */
613static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr) 622static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
614{ 623{
615 struct sockaddr *new_addr = addr; 624 struct sockaddr *new_addr = addr;
@@ -629,9 +638,9 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
629 resultcode = &dot11req.resultcode; 638 resultcode = &dot11req.resultcode;
630 639
631 /* Set up a dot11req_mibset */ 640 /* Set up a dot11req_mibset */
632 memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset)); 641 memset(&dot11req, 0, sizeof(dot11req));
633 dot11req.msgcode = DIDmsg_dot11req_mibset; 642 dot11req.msgcode = DIDmsg_dot11req_mibset;
634 dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset); 643 dot11req.msglen = sizeof(dot11req);
635 memcpy(dot11req.devname, 644 memcpy(dot11req.devname,
636 ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1); 645 ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
637 646
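
Changing the MAC address goes through the same message interface described in p80211ioctl.h: the handler builds a dot11req_mibset message and submits it via p80211req_dorequest(), which the comment above already names as the source of any additional error codes. A rough sketch; the mibattribute setup is elided and the submit call's exact argument types are assumed, not quoted:

    struct p80211msg_dot11req_mibset dot11req;

    memset(&dot11req, 0, sizeof(dot11req));
    dot11req.msgcode = DIDmsg_dot11req_mibset;
    dot11req.msglen = sizeof(dot11req);
    memcpy(dot11req.devname,
           ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
    /* ... copy the new address into the mibset item, then submit ... */
    result = p80211req_dorequest(dev->ml_priv, (u8 *)&dot11req);
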
@@ -682,28 +691,29 @@ static const struct net_device_ops p80211_netdev_ops = {
682}; 691};
683 692
684/*---------------------------------------------------------------- 693/*----------------------------------------------------------------
685* wlan_setup 694 * wlan_setup
686* 695 *
687* Roughly matches the functionality of ether_setup. Here 696 * Roughly matches the functionality of ether_setup. Here
688* we set up any members of the wlandevice structure that are common 697 * we set up any members of the wlandevice structure that are common
689* to all devices. Additionally, we allocate a linux 'struct device' 698 * to all devices. Additionally, we allocate a linux 'struct device'
690* and perform the same setup as ether_setup. 699 * and perform the same setup as ether_setup.
691* 700 *
692* Note: It's important that the caller have set up the wlandev->name 701 * Note: It's important that the caller have set up the wlandev->name
693* ptr prior to calling this function. 702 * ptr prior to calling this function.
694* 703 *
695* Arguments: 704 * Arguments:
696* wlandev ptr to the wlandev structure for the 705 * wlandev ptr to the wlandev structure for the
697* interface. 706 * interface.
698* physdev ptr to usb device 707 * physdev ptr to usb device
699* Returns: 708 * Returns:
700* zero on success, non-zero otherwise. 709 * zero on success, non-zero otherwise.
701* Call Context: 710 * Call Context:
702* Should be process thread. We'll assume it might be 711 * Should be process thread. We'll assume it might be
703* interrupt though. When we add support for statically 712 * interrupt though. When we add support for statically
704* compiled drivers, this function will be called in the 713 * compiled drivers, this function will be called in the
705* context of the kernel startup code. 714 * context of the kernel startup code.
706----------------------------------------------------------------*/ 715 *----------------------------------------------------------------
716 */
707int wlan_setup(struct wlandevice *wlandev, struct device *physdev) 717int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
708{ 718{
709 int result = 0; 719 int result = 0;
@@ -757,24 +767,25 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
757} 767}
758 768
759/*---------------------------------------------------------------- 769/*----------------------------------------------------------------
760* wlan_unsetup 770 * wlan_unsetup
761* 771 *
762* This function is paired with the wlan_setup routine. It should 772 * This function is paired with the wlan_setup routine. It should
763* be called after unregister_wlandev. Basically, all it does is 773 * be called after unregister_wlandev. Basically, all it does is
764* free the 'struct device' that's associated with the wlandev. 774 * free the 'struct device' that's associated with the wlandev.
765* We do it here because the 'struct device' isn't allocated 775 * We do it here because the 'struct device' isn't allocated
766* explicitly in the driver code, it's done in wlan_setup. To 776 * explicitly in the driver code, it's done in wlan_setup. To
767* do the free in the driver might seem like 'magic'. 777 * do the free in the driver might seem like 'magic'.
768* 778 *
769* Arguments: 779 * Arguments:
770* wlandev ptr to the wlandev structure for the 780 * wlandev ptr to the wlandev structure for the
771* interface. 781 * interface.
772* Call Context: 782 * Call Context:
773* Should be process thread. We'll assume it might be 783 * Should be process thread. We'll assume it might be
774* interrupt though. When we add support for statically 784 * interrupt though. When we add support for statically
775* compiled drivers, this function will be called in the 785 * compiled drivers, this function will be called in the
776* context of the kernel startup code. 786 * context of the kernel startup code.
777----------------------------------------------------------------*/ 787 *----------------------------------------------------------------
788 */
778void wlan_unsetup(struct wlandevice *wlandev) 789void wlan_unsetup(struct wlandevice *wlandev)
779{ 790{
780 struct wireless_dev *wdev; 791 struct wireless_dev *wdev;
@@ -791,46 +802,48 @@ void wlan_unsetup(struct wlandevice *wlandev)
791} 802}
792 803
793/*---------------------------------------------------------------- 804/*----------------------------------------------------------------
794* register_wlandev 805 * register_wlandev
795* 806 *
796* Roughly matches the functionality of register_netdev. This function 807 * Roughly matches the functionality of register_netdev. This function
797* is called after the driver has successfully probed and set up the 808 * is called after the driver has successfully probed and set up the
798* resources for the device. It's now ready to become a named device 809 * resources for the device. It's now ready to become a named device
799* in the Linux system. 810 * in the Linux system.
800* 811 *
801* First we allocate a name for the device (if not already set), then 812 * First we allocate a name for the device (if not already set), then
802* we call the Linux function register_netdevice. 813 * we call the Linux function register_netdevice.
803* 814 *
804* Arguments: 815 * Arguments:
805* wlandev ptr to the wlandev structure for the 816 * wlandev ptr to the wlandev structure for the
806* interface. 817 * interface.
807* Returns: 818 * Returns:
808* zero on success, non-zero otherwise. 819 * zero on success, non-zero otherwise.
809* Call Context: 820 * Call Context:
810* Can be either interrupt or not. 821 * Can be either interrupt or not.
811----------------------------------------------------------------*/ 822 *----------------------------------------------------------------
823 */
812int register_wlandev(struct wlandevice *wlandev) 824int register_wlandev(struct wlandevice *wlandev)
813{ 825{
814 return register_netdev(wlandev->netdev); 826 return register_netdev(wlandev->netdev);
815} 827}
816 828
817/*---------------------------------------------------------------- 829/*----------------------------------------------------------------
818* unregister_wlandev 830 * unregister_wlandev
819* 831 *
820* Roughly matches the functionality of unregister_netdev. This 832 * Roughly matches the functionality of unregister_netdev. This
821* function is called to remove a named device from the system. 833 * function is called to remove a named device from the system.
822* 834 *
823* First we tell linux that the device should no longer exist. 835 * First we tell linux that the device should no longer exist.
824* Then we remove it from the list of known wlan devices. 836 * Then we remove it from the list of known wlan devices.
825* 837 *
826* Arguments: 838 * Arguments:
827* wlandev ptr to the wlandev structure for the 839 * wlandev ptr to the wlandev structure for the
828* interface. 840 * interface.
829* Returns: 841 * Returns:
830* zero on success, non-zero otherwise. 842 * zero on success, non-zero otherwise.
831* Call Context: 843 * Call Context:
832* Can be either interrupt or not. 844 * Can be either interrupt or not.
833----------------------------------------------------------------*/ 845 *----------------------------------------------------------------
846 */
834int unregister_wlandev(struct wlandevice *wlandev) 847int unregister_wlandev(struct wlandevice *wlandev)
835{ 848{
836 struct sk_buff *skb; 849 struct sk_buff *skb;
@@ -845,35 +858,36 @@ int unregister_wlandev(struct wlandevice *wlandev)
845} 858}
846 859
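
Taken together, wlan_setup/wlan_unsetup and register_wlandev/unregister_wlandev imply a simple probe/disconnect ordering. A hedged sketch of that lifecycle (error handling trimmed; the usb_interface argument is illustrative, only the driver's helper names are real):

static int example_probe(struct usb_interface *iface,
			 struct wlandevice *wlandev)
{
	int err;

	/* wlandev->name must already be set before wlan_setup() */
	err = wlan_setup(wlandev, &iface->dev);
	if (err)
		return err;

	return register_wlandev(wlandev);
}

static void example_disconnect(struct wlandevice *wlandev)
{
	unregister_wlandev(wlandev);
	wlan_unsetup(wlandev);		/* frees what wlan_setup() allocated */
}
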
847/*---------------------------------------------------------------- 860/*----------------------------------------------------------------
848* p80211netdev_hwremoved 861 * p80211netdev_hwremoved
849* 862 *
850* Hardware removed notification. This function should be called 863 * Hardware removed notification. This function should be called
851* immediately after an MSD has detected that the underlying hardware 864 * immediately after an MSD has detected that the underlying hardware
852* has been yanked out from under us. The primary things we need 865 * has been yanked out from under us. The primary things we need
853* to do are: 866 * to do are:
854* - Mark the wlandev 867 * - Mark the wlandev
855* - Prevent any further traffic from the knetdev i/f 868 * - Prevent any further traffic from the knetdev i/f
856* - Prevent any further requests from mgmt i/f 869 * - Prevent any further requests from mgmt i/f
857* - If there are any waitq'd mgmt requests or mgmt-frame exchanges, 870 * - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
858* shut them down. 871 * shut them down.
859* - Call the MSD hwremoved function. 872 * - Call the MSD hwremoved function.
860* 873 *
861* The remainder of the cleanup will be handled by unregister(). 874 * The remainder of the cleanup will be handled by unregister().
862* Our primary goal here is to prevent as much tickling of the MSD 875 * Our primary goal here is to prevent as much tickling of the MSD
863* as possible since the MSD is already in a 'wounded' state. 876 * as possible since the MSD is already in a 'wounded' state.
864* 877 *
865* TODO: As new features are added, this function should be 878 * TODO: As new features are added, this function should be
866* updated. 879 * updated.
867* 880 *
868* Arguments: 881 * Arguments:
869* wlandev WLAN network device structure 882 * wlandev WLAN network device structure
870* Returns: 883 * Returns:
871* nothing 884 * nothing
872* Side effects: 885 * Side effects:
873* 886 *
874* Call context: 887 * Call context:
875* Usually interrupt. 888 * Usually interrupt.
876----------------------------------------------------------------*/ 889 *----------------------------------------------------------------
890 */
877void p80211netdev_hwremoved(struct wlandevice *wlandev) 891void p80211netdev_hwremoved(struct wlandevice *wlandev)
878{ 892{
879 wlandev->hwremoved = 1; 893 wlandev->hwremoved = 1;
@@ -884,26 +898,27 @@ void p80211netdev_hwremoved(struct wlandevice *wlandev)
884} 898}
885 899
886/*---------------------------------------------------------------- 900/*----------------------------------------------------------------
887* p80211_rx_typedrop 901 * p80211_rx_typedrop
888* 902 *
889* Classifies the frame, increments the appropriate counter, and 903 * Classifies the frame, increments the appropriate counter, and
890* returns 0|1|2 indicating whether the driver should handle, ignore, or 904 * returns 0|1|2 indicating whether the driver should handle, ignore, or
891* drop the frame 905 * drop the frame
892* 906 *
893* Arguments: 907 * Arguments:
894* wlandev wlan device structure 908 * wlandev wlan device structure
895* fc frame control field 909 * fc frame control field
896* 910 *
897* Returns: 911 * Returns:
898* zero if the frame should be handled by the driver, 912 * zero if the frame should be handled by the driver,
899* one if the frame should be ignored 913 * one if the frame should be ignored
900* anything else means we drop it. 914 * anything else means we drop it.
901* 915 *
902* Side effects: 916 * Side effects:
903* 917 *
904* Call context: 918 * Call context:
905* interrupt 919 * interrupt
906----------------------------------------------------------------*/ 920 *----------------------------------------------------------------
921 */
907static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc) 922static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
908{ 923{
909 u16 ftype; 924 u16 ftype;
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1e6a774fc7c5..8e0d08298c8b 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,54 +1,54 @@
1/* p80211netdev.h 1/* p80211netdev.h
2* 2 *
3* WLAN net device structure and functions 3 * WLAN net device structure and functions
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file declares the structure type that represents each wlan 47 * This file declares the structure type that represents each wlan
48* interface. 48 * interface.
49* 49 *
50* -------------------------------------------------------------------- 50 * --------------------------------------------------------------------
51*/ 51 */
52 52
53#ifndef _LINUX_P80211NETDEV_H 53#ifndef _LINUX_P80211NETDEV_H
54#define _LINUX_P80211NETDEV_H 54#define _LINUX_P80211NETDEV_H
@@ -143,7 +143,7 @@ extern struct iw_handler_def p80211wext_handler_def;
143#define NUM_WEPKEYS 4 143#define NUM_WEPKEYS 4
144#define MAX_KEYLEN 32 144#define MAX_KEYLEN 32
145 145
146#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0)) 146#define HOSTWEP_DEFAULTKEY_MASK GENMASK(1, 0)
147#define HOSTWEP_SHAREDKEY BIT(3) 147#define HOSTWEP_SHAREDKEY BIT(3)
148#define HOSTWEP_DECRYPT BIT(4) 148#define HOSTWEP_DECRYPT BIT(4)
149#define HOSTWEP_ENCRYPT BIT(5) 149#define HOSTWEP_ENCRYPT BIT(5)
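
The GENMASK(1, 0) form above names the same two-bit mask as BIT(1)|BIT(0), just expressed as a contiguous range. A self-contained check with local macro names (simplified; the kernel's own GENMASK also handles full-register-width masks):

#define EX_BIT(n)        (1UL << (n))
#define EX_GENMASK(h, l) (((1UL << ((h) - (l) + 1)) - 1) << (l))

_Static_assert(EX_GENMASK(1, 0) == (EX_BIT(1) | EX_BIT(0)),
	       "GENMASK(1, 0) and BIT(1)|BIT(0) are the same 0x3 mask");
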
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index d43e85b5d49b..621df98183bf 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,54 +1,54 @@
1/* src/p80211/p80211req.c 1/* src/p80211/p80211req.c
2* 2 *
3* Request/Indication/MacMgmt interface handling functions 3 * Request/Indication/MacMgmt interface handling functions
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file contains the functions, types, and macros to support the 47 * This file contains the functions, types, and macros to support the
48* MLME request interface that's implemented via the device ioctls. 48 * MLME request interface that's implemented via the device ioctls.
49* 49 *
50* -------------------------------------------------------------------- 50 * --------------------------------------------------------------------
51*/ 51 */
52 52
53#include <linux/module.h> 53#include <linux/module.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
@@ -72,10 +72,11 @@
72#include "p80211metastruct.h" 72#include "p80211metastruct.h"
73#include "p80211req.h" 73#include "p80211req.h"
74 74
75static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg); 75static void p80211req_handlemsg(struct wlandevice *wlandev,
76 struct p80211msg *msg);
76static void p80211req_mibset_mibget(struct wlandevice *wlandev, 77static void p80211req_mibset_mibget(struct wlandevice *wlandev,
77 struct p80211msg_dot11req_mibget *mib_msg, 78 struct p80211msg_dot11req_mibget *mib_msg,
78 int isget); 79 int isget);
79 80
80static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data, 81static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
81 int isget, u32 flag) 82 int isget, u32 flag)
@@ -93,21 +94,22 @@ static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
93} 94}
94 95
95/*---------------------------------------------------------------- 96/*----------------------------------------------------------------
96* p80211req_dorequest 97 * p80211req_dorequest
97* 98 *
98* Handles an MLME request/confirm message. 99 * Handles an MLME request/confirm message.
99* 100 *
100* Arguments: 101 * Arguments:
101* wlandev WLAN device struct 102 * wlandev WLAN device struct
102* msgbuf Buffer containing a request message 103 * msgbuf Buffer containing a request message
103* 104 *
104* Returns: 105 * Returns:
105* 0 on success, an errno otherwise 106 * 0 on success, an errno otherwise
106* 107 *
107* Call context: 108 * Call context:
108* Potentially blocks the caller, so it's a good idea to 109 * Potentially blocks the caller, so it's a good idea to
109* not call this function from an interrupt context. 110 * not call this function from an interrupt context.
110----------------------------------------------------------------*/ 111 *----------------------------------------------------------------
112 */
111int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf) 113int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
112{ 114{
113 struct p80211msg *msg = (struct p80211msg *)msgbuf; 115 struct p80211msg *msg = (struct p80211msg *)msgbuf;
@@ -122,7 +124,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
122 124
123 /* Check Permissions */ 125 /* Check Permissions */
124 if (!capable(CAP_NET_ADMIN) && 126 if (!capable(CAP_NET_ADMIN) &&
125 (msg->msgcode != DIDmsg_dot11req_mibget)) { 127 (msg->msgcode != DIDmsg_dot11req_mibget)) {
126 netdev_err(wlandev->netdev, 128 netdev_err(wlandev->netdev,
127 "%s: only dot11req_mibget allowed for non-root.\n", 129 "%s: only dot11req_mibget allowed for non-root.\n",
128 wlandev->name); 130 wlandev->name);
@@ -130,7 +132,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
130 } 132 }
131 133
132 /* Check for busy status */ 134 /* Check for busy status */
133 if (test_and_set_bit(1, &(wlandev->request_pending))) 135 if (test_and_set_bit(1, &wlandev->request_pending))
134 return -EBUSY; 136 return -EBUSY;
135 137
136 /* Allow p80211 to look at msg and handle if desired. */ 138 /* Allow p80211 to look at msg and handle if desired. */
@@ -139,35 +141,36 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
139 p80211req_handlemsg(wlandev, msg); 141 p80211req_handlemsg(wlandev, msg);
140 142
141 /* Pass it down to wlandev via wlandev->mlmerequest */ 143 /* Pass it down to wlandev via wlandev->mlmerequest */
142 if (wlandev->mlmerequest != NULL) 144 if (wlandev->mlmerequest)
143 wlandev->mlmerequest(wlandev, msg); 145 wlandev->mlmerequest(wlandev, msg);
144 146
145 clear_bit(1, &(wlandev->request_pending)); 147 clear_bit(1, &wlandev->request_pending);
146 return 0; /* if result==0, msg->status still may contain an err */ 148 return 0; /* if result==0, msg->status still may contain an err */
147} 149}
148 150
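
The request_pending handling above is a one-bit mutual exclusion: test_and_set_bit() atomically sets bit 1 and returns its previous value, so only one request can be in flight at a time. A hedged sketch of the pattern (struct and field names follow the driver, body trimmed):

#include <linux/bitops.h>

static int example_dorequest(struct wlandevice *wlandev,
			     struct p80211msg *msg)
{
	if (test_and_set_bit(1, &wlandev->request_pending))
		return -EBUSY;		/* another request already owns the slot */

	/* ...check permissions, then hand msg to wlandev->mlmerequest... */

	clear_bit(1, &wlandev->request_pending);
	return 0;
}
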
149/*---------------------------------------------------------------- 151/*----------------------------------------------------------------
150* p80211req_handlemsg 152 * p80211req_handlemsg
151* 153 *
152* p80211 message handler. Primarily looks for messages that 154 * p80211 message handler. Primarily looks for messages that
153* belong to p80211 and then dispatches the appropriate response. 155 * belong to p80211 and then dispatches the appropriate response.
154* TODO: we don't do anything yet. Once the linuxMIB is better 156 * TODO: we don't do anything yet. Once the linuxMIB is better
155* defined we'll need a get/set handler. 157 * defined we'll need a get/set handler.
156* 158 *
157* Arguments: 159 * Arguments:
158* wlandev WLAN device struct 160 * wlandev WLAN device struct
159* msg message structure 161 * msg message structure
160* 162 *
161* Returns: 163 * Returns:
162* nothing (any results are set in the status field of the msg) 164 * nothing (any results are set in the status field of the msg)
163* 165 *
164* Call context: 166 * Call context:
165* Process thread 167 * Process thread
166----------------------------------------------------------------*/ 168 *----------------------------------------------------------------
167static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg) 169 */
170static void p80211req_handlemsg(struct wlandevice *wlandev,
171 struct p80211msg *msg)
168{ 172{
169 switch (msg->msgcode) { 173 switch (msg->msgcode) {
170
171 case DIDmsg_lnxreq_hostwep:{ 174 case DIDmsg_lnxreq_hostwep:{
172 struct p80211msg_lnxreq_hostwep *req = 175 struct p80211msg_lnxreq_hostwep *req =
173 (struct p80211msg_lnxreq_hostwep *)msg; 176 (struct p80211msg_lnxreq_hostwep *)msg;
@@ -192,8 +195,8 @@ static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *ms
192} 195}
193 196
194static void p80211req_mibset_mibget(struct wlandevice *wlandev, 197static void p80211req_mibset_mibget(struct wlandevice *wlandev,
195 struct p80211msg_dot11req_mibget *mib_msg, 198 struct p80211msg_dot11req_mibget *mib_msg,
196 int isget) 199 int isget)
197{ 200{
198 struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data; 201 struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
199 struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data; 202 struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 8d3054c22a05..6c72f59993e0 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,49 +1,49 @@
1/* p80211req.h 1/* p80211req.h
2* 2 *
3* Request handling functions 3 * Request handling functions
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46*/ 46 */
47 47
48#ifndef _LINUX_P80211REQ_H 48#ifndef _LINUX_P80211REQ_H
49#define _LINUX_P80211REQ_H 49#define _LINUX_P80211REQ_H
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 23b183738037..6492ffe59085 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,49 +1,49 @@
1/* src/p80211/p80211wep.c 1/* src/p80211/p80211wep.c
2* 2 *
3* WEP encode/decode for P80211. 3 * WEP encode/decode for P80211.
4* 4 *
5* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46*/ 46 */
47 47
48/*================================================================*/ 48/*================================================================*/
49/* System Includes */ 49/* System Includes */
@@ -52,8 +52,6 @@
52#include <linux/wireless.h> 52#include <linux/wireless.h>
53#include <linux/random.h> 53#include <linux/random.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
55
56
57#include "p80211hdr.h" 55#include "p80211hdr.h"
58#include "p80211types.h" 56#include "p80211types.h"
59#include "p80211msg.h" 57#include "p80211msg.h"
@@ -125,14 +123,13 @@ int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
125 return -1; 123 return -1;
126 if (keylen >= MAX_KEYLEN) 124 if (keylen >= MAX_KEYLEN)
127 return -1; 125 return -1;
128 if (key == NULL) 126 if (!key)
129 return -1; 127 return -1;
130 if (keynum < 0) 128 if (keynum < 0)
131 return -1; 129 return -1;
132 if (keynum >= NUM_WEPKEYS) 130 if (keynum >= NUM_WEPKEYS)
133 return -1; 131 return -1;
134 132
135
136 wlandev->wep_keylens[keynum] = keylen; 133 wlandev->wep_keylens[keynum] = keylen;
137 memcpy(wlandev->wep_keys[keynum], key, keylen); 134 memcpy(wlandev->wep_keys[keynum], key, keylen);
138 135
@@ -176,7 +173,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
176 173
177 keylen += 3; /* add in IV bytes */ 174 keylen += 3; /* add in IV bytes */
178 175
179
180 /* set up the RC4 state */ 176 /* set up the RC4 state */
181 for (i = 0; i < 256; i++) 177 for (i = 0; i < 256; i++)
182 s[i] = i; 178 s[i] = i;
@@ -217,8 +213,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
217} 213}
218 214
219/* encrypts in-place. */ 215/* encrypts in-place. */
220int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, 216int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
221 u8 *iv, u8 *icv) 217 u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv)
222{ 218{
223 u32 i, j, k, crc, keylen; 219 u32 i, j, k, crc, keylen;
224 u8 s[256], key[64]; 220 u8 s[256], key[64];
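
For context, the "set up the RC4 state" loops that wep_encrypt()/wep_decrypt() perform inline amount to the standard RC4 key schedule run over the concatenated IV and WEP key. A standalone sketch of that schedule (illustrative, not the driver's code):

static void rc4_ksa(unsigned char s[256], const unsigned char *key, int keylen)
{
	int i, j = 0;
	unsigned char tmp;

	for (i = 0; i < 256; i++)	/* identity-initialise the state */
		s[i] = i;

	for (i = 0; i < 256; i++) {	/* key-driven swaps over (IV || key) */
		j = (j + s[i] + key[i % keylen]) & 0xff;
		tmp = s[i];
		s[i] = s[j];
		s[j] = tmp;
	}
}
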
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 96aa21188669..2e349f87e738 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1,49 +1,49 @@
1/* from src/prism2/download/prism2dl.c 1/* from src/prism2/download/prism2dl.c
2* 2 *
3* utility for downloading prism2 images moved into kernelspace 3 * utility for downloading prism2 images moved into kernelspace
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46*/ 46 */
47 47
48/*================================================================*/ 48/*================================================================*/
49/* System Includes */ 49/* System Includes */
@@ -124,7 +124,7 @@ struct imgchunk {
124 124
125/* Data records */ 125/* Data records */
126static unsigned int ns3data; 126static unsigned int ns3data;
127static struct s3datarec s3data[S3DATA_MAX]; 127static struct s3datarec *s3data;
128 128
129/* Plug records */ 129/* Plug records */
130static unsigned int ns3plug; 130static unsigned int ns3plug;
@@ -161,7 +161,7 @@ static struct hfa384x_caplevel priid;
161/* Local Function Declarations */ 161/* Local Function Declarations */
162 162
163static int prism2_fwapply(const struct ihex_binrec *rfptr, 163static int prism2_fwapply(const struct ihex_binrec *rfptr,
164struct wlandevice *wlandev); 164 struct wlandevice *wlandev);
165 165
166static int read_fwfile(const struct ihex_binrec *rfptr); 166static int read_fwfile(const struct ihex_binrec *rfptr);
167 167
@@ -172,13 +172,15 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
172static int mkpdrlist(struct pda *pda); 172static int mkpdrlist(struct pda *pda);
173 173
174static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks, 174static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
175 struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda); 175 struct s3plugrec *s3plug, unsigned int ns3plug,
176 struct pda *pda);
176 177
177static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks, 178static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
178 struct s3crcrec *s3crc, unsigned int ns3crc); 179 struct s3crcrec *s3crc, unsigned int ns3crc);
179 180
180static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, 181static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
181 unsigned int nfchunks); 182 unsigned int nfchunks);
183
182static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks); 184static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
183 185
184static void free_srecs(void); 186static void free_srecs(void);
@@ -189,30 +191,31 @@ static int validate_identity(void);
189/* Function Definitions */ 191/* Function Definitions */
190 192
191/*---------------------------------------------------------------- 193/*----------------------------------------------------------------
192* prism2_fwtry 194 * prism2_fwtry
193* 195 *
194* Try and get firmware into memory 196 * Try and get firmware into memory
195* 197 *
196* Arguments: 198 * Arguments:
197* udev usb device structure 199 * udev usb device structure
198* wlandev wlan device structure 200 * wlandev wlan device structure
199* 201 *
200* Returns: 202 * Returns:
201* 0 - success 203 * 0 - success
202* ~0 - failure 204 * ~0 - failure
203----------------------------------------------------------------*/ 205 *----------------------------------------------------------------
206 */
204static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev) 207static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
205{ 208{
206 const struct firmware *fw_entry = NULL; 209 const struct firmware *fw_entry = NULL;
207 210
208 netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n", 211 netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n",
209 PRISM2_USB_FWFILE); 212 PRISM2_USB_FWFILE);
210 if (request_ihex_firmware(&fw_entry, 213 if (request_ihex_firmware(&fw_entry,
211 PRISM2_USB_FWFILE, &udev->dev) != 0) { 214 PRISM2_USB_FWFILE, &udev->dev) != 0) {
212 netdev_info(wlandev->netdev, 215 netdev_info(wlandev->netdev,
213 "prism2_usb: Firmware not available, but not essential\n"); 216 "prism2_usb: Firmware not available, but not essential\n");
214 netdev_info(wlandev->netdev, 217 netdev_info(wlandev->netdev,
215 "prism2_usb: can continue to use card anyway.\n"); 218 "prism2_usb: can continue to use card anyway.\n");
216 return 1; 219 return 1;
217 } 220 }
218 221
@@ -226,18 +229,19 @@ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
226} 229}
227 230
228/*---------------------------------------------------------------- 231/*----------------------------------------------------------------
229* prism2_fwapply 232 * prism2_fwapply
230* 233 *
231* Apply the firmware loaded into memory 234 * Apply the firmware loaded into memory
232* 235 *
233* Arguments: 236 * Arguments:
234* rfptr firmware image in kernel memory 237 * rfptr firmware image in kernel memory
235* wlandev device 238 * wlandev device
236* 239 *
237* Returns: 240 * Returns:
238* 0 - success 241 * 0 - success
239* ~0 - failure 242 * ~0 - failure
240----------------------------------------------------------------*/ 243 *----------------------------------------------------------------
244 */
241static int prism2_fwapply(const struct ihex_binrec *rfptr, 245static int prism2_fwapply(const struct ihex_binrec *rfptr,
242 struct wlandevice *wlandev) 246 struct wlandevice *wlandev)
243{ 247{
@@ -248,7 +252,12 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
248 252
249 /* Initialize the data structures */ 253 /* Initialize the data structures */
250 ns3data = 0; 254 ns3data = 0;
251 memset(s3data, 0, sizeof(s3data)); 255 s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
256 if (!s3data) {
257 result = -ENOMEM;
258 goto out;
259 }
260
252 ns3plug = 0; 261 ns3plug = 0;
253 memset(s3plug, 0, sizeof(s3plug)); 262 memset(s3plug, 0, sizeof(s3plug));
254 ns3crc = 0; 263 ns3crc = 0;
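
The change above replaces the large static s3data[] array with a heap allocation that must be checked at the start of prism2_fwapply() and released again in free_srecs(). A minimal sketch of that pattern (driver names, simplified):

static struct s3datarec *s3data;	/* was: static struct s3datarec s3data[S3DATA_MAX] */

static int alloc_srecs(void)
{
	s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
	if (!s3data)
		return -ENOMEM;		/* propagate instead of relying on static storage */
	return 0;
}

static void release_srecs(void)
{
	kfree(s3data);
	s3data = NULL;			/* avoid a stale pointer on the next firmware load */
}
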
@@ -372,24 +381,25 @@ out:
372} 381}
373 382
374/*---------------------------------------------------------------- 383/*----------------------------------------------------------------
375* crcimage 384 * crcimage
376* 385 *
377* Adds a CRC16 in the two bytes prior to each block identified by 386 * Adds a CRC16 in the two bytes prior to each block identified by
378* an S3 CRC record. Currently, we don't actually do a CRC we just 387 * an S3 CRC record. Currently, we don't actually do a CRC we just
379* insert the value 0xC0DE in hfa384x order. 388 * insert the value 0xC0DE in hfa384x order.
380* 389 *
381* Arguments: 390 * Arguments:
382* fchunk Array of image chunks 391 * fchunk Array of image chunks
383* nfchunks Number of image chunks 392 * nfchunks Number of image chunks
384* s3crc Array of crc records 393 * s3crc Array of crc records
385* ns3crc Number of crc records 394 * ns3crc Number of crc records
386* 395 *
387* Returns: 396 * Returns:
388* 0 success 397 * 0 success
389* ~0 failure 398 * ~0 failure
390----------------------------------------------------------------*/ 399 *----------------------------------------------------------------
400 */
391static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks, 401static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
392 struct s3crcrec *s3crc, unsigned int ns3crc) 402 struct s3crcrec *s3crc, unsigned int ns3crc)
393{ 403{
394 int result = 0; 404 int result = 0;
395 int i; 405 int i;
@@ -433,22 +443,22 @@ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
433 dest = fchunk[c].data + chunkoff; 443 dest = fchunk[c].data + chunkoff;
434 *dest = 0xde; 444 *dest = 0xde;
435 *(dest + 1) = 0xc0; 445 *(dest + 1) = 0xc0;
436
437 } 446 }
438 return result; 447 return result;
439} 448}
440 449
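
The two byte stores above (0xde, then 0xc0) write the placeholder "CRC" 0xC0DE in hfa384x little-endian order. The same thing expressed with the kernel's endian helper (hypothetical helper name, for illustration only):

#include <asm/unaligned.h>

static void put_fake_crc(u8 *dest)
{
	put_unaligned_le16(0xC0DE, dest);	/* dest[0] = 0xde, dest[1] = 0xc0 */
}
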
441/*---------------------------------------------------------------- 450/*----------------------------------------------------------------
442* free_chunks 451 * free_chunks
443* 452 *
444* Clears the chunklist data structures in preparation for a new file. 453 * Clears the chunklist data structures in preparation for a new file.
445* 454 *
446* Arguments: 455 * Arguments:
447* none 456 * none
448* 457 *
449* Returns: 458 * Returns:
450* nothing 459 * nothing
451----------------------------------------------------------------*/ 460 *----------------------------------------------------------------
461 */
452static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks) 462static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
453{ 463{
454 int i; 464 int i;
@@ -458,24 +468,24 @@ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
458 468
459 *nfchunks = 0; 469 *nfchunks = 0;
460 memset(fchunk, 0, sizeof(*fchunk)); 470 memset(fchunk, 0, sizeof(*fchunk));
461
462} 471}
463 472
464/*---------------------------------------------------------------- 473/*----------------------------------------------------------------
465* free_srecs 474 * free_srecs
466* 475 *
467* Clears the srec data structures in preparation for a new file. 476 * Clears the srec data structures in preparation for a new file.
468* 477 *
469* Arguments: 478 * Arguments:
470* none 479 * none
471* 480 *
472* Returns: 481 * Returns:
473* nothing 482 * nothing
474----------------------------------------------------------------*/ 483 *----------------------------------------------------------------
484 */
475static void free_srecs(void) 485static void free_srecs(void)
476{ 486{
477 ns3data = 0; 487 ns3data = 0;
478 memset(s3data, 0, sizeof(s3data)); 488 kfree(s3data);
479 ns3plug = 0; 489 ns3plug = 0;
480 memset(s3plug, 0, sizeof(s3plug)); 490 memset(s3plug, 0, sizeof(s3plug));
481 ns3crc = 0; 491 ns3crc = 0;
@@ -486,19 +496,20 @@ static void free_srecs(void)
486} 496}
487 497
488/*---------------------------------------------------------------- 498/*----------------------------------------------------------------
489* mkimage 499 * mkimage
490* 500 *
491* Scans the currently loaded set of S records for data residing 501 * Scans the currently loaded set of S records for data residing
492* in contiguous memory regions. Each contiguous region is then 502 * in contiguous memory regions. Each contiguous region is then
493* made into a 'chunk'. This function assumes that we're building 503 * made into a 'chunk'. This function assumes that we're building
494* a new chunk list. Assumes the s3data items are in sorted order. 504 * a new chunk list. Assumes the s3data items are in sorted order.
495* 505 *
496* Arguments: none 506 * Arguments: none
497* 507 *
498* Returns: 508 * Returns:
499* 0 - success 509 * 0 - success
500* ~0 - failure (probably an errno) 510 * ~0 - failure (probably an errno)
501----------------------------------------------------------------*/ 511 *----------------------------------------------------------------
512 */
502static int mkimage(struct imgchunk *clist, unsigned int *ccnt) 513static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
503{ 514{
504 int result = 0; 515 int result = 0;
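
mkimage()'s chunking step, as the comment above says, starts a new chunk whenever the next sorted S3 data record does not begin exactly where the previous one ended. A self-contained sketch of that pass (hypothetical record type; the driver does this over s3data[]):

struct ex_rec { unsigned int addr, len; };

static unsigned int count_chunks(const struct ex_rec *rec, unsigned int n)
{
	unsigned int i, nchunks = 0;

	for (i = 0; i < n; i++) {
		/* a gap in the address space begins a new contiguous region */
		if (i == 0 || rec[i].addr != rec[i - 1].addr + rec[i - 1].len)
			nchunks++;
	}
	return nchunks;
}
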
@@ -577,19 +588,20 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
577} 588}
578 589
579/*---------------------------------------------------------------- 590/*----------------------------------------------------------------
580* mkpdrlist 591 * mkpdrlist
581* 592 *
582* Reads a raw PDA and builds an array of pdrec_t structures. 593 * Reads a raw PDA and builds an array of pdrec_t structures.
583* 594 *
584* Arguments: 595 * Arguments:
585* pda buffer containing raw PDA bytes 596 * pda buffer containing raw PDA bytes
586* pdrec ptr to an array of pdrec_t's. Will be filled on exit. 597 * pdrec ptr to an array of pdrec_t's. Will be filled on exit.
587* nrec ptr to a variable that will contain the count of PDRs 598 * nrec ptr to a variable that will contain the count of PDRs
588* 599 *
589* Returns: 600 * Returns:
590* 0 - success 601 * 0 - success
591* ~0 - failure (probably an errno) 602 * ~0 - failure (probably an errno)
592----------------------------------------------------------------*/ 603 *----------------------------------------------------------------
604 */
593static int mkpdrlist(struct pda *pda) 605static int mkpdrlist(struct pda *pda)
594{ 606{
595 u16 *pda16 = (u16 *)pda->buf; 607 u16 *pda16 = (u16 *)pda->buf;
@@ -599,7 +611,7 @@ static int mkpdrlist(struct pda *pda)
599 curroff = 0; 611 curroff = 0;
600 while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) && 612 while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
601 le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) { 613 le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
602 pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]); 614 pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
603 615
604 if (le16_to_cpu(pda->rec[pda->nrec]->code) == 616 if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
605 HFA384x_PDR_NICID) { 617 HFA384x_PDR_NICID) {
@@ -631,37 +643,38 @@ static int mkpdrlist(struct pda *pda)
631 643
632 (pda->nrec)++; 644 (pda->nrec)++;
633 curroff += le16_to_cpu(pda16[curroff]) + 1; 645 curroff += le16_to_cpu(pda16[curroff]) + 1;
634
635 } 646 }
636 if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) { 647 if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) {
637 pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n", 648 pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n",
638 curroff, pda->nrec); 649 curroff, pda->nrec);
639 return 1; 650 return 1;
640 } 651 }
641 pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]); 652 pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
642 (pda->nrec)++; 653 (pda->nrec)++;
643 return 0; 654 return 0;
644} 655}
645 656
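
The loop above walks the raw PDA as 16-bit little-endian words: pda16[curroff] holds a record length (in words), pda16[curroff + 1] its PDR code, and the cursor advances by length + 1 until the end-of-PDA code is found. A stripped-down sketch of just that walk (driver constant name, no record bookkeeping):

static int walk_pda(const __le16 *pda16, unsigned int max_words)
{
	unsigned int off = 0;

	while (off < max_words - 1 &&
	       le16_to_cpu(pda16[off + 1]) != HFA384x_PDR_END_OF_PDA) {
		/* pda16[off] = record length in words, pda16[off + 1] = PDR code */
		off += le16_to_cpu(pda16[off]) + 1;
	}

	return off < max_words - 1 ? 0 : -EINVAL;	/* -EINVAL: no end record */
}
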
646/*---------------------------------------------------------------- 657/*----------------------------------------------------------------
647* plugimage 658 * plugimage
648* 659 *
649* Plugs the given image using the given plug records from the given 660 * Plugs the given image using the given plug records from the given
650* PDA and filename. 661 * PDA and filename.
651* 662 *
652* Arguments: 663 * Arguments:
653* fchunk Array of image chunks 664 * fchunk Array of image chunks
654* nfchunks Number of image chunks 665 * nfchunks Number of image chunks
655* s3plug Array of plug records 666 * s3plug Array of plug records
656* ns3plug Number of plug records 667 * ns3plug Number of plug records
657* pda Current pda data 668 * pda Current pda data
658* 669 *
659* Returns: 670 * Returns:
660* 0 success 671 * 0 success
661* ~0 failure 672 * ~0 failure
662----------------------------------------------------------------*/ 673 *----------------------------------------------------------------
674 */
663static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks, 675static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
664 struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda) 676 struct s3plugrec *s3plug, unsigned int ns3plug,
677 struct pda *pda)
665{ 678{
666 int result = 0; 679 int result = 0;
667 int i; /* plug index */ 680 int i; /* plug index */
@@ -741,31 +754,31 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
741 memset(dest, 0, s3plug[i].len); 754 memset(dest, 0, s3plug[i].len);
742 strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1); 755 strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
743 } else { /* plug a PDR */ 756 } else { /* plug a PDR */
744 memcpy(dest, &(pda->rec[j]->data), s3plug[i].len); 757 memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
745 } 758 }
746 } 759 }
747 return result; 760 return result;
748
749} 761}
750 762
751/*---------------------------------------------------------------- 763/*----------------------------------------------------------------
752* read_cardpda 764 * read_cardpda
753* 765 *
754* Sends the command for the driver to read the pda from the card 766 * Sends the command for the driver to read the pda from the card
755* named in the device variable. Upon success, the card pda is 767 * named in the device variable. Upon success, the card pda is
756* stored in the "cardpda" variables. Note that the pda structure 768 * stored in the "cardpda" variables. Note that the pda structure
757* is considered 'well formed' after this function. That means 769 * is considered 'well formed' after this function. That means
758* that the nrecs is valid, the rec array has been set up, and there's 770 * that the nrecs is valid, the rec array has been set up, and there's
759* a valid PDAEND record in the raw PDA data. 771 * a valid PDAEND record in the raw PDA data.
760* 772 *
761* Arguments: 773 * Arguments:
762* pda pda structure 774 * pda pda structure
763* wlandev device 775 * wlandev device
764* 776 *
765* Returns: 777 * Returns:
766* 0 - success 778 * 0 - success
767* ~0 - failure (probably an errno) 779 * ~0 - failure (probably an errno)
768----------------------------------------------------------------*/ 780 *----------------------------------------------------------------
781 */
769static int read_cardpda(struct pda *pda, struct wlandevice *wlandev) 782static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
770{ 783{
771 int result = 0; 784 int result = 0;
@@ -802,65 +815,66 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
802} 815}
803 816
804/*---------------------------------------------------------------- 817/*----------------------------------------------------------------
805* read_fwfile 818 * read_fwfile
806* 819 *
807* Reads the given fw file which should have been compiled from an srec 820 * Reads the given fw file which should have been compiled from an srec
808* file. Each record in the fw file will either be a plain data record, 821 * file. Each record in the fw file will either be a plain data record,
809* a start address record, or other records used for plugging. 822 * a start address record, or other records used for plugging.
810* 823 *
811* Note that data records are expected to be sorted into 824 * Note that data records are expected to be sorted into
812* ascending address order in the fw file. 825 * ascending address order in the fw file.
813* 826 *
814* Note also that the start address record, originally an S7 record in 827 * Note also that the start address record, originally an S7 record in
815* the srec file, is expected in the fw file to be like a data record but 828 * the srec file, is expected in the fw file to be like a data record but
816* with a certain address to make it identifiable. 829 * with a certain address to make it identifiable.
817* 830 *
818* Here's the SREC format that the fw should have come from: 831 * Here's the SREC format that the fw should have come from:
819* S[37]nnaaaaaaaaddd...dddcc 832 * S[37]nnaaaaaaaaddd...dddcc
820* 833 *
821* nn - number of bytes starting with the address field 834 * nn - number of bytes starting with the address field
822* aaaaaaaa - address in readable (or big endian) format 835 * aaaaaaaa - address in readable (or big endian) format
823* dd....dd - 0-245 data bytes (two chars per byte) 836 * dd....dd - 0-245 data bytes (two chars per byte)
824* cc - checksum 837 * cc - checksum
825* 838 *
826* The S7 record's (there should be only one) address value gets 839 * The S7 record's (there should be only one) address value gets
827* converted to an S3 record with address of 0xff400000, with the 840 * converted to an S3 record with address of 0xff400000, with the
828* start address being stored as a 4 byte data word. That address is 841 * start address being stored as a 4 byte data word. That address is
829* the start execution address used for RAM downloads. 842 * the start execution address used for RAM downloads.
830* 843 *
831* The S3 records have a collection of subformats indicated by the 844 * The S3 records have a collection of subformats indicated by the
832* value of aaaaaaaa: 845 * value of aaaaaaaa:
833* 0xff000000 - Plug record, data field format: 846 * 0xff000000 - Plug record, data field format:
834* xxxxxxxxaaaaaaaassssssss 847 * xxxxxxxxaaaaaaaassssssss
835* x - PDR code number (little endian) 848 * x - PDR code number (little endian)
836* a - Address in load image to plug (little endian) 849 * a - Address in load image to plug (little endian)
837* s - Length of plug data area (little endian) 850 * s - Length of plug data area (little endian)
838* 851 *
839* 0xff100000 - CRC16 generation record, data field format: 852 * 0xff100000 - CRC16 generation record, data field format:
840* aaaaaaaassssssssbbbbbbbb 853 * aaaaaaaassssssssbbbbbbbb
841* a - Start address for CRC calculation (little endian) 854 * a - Start address for CRC calculation (little endian)
842* s - Length of data to calculate over (little endian) 855 * s - Length of data to calculate over (little endian)
843* b - Boolean, true=write crc, false=don't write 856 * b - Boolean, true=write crc, false=don't write
844* 857 *
845* 0xff200000 - Info record, data field format: 858 * 0xff200000 - Info record, data field format:
846* ssssttttdd..dd 859 * ssssttttdd..dd
847* s - Size in words (little endian) 860 * s - Size in words (little endian)
848* t - Info type (little endian), see #defines and 861 * t - Info type (little endian), see #defines and
849* struct s3inforec for details about types. 862 * struct s3inforec for details about types.
850* d - (s - 1) little endian words giving the contents of 863 * d - (s - 1) little endian words giving the contents of
851* the given info type. 864 * the given info type.
852* 865 *
853* 0xff400000 - Start address record, data field format: 866 * 0xff400000 - Start address record, data field format:
854* aaaaaaaa 867 * aaaaaaaa
855* a - Address in load image to plug (little endian) 868 * a - Address in load image to plug (little endian)
856* 869 *
857* Arguments: 870 * Arguments:
858* record firmware image (ihex record structure) in kernel memory 871 * record firmware image (ihex record structure) in kernel memory
859* 872 *
860* Returns: 873 * Returns:
861* 0 - success 874 * 0 - success
862* ~0 - failure (probably an errno) 875 * ~0 - failure (probably an errno)
863----------------------------------------------------------------*/ 876 *----------------------------------------------------------------
877 */
864static int read_fwfile(const struct ihex_binrec *record) 878static int read_fwfile(const struct ihex_binrec *record)
865{ 879{
866 int i; 880 int i;
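The comment block above documents the special record addresses (0xff000000 plug, 0xff100000 CRC16, 0xff200000 info, 0xff400000 start) and their field layouts; the switch in read_fwfile() pulls those fields out as consecutive 32-bit or 16-bit words. An illustrative standalone decoder following that description, with local constant names and host-order words (the driver itself works on the ihex record data and converts where needed):

#include <stdint.h>
#include <stdio.h>

#define ADDR_PLUG  0xff000000u   /* local names for the addresses listed above */
#define ADDR_CRC   0xff100000u
#define ADDR_INFO  0xff200000u
#define ADDR_START 0xff400000u

static void decode_record(uint32_t addr, const uint32_t *w32, const uint16_t *w16)
{
        switch (addr) {
        case ADDR_START:        /* execution start address for RAM downloads */
                printf("start addr 0x%08x\n", (unsigned int)w32[0]);
                break;
        case ADDR_PLUG:         /* PDR code, target address, plug length */
                printf("plug item 0x%08x at 0x%08x len %u\n",
                       (unsigned int)w32[0], (unsigned int)w32[1], (unsigned int)w32[2]);
                break;
        case ADDR_CRC:          /* CRC start address, length, write flag */
                printf("crc over 0x%08x len %u write=%u\n",
                       (unsigned int)w32[0], (unsigned int)w32[1], (unsigned int)w32[2]);
                break;
        case ADDR_INFO:         /* size in words, info type, then (size - 1) payload words */
                printf("info type 0x%04x, %d payload word(s)\n", w16[1], w16[0] - 1);
                break;
        default:                /* any other address is a plain data record */
                break;
        }
}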
@@ -872,7 +886,6 @@ static int read_fwfile(const struct ihex_binrec *record)
872 pr_debug("Reading fw file ...\n"); 886 pr_debug("Reading fw file ...\n");
873 887
874 while (record) { 888 while (record) {
875
876 rcnt++; 889 rcnt++;
877 890
878 len = be16_to_cpu(record->len); 891 len = be16_to_cpu(record->len);
@@ -887,8 +900,8 @@ static int read_fwfile(const struct ihex_binrec *record)
887 case S3ADDR_START: 900 case S3ADDR_START:
888 startaddr = *ptr32; 901 startaddr = *ptr32;
889 pr_debug(" S7 start addr, record=%d addr=0x%08x\n", 902 pr_debug(" S7 start addr, record=%d addr=0x%08x\n",
890 rcnt, 903 rcnt,
891 startaddr); 904 startaddr);
892 break; 905 break;
893 case S3ADDR_PLUG: 906 case S3ADDR_PLUG:
894 s3plug[ns3plug].itemcode = *ptr32; 907 s3plug[ns3plug].itemcode = *ptr32;
@@ -896,10 +909,10 @@ static int read_fwfile(const struct ihex_binrec *record)
896 s3plug[ns3plug].len = *(ptr32 + 2); 909 s3plug[ns3plug].len = *(ptr32 + 2);
897 910
898 pr_debug(" S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n", 911 pr_debug(" S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n",
899 rcnt, 912 rcnt,
900 s3plug[ns3plug].itemcode, 913 s3plug[ns3plug].itemcode,
901 s3plug[ns3plug].addr, 914 s3plug[ns3plug].addr,
902 s3plug[ns3plug].len); 915 s3plug[ns3plug].len);
903 916
904 ns3plug++; 917 ns3plug++;
905 if (ns3plug == S3PLUG_MAX) { 918 if (ns3plug == S3PLUG_MAX) {
@@ -913,10 +926,10 @@ static int read_fwfile(const struct ihex_binrec *record)
913 s3crc[ns3crc].dowrite = *(ptr32 + 2); 926 s3crc[ns3crc].dowrite = *(ptr32 + 2);
914 927
915 pr_debug(" S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n", 928 pr_debug(" S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n",
916 rcnt, 929 rcnt,
917 s3crc[ns3crc].addr, 930 s3crc[ns3crc].addr,
918 s3crc[ns3crc].len, 931 s3crc[ns3crc].len,
919 s3crc[ns3crc].dowrite); 932 s3crc[ns3crc].dowrite);
920 ns3crc++; 933 ns3crc++;
921 if (ns3crc == S3CRC_MAX) { 934 if (ns3crc == S3CRC_MAX) {
922 pr_err("S3 crcrec limit reached - aborting\n"); 935 pr_err("S3 crcrec limit reached - aborting\n");
@@ -928,16 +941,16 @@ static int read_fwfile(const struct ihex_binrec *record)
928 s3info[ns3info].type = *(ptr16 + 1); 941 s3info[ns3info].type = *(ptr16 + 1);
929 942
930 pr_debug(" S3 inforec, record=%d len=0x%04x type=0x%04x\n", 943 pr_debug(" S3 inforec, record=%d len=0x%04x type=0x%04x\n",
931 rcnt, 944 rcnt,
932 s3info[ns3info].len, 945 s3info[ns3info].len,
933 s3info[ns3info].type); 946 s3info[ns3info].type);
934 if (((s3info[ns3info].len - 1) * sizeof(u16)) > 947 if (((s3info[ns3info].len - 1) * sizeof(u16)) >
935 sizeof(s3info[ns3info].info)) { 948 sizeof(s3info[ns3info].info)) {
936 pr_err("S3 inforec length too long - aborting\n"); 949 pr_err("S3 inforec length too long - aborting\n");
937 return 1; 950 return 1;
938 } 951 }
939 952
940 tmpinfo = (u16 *)&(s3info[ns3info].info.version); 953 tmpinfo = (u16 *)&s3info[ns3info].info.version;
941 pr_debug(" info="); 954 pr_debug(" info=");
942 for (i = 0; i < s3info[ns3info].len - 1; i++) { 955 for (i = 0; i < s3info[ns3info].len - 1; i++) {
943 tmpinfo[i] = *(ptr16 + 2 + i); 956 tmpinfo[i] = *(ptr16 + 2 + i);
@@ -968,22 +981,23 @@ static int read_fwfile(const struct ihex_binrec *record)
968} 981}
969 982
970/*---------------------------------------------------------------- 983/*----------------------------------------------------------------
971* writeimage 984 * writeimage
972* 985 *
973* Takes the chunks, builds p80211 messages and sends them down 986 * Takes the chunks, builds p80211 messages and sends them down
974* to the driver for writing to the card. 987 * to the driver for writing to the card.
975* 988 *
976* Arguments: 989 * Arguments:
977* wlandev device 990 * wlandev device
978* fchunk Array of image chunks 991 * fchunk Array of image chunks
979* nfchunks Number of image chunks 992 * nfchunks Number of image chunks
980* 993 *
981* Returns: 994 * Returns:
982* 0 success 995 * 0 success
983* ~0 failure 996 * ~0 failure
984----------------------------------------------------------------*/ 997 *----------------------------------------------------------------
998 */
985static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, 999static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
986 unsigned int nfchunks) 1000 unsigned int nfchunks)
987{ 1001{
988 int result = 0; 1002 int result = 0;
989 struct p80211msg_p2req_ramdl_state *rstmsg; 1003 struct p80211msg_p2req_ramdl_state *rstmsg;
@@ -1099,7 +1113,6 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
1099 result = 1; 1113 result = 1;
1100 goto free_result; 1114 goto free_result;
1101 } 1115 }
1102
1103 } 1116 }
1104 } 1117 }
1105 1118
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c9eac4..c558ad656c49 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,61 +1,61 @@
1/* src/prism2/driver/prism2mgmt.c 1/* src/prism2/driver/prism2mgmt.c
2* 2 *
3* Management request handler functions. 3 * Management request handler functions.
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* The functions in this file handle management requests sent from 47 * The functions in this file handle management requests sent from
48* user mode. 48 * user mode.
49* 49 *
50* Most of these functions have two separate blocks of code that are 50 * Most of these functions have two separate blocks of code that are
51* conditional on whether this is a station or an AP. This is used 51 * conditional on whether this is a station or an AP. This is used
52* to separate out the STA and AP responses to these management primitives. 52 * to separate out the STA and AP responses to these management primitives.
53* It's a choice (good, bad, indifferent?) to have the code in the same 53 * It's a choice (good, bad, indifferent?) to have the code in the same
54* place so it's clear that the same primitive is implemented in both 54 * place so it's clear that the same primitive is implemented in both
55* cases but has different behavior. 55 * cases but has different behavior.
56* 56 *
57* -------------------------------------------------------------------- 57 * --------------------------------------------------------------------
58*/ 58 */
59 59
60#include <linux/if_arp.h> 60#include <linux/if_arp.h>
61#include <linux/module.h> 61#include <linux/module.h>
@@ -84,35 +84,36 @@
84#include "prism2mgmt.h" 84#include "prism2mgmt.h"
85 85
86/* Converts 802.11 format rate specifications to prism2 */ 86/* Converts 802.11 format rate specifications to prism2 */
87#define p80211rate_to_p2bit(n) ((((n)&~BIT(7)) == 2) ? BIT(0) : \ 87#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
88 (((n)&~BIT(7)) == 4) ? BIT(1) : \ 88 (((n) & ~BIT(7)) == 4) ? BIT(1) : \
89 (((n)&~BIT(7)) == 11) ? BIT(2) : \ 89 (((n) & ~BIT(7)) == 11) ? BIT(2) : \
90 (((n)&~BIT(7)) == 22) ? BIT(3) : 0) 90 (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)
91 91
92/*---------------------------------------------------------------- 92/*----------------------------------------------------------------
93* prism2mgmt_scan 93 * prism2mgmt_scan
94* 94 *
95* Initiate a scan for BSSs. 95 * Initiate a scan for BSSs.
96* 96 *
97* This function corresponds to MLME-scan.request and part of 97 * This function corresponds to MLME-scan.request and part of
98* MLME-scan.confirm. As far as I can tell in the standard, there 98 * MLME-scan.confirm. As far as I can tell in the standard, there
99* are no restrictions on when a scan.request may be issued. We have 99 * are no restrictions on when a scan.request may be issued. We have
100* to handle in whatever state the driver/MAC happen to be. 100 * to handle in whatever state the driver/MAC happen to be.
101* 101 *
102* Arguments: 102 * Arguments:
103* wlandev wlan device structure 103 * wlandev wlan device structure
104* msgp ptr to msg buffer 104 * msgp ptr to msg buffer
105* 105 *
106* Returns: 106 * Returns:
107* 0 success and done 107 * 0 success and done
108* <0 success, but we're waiting for something to finish. 108 * <0 success, but we're waiting for something to finish.
109* >0 an error occurred while handling the message. 109 * >0 an error occurred while handling the message.
110* Side effects: 110 * Side effects:
111* 111 *
112* Call context: 112 * Call context:
113* process thread (usually) 113 * process thread (usually)
114* interrupt 114 * interrupt
115----------------------------------------------------------------*/ 115 *----------------------------------------------------------------
116 */
116int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) 117int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
117{ 118{
118 int result = 0; 119 int result = 0;
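The p80211rate_to_p2bit() macro at the top of the hunk above maps 802.11 rate octets, which are expressed in 500 kb/s units with bit 7 marking a basic rate, onto the four Prism2 rate bits. A small standalone check of that mapping, repeating the macro verbatim with a userspace BIT() definition:

#include <assert.h>

#define BIT(n) (1u << (n))
#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
                                (((n) & ~BIT(7)) == 4) ? BIT(1) : \
                                (((n) & ~BIT(7)) == 11) ? BIT(2) : \
                                (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)

int main(void)
{
        assert(p80211rate_to_p2bit(2) == BIT(0));               /* 1 Mb/s          */
        assert(p80211rate_to_p2bit(4 | BIT(7)) == BIT(1));      /* 2 Mb/s, basic   */
        assert(p80211rate_to_p2bit(11) == BIT(2));              /* 5.5 Mb/s        */
        assert(p80211rate_to_p2bit(22 | BIT(7)) == BIT(3));     /* 11 Mb/s, basic  */
        assert(p80211rate_to_p2bit(12) == 0);                   /* unknown rate    */
        return 0;
}

Masking bit 7 first is what lets the same macro accept both the plain and the basic-rate encodings of 1, 2, 5.5 and 11 Mb/s.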
@@ -122,7 +123,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
122 int i, timeout; 123 int i, timeout;
123 int istmpenable = 0; 124 int istmpenable = 0;
124 125
125 struct hfa384x_HostScanRequest_data scanreq; 126 struct hfa384x_host_scan_request_data scanreq;
126 127
127 /* gatekeeper check */ 128 /* gatekeeper check */
128 if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major, 129 if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
@@ -184,7 +185,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
184 185
185 /* set up the txrate to be 2MBPS. Should be fastest basicrate... */ 186 /* set up the txrate to be 2MBPS. Should be fastest basicrate... */
186 word = HFA384x_RATEBIT_2; 187 word = HFA384x_RATEBIT_2;
187 scanreq.txRate = cpu_to_le16(word); 188 scanreq.tx_rate = cpu_to_le16(word);
188 189
189 /* set up the channel list */ 190 /* set up the channel list */
190 word = 0; 191 word = 0;
@@ -196,7 +197,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
196 /* channel 1 is BIT 0 ... channel 14 is BIT 13 */ 197 /* channel 1 is BIT 0 ... channel 14 is BIT 13 */
197 word |= (1 << (channel - 1)); 198 word |= (1 << (channel - 1));
198 } 199 }
199 scanreq.channelList = cpu_to_le16(word); 200 scanreq.channel_list = cpu_to_le16(word);
200 201
201 /* set up the ssid, if present. */ 202 /* set up the ssid, if present. */
202 scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len); 203 scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
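As the comment in the hunk above says, the host scan request encodes channel N as bit (N - 1) of a 16-bit word before the cpu_to_le16() conversion. A quick standalone check of that encoding for a hypothetical channel set {1, 6, 11}:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        static const int channels[] = { 1, 6, 11 };
        uint16_t word = 0;
        unsigned int i;

        for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
                word |= 1u << (channels[i] - 1);        /* channel N -> bit (N - 1) */

        assert(word == 0x0421);                         /* bits 0, 5 and 10 set */
        return 0;
}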
@@ -292,7 +293,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
292 293
293 result = hfa384x_drvr_setconfig(hw, 294 result = hfa384x_drvr_setconfig(hw,
294 HFA384x_RID_HOSTSCAN, &scanreq, 295 HFA384x_RID_HOSTSCAN, &scanreq,
295 sizeof(struct hfa384x_HostScanRequest_data)); 296 sizeof(scanreq));
296 if (result) { 297 if (result) {
297 netdev_err(wlandev->netdev, 298 netdev_err(wlandev->netdev,
298 "setconfig(SCANREQUEST) failed. result=%d\n", 299 "setconfig(SCANREQUEST) failed. result=%d\n",
@@ -347,31 +348,32 @@ exit:
347} 348}
348 349
349/*---------------------------------------------------------------- 350/*----------------------------------------------------------------
350* prism2mgmt_scan_results 351 * prism2mgmt_scan_results
351* 352 *
352* Retrieve the BSS description for one of the BSSs identified in 353 * Retrieve the BSS description for one of the BSSs identified in
353* a scan. 354 * a scan.
354* 355 *
355* Arguments: 356 * Arguments:
356* wlandev wlan device structure 357 * wlandev wlan device structure
357* msgp ptr to msg buffer 358 * msgp ptr to msg buffer
358* 359 *
359* Returns: 360 * Returns:
360* 0 success and done 361 * 0 success and done
361* <0 success, but we're waiting for something to finish. 362 * <0 success, but we're waiting for something to finish.
362* >0 an error occurred while handling the message. 363 * >0 an error occurred while handling the message.
363* Side effects: 364 * Side effects:
364* 365 *
365* Call context: 366 * Call context:
366* process thread (usually) 367 * process thread (usually)
367* interrupt 368 * interrupt
368----------------------------------------------------------------*/ 369 *----------------------------------------------------------------
370 */
369int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp) 371int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
370{ 372{
371 int result = 0; 373 int result = 0;
372 struct p80211msg_dot11req_scan_results *req; 374 struct p80211msg_dot11req_scan_results *req;
373 struct hfa384x *hw = wlandev->priv; 375 struct hfa384x *hw = wlandev->priv;
374 struct hfa384x_HScanResultSub *item = NULL; 376 struct hfa384x_hscan_result_sub *item = NULL;
375 377
376 int count; 378 int count;
377 379
@@ -425,8 +427,8 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
425#define REQBASICRATE(N) \ 427#define REQBASICRATE(N) \
426 do { \ 428 do { \
427 if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \ 429 if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
428 item->supprates[(N)-1])) { \ 430 item->supprates[(N) - 1])) { \
429 req->basicrate ## N .data = item->supprates[(N)-1]; \ 431 req->basicrate ## N .data = item->supprates[(N) - 1]; \
430 req->basicrate ## N .status = \ 432 req->basicrate ## N .status = \
431 P80211ENUM_msgitem_status_data_ok; \ 433 P80211ENUM_msgitem_status_data_ok; \
432 } \ 434 } \
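REQBASICRATE() above (and REQSUPPRATE() in the next hunk) rely on preprocessor token pasting: req->basicrate ## N glues the literal index onto the field name, so REQBASICRATE(2) writes req->basicrate2. A tiny self-contained illustration of the same pattern, using hypothetical field names rather than the driver's message structures:

#include <assert.h>

struct item { int data; int status; };
struct msg  { struct item rate1, rate2; };   /* hypothetical stand-ins */

/* "## N" pastes the numeric index onto the field name at expansion time. */
#define SET_RATE(m, N, v)   ((m)->rate ## N .data = (v))

int main(void)
{
        struct msg m = { 0 };

        SET_RATE(&m, 2, 22);    /* expands to (&m)->rate2 .data = (22) */
        assert(m.rate2.data == 22);
        return 0;
}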
@@ -444,7 +446,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
444#define REQSUPPRATE(N) \ 446#define REQSUPPRATE(N) \
445 do { \ 447 do { \
446 if (count >= N) { \ 448 if (count >= N) { \
447 req->supprate ## N .data = item->supprates[(N)-1]; \ 449 req->supprate ## N .data = item->supprates[(N) - 1]; \
448 req->supprate ## N .status = \ 450 req->supprate ## N .status = \
449 P80211ENUM_msgitem_status_data_ok; \ 451 P80211ENUM_msgitem_status_data_ok; \
450 } \ 452 } \
@@ -507,24 +509,25 @@ exit:
507} 509}
508 510
509/*---------------------------------------------------------------- 511/*----------------------------------------------------------------
510* prism2mgmt_start 512 * prism2mgmt_start
511* 513 *
512* Start a BSS. Any station can do this for IBSS, only AP for ESS. 514 * Start a BSS. Any station can do this for IBSS, only AP for ESS.
513* 515 *
514* Arguments: 516 * Arguments:
515* wlandev wlan device structure 517 * wlandev wlan device structure
516* msgp ptr to msg buffer 518 * msgp ptr to msg buffer
517* 519 *
518* Returns: 520 * Returns:
519* 0 success and done 521 * 0 success and done
520* <0 success, but we're waiting for something to finish. 522 * <0 success, but we're waiting for something to finish.
521* >0 an error occurred while handling the message. 523 * >0 an error occurred while handling the message.
522* Side effects: 524 * Side effects:
523* 525 *
524* Call context: 526 * Call context:
525* process thread (usually) 527 * process thread (usually)
526* interrupt 528 * interrupt
527----------------------------------------------------------------*/ 529 *----------------------------------------------------------------
530 */
528int prism2mgmt_start(struct wlandevice *wlandev, void *msgp) 531int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
529{ 532{
530 int result = 0; 533 int result = 0;
@@ -580,7 +583,7 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
580 583
581 /* beacon period */ 584 /* beacon period */
582 word = msg->beaconperiod.data; 585 word = msg->beaconperiod.data;
583 result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word); 586 result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNINT, word);
584 if (result) { 587 if (result) {
585 netdev_err(wlandev->netdev, 588 netdev_err(wlandev->netdev,
586 "Failed to set beacon period=%d.\n", word); 589 "Failed to set beacon period=%d.\n", word);
@@ -689,23 +692,24 @@ done:
689} 692}
690 693
691/*---------------------------------------------------------------- 694/*----------------------------------------------------------------
692* prism2mgmt_readpda 695 * prism2mgmt_readpda
693* 696 *
694* Collect the PDA data and put it in the message. 697 * Collect the PDA data and put it in the message.
695* 698 *
696* Arguments: 699 * Arguments:
697* wlandev wlan device structure 700 * wlandev wlan device structure
698* msgp ptr to msg buffer 701 * msgp ptr to msg buffer
699* 702 *
700* Returns: 703 * Returns:
701* 0 success and done 704 * 0 success and done
702* <0 success, but we're waiting for something to finish. 705 * <0 success, but we're waiting for something to finish.
703* >0 an error occurred while handling the message. 706 * >0 an error occurred while handling the message.
704* Side effects: 707 * Side effects:
705* 708 *
706* Call context: 709 * Call context:
707* process thread (usually) 710 * process thread (usually)
708----------------------------------------------------------------*/ 711 *----------------------------------------------------------------
712 */
709int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp) 713int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
710{ 714{
711 struct hfa384x *hw = wlandev->priv; 715 struct hfa384x *hw = wlandev->priv;
@@ -748,30 +752,31 @@ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
748} 752}
749 753
750/*---------------------------------------------------------------- 754/*----------------------------------------------------------------
751* prism2mgmt_ramdl_state 755 * prism2mgmt_ramdl_state
752* 756 *
753* Establishes the beginning/end of a card RAM download session. 757 * Establishes the beginning/end of a card RAM download session.
754* 758 *
755* It is expected that the ramdl_write() function will be called 759 * It is expected that the ramdl_write() function will be called
756* one or more times between the 'enable' and 'disable' calls to 760 * one or more times between the 'enable' and 'disable' calls to
757* this function. 761 * this function.
758* 762 *
759* Note: This function should not be called when a mac comm port 763 * Note: This function should not be called when a mac comm port
760* is active. 764 * is active.
761* 765 *
762* Arguments: 766 * Arguments:
763* wlandev wlan device structure 767 * wlandev wlan device structure
764* msgp ptr to msg buffer 768 * msgp ptr to msg buffer
765* 769 *
766* Returns: 770 * Returns:
767* 0 success and done 771 * 0 success and done
768* <0 success, but we're waiting for something to finish. 772 * <0 success, but we're waiting for something to finish.
769* >0 an error occurred while handling the message. 773 * >0 an error occurred while handling the message.
770* Side effects: 774 * Side effects:
771* 775 *
772* Call context: 776 * Call context:
773* process thread (usually) 777 * process thread (usually)
774----------------------------------------------------------------*/ 778 *----------------------------------------------------------------
779 */
775int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp) 780int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
776{ 781{
777 struct hfa384x *hw = wlandev->priv; 782 struct hfa384x *hw = wlandev->priv;
@@ -808,25 +813,26 @@ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
808} 813}
809 814
810/*---------------------------------------------------------------- 815/*----------------------------------------------------------------
811* prism2mgmt_ramdl_write 816 * prism2mgmt_ramdl_write
812* 817 *
813* Writes a buffer to the card RAM using the download state. This 818 * Writes a buffer to the card RAM using the download state. This
814* is for writing code to card RAM. To just read or write raw data 819 * is for writing code to card RAM. To just read or write raw data
815* use the aux functions. 820 * use the aux functions.
816* 821 *
817* Arguments: 822 * Arguments:
818* wlandev wlan device structure 823 * wlandev wlan device structure
819* msgp ptr to msg buffer 824 * msgp ptr to msg buffer
820* 825 *
821* Returns: 826 * Returns:
822* 0 success and done 827 * 0 success and done
823* <0 success, but we're waiting for something to finish. 828 * <0 success, but we're waiting for something to finish.
824* >0 an error occurred while handling the message. 829 * >0 an error occurred while handling the message.
825* Side effects: 830 * Side effects:
826* 831 *
827* Call context: 832 * Call context:
828* process thread (usually) 833 * process thread (usually)
829----------------------------------------------------------------*/ 834 *----------------------------------------------------------------
835 */
830int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp) 836int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
831{ 837{
832 struct hfa384x *hw = wlandev->priv; 838 struct hfa384x *hw = wlandev->priv;
@@ -864,30 +870,31 @@ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
864} 870}
865 871
866/*---------------------------------------------------------------- 872/*----------------------------------------------------------------
867* prism2mgmt_flashdl_state 873 * prism2mgmt_flashdl_state
868* 874 *
869* Establishes the beginning/end of a card Flash download session. 875 * Establishes the beginning/end of a card Flash download session.
870* 876 *
871* It is expected that the flashdl_write() function will be called 877 * It is expected that the flashdl_write() function will be called
872* one or more times between the 'enable' and 'disable' calls to 878 * one or more times between the 'enable' and 'disable' calls to
873* this function. 879 * this function.
874* 880 *
875* Note: This function should not be called when a mac comm port 881 * Note: This function should not be called when a mac comm port
876* is active. 882 * is active.
877* 883 *
878* Arguments: 884 * Arguments:
879* wlandev wlan device structure 885 * wlandev wlan device structure
880* msgp ptr to msg buffer 886 * msgp ptr to msg buffer
881* 887 *
882* Returns: 888 * Returns:
883* 0 success and done 889 * 0 success and done
884* <0 success, but we're waiting for something to finish. 890 * <0 success, but we're waiting for something to finish.
885* >0 an error occurred while handling the message. 891 * >0 an error occurred while handling the message.
886* Side effects: 892 * Side effects:
887* 893 *
888* Call context: 894 * Call context:
889* process thread (usually) 895 * process thread (usually)
890----------------------------------------------------------------*/ 896 *----------------------------------------------------------------
897 */
891int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp) 898int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
892{ 899{
893 int result = 0; 900 int result = 0;
@@ -942,23 +949,24 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
942} 949}
943 950
944/*---------------------------------------------------------------- 951/*----------------------------------------------------------------
945* prism2mgmt_flashdl_write 952 * prism2mgmt_flashdl_write
946* 953 *
947* 954 *
948* 955 *
949* Arguments: 956 * Arguments:
950* wlandev wlan device structure 957 * wlandev wlan device structure
951* msgp ptr to msg buffer 958 * msgp ptr to msg buffer
952* 959 *
953* Returns: 960 * Returns:
954* 0 success and done 961 * 0 success and done
955* <0 success, but we're waiting for something to finish. 962 * <0 success, but we're waiting for something to finish.
956* >0 an error occurred while handling the message. 963 * >0 an error occurred while handling the message.
957* Side effects: 964 * Side effects:
958* 965 *
959* Call context: 966 * Call context:
960* process thread (usually) 967 * process thread (usually)
961----------------------------------------------------------------*/ 968 *----------------------------------------------------------------
969 */
962int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp) 970int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
963{ 971{
964 struct hfa384x *hw = wlandev->priv; 972 struct hfa384x *hw = wlandev->priv;
@@ -1001,24 +1009,25 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
1001} 1009}
1002 1010
1003/*---------------------------------------------------------------- 1011/*----------------------------------------------------------------
1004* prism2mgmt_autojoin 1012 * prism2mgmt_autojoin
1005* 1013 *
1006* Associate with an ESS. 1014 * Associate with an ESS.
1007* 1015 *
1008* Arguments: 1016 * Arguments:
1009* wlandev wlan device structure 1017 * wlandev wlan device structure
1010* msgp ptr to msg buffer 1018 * msgp ptr to msg buffer
1011* 1019 *
1012* Returns: 1020 * Returns:
1013* 0 success and done 1021 * 0 success and done
1014* <0 success, but we're waiting for something to finish. 1022 * <0 success, but we're waiting for something to finish.
1015* >0 an error occurred while handling the message. 1023 * >0 an error occurred while handling the message.
1016* Side effects: 1024 * Side effects:
1017* 1025 *
1018* Call context: 1026 * Call context:
1019* process thread (usually) 1027 * process thread (usually)
1020* interrupt 1028 * interrupt
1021----------------------------------------------------------------*/ 1029 *----------------------------------------------------------------
1030 */
1022int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp) 1031int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
1023{ 1032{
1024 struct hfa384x *hw = wlandev->priv; 1033 struct hfa384x *hw = wlandev->priv;
@@ -1072,24 +1081,25 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
1072} 1081}
1073 1082
1074/*---------------------------------------------------------------- 1083/*----------------------------------------------------------------
1075* prism2mgmt_wlansniff 1084 * prism2mgmt_wlansniff
1076* 1085 *
1077* Start or stop sniffing. 1086 * Start or stop sniffing.
1078* 1087 *
1079* Arguments: 1088 * Arguments:
1080* wlandev wlan device structure 1089 * wlandev wlan device structure
1081* msgp ptr to msg buffer 1090 * msgp ptr to msg buffer
1082* 1091 *
1083* Returns: 1092 * Returns:
1084* 0 success and done 1093 * 0 success and done
1085* <0 success, but we're waiting for something to finish. 1094 * <0 success, but we're waiting for something to finish.
1086* >0 an error occurred while handling the message. 1095 * >0 an error occurred while handling the message.
1087* Side effects: 1096 * Side effects:
1088* 1097 *
1089* Call context: 1098 * Call context:
1090* process thread (usually) 1099 * process thread (usually)
1091* interrupt 1100 * interrupt
1092----------------------------------------------------------------*/ 1101 *----------------------------------------------------------------
1102 */
1093int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp) 1103int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
1094{ 1104{
1095 int result = 0; 1105 int result = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index cc1ac7a60dfe..88b979ff68b3 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,61 +1,61 @@
1/* prism2mgmt.h 1/* prism2mgmt.h
2* 2 *
3* Declares the mgmt command handler functions 3 * Declares the mgmt command handler functions
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* This file contains the constants and data structures for interaction 47 * This file contains the constants and data structures for interaction
48* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC). 48 * with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
49* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset. 49 * The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
50* 50 *
51* [Implementation and usage notes] 51 * [Implementation and usage notes]
52* 52 *
53* [References] 53 * [References]
54* CW10 Programmer's Manual v1.5 54 * CW10 Programmer's Manual v1.5
55* IEEE 802.11 D10.0 55 * IEEE 802.11 D10.0
56* 56 *
57* -------------------------------------------------------------------- 57 * --------------------------------------------------------------------
58*/ 58 */
59 59
60#ifndef _PRISM2MGMT_H 60#ifndef _PRISM2MGMT_H
61#define _PRISM2MGMT_H 61#define _PRISM2MGMT_H
@@ -65,7 +65,8 @@ extern int prism2_reset_settletime;
65 65
66u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate); 66u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
67 67
68void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf); 68void prism2sta_ev_info(struct wlandevice *wlandev,
69 struct hfa384x_inf_frame *inf);
69void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status); 70void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
70void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status); 71void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
71void prism2sta_ev_alloc(struct wlandevice *wlandev); 72void prism2sta_ev_alloc(struct wlandevice *wlandev);
@@ -83,9 +84,11 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp);
83int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp); 84int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp);
84 85
85/*--------------------------------------------------------------- 86/*---------------------------------------------------------------
86* conversion functions going between wlan message data types and 87 * conversion functions going between wlan message data types and
87* Prism2 data types 88 * Prism2 data types
88---------------------------------------------------------------*/ 89 *---------------------------------------------------------------
90 */
91
89/* byte area conversion functions*/ 92/* byte area conversion functions*/
90void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len); 93void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
91 94
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 63ab6bc88654..8ea6a647d037 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,54 +1,54 @@
1/* src/prism2/driver/prism2mib.c 1/* src/prism2/driver/prism2mib.c
2* 2 *
3* Management request for mibset/mibget 3 * Management request for mibset/mibget
4* 4 *
5* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 5 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
6* -------------------------------------------------------------------- 6 * --------------------------------------------------------------------
7* 7 *
8* linux-wlan 8 * linux-wlan
9* 9 *
10* The contents of this file are subject to the Mozilla Public 10 * The contents of this file are subject to the Mozilla Public
11* License Version 1.1 (the "License"); you may not use this file 11 * License Version 1.1 (the "License"); you may not use this file
12* except in compliance with the License. You may obtain a copy of 12 * except in compliance with the License. You may obtain a copy of
13* the License at http://www.mozilla.org/MPL/ 13 * the License at http://www.mozilla.org/MPL/
14* 14 *
15* Software distributed under the License is distributed on an "AS 15 * Software distributed under the License is distributed on an "AS
16* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or 16 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
17* implied. See the License for the specific language governing 17 * implied. See the License for the specific language governing
18* rights and limitations under the License. 18 * rights and limitations under the License.
19* 19 *
20* Alternatively, the contents of this file may be used under the 20 * Alternatively, the contents of this file may be used under the
21* terms of the GNU Public License version 2 (the "GPL"), in which 21 * terms of the GNU Public License version 2 (the "GPL"), in which
22* case the provisions of the GPL are applicable instead of the 22 * case the provisions of the GPL are applicable instead of the
23* above. If you wish to allow the use of your version of this file 23 * above. If you wish to allow the use of your version of this file
24* only under the terms of the GPL and not to allow others to use 24 * only under the terms of the GPL and not to allow others to use
25* your version of this file under the MPL, indicate your decision 25 * your version of this file under the MPL, indicate your decision
26* by deleting the provisions above and replace them with the notice 26 * by deleting the provisions above and replace them with the notice
27* and other provisions required by the GPL. If you do not delete 27 * and other provisions required by the GPL. If you do not delete
28* the provisions above, a recipient may use your version of this 28 * the provisions above, a recipient may use your version of this
29* file under either the MPL or the GPL. 29 * file under either the MPL or the GPL.
30* 30 *
31* -------------------------------------------------------------------- 31 * --------------------------------------------------------------------
32* 32 *
33* Inquiries regarding the linux-wlan Open Source project can be 33 * Inquiries regarding the linux-wlan Open Source project can be
34* made directly to: 34 * made directly to:
35* 35 *
36* AbsoluteValue Systems Inc. 36 * AbsoluteValue Systems Inc.
37* info@linux-wlan.com 37 * info@linux-wlan.com
38* http://www.linux-wlan.com 38 * http://www.linux-wlan.com
39* 39 *
40* -------------------------------------------------------------------- 40 * --------------------------------------------------------------------
41* 41 *
42* Portions of the development of this software were funded by 42 * Portions of the development of this software were funded by
43* Intersil Corporation as part of PRISM(R) chipset product development. 43 * Intersil Corporation as part of PRISM(R) chipset product development.
44* 44 *
45* -------------------------------------------------------------------- 45 * --------------------------------------------------------------------
46* 46 *
47* The functions in this file handle the mibset/mibget management 47 * The functions in this file handle the mibset/mibget management
48* functions. 48 * functions.
49* 49 *
50* -------------------------------------------------------------------- 50 * --------------------------------------------------------------------
51*/ 51 */
52 52
53#include <linux/module.h> 53#include <linux/module.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
@@ -709,7 +709,7 @@ static int prism2mib_priv(struct mibrec *mib,
709 709
710 switch (mib->did) { 710 switch (mib->did) {
711 case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{ 711 case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
712 struct hfa384x_WPAData wpa; 712 struct hfa384x_wpa_data wpa;
713 713
714 if (isget) { 714 if (isget) {
715 hfa384x_drvr_getconfig(hw, 715 hfa384x_drvr_getconfig(hw,
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index e1b4a94292ff..984804b92e05 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -104,32 +104,33 @@ static void prism2sta_reset(struct wlandevice *wlandev);
104static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb, 104static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
105 union p80211_hdr *p80211_hdr, 105 union p80211_hdr *p80211_hdr,
106 struct p80211_metawep *p80211_wep); 106 struct p80211_metawep *p80211_wep);
107static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg); 107static int prism2sta_mlmerequest(struct wlandevice *wlandev,
108 struct p80211msg *msg);
108static int prism2sta_getcardinfo(struct wlandevice *wlandev); 109static int prism2sta_getcardinfo(struct wlandevice *wlandev);
109static int prism2sta_globalsetup(struct wlandevice *wlandev); 110static int prism2sta_globalsetup(struct wlandevice *wlandev);
110static int prism2sta_setmulticast(struct wlandevice *wlandev, 111static int prism2sta_setmulticast(struct wlandevice *wlandev,
111 struct net_device *dev); 112 struct net_device *dev);
112 113
113static void prism2sta_inf_handover(struct wlandevice *wlandev, 114static void prism2sta_inf_handover(struct wlandevice *wlandev,
114 struct hfa384x_InfFrame *inf); 115 struct hfa384x_inf_frame *inf);
115static void prism2sta_inf_tallies(struct wlandevice *wlandev, 116static void prism2sta_inf_tallies(struct wlandevice *wlandev,
116 struct hfa384x_InfFrame *inf); 117 struct hfa384x_inf_frame *inf);
117static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, 118static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
118 struct hfa384x_InfFrame *inf); 119 struct hfa384x_inf_frame *inf);
119static void prism2sta_inf_scanresults(struct wlandevice *wlandev, 120static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
120 struct hfa384x_InfFrame *inf); 121 struct hfa384x_inf_frame *inf);
121static void prism2sta_inf_chinforesults(struct wlandevice *wlandev, 122static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
122 struct hfa384x_InfFrame *inf); 123 struct hfa384x_inf_frame *inf);
123static void prism2sta_inf_linkstatus(struct wlandevice *wlandev, 124static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
124 struct hfa384x_InfFrame *inf); 125 struct hfa384x_inf_frame *inf);
125static void prism2sta_inf_assocstatus(struct wlandevice *wlandev, 126static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
126 struct hfa384x_InfFrame *inf); 127 struct hfa384x_inf_frame *inf);
127static void prism2sta_inf_authreq(struct wlandevice *wlandev, 128static void prism2sta_inf_authreq(struct wlandevice *wlandev,
128 struct hfa384x_InfFrame *inf); 129 struct hfa384x_inf_frame *inf);
129static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, 130static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
130 struct hfa384x_InfFrame *inf); 131 struct hfa384x_inf_frame *inf);
131static void prism2sta_inf_psusercnt(struct wlandevice *wlandev, 132static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
132 struct hfa384x_InfFrame *inf); 133 struct hfa384x_inf_frame *inf);
133 134
134/* 135/*
135 * prism2sta_open 136 * prism2sta_open
@@ -278,7 +279,8 @@ static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
278 * Call context: 279 * Call context:
279 * process thread 280 * process thread
280 */ 281 */
281static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg) 282static int prism2sta_mlmerequest(struct wlandevice *wlandev,
283 struct p80211msg *msg)
282{ 284{
283 struct hfa384x *hw = wlandev->priv; 285 struct hfa384x *hw = wlandev->priv;
284 286
@@ -370,9 +372,10 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *m
370 qualmsg->noise.status = 372 qualmsg->noise.status =
371 P80211ENUM_msgitem_status_data_ok; 373 P80211ENUM_msgitem_status_data_ok;
372 374
373 qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS); 375 qualmsg->link.data = le16_to_cpu(hw->qual.cq_curr_bss);
374 qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS); 376 qualmsg->level.data =
375 qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC); 377 le16_to_cpu(hw->qual.asl_curr_bss);
378 qualmsg->noise.data = le16_to_cpu(hw->qual.anl_curr_fc);
376 qualmsg->txrate.data = hw->txrate; 379 qualmsg->txrate.data = hw->txrate;
377 380
378 break; 381 break;
@@ -606,8 +609,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
606 hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor); 609 hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
607 610
608 netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n", 611 netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
609 hw->ident_nic.id, hw->ident_nic.major, 612 hw->ident_nic.id, hw->ident_nic.major,
610 hw->ident_nic.minor, hw->ident_nic.variant); 613 hw->ident_nic.minor, hw->ident_nic.variant);
611 614
612 /* Primary f/w identity */ 615 /* Primary f/w identity */
613 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY, 616 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
@@ -625,8 +628,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
625 hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor); 628 hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
626 629
627 netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n", 630 netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
628 hw->ident_pri_fw.id, hw->ident_pri_fw.major, 631 hw->ident_pri_fw.id, hw->ident_pri_fw.major,
629 hw->ident_pri_fw.minor, hw->ident_pri_fw.variant); 632 hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
630 633
631 /* Station (Secondary?) f/w identity */ 634 /* Station (Secondary?) f/w identity */
632 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY, 635 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
@@ -639,7 +642,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
639 642
640 if (hw->ident_nic.id < 0x8000) { 643 if (hw->ident_nic.id < 0x8000) {
641 netdev_err(wlandev->netdev, 644 netdev_err(wlandev->netdev,
642 "FATAL: Card is not an Intersil Prism2/2.5/3\n"); 645 "FATAL: Card is not an Intersil Prism2/2.5/3\n");
643 result = -1; 646 result = -1;
644 goto failed; 647 goto failed;
645 } 648 }
@@ -651,19 +654,19 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
651 hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor); 654 hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);
652 655
653 /* strip out the 'special' variant bits */ 656 /* strip out the 'special' variant bits */
654 hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15)); 657 hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14);
655 hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15))); 658 hw->ident_sta_fw.variant &= ~((u16)GENMASK(15, 14));
656 659
657 if (hw->ident_sta_fw.id == 0x1f) { 660 if (hw->ident_sta_fw.id == 0x1f) {
658 netdev_info(wlandev->netdev, 661 netdev_info(wlandev->netdev,
659 "ident: sta f/w: id=0x%02x %d.%d.%d\n", 662 "ident: sta f/w: id=0x%02x %d.%d.%d\n",
660 hw->ident_sta_fw.id, hw->ident_sta_fw.major, 663 hw->ident_sta_fw.id, hw->ident_sta_fw.major,
661 hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); 664 hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
662 } else { 665 } else {
663 netdev_info(wlandev->netdev, 666 netdev_info(wlandev->netdev,
664 "ident: ap f/w: id=0x%02x %d.%d.%d\n", 667 "ident: ap f/w: id=0x%02x %d.%d.%d\n",
665 hw->ident_sta_fw.id, hw->ident_sta_fw.major, 668 hw->ident_sta_fw.id, hw->ident_sta_fw.major,
666 hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); 669 hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
667 netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n"); 670 netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
668 goto failed; 671 goto failed;
669 } 672 }
@@ -687,10 +690,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
687 hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top); 690 hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
688 691
689 netdev_info(wlandev->netdev, 692 netdev_info(wlandev->netdev,
690 "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 693 "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
691 hw->cap_sup_mfi.role, hw->cap_sup_mfi.id, 694 hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
692 hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom, 695 hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
693 hw->cap_sup_mfi.top); 696 hw->cap_sup_mfi.top);
694 697
695 /* Compatibility range, Controller supplier */ 698 /* Compatibility range, Controller supplier */
696 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE, 699 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
@@ -711,10 +714,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
711 hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top); 714 hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
712 715
713 netdev_info(wlandev->netdev, 716 netdev_info(wlandev->netdev,
714 "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 717 "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
715 hw->cap_sup_cfi.role, hw->cap_sup_cfi.id, 718 hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
716 hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom, 719 hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
717 hw->cap_sup_cfi.top); 720 hw->cap_sup_cfi.top);
718 721
719 /* Compatibility range, Primary f/w supplier */ 722 /* Compatibility range, Primary f/w supplier */
720 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE, 723 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
@@ -735,10 +738,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
735 hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top); 738 hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
736 739
737 netdev_info(wlandev->netdev, 740 netdev_info(wlandev->netdev,
738 "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 741 "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
739 hw->cap_sup_pri.role, hw->cap_sup_pri.id, 742 hw->cap_sup_pri.role, hw->cap_sup_pri.id,
740 hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom, 743 hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
741 hw->cap_sup_pri.top); 744 hw->cap_sup_pri.top);
742 745
743 /* Compatibility range, Station f/w supplier */ 746 /* Compatibility range, Station f/w supplier */
744 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE, 747 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
@@ -791,10 +794,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
791 hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top); 794 hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
792 795
793 netdev_info(wlandev->netdev, 796 netdev_info(wlandev->netdev,
794 "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 797 "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
795 hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id, 798 hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
796 hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom, 799 hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
797 hw->cap_act_pri_cfi.top); 800 hw->cap_act_pri_cfi.top);
798 801
799 /* Compatibility range, sta f/w actor, CFI supplier */ 802 /* Compatibility range, sta f/w actor, CFI supplier */
800 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES, 803 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
@@ -815,10 +818,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
815 hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top); 818 hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
816 819
817 netdev_info(wlandev->netdev, 820 netdev_info(wlandev->netdev,
818 "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 821 "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
819 hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id, 822 hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
820 hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom, 823 hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
821 hw->cap_act_sta_cfi.top); 824 hw->cap_act_sta_cfi.top);
822 825
823 /* Compatibility range, sta f/w actor, MFI supplier */ 826 /* Compatibility range, sta f/w actor, MFI supplier */
824 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES, 827 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
@@ -839,10 +842,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
839 hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top); 842 hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
840 843
841 netdev_info(wlandev->netdev, 844 netdev_info(wlandev->netdev,
842 "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", 845 "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
843 hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id, 846 hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
844 hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom, 847 hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
845 hw->cap_act_sta_mfi.top); 848 hw->cap_act_sta_mfi.top);
846 849
847 /* Serial Number */ 850 /* Serial Number */
848 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER, 851 result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
@@ -920,7 +923,7 @@ static int prism2sta_globalsetup(struct wlandevice *wlandev)
920} 923}
921 924
922static int prism2sta_setmulticast(struct wlandevice *wlandev, 925static int prism2sta_setmulticast(struct wlandevice *wlandev,
923 struct net_device *dev) 926 struct net_device *dev)
924{ 927{
925 int result = 0; 928 int result = 0;
926 struct hfa384x *hw = wlandev->priv; 929 struct hfa384x *hw = wlandev->priv;
@@ -962,7 +965,7 @@ exit:
962 * interrupt 965 * interrupt
963 */ 966 */
964static void prism2sta_inf_handover(struct wlandevice *wlandev, 967static void prism2sta_inf_handover(struct wlandevice *wlandev,
965 struct hfa384x_InfFrame *inf) 968 struct hfa384x_inf_frame *inf)
966{ 969{
967 pr_debug("received infoframe:HANDOVER (unhandled)\n"); 970 pr_debug("received infoframe:HANDOVER (unhandled)\n");
968} 971}
@@ -985,7 +988,7 @@ static void prism2sta_inf_handover(struct wlandevice *wlandev,
985 * interrupt 988 * interrupt
986 */ 989 */
987static void prism2sta_inf_tallies(struct wlandevice *wlandev, 990static void prism2sta_inf_tallies(struct wlandevice *wlandev,
988 struct hfa384x_InfFrame *inf) 991 struct hfa384x_inf_frame *inf)
989{ 992{
990 struct hfa384x *hw = wlandev->priv; 993 struct hfa384x *hw = wlandev->priv;
991 u16 *src16; 994 u16 *src16;
@@ -999,7 +1002,7 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
999 * record length of the info record. 1002 * record length of the info record.
1000 */ 1003 */
1001 1004
1002 cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32); 1005 cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32);
1003 if (inf->framelen > 22) { 1006 if (inf->framelen > 22) {
1004 dst = (u32 *)&hw->tallies; 1007 dst = (u32 *)&hw->tallies;
1005 src32 = (u32 *)&inf->info.commtallies32; 1008 src32 = (u32 *)&inf->info.commtallies32;
@@ -1031,19 +1034,19 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
1031 * interrupt 1034 * interrupt
1032 */ 1035 */
1033static void prism2sta_inf_scanresults(struct wlandevice *wlandev, 1036static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
1034 struct hfa384x_InfFrame *inf) 1037 struct hfa384x_inf_frame *inf)
1035{ 1038{
1036 struct hfa384x *hw = wlandev->priv; 1039 struct hfa384x *hw = wlandev->priv;
1037 int nbss; 1040 int nbss;
1038 struct hfa384x_ScanResult *sr = &(inf->info.scanresult); 1041 struct hfa384x_scan_result *sr = &inf->info.scanresult;
1039 int i; 1042 int i;
1040 struct hfa384x_JoinRequest_data joinreq; 1043 struct hfa384x_join_request_data joinreq;
1041 int result; 1044 int result;
1042 1045
1043 /* Get the number of results, first in bytes, then in results */ 1046 /* Get the number of results, first in bytes, then in results */
1044 nbss = (inf->framelen * sizeof(u16)) - 1047 nbss = (inf->framelen * sizeof(u16)) -
1045 sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason); 1048 sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
1046 nbss /= sizeof(struct hfa384x_ScanResultSub); 1049 nbss /= sizeof(struct hfa384x_scan_result_sub);
1047 1050
1048 /* Print em */ 1051 /* Print em */
1049 pr_debug("rx scanresults, reason=%d, nbss=%d:\n", 1052 pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
@@ -1064,7 +1067,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
1064 &joinreq, HFA384x_RID_JOINREQUEST_LEN); 1067 &joinreq, HFA384x_RID_JOINREQUEST_LEN);
1065 if (result) { 1068 if (result) {
1066 netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n", 1069 netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
1067 result); 1070 result);
1068 } 1071 }
1069} 1072}
1070 1073
@@ -1086,7 +1089,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
1086 * interrupt 1089 * interrupt
1087 */ 1090 */
1088static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, 1091static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
1089 struct hfa384x_InfFrame *inf) 1092 struct hfa384x_inf_frame *inf)
1090{ 1093{
1091 struct hfa384x *hw = wlandev->priv; 1094 struct hfa384x *hw = wlandev->priv;
1092 int nbss; 1095 int nbss;
@@ -1099,7 +1102,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
1099 1102
1100 kfree(hw->scanresults); 1103 kfree(hw->scanresults);
1101 1104
1102 hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC); 1105 hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC);
1103 1106
1104 if (nbss == 0) 1107 if (nbss == 0)
1105 nbss = -1; 1108 nbss = -1;
@@ -1127,7 +1130,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
1127 * interrupt 1130 * interrupt
1128 */ 1131 */
1129static void prism2sta_inf_chinforesults(struct wlandevice *wlandev, 1132static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
1130 struct hfa384x_InfFrame *inf) 1133 struct hfa384x_inf_frame *inf)
1131{ 1134{
1132 struct hfa384x *hw = wlandev->priv; 1135 struct hfa384x *hw = wlandev->priv;
1133 unsigned int i, n; 1136 unsigned int i, n;
@@ -1136,8 +1139,8 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
1136 le16_to_cpu(inf->info.chinforesult.scanchannels); 1139 le16_to_cpu(inf->info.chinforesult.scanchannels);
1137 1140
1138 for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) { 1141 for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
1139 struct hfa384x_ChInfoResultSub *result; 1142 struct hfa384x_ch_info_result_sub *result;
1140 struct hfa384x_ChInfoResultSub *chinforesult; 1143 struct hfa384x_ch_info_result_sub *chinforesult;
1141 int chan; 1144 int chan;
1142 1145
1143 if (!(hw->channel_info.results.scanchannels & (1 << i))) 1146 if (!(hw->channel_info.results.scanchannels & (1 << i)))
@@ -1179,10 +1182,10 @@ void prism2sta_processing_defer(struct work_struct *data)
1179 /* First let's process the auth frames */ 1182 /* First let's process the auth frames */
1180 { 1183 {
1181 struct sk_buff *skb; 1184 struct sk_buff *skb;
1182 struct hfa384x_InfFrame *inf; 1185 struct hfa384x_inf_frame *inf;
1183 1186
1184 while ((skb = skb_dequeue(&hw->authq))) { 1187 while ((skb = skb_dequeue(&hw->authq))) {
1185 inf = (struct hfa384x_InfFrame *)skb->data; 1188 inf = (struct hfa384x_inf_frame *)skb->data;
1186 prism2sta_inf_authreq_defer(wlandev, inf); 1189 prism2sta_inf_authreq_defer(wlandev, inf);
1187 } 1190 }
1188 1191
@@ -1294,7 +1297,7 @@ void prism2sta_processing_defer(struct work_struct *data)
1294 */ 1297 */
1295 if (wlandev->netdev->type == ARPHRD_ETHER) 1298 if (wlandev->netdev->type == ARPHRD_ETHER)
1296 netdev_info(wlandev->netdev, 1299 netdev_info(wlandev->netdev,
1297 "linkstatus=DISCONNECTED (unhandled)\n"); 1300 "linkstatus=DISCONNECTED (unhandled)\n");
1298 wlandev->macmode = WLAN_MACMODE_NONE; 1301 wlandev->macmode = WLAN_MACMODE_NONE;
1299 1302
1300 netif_carrier_off(wlandev->netdev); 1303 netif_carrier_off(wlandev->netdev);
@@ -1391,7 +1394,7 @@ void prism2sta_processing_defer(struct work_struct *data)
1391 * Disable Transmits, Ignore receives of data frames 1394 * Disable Transmits, Ignore receives of data frames
1392 */ 1395 */
1393 if (hw->join_ap && --hw->join_retries > 0) { 1396 if (hw->join_ap && --hw->join_retries > 0) {
1394 struct hfa384x_JoinRequest_data joinreq; 1397 struct hfa384x_join_request_data joinreq;
1395 1398
1396 joinreq = hw->joinreq; 1399 joinreq = hw->joinreq;
1397 /* Send the join request */ 1400 /* Send the join request */
@@ -1415,7 +1418,7 @@ void prism2sta_processing_defer(struct work_struct *data)
1415 default: 1418 default:
1416 /* This is bad, IO port problems? */ 1419 /* This is bad, IO port problems? */
1417 netdev_warn(wlandev->netdev, 1420 netdev_warn(wlandev->netdev,
1418 "unknown linkstatus=0x%02x\n", hw->link_status); 1421 "unknown linkstatus=0x%02x\n", hw->link_status);
1419 return; 1422 return;
1420 } 1423 }
1421 1424
@@ -1440,7 +1443,7 @@ void prism2sta_processing_defer(struct work_struct *data)
1440 * interrupt 1443 * interrupt
1441 */ 1444 */
1442static void prism2sta_inf_linkstatus(struct wlandevice *wlandev, 1445static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
1443 struct hfa384x_InfFrame *inf) 1446 struct hfa384x_inf_frame *inf)
1444{ 1447{
1445 struct hfa384x *hw = wlandev->priv; 1448 struct hfa384x *hw = wlandev->priv;
1446 1449
@@ -1468,10 +1471,10 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
1468 * interrupt 1471 * interrupt
1469 */ 1472 */
1470static void prism2sta_inf_assocstatus(struct wlandevice *wlandev, 1473static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
1471 struct hfa384x_InfFrame *inf) 1474 struct hfa384x_inf_frame *inf)
1472{ 1475{
1473 struct hfa384x *hw = wlandev->priv; 1476 struct hfa384x *hw = wlandev->priv;
1474 struct hfa384x_AssocStatus rec; 1477 struct hfa384x_assoc_status rec;
1475 int i; 1478 int i;
1476 1479
1477 memcpy(&rec, &inf->info.assocstatus, sizeof(rec)); 1480 memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
@@ -1529,7 +1532,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
1529 * 1532 *
1530 */ 1533 */
1531static void prism2sta_inf_authreq(struct wlandevice *wlandev, 1534static void prism2sta_inf_authreq(struct wlandevice *wlandev,
1532 struct hfa384x_InfFrame *inf) 1535 struct hfa384x_inf_frame *inf)
1533{ 1536{
1534 struct hfa384x *hw = wlandev->priv; 1537 struct hfa384x *hw = wlandev->priv;
1535 struct sk_buff *skb; 1538 struct sk_buff *skb;
@@ -1544,10 +1547,10 @@ static void prism2sta_inf_authreq(struct wlandevice *wlandev,
1544} 1547}
1545 1548
1546static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, 1549static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
1547 struct hfa384x_InfFrame *inf) 1550 struct hfa384x_inf_frame *inf)
1548{ 1551{
1549 struct hfa384x *hw = wlandev->priv; 1552 struct hfa384x *hw = wlandev->priv;
1550 struct hfa384x_authenticateStation_data rec; 1553 struct hfa384x_authenticate_station_data rec;
1551 1554
1552 int i, added, result, cnt; 1555 int i, added, result, cnt;
1553 u8 *addr; 1556 u8 *addr;
@@ -1718,7 +1721,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
1718 * interrupt 1721 * interrupt
1719 */ 1722 */
1720static void prism2sta_inf_psusercnt(struct wlandevice *wlandev, 1723static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
1721 struct hfa384x_InfFrame *inf) 1724 struct hfa384x_inf_frame *inf)
1722{ 1725{
1723 struct hfa384x *hw = wlandev->priv; 1726 struct hfa384x *hw = wlandev->priv;
1724 1727
@@ -1742,7 +1745,8 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
1742 * Call context: 1745 * Call context:
1743 * interrupt 1746 * interrupt
1744 */ 1747 */
1745void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf) 1748void prism2sta_ev_info(struct wlandevice *wlandev,
1749 struct hfa384x_inf_frame *inf)
1746{ 1750{
1747 inf->infotype = le16_to_cpu(inf->infotype); 1751 inf->infotype = le16_to_cpu(inf->infotype);
1748 /* Dispatch */ 1752 /* Dispatch */
@@ -1785,7 +1789,7 @@ void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
1785 break; 1789 break;
1786 default: 1790 default:
1787 netdev_warn(wlandev->netdev, 1791 netdev_warn(wlandev->netdev,
1788 "Unknown info type=0x%02x\n", inf->infotype); 1792 "Unknown info type=0x%02x\n", inf->infotype);
1789 break; 1793 break;
1790 } 1794 }
1791} 1795}
@@ -1859,32 +1863,32 @@ void prism2sta_ev_alloc(struct wlandevice *wlandev)
1859} 1863}
1860 1864
1861/* 1865/*
1862* create_wlan 1866 * create_wlan
1863* 1867 *
1864* Called at module init time. This creates the struct wlandevice structure 1868 * Called at module init time. This creates the struct wlandevice structure
1865* and initializes it with relevant bits. 1869 * and initializes it with relevant bits.
1866* 1870 *
1867* Arguments: 1871 * Arguments:
1868* none 1872 * none
1869* 1873 *
1870* Returns: 1874 * Returns:
1871* the created struct wlandevice structure. 1875 * the created struct wlandevice structure.
1872* 1876 *
1873* Side effects: 1877 * Side effects:
1874* also allocates the priv/hw structures. 1878 * also allocates the priv/hw structures.
1875* 1879 *
1876* Call context: 1880 * Call context:
1877* process thread 1881 * process thread
1878* 1882 *
1879*/ 1883 */
1880static struct wlandevice *create_wlan(void) 1884static struct wlandevice *create_wlan(void)
1881{ 1885{
1882 struct wlandevice *wlandev = NULL; 1886 struct wlandevice *wlandev = NULL;
1883 struct hfa384x *hw = NULL; 1887 struct hfa384x *hw = NULL;
1884 1888
1885 /* Alloc our structures */ 1889 /* Alloc our structures */
1886 wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL); 1890 wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
1887 hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL); 1891 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
1888 1892
1889 if (!wlandev || !hw) { 1893 if (!wlandev || !hw) {
1890 kfree(wlandev); 1894 kfree(wlandev);
@@ -1943,9 +1947,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
1943 } 1947 }
1944 1948
1945 pr_debug("commsqual %d %d %d\n", 1949 pr_debug("commsqual %d %d %d\n",
1946 le16_to_cpu(hw->qual.CQ_currBSS), 1950 le16_to_cpu(hw->qual.cq_curr_bss),
1947 le16_to_cpu(hw->qual.ASL_currBSS), 1951 le16_to_cpu(hw->qual.asl_curr_bss),
1948 le16_to_cpu(hw->qual.ANL_currFC)); 1952 le16_to_cpu(hw->qual.anl_curr_fc));
1949 } 1953 }
1950 1954
1951 /* Get the signal rate */ 1955 /* Get the signal rate */
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 85079fea7152..7a80a90f229f 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -139,7 +139,7 @@ static const struct _XGIbios_mode {
139 139
140static const unsigned short XGI310paneltype[] = { 140static const unsigned short XGI310paneltype[] = {
141 LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024, 141 LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
142 LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960, 142 LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
143 LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200, 143 LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
144 LCD_1024x768, LCD_1024x768, LCD_1024x768}; 144 LCD_1024x768, LCD_1024x768, LCD_1024x768};
145 145
@@ -174,7 +174,7 @@ static const struct _XGI_tvtype {
174 {"NTSC", 2}, 174 {"NTSC", 2},
175 {"pal", 1}, 175 {"pal", 1},
176 {"ntsc", 2}, 176 {"ntsc", 2},
177 {"\0", -1} 177 {"\0", -1}
178}; 178};
179 179
180static const struct _XGI_vrate { 180static const struct _XGI_vrate {
@@ -183,44 +183,44 @@ static const struct _XGI_vrate {
183 u16 yres; 183 u16 yres;
184 u16 refresh; 184 u16 refresh;
185} XGIfb_vrate[] = { 185} XGIfb_vrate[] = {
186 {1, 640, 480, 60}, {2, 640, 480, 72}, 186 {1, 640, 480, 60}, {2, 640, 480, 72},
187 {3, 640, 480, 75}, {4, 640, 480, 85}, 187 {3, 640, 480, 75}, {4, 640, 480, 85},
188 188
189 {5, 640, 480, 100}, {6, 640, 480, 120}, 189 {5, 640, 480, 100}, {6, 640, 480, 120},
190 {7, 640, 480, 160}, {8, 640, 480, 200}, 190 {7, 640, 480, 160}, {8, 640, 480, 200},
191 191
192 {1, 720, 480, 60}, 192 {1, 720, 480, 60},
193 {1, 720, 576, 58}, 193 {1, 720, 576, 58},
194 {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85}, 194 {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
195 {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75}, 195 {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
196 {4, 800, 600, 85}, {5, 800, 600, 100}, 196 {4, 800, 600, 85}, {5, 800, 600, 100},
197 {6, 800, 600, 120}, {7, 800, 600, 160}, 197 {6, 800, 600, 120}, {7, 800, 600, 160},
198 198
199 {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75}, 199 {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
200 {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120}, 200 {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
201 {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85}, 201 {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
202 {1, 1024, 600, 60}, 202 {1, 1024, 600, 60},
203 {1, 1152, 768, 60}, 203 {1, 1152, 768, 60},
204 {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85}, 204 {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
205 {1, 1280, 768, 60}, 205 {1, 1280, 768, 60},
206 {1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85}, 206 {1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85},
207 {1, 1280, 960, 70}, 207 {1, 1280, 960, 70},
208 {1, 1400, 1050, 60}, 208 {1, 1400, 1050, 60},
209 {1, 1600, 1200, 60}, {2, 1600, 1200, 65}, 209 {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
210 {3, 1600, 1200, 70}, {4, 1600, 1200, 75}, 210 {3, 1600, 1200, 70}, {4, 1600, 1200, 75},
211 211
212 {5, 1600, 1200, 85}, {6, 1600, 1200, 100}, 212 {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
213 {7, 1600, 1200, 120}, 213 {7, 1600, 1200, 120},
214 214
215 {1, 1920, 1440, 60}, {2, 1920, 1440, 65}, 215 {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
216 {3, 1920, 1440, 70}, {4, 1920, 1440, 75}, 216 {3, 1920, 1440, 70}, {4, 1920, 1440, 75},
217 217
218 {5, 1920, 1440, 85}, {6, 1920, 1440, 100}, 218 {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
219 {1, 2048, 1536, 60}, {2, 2048, 1536, 65}, 219 {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
220 {3, 2048, 1536, 70}, {4, 2048, 1536, 75}, 220 {3, 2048, 1536, 70}, {4, 2048, 1536, 75},
221 221
222 {5, 2048, 1536, 85}, 222 {5, 2048, 1536, 85},
223 {0, 0, 0, 0} 223 {0, 0, 0, 0}
224}; 224};
225 225
226static const struct _XGI_TV_filter { 226static const struct _XGI_TV_filter {
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 0c78491ff5a1..777cd6e11694 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -56,8 +56,8 @@ static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
56/* --------------- Hardware Access Routines -------------------------- */ 56/* --------------- Hardware Access Routines -------------------------- */
57 57
58static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, 58static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
59 struct xgi_hw_device_info *HwDeviceExtension, 59 struct xgi_hw_device_info *HwDeviceExtension,
60 unsigned char modeno) 60 unsigned char modeno)
61{ 61{
62 unsigned short ModeNo = modeno; 62 unsigned short ModeNo = modeno;
63 unsigned short ModeIdIndex = 0, ClockIndex = 0; 63 unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
68 XGI_SearchModeID(ModeNo, &ModeIdIndex); 68 XGI_SearchModeID(ModeNo, &ModeIdIndex);
69 69
70 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, 70 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
71 ModeIdIndex, XGI_Pr); 71 ModeIdIndex, XGI_Pr);
72 72
73 ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; 73 ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
74 74
@@ -76,11 +76,11 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
76} 76}
77 77
78static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, 78static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
79 struct xgi_hw_device_info *HwDeviceExtension, 79 struct xgi_hw_device_info *HwDeviceExtension,
80 unsigned char modeno, 80 unsigned char modeno, u32 *left_margin,
81 u32 *left_margin, u32 *right_margin, u32 *upper_margin, 81 u32 *right_margin, u32 *upper_margin,
82 u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync, 82 u32 *lower_margin, u32 *hsync_len,
83 u32 *vmode) 83 u32 *vsync_len, u32 *sync, u32 *vmode)
84{ 84{
85 unsigned short ModeNo = modeno; 85 unsigned short ModeNo = modeno;
86 unsigned short ModeIdIndex, index = 0; 86 unsigned short ModeIdIndex, index = 0;
@@ -95,7 +95,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
95 if (!XGI_SearchModeID(ModeNo, &ModeIdIndex)) 95 if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
96 return 0; 96 return 0;
97 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, 97 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
98 ModeIdIndex, XGI_Pr); 98 ModeIdIndex, XGI_Pr);
99 index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC; 99 index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
100 100
101 sr_data = XGI_CRT1Table[index].CR[5]; 101 sr_data = XGI_CRT1Table[index].CR[5];
@@ -105,7 +105,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
105 cr_data = XGI_CRT1Table[index].CR[3]; 105 cr_data = XGI_CRT1Table[index].CR[3];
106 106
107 /* Horizontal retrace (=sync) start */ 107 /* Horizontal retrace (=sync) start */
108 HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2); 108 HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2);
109 F = HRS - HDE - 3; 109 F = HRS - HDE - 3;
110 110
111 sr_data = XGI_CRT1Table[index].CR[6]; 111 sr_data = XGI_CRT1Table[index].CR[6];
@@ -115,8 +115,8 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
115 cr_data2 = XGI_CRT1Table[index].CR[4]; 115 cr_data2 = XGI_CRT1Table[index].CR[4];
116 116
117 /* Horizontal blank end */ 117 /* Horizontal blank end */
118 HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2) 118 HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2)
119 | ((unsigned short) (sr_data & 0x03) << 6); 119 | ((unsigned short)(sr_data & 0x03) << 6);
120 120
121 /* Horizontal retrace (=sync) end */ 121 /* Horizontal retrace (=sync) end */
122 HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3); 122 HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
@@ -142,15 +142,15 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
142 cr_data = XGI_CRT1Table[index].CR[10]; 142 cr_data = XGI_CRT1Table[index].CR[10];
143 143
144 /* Vertical retrace (=sync) start */ 144 /* Vertical retrace (=sync) start */
145 VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6) 145 VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6)
146 | ((unsigned short) (cr_data2 & 0x80) << 2) 146 | ((unsigned short)(cr_data2 & 0x80) << 2)
147 | ((unsigned short) (sr_data & 0x08) << 7); 147 | ((unsigned short)(sr_data & 0x08) << 7);
148 F = VRS + 1 - VDE; 148 F = VRS + 1 - VDE;
149 149
150 cr_data = XGI_CRT1Table[index].CR[13]; 150 cr_data = XGI_CRT1Table[index].CR[13];
151 151
152 /* Vertical blank end */ 152 /* Vertical blank end */
153 VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4); 153 VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4);
154 temp = VBE - ((VDE - 1) & 511); 154 temp = VBE - ((VDE - 1) & 511);
155 B = (temp > 0) ? temp : (temp + 512); 155 B = (temp > 0) ? temp : (temp + 512);
156 156
@@ -231,11 +231,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
231{ 231{
232 int i = 0; 232 int i = 0;
233 233
234 while ((XGIbios_mode[i].mode_no != 0) 234 while ((XGIbios_mode[i].mode_no != 0) &&
235 && (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) { 235 (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
236 if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) 236 if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
237 && (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) 237 (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
238 && (XGIbios_mode[i].bpp == 8)) { 238 (XGIbios_mode[i].bpp == 8)) {
239 return i; 239 return i;
240 } 240 }
241 i++; 241 i++;
@@ -384,9 +384,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
384 return -1; 384 return -1;
385 break; 385 break;
386 case 640: 386 case 640:
387 if ((XGIbios_mode[myindex].yres != 400) 387 if ((XGIbios_mode[myindex].yres != 400) &&
388 && (XGIbios_mode[myindex].yres 388 (XGIbios_mode[myindex].yres != 480))
389 != 480))
390 return -1; 389 return -1;
391 break; 390 break;
392 case 800: 391 case 800:
@@ -518,7 +517,7 @@ static void XGIfb_search_crt2type(const char *name)
518{ 517{
519 int i = 0; 518 int i = 0;
520 519
521 if (name == NULL) 520 if (!name)
522 return; 521 return;
523 522
524 while (XGI_crt2type[i].type_no != -1) { 523 while (XGI_crt2type[i].type_no != -1) {
@@ -562,7 +561,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
562 != 1)) { 561 != 1)) {
563 pr_debug("Adjusting rate from %d down to %d\n", 562 pr_debug("Adjusting rate from %d down to %d\n",
564 rate, 563 rate,
565 XGIfb_vrate[i-1].refresh); 564 XGIfb_vrate[i - 1].refresh);
566 xgifb_info->rate_idx = 565 xgifb_info->rate_idx =
567 XGIfb_vrate[i - 1].idx; 566 XGIfb_vrate[i - 1].idx;
568 xgifb_info->refresh_rate = 567 xgifb_info->refresh_rate =
@@ -589,7 +588,7 @@ static void XGIfb_search_tvstd(const char *name)
589{ 588{
590 int i = 0; 589 int i = 0;
591 590
592 if (name == NULL) 591 if (!name)
593 return; 592 return;
594 593
595 while (XGI_tvtype[i].type_no != -1) { 594 while (XGI_tvtype[i].type_no != -1) {
@@ -683,7 +682,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
683 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30); 682 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
684 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31); 683 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
685 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33, 684 xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
686 (xgifb_info->rate_idx & 0x0F)); 685 (xgifb_info->rate_idx & 0x0F));
687} 686}
688 687
689static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) 688static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
@@ -730,7 +729,6 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
730 729
731 if (xgifb_info->display2 == XGIFB_DISP_TV && 730 if (xgifb_info->display2 == XGIFB_DISP_TV &&
732 xgifb_info->hasVB == HASVB_301) { 731 xgifb_info->hasVB == HASVB_301) {
733
734 reg = xgifb_reg_get(XGIPART4, 0x01); 732 reg = xgifb_reg_get(XGIPART4, 0x01);
735 733
736 if (reg < 0xB0) { /* Set filter for XGI301 */ 734 if (reg < 0xB0) { /* Set filter for XGI301 */
@@ -763,16 +761,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
763 0x01); 761 0x01);
764 762
765 if (xgifb_info->TV_type == TVMODE_NTSC) { 763 if (xgifb_info->TV_type == TVMODE_NTSC) {
766
767 xgifb_reg_and(XGIPART2, 0x3a, 0x1f); 764 xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
768 765
769 if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { 766 if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
770
771 xgifb_reg_and(XGIPART2, 0x30, 0xdf); 767 xgifb_reg_and(XGIPART2, 0x30, 0xdf);
772 768
773 } else if (xgifb_info->TV_plug 769 } else if (xgifb_info->TV_plug
774 == TVPLUG_COMPOSITE) { 770 == TVPLUG_COMPOSITE) {
775
776 xgifb_reg_or(XGIPART2, 0x30, 0x20); 771 xgifb_reg_or(XGIPART2, 0x30, 0x20);
777 772
778 switch (xgifb_info->video_width) { 773 switch (xgifb_info->video_width) {
@@ -822,16 +817,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
822 } 817 }
823 818
824 } else if (xgifb_info->TV_type == TVMODE_PAL) { 819 } else if (xgifb_info->TV_type == TVMODE_PAL) {
825
826 xgifb_reg_and(XGIPART2, 0x3A, 0x1F); 820 xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
827 821
828 if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { 822 if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
829
830 xgifb_reg_and(XGIPART2, 0x30, 0xDF); 823 xgifb_reg_and(XGIPART2, 0x30, 0xDF);
831 824
832 } else if (xgifb_info->TV_plug 825 } else if (xgifb_info->TV_plug
833 == TVPLUG_COMPOSITE) { 826 == TVPLUG_COMPOSITE) {
834
835 xgifb_reg_or(XGIPART2, 0x30, 0x20); 827 xgifb_reg_or(XGIPART2, 0x30, 0x20);
836 828
837 switch (xgifb_info->video_width) { 829 switch (xgifb_info->video_width) {
@@ -912,7 +904,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
912} 904}
913 905
914static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, 906static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
915 struct fb_info *info) 907 struct fb_info *info)
916{ 908{
917 struct xgifb_video_info *xgifb_info = info->par; 909 struct xgifb_video_info *xgifb_info = info->par;
918 struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info; 910 struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
@@ -945,17 +937,15 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
945 if (var->pixclock) { 937 if (var->pixclock) {
946 drate = 1000000000 / var->pixclock; 938 drate = 1000000000 / var->pixclock;
947 hrate = (drate * 1000) / htotal; 939 hrate = (drate * 1000) / htotal;
948 xgifb_info->refresh_rate = (unsigned int) (hrate * 2 940 xgifb_info->refresh_rate = (unsigned int)(hrate * 2
949 / vtotal); 941 / vtotal);
950 } else { 942 } else {
951 xgifb_info->refresh_rate = 60; 943 xgifb_info->refresh_rate = 60;
952 } 944 }
953 945
954 pr_debug("Change mode to %dx%dx%d-%dHz\n", 946 pr_debug("Change mode to %dx%dx%d-%dHz\n",
955 var->xres, 947 var->xres, var->yres, var->bits_per_pixel,
956 var->yres, 948 xgifb_info->refresh_rate);
957 var->bits_per_pixel,
958 xgifb_info->refresh_rate);
959 949
960 old_mode = xgifb_info->mode_idx; 950 old_mode = xgifb_info->mode_idx;
961 xgifb_info->mode_idx = 0; 951 xgifb_info->mode_idx = 0;
@@ -992,7 +982,6 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
992 } 982 }
993 983
994 if (isactive) { 984 if (isactive) {
995
996 XGIfb_pre_setmode(xgifb_info); 985 XGIfb_pre_setmode(xgifb_info);
997 if (XGISetModeNew(xgifb_info, hw_info, 986 if (XGISetModeNew(xgifb_info, hw_info,
998 XGIbios_mode[xgifb_info->mode_idx].mode_no) 987 XGIbios_mode[xgifb_info->mode_idx].mode_no)
@@ -1064,7 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
1064 break; 1053 break;
1065 } 1054 }
1066 } 1055 }
1067 XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/ 1056 XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */
1068 1057
1069 dumpVGAReg(xgifb_info); 1058 dumpVGAReg(xgifb_info);
1070 return 0; 1059 return 0;
@@ -1150,7 +1139,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
1150 } 1139 }
1151 break; 1140 break;
1152 case 16: 1141 case 16:
1153 ((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800)) 1142 ((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800))
1154 | ((green & 0xfc00) >> 5) | ((blue & 0xf800) 1143 | ((green & 0xfc00) >> 5) | ((blue & 0xf800)
1155 >> 11); 1144 >> 11);
1156 break; 1145 break;
@@ -1158,7 +1147,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
1158 red >>= 8; 1147 red >>= 8;
1159 green >>= 8; 1148 green >>= 8;
1160 blue >>= 8; 1149 blue >>= 8;
1161 ((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green 1150 ((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green
1162 << 8) | (blue); 1151 << 8) | (blue);
1163 break; 1152 break;
1164 } 1153 }
@@ -1168,7 +1157,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
1168/* ----------- FBDev related routines for all series ---------- */ 1157/* ----------- FBDev related routines for all series ---------- */
1169 1158
1170static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con, 1159static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
1171 struct fb_info *info) 1160 struct fb_info *info)
1172{ 1161{
1173 struct xgifb_video_info *xgifb_info = info->par; 1162 struct xgifb_video_info *xgifb_info = info->par;
1174 1163
@@ -1250,7 +1239,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1250 drate = 1000000000 / var->pixclock; 1239 drate = 1000000000 / var->pixclock;
1251 hrate = (drate * 1000) / htotal; 1240 hrate = (drate * 1000) / htotal;
1252 xgifb_info->refresh_rate = 1241 xgifb_info->refresh_rate =
1253 (unsigned int) (hrate * 2 / vtotal); 1242 (unsigned int)(hrate * 2 / vtotal);
1254 pr_debug( 1243 pr_debug(
1255 "%s: pixclock = %d ,htotal=%d, vtotal=%d\n" 1244 "%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
1256 "%s: drate=%d, hrate=%d, refresh_rate=%d\n", 1245 "%s: drate=%d, hrate=%d, refresh_rate=%d\n",
@@ -1262,10 +1251,10 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1262 1251
1263 search_idx = 0; 1252 search_idx = 0;
1264 while ((XGIbios_mode[search_idx].mode_no != 0) && 1253 while ((XGIbios_mode[search_idx].mode_no != 0) &&
1265 (XGIbios_mode[search_idx].xres <= var->xres)) { 1254 (XGIbios_mode[search_idx].xres <= var->xres)) {
1266 if ((XGIbios_mode[search_idx].xres == var->xres) && 1255 if ((XGIbios_mode[search_idx].xres == var->xres) &&
1267 (XGIbios_mode[search_idx].yres == var->yres) && 1256 (XGIbios_mode[search_idx].yres == var->yres) &&
1268 (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) { 1257 (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
1269 if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) { 1258 if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
1270 found_mode = 1; 1259 found_mode = 1;
1271 break; 1260 break;
@@ -1275,9 +1264,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1275 } 1264 }
1276 1265
1277 if (!found_mode) { 1266 if (!found_mode) {
1278
1279 pr_err("%dx%dx%d is no valid mode\n", 1267 pr_err("%dx%dx%d is no valid mode\n",
1280 var->xres, var->yres, var->bits_per_pixel); 1268 var->xres, var->yres, var->bits_per_pixel);
1281 search_idx = 0; 1269 search_idx = 0;
1282 while (XGIbios_mode[search_idx].mode_no != 0) { 1270 while (XGIbios_mode[search_idx].mode_no != 0) {
1283 if ((var->xres <= XGIbios_mode[search_idx].xres) && 1271 if ((var->xres <= XGIbios_mode[search_idx].xres) &&
@@ -1296,11 +1284,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1296 var->xres = XGIbios_mode[search_idx].xres; 1284 var->xres = XGIbios_mode[search_idx].xres;
1297 var->yres = XGIbios_mode[search_idx].yres; 1285 var->yres = XGIbios_mode[search_idx].yres;
1298 pr_debug("Adapted to mode %dx%dx%d\n", 1286 pr_debug("Adapted to mode %dx%dx%d\n",
1299 var->xres, var->yres, var->bits_per_pixel); 1287 var->xres, var->yres, var->bits_per_pixel);
1300 1288
1301 } else { 1289 } else {
1302 pr_err("Failed to find similar mode to %dx%dx%d\n", 1290 pr_err("Failed to find similar mode to %dx%dx%d\n",
1303 var->xres, var->yres, var->bits_per_pixel); 1291 var->xres, var->yres, var->bits_per_pixel);
1304 return -EINVAL; 1292 return -EINVAL;
1305 } 1293 }
1306 } 1294 }
@@ -1332,7 +1320,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1332} 1320}
1333 1321
1334static int XGIfb_pan_display(struct fb_var_screeninfo *var, 1322static int XGIfb_pan_display(struct fb_var_screeninfo *var,
1335 struct fb_info *info) 1323 struct fb_info *info)
1336{ 1324{
1337 int err; 1325 int err;
1338 1326
@@ -1344,9 +1332,8 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
1344 if (var->vmode & FB_VMODE_YWRAP) { 1332 if (var->vmode & FB_VMODE_YWRAP) {
1345 if (var->yoffset >= info->var.yres_virtual || var->xoffset) 1333 if (var->yoffset >= info->var.yres_virtual || var->xoffset)
1346 return -EINVAL; 1334 return -EINVAL;
1347 } else if (var->xoffset + info->var.xres > info->var.xres_virtual 1335 } else if (var->xoffset + info->var.xres > info->var.xres_virtual ||
1348 || var->yoffset + info->var.yres 1336 var->yoffset + info->var.yres > info->var.yres_virtual) {
1349 > info->var.yres_virtual) {
1350 return -EINVAL; 1337 return -EINVAL;
1351 } 1338 }
1352 err = XGIfb_pan_var(var, info); 1339 err = XGIfb_pan_var(var, info);
@@ -1401,7 +1388,6 @@ static struct fb_ops XGIfb_ops = {
1401 1388
1402static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info) 1389static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
1403{ 1390{
1404
1405 u8 ChannelNum, tmp; 1391 u8 ChannelNum, tmp;
1406 u8 reg = 0; 1392 u8 reg = 0;
1407 1393
@@ -1474,10 +1460,8 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
1474 xgifb_info->video_size = xgifb_info->video_size * ChannelNum; 1460 xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
1475 1461
1476 pr_info("SR14=%x DramSzie %x ChannelNum %x\n", 1462 pr_info("SR14=%x DramSzie %x ChannelNum %x\n",
1477 reg, 1463 reg, xgifb_info->video_size, ChannelNum);
1478 xgifb_info->video_size, ChannelNum);
1479 return 0; 1464 return 0;
1480
1481} 1465}
1482 1466
1483static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info) 1467static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
@@ -1597,7 +1581,6 @@ static int __init XGIfb_setup(char *options)
1597 pr_info("Options: %s\n", options); 1581 pr_info("Options: %s\n", options);
1598 1582
1599 while ((this_opt = strsep(&options, ",")) != NULL) { 1583 while ((this_opt = strsep(&options, ",")) != NULL) {
1600
1601 if (!*this_opt) 1584 if (!*this_opt)
1602 continue; 1585 continue;
1603 1586
@@ -1634,8 +1617,7 @@ static int __init XGIfb_setup(char *options)
1634 return 0; 1617 return 0;
1635} 1618}
1636 1619
1637static int xgifb_probe(struct pci_dev *pdev, 1620static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1638 const struct pci_device_id *ent)
1639{ 1621{
1640 u8 reg, reg1; 1622 u8 reg, reg1;
1641 u8 CR48, CR38; 1623 u8 CR48, CR38;
@@ -1670,7 +1652,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1670 xgifb_info->mmio_size = pci_resource_len(pdev, 1); 1652 xgifb_info->mmio_size = pci_resource_len(pdev, 1);
1671 xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30; 1653 xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
1672 dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n", 1654 dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n",
1673 (u64) pci_resource_start(pdev, 2), 1655 (u64)pci_resource_start(pdev, 2),
1674 xgifb_info->vga_base); 1656 xgifb_info->vga_base);
1675 1657
1676 if (pci_enable_device(pdev)) { 1658 if (pci_enable_device(pdev)) {
@@ -1688,7 +1670,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1688 xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD); 1670 xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
1689 reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD); 1671 reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
1690 1672
1691 if (reg1 != 0xa1) { /*I/O error */ 1673 if (reg1 != 0xa1) { /* I/O error */
1692 dev_err(&pdev->dev, "I/O error\n"); 1674 dev_err(&pdev->dev, "I/O error\n");
1693 ret = -EIO; 1675 ret = -EIO;
1694 goto error_disable; 1676 goto error_disable;
@@ -1698,7 +1680,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1698 case PCI_DEVICE_ID_XGI_20: 1680 case PCI_DEVICE_ID_XGI_20:
1699 xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN); 1681 xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
1700 CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1); 1682 CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
1701 if (CR48&GPIOG_READ) 1683 if (CR48 & GPIOG_READ)
1702 xgifb_info->chip = XG21; 1684 xgifb_info->chip = XG21;
1703 else 1685 else
1704 xgifb_info->chip = XG20; 1686 xgifb_info->chip = XG20;
@@ -1727,7 +1709,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1727 xgifb_info->video_size = video_size_max; 1709 xgifb_info->video_size = video_size_max;
1728 } 1710 }
1729 1711
1730 /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ 1712 /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
1731 xgifb_reg_or(XGISR, 1713 xgifb_reg_or(XGISR,
1732 IND_SIS_PCI_ADDRESS_SET, 1714 IND_SIS_PCI_ADDRESS_SET,
1733 (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE)); 1715 (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
@@ -1740,7 +1722,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1740 xgifb_info->video_size, 1722 xgifb_info->video_size,
1741 "XGIfb FB")) { 1723 "XGIfb FB")) {
1742 dev_err(&pdev->dev, "Unable request memory size %x\n", 1724 dev_err(&pdev->dev, "Unable request memory size %x\n",
1743 xgifb_info->video_size); 1725 xgifb_info->video_size);
1744 dev_err(&pdev->dev, 1726 dev_err(&pdev->dev,
1745 "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n"); 1727 "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
1746 ret = -ENODEV; 1728 ret = -ENODEV;
@@ -1763,13 +1745,13 @@ static int xgifb_probe(struct pci_dev *pdev,
1763 1745
1764 dev_info(&pdev->dev, 1746 dev_info(&pdev->dev,
1765 "Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n", 1747 "Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n",
1766 (u64) xgifb_info->video_base, 1748 (u64)xgifb_info->video_base,
1767 xgifb_info->video_vbase, 1749 xgifb_info->video_vbase,
1768 xgifb_info->video_size / 1024); 1750 xgifb_info->video_size / 1024);
1769 1751
1770 dev_info(&pdev->dev, 1752 dev_info(&pdev->dev,
1771 "MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n", 1753 "MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n",
1772 (u64) xgifb_info->mmio_base, xgifb_info->mmio_vbase, 1754 (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
1773 xgifb_info->mmio_size / 1024); 1755 xgifb_info->mmio_size / 1024);
1774 1756
1775 pci_set_drvdata(pdev, xgifb_info); 1757 pci_set_drvdata(pdev, xgifb_info);
@@ -1784,9 +1766,9 @@ static int xgifb_probe(struct pci_dev *pdev,
1784 xgifb_info->hasVB = HASVB_NONE; 1766 xgifb_info->hasVB = HASVB_NONE;
1785 } else if (xgifb_info->chip == XG21) { 1767 } else if (xgifb_info->chip == XG21) {
1786 CR38 = xgifb_reg_get(XGICR, 0x38); 1768 CR38 = xgifb_reg_get(XGICR, 0x38);
1787 if ((CR38&0xE0) == 0xC0) 1769 if ((CR38 & 0xE0) == 0xC0)
1788 xgifb_info->display2 = XGIFB_DISP_LCD; 1770 xgifb_info->display2 = XGIFB_DISP_LCD;
1789 else if ((CR38&0xE0) == 0x60) 1771 else if ((CR38 & 0xE0) == 0x60)
1790 xgifb_info->hasVB = HASVB_CHRONTEL; 1772 xgifb_info->hasVB = HASVB_CHRONTEL;
1791 else 1773 else
1792 xgifb_info->hasVB = HASVB_NONE; 1774 xgifb_info->hasVB = HASVB_NONE;
@@ -1903,8 +1885,7 @@ static int xgifb_probe(struct pci_dev *pdev,
1903 xgifb_info->refresh_rate = refresh_rate; 1885 xgifb_info->refresh_rate = refresh_rate;
1904 if (xgifb_info->refresh_rate == 0) 1886 if (xgifb_info->refresh_rate == 0)
1905 xgifb_info->refresh_rate = 60; 1887 xgifb_info->refresh_rate = 60;
1906 if (XGIfb_search_refresh_rate(xgifb_info, 1888 if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) {
1907 xgifb_info->refresh_rate) == 0) {
1908 xgifb_info->rate_idx = 1; 1889 xgifb_info->rate_idx = 1;
1909 xgifb_info->refresh_rate = 60; 1890 xgifb_info->refresh_rate = 60;
1910 } 1891 }
@@ -1939,15 +1920,13 @@ static int xgifb_probe(struct pci_dev *pdev,
1939 default: 1920 default:
1940 xgifb_info->video_cmap_len = 16; 1921 xgifb_info->video_cmap_len = 16;
1941 pr_info("Unsupported depth %d\n", 1922 pr_info("Unsupported depth %d\n",
1942 xgifb_info->video_bpp); 1923 xgifb_info->video_bpp);
1943 break; 1924 break;
1944 } 1925 }
1945 1926
1946 pr_info("Default mode is %dx%dx%d (%dHz)\n", 1927 pr_info("Default mode is %dx%dx%d (%dHz)\n",
1947 xgifb_info->video_width, 1928 xgifb_info->video_width, xgifb_info->video_height,
1948 xgifb_info->video_height, 1929 xgifb_info->video_bpp, xgifb_info->refresh_rate);
1949 xgifb_info->video_bpp,
1950 xgifb_info->refresh_rate);
1951 1930
1952 fb_info->var.red.length = 8; 1931 fb_info->var.red.length = 8;
1953 fb_info->var.green.length = 8; 1932 fb_info->var.green.length = 8;
@@ -1964,22 +1943,20 @@ static int xgifb_probe(struct pci_dev *pdev,
1964 1943
1965 XGIfb_bpp_to_var(xgifb_info, &fb_info->var); 1944 XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
1966 1945
1967 fb_info->var.pixclock = (u32) (1000000000 / 1946 fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
1968 XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info, 1947 (&xgifb_info->dev_info, hw_info,
1969 hw_info, 1948 XGIbios_mode[xgifb_info->mode_idx].mode_no));
1970 XGIbios_mode[xgifb_info->mode_idx].mode_no)); 1949
1971 1950 if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info,
1972 if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info, 1951 hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no,
1973 XGIbios_mode[xgifb_info->mode_idx].mode_no, 1952 &fb_info->var.left_margin,
1974 &fb_info->var.left_margin, 1953 &fb_info->var.right_margin,
1975 &fb_info->var.right_margin, 1954 &fb_info->var.upper_margin,
1976 &fb_info->var.upper_margin, 1955 &fb_info->var.lower_margin,
1977 &fb_info->var.lower_margin, 1956 &fb_info->var.hsync_len,
1978 &fb_info->var.hsync_len, 1957 &fb_info->var.vsync_len,
1979 &fb_info->var.vsync_len, 1958 &fb_info->var.sync,
1980 &fb_info->var.sync, 1959 &fb_info->var.vmode)) {
1981 &fb_info->var.vmode)) {
1982
1983 if ((fb_info->var.vmode & FB_VMODE_MASK) == 1960 if ((fb_info->var.vmode & FB_VMODE_MASK) ==
1984 FB_VMODE_INTERLACED) { 1961 FB_VMODE_INTERLACED) {
1985 fb_info->var.yres <<= 1; 1962 fb_info->var.yres <<= 1;
@@ -1990,7 +1967,6 @@ static int xgifb_probe(struct pci_dev *pdev,
1990 fb_info->var.yres >>= 1; 1967 fb_info->var.yres >>= 1;
1991 fb_info->var.yres_virtual >>= 1; 1968 fb_info->var.yres_virtual >>= 1;
1992 } 1969 }
1993
1994 } 1970 }
1995 1971
1996 fb_info->flags = FBINFO_FLAG_DEFAULT; 1972 fb_info->flags = FBINFO_FLAG_DEFAULT;
@@ -2028,9 +2004,7 @@ error:
2028 return ret; 2004 return ret;
2029} 2005}
2030 2006
2031/*****************************************************/ 2007/* -------------------- PCI DEVICE HANDLING -------------------- */
2032/* PCI DEVICE HANDLING */
2033/*****************************************************/
2034 2008
2035static void xgifb_remove(struct pci_dev *pdev) 2009static void xgifb_remove(struct pci_dev *pdev)
2036{ 2010{
@@ -2054,25 +2028,23 @@ static struct pci_driver xgifb_driver = {
2054 .remove = xgifb_remove 2028 .remove = xgifb_remove
2055}; 2029};
2056 2030
2057/*****************************************************/ 2031/* -------------------- MODULE -------------------- */
2058/* MODULE */
2059/*****************************************************/
2060 2032
2061module_param(mode, charp, 0); 2033module_param(mode, charp, 0000);
2062MODULE_PARM_DESC(mode, 2034MODULE_PARM_DESC(mode,
2063 "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16)."); 2035 "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
2064 2036
2065module_param(forcecrt2type, charp, 0); 2037module_param(forcecrt2type, charp, 0000);
2066MODULE_PARM_DESC(forcecrt2type, 2038MODULE_PARM_DESC(forcecrt2type,
2067 "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE."); 2039 "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
2068 2040
2069module_param(vesa, int, 0); 2041module_param(vesa, int, 0000);
2070MODULE_PARM_DESC(vesa, 2042MODULE_PARM_DESC(vesa,
2071 "Selects the desired default display mode by VESA mode number (eg. 0x117)."); 2043 "Selects the desired default display mode by VESA mode number (eg. 0x117).");
2072 2044
2073module_param(filter, int, 0); 2045module_param(filter, int, 0000);
2074MODULE_PARM_DESC(filter, 2046MODULE_PARM_DESC(filter,
2075 "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter])."); 2047 "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
2076 2048
2077static int __init xgifb_init(void) 2049static int __init xgifb_init(void)
2078{ 2050{
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 062ece22ed84..14af157958cd 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -55,8 +55,9 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
55 xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */ 55 xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
56 /* GPIOF 0:DVI 1:DVO */ 56 /* GPIOF 0:DVI 1:DVO */
57 data = xgifb_reg_get(pVBInfo->P3d4, 0x48); 57 data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
58 /* HOTPLUG_SUPPORT */ 58 /*
59 /* for current XG20 & XG21, GPIOH is floating, driver will 59 * HOTPLUG_SUPPORT
60 * for current XG20 & XG21, GPIOH is floating, driver will
60 * fix DDR temporarily 61 * fix DDR temporarily
61 */ 62 */
62 /* DVI read GPIOH */ 63 /* DVI read GPIOH */
@@ -199,7 +200,8 @@ static void XGINew_DDRII_Bootup_XG27(
199} 200}
200 201
201static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension, 202static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
202 unsigned long P3c4, struct vb_device_info *pVBInfo) 203 unsigned long P3c4,
204 struct vb_device_info *pVBInfo)
203{ 205{
204 unsigned long P3d4 = P3c4 + 0x10; 206 unsigned long P3d4 = P3c4 + 0x10;
205 207
@@ -353,8 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
353 unsigned long Port, struct vb_device_info *pVBInfo) 355 unsigned long Port, struct vb_device_info *pVBInfo)
354{ 356{
355 unsigned long P3d4 = Port, P3c4 = Port - 0x10; 357 unsigned long P3d4 = Port, P3c4 = Port - 0x10;
356 358 /*
357 /* keep following setting sequence, each setting in 359 * keep following setting sequence, each setting in
358 * the same reg insert idle 360 * the same reg insert idle
359 */ 361 */
360 xgifb_reg_set(P3d4, 0x82, 0x77); 362 xgifb_reg_set(P3d4, 0x82, 0x77);
@@ -387,7 +389,7 @@ static void XGINew_DDR2_DefaultRegister(
387} 389}
388 390
389static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg, 391static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
390 u8 shift_factor, u8 mask1, u8 mask2) 392 u8 shift_factor, u8 mask1, u8 mask2)
391{ 393{
392 u8 j; 394 u8 j;
393 395
@@ -460,15 +462,15 @@ static void XGINew_SetDRAMDefaultRegister340(
460 462
461 for (j = 0; j <= 6; j++) /* CR90 - CR96 */ 463 for (j = 0; j <= 6; j++) /* CR90 - CR96 */
462 xgifb_reg_set(P3d4, (0x90 + j), 464 xgifb_reg_set(P3d4, (0x90 + j),
463 pVBInfo->CR40[14 + j][pVBInfo->ram_type]); 465 pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
464 466
465 for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */ 467 for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
466 xgifb_reg_set(P3d4, (0xC3 + j), 468 xgifb_reg_set(P3d4, (0xC3 + j),
467 pVBInfo->CR40[21 + j][pVBInfo->ram_type]); 469 pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
468 470
469 for (j = 0; j < 2; j++) /* CR8A - CR8B */ 471 for (j = 0; j < 2; j++) /* CR8A - CR8B */
470 xgifb_reg_set(P3d4, (0x8A + j), 472 xgifb_reg_set(P3d4, (0x8A + j),
471 pVBInfo->CR40[1 + j][pVBInfo->ram_type]); 473 pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
472 474
473 if (HwDeviceExtension->jChipType == XG42) 475 if (HwDeviceExtension->jChipType == XG42)
474 xgifb_reg_set(P3d4, 0x8C, 0x87); 476 xgifb_reg_set(P3d4, 0x8C, 0x87);
@@ -539,7 +541,8 @@ static unsigned short XGINew_SetDRAMSize20Reg(
539} 541}
540 542
541static int XGINew_ReadWriteRest(unsigned short StopAddr, 543static int XGINew_ReadWriteRest(unsigned short StopAddr,
542 unsigned short StartAddr, struct vb_device_info *pVBInfo) 544 unsigned short StartAddr,
545 struct vb_device_info *pVBInfo)
543{ 546{
544 int i; 547 int i;
545 unsigned long Position = 0; 548 unsigned long Position = 0;
@@ -583,7 +586,7 @@ static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
583} 586}
584 587
585static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, 588static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
586 struct vb_device_info *pVBInfo) 589 struct vb_device_info *pVBInfo)
587{ 590{
588 unsigned char data; 591 unsigned char data;
589 592
@@ -647,7 +650,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
647 pVBInfo->ram_bus = 16; /* 16 bits */ 650 pVBInfo->ram_bus = 16; /* 16 bits */
648 /* (0x31:12x8x2) 22bit + 2 rank */ 651 /* (0x31:12x8x2) 22bit + 2 rank */
649 xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); 652 xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
650 /* 0x41:16Mx16 bit*/ 653 /* 0x41:16Mx16 bit */
651 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41); 654 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
652 usleep_range(15, 1015); 655 usleep_range(15, 1015);
653 656
@@ -660,7 +663,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
660 xgifb_reg_set(pVBInfo->P3c4, 663 xgifb_reg_set(pVBInfo->P3c4,
661 0x13, 664 0x13,
662 0x31); 665 0x31);
663 /* 0x31:8Mx16 bit*/ 666 /* 0x31:8Mx16 bit */
664 xgifb_reg_set(pVBInfo->P3c4, 667 xgifb_reg_set(pVBInfo->P3c4,
665 0x14, 668 0x14,
666 0x31); 669 0x31);
@@ -678,7 +681,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
678 pVBInfo->ram_bus = 8; /* 8 bits */ 681 pVBInfo->ram_bus = 8; /* 8 bits */
679 /* (0x31:12x8x2) 22bit + 2 rank */ 682 /* (0x31:12x8x2) 22bit + 2 rank */
680 xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); 683 xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
681 /* 0x30:8Mx8 bit*/ 684 /* 0x30:8Mx8 bit */
682 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30); 685 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
683 usleep_range(15, 1015); 686 usleep_range(15, 1015);
684 687
@@ -697,7 +700,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
697 case XG27: 700 case XG27:
698 pVBInfo->ram_bus = 16; /* 16 bits */ 701 pVBInfo->ram_bus = 16; /* 16 bits */
699 pVBInfo->ram_channel = 1; /* Single channel */ 702 pVBInfo->ram_channel = 1; /* Single channel */
700 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/ 703 xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */
701 break; 704 break;
702 case XG42: 705 case XG42:
703 /* 706 /*
@@ -785,7 +788,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
785} 788}
786 789
787static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension, 790static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
788 struct vb_device_info *pVBInfo) 791 struct vb_device_info *pVBInfo)
789{ 792{
790 u8 i, size; 793 u8 i, size;
791 unsigned short memsize, start_addr; 794 unsigned short memsize, start_addr;
@@ -827,8 +830,8 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
827} 830}
828 831
829static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info, 832static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
830 struct xgi_hw_device_info *HwDeviceExtension, 833 struct xgi_hw_device_info *HwDeviceExtension,
831 struct vb_device_info *pVBInfo) 834 struct vb_device_info *pVBInfo)
832{ 835{
833 unsigned short data; 836 unsigned short data;
834 837
@@ -905,9 +908,9 @@ static bool xgifb_read_vbios(struct pci_dev *pdev)
905 goto error; 908 goto error;
906 if (j == 0xff) 909 if (j == 0xff)
907 j = 1; 910 j = 1;
908 /* 911
909 * Read the LVDS table index scratch register set by the BIOS. 912 /* Read the LVDS table index scratch register set by the BIOS. */
910 */ 913
911 entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36); 914 entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
912 if (entry >= j) 915 if (entry >= j)
913 entry = 0; 916 entry = 0;
@@ -1039,8 +1042,9 @@ static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
1039 } 1042 }
1040 1043
1041 tempcl |= SetSimuScanMode; 1044 tempcl |= SetSimuScanMode;
1042 if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV) 1045 if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) ||
1043 || (temp & ActiveCRT2))) 1046 (temp & ActiveTV) ||
1047 (temp & ActiveCRT2)))
1044 tempcl ^= (SetSimuScanMode | SwitchCRT2); 1048 tempcl ^= (SetSimuScanMode | SwitchCRT2);
1045 if ((temp & ActiveLCD) && (temp & ActiveTV)) 1049 if ((temp & ActiveLCD) && (temp & ActiveTV))
1046 tempcl ^= (SetSimuScanMode | SwitchCRT2); 1050 tempcl ^= (SetSimuScanMode | SwitchCRT2);
@@ -1085,7 +1089,7 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
1085} 1089}
1086 1090
1087static void XGINew_GetXG21Sense(struct pci_dev *pdev, 1091static void XGINew_GetXG21Sense(struct pci_dev *pdev,
1088 struct vb_device_info *pVBInfo) 1092 struct vb_device_info *pVBInfo)
1089{ 1093{
1090 struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); 1094 struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
1091 unsigned char Temp; 1095 unsigned char Temp;
@@ -1095,7 +1099,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
1095 /* LVDS on chip */ 1099 /* LVDS on chip */
1096 xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0); 1100 xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
1097 } else { 1101 } else {
1098 /* Enable GPIOA/B read */ 1102 /* Enable GPIOA/B read */
1099 xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03); 1103 xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
1100 Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0; 1104 Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
1101 if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */ 1105 if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
@@ -1119,7 +1123,7 @@ static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
1119 unsigned char Temp, bCR4A; 1123 unsigned char Temp, bCR4A;
1120 1124
1121 bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A); 1125 bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
1122 /* Enable GPIOA/B/C read */ 1126 /* Enable GPIOA/B/C read */
1123 xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07); 1127 xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
1124 Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07; 1128 Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
1125 xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A); 1129 xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d8010c5c1a70..7c7c8c8f1df3 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -55,7 +55,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
55 pVBInfo->XGINew_CR97 = 0xc1; 55 pVBInfo->XGINew_CR97 = 0xc1;
56 pVBInfo->SR18 = XG27_SR18; 56 pVBInfo->SR18 = XG27_SR18;
57 57
58 /*Z11m DDR*/ 58 /* Z11m DDR */
59 temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B); 59 temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
60 /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */ 60 /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
61 if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08)) 61 if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
@@ -73,7 +73,7 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
73 /* Get SR1,2,3,4 from file */ 73 /* Get SR1,2,3,4 from file */
74 /* SR1 is with screen off 0x20 */ 74 /* SR1 is with screen off 0x20 */
75 SRdata = XGI330_StandTable.SR[i]; 75 SRdata = XGI330_StandTable.SR[i];
76 xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */ 76 xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */
77 } 77 }
78} 78}
79 79
@@ -167,7 +167,8 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
167} 167}
168 168
169static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex, 169static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
170 unsigned short RefreshRateTableIndex, unsigned short *i, 170 unsigned short RefreshRateTableIndex,
171 unsigned short *i,
171 struct vb_device_info *pVBInfo) 172 struct vb_device_info *pVBInfo)
172{ 173{
173 unsigned short tempax, tempbx, resinfo, modeflag, infoflag; 174 unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
@@ -244,7 +245,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
244} 245}
245 246
246static void XGI_SetSync(unsigned short RefreshRateTableIndex, 247static void XGI_SetSync(unsigned short RefreshRateTableIndex,
247 struct vb_device_info *pVBInfo) 248 struct vb_device_info *pVBInfo)
248{ 249{
249 unsigned short sync, temp; 250 unsigned short sync, temp;
250 251
@@ -257,7 +258,7 @@ static void XGI_SetSync(unsigned short RefreshRateTableIndex,
257} 258}
258 259
259static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo, 260static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
260 struct xgi_hw_device_info *HwDeviceExtension) 261 struct xgi_hw_device_info *HwDeviceExtension)
261{ 262{
262 unsigned char data, data1, pushax; 263 unsigned char data, data1, pushax;
263 unsigned short i, j; 264 unsigned short i, j;
@@ -359,9 +360,9 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
359} 360}
360 361
361static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex, 362static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
362 unsigned short RefreshRateTableIndex, 363 unsigned short RefreshRateTableIndex,
363 struct vb_device_info *pVBInfo, 364 struct vb_device_info *pVBInfo,
364 struct xgi_hw_device_info *HwDeviceExtension) 365 struct xgi_hw_device_info *HwDeviceExtension)
365{ 366{
366 unsigned char index, data; 367 unsigned char index, data;
367 unsigned short i; 368 unsigned short i;
@@ -390,14 +391,14 @@ static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
390 xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F); 391 xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
391} 392}
392 393
393/* --------------------------------------------------------------------- */ 394/*
394/* Function : XGI_SetXG21CRTC */ 395 * Function : XGI_SetXG21CRTC
395/* Input : Stand or enhance CRTC table */ 396 * Input : Stand or enhance CRTC table
396/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */ 397 * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F
397/* Description : Set LCD timing */ 398 * Description : Set LCD timing
398/* --------------------------------------------------------------------- */ 399 */
399static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex, 400static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
400 struct vb_device_info *pVBInfo) 401 struct vb_device_info *pVBInfo)
401{ 402{
402 unsigned char index, Tempax, Tempbx, Tempcx, Tempdx; 403 unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
403 unsigned short Temp1, Temp2, Temp3; 404 unsigned short Temp1, Temp2, Temp3;
@@ -506,8 +507,8 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
506 507
507 /* SR0B */ 508 /* SR0B */
508 Tempax = XGI_CRT1Table[index].CR[5]; 509 Tempax = XGI_CRT1Table[index].CR[5];
509 Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/ 510 Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */
510 Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */ 511 Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */
511 512
512 Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */ 513 Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
513 Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */ 514 Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
@@ -530,7 +531,7 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
530 Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */ 531 Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */
531 Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/ 532 Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
532 Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/ 533 Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
533 Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */ 534 Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */
534 /* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */ 535 /* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
535 xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax); 536 xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
536 xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00); 537 xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
@@ -548,12 +549,12 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
548 Tempax >>= 2; /* Tempax[0]: VRS[8] */ 549 Tempax >>= 2; /* Tempax[0]: VRS[8] */
549 /* SR35[0]: VRS[8] */ 550 /* SR35[0]: VRS[8] */
550 xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax); 551 xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
551 Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */ 552 Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */
552 Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */ 553 Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */
553 /* Tempax: SR0A */ 554 /* Tempax: SR0A */
554 Tempax = XGI_CRT1Table[index].CR[14]; 555 Tempax = XGI_CRT1Table[index].CR[14];
555 Tempax &= 0x08; /* SR0A[3] VRS[10] */ 556 Tempax &= 0x08; /* SR0A[3] VRS[10] */
556 Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */ 557 Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */
557 558
558 /* Tempax: CR11 VRE */ 559 /* Tempax: CR11 VRE */
559 Tempax = XGI_CRT1Table[index].CR[11]; 560 Tempax = XGI_CRT1Table[index].CR[11];
@@ -636,12 +637,12 @@ static void xgifb_set_lcd(int chip_id,
636 xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80); 637 xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
637} 638}
638 639
639/* --------------------------------------------------------------------- */ 640/*
640/* Function : XGI_UpdateXG21CRTC */ 641 * Function : XGI_UpdateXG21CRTC
641/* Input : */ 642 * Input :
642/* Output : CRT1 CRTC */ 643 * Output : CRT1 CRTC
643/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */ 644 * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing
644/* --------------------------------------------------------------------- */ 645 */
645static void XGI_UpdateXG21CRTC(unsigned short ModeNo, 646static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
646 struct vb_device_info *pVBInfo, 647 struct vb_device_info *pVBInfo,
647 unsigned short RefreshRateTableIndex) 648 unsigned short RefreshRateTableIndex)
@@ -665,19 +666,19 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
665 666
666 if (index != -1) { 667 if (index != -1) {
667 xgifb_reg_set(pVBInfo->P3d4, 0x02, 668 xgifb_reg_set(pVBInfo->P3d4, 0x02,
668 XGI_UpdateCRT1Table[index].CR02); 669 XGI_UpdateCRT1Table[index].CR02);
669 xgifb_reg_set(pVBInfo->P3d4, 0x03, 670 xgifb_reg_set(pVBInfo->P3d4, 0x03,
670 XGI_UpdateCRT1Table[index].CR03); 671 XGI_UpdateCRT1Table[index].CR03);
671 xgifb_reg_set(pVBInfo->P3d4, 0x15, 672 xgifb_reg_set(pVBInfo->P3d4, 0x15,
672 XGI_UpdateCRT1Table[index].CR15); 673 XGI_UpdateCRT1Table[index].CR15);
673 xgifb_reg_set(pVBInfo->P3d4, 0x16, 674 xgifb_reg_set(pVBInfo->P3d4, 0x16,
674 XGI_UpdateCRT1Table[index].CR16); 675 XGI_UpdateCRT1Table[index].CR16);
675 } 676 }
676} 677}
677 678
678static void XGI_SetCRT1DE(unsigned short ModeIdIndex, 679static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
679 unsigned short RefreshRateTableIndex, 680 unsigned short RefreshRateTableIndex,
680 struct vb_device_info *pVBInfo) 681 struct vb_device_info *pVBInfo)
681{ 682{
682 unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag; 683 unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
683 684
@@ -715,7 +716,7 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
715 xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */ 716 xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
716 xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff)); 717 xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
717 xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c, 718 xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
718 (unsigned short)((tempcx & 0x0ff00) >> 10)); 719 (unsigned short)((tempcx & 0x0ff00) >> 10));
719 xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff)); 720 xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
720 tempax = 0; 721 tempax = 0;
721 tempbx >>= 8; 722 tempbx >>= 8;
@@ -796,7 +797,7 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
796 i |= temp; 797 i |= temp;
797 xgifb_reg_set(pVBInfo->P3c4, 0x0E, i); 798 xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);
798 799
799 temp = (unsigned char) temp2; 800 temp = (unsigned char)temp2;
800 temp &= 0xFF; /* al */ 801 temp &= 0xFF; /* al */
801 xgifb_reg_set(pVBInfo->P3d4, 0x13, temp); 802 xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);
802 803
@@ -822,15 +823,15 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
822} 823}
823 824
824static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex, 825static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
825 unsigned short RefreshRateTableIndex, 826 unsigned short RefreshRateTableIndex,
826 struct vb_device_info *pVBInfo) 827 struct vb_device_info *pVBInfo)
827{ 828{
828 unsigned short VCLKIndex, modeflag; 829 unsigned short VCLKIndex, modeflag;
829 830
830 /* si+Ext_ResInfo */ 831 /* si+Ext_ResInfo */
831 modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag; 832 modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
832 833
833 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/ 834 if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */
834 if (pVBInfo->LCDResInfo != Panel_1024x768) 835 if (pVBInfo->LCDResInfo != Panel_1024x768)
835 /* LCDXlat2VCLK */ 836 /* LCDXlat2VCLK */
836 VCLKIndex = VCLK108_2_315 + 5; 837 VCLKIndex = VCLK108_2_315 + 5;
@@ -951,8 +952,8 @@ static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
951} 952}
952 953
953static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension, 954static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
954 unsigned short RefreshRateTableIndex, 955 unsigned short RefreshRateTableIndex,
955 struct vb_device_info *pVBInfo) 956 struct vb_device_info *pVBInfo)
956{ 957{
957 unsigned short data, data2 = 0; 958 unsigned short data, data2 = 0;
958 short VCLK; 959 short VCLK;
@@ -989,9 +990,9 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
989} 990}
990 991
991static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension, 992static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
992 unsigned short ModeIdIndex, 993 unsigned short ModeIdIndex,
993 unsigned short RefreshRateTableIndex, 994 unsigned short RefreshRateTableIndex,
994 struct vb_device_info *pVBInfo) 995 struct vb_device_info *pVBInfo)
995{ 996{
996 unsigned short data, data2, data3, infoflag = 0, modeflag, resindex, 997 unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
997 xres; 998 xres;
@@ -1087,9 +1088,9 @@ static void XGI_WriteDAC(unsigned short dl,
1087 else 1088 else
1088 swap(bl, bh); 1089 swap(bl, bh);
1089 } 1090 }
1090 outb((unsigned short) dh, pVBInfo->P3c9); 1091 outb((unsigned short)dh, pVBInfo->P3c9);
1091 outb((unsigned short) bh, pVBInfo->P3c9); 1092 outb((unsigned short)bh, pVBInfo->P3c9);
1092 outb((unsigned short) bl, pVBInfo->P3c9); 1093 outb((unsigned short)bl, pVBInfo->P3c9);
1093} 1094}
1094 1095
1095static void XGI_LoadDAC(struct vb_device_info *pVBInfo) 1096static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
@@ -1187,8 +1188,8 @@ static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
1187} 1188}
1188 1189
1189static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table, 1190static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
1190 unsigned short ModeIdIndex, 1191 unsigned short ModeIdIndex,
1191 struct vb_device_info *pVBInfo) 1192 struct vb_device_info *pVBInfo)
1192{ 1193{
1193 unsigned short i, tempdx, tempbx, modeflag; 1194 unsigned short i, tempdx, tempbx, modeflag;
1194 1195
@@ -1201,12 +1202,12 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
1201 while (table[i].PANELID != 0xff) { 1202 while (table[i].PANELID != 0xff) {
1202 tempdx = pVBInfo->LCDResInfo; 1203 tempdx = pVBInfo->LCDResInfo;
1203 if (tempbx & 0x0080) { /* OEMUtil */ 1204 if (tempbx & 0x0080) { /* OEMUtil */
1204 tempbx &= (~0x0080); 1205 tempbx &= ~0x0080;
1205 tempdx = pVBInfo->LCDTypeInfo; 1206 tempdx = pVBInfo->LCDTypeInfo;
1206 } 1207 }
1207 1208
1208 if (pVBInfo->LCDInfo & EnableScalingLCD) 1209 if (pVBInfo->LCDInfo & EnableScalingLCD)
1209 tempdx &= (~PanelResInfo); 1210 tempdx &= ~PanelResInfo;
1210 1211
1211 if (table[i].PANELID == tempdx) { 1212 if (table[i].PANELID == tempdx) {
1212 tempbx = table[i].MASK; 1213 tempbx = table[i].MASK;
@@ -1226,8 +1227,8 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
1226} 1227}
1227 1228
1228static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex, 1229static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
1229 unsigned short RefreshRateTableIndex, 1230 unsigned short RefreshRateTableIndex,
1230 struct vb_device_info *pVBInfo) 1231 struct vb_device_info *pVBInfo)
1231{ 1232{
1232 unsigned short i, tempdx, tempal, modeflag; 1233 unsigned short i, tempdx, tempal, modeflag;
1233 1234
@@ -1441,9 +1442,9 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1441 tempbx >>= 3; 1442 tempbx >>= 3;
1442 1443
1443 xgifb_reg_set(pVBInfo->Part1Port, 0x16, 1444 xgifb_reg_set(pVBInfo->Part1Port, 0x16,
1444 (unsigned short) (tempbx & 0xff)); 1445 (unsigned short)(tempbx & 0xff));
1445 xgifb_reg_set(pVBInfo->Part1Port, 0x17, 1446 xgifb_reg_set(pVBInfo->Part1Port, 0x17,
1446 (unsigned short) (tempcx & 0xff)); 1447 (unsigned short)(tempcx & 0xff));
1447 1448
1448 tempax = pVBInfo->HT; 1449 tempax = pVBInfo->HT;
1449 1450
@@ -1469,7 +1470,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1469 1470
1470 xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax); 1471 xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
1471 xgifb_reg_set(pVBInfo->Part1Port, 0x14, 1472 xgifb_reg_set(pVBInfo->Part1Port, 0x14,
1472 (unsigned short) (tempbx & 0xff)); 1473 (unsigned short)(tempbx & 0xff));
1473 1474
1474 tempax = pVBInfo->VT; 1475 tempax = pVBInfo->VT;
1475 tempbx = LCDPtr1->LCDVDES; 1476 tempbx = LCDPtr1->LCDVDES;
@@ -1480,17 +1481,14 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1480 if (tempcx >= tempax) 1481 if (tempcx >= tempax)
1481 tempcx -= tempax; 1482 tempcx -= tempax;
1482 1483
1483 xgifb_reg_set(pVBInfo->Part1Port, 0x1b, 1484 xgifb_reg_set(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff));
1484 (unsigned short) (tempbx & 0xff)); 1485 xgifb_reg_set(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff));
1485 xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
1486 (unsigned short) (tempcx & 0xff));
1487 1486
1488 tempbx = (tempbx >> 8) & 0x07; 1487 tempbx = (tempbx >> 8) & 0x07;
1489 tempcx = (tempcx >> 8) & 0x07; 1488 tempcx = (tempcx >> 8) & 0x07;
1490 1489
1491 xgifb_reg_set(pVBInfo->Part1Port, 0x1d, 1490 xgifb_reg_set(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) |
1492 (unsigned short) ((tempcx << 3) 1491 tempbx));
1493 | tempbx));
1494 1492
1495 tempax = pVBInfo->VT; 1493 tempax = pVBInfo->VT;
1496 tempbx = LCDPtr1->LCDVRS; 1494 tempbx = LCDPtr1->LCDVRS;
@@ -1504,10 +1502,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1504 if (tempcx >= tempax) 1502 if (tempcx >= tempax)
1505 tempcx -= tempax; 1503 tempcx -= tempax;
1506 1504
1507 xgifb_reg_set(pVBInfo->Part1Port, 0x18, 1505 xgifb_reg_set(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff));
1508 (unsigned short) (tempbx & 0xff)); 1506 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
1509 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
1510 (unsigned short) (tempcx & 0x0f));
1511 1507
1512 tempax = ((tempbx >> 8) & 0x07) << 3; 1508 tempax = ((tempbx >> 8) & 0x07) << 3;
1513 1509
@@ -1518,8 +1514,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1518 if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA) 1514 if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
1519 tempax |= 0x40; 1515 tempax |= 0x40;
1520 1516
1521 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, 1517 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax);
1522 tempax);
1523 1518
1524 tempbx = pVBInfo->VDE; 1519 tempbx = pVBInfo->VDE;
1525 tempax = pVBInfo->VGAVDE; 1520 tempax = pVBInfo->VGAVDE;
@@ -1527,7 +1522,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1527 temp = tempax; /* 0430 ylshieh */ 1522 temp = tempax; /* 0430 ylshieh */
1528 temp1 = (temp << 18) / tempbx; 1523 temp1 = (temp << 18) / tempbx;
1529 1524
1530 tempdx = (unsigned short) ((temp << 18) % tempbx); 1525 tempdx = (unsigned short)((temp << 18) % tempbx);
1531 1526
1532 if (tempdx != 0) 1527 if (tempdx != 0)
1533 temp1 += 1; 1528 temp1 += 1;
@@ -1535,12 +1530,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1535 temp2 = temp1; 1530 temp2 = temp1;
1536 push3 = temp2; 1531 push3 = temp2;
1537 1532
1538 xgifb_reg_set(pVBInfo->Part1Port, 0x37, 1533 xgifb_reg_set(pVBInfo->Part1Port, 0x37, (unsigned short)(temp2 & 0xff));
1539 (unsigned short) (temp2 & 0xff)); 1534 xgifb_reg_set(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff));
1540 xgifb_reg_set(pVBInfo->Part1Port, 0x36,
1541 (unsigned short) ((temp2 >> 8) & 0xff));
1542 1535
1543 tempbx = (unsigned short) (temp2 >> 16); 1536 tempbx = (unsigned short)(temp2 >> 16);
1544 tempax = tempbx & 0x03; 1537 tempax = tempbx & 0x03;
1545 1538
1546 tempbx = pVBInfo->VGAVDE; 1539 tempbx = pVBInfo->VGAVDE;
@@ -1553,24 +1546,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1553 temp2 = push3; 1546 temp2 = push3;
1554 xgifb_reg_set(pVBInfo->Part4Port, 1547 xgifb_reg_set(pVBInfo->Part4Port,
1555 0x3c, 1548 0x3c,
1556 (unsigned short) (temp2 & 0xff)); 1549 (unsigned short)(temp2 & 0xff));
1557 xgifb_reg_set(pVBInfo->Part4Port, 1550 xgifb_reg_set(pVBInfo->Part4Port,
1558 0x3b, 1551 0x3b,
1559 (unsigned short) ((temp2 >> 8) & 1552 (unsigned short)((temp2 >> 8) &
1560 0xff)); 1553 0xff));
1561 tempbx = (unsigned short) (temp2 >> 16); 1554 tempbx = (unsigned short)(temp2 >> 16);
1562 xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, 1555 xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0,
1563 ~0xc0, 1556 (unsigned short)((tempbx & 0xff) << 6));
1564 (unsigned short) ((tempbx &
1565 0xff) << 6));
1566 1557
1567 tempcx = pVBInfo->VGAVDE; 1558 tempcx = pVBInfo->VGAVDE;
1568 if (tempcx == pVBInfo->VDE) 1559 if (tempcx == pVBInfo->VDE)
1569 xgifb_reg_and_or(pVBInfo->Part4Port, 1560 xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00);
1570 0x30, ~0x0c, 0x00);
1571 else 1561 else
1572 xgifb_reg_and_or(pVBInfo->Part4Port, 1562 xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08);
1573 0x30, ~0x0c, 0x08);
1574 } 1563 }
1575 1564
1576 tempcx = pVBInfo->VGAHDE; 1565 tempcx = pVBInfo->VGAHDE;
@@ -1578,7 +1567,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1578 1567
1579 temp1 = tempcx << 16; 1568 temp1 = tempcx << 16;
1580 1569
1581 tempax = (unsigned short) (temp1 / tempbx); 1570 tempax = (unsigned short)(temp1 / tempbx);
1582 1571
1583 if ((tempbx & 0xffff) == (tempcx & 0xffff)) 1572 if ((tempbx & 0xffff) == (tempcx & 0xffff))
1584 tempax = 65535; 1573 tempax = 65535;
@@ -1592,42 +1581,38 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
1592 1581
1593 temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff); 1582 temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
1594 1583
1595 tempax = (unsigned short) (temp3 & 0xff); 1584 tempax = (unsigned short)(temp3 & 0xff);
1596 xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax); 1585 xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
1597 1586
1598 temp1 = pVBInfo->VGAVDE << 18; 1587 temp1 = pVBInfo->VGAVDE << 18;
1599 temp1 = temp1 / push3; 1588 temp1 = temp1 / push3;
1600 tempbx = (unsigned short) (temp1 & 0xffff); 1589 tempbx = (unsigned short)(temp1 & 0xffff);
1601 1590
1602 if (pVBInfo->LCDResInfo == Panel_1024x768) 1591 if (pVBInfo->LCDResInfo == Panel_1024x768)
1603 tempbx -= 1; 1592 tempbx -= 1;
1604 1593
1605 tempax = ((tempbx >> 8) & 0xff) << 3; 1594 tempax = ((tempbx >> 8) & 0xff) << 3;
1606 tempax |= (unsigned short) ((temp3 >> 8) & 0x07); 1595 tempax |= (unsigned short)((temp3 >> 8) & 0x07);
1607 xgifb_reg_set(pVBInfo->Part1Port, 0x20, 1596 xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
1608 (unsigned short) (tempax & 0xff)); 1597 xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
1609 xgifb_reg_set(pVBInfo->Part1Port, 0x21,
1610 (unsigned short) (tempbx & 0xff));
1611 1598
1612 temp3 >>= 16; 1599 temp3 >>= 16;
1613 1600
1614 if (modeflag & HalfDCLK) 1601 if (modeflag & HalfDCLK)
1615 temp3 >>= 1; 1602 temp3 >>= 1;
1616 1603
1617 xgifb_reg_set(pVBInfo->Part1Port, 0x22, 1604 xgifb_reg_set(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff));
1618 (unsigned short) ((temp3 >> 8) & 0xff)); 1605 xgifb_reg_set(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff));
1619 xgifb_reg_set(pVBInfo->Part1Port, 0x23,
1620 (unsigned short) (temp3 & 0xff));
1621} 1606}
1622 1607
1623/* --------------------------------------------------------------------- */ 1608/*
1624/* Function : XGI_GETLCDVCLKPtr */ 1609 * Function : XGI_GETLCDVCLKPtr
1625/* Input : */ 1610 * Input :
1626/* Output : al -> VCLK Index */ 1611 * Output : al -> VCLK Index
1627/* Description : */ 1612 * Description :
1628/* --------------------------------------------------------------------- */ 1613 */
1629static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1, 1614static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
1630 struct vb_device_info *pVBInfo) 1615 struct vb_device_info *pVBInfo)
1631{ 1616{
1632 unsigned short index; 1617 unsigned short index;
1633 1618
@@ -1645,7 +1630,8 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
1645} 1630}
1646 1631
1647static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex, 1632static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
1648 unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) 1633 unsigned short ModeIdIndex,
1634 struct vb_device_info *pVBInfo)
1649{ 1635{
1650 unsigned short index, modeflag; 1636 unsigned short index, modeflag;
1651 unsigned char tempal; 1637 unsigned char tempal;
@@ -1681,15 +1667,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
1681 return tempal; 1667 return tempal;
1682 } 1668 }
1683 1669
1684 if (pVBInfo->TVInfo & TVSetYPbPr750p) { 1670 if (pVBInfo->TVInfo & TVSetYPbPr750p)
1685 tempal = XGI_YPbPr750pVCLK; 1671 return XGI_YPbPr750pVCLK;
1686 return tempal;
1687 }
1688 1672
1689 if (pVBInfo->TVInfo & TVSetYPbPr525p) { 1673 if (pVBInfo->TVInfo & TVSetYPbPr525p)
1690 tempal = YPbPr525pVCLK; 1674 return YPbPr525pVCLK;
1691 return tempal;
1692 }
1693 1675
1694 tempal = NTSC1024VCLK; 1676 tempal = NTSC1024VCLK;
1695 1677
@@ -1705,12 +1687,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
1705 } /* {End of VB} */ 1687 } /* {End of VB} */
1706 1688
1707 inb((pVBInfo->P3ca + 0x02)); 1689 inb((pVBInfo->P3ca + 0x02));
1708 tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; 1690 return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
1709 return tempal;
1710} 1691}
1711 1692
1712static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0, 1693static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
1713 unsigned char *di_1, struct vb_device_info *pVBInfo) 1694 unsigned char *di_1, struct vb_device_info *pVBInfo)
1714{ 1695{
1715 if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B 1696 if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
1716 | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) { 1697 | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
@@ -1726,8 +1707,8 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
1726} 1707}
1727 1708
1728static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex, 1709static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
1729 unsigned short RefreshRateTableIndex, 1710 unsigned short RefreshRateTableIndex,
1730 struct vb_device_info *pVBInfo) 1711 struct vb_device_info *pVBInfo)
1731{ 1712{
1732 unsigned char di_0, di_1, tempal; 1713 unsigned char di_0, di_1, tempal;
1733 int i; 1714 int i;
@@ -1738,7 +1719,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
1738 1719
1739 for (i = 0; i < 4; i++) { 1720 for (i = 0; i < 4; i++) {
1740 xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30, 1721 xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
1741 (unsigned short) (0x10 * i)); 1722 (unsigned short)(0x10 * i));
1742 if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) && 1723 if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
1743 !(pVBInfo->VBInfo & SetInSlaveMode)) { 1724 !(pVBInfo->VBInfo & SetInSlaveMode)) {
1744 xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0); 1725 xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
@@ -1876,8 +1857,7 @@ finish:
1876 pVBInfo->VBType = tempbx; 1857 pVBInfo->VBType = tempbx;
1877} 1858}
1878 1859
1879static void XGI_GetVBInfo(unsigned short ModeIdIndex, 1860static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
1880 struct vb_device_info *pVBInfo)
1881{ 1861{
1882 unsigned short tempax, push, tempbx, temp, modeflag; 1862 unsigned short tempax, push, tempbx, temp, modeflag;
1883 1863
@@ -1921,7 +1901,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
1921 tempbx |= SetCRT2ToHiVision; 1901 tempbx |= SetCRT2ToHiVision;
1922 1902
1923 if (temp != YPbPrMode1080i) { 1903 if (temp != YPbPrMode1080i) {
1924 tempbx &= (~SetCRT2ToHiVision); 1904 tempbx &= ~SetCRT2ToHiVision;
1925 tempbx |= SetCRT2ToYPbPr525750; 1905 tempbx |= SetCRT2ToYPbPr525750;
1926 } 1906 }
1927 } 1907 }
@@ -2002,8 +1982,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
2002 pVBInfo->VBInfo = tempbx; 1982 pVBInfo->VBInfo = tempbx;
2003} 1983}
2004 1984
2005static void XGI_GetTVInfo(unsigned short ModeIdIndex, 1985static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
2006 struct vb_device_info *pVBInfo)
2007{ 1986{
2008 unsigned short tempbx = 0, resinfo = 0, modeflag, index1; 1987 unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
2009 1988
@@ -2078,7 +2057,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
2078 pVBInfo->LCDTypeInfo = 0; 2057 pVBInfo->LCDTypeInfo = 0;
2079 pVBInfo->LCDInfo = 0; 2058 pVBInfo->LCDInfo = 0;
2080 2059
2081 /* si+Ext_ResInfo // */ 2060 /* si+Ext_ResInfo */
2082 resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO; 2061 resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
2083 temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */ 2062 temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
2084 tempbx = temp & 0x0F; 2063 tempbx = temp & 0x0F;
@@ -2175,12 +2154,12 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
2175 return ujRet; 2154 return ujRet;
2176} 2155}
2177 2156
2178/*----------------------------------------------------------------------------*/ 2157/*
2179/* output */ 2158 * output
2180/* bl[5] : LVDS signal */ 2159 * bl[5] : LVDS signal
2181/* bl[1] : LVDS backlight */ 2160 * bl[1] : LVDS backlight
2182/* bl[0] : LVDS VDD */ 2161 * bl[0] : LVDS VDD
2183/*----------------------------------------------------------------------------*/ 2162 */
2184static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo) 2163static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
2185{ 2164{
2186 unsigned char CR4A, temp; 2165 unsigned char CR4A, temp;
@@ -2196,12 +2175,12 @@ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
2196 return temp; 2175 return temp;
2197} 2176}
2198 2177
2199/*----------------------------------------------------------------------------*/ 2178/*
2200/* output */ 2179 * output
2201/* bl[5] : LVDS signal */ 2180 * bl[5] : LVDS signal
2202/* bl[1] : LVDS backlight */ 2181 * bl[1] : LVDS backlight
2203/* bl[0] : LVDS VDD */ 2182 * bl[0] : LVDS VDD
2204/*----------------------------------------------------------------------------*/ 2183 */
2205static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo) 2184static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
2206{ 2185{
2207 unsigned char CR4A, CRB4, temp; 2186 unsigned char CR4A, CRB4, temp;
@@ -2219,17 +2198,17 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
2219 return temp; 2198 return temp;
2220} 2199}
2221 2200
2222/*----------------------------------------------------------------------------*/ 2201/*
2223/* input */ 2202 * input
2224/* bl[5] : 1;LVDS signal on */ 2203 * bl[5] : 1;LVDS signal on
2225/* bl[1] : 1;LVDS backlight on */ 2204 * bl[1] : 1;LVDS backlight on
2226/* bl[0] : 1:LVDS VDD on */ 2205 * bl[0] : 1:LVDS VDD on
2227/* bh: 100000b : clear bit 5, to set bit5 */ 2206 * bh: 100000b : clear bit 5, to set bit5
2228/* 000010b : clear bit 1, to set bit1 */ 2207 * 000010b : clear bit 1, to set bit1
2229/* 000001b : clear bit 0, to set bit0 */ 2208 * 000001b : clear bit 0, to set bit0
2230/*----------------------------------------------------------------------------*/ 2209 */
2231static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, 2210static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
2232 struct vb_device_info *pVBInfo) 2211 struct vb_device_info *pVBInfo)
2233{ 2212{
2234 unsigned char CR4A, temp; 2213 unsigned char CR4A, temp;
2235 2214
@@ -2254,7 +2233,7 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
2254} 2233}
2255 2234
2256static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, 2235static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
2257 struct vb_device_info *pVBInfo) 2236 struct vb_device_info *pVBInfo)
2258{ 2237{
2259 unsigned char CR4A, temp; 2238 unsigned char CR4A, temp;
2260 unsigned short tempbh0, tempbl0; 2239 unsigned short tempbh0, tempbl0;
@@ -2284,8 +2263,8 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
2284} 2263}
2285 2264
2286static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info, 2265static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
2287 struct xgi_hw_device_info *pXGIHWDE, 2266 struct xgi_hw_device_info *pXGIHWDE,
2288 struct vb_device_info *pVBInfo) 2267 struct vb_device_info *pVBInfo)
2289{ 2268{
2290 xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00); 2269 xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
2291 if (pXGIHWDE->jChipType == XG21) { 2270 if (pXGIHWDE->jChipType == XG21) {
@@ -2328,8 +2307,8 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
2328} 2307}
2329 2308
2330void XGI_DisplayOff(struct xgifb_video_info *xgifb_info, 2309void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
2331 struct xgi_hw_device_info *pXGIHWDE, 2310 struct xgi_hw_device_info *pXGIHWDE,
2332 struct vb_device_info *pVBInfo) 2311 struct vb_device_info *pVBInfo)
2333{ 2312{
2334 if (pXGIHWDE->jChipType == XG21) { 2313 if (pXGIHWDE->jChipType == XG21) {
2335 if (pVBInfo->IF_DEF_LVDS == 1) { 2314 if (pVBInfo->IF_DEF_LVDS == 1) {
@@ -2448,7 +2427,7 @@ exit:
2448static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo) 2427static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
2449{ 2428{
2450 if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) && 2429 if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
2451 (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */ 2430 (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
2452 return 1; 2431 return 1;
2453 2432
2454 return 0; 2433 return 0;
@@ -2466,16 +2445,15 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
2466 modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag; 2445 modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
2467 CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC; 2446 CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
2468 CRT1Index &= IndexMask; 2447 CRT1Index &= IndexMask;
2469 temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[0]; 2448 temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0];
2470 temp2 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[5]; 2449 temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5];
2471 tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8); 2450 tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
2472 tempbx = (unsigned short) XGI_CRT1Table[CRT1Index].CR[8]; 2451 tempbx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[8];
2473 tempcx = (unsigned short) 2452 tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8;
2474 XGI_CRT1Table[CRT1Index].CR[14] << 8;
2475 tempcx &= 0x0100; 2453 tempcx &= 0x0100;
2476 tempcx <<= 2; 2454 tempcx <<= 2;
2477 tempbx |= tempcx; 2455 tempbx |= tempcx;
2478 temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[9]; 2456 temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9];
2479 2457
2480 if (temp1 & 0x01) 2458 if (temp1 & 0x01)
2481 tempbx |= 0x0100; 2459 tempbx |= 0x0100;
@@ -2497,8 +2475,8 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
2497} 2475}
2498 2476
2499static void XGI_GetCRT2Data(unsigned short ModeIdIndex, 2477static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
2500 unsigned short RefreshRateTableIndex, 2478 unsigned short RefreshRateTableIndex,
2501 struct vb_device_info *pVBInfo) 2479 struct vb_device_info *pVBInfo)
2502{ 2480{
2503 unsigned short tempax = 0, tempbx = 0, modeflag, resinfo; 2481 unsigned short tempax = 0, tempbx = 0, modeflag, resinfo;
2504 2482
@@ -2667,8 +2645,8 @@ static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
2667} 2645}
2668 2646
2669static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex, 2647static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
2670 unsigned short RefreshRateTableIndex, 2648 unsigned short RefreshRateTableIndex,
2671 struct vb_device_info *pVBInfo) 2649 struct vb_device_info *pVBInfo)
2672{ 2650{
2673 unsigned char di_0, di_1, tempal; 2651 unsigned char di_0, di_1, tempal;
2674 2652
@@ -2739,9 +2717,9 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
2739} 2717}
2740 2718
2741static void XGI_SetCRT2Offset(unsigned short ModeNo, 2719static void XGI_SetCRT2Offset(unsigned short ModeNo,
2742 unsigned short ModeIdIndex, 2720 unsigned short ModeIdIndex,
2743 unsigned short RefreshRateTableIndex, 2721 unsigned short RefreshRateTableIndex,
2744 struct vb_device_info *pVBInfo) 2722 struct vb_device_info *pVBInfo)
2745{ 2723{
2746 unsigned short offset; 2724 unsigned short offset;
2747 unsigned char temp; 2725 unsigned char temp;
@@ -2750,11 +2728,11 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
2750 return; 2728 return;
2751 2729
2752 offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex); 2730 offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
2753 temp = (unsigned char) (offset & 0xFF); 2731 temp = (unsigned char)(offset & 0xFF);
2754 xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp); 2732 xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
2755 temp = (unsigned char) ((offset & 0xFF00) >> 8); 2733 temp = (unsigned char)((offset & 0xFF00) >> 8);
2756 xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp); 2734 xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp);
2757 temp = (unsigned char) (((offset >> 3) & 0xFF) + 1); 2735 temp = (unsigned char)(((offset >> 3) & 0xFF) + 1);
2758 xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp); 2736 xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
2759} 2737}
2760 2738
@@ -2767,8 +2745,8 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
2767} 2745}
2768 2746
2769static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex, 2747static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
2770 unsigned short RefreshRateTableIndex, 2748 unsigned short RefreshRateTableIndex,
2771 struct vb_device_info *pVBInfo) 2749 struct vb_device_info *pVBInfo)
2772{ 2750{
2773 u8 tempcx; 2751 u8 tempcx;
2774 2752
@@ -2783,8 +2761,8 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
2783} 2761}
2784 2762
2785static void XGI_SetGroup1(unsigned short ModeIdIndex, 2763static void XGI_SetGroup1(unsigned short ModeIdIndex,
2786 unsigned short RefreshRateTableIndex, 2764 unsigned short RefreshRateTableIndex,
2787 struct vb_device_info *pVBInfo) 2765 struct vb_device_info *pVBInfo)
2788{ 2766{
2789 unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0, 2767 unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
2790 pushbx = 0, CRT1Index, modeflag; 2768 pushbx = 0, CRT1Index, modeflag;
@@ -2933,11 +2911,11 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
2933 tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT; 2911 tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;
2934 tempax = (tempax * pVBInfo->HT) / tempbx; 2912 tempax = (tempax * pVBInfo->HT) / tempbx;
2935 2913
2936 return (unsigned short) tempax; 2914 return (unsigned short)tempax;
2937} 2915}
2938 2916
2939static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, 2917static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
2940 struct vb_device_info *pVBInfo) 2918 struct vb_device_info *pVBInfo)
2941{ 2919{
2942 unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo, 2920 unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
2943 modeflag; 2921 modeflag;
@@ -3044,14 +3022,14 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
3044 if (ModeNo == 0x50) { 3022 if (ModeNo == 0x50) {
3045 if (pVBInfo->TVInfo == SetNTSCTV) { 3023 if (pVBInfo->TVInfo == SetNTSCTV) {
3046 xgifb_reg_set(pVBInfo->Part1Port, 3024 xgifb_reg_set(pVBInfo->Part1Port,
3047 0x07, 0x30); 3025 0x07, 0x30);
3048 xgifb_reg_set(pVBInfo->Part1Port, 3026 xgifb_reg_set(pVBInfo->Part1Port,
3049 0x08, 0x03); 3027 0x08, 0x03);
3050 } else { 3028 } else {
3051 xgifb_reg_set(pVBInfo->Part1Port, 3029 xgifb_reg_set(pVBInfo->Part1Port,
3052 0x07, 0x2f); 3030 0x07, 0x2f);
3053 xgifb_reg_set(pVBInfo->Part1Port, 3031 xgifb_reg_set(pVBInfo->Part1Port,
3054 0x08, 0x02); 3032 0x08, 0x02);
3055 } 3033 }
3056 } 3034 }
3057 } 3035 }
@@ -3064,7 +3042,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
3064 tempbx = pVBInfo->VGAVT; 3042 tempbx = pVBInfo->VGAVT;
3065 push1 = tempbx; 3043 push1 = tempbx;
3066 tempcx = 0x121; 3044 tempcx = 0x121;
3067 tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */ 3045 tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */
3068 3046
3069 if (tempbx == 357) 3047 if (tempbx == 357)
3070 tempbx = 350; 3048 tempbx = 350;
@@ -3116,7 +3094,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
3116 if (tempbx & 0x0400) 3094 if (tempbx & 0x0400)
3117 tempcx |= 0x0600; 3095 tempcx |= 0x0600;
3118 3096
3119 /* 0x11 Vertival Blank End */ 3097 /* 0x11 Vertical Blank End */
3120 xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00); 3098 xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);
3121 3099
3122 tempax = push1; 3100 tempax = push1;
@@ -3227,7 +3205,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
3227} 3205}
3228 3206
3229static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, 3207static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3230 struct vb_device_info *pVBInfo) 3208 struct vb_device_info *pVBInfo)
3231{ 3209{
3232 unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2, 3210 unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
3233 modeflag; 3211 modeflag;
@@ -3315,7 +3293,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3315 tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8); 3293 tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
3316 push1 = tempax; 3294 push1 = tempax;
3317 temp = (tempax & 0xFF00) >> 8; 3295 temp = (tempax & 0xFF00) >> 8;
3318 temp += (unsigned short) TimingPoint[0]; 3296 temp += (unsigned short)TimingPoint[0];
3319 3297
3320 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV 3298 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
3321 | VB_SIS302LV | VB_XGI301C)) { 3299 | VB_SIS302LV | VB_XGI301C)) {
@@ -3526,7 +3504,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3526 3504
3527 tempcx = 0x0101; 3505 tempcx = 0x0101;
3528 3506
3529 if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/ 3507 if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */
3530 if (pVBInfo->VGAHDE >= 1024) { 3508 if (pVBInfo->VGAHDE >= 1024) {
3531 tempcx = 0x1920; 3509 tempcx = 0x1920;
3532 if (pVBInfo->VGAHDE >= 1280) { 3510 if (pVBInfo->VGAHDE >= 1280) {
@@ -3562,7 +3540,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3562 if (temp2 != 0) 3540 if (temp2 != 0)
3563 tempeax += 1; 3541 tempeax += 1;
3564 3542
3565 tempax = (unsigned short) tempeax; 3543 tempax = (unsigned short)tempeax;
3566 3544
3567 /* 301b */ 3545 /* 301b */
3568 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV 3546 if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
@@ -3572,9 +3550,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3572 /* end 301b */ 3550 /* end 301b */
3573 3551
3574 tempbx = push1; 3552 tempbx = push1;
3575 tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00) 3553 tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00)
3576 | (tempbx & 0x00FF)); 3554 | (tempbx & 0x00FF));
3577 tempax = (unsigned short) (((tempeax & 0x000000FF) << 8) 3555 tempax = (unsigned short)(((tempeax & 0x000000FF) << 8)
3578 | (tempax & 0x00FF)); 3556 | (tempax & 0x00FF));
3579 temp = (tempax & 0xFF00) >> 8; 3557 temp = (tempax & 0xFF00) >> 8;
3580 } else { 3558 } else {
@@ -3622,14 +3600,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3622 3600
3623 xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp); 3601 xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
3624 temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */ 3602 temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
3625 xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3)); 3603 xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3));
3626 3604
3627 if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) { 3605 if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
3628 if (pVBInfo->TVInfo & NTSC1024x768) { 3606 if (pVBInfo->TVInfo & NTSC1024x768) {
3629 TimingPoint = XGI_NTSC1024AdjTime; 3607 TimingPoint = XGI_NTSC1024AdjTime;
3630 for (i = 0x1c, j = 0; i <= 0x30; i++, j++) { 3608 for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
3631 xgifb_reg_set(pVBInfo->Part2Port, i, 3609 xgifb_reg_set(pVBInfo->Part2Port, i,
3632 TimingPoint[j]); 3610 TimingPoint[j]);
3633 } 3611 }
3634 xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72); 3612 xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
3635 } 3613 }
@@ -3639,7 +3617,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3639 if (pVBInfo->VBType & VB_XGI301C) { 3617 if (pVBInfo->VBType & VB_XGI301C) {
3640 if (pVBInfo->TVInfo & TVSetPALM) 3618 if (pVBInfo->TVInfo & TVSetPALM)
3641 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08, 3619 xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
3642 0x08); /* PALM Mode */ 3620 0x08); /* PALM Mode */
3643 } 3621 }
3644 3622
3645 if (pVBInfo->TVInfo & TVSetPALM) { 3623 if (pVBInfo->TVInfo & TVSetPALM) {
@@ -3656,8 +3634,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
3656 } 3634 }
3657} 3635}
3658 3636
3659static void XGI_SetLCDRegs(unsigned short ModeIdIndex, 3637static void XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
3660 struct vb_device_info *pVBInfo)
3661{ 3638{
3662 unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah, 3639 unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
3663 tempbh, tempch; 3640 tempbh, tempch;
@@ -3853,12 +3830,12 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
3853 } 3830 }
3854} 3831}
3855 3832
3856/* --------------------------------------------------------------------- */ 3833/*
3857/* Function : XGI_GetTap4Ptr */ 3834 * Function : XGI_GetTap4Ptr
3858/* Input : */ 3835 * Input :
3859/* Output : di -> Tap4 Reg. Setting Pointer */ 3836 * Output : di -> Tap4 Reg. Setting Pointer
3860/* Description : */ 3837 * Description :
3861/* --------------------------------------------------------------------- */ 3838 */
3862static struct XGI301C_Tap4TimingStruct const 3839static struct XGI301C_Tap4TimingStruct const
3863*XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo) 3840*XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo)
3864{ 3841{
@@ -3882,7 +3859,7 @@ static struct XGI301C_Tap4TimingStruct const
3882 3859
3883 if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) { 3860 if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
3884 if ((pVBInfo->TVInfo & TVSetYPbPr525i) || 3861 if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
3885 (pVBInfo->TVInfo & TVSetYPbPr525p)) 3862 (pVBInfo->TVInfo & TVSetYPbPr525p))
3886 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; 3863 Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
3887 if (pVBInfo->TVInfo & TVSetYPbPr750p) 3864 if (pVBInfo->TVInfo & TVSetYPbPr750p)
3888 Tap4TimingPtr = YPbPr750pTap4Timing; 3865 Tap4TimingPtr = YPbPr750pTap4Timing;
@@ -3988,8 +3965,8 @@ static void XGI_SetGroup3(unsigned short ModeIdIndex,
3988} 3965}
3989 3966
3990static void XGI_SetGroup4(unsigned short ModeIdIndex, 3967static void XGI_SetGroup4(unsigned short ModeIdIndex,
3991 unsigned short RefreshRateTableIndex, 3968 unsigned short RefreshRateTableIndex,
3992 struct vb_device_info *pVBInfo) 3969 struct vb_device_info *pVBInfo)
3993{ 3970{
3994 unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2; 3971 unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
3995 3972
@@ -4080,12 +4057,12 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
4080 if (templong != 0) 4057 if (templong != 0)
4081 tempebx++; 4058 tempebx++;
4082 4059
4083 temp = (unsigned short) (tempebx & 0x000000FF); 4060 temp = (unsigned short)(tempebx & 0x000000FF);
4084 xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp); 4061 xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);
4085 4062
4086 temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8); 4063 temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
4087 xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp); 4064 xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
4088 tempbx = (unsigned short) (tempebx >> 16); 4065 tempbx = (unsigned short)(tempebx >> 16);
4089 temp = tempbx & 0x00FF; 4066 temp = tempbx & 0x00FF;
4090 temp <<= 4; 4067 temp <<= 4;
4091 temp |= ((tempcx & 0xFF00) >> 8); 4068 temp |= ((tempcx & 0xFF00) >> 8);
@@ -4132,8 +4109,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
4132 | TVSetHiVision))) { 4109 | TVSetHiVision))) {
4133 temp |= 0x0001; 4110 temp |= 0x0001;
4134 if ((pVBInfo->VBInfo & SetInSlaveMode) && 4111 if ((pVBInfo->VBInfo & SetInSlaveMode) &&
4135 !(pVBInfo->TVInfo 4112 !(pVBInfo->TVInfo & TVSimuMode))
4136 & TVSimuMode))
4137 temp &= (~0x0001); 4113 temp &= (~0x0001);
4138 } 4114 }
4139 } 4115 }
@@ -4174,7 +4150,8 @@ static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
4174} 4150}
4175 4151
4176static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info, 4152static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
4177 unsigned short ModeNo, unsigned short ModeIdIndex) 4153 unsigned short ModeNo,
4154 unsigned short ModeIdIndex)
4178{ 4155{
4179 unsigned short xres, yres, colordepth, modeflag, resindex; 4156 unsigned short xres, yres, colordepth, modeflag, resindex;
4180 4157
@@ -4221,7 +4198,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
4221 unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE; 4198 unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
4222 unsigned short value; 4199 unsigned short value;
4223 4200
4224 temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability & 4201 temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability &
4225 (LCDPolarity << 8)) >> 8); 4202 (LCDPolarity << 8)) >> 8);
4226 temp &= LCDPolarity; 4203 temp &= LCDPolarity;
4227 Miscdata = inb(pVBInfo->P3cc); 4204 Miscdata = inb(pVBInfo->P3cc);
@@ -4354,12 +4331,12 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
4354 if (chip_id == XG27) { 4331 if (chip_id == XG27) {
4355 /* Panel VRS SR35[2:0] SR34[7:0] */ 4332 /* Panel VRS SR35[2:0] SR34[7:0] */
4356 xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, 4333 xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
4357 (value & 0x700) >> 8); 4334 (value & 0x700) >> 8);
4358 xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF); 4335 xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
4359 } else { 4336 } else {
4360 /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */ 4337 /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
4361 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03, 4338 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
4362 (value & 0x600) >> 9); 4339 (value & 0x600) >> 9);
4363 xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF); 4340 xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
4364 xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01); 4341 xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
4365 } 4342 }
@@ -4372,11 +4349,11 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
4372 /* Panel VRE SR3F[7:2] */ 4349 /* Panel VRE SR3F[7:2] */
4373 if (chip_id == XG27) 4350 if (chip_id == XG27)
4374 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, 4351 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
4375 (value << 2) & 0xFC); 4352 (value << 2) & 0xFC);
4376 else 4353 else
4377 /* SR3F[7] has to be 0, h/w bug */ 4354 /* SR3F[7] has to be 0, h/w bug */
4378 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, 4355 xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
4379 (value << 2) & 0x7C); 4356 (value << 2) & 0x7C);
4380 4357
4381 for (temp = 0, value = 0; temp < 3; temp++) { 4358 for (temp = 0, value = 0; temp < 3; temp++) {
4382 xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value); 4359 xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
@@ -4400,13 +4377,13 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
4400 } 4377 }
4401} 4378}
4402 4379
4403/* --------------------------------------------------------------------- */ 4380/*
4404/* Function : XGI_IsLCDON */ 4381 * Function : XGI_IsLCDON
4405/* Input : */ 4382 * Input :
4406/* Output : 0 : Skip PSC Control */ 4383 * Output : 0 : Skip PSC Control
4407/* 1: Disable PSC */ 4384 * 1: Disable PSC
4408/* Description : */ 4385 * Description :
4409/* --------------------------------------------------------------------- */ 4386 */
4410static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo) 4387static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
4411{ 4388{
4412 unsigned short tempax; 4389 unsigned short tempax;
@@ -4421,8 +4398,8 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
4421} 4398}
4422 4399
4423static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info, 4400static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
4424 struct xgi_hw_device_info *HwDeviceExtension, 4401 struct xgi_hw_device_info *HwDeviceExtension,
4425 struct vb_device_info *pVBInfo) 4402 struct vb_device_info *pVBInfo)
4426{ 4403{
4427 unsigned short tempah = 0; 4404 unsigned short tempah = 0;
4428 4405
@@ -4498,23 +4475,23 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
4498 } 4475 }
4499} 4476}
4500 4477
4501/* --------------------------------------------------------------------- */ 4478/*
4502/* Function : XGI_GetTVPtrIndex */ 4479 * Function : XGI_GetTVPtrIndex
4503/* Input : */ 4480 * Input :
4504/* Output : */ 4481 * Output :
4505/* Description : bx 0 : ExtNTSC */ 4482 * Description : bx 0 : ExtNTSC
4506/* 1 : StNTSC */ 4483 * 1 : StNTSC
4507/* 2 : ExtPAL */ 4484 * 2 : ExtPAL
4508/* 3 : StPAL */ 4485 * 3 : StPAL
4509/* 4 : ExtHiTV */ 4486 * 4 : ExtHiTV
4510/* 5 : StHiTV */ 4487 * 5 : StHiTV
4511/* 6 : Ext525i */ 4488 * 6 : Ext525i
4512/* 7 : St525i */ 4489 * 7 : St525i
4513/* 8 : Ext525p */ 4490 * 8 : Ext525p
4514/* 9 : St525p */ 4491 * 9 : St525p
4515/* A : Ext750p */ 4492 * A : Ext750p
4516/* B : St750p */ 4493 * B : St750p
4517/* --------------------------------------------------------------------- */ 4494 */
4518static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo) 4495static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
4519{ 4496{
4520 unsigned short tempbx = 0; 4497 unsigned short tempbx = 0;
@@ -4535,24 +4512,24 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
4535 return tempbx; 4512 return tempbx;
4536} 4513}
4537 4514
4538/* --------------------------------------------------------------------- */ 4515/*
4539/* Function : XGI_GetTVPtrIndex2 */ 4516 * Function : XGI_GetTVPtrIndex2
4540/* Input : */ 4517 * Input :
4541/* Output : bx 0 : NTSC */ 4518 * Output : bx 0 : NTSC
4542/* 1 : PAL */ 4519 * 1 : PAL
4543/* 2 : PALM */ 4520 * 2 : PALM
4544/* 3 : PALN */ 4521 * 3 : PALN
4545/* 4 : NTSC1024x768 */ 4522 * 4 : NTSC1024x768
4546/* 5 : PAL-M 1024x768 */ 4523 * 5 : PAL-M 1024x768
4547/* 6-7: reserved */ 4524 * 6-7: reserved
4548/* cl 0 : YFilter1 */ 4525 * cl 0 : YFilter1
4549/* 1 : YFilter2 */ 4526 * 1 : YFilter2
4550/* ch 0 : 301A */ 4527 * ch 0 : 301A
4551/* 1 : 301B/302B/301LV/302LV */ 4528 * 1 : 301B/302B/301LV/302LV
4552/* Description : */ 4529 * Description :
4553/* --------------------------------------------------------------------- */ 4530 */
4554static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl, 4531static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
4555 unsigned char *tempch, struct vb_device_info *pVBInfo) 4532 unsigned char *tempch, struct vb_device_info *pVBInfo)
4556{ 4533{
4557 *tempbx = 0; 4534 *tempbx = 0;
4558 *tempcl = 0; 4535 *tempcl = 0;
@@ -4637,33 +4614,32 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
4637 4614
4638 if (temp & LCDRGB18Bit) { 4615 if (temp & LCDRGB18Bit) {
4639 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F, 4616 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
4640 /* Enable Dither */ 4617 /* Enable Dither */
4641 (unsigned short) (0x20 | (tempcx & 0x00C0))); 4618 (unsigned short)(0x20 | (tempcx & 0x00C0)));
4642 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80); 4619 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
4643 } else { 4620 } else {
4644 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F, 4621 xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
4645 (unsigned short) (0x30 | (tempcx & 0x00C0))); 4622 (unsigned short)(0x30 | (tempcx & 0x00C0)));
4646 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00); 4623 xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
4647 } 4624 }
4648} 4625}
4649 4626
4650/* --------------------------------------------------------------------- */ 4627/*
4651/* Function : XGI_SetLCDCap_B */ 4628 * Function : XGI_SetLCDCap_B
4652/* Input : cx -> LCD Capability */ 4629 * Input : cx -> LCD Capability
4653/* Output : */ 4630 * Output :
4654/* Description : */ 4631 * Description :
4655/* --------------------------------------------------------------------- */ 4632 */
4656static void XGI_SetLCDCap_B(unsigned short tempcx, 4633static void XGI_SetLCDCap_B(unsigned short tempcx,
4657 struct vb_device_info *pVBInfo) 4634 struct vb_device_info *pVBInfo)
4658{ 4635{
4659 if (tempcx & EnableLCD24bpp) /* 24bits */ 4636 if (tempcx & EnableLCD24bpp) /* 24bits */
4660 xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0, 4637 xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
4661 (unsigned short) (((tempcx & 0x00ff) >> 6) 4638 (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
4662 | 0x0c));
4663 else 4639 else
4664 xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0, 4640 xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
4665 (unsigned short) (((tempcx & 0x00ff) >> 6) 4641 (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
4666 | 0x18)); /* Enable Dither */ 4642 /* Enable Dither */
4667} 4643}
4668 4644
4669static void XGI_LongWait(struct vb_device_info *pVBInfo) 4645static void XGI_LongWait(struct vb_device_info *pVBInfo)
@@ -4698,13 +4674,13 @@ static void SetSpectrum(struct vb_device_info *pVBInfo)
4698 XGI_LongWait(pVBInfo); 4674 XGI_LongWait(pVBInfo);
4699 4675
4700 xgifb_reg_set(pVBInfo->Part4Port, 0x31, 4676 xgifb_reg_set(pVBInfo->Part4Port, 0x31,
4701 pVBInfo->LCDCapList[index].Spectrum_31); 4677 pVBInfo->LCDCapList[index].Spectrum_31);
4702 xgifb_reg_set(pVBInfo->Part4Port, 0x32, 4678 xgifb_reg_set(pVBInfo->Part4Port, 0x32,
4703 pVBInfo->LCDCapList[index].Spectrum_32); 4679 pVBInfo->LCDCapList[index].Spectrum_32);
4704 xgifb_reg_set(pVBInfo->Part4Port, 0x33, 4680 xgifb_reg_set(pVBInfo->Part4Port, 0x33,
4705 pVBInfo->LCDCapList[index].Spectrum_33); 4681 pVBInfo->LCDCapList[index].Spectrum_33);
4706 xgifb_reg_set(pVBInfo->Part4Port, 0x34, 4682 xgifb_reg_set(pVBInfo->Part4Port, 0x34,
4707 pVBInfo->LCDCapList[index].Spectrum_34); 4683 pVBInfo->LCDCapList[index].Spectrum_34);
4708 XGI_LongWait(pVBInfo); 4684 XGI_LongWait(pVBInfo);
4709 xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */ 4685 xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
4710} 4686}
@@ -4721,13 +4697,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
4721 (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) { 4697 (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
4722 /* Set 301LV Capability */ 4698 /* Set 301LV Capability */
4723 xgifb_reg_set(pVBInfo->Part4Port, 0x24, 4699 xgifb_reg_set(pVBInfo->Part4Port, 0x24,
4724 (unsigned char) (tempcx & 0x1F)); 4700 (unsigned char)(tempcx & 0x1F));
4725 } 4701 }
4726 /* VB Driving */ 4702 /* VB Driving */
4727 xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, 4703 xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
4728 ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8), 4704 ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
4729 (unsigned short) ((tempcx & (EnableVBCLKDRVLOW 4705 (unsigned short)((tempcx & (EnableVBCLKDRVLOW |
4730 | EnablePLLSPLOW)) >> 8)); 4706 EnablePLLSPLOW)) >> 8));
4731 4707
4732 if (pVBInfo->VBInfo & SetCRT2ToLCD) 4708 if (pVBInfo->VBInfo & SetCRT2ToLCD)
4733 XGI_SetLCDCap_B(tempcx, pVBInfo); 4709 XGI_SetLCDCap_B(tempcx, pVBInfo);
@@ -4744,12 +4720,12 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
4744 } 4720 }
4745} 4721}
4746 4722
4747/* --------------------------------------------------------------------- */ 4723/*
4748/* Function : XGI_SetAntiFlicker */ 4724 * Function : XGI_SetAntiFlicker
4749/* Input : */ 4725 * Input :
4750/* Output : */ 4726 * Output :
4751/* Description : Set TV Customized Param. */ 4727 * Description : Set TV Customized Param.
4752/* --------------------------------------------------------------------- */ 4728 */
4753static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo) 4729static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
4754{ 4730{
4755 unsigned short tempbx; 4731 unsigned short tempbx;
@@ -4792,13 +4768,13 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
4792 XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */ 4768 XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
4793 tempData = TVPhaseList[tempbx]; 4769 tempData = TVPhaseList[tempbx];
4794 4770
4795 xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short) (tempData 4771 xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData
4796 & 0x000000FF)); 4772 & 0x000000FF));
4797 xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short) ((tempData 4773 xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData
4798 & 0x0000FF00) >> 8)); 4774 & 0x0000FF00) >> 8));
4799 xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short) ((tempData 4775 xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData
4800 & 0x00FF0000) >> 16)); 4776 & 0x00FF0000) >> 16));
4801 xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short) ((tempData 4777 xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData
4802 & 0xFF000000) >> 24)); 4778 & 0xFF000000) >> 24));
4803} 4779}
4804 4780
@@ -4866,12 +4842,12 @@ static void XGI_SetYFilter(unsigned short ModeIdIndex,
4866 } 4842 }
4867} 4843}
4868 4844
4869/* --------------------------------------------------------------------- */ 4845/*
4870/* Function : XGI_OEM310Setting */ 4846 * Function : XGI_OEM310Setting
4871/* Input : */ 4847 * Input :
4872/* Output : */ 4848 * Output :
4873/* Description : Customized Param. for 301 */ 4849 * Description : Customized Param. for 301
4874/* --------------------------------------------------------------------- */ 4850 */
4875static void XGI_OEM310Setting(unsigned short ModeIdIndex, 4851static void XGI_OEM310Setting(unsigned short ModeIdIndex,
4876 struct vb_device_info *pVBInfo) 4852 struct vb_device_info *pVBInfo)
4877{ 4853{
@@ -4890,12 +4866,12 @@ static void XGI_OEM310Setting(unsigned short ModeIdIndex,
4890 } 4866 }
4891} 4867}
4892 4868
4893/* --------------------------------------------------------------------- */ 4869/*
4894/* Function : XGI_SetCRT2ModeRegs */ 4870 * Function : XGI_SetCRT2ModeRegs
4895/* Input : */ 4871 * Input :
4896/* Output : */ 4872 * Output :
4897/* Description : Origin code for crt2group */ 4873 * Description : Origin code for crt2group
4898/* --------------------------------------------------------------------- */ 4874 */
4899static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo) 4875static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
4900{ 4876{
4901 unsigned short tempbl; 4877 unsigned short tempbl;
@@ -4999,8 +4975,8 @@ reg_and_or:
4999 tempah |= 0x40; 4975 tempah |= 0x40;
5000 } 4976 }
5001 4977
5002 if ((pVBInfo->LCDResInfo == Panel_1280x1024) 4978 if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
5003 || (pVBInfo->LCDResInfo == Panel_1280x1024x75)) 4979 (pVBInfo->LCDResInfo == Panel_1280x1024x75))
5004 tempah |= 0x80; 4980 tempah |= 0x80;
5005 4981
5006 if (pVBInfo->LCDResInfo == Panel_1280x960) 4982 if (pVBInfo->LCDResInfo == Panel_1280x960)
@@ -5068,8 +5044,9 @@ void XGI_LockCRT2(struct vb_device_info *pVBInfo)
5068} 5044}
5069 5045
5070unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE, 5046unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
5071 unsigned short ModeNo, unsigned short ModeIdIndex, 5047 unsigned short ModeNo,
5072 struct vb_device_info *pVBInfo) 5048 unsigned short ModeIdIndex,
5049 struct vb_device_info *pVBInfo)
5073{ 5050{
5074 const u8 LCDARefreshIndex[] = { 5051 const u8 LCDARefreshIndex[] = {
5075 0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 }; 5052 0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
@@ -5143,14 +5120,14 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
5143} 5120}
5144 5121
5145static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex, 5122static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
5146 struct xgi_hw_device_info *HwDeviceExtension, 5123 struct xgi_hw_device_info *HwDeviceExtension,
5147 struct vb_device_info *pVBInfo) 5124 struct vb_device_info *pVBInfo)
5148{ 5125{
5149 unsigned short RefreshRateTableIndex; 5126 unsigned short RefreshRateTableIndex;
5150 5127
5151 pVBInfo->SetFlag |= ProgrammingCRT2; 5128 pVBInfo->SetFlag |= ProgrammingCRT2;
5152 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, 5129 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
5153 ModeIdIndex, pVBInfo); 5130 ModeIdIndex, pVBInfo);
5154 XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo); 5131 XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
5155 XGI_GetLVDSData(ModeIdIndex, pVBInfo); 5132 XGI_GetLVDSData(ModeIdIndex, pVBInfo);
5156 XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo); 5133 XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
@@ -5159,8 +5136,8 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
5159} 5136}
5160 5137
5161static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo, 5138static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
5162 struct xgi_hw_device_info *HwDeviceExtension, 5139 struct xgi_hw_device_info *HwDeviceExtension,
5163 struct vb_device_info *pVBInfo) 5140 struct vb_device_info *pVBInfo)
5164{ 5141{
5165 unsigned short ModeIdIndex, RefreshRateTableIndex; 5142 unsigned short ModeIdIndex, RefreshRateTableIndex;
5166 5143
@@ -5168,7 +5145,7 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
5168 XGI_SearchModeID(ModeNo, &ModeIdIndex); 5145 XGI_SearchModeID(ModeNo, &ModeIdIndex);
5169 pVBInfo->SelectCRT2Rate = 4; 5146 pVBInfo->SelectCRT2Rate = 4;
5170 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, 5147 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
5171 ModeIdIndex, pVBInfo); 5148 ModeIdIndex, pVBInfo);
5172 XGI_SaveCRT2Info(ModeNo, pVBInfo); 5149 XGI_SaveCRT2Info(ModeNo, pVBInfo);
5173 XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo); 5150 XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
5174 XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo); 5151 XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
@@ -5210,39 +5187,39 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
5210 CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63); 5187 CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
5211 SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01); 5188 SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
5212 5189
5213 xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF)); 5190 xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
5214 xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF)); 5191 xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
5215 5192
5216 CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17); 5193 CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
5217 xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80)); 5194 xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
5218 5195
5219 SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F); 5196 SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
5220 xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04)); 5197 xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
5221 5198
5222 SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07); 5199 SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
5223 xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB)); 5200 xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
5224 SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06); 5201 SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
5225 xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3)); 5202 xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
5226 5203
5227 xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00); 5204 xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
5228 5205
5229 for (i = 0; i < 8; i++) 5206 for (i = 0; i < 8; i++)
5230 xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]); 5207 xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
5231 5208
5232 for (i = 8; i < 11; i++) 5209 for (i = 8; i < 11; i++)
5233 xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8), 5210 xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8),
5234 CRTCData[i]); 5211 CRTCData[i]);
5235 5212
5236 for (i = 11; i < 13; i++) 5213 for (i = 11; i < 13; i++)
5237 xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4), 5214 xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4),
5238 CRTCData[i]); 5215 CRTCData[i]);
5239 5216
5240 for (i = 13; i < 16; i++) 5217 for (i = 13; i < 16; i++)
5241 xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3), 5218 xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3),
5242 CRTCData[i]); 5219 CRTCData[i]);
5243 5220
5244 xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16] 5221 xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16]
5245 & 0xE0)); 5222 & 0xE0));
5246 5223
5247 xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00); 5224 xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
5248 xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B); 5225 xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
@@ -5275,12 +5252,12 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
5275 5252
5276 xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get( 5253 xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
5277 pVBInfo->P3d4, 0x53) & 0xFD)); 5254 pVBInfo->P3d4, 0x53) & 0xFD));
5278 xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F); 5255 xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
5279} 5256}
5280 5257
5281static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info, 5258static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
5282 struct xgi_hw_device_info *HwDeviceExtension, 5259 struct xgi_hw_device_info *HwDeviceExtension,
5283 struct vb_device_info *pVBInfo) 5260 struct vb_device_info *pVBInfo)
5284{ 5261{
5285 unsigned short tempah; 5262 unsigned short tempah;
5286 5263
@@ -5310,11 +5287,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
5310 5287
5311 if (!(pVBInfo->VBInfo & DisableCRT2Display)) { 5288 if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
5312 xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0, 5289 xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
5313 0x20); /* shampoo 0129 */ 5290 0x20); /* shampoo 0129 */
5314 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) { 5291 if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
5315 if (pVBInfo->VBInfo & 5292 if (pVBInfo->VBInfo &
5316 (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) 5293 (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
5317 /* LVDS PLL power on */ 5294 /* LVDS PLL power on */
5318 xgifb_reg_and(pVBInfo->Part4Port, 0x2A, 5295 xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
5319 0x7F); 5296 0x7F);
5320 /* LVDS Driver power on */ 5297 /* LVDS Driver power on */
@@ -5358,9 +5335,9 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
5358} 5335}
5359 5336
5360static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info, 5337static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
5361 struct xgi_hw_device_info *HwDeviceExtension, 5338 struct xgi_hw_device_info *HwDeviceExtension,
5362 unsigned short ModeNo, unsigned short ModeIdIndex, 5339 unsigned short ModeNo, unsigned short ModeIdIndex,
5363 struct vb_device_info *pVBInfo) 5340 struct vb_device_info *pVBInfo)
5364{ 5341{
5365 unsigned short RefreshRateTableIndex, temp; 5342 unsigned short RefreshRateTableIndex, temp;
5366 5343
@@ -5389,14 +5366,14 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
5389 } 5366 }
5390 5367
5391 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, 5368 RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
5392 ModeIdIndex, pVBInfo); 5369 ModeIdIndex, pVBInfo);
5393 if (RefreshRateTableIndex != 0xFFFF) { 5370 if (RefreshRateTableIndex != 0xFFFF) {
5394 XGI_SetSync(RefreshRateTableIndex, pVBInfo); 5371 XGI_SetSync(RefreshRateTableIndex, pVBInfo);
5395 XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex, 5372 XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
5396 pVBInfo, HwDeviceExtension); 5373 pVBInfo, HwDeviceExtension);
5397 XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo); 5374 XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
5398 XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex, 5375 XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
5399 HwDeviceExtension, pVBInfo); 5376 HwDeviceExtension, pVBInfo);
5400 XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension, 5377 XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
5401 RefreshRateTableIndex, pVBInfo); 5378 RefreshRateTableIndex, pVBInfo);
5402 } 5379 }
@@ -5410,15 +5387,15 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
5410 XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo); 5387 XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
5411 5388
5412 XGI_UpdateXG21CRTC(ModeNo, pVBInfo, 5389 XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
5413 RefreshRateTableIndex); 5390 RefreshRateTableIndex);
5414 5391
5415 xgifb_set_lcd(HwDeviceExtension->jChipType, 5392 xgifb_set_lcd(HwDeviceExtension->jChipType,
5416 pVBInfo, RefreshRateTableIndex); 5393 pVBInfo, RefreshRateTableIndex);
5417 5394
5418 if (pVBInfo->IF_DEF_LVDS == 1) 5395 if (pVBInfo->IF_DEF_LVDS == 1)
5419 xgifb_set_lvds(xgifb_info, 5396 xgifb_set_lvds(xgifb_info,
5420 HwDeviceExtension->jChipType, 5397 HwDeviceExtension->jChipType,
5421 ModeIdIndex, pVBInfo); 5398 ModeIdIndex, pVBInfo);
5422 } 5399 }
5423 } 5400 }
5424 5401
@@ -5430,8 +5407,8 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
5430} 5407}
5431 5408
5432unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, 5409unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5433 struct xgi_hw_device_info *HwDeviceExtension, 5410 struct xgi_hw_device_info *HwDeviceExtension,
5434 unsigned short ModeNo) 5411 unsigned short ModeNo)
5435{ 5412{
5436 unsigned short ModeIdIndex; 5413 unsigned short ModeIdIndex;
5437 struct vb_device_info VBINF; 5414 struct vb_device_info VBINF;
@@ -5440,7 +5417,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5440 pVBInfo->IF_DEF_LVDS = 0; 5417 pVBInfo->IF_DEF_LVDS = 0;
5441 5418
5442 if (HwDeviceExtension->jChipType >= XG20) 5419 if (HwDeviceExtension->jChipType >= XG20)
5443 pVBInfo->VBType = 0; /*set VBType default 0*/ 5420 pVBInfo->VBType = 0; /* set VBType default 0 */
5444 5421
5445 XGIRegInit(pVBInfo, xgifb_info->vga_base); 5422 XGIRegInit(pVBInfo, xgifb_info->vga_base);
5446 5423
@@ -5473,13 +5450,13 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5473 XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo); 5450 XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
5474 5451
5475 if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) || 5452 if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
5476 !(pVBInfo->VBInfo & SwitchCRT2)) { 5453 !(pVBInfo->VBInfo & SwitchCRT2)) {
5477 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo, 5454 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
5478 ModeIdIndex, pVBInfo); 5455 ModeIdIndex, pVBInfo);
5479 5456
5480 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { 5457 if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
5481 XGI_SetLCDAGroup(ModeNo, ModeIdIndex, 5458 XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
5482 HwDeviceExtension, pVBInfo); 5459 HwDeviceExtension, pVBInfo);
5483 } 5460 }
5484 } 5461 }
5485 5462
@@ -5488,7 +5465,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5488 case VB_CHIP_301: /* fall through */ 5465 case VB_CHIP_301: /* fall through */
5489 case VB_CHIP_302: 5466 case VB_CHIP_302:
5490 XGI_SetCRT2Group301(ModeNo, HwDeviceExtension, 5467 XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
5491 pVBInfo); /*add for CRT2 */ 5468 pVBInfo); /* add for CRT2 */
5492 break; 5469 break;
5493 5470
5494 default: 5471 default:
@@ -5497,7 +5474,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5497 } 5474 }
5498 5475
5499 XGI_SetCRT2ModeRegs(pVBInfo); 5476 XGI_SetCRT2ModeRegs(pVBInfo);
5500 XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/ 5477 XGI_OEM310Setting(ModeIdIndex, pVBInfo); /* 0212 */
5501 XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo); 5478 XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
5502 } /* !XG20 */ 5479 } /* !XG20 */
5503 else { 5480 else {
@@ -5515,7 +5492,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
5515 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo); 5492 XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
5516 5493
5517 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo, 5494 XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
5518 ModeIdIndex, pVBInfo); 5495 ModeIdIndex, pVBInfo);
5519 5496
5520 XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo); 5497 XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
5521 } 5498 }
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index c801deb142f6..f9f98e06e6d5 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1701,6 +1701,7 @@ static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = {
1701 { {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */ 1701 { {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */
1702 { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */ 1702 { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
1703}; 1703};
1704
1704/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */ 1705/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
1705static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = { 1706static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = {
1706 { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */ 1707 { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
@@ -1886,17 +1887,17 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
1886 0x6C, 0xC3, 0x35, 0x62, 1887 0x6C, 0xC3, 0x35, 0x62,
1887 0x0A, 0xC0, 0x28, 0x10}, 1888 0x0A, 0xC0, 0x28, 0x10},
1888/* LCDCap1280x1024 */ 1889/* LCDCap1280x1024 */
1889 {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, 1890 {Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap,
1890 0x70, 0x03, VCLK108_2_315, 1891 0x70, 0x03, VCLK108_2_315,
1891 0x70, 0x44, 0xF8, 0x2F, 1892 0x70, 0x44, 0xF8, 0x2F,
1892 0x0A, 0xC0, 0x30, 0x10}, 1893 0x0A, 0xC0, 0x30, 0x10},
1893/* LCDCap1400x1050 */ 1894/* LCDCap1400x1050 */
1894 {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, 1895 {Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap,
1895 0x70, 0x03, VCLK108_2_315, 1896 0x70, 0x03, VCLK108_2_315,
1896 0x70, 0x44, 0xF8, 0x2F, 1897 0x70, 0x44, 0xF8, 0x2F,
1897 0x0A, 0xC0, 0x30, 0x10}, 1898 0x0A, 0xC0, 0x30, 0x10},
1898/* LCDCap1600x1200 */ 1899/* LCDCap1600x1200 */
1899 {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, 1900 {Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap,
1900 0xC0, 0x03, VCLK162, 1901 0xC0, 0x03, VCLK162,
1901 0x43, 0x22, 0x70, 0x24, 1902 0x43, 0x22, 0x70, 0x24,
1902 0x0A, 0xC0, 0x30, 0x10}, 1903 0x0A, 0xC0, 0x30, 0x10},
@@ -1905,7 +1906,7 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
1905 0x2B, 0x61, 0x2B, 0x61, 1906 0x2B, 0x61, 0x2B, 0x61,
1906 0x0A, 0xC0, 0x28, 0x10}, 1907 0x0A, 0xC0, 0x28, 0x10},
1907/* LCDCap1280x1024x75 */ 1908/* LCDCap1280x1024x75 */
1908 {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, 1909 {Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap,
1909 0x90, 0x03, VCLK135_5, 1910 0x90, 0x03, VCLK135_5,
1910 0x54, 0x42, 0x4A, 0x61, 1911 0x54, 0x42, 0x4A, 0x61,
1911 0x0A, 0xC0, 0x30, 0x10}, 1912 0x0A, 0xC0, 0x30, 0x10},
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 08db58b396b2..052694e75053 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -18,7 +18,7 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
18 u8 temp; 18 u8 temp;
19 19
20 temp = xgifb_reg_get(port, index); 20 temp = xgifb_reg_get(port, index);
21 temp = (u8) ((temp & data_and) | data_or); 21 temp = (u8)((temp & data_and) | data_or);
22 xgifb_reg_set(port, index, temp); 22 xgifb_reg_set(port, index, temp);
23} 23}
24 24
@@ -28,7 +28,7 @@ static inline void xgifb_reg_and(unsigned long port, u8 index,
28 u8 temp; 28 u8 temp;
29 29
30 temp = xgifb_reg_get(port, index); 30 temp = xgifb_reg_get(port, index);
31 temp = (u8) (temp & data_and); 31 temp = (u8)(temp & data_and);
32 xgifb_reg_set(port, index, temp); 32 xgifb_reg_set(port, index, temp);
33} 33}
34 34
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index e7fdec4db9da..5ba430cc9a87 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
136 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ 136 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
137 BIT(IIO_CHAN_INFO_OFFSET), \ 137 BIT(IIO_CHAN_INFO_OFFSET), \
138 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ 138 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
139 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
139 .scan_index = (_si), \ 140 .scan_index = (_si), \
140 .scan_type = { \ 141 .scan_type = { \
141 .sign = 'u', \ 142 .sign = 'u', \
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9edccfba1ffb..47eeec3218b5 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val);
226int iio_write_channel_raw(struct iio_channel *chan, int val); 226int iio_write_channel_raw(struct iio_channel *chan, int val);
227 227
228/** 228/**
229 * iio_read_max_channel_raw() - read maximum available raw value from a given
230 * channel, i.e. the maximum possible value.
231 * @chan: The channel being queried.
232 * @val: Value read back.
233 *
234 * Note raw reads from iio channels are in adc counts and hence
235 * scale will need to be applied if standard units are required.
236 */
237int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
238
239/**
240 * iio_read_avail_channel_raw() - read available raw values from a given channel
241 * @chan: The channel being queried.
242 * @vals: Available values read back.
243 * @length: Number of entries in vals.
244 *
245 * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST.
246 *
247 * For ranges, three vals are always returned; min, step and max.
248 * For lists, all the possible values are enumerated.
249 *
250 * Note raw available values from iio channels are in adc counts and
251 * hence scale will need to be applied if standard units are required.
252 */
253int iio_read_avail_channel_raw(struct iio_channel *chan,
254 const int **vals, int *length);
255
256/**
229 * iio_get_channel_type() - get the type of a channel 257 * iio_get_channel_type() - get the type of a channel
230 * @channel: The channel being queried. 258 * @channel: The channel being queried.
231 * @type: The type of the channel. 259 * @type: The type of the channel.
@@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel,
236 enum iio_chan_type *type); 264 enum iio_chan_type *type);
237 265
238/** 266/**
267 * iio_read_channel_offset() - read the offset value for a channel
268 * @chan: The channel being queried.
269 * @val: First part of value read back.
270 * @val2: Second part of value read back.
271 *
272 * Note returns a description of what is in val and val2, such
273 * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
274 * + val2/1e6
275 */
276int iio_read_channel_offset(struct iio_channel *chan, int *val,
277 int *val2);
278
279/**
239 * iio_read_channel_scale() - read the scale value for a channel 280 * iio_read_channel_scale() - read the scale value for a channel
240 * @chan: The channel being queried. 281 * @chan: The channel being queried.
241 * @val: First part of value read back. 282 * @val: First part of value read back.
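For reference, the consumer helpers declared above could be used roughly as in the following sketch (not part of this patch); the channel name "adc0", the calling context and the error handling are assumptions for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int example_query_channel(struct device *dev)
{
	struct iio_channel *chan;
	const int *vals;
	int length, max, ret;

	chan = iio_channel_get(dev, "adc0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Largest raw value the channel can report, in ADC counts. */
	ret = iio_read_max_channel_raw(chan, &max);
	if (ret < 0)
		goto out;

	/* Returns IIO_AVAIL_RANGE (min, step, max) or IIO_AVAIL_LIST. */
	ret = iio_read_avail_channel_raw(chan, &vals, &length);
	if (ret == IIO_AVAIL_RANGE)
		dev_info(dev, "raw range %d..%d, step %d\n",
			 vals[0], vals[2], vals[1]);
out:
	iio_channel_release(chan);
	return ret < 0 ? ret : 0;
}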
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 91530e6611e9..628b2cf54c50 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -9,8 +9,18 @@
9#ifndef IIO_DAC_MCP4725_H_ 9#ifndef IIO_DAC_MCP4725_H_
10#define IIO_DAC_MCP4725_H_ 10#define IIO_DAC_MCP4725_H_
11 11
12/**
13 * struct mcp4725_platform_data - MCP4725/6 DAC specific data.
14 * @use_vref: Whether an external reference voltage on Vref pin should be used.
15 * Additional vref-supply must be specified when used.
16 * @vref_buffered: Controls buffering of the external reference voltage.
17 *
 18 * Vref related settings are available only on the MCP4726. See
19 * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
20 */
12struct mcp4725_platform_data { 21struct mcp4725_platform_data {
13 u16 vref_mv; 22 bool use_vref;
23 bool vref_buffered;
14}; 24};
15 25
16#endif /* IIO_DAC_MCP4725_H_ */ 26#endif /* IIO_DAC_MCP4725_H_ */
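For reference only, a hypothetical board file switching from the old vref_mv field to the new flags might carry something like the following; the variable name and values are invented.

#include <linux/iio/dac/mcp4725.h>

static const struct mcp4725_platform_data example_mcp4726_pdata = {
	.use_vref      = true,	/* external reference on the Vref pin */
	.vref_buffered = false,	/* reference input left unbuffered */
};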
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b4a0679e4a49..3f5ea2e9a39e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -225,12 +225,22 @@ struct iio_event_spec {
225 * endianness: little or big endian 225 * endianness: little or big endian
226 * @info_mask_separate: What information is to be exported that is specific to 226 * @info_mask_separate: What information is to be exported that is specific to
227 * this channel. 227 * this channel.
228 * @info_mask_separate_available: What availability information is to be
229 * exported that is specific to this channel.
228 * @info_mask_shared_by_type: What information is to be exported that is shared 230 * @info_mask_shared_by_type: What information is to be exported that is shared
229 * by all channels of the same type. 231 * by all channels of the same type.
232 * @info_mask_shared_by_type_available: What availability information is to be
233 * exported that is shared by all channels of the same
234 * type.
230 * @info_mask_shared_by_dir: What information is to be exported that is shared 235 * @info_mask_shared_by_dir: What information is to be exported that is shared
231 * by all channels of the same direction. 236 * by all channels of the same direction.
237 * @info_mask_shared_by_dir_available: What availability information is to be
238 * exported that is shared by all channels of the same
239 * direction.
232 * @info_mask_shared_by_all: What information is to be exported that is shared 240 * @info_mask_shared_by_all: What information is to be exported that is shared
233 * by all channels. 241 * by all channels.
242 * @info_mask_shared_by_all_available: What availability information is to be
243 * exported that is shared by all channels.
234 * @event_spec: Array of events which should be registered for this 244 * @event_spec: Array of events which should be registered for this
235 * channel. 245 * channel.
236 * @num_event_specs: Size of the event_spec array. 246 * @num_event_specs: Size of the event_spec array.
@@ -269,9 +279,13 @@ struct iio_chan_spec {
269 enum iio_endian endianness; 279 enum iio_endian endianness;
270 } scan_type; 280 } scan_type;
271 long info_mask_separate; 281 long info_mask_separate;
282 long info_mask_separate_available;
272 long info_mask_shared_by_type; 283 long info_mask_shared_by_type;
284 long info_mask_shared_by_type_available;
273 long info_mask_shared_by_dir; 285 long info_mask_shared_by_dir;
286 long info_mask_shared_by_dir_available;
274 long info_mask_shared_by_all; 287 long info_mask_shared_by_all;
288 long info_mask_shared_by_all_available;
275 const struct iio_event_spec *event_spec; 289 const struct iio_event_spec *event_spec;
276 unsigned int num_event_specs; 290 unsigned int num_event_specs;
277 const struct iio_chan_spec_ext_info *ext_info; 291 const struct iio_chan_spec_ext_info *ext_info;
@@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
301 (chan->info_mask_shared_by_all & BIT(type)); 315 (chan->info_mask_shared_by_all & BIT(type));
302} 316}
303 317
318/**
319 * iio_channel_has_available() - Checks if a channel has an available attribute
320 * @chan: The channel to be queried
321 * @type: Type of the available attribute to be checked
322 *
323 * Returns true if the channel supports reporting available values for the
324 * given attribute type, false otherwise.
325 */
326static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
327 enum iio_chan_info_enum type)
328{
329 return (chan->info_mask_separate_available & BIT(type)) |
330 (chan->info_mask_shared_by_type_available & BIT(type)) |
331 (chan->info_mask_shared_by_dir_available & BIT(type)) |
332 (chan->info_mask_shared_by_all_available & BIT(type));
333}
334
304#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ 335#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
305 .type = IIO_TIMESTAMP, \ 336 .type = IIO_TIMESTAMP, \
306 .channel = -1, \ 337 .channel = -1, \
@@ -349,6 +380,14 @@ struct iio_dev;
349 * max_len specifies maximum number of elements 380 * max_len specifies maximum number of elements
350 * vals pointer can contain. val_len is used to return 381 * vals pointer can contain. val_len is used to return
351 * length of valid elements in vals. 382 * length of valid elements in vals.
383 * @read_avail: function to return the available values from the device.
384 * mask specifies which value. Note 0 means the available
385 * values for the channel in question. Return value
 386 * specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
 387 * returned in vals. The type of the vals is returned in
388 * type and the number of vals is returned in length. For
389 * ranges, there are always three vals returned; min, step
390 * and max. For lists, all possible values are enumerated.
352 * @write_raw: function to write a value to the device. 391 * @write_raw: function to write a value to the device.
353 * Parameters are the same as for read_raw. 392 * Parameters are the same as for read_raw.
354 * @write_raw_get_fmt: callback function to query the expected 393 * @write_raw_get_fmt: callback function to query the expected
@@ -381,7 +420,7 @@ struct iio_dev;
381 **/ 420 **/
382struct iio_info { 421struct iio_info {
383 struct module *driver_module; 422 struct module *driver_module;
384 struct attribute_group *event_attrs; 423 const struct attribute_group *event_attrs;
385 const struct attribute_group *attrs; 424 const struct attribute_group *attrs;
386 425
387 int (*read_raw)(struct iio_dev *indio_dev, 426 int (*read_raw)(struct iio_dev *indio_dev,
@@ -397,6 +436,13 @@ struct iio_info {
397 int *val_len, 436 int *val_len,
398 long mask); 437 long mask);
399 438
439 int (*read_avail)(struct iio_dev *indio_dev,
440 struct iio_chan_spec const *chan,
441 const int **vals,
442 int *type,
443 int *length,
444 long mask);
445
400 int (*write_raw)(struct iio_dev *indio_dev, 446 int (*write_raw)(struct iio_dev *indio_dev,
401 struct iio_chan_spec const *chan, 447 struct iio_chan_spec const *chan,
402 int val, 448 int val,
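A minimal driver-side sketch of the new availability plumbing, assuming an invented voltage channel and sampling-frequency list; only the parts relevant to read_avail and the *_available masks are shown.

#include <linux/iio/iio.h>
#include <linux/iio/types.h>

static const int example_samp_freqs[] = { 10, 50, 100, 200 };

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
		/* let the core expose sampling_frequency_available */
		.info_mask_shared_by_type_available =
			BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};

static int example_read_avail(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      const int **vals, int *type, int *length,
			      long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = example_samp_freqs;
		*length = ARRAY_SIZE(example_samp_freqs);
		*type = IIO_VAL_INT;
		return IIO_AVAIL_LIST;	/* a discrete list, not a range */
	default:
		return -EINVAL;
	}
}

static const struct iio_info example_info = {
	.read_avail = example_read_avail,
	/* .read_raw, .write_raw, etc. omitted from this sketch */
};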
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 9cd8f747212f..ce9426c507fd 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -55,10 +55,34 @@ struct iio_const_attr {
55 { .dev_attr = __ATTR(_name, _mode, _show, _store), \ 55 { .dev_attr = __ATTR(_name, _mode, _show, _store), \
56 .address = _addr } 56 .address = _addr }
57 57
58#define IIO_ATTR_RO(_name, _addr) \
59 { .dev_attr = __ATTR_RO(_name), \
60 .address = _addr }
61
62#define IIO_ATTR_WO(_name, _addr) \
63 { .dev_attr = __ATTR_WO(_name), \
64 .address = _addr }
65
66#define IIO_ATTR_RW(_name, _addr) \
67 { .dev_attr = __ATTR_RW(_name), \
68 .address = _addr }
69
58#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ 70#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
59 struct iio_dev_attr iio_dev_attr_##_name \ 71 struct iio_dev_attr iio_dev_attr_##_name \
60 = IIO_ATTR(_name, _mode, _show, _store, _addr) 72 = IIO_ATTR(_name, _mode, _show, _store, _addr)
61 73
74#define IIO_DEVICE_ATTR_RO(_name, _addr) \
75 struct iio_dev_attr iio_dev_attr_##_name \
76 = IIO_ATTR_RO(_name, _addr)
77
78#define IIO_DEVICE_ATTR_WO(_name, _addr) \
79 struct iio_dev_attr iio_dev_attr_##_name \
80 = IIO_ATTR_WO(_name, _addr)
81
82#define IIO_DEVICE_ATTR_RW(_name, _addr) \
83 struct iio_dev_attr iio_dev_attr_##_name \
84 = IIO_ATTR_RW(_name, _addr)
85
62#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ 86#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
63 struct iio_dev_attr iio_dev_attr_##_vname \ 87 struct iio_dev_attr iio_dev_attr_##_vname \
64 = IIO_ATTR(_name, _mode, _show, _store, _addr) 88 = IIO_ATTR(_name, _mode, _show, _store, _addr)
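The RO/WO/RW shorthands above rely on the generic __ATTR_*() helpers, which bind the callback by name (<name>_show / <name>_store). A made-up read-only attribute could therefore be declared as below; the attribute name and value are illustrative only.

#include <linux/iio/sysfs.h>

static ssize_t example_setting_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* __ATTR_RO() picks this function up purely by its name. */
	return sprintf(buf, "%d\n", 42);
}

static IIO_DEVICE_ATTR_RO(example_setting, 0);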
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4f1154f7a33c..ea08302f2d7b 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig);
170 */ 170 */
171bool iio_trigger_using_own(struct iio_dev *indio_dev); 171bool iio_trigger_using_own(struct iio_dev *indio_dev);
172 172
173int iio_trigger_validate_own_device(struct iio_trigger *trig,
174 struct iio_dev *indio_dev);
173 175
174#else 176#else
175struct iio_trigger; 177struct iio_trigger;
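Presumably the new helper is meant to be plugged into a trigger's ops so the trigger is only usable by the device that registered it; a hedged sketch (the ops struct and that policy are assumptions, not taken from this patch):

#include <linux/iio/trigger.h>

static const struct iio_trigger_ops example_trigger_ops = {
	.owner = THIS_MODULE,
	/* refuse any iio_dev other than the trigger's own parent device */
	.validate_device = iio_trigger_validate_own_device,
};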
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 32b579525004..2aa7b6384d64 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,4 +29,9 @@ enum iio_event_info {
29#define IIO_VAL_FRACTIONAL 10 29#define IIO_VAL_FRACTIONAL 10
30#define IIO_VAL_FRACTIONAL_LOG2 11 30#define IIO_VAL_FRACTIONAL_LOG2 11
31 31
32enum iio_available_type {
33 IIO_AVAIL_LIST,
34 IIO_AVAIL_RANGE,
35};
36
32#endif /* _IIO_TYPES_H_ */ 37#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 76f7ef4d3a0d..f62043a75f43 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -148,6 +148,15 @@ struct cros_ec_device {
148 int event_size; 148 int event_size;
149}; 149};
150 150
151/**
152 * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
153 *
154 * @sensor_num: Id of the sensor, as reported by the EC.
155 */
156struct cros_ec_sensor_platform {
157 u8 sensor_num;
158};
159
151/* struct cros_ec_platform - ChromeOS EC platform information 160/* struct cros_ec_platform - ChromeOS EC platform information
152 * 161 *
153 * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) 162 * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
@@ -175,6 +184,7 @@ struct cros_ec_dev {
175 struct cros_ec_device *ec_dev; 184 struct cros_ec_device *ec_dev;
176 struct device *dev; 185 struct device *dev;
177 u16 cmd_offset; 186 u16 cmd_offset;
187 u32 features[2];
178}; 188};
179 189
180/** 190/**
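For illustration, per-sensor platform data of this shape would presumably be attached to each EC sensor sub-device; the value is invented.

#include <linux/mfd/cros_ec.h>

static const struct cros_ec_sensor_platform example_sensor_pdata = {
	.sensor_num = 0,	/* sensor id as enumerated by the EC */
};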
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 76728ff37d01..1683003603f3 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -713,6 +713,90 @@ struct ec_response_get_set_value {
 713/* More than one command can use these structs to get/set parameters. */ 713/* More than one command can use these structs to get/set parameters. */
714#define EC_CMD_GSV_PAUSE_IN_S5 0x0c 714#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
715 715
716/*****************************************************************************/
717/* List the features supported by the firmware */
718#define EC_CMD_GET_FEATURES 0x0d
719
720/* Supported features */
721enum ec_feature_code {
722 /*
723 * This image contains a limited set of features. Another image
724 * in RW partition may support more features.
725 */
726 EC_FEATURE_LIMITED = 0,
727 /*
728 * Commands for probing/reading/writing/erasing the flash in the
729 * EC are present.
730 */
731 EC_FEATURE_FLASH = 1,
732 /*
733 * Can control the fan speed directly.
734 */
735 EC_FEATURE_PWM_FAN = 2,
736 /*
737 * Can control the intensity of the keyboard backlight.
738 */
739 EC_FEATURE_PWM_KEYB = 3,
740 /*
741 * Support Google lightbar, introduced on Pixel.
742 */
743 EC_FEATURE_LIGHTBAR = 4,
744 /* Control of LEDs */
745 EC_FEATURE_LED = 5,
746 /* Exposes an interface to control gyro and sensors.
747 * The host goes through the EC to access these sensors.
748 * In addition, the EC may provide composite sensors, like lid angle.
749 */
750 EC_FEATURE_MOTION_SENSE = 6,
751 /* The keyboard is controlled by the EC */
752 EC_FEATURE_KEYB = 7,
753 /* The AP can use part of the EC flash as persistent storage. */
754 EC_FEATURE_PSTORE = 8,
755 /* The EC monitors BIOS port 80h, and can return POST codes. */
756 EC_FEATURE_PORT80 = 9,
757 /*
758 * Thermal management: include TMP specific commands.
759 * Higher level than direct fan control.
760 */
761 EC_FEATURE_THERMAL = 10,
762 /* Can switch the screen backlight on/off */
763 EC_FEATURE_BKLIGHT_SWITCH = 11,
764 /* Can switch the wifi module on/off */
765 EC_FEATURE_WIFI_SWITCH = 12,
766 /* Monitor host events, through for example SMI or SCI */
767 EC_FEATURE_HOST_EVENTS = 13,
768 /* The EC exposes GPIO commands to control/monitor connected devices. */
769 EC_FEATURE_GPIO = 14,
770 /* The EC can send i2c messages to downstream devices. */
771 EC_FEATURE_I2C = 15,
 772 /* Commands to control the charger are included */
773 EC_FEATURE_CHARGER = 16,
774 /* Simple battery support. */
775 EC_FEATURE_BATTERY = 17,
776 /*
777 * Support Smart battery protocol
778 * (Common Smart Battery System Interface Specification)
779 */
780 EC_FEATURE_SMART_BATTERY = 18,
 782 /* EC can detect when the host hangs. */
782 EC_FEATURE_HANG_DETECT = 19,
783 /* Report power information, for pit only */
784 EC_FEATURE_PMU = 20,
785 /* Another Cros EC device is present downstream of this one */
786 EC_FEATURE_SUB_MCU = 21,
787 /* Support USB Power delivery (PD) commands */
788 EC_FEATURE_USB_PD = 22,
789 /* Control USB multiplexer, for audio through USB port for instance. */
790 EC_FEATURE_USB_MUX = 23,
791 /* Motion Sensor code has an internal software FIFO */
792 EC_FEATURE_MOTION_SENSE_FIFO = 24,
793};
794
795#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
796#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
797struct ec_response_get_features {
798 uint32_t flags[2];
799} __packed;
716 800
717/*****************************************************************************/ 801/*****************************************************************************/
718/* Flash commands */ 802/* Flash commands */
@@ -1315,6 +1399,24 @@ enum motionsense_command {
1315 */ 1399 */
1316 MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, 1400 MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
1317 1401
1402 /*
1403 * Returns a single sensor data.
1404 */
1405 MOTIONSENSE_CMD_DATA = 6,
1406
1407 /*
1408 * Perform low level calibration.. On sensors that support it, ask to
1409 * do offset calibration.
1410 */
1411 MOTIONSENSE_CMD_PERFORM_CALIB = 10,
1412
1413 /*
1414 * Sensor Offset command is a setter/getter command for the offset used
1415 * for calibration. The offsets can be calculated by the host, or via
1416 * PERFORM_CALIB command.
1417 */
1418 MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
1419
1318 /* Number of motionsense sub-commands. */ 1420 /* Number of motionsense sub-commands. */
1319 MOTIONSENSE_NUM_CMDS 1421 MOTIONSENSE_NUM_CMDS
1320}; 1422};
@@ -1335,12 +1437,18 @@ enum motionsensor_id {
1335enum motionsensor_type { 1437enum motionsensor_type {
1336 MOTIONSENSE_TYPE_ACCEL = 0, 1438 MOTIONSENSE_TYPE_ACCEL = 0,
1337 MOTIONSENSE_TYPE_GYRO = 1, 1439 MOTIONSENSE_TYPE_GYRO = 1,
1440 MOTIONSENSE_TYPE_MAG = 2,
1441 MOTIONSENSE_TYPE_PROX = 3,
1442 MOTIONSENSE_TYPE_LIGHT = 4,
1443 MOTIONSENSE_TYPE_ACTIVITY = 5,
1444 MOTIONSENSE_TYPE_MAX
1338}; 1445};
1339 1446
1340/* List of motion sensor locations. */ 1447/* List of motion sensor locations. */
1341enum motionsensor_location { 1448enum motionsensor_location {
1342 MOTIONSENSE_LOC_BASE = 0, 1449 MOTIONSENSE_LOC_BASE = 0,
1343 MOTIONSENSE_LOC_LID = 1, 1450 MOTIONSENSE_LOC_LID = 1,
1451 MOTIONSENSE_LOC_MAX,
1344}; 1452};
1345 1453
1346/* List of motion sensor chips. */ 1454/* List of motion sensor chips. */
@@ -1361,6 +1469,31 @@ enum motionsensor_chip {
1361 */ 1469 */
1362#define EC_MOTION_SENSE_NO_VALUE -1 1470#define EC_MOTION_SENSE_NO_VALUE -1
1363 1471
1472#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
1473
1474/* Set Calibration information */
1475#define MOTION_SENSE_SET_OFFSET 1
1476
1477struct ec_response_motion_sensor_data {
1478 /* Flags for each sensor. */
1479 uint8_t flags;
1480 /* Sensor number the data comes from */
1481 uint8_t sensor_num;
1482 /* Each sensor is up to 3-axis. */
1483 union {
1484 int16_t data[3];
1485 struct {
1486 uint16_t rsvd;
1487 uint32_t timestamp;
1488 } __packed;
1489 struct {
1490 uint8_t activity; /* motionsensor_activity */
1491 uint8_t state;
1492 int16_t add_info[2];
1493 };
1494 };
1495} __packed;
1496
1364struct ec_params_motion_sense { 1497struct ec_params_motion_sense {
1365 uint8_t cmd; 1498 uint8_t cmd;
1366 union { 1499 union {
@@ -1378,9 +1511,37 @@ struct ec_params_motion_sense {
1378 int16_t data; 1511 int16_t data;
1379 } ec_rate, kb_wake_angle; 1512 } ec_rate, kb_wake_angle;
1380 1513
1514 /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
1515 struct {
1516 uint8_t sensor_num;
1517
1518 /*
1519 * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
1520 * the calibration information in the EC.
1521 * If unset, just retrieve calibration information.
1522 */
1523 uint16_t flags;
1524
1525 /*
1526 * Temperature at calibration, in units of 0.01 C
1527 * 0x8000: invalid / unknown.
1528 * 0x0: 0C
1529 * 0x7fff: +327.67C
1530 */
1531 int16_t temp;
1532
1533 /*
1534 * Offset for calibration.
1535 * Unit:
1536 * Accelerometer: 1/1024 g
1537 * Gyro: 1/1024 deg/s
1538 * Compass: 1/16 uT
1539 */
1540 int16_t offset[3];
1541 } __packed sensor_offset;
1542
1381 /* Used for MOTIONSENSE_CMD_INFO. */ 1543 /* Used for MOTIONSENSE_CMD_INFO. */
1382 struct { 1544 struct {
1383 /* Should be element of enum motionsensor_id. */
1384 uint8_t sensor_num; 1545 uint8_t sensor_num;
1385 } info; 1546 } info;
1386 1547
@@ -1410,11 +1571,14 @@ struct ec_response_motion_sense {
1410 /* Flags representing the motion sensor module. */ 1571 /* Flags representing the motion sensor module. */
1411 uint8_t module_flags; 1572 uint8_t module_flags;
1412 1573
1413 /* Flags for each sensor in enum motionsensor_id. */ 1574 /* Number of sensors managed directly by the EC. */
1414 uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT]; 1575 uint8_t sensor_count;
1415 1576
1416 /* Array of all sensor data. Each sensor is 3-axis. */ 1577 /*
1417 int16_t data[3*EC_MOTION_SENSOR_COUNT]; 1578 * Sensor data is truncated if response_max is too small
1579 * for holding all the data.
1580 */
1581 struct ec_response_motion_sensor_data sensor[0];
1418 } dump; 1582 } dump;
1419 1583
1420 /* Used for MOTIONSENSE_CMD_INFO. */ 1584 /* Used for MOTIONSENSE_CMD_INFO. */
@@ -1429,6 +1593,9 @@ struct ec_response_motion_sense {
1429 uint8_t chip; 1593 uint8_t chip;
1430 } info; 1594 } info;
1431 1595
1596 /* Used for MOTIONSENSE_CMD_DATA */
1597 struct ec_response_motion_sensor_data data;
1598
1432 /* 1599 /*
1433 * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, 1600 * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
1434 * MOTIONSENSE_CMD_SENSOR_RANGE, and 1601 * MOTIONSENSE_CMD_SENSOR_RANGE, and
@@ -1438,6 +1605,12 @@ struct ec_response_motion_sense {
1438 /* Current value of the parameter queried. */ 1605 /* Current value of the parameter queried. */
1439 int32_t ret; 1606 int32_t ret;
1440 } ec_rate, sensor_odr, sensor_range, kb_wake_angle; 1607 } ec_rate, sensor_odr, sensor_range, kb_wake_angle;
1608
1609 /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
1610 struct {
1611 int16_t temp;
1612 int16_t offset[3];
1613 } sensor_offset, perform_calib;
1441 }; 1614 };
1442} __packed; 1615} __packed;
1443 1616
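As a sketch of the new feature bitmap (assuming ec->features has already been filled from an EC_CMD_GET_FEATURES response, which is not shown here), a host-side check for the motion-sense feature could look like:

#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>

static bool example_ec_has_motion_sense(struct cros_ec_dev *ec)
{
	/* bits 0..31 live in features[0]; EC_FEATURE_MASK_0() selects one */
	return ec->features[0] & EC_FEATURE_MASK_0(EC_FEATURE_MOTION_SENSE);
}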
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 7f55b8b41032..b9a53e013bff 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -23,6 +23,8 @@
23#define REG_IRQENABLE 0x02C 23#define REG_IRQENABLE 0x02C
24#define REG_IRQCLR 0x030 24#define REG_IRQCLR 0x030
25#define REG_IRQWAKEUP 0x034 25#define REG_IRQWAKEUP 0x034
26#define REG_DMAENABLE_SET 0x038
27#define REG_DMAENABLE_CLEAR 0x03c
26#define REG_CTRL 0x040 28#define REG_CTRL 0x040
27#define REG_ADCFSM 0x044 29#define REG_ADCFSM 0x044
28#define REG_CLKDIV 0x04C 30#define REG_CLKDIV 0x04C
@@ -36,6 +38,7 @@
36#define REG_FIFO0THR 0xE8 38#define REG_FIFO0THR 0xE8
37#define REG_FIFO1CNT 0xF0 39#define REG_FIFO1CNT 0xF0
38#define REG_FIFO1THR 0xF4 40#define REG_FIFO1THR 0xF4
41#define REG_DMA1REQ 0xF8
39#define REG_FIFO0 0x100 42#define REG_FIFO0 0x100
40#define REG_FIFO1 0x200 43#define REG_FIFO1 0x200
41 44
@@ -126,6 +129,10 @@
126#define FIFOREAD_DATA_MASK (0xfff << 0) 129#define FIFOREAD_DATA_MASK (0xfff << 0)
127#define FIFOREAD_CHNLID_MASK (0xf << 16) 130#define FIFOREAD_CHNLID_MASK (0xf << 16)
128 131
132/* DMA ENABLE/CLEAR Register */
133#define DMA_FIFO0 BIT(0)
134#define DMA_FIFO1 BIT(1)
135
129/* Sequencer Status */ 136/* Sequencer Status */
130#define SEQ_STATUS BIT(5) 137#define SEQ_STATUS BIT(5)
131#define CHARGE_STEP 0x11 138#define CHARGE_STEP 0x11
@@ -155,6 +162,7 @@ struct ti_tscadc_dev {
155 struct device *dev; 162 struct device *dev;
156 struct regmap *regmap; 163 struct regmap *regmap;
157 void __iomem *tscadc_base; 164 void __iomem *tscadc_base;
165 phys_addr_t tscadc_phys_base;
158 int irq; 166 int irq;
159 int used_cells; /* 1-2 */ 167 int used_cells; /* 1-2 */
160 int tsc_wires; 168 int tsc_wires;
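Illustrative only: the new DMA enable/clear registers can be reached through the device's existing regmap, roughly as below; the actual enable/teardown sequencing in the ADC driver is not implied here.

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/regmap.h>

static void example_enable_fifo1_dma(struct ti_tscadc_dev *tscadc)
{
	/* ask the sequencer to raise DMA requests on the FIFO1 threshold */
	regmap_write(tscadc->regmap, REG_DMAENABLE_SET, DMA_FIFO1);
}

static void example_disable_fifo1_dma(struct ti_tscadc_dev *tscadc)
{
	regmap_write(tscadc->regmap, REG_DMAENABLE_CLEAR, DMA_FIFO1);
}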
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 3fb357193f09..cb979ad90401 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -109,14 +109,35 @@ enum rpi_firmware_property_tag {
109 RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a, 109 RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
110 RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b, 110 RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
111 111
112 RPI_FIRMWARE_VCHIQ_INIT = 0x00048010,
113
112 RPI_FIRMWARE_GET_COMMAND_LINE = 0x00050001, 114 RPI_FIRMWARE_GET_COMMAND_LINE = 0x00050001,
113 RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001, 115 RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
114}; 116};
115 117
118#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
116int rpi_firmware_property(struct rpi_firmware *fw, 119int rpi_firmware_property(struct rpi_firmware *fw,
117 u32 tag, void *data, size_t len); 120 u32 tag, void *data, size_t len);
118int rpi_firmware_property_list(struct rpi_firmware *fw, 121int rpi_firmware_property_list(struct rpi_firmware *fw,
119 void *data, size_t tag_size); 122 void *data, size_t tag_size);
120struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); 123struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
124#else
125static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
126 void *data, size_t len)
127{
128 return 0;
129}
130
131static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
132 void *data, size_t tag_size)
133{
134 return 0;
135}
136
137static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
138{
139 return NULL;
140}
141#endif
121 142
122#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */ 143#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
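The raspberrypi-firmware.h change wraps the three accessors in IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) and supplies no-op stubs, so consumers still build when the firmware driver is compiled out. A hypothetical consumer pattern is sketched below; example_get_dma_mask is an invented name, and the only contract relied on is that the stub rpi_firmware_get() returns NULL.

#include <linux/errno.h>
#include <linux/of.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

/* Hypothetical caller: compiles with or without CONFIG_RASPBERRYPI_FIRMWARE
 * thanks to the stubs above; only the NULL return needs handling. */
static int example_get_dma_mask(struct device_node *fw_node, u32 *mask)
{
	struct rpi_firmware *fw = rpi_firmware_get(fw_node);

	if (!fw)
		return -ENODEV; /* interface absent or compiled out */

	return rpi_firmware_property(fw, RPI_FIRMWARE_GET_DMA_CHANNELS,
				     mask, sizeof(*mask));
}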
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 22e5e589a274..e54d14a7f876 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -40,6 +40,8 @@ enum iio_chan_type {
40 IIO_PH, 40 IIO_PH,
41 IIO_UVINDEX, 41 IIO_UVINDEX,
42 IIO_ELECTRICALCONDUCTIVITY, 42 IIO_ELECTRICALCONDUCTIVITY,
43 IIO_COUNT,
44 IIO_INDEX,
43}; 45};
44 46
45enum iio_modifier { 47enum iio_modifier {
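The uapi addition of IIO_COUNT and IIO_INDEX gives counter drivers such as the 104-quad-8 dedicated channel types. The sysfs name fragments used below (in_count*/in_index*) are an assumption based on the usual IIO naming convention and the counter ABI document elsewhere in this pull, not something stated in this hunk; the snippet only shows a userspace check against those fragments.

#include <stdio.h>
#include <string.h>

/* Assumed sysfs name fragments for the two new channel types; the actual
 * mapping lives in the IIO core and should be verified there. */
static const char *const new_types[] = { "count", "index" };

int main(void)
{
	const char *example = "in_count_raw"; /* hypothetical attribute name */
	size_t i;

	for (i = 0; i < sizeof(new_types) / sizeof(new_types[0]); i++)
		if (strstr(example, new_types[i]))
			printf("%s matches new channel type '%s'\n",
			       example, new_types[i]);
	return 0;
}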
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index f39c0e9c0d5c..f0c6f54a8b2f 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -247,6 +247,7 @@ void print_usage(void)
247 fprintf(stderr, "Usage: generic_buffer [options]...\n" 247 fprintf(stderr, "Usage: generic_buffer [options]...\n"
248 "Capture, convert and output data from IIO device buffer\n" 248 "Capture, convert and output data from IIO device buffer\n"
249 " -a Auto-activate all available channels\n" 249 " -a Auto-activate all available channels\n"
250 " -A Force-activate ALL channels\n"
250 " -c <n> Do n conversions\n" 251 " -c <n> Do n conversions\n"
251 " -e Disable wait for event (new data)\n" 252 " -e Disable wait for event (new data)\n"
252 " -g Use trigger-less mode\n" 253 " -g Use trigger-less mode\n"
@@ -347,16 +348,22 @@ int main(int argc, char **argv)
347 int noevents = 0; 348 int noevents = 0;
348 int notrigger = 0; 349 int notrigger = 0;
349 char *dummy; 350 char *dummy;
351 bool force_autochannels = false;
350 352
351 struct iio_channel_info *channels = NULL; 353 struct iio_channel_info *channels = NULL;
352 354
353 register_cleanup(); 355 register_cleanup();
354 356
355 while ((c = getopt_long(argc, argv, "ac:egl:n:N:t:T:w:", longopts, NULL)) != -1) { 357 while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts,
358 NULL)) != -1) {
356 switch (c) { 359 switch (c) {
357 case 'a': 360 case 'a':
358 autochannels = AUTOCHANNELS_ENABLED; 361 autochannels = AUTOCHANNELS_ENABLED;
359 break; 362 break;
363 case 'A':
364 autochannels = AUTOCHANNELS_ENABLED;
365 force_autochannels = true;
366 break;
360 case 'c': 367 case 'c':
361 errno = 0; 368 errno = 0;
362 num_loops = strtoul(optarg, &dummy, 10); 369 num_loops = strtoul(optarg, &dummy, 10);
@@ -519,15 +526,16 @@ int main(int argc, char **argv)
519 "diag %s\n", dev_dir_name); 526 "diag %s\n", dev_dir_name);
520 goto error; 527 goto error;
521 } 528 }
522 if (num_channels && autochannels == AUTOCHANNELS_ENABLED) { 529 if (num_channels && autochannels == AUTOCHANNELS_ENABLED &&
530 !force_autochannels) {
523 fprintf(stderr, "Auto-channels selected but some channels " 531 fprintf(stderr, "Auto-channels selected but some channels "
524 "are already activated in sysfs\n"); 532 "are already activated in sysfs\n");
525 fprintf(stderr, "Proceeding without activating any channels\n"); 533 fprintf(stderr, "Proceeding without activating any channels\n");
526 } 534 }
527 535
528 if (!num_channels && autochannels == AUTOCHANNELS_ENABLED) { 536 if ((!num_channels && autochannels == AUTOCHANNELS_ENABLED) ||
529 fprintf(stderr, 537 (autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
530 "No channels are enabled, enabling all channels\n"); 538 fprintf(stderr, "Enabling all channels\n");
531 539
532 ret = enable_disable_all_channels(dev_dir_name, 1); 540 ret = enable_disable_all_channels(dev_dir_name, 1);
533 if (ret) { 541 if (ret) {